Dataset columns: text_prompt (string, 157 to 13.1k characters) and code_prompt (string, 7 to 19.8k characters).
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_valid(self): """ Validates single instance. Returns boolean value and store errors in self.errors """
self.errors = []
for field in self.get_all_field_names_declared_by_user():
    getattr(type(self), field).is_valid(self, type(self), field)
    field_errors = getattr(type(self), field).errors(self)
    self.errors.extend(field_errors)
return len(self.errors) == 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(self): """ Run the analyses using the inputted values for forward and reverse read length. However, if not all strains pass the quality thresholds, continue to periodically run the analyses on these incomplete strains until either all strains are complete, or the sequencing run is finished """
logging.info('Starting {} analysis pipeline'.format(self.analysistype))
self.createobjects()
# Run the genesipping analyses
self.methods()
# Determine if the analyses are complete
self.complete()
self.additionalsipping()
# Update the report object
self.reports = Reports(self)
# Once all the analyses are complete, create reports for each sample
Reports.methodreporter(self.reports)
# Print the metadata
printer = MetadataPrinter(self)
printer.printmetadata()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def methods(self): """ Run the typing methods """
self.contamination_detection()
ReportImage(self, 'confindr')
self.run_genesippr()
ReportImage(self, 'genesippr')
self.run_sixteens()
self.run_mash()
self.run_gdcs()
ReportImage(self, 'gdcs')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def contamination_detection(self): """ Calculate the levels of contamination in the reads """
self.qualityobject = quality.Quality(self)
self.qualityobject.contamination_finder(input_path=self.sequencepath,
                                         report_path=self.reportpath)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_genesippr(self): """ Run the genesippr analyses """
GeneSippr(args=self,
          pipelinecommit=self.commit,
          startingtime=self.starttime,
          scriptpath=self.homepath,
          analysistype='genesippr',
          cutoff=0.95,
          pipeline=False,
          revbait=False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_sixteens(self): """ Run the 16S analyses using the filtered database """
SixteensFull(args=self,
             pipelinecommit=self.commit,
             startingtime=self.starttime,
             scriptpath=self.homepath,
             analysistype='sixteens_full',
             cutoff=0.985)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_mash(self): """ Run MASH to determine the closest refseq genomes """
self.pipeline = True
mash.Mash(inputobject=self,
          analysistype='mash')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def protected_view(view, info): """allows adding `protected=True` to a view_config`"""
if info.options.get('protected'):
    def wrapper_view(context, request):
        response = _advice(request)
        if response is not None:
            return response
        else:
            return view(context, request)
    return wrapper_view
return view
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def checkInstalledPip(package, speak=True, speakSimilar=True): """checks if a given package is installed on pip"""
packages = sorted([i.key for i in pip.get_installed_distributions()])
installed = package in packages
similar = None
if not installed:
    similar = [pkg for pkg in packages if package in pkg]
if speak:
    speakInstalledPackages(package, "pip", installed, similar, speakSimilar)
return (installed, similar)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def checkInstalledBrew(package, similar=True, speak=True, speakSimilar=True): """checks if a given package is installed on homebrew"""
packages = subprocess.check_output(['brew', 'list']).split()
installed = package in packages
similar = []
if not installed:
    similar = [pkg for pkg in packages if package in pkg]
if speak:
    speakInstalledPackages(package, "homebrew", installed, similar, speakSimilar)
return (installed, similar)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def placeholders(cls,dic): """Placeholders for fields names and value binds"""
keys = [str(x) for x in dic]
entete = ",".join(keys)
placeholders = ",".join(cls.named_style.format(x) for x in keys)
entete = f"({entete})"
placeholders = f"({placeholders})"
return entete, placeholders
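A hypothetical sketch of the expected output, assuming named_style is a psycopg-style "%({})s" named-bind template (an assumption, not stated in the source):

# Mimic placeholders() for a sample dict, with the assumed named_style.
dic = {"nom": "Ada", "age": 36}
keys = [str(x) for x in dic]
print("({})".format(",".join(keys)))                              # (nom,age)
print("({})".format(",".join("%({})s".format(k) for k in keys)))  # (%(nom)s,%(age)s)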
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def insert(table, datas, avoid_conflict=False): """ Insert row from datas :param table: Safe table name :param datas: List of dicts. :param avoid_conflict: Allows ignoring error if already exists (do nothing then) :return: """
if avoid_conflict:
    debut = """INSERT INTO {table} {ENTETE_INSERT} VALUES {BIND_INSERT} ON CONFLICT DO NOTHING"""
else:
    debut = """INSERT INTO {table} {ENTETE_INSERT} VALUES {BIND_INSERT} RETURNING *"""
l = [abstractRequetesSQL.formate(debut, table=table, INSERT=d, args=d) for d in datas if d]
return Executant(l)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update(cls,table, dic, Id): """ Update row with Id from table. Set fields given by dic."""
if dic:
    req = "UPDATE {table} SET {SET} WHERE id = " + cls.named_style.format('__id') + " RETURNING * "
    r = abstractRequetesSQL.formate(req, SET=dic, table=table, args=dict(dic, __id=Id))
    return MonoExecutant(r)
return MonoExecutant((f"SELECT * FROM {table} WHERE id = " + cls.named_style.format('__id'),
                      {"__id": Id}))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cree(table, dic, avoid_conflict=False): """ Create ONE row from dic and returns the entry created """
if avoid_conflict:
    req = """ INSERT INTO {table} {ENTETE_INSERT} VALUES {BIND_INSERT} ON CONFLICT DO NOTHING RETURNING *"""
else:
    req = """ INSERT INTO {table} {ENTETE_INSERT} VALUES {BIND_INSERT} RETURNING *"""
r = abstractRequetesSQL.formate(req, table=table, INSERT=dic, args=dic)
return MonoExecutant(r)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def trace(self, context, obj): """Enumerate the children of the given object, as would be visible and utilized by dispatch."""
root = obj
if isroutine(obj):
    yield Crumb(self, root, endpoint=True, handler=obj, options=opts(obj))
    return
for name, attr in getmembers(obj if isclass(obj) else obj.__class__):
    if name == '__getattr__':
        sig = signature(attr)
        path = '{' + list(sig.parameters.keys())[1] + '}'
        reta = sig.return_annotation
        if reta is not sig.empty:
            if callable(reta) and not isclass(reta):
                yield Crumb(self, root, path, endpoint=True, handler=reta, options=opts(reta))
            else:
                yield Crumb(self, root, path, handler=reta)
        else:
            yield Crumb(self, root, path, handler=attr)
        del sig, path, reta
        continue
    elif name == '__call__':
        yield Crumb(self, root, None, endpoint=True, handler=obj)
        continue
    if self.protect and name[0] == '_':
        continue
    yield Crumb(self, root, name, endpoint=callable(attr) and not isclass(attr), handler=attr, options=opts(attr))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def main(argv=None):
    '''
    Main entry-point for calling layouts directly as a program.
    '''
    # Prep argparse
    ap = argparse.ArgumentParser(
        description='Basic query options for Python HID-IO Layouts repository',
    )
    ap.add_argument('--list', action='store_true', help='List available layout aliases.')
    ap.add_argument('--get', metavar='NAME', help='Retrieve the given layout, and return the JSON data')

    # Parse arguments
    args = ap.parse_args(argv)

    # Create layouts context manager
    mgr = Layouts()

    # Check if generating a list
    if args.list:
        for name in mgr.list_layouts():
            print(name)

    # Retrieve JSON layout
    if args.get is not None:
        layout = mgr.get_layout(args.get)
        print(json.dumps(layout.json()))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def retrieve_github_cache(self, github_path, version, cache_dir, token): ''' Retrieves a cache of the layouts git repo from GitHub @param github_path: Location of the git repo on GitHub (e.g. hid-io/layouts) @param version: git reference for the version to download (e.g. master) @param cache_dir: Directory to operate on external cache from @param token: GitHub access token ''' # Check for environment variable Github token token = os.environ.get('GITHUB_APIKEY', None) # Retrieve repo information try: gh = Github(token) repo = gh.get_repo(github_path) commit = repo.get_commit(version) commits = repo.get_commits() total_commits = 0 commit_number = 0 for cmt in commits: if commit == cmt: commit_number = total_commits total_commits += 1 commit_number = total_commits - commit_number tar_url = repo.get_archive_link('tarball', commit.sha) except GithubException.RateLimitExceededException: if token is None: log.warning("GITHUB_APIKEY is not set!") raise # GitHub only uses the first 7 characters of the sha in the download dirname_orig = "{}-{}".format(github_path.replace('/', '-'), commit.sha[:7]) dirname_orig_path = os.path.join(cache_dir, dirname_orig) # Adding a commit number so it's clear which is the latest version without requiring git dirname = "{}-{}".format(commit_number, dirname_orig) dirname_path = os.path.join(cache_dir, dirname) # If directory doesn't exist, check if tarball does if not os.path.isdir(dirname_path): filename = "{}.tar.gz".format(dirname) filepath = os.path.join(cache_dir, filename) # If tarball doesn't exist, download it if not os.path.isfile(filepath): # Retrieve tar file chunk_size = 2000 req = requests.get(tar_url, stream=True) with open(filepath, 'wb') as infile: for chunk in req.iter_content(chunk_size): infile.write(chunk) # Extract tarfile tar = tarfile.open(filepath) tar.extractall(cache_dir) os.rename(dirname_orig_path, dirname_path) tar.close() # Remove tar.gz os.remove(filepath)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_layout(self, name): ''' Returns the layout with the given name ''' layout_chain = [] # Retrieve initial layout file try: json_data = self.json_files[self.layout_names[name]] except KeyError: log.error('Could not find layout: %s', name) log.error('Layouts path: %s', self.layout_path) raise layout_chain.append(Layout(name, json_data)) # Recursively locate parent layout files parent = layout_chain[-1].parent() while parent is not None: # Find the parent parent_path = None for path in self.json_file_paths: if os.path.normcase(os.path.normpath(parent)) in os.path.normcase(path): parent_path = path # Make sure a path was found if parent_path is None: raise UnknownLayoutPathException('Could not find: {}'.format(parent_path)) # Build layout for parent json_data = self.json_files[parent_path] layout_chain.append(Layout(parent_path, json_data)) # Check parent of parent parent = layout_chain[-1].parent() # Squash layout files layout = self.squash_layouts(layout_chain) return layout
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def dict_merge(self, merge_to, merge_in):
    '''
    Recursively merges two dicts

    Overwrites any non-dictionary items
    merge_to <- merge_in
    Modifies merge_to dictionary

    @param merge_to: Base dictionary to merge into
    @param merge_in: Dictionary that may overwrite elements in merge_to
    '''
    for key, value in merge_in.items():
        # Just add, if the key doesn't exist yet
        # Or if set to None/Null
        if key not in merge_to.keys() or merge_to[key] is None:
            merge_to[key] = copy.copy(value)
            continue

        # Overwrite case, check for types
        # Make sure types are matching
        if not isinstance(value, type(merge_to[key])):
            raise MergeException('Types do not match! {}: {} != {}'.format(key, type(value), type(merge_to[key])))

        # Check if this is a dictionary item, in which case recursively merge
        if isinstance(value, dict):
            self.dict_merge(merge_to[key], value)
            continue

        # Otherwise just overwrite
        merge_to[key] = copy.copy(value)
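A short usage sketch of the merge semantics described above; mgr stands in for an instance of the owning class and is an assumption, not something defined in the source:

# merge_in overwrites scalars; nested dictionaries are merged recursively.
base = {'a': 1, 'nested': {'x': 1, 'y': 2}}
extra = {'a': 9, 'nested': {'y': 5, 'z': 6}}
mgr.dict_merge(base, extra)
# base is now {'a': 9, 'nested': {'x': 1, 'y': 5, 'z': 6}}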
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def squash_layouts(self, layouts): ''' Returns a squashed layout The first element takes precedence (i.e. left to right). Dictionaries are recursively merged, overwrites only occur on non-dictionary entries. [0,1] 0: test: 'my data' 1: test: 'stuff' Result: test: 'my data' @param layouts: List of layouts to merge together @return: New layout with list of layouts squash merged ''' top_layout = layouts[0] json_data = {} # Generate a new container Layout layout = Layout(top_layout.name(), json_data, layouts) # Merge in each of the layouts for mlayout in reversed(layouts): # Overwrite all fields, *except* dictionaries # For dictionaries, keep recursing until non-dictionaries are found self.dict_merge(layout.json(), mlayout.json()) return layout
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def dict(self, name, key_caps=False, value_caps=False):
    '''
    Returns a JSON dict

    @key_caps: Converts all dictionary keys to uppercase
    @value_caps: Converts all dictionary values to uppercase

    @return: JSON item (may be a variable, list or dictionary)
    '''
    # Invalid Dictionary
    if not isinstance(self.json_data[name], dict):
        raise InvalidDictionaryException

    # Convert key and/or values of dictionary to uppercase
    output = {}
    for key, value in self.json_data[name].items():
        output[key.upper() if key_caps else key] = value.upper() if value_caps else value
    return output
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def locale(self):
    '''
    Do a lookup for the locale code that is set for this layout.

    NOTE: USB HID specifies only 35 different locales. If your layout does not fit,
    it should be set to Undefined/0

    @return: Tuple (<USB HID locale code>, <name>)
    '''
    name = self.json_data['hid_locale']

    # Set to Undefined/0 if not set
    if name is None:
        name = "Undefined"

    return (int(self.json_data['from_hid_locale'][name]), name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def compose(self, text, minimal_clears=False, no_clears=False): ''' Returns the sequence of combinations necessary to compose given text. If the text expression is not possible with the given layout an ComposeException is thrown. Iterate over the string, converting each character into a key sequence. Between each character, an empty combo is inserted to handle duplicate strings (and USB HID codes between characters) @param text: Input UTF-8 string @param minimal_clears: Set to True to minimize the number of code clears. False (default) includes a clear after every character. @param no_clears: Set to True to not add any code clears (useful for input sequences). False (default) to include code clears. @returns: Sequence of combinations needed to generate the given text string ''' sequence = [] clear = self.json_data['to_hid_keyboard']['0x00'] # No Event for char in text: # Make sure the composition element is available if char not in self.json_data['composition']: raise ComposeException("'{}' is not defined as a composition in the layout '{}'".format(char, self.name)) # Lookup the sequence to handle this character lookup = self.json_data['composition'][char] # If using minimal clears, check to see if we need to re-use any codes # Only need to check the most recent addition with the first combo if sequence and set(tuple(lookup[0])) & set(tuple(sequence[-1])) and not no_clears: sequence.extend([[clear]]) # Add to overall sequence sequence.extend(lookup) # Add empty combo for sequence splitting if not minimal_clears and not no_clears: # Blindly add a clear combo between characters sequence.extend([[clear]]) # When using minimal clears, we still need to add a final clear if minimal_clears and not no_clears: sequence.extend([[clear]]) return sequence
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def genus_specific(self): """ For genus-specific targets, MLST and serotyping, determine if the closest refseq genus is known - i.e. if 16S analyses have been performed. Perform the analyses if required """
# Initialise a variable to store whether the necessary analyses have already been performed
closestrefseqgenus = False
for sample in self.runmetadata.samples:
    if sample.general.bestassemblyfile != 'NA':
        try:
            closestrefseqgenus = sample.general.closestrefseqgenus
        except AttributeError:
            pass
# Perform the 16S analyses as required
if not closestrefseqgenus:
    logging.info('Must perform MASH analyses to determine genera of samples')
    self.pipeline = True
    # Run the analyses
    mash.Mash(self, 'mash')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pause(): """Tell iTunes to pause"""
if not settings.platformCompatible():
    return False
(output, error) = subprocess.Popen(["osascript", "-e", PAUSE],
                                   stdout=subprocess.PIPE).communicate()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def resume(): """Tell iTunes to resume"""
if not settings.platformCompatible():
    return False
(output, error) = subprocess.Popen(["osascript", "-e", RESUME],
                                   stdout=subprocess.PIPE).communicate()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def skip(): """Tell iTunes to skip a song"""
if not settings.platformCompatible():
    return False
(output, error) = subprocess.Popen(["osascript", "-e", SKIP],
                                   stdout=subprocess.PIPE).communicate()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def service_provider(*services): """ This is a class decorator that declares a class to provide a set of services. It is expected that the class has a no-arg constructor and will be instantiated as a singleton. """
def real_decorator(clazz):
    instance = clazz()
    for service in services:
        global_lookup.add(service, instance)
    return clazz
return real_decorator
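A minimal usage sketch, assuming the decorator above lives in the same module as a registry named global_lookup that exposes add(service, instance); the registry below is a hypothetical stand-in, not the real one:

class _Lookup:
    def __init__(self):
        self.services = {}
    def add(self, service, instance):
        self.services.setdefault(service, []).append(instance)

global_lookup = _Lookup()

@service_provider('greeter')
class Greeter:
    def hello(self):
        return 'hello'

# The class was instantiated once at decoration time and registered as a singleton.
assert isinstance(global_lookup.services['greeter'][0], Greeter)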
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def wsp(word):
    '''Return the number of unstressed heavy syllables.'''
    HEAVY = r'[ieaAoO]{1}[\.]*(u|y)[^ieaAoO]+(\.|$)'

    # # if the word is not monosyllabic, lop off the final syllable, which is
    # # extrametrical
    # if '.' in word:
    #     word = word[:word.rindex('.')]

    # gather the indices of syllable boundaries
    delimiters = [i for i, char in enumerate(word) if char == '.']
    if len(delimiters) % 2 != 0:
        delimiters.append(len(word))

    unstressed = []

    # gather the indices of unstressed positions
    for i, d in enumerate(delimiters):
        if i % 2 == 0:
            unstressed.extend(range(d + 1, delimiters[i + 1]))

    # find the number of unstressed heavy syllables
    heavies = re.finditer(HEAVY, word)
    violations = sum(1 for m in heavies if m.start(0) in unstressed)

    return violations
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def import_object_ns(name_space, import_str, *args, **kwargs): """Tries to import object from default namespace. Imports a class and return an instance of it, first by trying to find the class in a default namespace, then failing back to a full path if not found in the default namespace. """
import_value = "%s.%s" % (name_space, import_str)
try:
    return import_class(import_value)(*args, **kwargs)
except ImportError:
    return import_class(import_str)(*args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def install_vagrant_plugin(plugin, use_sudo=False): """ install vagrant plugin """
cmd = 'vagrant plugin install %s' % plugin
with settings(hide('running', 'stdout')):
    if use_sudo:
        if plugin not in sudo('vagrant plugin list'):
            sudo(cmd)
    else:
        if plugin not in run('vagrant plugin list'):
            run(cmd)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cree_widgets(self): """Create widgets and store them in self.widgets"""
for t in self.FIELDS:
    if type(t) is str:
        attr, kwargs = t, {}
    else:
        attr, kwargs = t[0], t[1].copy()
    self.champs.append(attr)
    is_editable = kwargs.pop("is_editable", self.is_editable)
    args = [self.acces[attr], is_editable]

    with_base = kwargs.pop("with_base", False)
    if with_base:
        args.append(self.acces.base)

    if 'with_label' in kwargs:
        label = kwargs.pop('with_label')
    else:
        label = ASSOCIATION[attr][0]

    if kwargs:
        w = ASSOCIATION[attr][3](*args, **kwargs)
    else:
        w = ASSOCIATION[attr][3](*args)
    self.widgets[attr] = (w, label)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cree_ws_lecture(self, champs_ligne): """Alternative to create read only widgets. They should be set after."""
for c in champs_ligne:
    label = ASSOCIATION[c][0]
    w = ASSOCIATION[c][3](self.acces[c], False)
    w.setObjectName("champ-lecture-seule-details")
    self.widgets[c] = (w, label)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def preservesurrogates(s): """ Function for splitting a string into a list of characters, preserving surrogate pairs. In python 2, unicode characters above 0x10000 are stored as surrogate pairs. For example, the Unicode character u"\U0001e900" is stored as the surrogate pair u"\ud83a\udd00": s = u"AB\U0001e900CD" len(s) -> 6 list(s) -> [u'A', u'B', u'\ud83a', u'\udd00', u'C', 'D'] len(preservesurrogates(s)) -> 5 list(preservesurrogates(s)) -> [u'A', u'B', u'\U0001e900', u'C', u'D'] :param s: String to split :return: List of characters """
if not isinstance(s, six.text_type):
    raise TypeError(u"String to split must be of type 'unicode'!")
surrogates_regex_str = u"[{0}-{1}][{2}-{3}]".format(HIGH_SURROGATE_START,
                                                    HIGH_SURROGATE_END,
                                                    LOW_SURROGATE_START,
                                                    LOW_SURROGATE_END)
surrogates_regex = re.compile(u"(?:{0})|.".format(surrogates_regex_str))
return surrogates_regex.findall(s)
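A usage sketch mirroring the example already given in the docstring, assuming the function above is importable (the narrow-build behaviour applies to Python 2):

s = u"AB\U0001e900CD"
print(list(s))                  # narrow builds: [u'A', u'B', u'\ud83a', u'\udd00', u'C', u'D']
print(preservesurrogates(s))    # [u'A', u'B', u'\U0001e900', u'C', u'D']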
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _unichr(i): """ Helper function for taking a Unicode scalar value and returning a Unicode character. :param s: Unicode scalar value to convert. :return: Unicode character """
if not isinstance(i, int):
    raise TypeError
try:
    return six.unichr(i)
except ValueError:
    # Workaround the error "ValueError: unichr() arg not in range(0x10000) (narrow Python build)"
    return struct.pack("i", i).decode("utf-32")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_nr_prefix(i): """ Helper function for looking up the derived name prefix associated with a Unicode scalar value. :param i: Unicode scalar value. :return: String with the derived name prefix. """
for lookup_range, prefix_string in _nr_prefix_strings.items():
    if i in lookup_range:
        return prefix_string
raise ValueError("No prefix string associated with {0}!".format(i))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def casefold(s, fullcasefold=True, useturkicmapping=False): """ Function for performing case folding. This function will take the input string s and return a copy of the string suitable for caseless comparisons. The input string must be of type 'unicode', otherwise a TypeError will be raised. For more information on case folding, see section 3.13 of the Unicode Standard. See also the following FAQ on the Unicode website: https://unicode.org/faq/casemap_charprop.htm By default, full case folding (where the string length may change) is done. It is possible to use simple case folding (single character mappings only) by setting the boolean parameter fullcasefold=False. By default, case folding does not handle the Turkic case of dotted vs dotless 'i'. To perform case folding using the special Turkic mappings, pass the boolean parameter useturkicmapping=True. For more info on the dotted vs dotless 'i', see the following web pages: https://en.wikipedia.org/wiki/Dotted_and_dotless_I http://www.i18nguy.com/unicode/turkish-i18n.html#problem :param s: String to transform :param fullcasefold: Boolean indicating if a full case fold (default is True) should be done. If False, a simple case fold will be performed. :param useturkicmapping: Boolean indicating if the special turkic mapping (default is False) for the dotted and dotless 'i' should be used. :return: Copy of string that has been transformed for caseless comparison. """
if not isinstance(s, six.text_type):
    raise TypeError(u"String to casefold must be of type 'unicode'!")
lookup_order = "CF"
if not fullcasefold:
    lookup_order = "CS"
if useturkicmapping:
    lookup_order = "T" + lookup_order
return u"".join([casefold_map.lookup(c, lookup_order=lookup_order) for c in preservesurrogates(s)])
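A brief usage sketch of the behaviour described in the docstring, assuming casefold() above is importable; the sample strings are illustrative:

assert casefold(u"straße") == casefold(u"STRASSE")                       # full fold expands ß to "ss"
assert casefold(u"straße", fullcasefold=False) != u"strasse"             # simple fold leaves ß untouched
assert casefold(u"DİYARBAKIR", useturkicmapping=True) == u"diyarbakır"   # Turkic dotted/dotless i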
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lookup(self, c, lookup_order="CF"): """ Function to lookup a character in the casefold map. The casefold map has four sub-tables, the 'C' or common table, the 'F' or full table, the 'S' or simple table and the 'T' or the Turkic special case table. These tables correspond to the statuses defined in the CaseFolding.txt file. We can specify the order of the tables to use for performing the lookup by the lookup_order parameter. Per the usage specified in the CaseFolding.txt file, we can use the 'C' and 'S' tables for doing a simple case fold. To perform a full case fold, we can use the 'C' and 'F' tables. The default behavior for this function is a full case fold (lookup_order="CF"). :param c: character to lookup :param lookup_order: """
if not isinstance(c, six.text_type):
    raise TypeError(u"Character to lookup must be of type 'unicode'!")
for d in lookup_order:
    try:
        return self._casefold_map[d][c]
    except KeyError:
        pass
return c
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def rank(syllabifications):
    '''Rank syllabifications.'''
    # def key(s):
    #     word = s[0]
    #     w = wsp(word)
    #     p = pk_prom(word)
    #     n = nuc(word)
    #     t = w + p + n
    #     print('%s\twsp: %s\tpk: %s\tnuc: %s\ttotal: %s' % (word, w, p, n, t))
    #     return w + p + n
    # syllabifications.sort(key=key)

    syllabifications.sort(key=lambda s: wsp(s[0]) + pk_prom(s[0]) + nuc(s[0]))

    return syllabifications
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(): """ Purge a single fastly url """
parser = OptionParser(description="Purge a single url from fastly.")
parser.add_option("-k", "--key", dest="apikey", default="", help="fastly api key")
parser.add_option("-H", "--host", dest="host", help="host to purge from")
parser.add_option("-p", "--path", dest="path", help="path to purge")
(options, args) = parser.parse_args()

for val in options.__dict__.values():
    if val is None:
        print "Missing required options"
        parser.print_help()
        sys.exit(1)

client = fastly.connect(options.apikey)
purge = client.purge_url(options.host, options.path)
print purge
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def populate(self, obj=None, section=None, parse_types=True): """Set attributes in ``obj`` with ``setattr`` from the all values in ``section``. """
section = self.default_section if section is None else section
obj = Settings() if obj is None else obj
is_dict = isinstance(obj, dict)
for k, v in self.get_options(section).items():
    if parse_types:
        if v == 'None':
            v = None
        elif self.FLOAT_REGEXP.match(v):
            v = float(v)
        elif self.INT_REGEXP.match(v):
            v = int(v)
        elif self.BOOL_REGEXP.match(v):
            v = v == 'True'
        else:
            m = self.EVAL_REGEXP.match(v)
            if m:
                evalstr = m.group(1)
                v = eval(evalstr)
    logger.debug('setting {} => {} on {}'.format(k, v, obj))
    if is_dict:
        obj[k] = v
    else:
        setattr(obj, k, v)
return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_calling_module(self): """Get the last module in the call stack that is not this module or ``None`` if the call originated from this module. """
for frame in inspect.stack():
    mod = inspect.getmodule(frame[0])
    logger.debug(f'calling module: {mod}')
    if mod is not None:
        mod_name = mod.__name__
        if mod_name != __name__:
            return mod
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def resource_filename(self, resource_name, module_name=None): """Return a resource based on a file name. This uses the ``pkg_resources`` package first to find the resources. If it doesn't find it, it returns a path on the file system. :param: resource_name the file name of the resource to obtain (or name if obtained from an installed module) :param module_name: the name of the module to obtain the data, which defaults to ``__name__`` :return: a path on the file system or resource of the installed module """
if module_name is None:
    mod = self._get_calling_module()
    logger.debug(f'calling module: {mod}')
    if mod is not None:
        mod_name = mod.__name__
if module_name is None:
    module_name = __name__
if pkg_resources.resource_exists(mod_name, resource_name):
    res = pkg_resources.resource_filename(mod_name, resource_name)
else:
    res = resource_name
return Path(res)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_options(self, section='default', opt_keys=None, vars=None): """ Get all options for a section. If ``opt_keys`` is given return only options with those keys. """
vars = vars if vars else self.default_vars
conf = self.parser
opts = {}
if opt_keys is None:
    if conf is None:
        opt_keys = {}
    else:
        if not self.robust or conf.has_section(section):
            opt_keys = conf.options(section)
        else:
            opt_keys = {}
else:
    logger.debug('conf: %s' % conf)
    copts = conf.options(section) if conf else {}
    opt_keys = set(opt_keys).intersection(set(copts))
for option in opt_keys:
    logger.debug(f'option: {option}, vars: {vars}')
    opts[option] = conf.get(section, option, vars=vars)
return opts
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_option(self, name, section=None, vars=None, expect=None): """Return an option from ``section`` with ``name``. :param section: section in the ini file to fetch the value; defaults to constructor's ``default_section`` """
vars = vars if vars else self.default_vars
if section is None:
    section = self.default_section
opts = self.get_options(section, opt_keys=[name], vars=vars)
if opts:
    return opts[name]
else:
    if self._narrow_expect(expect):
        raise ValueError('no option \'{}\' found in section {}'.format(name, section))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_option_list(self, name, section=None, vars=None, expect=None, separator=','): """Just like ``get_option`` but parse as a list using ``split``. """
val = self.get_option(name, section, vars, expect)
return val.split(separator) if val else []
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_option_int(self, name, section=None, vars=None, expect=None): """Just like ``get_option`` but parse as an integer."""
val = self.get_option(name, section, vars, expect)
if val:
    return int(val)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_option_float(self, name, section=None, vars=None, expect=None): """Just like ``get_option`` but parse as a float."""
val = self.get_option(name, section, vars, expect)
if val:
    return float(val)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_option_path(self, name, section=None, vars=None, expect=None): """Just like ``get_option`` but return a ``pathlib.Path`` object of the string. """
val = self.get_option(name, section, vars, expect)
return Path(val)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def property_get( prop, instance, **kwargs ): """Wrapper for property reads which auto-dereferences Refs if required. prop A Ref (which gets dereferenced and returned) or any other value (which gets returned). instance The context object used to dereference the Ref. """
if isinstance(prop, Ref):
    return prop.get(instance, **kwargs)
return prop
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def property_set( prop, instance, value, **kwargs ): """Wrapper for property writes which auto-deferences Refs. prop A Ref (which gets dereferenced and the target value set). instance The context object used to dereference the Ref. value The value to set the property to. Throws AttributeError if prop is not a Ref. """
if isinstance(prop, Ref):
    return prop.set(instance, value, **kwargs)
raise AttributeError("can't change value of constant {} (context: {})".format(prop, instance))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get( self, instance, **kwargs ): """Return an attribute from an object using the Ref path. instance The object instance to traverse. """
target = instance
for attr in self._path:
    target = getattr(target, attr)
return target
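The traversal is a chained getattr over the stored attribute path; a self-contained illustration with hypothetical objects:

from types import SimpleNamespace
root = SimpleNamespace(child=SimpleNamespace(size=4))
target = root
for attr in ('child', 'size'):
    target = getattr(target, attr)
assert target == 4   # what a Ref built over the path ('child', 'size') would return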
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set( self, instance, value, **kwargs ): """Set an attribute on an object using the Ref path. instance The object instance to traverse. value The value to set. Throws AttributeError if allow_write is False. """
if not self._allow_write:
    raise AttributeError("can't set Ref directly, allow_write is disabled")
target = instance
for attr in self._path[:-1]:
    target = getattr(target, attr)
setattr(target, self._path[-1], value)
return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def traverse_until_fixpoint(predicate, tree): """Traverses the tree again and again until it is not modified."""
old_tree = None
tree = simplify(tree)
while tree and old_tree != tree:
    old_tree = tree
    tree = tree.traverse(predicate)
    if not tree:
        return None
    tree = simplify(tree)
return tree
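The same iterate-until-nothing-changes pattern in isolation; the helper below is a generic, hypothetical sketch, not the tree API used above:

def until_fixpoint(step, value):
    previous = None
    while value is not None and value != previous:
        previous, value = value, step(value)
    return value

# Example: repeatedly strip leading zeros until the string stops changing.
assert until_fixpoint(lambda s: s.lstrip('0') or None, '00042') == '42'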
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fasta(self): """ Create FASTA files of the PointFinder results to be fed into PointFinder """
logging.info('Extracting FASTA sequences matching PointFinder database') for sample in self.runmetadata.samples: # Ensure that there are sequence data to extract from the GenObject if GenObject.isattr(sample[self.analysistype], 'sequences'): # Set the name of the FASTA file sample[self.analysistype].pointfinderfasta = \ os.path.join(sample[self.analysistype].outputdir, '{seqid}_pointfinder.fasta'.format(seqid=sample.name)) # Create a list to store all the SeqRecords created sequences = list() with open(sample[self.analysistype].pointfinderfasta, 'w') as fasta: for gene, sequence in sample[self.analysistype].sequences.items(): # Create a SeqRecord using a Seq() of the sequence - both SeqRecord and Seq are from BioPython seq = SeqRecord(seq=Seq(sequence), id=gene, name=str(), description=str()) sequences.append(seq) # Write all the SeqRecords to file SeqIO.write(sequences, fasta, 'fasta')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_pointfinder(self): """ Run PointFinder on the FASTA sequences extracted from the raw reads """
logging.info('Running PointFinder on FASTA files') for i in range(len(self.runmetadata.samples)): # Start threads threads = Thread(target=self.pointfinder_threads, args=()) # Set the daemon to True - something to do with thread management threads.setDaemon(True) # Start the threading threads.start() # PointFinder requires the path to the blastn executable blast_path = shutil.which('blastn') for sample in self.runmetadata.samples: # Ensure that the attribute storing the name of the FASTA file has been created if GenObject.isattr(sample[self.analysistype], 'pointfinderfasta'): sample[self.analysistype].pointfinder_outputs = os.path.join(sample[self.analysistype].outputdir, 'pointfinder_outputs') # Don't run the analyses if the outputs have already been created if not os.path.isfile(os.path.join(sample[self.analysistype].pointfinder_outputs, '{samplename}_blastn_results.tsv'.format(samplename=sample.name))): make_path(sample[self.analysistype].pointfinder_outputs) # Create and run the PointFinder system call pointfinder_cmd = \ 'python -m pointfinder.PointFinder -i {input} -s {species} -p {db_path} -m blastn ' \ '-o {output_dir} -m_p {blast_path}'\ .format(input=sample[self.analysistype].pointfinderfasta, species=sample[self.analysistype].pointfindergenus, db_path=self.targetpath, output_dir=sample[self.analysistype].pointfinder_outputs, blast_path=blast_path) self.queue.put(pointfinder_cmd) self.queue.join()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_pointfinder(self): """ Create summary reports for the PointFinder outputs """
# Create the nested dictionary that stores the necessary values for creating summary reports self.populate_summary_dict() # Clear out any previous reports for organism in self.summary_dict: for report in self.summary_dict[organism]: try: os.remove(self.summary_dict[organism][report]['summary']) except FileNotFoundError: pass for sample in self.runmetadata.samples: # Find the PointFinder outputs. If the outputs don't exist, create the appropriate entries in the # summary dictionary as required try: self.summary_dict[sample.general.referencegenus]['prediction']['output'] = \ glob(os.path.join(sample[self.analysistype].pointfinder_outputs, '{seq}*prediction.txt' .format(seq=sample.name)))[0] except IndexError: try: self.summary_dict[sample.general.referencegenus]['prediction']['output'] = str() except KeyError: self.populate_summary_dict(genus=sample.general.referencegenus, key='prediction') try: self.summary_dict[sample.general.referencegenus]['table']['output'] = \ glob(os.path.join(sample[self.analysistype].pointfinder_outputs, '{seq}*table.txt' .format(seq=sample.name)))[0] except IndexError: try: self.summary_dict[sample.general.referencegenus]['table']['output'] = str() except KeyError: self.populate_summary_dict(genus=sample.general.referencegenus, key='table') try: self.summary_dict[sample.general.referencegenus]['results']['output'] = \ glob(os.path.join(sample[self.analysistype].pointfinder_outputs, '{seq}*results.tsv' .format(seq=sample.name)))[0] except IndexError: try: self.summary_dict[sample.general.referencegenus]['results']['output'] = str() except KeyError: self.populate_summary_dict(genus=sample.general.referencegenus, key='results') # Process the predictions self.write_report(summary_dict=self.summary_dict, seqid=sample.name, genus=sample.general.referencegenus, key='prediction') # Process the results summary self.write_report(summary_dict=self.summary_dict, seqid=sample.name, genus=sample.general.referencegenus, key='results') # Process the table summary self.write_table_report(summary_dict=self.summary_dict, seqid=sample.name, genus=sample.general.referencegenus)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sequence_prep(self): """ Create metadata objects for all PacBio assembly FASTA files in the sequencepath. Create individual subdirectories for each sample. Relative symlink the original FASTA file to the appropriate subdirectory """
# Create a sorted list of all the FASTA files in the sequence path strains = sorted(glob(os.path.join(self.fastapath, '*.fa*'.format(self.fastapath)))) for sample in strains: # Create the object metadata = MetadataObject() # Set the sample name to be the file name of the sequence by removing the path and file extension sample_name = os.path.splitext(os.path.basename(sample))[0] if sample_name in self.strainset: # Extract the OLNID from the dictionary using the SEQID samplename = self.straindict[sample_name] # samplename = sample_name # Set and create the output directory outputdir = os.path.join(self.path, samplename) make_path(outputdir) # Set the name of the JSON file json_metadata = os.path.join(outputdir, '{name}.json'.format(name=samplename)) if not os.path.isfile(json_metadata): # Create the name and output directory attributes metadata.name = samplename metadata.seqid = sample_name metadata.outputdir = outputdir metadata.jsonfile = json_metadata # Set the name of the FASTA file to use in the analyses metadata.bestassemblyfile = os.path.join(metadata.outputdir, '{name}.fasta'.format(name=metadata.name)) # Symlink the original file to the output directory relative_symlink(sample, outputdir, '{sn}.fasta'.format(sn=metadata.name)) # Associate the corresponding FASTQ files with the assembly metadata.fastqfiles = sorted(glob(os.path.join(self.fastqpath, '{name}*.gz'.format(name=metadata.name)))) metadata.forward_fastq, metadata.reverse_fastq = metadata.fastqfiles # Write the object to file self.write_json(metadata) else: metadata = self.read_json(json_metadata) # Add the metadata object to the list of objects self.metadata.append(metadata)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def assembly_length(self): """ Use SeqIO.parse to extract the total number of bases in each assembly file """
for sample in self.metadata:
    # Only determine the assembly length if it has not been previously calculated
    if not GenObject.isattr(sample, 'assembly_length'):
        # Create the assembly_length attribute, and set it to 0
        sample.assembly_length = 0
        for record in SeqIO.parse(sample.bestassemblyfile, 'fasta'):
            # Update the assembly_length attribute with the length of the current contig
            sample.assembly_length += len(record.seq)
        # Write the updated object to file
        self.write_json(sample)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sample_reads(self): """ For each PacBio assembly, sample reads from corresponding FASTQ files for appropriate forward and reverse lengths and sequencing depths using reformat.sh from the bbtools suite """
logging.info('Read sampling') for sample in self.metadata: # Iterate through all the desired depths of coverage for depth in self.read_depths: for read_pair in self.read_lengths: # Set the name of the output directory sample.sampled_reads[depth][read_pair].sampled_outputdir \ = os.path.join(sample.sampled_reads[depth][read_pair].outputdir, 'sampled') # Set the name of the forward reads - include the depth and read length information sample.sampled_reads[depth][read_pair].forward_reads.fastq = \ os.path.join(sample.sampled_reads[depth][read_pair].sampled_outputdir, '{name}_{depth}_{read_pair}_R1.fastq.gz' .format(name=sample.name, depth=depth, read_pair=read_pair)) # Reverse reads sample.sampled_reads[depth][read_pair].reverse_reads.fastq = \ os.path.join(sample.sampled_reads[depth][read_pair].sampled_outputdir, '{name}_{depth}_{read_pair}_R2.fastq.gz' .format(name=sample.name, depth=depth, read_pair=read_pair)) logging.info( 'Sampling {num_reads} paired reads for sample {name} with the following parameters:\n' 'depth {dp}, forward reads {fl}bp, and reverse reads {rl}bp' .format(num_reads=sample.simulated_reads[depth][read_pair].num_reads, dp=depth, name=sample.name, fl=sample.sampled_reads[depth][read_pair].forward_reads.length, rl=sample.sampled_reads[depth][read_pair].reverse_reads.length)) # Use the reformat method in the OLCTools bbtools wrapper # Note that upsample=t is used to ensure that the target number of reads (samplereadstarget) is met if not os.path.isfile(sample.sampled_reads[depth][read_pair].forward_reads.trimmed_sampled_fastq): out, \ err, \ sample.sampled_reads[depth][read_pair].sample_call = bbtools \ .reformat_reads(forward_in=sample.sampled_reads[depth][read_pair].trimmed_forwardfastq, reverse_in=sample.sampled_reads[depth][read_pair].trimmed_reversefastq, forward_out=sample.sampled_reads[depth][read_pair].forward_reads.fastq, reverse_out=sample.sampled_reads[depth][read_pair].reverse_reads.fastq, returncmd=True, **{'samplereadstarget': sample.simulated_reads[depth][read_pair].num_reads, 'upsample': 't', 'minlength': sample.sampled_reads[depth][read_pair].forward_reads.length, 'ziplevel': '9', 'tossbrokenreads': 't', 'tossjunk': 't', 'Xmx': self.mem } ) # # Remove the trimmed reads, as they are no longer necessary # try: # os.remove(sample.sampled_reads[depth][read_pair].trimmed_forwardfastq) # os.remove(sample.sampled_reads[depth][read_pair].trimmed_reversefastq) # except FileNotFoundError: # pass # Update the JSON file self.write_json(sample)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_genesippr(self): """ Run GeneSippr on each of the samples """
from pathlib import Path home = str(Path.home()) logging.info('GeneSippr') # These unfortunate hard coded paths appear to be necessary miniconda_path = os.path.join(home, 'miniconda3') miniconda_path = miniconda_path if os.path.isdir(miniconda_path) else os.path.join(home, 'miniconda') logging.debug(miniconda_path) activate = 'source {mp}/bin/activate {mp}/envs/sipprverse'.format(mp=miniconda_path) sippr_path = '{mp}/envs/sipprverse/bin/sippr.py'.format(mp=miniconda_path) for sample in self.metadata: logging.info(sample.name) # Run the pipeline. Check to make sure that the serosippr report, which is created last doesn't exist if not os.path.isfile(os.path.join(sample.genesippr_dir, 'reports', 'genesippr.csv')): cmd = 'python {py_path} -o {outpath} -s {seqpath} -r {refpath} -F'\ .format(py_path=sippr_path, outpath=sample.genesippr_dir, seqpath=sample.genesippr_dir, refpath=self.referencefilepath ) logging.critical(cmd) # Create another shell script to execute within the PlasmidExtractor conda environment template = "#!/bin/bash\n{activate} && {cmd}".format(activate=activate, cmd=cmd) genesippr_script = os.path.join(sample.genesippr_dir, 'run_genesippr.sh') with open(genesippr_script, 'w+') as file: file.write(template) # Modify the permissions of the script to allow it to be run on the node self.make_executable(genesippr_script) # Run shell script os.system('/bin/bash {}'.format(genesippr_script))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_level(self, level): """ Set the logging level of this logger. :param level: must be an int or a str. """
for handler in self.__coloredlogs_handlers:
    handler.setLevel(level=level)
self.logger.setLevel(level=level)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def disable_logger(self, disabled=True): """ Disable all logging calls. """
# Disable standard IO streams
if disabled:
    sys.stdout = _original_stdout
    sys.stderr = _original_stderr
else:
    sys.stdout = self.__stdout_stream
    sys.stderr = self.__stderr_stream

# Disable handlers
self.logger.disabled = disabled
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def redirect_stdout(self, enabled=True, log_level=logging.INFO): """ Redirect sys.stdout to file-like object. """
if enabled:
    if self.__stdout_wrapper:
        self.__stdout_wrapper.update_log_level(log_level=log_level)
    else:
        self.__stdout_wrapper = StdOutWrapper(logger=self, log_level=log_level)
    self.__stdout_stream = self.__stdout_wrapper
else:
    self.__stdout_stream = _original_stdout

# Assign the new stream to sys.stdout
sys.stdout = self.__stdout_stream
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def redirect_stderr(self, enabled=True, log_level=logging.ERROR): """ Redirect sys.stderr to file-like object. """
if enabled:
    if self.__stderr_wrapper:
        self.__stderr_wrapper.update_log_level(log_level=log_level)
    else:
        self.__stderr_wrapper = StdErrWrapper(logger=self, log_level=log_level)
    self.__stderr_stream = self.__stderr_wrapper
else:
    self.__stderr_stream = _original_stderr

# Assign the new stream to sys.stderr
sys.stderr = self.__stderr_stream
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def use_file(self, enabled=True, file_name=None, level=logging.WARNING, when='d', interval=1, backup_count=30, delay=False, utc=False, at_time=None, log_format=None, date_format=None): """ Handler for logging to a file, rotating the log file at certain timed intervals. """
if enabled: if not self.__file_handler: assert file_name, 'File name is missing!' # Create new TimedRotatingFileHandler instance kwargs = { 'filename': file_name, 'when': when, 'interval': interval, 'backupCount': backup_count, 'encoding': 'UTF-8', 'delay': delay, 'utc': utc, } if sys.version_info[0] >= 3: kwargs['atTime'] = at_time self.__file_handler = TimedRotatingFileHandler(**kwargs) # Use this format for default case if not log_format: log_format = '%(asctime)s %(name)s[%(process)d] ' \ '%(programname)s/%(module)s/%(funcName)s[%(lineno)d] ' \ '%(levelname)s %(message)s' # Set formatter formatter = logging.Formatter(fmt=log_format, datefmt=date_format) self.__file_handler.setFormatter(fmt=formatter) # Set level for this handler self.__file_handler.setLevel(level=level) # Add this handler to logger self.add_handler(hdlr=self.__file_handler) elif self.__file_handler: # Remove handler from logger self.remove_handler(hdlr=self.__file_handler) self.__file_handler = None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def use_loggly(self, enabled=True, loggly_token=None, loggly_tag=None, level=logging.WARNING, log_format=None, date_format=None): """ Enable handler for sending the record to Loggly service. """
if enabled: if not self.__loggly_handler: assert loggly_token, 'Loggly token is missing!' # Use logger name for default Loggly tag if not loggly_tag: loggly_tag = self.name # Create new LogglyHandler instance self.__loggly_handler = LogglyHandler(token=loggly_token, tag=loggly_tag) # Use this format for default case if not log_format: log_format = '{"name":"%(name)s","process":"%(process)d",' \ '"levelname":"%(levelname)s","time":"%(asctime)s",' \ '"filename":"%(filename)s","programname":"%(programname)s",' \ '"module":"%(module)s","funcName":"%(funcName)s",' \ '"lineno":"%(lineno)d","message":"%(message)s"}' # Set formatter formatter = logging.Formatter(fmt=log_format, datefmt=date_format) self.__loggly_handler.setFormatter(fmt=formatter) # Set level for this handler self.__loggly_handler.setLevel(level=level) # Add this handler to logger self.add_handler(hdlr=self.__loggly_handler) elif self.__loggly_handler: # Remove handler from logger self.remove_handler(hdlr=self.__loggly_handler) self.__loggly_handler = None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _log(self, level, msg, *args, **kwargs): """ Log 'msg % args' with the integer severity 'level'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.log(level, "We have a %s", "mysterious problem", exc_info=1) """
if not isinstance(level, int): if logging.raiseExceptions: raise TypeError('Level must be an integer!') else: return if self.logger.isEnabledFor(level=level): """ Low-level logging routine which creates a LogRecord and then calls all the handlers of this logger to handle the record. """ exc_info = kwargs.get('exc_info', None) extra = kwargs.get('extra', None) stack_info = kwargs.get('stack_info', False) record_filter = kwargs.get('record_filter', None) tb_info = None if _logone_src: # IronPython doesn't track Python frames, so findCaller raises an # exception on some versions of IronPython. We trap it here so that # IronPython can use logging. try: fn, lno, func, tb_info = self.__find_caller(stack_info=stack_info) except ValueError: # pragma: no cover fn, lno, func = '(unknown file)', 0, '(unknown function)' else: # pragma: no cover fn, lno, func = '(unknown file)', 0, '(unknown function)' if exc_info: if sys.version_info[0] >= 3: if isinstance(exc_info, BaseException): # noinspection PyUnresolvedReferences exc_info = type(exc_info), exc_info, exc_info.__traceback__ elif not isinstance(exc_info, tuple): exc_info = sys.exc_info() else: if not isinstance(exc_info, tuple): exc_info = sys.exc_info() if sys.version_info[0] >= 3: # noinspection PyArgumentList record = self.logger.makeRecord(self.name, level, fn, lno, msg, args, exc_info, func, extra, tb_info) else: record = self.logger.makeRecord(self.name, level, fn, lno, msg, args, exc_info, func, extra) if record_filter: record = record_filter(record) self.logger.handle(record=record)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def flush(self): """ Flush the buffer, if applicable. """
if self.__buffer.tell() > 0: # Write the buffer to log # noinspection PyProtectedMember self.__logger._log(level=self.__log_level, msg=self.__buffer.getvalue().strip(), record_filter=StdErrWrapper.__filter_record) # Remove the old buffer self.__buffer.truncate(0) self.__buffer.seek(0)
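A self-contained sketch of the buffer-and-flush pattern used above; the logger name and buffered text are assumptions chosen for illustration.

import io
import logging

logging.basicConfig(level=logging.INFO)
buffer = io.StringIO()
buffer.write('captured stderr text\n')

# Flush: emit the buffered text as a single log record, then reset the buffer.
if buffer.tell() > 0:
    logging.getLogger('stderr').log(logging.ERROR, buffer.getvalue().strip())
    buffer.truncate(0)
    buffer.seek(0)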
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def format(self, record):
        """Uses the context format string if request_id is set, otherwise the default."""
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color'):
    if key not in record.__dict__:
        record.__dict__[key] = ''
if record.__dict__.get('request_id', None):
    self._fmt = CONF.logging_context_format_string
else:
    self._fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and CONF.logging_debug_format_suffix):
    self._fmt += " " + CONF.logging_debug_format_suffix
# Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
    record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def formatException(self, exc_info, record=None): """Format exception output with CONF.logging_exception_prefix."""
if not record: return logging.Formatter.formatException(self, exc_info) stringbuffer = cStringIO.StringIO() traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], None, stringbuffer) lines = stringbuffer.getvalue().split('\n') stringbuffer.close() if CONF.logging_exception_prefix.find('%(asctime)') != -1: record.asctime = self.formatTime(record, self.datefmt) formatted_lines = [] for line in lines: pl = CONF.logging_exception_prefix % record.__dict__ fl = '%s%s' % (pl, line) formatted_lines.append(fl) return '\n'.join(formatted_lines)
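A stand-alone sketch of the line-prefixing idea, without the CONF-driven prefix of the method above; the prefix string and helper name are illustrative assumptions.

import traceback

def prefix_traceback(exc, prefix='TRACE: '):
    # Format the exception, then prepend the prefix to every traceback line.
    lines = traceback.format_exception(type(exc), exc, exc.__traceback__)
    return ''.join(prefix + line for line in ''.join(lines).splitlines(True))

try:
    1 / 0
except ZeroDivisionError as err:
    print(prefix_traceback(err))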
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_interface(self, interface): """Add update toolbar callback to the interface"""
self.interface = interface self.interface.callbacks.update_toolbar = self._update self._update()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def _update(self):
        """Update the display of buttons after querying data from the interface"""
self.clear() self._set_boutons_communs() if self.interface: self.addSeparator() l_actions = self.interface.get_actions_toolbar() self._set_boutons_interface(l_actions)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_response(self, status, content_type, response): """Shortcut for making a response to the client's request."""
headers = [('Access-Control-Allow-Origin', '*'), ('Access-Control-Allow-Methods', 'GET, POST, OPTIONS'), ('Access-Control-Allow-Headers', 'Content-Type'), ('Access-Control-Max-Age', '86400'), ('Content-type', content_type) ] self.start_response(status, headers) return [response.encode()]
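A rough usage sketch of the same CORS response pattern in a bare WSGI callable; the application below and its payload are assumptions for illustration, not part of the module.

def application(environ, start_response):
    headers = [('Access-Control-Allow-Origin', '*'),
               ('Access-Control-Allow-Methods', 'GET, POST, OPTIONS'),
               ('Access-Control-Allow-Headers', 'Content-Type'),
               ('Content-type', 'application/json')]
    start_response('200 OK', headers)
    return ['{"ok": true}'.encode()]

# from wsgiref.simple_server import make_server
# make_server('localhost', 8080, application).serve_forever()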
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_request(self, request): """Processes a request."""
try: request = Request.from_json(request.read().decode()) except ValueError: raise ClientError('Data is not valid JSON.') except KeyError: raise ClientError('Missing mandatory field in request object.') except AttributeNotProvided as exc: raise ClientError('Attribute not provided: %s.' % exc.args[0]) (start_wall_time, start_process_time) = self._get_times() answers = self.router_class(request).answer() self._add_times_to_answers(answers, start_wall_time, start_process_time) answers = [x.as_dict() for x in answers] return self.make_response('200 OK', 'application/json', json.dumps(answers) )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def on_post(self): """Extracts the request, feeds the module, and returns the response."""
request = self.environ['wsgi.input'] try: return self.process_request(request) except ClientError as exc: return self.on_client_error(exc) except BadGateway as exc: return self.on_bad_gateway(exc) except InvalidConfig: raise except Exception as exc: # pragma: no cover # pylint: disable=W0703 logging.error('Unknown exception: ', exc_info=exc) return self.on_internal_error()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dispatch(self): """Handles dispatching of the request."""
method_name = 'on_' + self.environ['REQUEST_METHOD'].lower() method = getattr(self, method_name, None) if method: return method() else: return self.on_bad_method()
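The getattr-based dispatch idiom in isolation, with stand-in handler names assumed purely for illustration:

class Handler:
    def on_get(self):
        return 'GET handled'

    def on_bad_method(self):
        return '405 Method Not Allowed'

    def dispatch(self, request_method):
        # Look up 'on_<method>' and fall back to the bad-method handler.
        method = getattr(self, 'on_' + request_method.lower(), None)
        return method() if method else self.on_bad_method()

print(Handler().dispatch('GET'))     # GET handled
print(Handler().dispatch('DELETE'))  # 405 Method Not Allowed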
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def serialised( self ): """Tuple containing the contents of the Block."""
klass = self.__class__ return ((klass.__module__, klass.__name__), tuple( (name, field.serialise( self._field_data[name], parent=self ) ) for name, field in klass._fields.items()))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clone_data( self, source ): """Clone data from another Block. source Block instance to copy from. """
klass = self.__class__ assert isinstance( source, klass ) for name in klass._fields: self._field_data[name] = getattr( source, name )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def import_data( self, raw_buffer ): """Import data from a byte array. raw_buffer Byte array to import from. """
klass = self.__class__ if raw_buffer: assert common.is_bytes( raw_buffer ) # raw_buffer = memoryview( raw_buffer ) self._field_data = {} for name in klass._fields: if raw_buffer: self._field_data[name] = klass._fields[name].get_from_buffer( raw_buffer, parent=self ) else: self._field_data[name] = klass._fields[name].default if raw_buffer: for name, check in klass._checks.items(): check.check_buffer( raw_buffer, parent=self ) # if we have debug logging on, check the roundtrip works if logger.isEnabledFor( logging.INFO ): test = self.export_data() if logger.getEffectiveLevel() <= logging.DEBUG: logger.debug( 'Stats for {}:'.format( self ) ) logger.debug( 'Import buffer size: {}'.format( len( raw_buffer ) ) ) logger.debug( 'Export size: {}'.format( len( test ) ) ) if test == raw_buffer: logger.debug( 'Content: exact match!' ) elif test == raw_buffer[:len( test )]: logger.debug( 'Content: partial match!' ) else: logger.debug( 'Content: different!' ) for x in utils.hexdump_diff_iter( raw_buffer[:len( test )], test ): logger.debug( x ) elif test != raw_buffer[:len( test )]: logger.info( '{} export produced changed output from import'.format( self ) ) # if raw_buffer: # raw_buffer.release() return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def export_data( self ): """Export data to a byte array."""
klass = self.__class__ output = bytearray( b'\x00'*self.get_size() ) # prevalidate all data before export. # this is important to ensure that any dependent fields # are updated beforehand, e.g. a count referenced # in a BlockField queue = [] for name in klass._fields: self.scrub_field( name ) self.validate_field( name ) self.update_deps() for name in klass._fields: klass._fields[name].update_buffer_with_value( self._field_data[name], output, parent=self ) for name, check in klass._checks.items(): check.update_buffer( output, parent=self ) return output
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_deps( self ): """Update dependencies on all the fields on this Block instance."""
klass = self.__class__ for name in klass._fields: self.update_deps_on_field( name ) return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate( self ): """Validate all the fields on this Block instance."""
klass = self.__class__ for name in klass._fields: self.validate_field( name ) return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self, path, compressed=True, exist_ok=False): """ Save the GADDAG to file. Args: path: path to save the GADDAG to. compressed: compress the saved GADDAG using gzip. exist_ok: overwrite existing file at `path`. """
path = os.path.expandvars(os.path.expanduser(path)) if os.path.isfile(path) and not exist_ok: raise OSError(17, os.strerror(17), path) if os.path.isdir(path): path = os.path.join(path, "out.gdg") if compressed: bytes_written = cgaddag.gdg_save_compressed(self.gdg, path.encode("ascii")) else: bytes_written = cgaddag.gdg_save(self.gdg, path.encode("ascii")) if bytes_written == -1: errno = ctypes.c_int.in_dll(ctypes.pythonapi, "errno").value raise OSError(errno, os.strerror(errno), path) return bytes_written
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self, path): """ Load a GADDAG from file, replacing the words currently in this GADDAG. Args: path: path to saved GADDAG to be loaded. """
path = os.path.expandvars(os.path.expanduser(path)) gdg = cgaddag.gdg_load(path.encode("ascii")) if not gdg: errno = ctypes.c_int.in_dll(ctypes.pythonapi, "errno").value raise OSError(errno, os.strerror(errno), path) self.__del__() self.gdg = gdg.contents
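A hedged usage sketch of the save/load pair; the import path, constructor call and file location are assumptions, since they are not shown in this section.

from gaddag import GADDAG  # assumed import path

gdg = GADDAG()             # assumed constructor
gdg.add_word("python")
bytes_written = gdg.save("~/words.gdg", compressed=True, exist_ok=True)
gdg.load("~/words.gdg")    # replaces the words currently in this GADDAG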
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def starts_with(self, prefix): """ Find all words starting with a prefix. Args: prefix: A prefix to be searched for. Returns: A list of all words found. """
prefix = prefix.lower() found_words = [] res = cgaddag.gdg_starts_with(self.gdg, prefix.encode(encoding="ascii")) tmp = res while tmp: word = tmp.contents.str.decode("ascii") found_words.append(word) tmp = tmp.contents.next cgaddag.gdg_destroy_result(res) return found_words
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def contains(self, sub): """ Find all words containing a substring. Args: sub: A substring to be searched for. Returns: A list of all words found. """
sub = sub.lower() found_words = set() res = cgaddag.gdg_contains(self.gdg, sub.encode(encoding="ascii")) tmp = res while tmp: word = tmp.contents.str.decode("ascii") found_words.add(word) tmp = tmp.contents.next cgaddag.gdg_destroy_result(res) return list(found_words)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ends_with(self, suffix): """ Find all words ending with a suffix. Args: suffix: A suffix to be searched for. Returns: A list of all words found. """
suffix = suffix.lower() found_words = [] res = cgaddag.gdg_ends_with(self.gdg, suffix.encode(encoding="ascii")) tmp = res while tmp: word = tmp.contents.str.decode("ascii") found_words.append(word) tmp = tmp.contents.next cgaddag.gdg_destroy_result(res) return found_words
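A short usage sketch of the three query methods, assuming a GADDAG instance `gdg` already populated with an English word list; the results shown are illustrative.

gdg.starts_with("pre")   # e.g. ['prefix', 'present', ...]
gdg.contains("fix")      # e.g. ['prefix', 'suffix', ...]
gdg.ends_with("ing")     # e.g. ['reading', 'writing', ...]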
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_word(self, word): """ Add a word to the GADDAG. Args: word: A word to be added to the GADDAG. """
word = word.lower() if not (word.isascii() and word.isalpha()): raise ValueError("Invalid character in word '{}'".format(word)) word = word.encode(encoding="ascii") result = cgaddag.gdg_add_word(self.gdg, word) if result == 1: raise ValueError("Invalid character in word '{}'".format(word)) elif result == 2: raise MemoryError("Out of memory, GADDAG is in an undefined state")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def formatLog(source="", level="", title="", data={}): """ Similar to format, but takes additional reserved params to promote logging best-practices :param level - severity of message - how bad is it? :param source - application context - where did it come from? :param title - brief description - what kind of event happened? :param data - additional information - what details help to investigate? """
# consistently output empty string for unset params, because null values differ by language source = "" if source is None else source level = "" if level is None else level title = "" if title is None else title if not type(data) is dict: data = {} data['source'] = source data['level'] = level data['title'] = title return format(data)
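To illustrate the intended result, a sketch of how the reserved keys merge into the caller's data before serialisation; this is not the module's own format helper, and the payload values are made up.

import json

data = {'user_id': 42}
data.update({'source': 'billing', 'level': 'error', 'title': 'charge failed'})
print(json.dumps(data, sort_keys=True))
# {"level": "error", "source": "billing", "title": "charge failed", "user_id": 42}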
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _str_to_list(value, separator): """Convert a string to a list with sanitization."""
value_list = [item.strip() for item in value.split(separator)] value_list_sanitized = builtins.list(filter(None, value_list)) if len(value_list_sanitized) > 0: return value_list_sanitized else: raise ValueError('Invalid list variable.')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write(name, value): """Write a raw env value. A ``None`` value clears the environment variable. Args: name: The environment variable name value: The value to write """
if value is not None: environ[name] = builtins.str(value) elif environ.get(name): del environ[name]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read(name, default=None, allow_none=False, fallback=None): """Read the raw env value. Read the raw environment variable or use the default. If the value is not found and no default is set throw an exception. Args: name: The environment variable name default: The default value to use if no environment variable is found allow_none: If the return value can be `None` (i.e. optional) fallback: A list of fallback env variables to try and read if the primary environment variable is unavailable. """
raw_value = environ.get(name) if raw_value is None and fallback is not None: if not isinstance(fallback, builtins.list) and not isinstance(fallback, builtins.tuple): fallback = [fallback] for fall in fallback: raw_value = environ.get(fall) if raw_value is not None: break if raw_value or raw_value == '': return raw_value elif default is not None or allow_none: return default else: raise KeyError('Set the "{0}" environment variable'.format(name))
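A hedged usage sketch of the read semantics; `env` stands in for however this module is actually imported, and the variable names and values are illustrative.

import os
import env  # assumed import name for this module; adjust to the real path

os.environ['PRIMARY_URL'] = 'https://example.com'

env.read('PRIMARY_URL')                        # 'https://example.com'
env.read('MISSING', default='fallback-value')  # 'fallback-value'
env.read('MISSING', fallback='PRIMARY_URL')    # 'https://example.com'
env.read('MISSING', allow_none=True)           # None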
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def str(name, default=None, allow_none=False, fallback=None): """Get a string based environment value or the default. Args: name: The environment variable name default: The default value to use if no environment variable is found allow_none: If the return value can be `None` (i.e. optional) """
value = read(name, default, allow_none, fallback=fallback) if value is None and allow_none: return None else: return builtins.str(value).strip()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bool(name, default=None, allow_none=False, fallback=None): """Get a boolean based environment value or the default. Args: name: The environment variable name default: The default value to use if no environment variable is found allow_none: If the return value can be `None` (i.e. optional) """
value = read(name, default, allow_none, fallback=fallback) if isinstance(value, builtins.bool): return value elif isinstance(value, builtins.int): return True if value > 0 else False elif value is None and allow_none: return None else: value_str = builtins.str(value).lower().strip() return _strtobool(value_str)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def int(name, default=None, allow_none=False, fallback=None):
        """Get an integer environment value or the default.

        Args:
            name: The environment variable name
            default: The default value to use if no environment variable is found
            allow_none: If the return value can be `None` (i.e. optional)
        """
value = read(name, default, allow_none, fallback=fallback) if isinstance(value, builtins.str): value = value.strip() if value is None and allow_none: return None else: return builtins.int(value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list(name, default=None, allow_none=False, fallback=None, separator=','): """Get a list of strings or the default. The individual list elements are whitespace-stripped. Args: name: The environment variable name default: The default value to use if no environment variable is found allow_none: If the return value can be `None` (i.e. optional) separator: The list item separator character or pattern """
value = read(name, default, allow_none, fallback=fallback) if isinstance(value, builtins.list): return value elif isinstance(value, builtins.str): return _str_to_list(value, separator) elif value is None and allow_none: return None else: return [builtins.str(value)]
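A similar hedged sketch of the typed getters working together; again `env` is a placeholder for the real module name and the environment values are made up.

import os
import env  # assumed import name for this module

os.environ['DEBUG'] = 'true'
os.environ['WORKERS'] = ' 4 '
os.environ['HOSTS'] = 'a.example.com, b.example.com,'

env.bool('DEBUG')                  # True
env.int('WORKERS')                 # 4
env.list('HOSTS')                  # ['a.example.com', 'b.example.com']
env.str('MISSING', default='n/a')  # 'n/a'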
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def includeme(config):
        """This function adds some configuration for the application."""
config.add_route('references', '/references') _add_referencer(config.registry) config.add_view_deriver(protected_resources.protected_view) config.add_renderer('json_item', json_renderer) config.scan()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _add_referencer(registry): """ Gets the Referencer from config and adds it to the registry. """
referencer = registry.queryUtility(IReferencer) if referencer is not None: return referencer ref = registry.settings['urireferencer.referencer'] url = registry.settings['urireferencer.registry_url'] r = DottedNameResolver() registry.registerUtility(r.resolve(ref)(url), IReferencer) return registry.queryUtility(IReferencer)
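A plausible Pyramid settings snippet that would satisfy these lookups; the dotted path and registry URL are assumptions for illustration only.

# In the application settings (e.g. a development.ini [app:main] section or a dict):
settings = {
    'urireferencer.referencer': 'myapp.referencers.MyReferencer',  # assumed dotted path
    'urireferencer.registry_url': 'https://registry.example.com',  # assumed URL
}
# config.include('pyramid_urireferencer') would then wire the referencer via includeme().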
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_referencer(registry): """ Get the referencer class :rtype: pyramid_urireferencer.referencer.AbstractReferencer """
# Argument might be a config or request regis = getattr(registry, 'registry', None) if regis is None: regis = registry return regis.queryUtility(IReferencer)