Columns: text_prompt (string, lengths 157–13.1k), code_prompt (string, lengths 7–19.8k)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def file_manifest(self, location): """ An iterator through the files in a location which yields item elements suitable for insertion into the package manifest. """
#Maps file extensions to mimetypes
mimetypes = {'.jpg': 'image/jpeg',
             '.jpeg': 'image/jpeg',
             '.xml': 'application/xhtml+xml',
             '.png': 'image/png',
             '.css': 'text/css',
             '.ncx': 'application/x-dtbncx+xml',
             '.gif': 'image/gif',
             '.tif': 'image/tiff',  # 'image/tiff' is the registered type; original had 'image/tif'
             '.pdf': 'application/pdf',
             '.xhtml': 'application/xhtml+xml',
             '.ttf': 'application/vnd.ms-opentype',
             '.otf': 'application/vnd.ms-opentype'}

current_dir = os.getcwd()
os.chdir(location)
for dirpath, _dirnames, filenames in os.walk('.'):
    dirpath = dirpath[2:]  # A means to avoid dirpath prefix of './'
    for fn in filenames:
        fn_ext = os.path.splitext(fn)[-1]
        item = etree.Element('item')
        #Here we set three attributes: href, media-type, and id
        if not dirpath:
            item.attrib['href'] = fn
        else:
            item.attrib['href'] = '/'.join([dirpath, fn])
        item.attrib['media-type'] = mimetypes[fn_ext]
        #Special handling for common image types
        if fn_ext in ['.jpg', '.png', '.tif', '.jpeg']:
            #the following lines assume we are using the convention
            #where the article doi is prefixed by 'images-'
            item.attrib['id'] = '-'.join([dirpath[7:],
                                          fn.replace('.', '-')])
        else:
            item.attrib['id'] = fn.replace('.', '-')
        yield item
os.chdir(current_dir)
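A minimal usage sketch (names here are hypothetical: `pkg` stands for an instance of the class that defines file_manifest, and the directory is made up):

from lxml import etree

for item in pkg.file_manifest('unpacked_epub_content'):
    print(etree.tostring(item).decode())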
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_contrib_names(self, contrib): """ Returns an appropriate Name and File-As-Name for a contrib element. This code was refactored out of nav_contributors and package_contributors to provide a single definition point for a common job. This is a useful utility that may be well-employed for other publishers as well. """
collab = contrib.find('collab')
anon = contrib.find('anonymous')
if collab is not None:
    proper_name = serialize(collab, strip=True)
    file_as_name = proper_name
elif anon is not None:
    proper_name = 'Anonymous'
    file_as_name = proper_name
else:
    name = contrib.find('name')
    surname = name.find('surname').text
    given = name.find('given-names')
    if given is not None:
        if given.text:  # Sometimes these tags are empty
            proper_name = ' '.join([surname, given.text])
            #File-as name is <surname>, <given-initial-char>
            file_as_name = ', '.join([surname, given.text[0]])
        else:
            proper_name = surname
            file_as_name = proper_name
    else:
        proper_name = surname
        file_as_name = proper_name
return proper_name, file_as_name
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def package_description(self): """ Given an Article class instance, this is responsible for returning an article description. For this method I have taken the approach of serializing the article's first abstract, if it has one. This results in 0 or 1 descriptions per article. """
abstract = self.article.root.xpath('./front/article-meta/abstract')
return serialize(abstract[0], strip=True) if abstract else None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def heading_title(self): """ Makes the Article Title for the Heading. Metadata element, content derived from FrontMatter """
art_title = self.article.root.xpath('./front/article-meta/title-group/article-title')[0]
article_title = deepcopy(art_title)
article_title.tag = 'h1'
article_title.attrib['id'] = 'title'
article_title.attrib['class'] = 'article-title'
return article_title
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_heading_authors(self, authors): """ Constructs the Authors content for the Heading. This should display directly after the Article Title. Metadata element, content derived from FrontMatter """
author_element = etree.Element('h3', {'class': 'authors'})
#Construct content for the author element
first = True
for author in authors:
    if first:
        first = False
    else:
        append_new_text(author_element, ',', join_str='')
    collab = author.find('collab')
    anon = author.find('anon')
    if collab is not None:
        append_all_below(author_element, collab)
    elif anon is not None:  # If anonymous, just add "Anonymous"
        append_new_text(author_element, 'Anonymous')
    else:  # Author is neither Anonymous nor a Collaboration
        author_name, _ = self.get_contrib_names(author)
        append_new_text(author_element, author_name)
    #TODO: Handle author footnote references, also put footnotes in the ArticleInfo
    #Example: journal.pbio.0040370.xml
    first = True
    for xref in author.xpath("./xref[@ref-type='corresp' or @ref-type='aff']"):
        _sup = xref.find('sup')
        sup_text = all_text(_sup) if _sup is not None else ''
        auth_sup = etree.SubElement(author_element, 'sup')
        if first:
            first = False
        else:
            #The separator belongs before each subsequent link; the original
            #appended it after the link, which put the comma on the wrong side
            auth_sup.text = ', '
        sup_link = etree.SubElement(auth_sup, 'a',
                                    {'href': self.main_fragment.format(xref.attrib['rid'])})
        sup_link.text = sup_text
return author_element
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_heading_affiliations(self, heading_div): """ Makes the content for the Author Affiliations, displays after the Authors segment in the Heading. Metadata element, content derived from FrontMatter """
#Get all of the aff element tuples from the metadata
affs = self.article.root.xpath('./front/article-meta/aff')
#Create a list of all those pertaining to the authors
author_affs = [i for i in affs if 'aff' in i.attrib['id']]
#Count them, used for formatting
if len(author_affs) == 0:
    return None
else:
    affs_list = etree.SubElement(heading_div, 'ul',
                                 {'id': 'affiliations', 'class': 'simple'})
    for aff in author_affs:
        #Create a list item to accept extracted content
        aff_item = etree.SubElement(affs_list, 'li')
        aff_item.attrib['id'] = aff.attrib['id']
        #Get the first label node and the first addr-line node
        label = aff.find('label')
        addr_line = aff.find('addr-line')
        if label is not None:
            bold = etree.SubElement(aff_item, 'b')
            bold.text = all_text(label) + ' '
        if addr_line is not None:
            append_new_text(aff_item, all_text(addr_line))
        else:
            append_new_text(aff_item, all_text(aff))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_heading_abstracts(self, heading_div): """ An article may contain data for various kinds of abstracts. This method works on those that are included in the Heading. This is displayed after the Authors and Affiliations. Metadata element, content derived from FrontMatter """
for abstract in self.article.root.xpath('./front/article-meta/abstract'):
    #Make a copy of the abstract
    abstract_copy = deepcopy(abstract)
    abstract_copy.tag = 'div'
    #Abstracts are a rather diverse bunch, keep an eye on them!
    title_text = abstract_copy.xpath('./title[1]/text()')
    for title in abstract_copy.findall('.//title'):
        remove(title)
    #Create a header for the abstract
    abstract_header = etree.Element('h2')
    remove_all_attributes(abstract_copy)
    #Set the header text and abstract id according to abstract type
    abstract_type = abstract.attrib.get('abstract-type')
    log.debug('Handling abstract with abstract-type="{0}"'.format(abstract_type))
    if abstract_type == 'summary':
        abstract_header.text = 'Author Summary'
        abstract_copy.attrib['id'] = 'author-summary'
    elif abstract_type == 'editors-summary':
        abstract_header.text = 'Editors\' Summary'
        abstract_copy.attrib['id'] = 'editor-summary'
    elif abstract_type == 'synopsis':
        abstract_header.text = 'Synopsis'
        abstract_copy.attrib['id'] = 'synopsis'
    elif abstract_type == 'alternate':
        #Right now, these will only be included if there is a title to
        #give it
        if title_text:
            abstract_header.text = title_text[0]
            abstract_copy.attrib['id'] = 'alternate'
        else:
            continue
    elif abstract_type is None:
        abstract_header.text = 'Abstract'
        abstract_copy.attrib['id'] = 'abstract'
    elif abstract_type == 'toc':  # We don't include these
        continue
    else:  # Warn about these, then skip
        log.warning('No handling for abstract-type="{0}"'.format(abstract_type))
        continue
        #abstract_header.text = abstract_type
        #abstract_copy.attrib['id'] = abstract_type
    heading_div.append(abstract_header)
    heading_div.append(abstract_copy)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_article_info_funding(self, article_info_div): """ Creates the element for declaring Funding in the article info. """
funding_group = self.article.root.xpath('./front/article-meta/funding-group')
if funding_group:
    funding_div = etree.SubElement(article_info_div, 'div', {'id': 'funding'})
    funding_b = etree.SubElement(funding_div, 'b')
    funding_b.text = 'Funding: '
    #As far as I can tell, PLoS only uses one funding-statement
    funding_statement = funding_group[0].find('funding-statement')
    append_all_below(funding_div, funding_statement)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_article_info_competing_interests(self, article_info_div): """ Creates the element for declaring competing interests in the article info. """
#Check for author-notes
con_expr = "./front/article-meta/author-notes/fn[@fn-type='conflict']"
conflict = self.article.root.xpath(con_expr)
if not conflict:
    return
conflict_div = etree.SubElement(article_info_div, 'div', {'id': 'conflict'})
b = etree.SubElement(conflict_div, 'b')
b.text = 'Competing Interests: '
fn_p = conflict[0].find('p')
if fn_p is not None:
    append_all_below(conflict_div, fn_p)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_article_info_correspondences(self, article_info_div): """ Articles generally provide a first contact, typically an email address for one of the authors. This will supply that content. """
corresps = self.article.root.xpath('./front/article-meta/author-notes/corresp')
if corresps:
    corresp_div = etree.SubElement(article_info_div, 'div',
                                   {'id': 'correspondence'})
    for corresp in corresps:
        sub_div = etree.SubElement(corresp_div, 'div',
                                   {'id': corresp.attrib['id']})
        append_all_below(sub_div, corresp)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_back_glossary(self, body): """ Glossaries are a fairly common item in papers for PLoS, but it also seems that they are rarely incorporated into the PLoS web-site or PDF formats. They are included in the ePub output however because they are helpful and because we can. """
for glossary in self.article.root.xpath('./back/glossary'):
    gloss_copy = deepcopy(glossary)
    gloss_copy.tag = 'div'
    gloss_copy.attrib['class'] = 'back-glossary'
    body.append(gloss_copy)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def convert_disp_quote_elements(self): """ Extract or extended quoted passage from another work, usually made typographically distinct from surrounding text <disp-quote> elements have a relatively complex content model, but PLoS appears to employ either <p>s or <list>s. """
for disp_quote in self.main.getroot().findall('.//disp-quote'):
    if disp_quote.getparent().tag == 'p':
        elevate_element(disp_quote)
    disp_quote.tag = 'div'
    disp_quote.attrib['class'] = 'disp-quote'
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fetch_single_representation(self, item_xlink_href): """ This function will render a formatted URL for accessing the PLoS' server SingleRepresentation of an object. """
#A dict of URLs for PLoS subjournals
journal_urls = {'pgen': 'http://www.plosgenetics.org/article/{0}',
                'pcbi': 'http://www.ploscompbiol.org/article/{0}',
                'ppat': 'http://www.plospathogens.org/article/{0}',
                'pntd': 'http://www.plosntds.org/article/{0}',
                'pmed': 'http://www.plosmedicine.org/article/{0}',
                'pbio': 'http://www.plosbiology.org/article/{0}',
                'pone': 'http://www.plosone.org/article/{0}',
                'pctr': 'http://clinicaltrials.ploshubs.org/article/{0}'}
#Identify subjournal name for base URL
subjournal_name = self.article.doi.split('.')[2]
base_url = journal_urls[subjournal_name]
#Compose the address for fetchSingleRepresentation
resource = 'fetchSingleRepresentation.action?uri=' + item_xlink_href
return base_url.format(resource)
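To make the composition concrete, here is a sketch with a made-up DOI and xlink href (both hypothetical, chosen only to match the 'pone' key above):

doi = '10.1371/journal.pone.0012345'                 # hypothetical DOI
subjournal = doi.split('.')[2]                       # -> 'pone'
base_url = 'http://www.plosone.org/article/{0}'
href = 'info:doi/10.1371/journal.pone.0012345.g001'  # hypothetical href
print(base_url.format('fetchSingleRepresentation.action?uri=' + href))
# -> http://www.plosone.org/article/fetchSingleRepresentation.action?uri=info:doi/10.1371/journal.pone.0012345.g001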
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def convert_verse_group_elements(self): """ A song, poem, or verse Implementor’s Note: No attempt has been made to retain the look or visual form of the original poetry. This unusual element, <verse-group> is used to convey poetry and is recursive in nature (it may contain further <verse-group> elements). Examples of these tags are sparse, so it remains difficult to ensure full implementation. This method will attempt to handle the label, title, and subtitle elements correctly, while converting <verse-line> elements to italicized lines. """
for verse_group in self.main.getroot().findall('.//verse-group'):
    #Find some possible sub elements for the heading
    label = verse_group.find('label')
    title = verse_group.find('title')
    subtitle = verse_group.find('subtitle')
    #Modify the verse-group element
    verse_group.tag = 'div'
    verse_group.attrib['id'] = 'verse-group'
    #Create a title for the verse_group
    if label is not None or title is not None or subtitle is not None:
        new_verse_title = etree.Element('b')
        #Insert it at the beginning
        verse_group.insert(0, new_verse_title)
        #Induct the title elements into the new title
        if label is not None:
            append_all_below(new_verse_title, label)
            remove(label)
        if title is not None:
            append_all_below(new_verse_title, title)
            remove(title)
        if subtitle is not None:
            append_all_below(new_verse_title, subtitle)
            remove(subtitle)
    for verse_line in verse_group.findall('verse-line'):
        verse_line.tag = 'p'
        verse_line.attrib['class'] = 'verse-line'
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def convert_list_elements(self): """ A sequence of two or more items, which may or may not be ordered. The <list> element has an optional <label> element and optional <title> element, followed by one or more <list-item> elements. This element is recursive as the <list-item> elements may contain further <list> or <def-list> elements. Much of the potential complexity in dealing with lists comes from this recursion. """
#I have yet to gather many examples of this element, and may have to
#write a recursive method for the processing of lists depending on how
#PLoS produces their XML, for now this method is ignorant of nesting
#TODO: prefix-words, one possible solution would be to have this method
#edit the CSS to provide formatting support for arbitrary prefixes...

#This is a block level element, so elevate it if found in p
for list_el in self.main.getroot().findall('.//list'):
    if list_el.getparent().tag == 'p':
        elevate_element(list_el)

#list_el is used instead of list (list is reserved)
for list_el in self.main.getroot().findall('.//list'):
    if 'list-type' not in list_el.attrib:
        list_el_type = 'order'
    else:
        list_el_type = list_el.attrib['list-type']
    #Unordered lists
    if list_el_type in ['', 'bullet', 'simple']:
        list_el.tag = 'ul'
        #CSS must be used to recognize the class and suppress bullets
        if list_el_type == 'simple':
            list_el.attrib['class'] = 'simple'
    #Ordered lists
    else:
        list_el.tag = 'ol'
        list_el.attrib['class'] = list_el_type
    #Convert the list-item element tags to 'li'
    for list_item in list_el.findall('list-item'):
        list_item.tag = 'li'
    remove_all_attributes(list_el, exclude=['id', 'class'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def absolute_path(user_path): """ Some paths must be made absolute, this will attempt to convert them. """
#os.path.abspath always returns a non-empty string, so the original check
#passed for every input; os.path.isabs is the intended test
if os.path.isabs(user_path):
    return unix_path_coercion(user_path)
else:
    try:
        #Assumption: evaluate_relative_path returns the resolved absolute
        #path; the original discarded its result and returned None
        return openaccess_epub.utils.evaluate_relative_path(relative=user_path)
    except Exception:
        raise ValidationError('This path could not be rendered as absolute')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def encloses(self, location: FileLocation ) -> Optional[FunctionDesc]: """ Returns the function, if any, that encloses a given location. """
for func in self.in_file(location.filename):
    if location in func.location:
        return func
return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def in_file(self, filename: str) -> Iterator[FunctionDesc]: """ Returns an iterator over all of the functions definitions that are contained within a given file. """
yield from self.__filename_to_functions.get(filename, [])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_config(args): """ Try to load config, to load other journal locations Otherwise, return default location Returns journal location """
# Try user config or return default location early
config_path = path.expanduser(args.config_file)
if not path.exists(config_path):
    # Complain if they provided a non-existent config
    if args.config_file != DEFAULT_JOURNAL_RC:
        print("journal: error: config file '" + args.config_file + "' not found")
        sys.exit()
    else:
        # If no config file, use default journal location
        return DEFAULT_JOURNAL

# If we get here, assume valid config file
config = ConfigParser.SafeConfigParser({
    'journal': {'default': '__journal'},
    '__journal': {'location': DEFAULT_JOURNAL}
})
config.read(config_path)

journal_location = config.get(config.get('journal', 'default'), 'location')
if args.journal:
    journal_location = config.get(args.journal, 'location')

return journal_location
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def record_entries(journal_location, entries): """ args entries - list of entries to record """
check_journal_dest(journal_location)
current_date = datetime.datetime.today()
date_header = current_date.strftime("%a %H:%M:%S %Y-%m-%d") + "\n"
with open(build_journal_path(journal_location, current_date), "a") as date_file:
    entry_output = date_header
    # old style
    # for entry in entries:
    #     entry_output += "-" + entry + "\n"
    # new style
    entry_output += '-' + ' '.join(entries) + "\n"
    entry_output += "\n"
    date_file.write(entry_output)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_entry(journal_location, date): """ args date - date object returns entry text or None if entry doesn't exist """
if not isinstance(date, datetime.date):
    return None
try:
    with open(build_journal_path(journal_location, date), "r") as entry_file:
        return entry_file.read()
except IOError:
    return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def replace( fname1, fname2, dfilter1, dfilter2, has_header1=True, has_header2=True, frow1=0, frow2=0, ofname=None, ocols=None, ): r""" Replace data in one file with data from another file. :param fname1: Name of the input comma-separated values file, the file that contains the columns to be replaced :type fname1: FileNameExists_ :param fname2: Name of the replacement comma-separated values file, the file that contains the replacement data :type fname2: FileNameExists_ :param dfilter1: Row and/or column filter for the input file :type dfilter1: :ref:`CsvDataFilter` :param dfilter2: Row and/or column filter for the replacement file :type dfilter2: :ref:`CsvDataFilter` :param has_header1: Flag that indicates whether the input comma-separated values file has column headers in its first line (True) or not (False) :type has_header1: boolean :param has_header2: Flag that indicates whether the replacement comma-separated values file has column headers in its first line (True) or not (False) :type has_header2: boolean :param frow1: Input comma-separated values file first data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer or float) in at least one of its columns :type frow1: NonNegativeInteger_ :param frow2: Replacement comma-separated values file first data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer or float) in at least one of its columns :type frow2: NonNegativeInteger_ :param ofname: Name of the output comma-separated values file, the file that will contain the input file data but with some columns replaced with data from the replacement file. If None the input file is replaced "in place" :type ofname: FileName_ :param ocols: Names of the replaced columns in the output comma-separated values file. If None the column names in the input file are used if **has_header1** is True, otherwise no header is used :type ocols: list or None .. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for pcsv.replace.replace :raises: * OSError (File *[fname]* could not be found) * RuntimeError (Argument \`dfilter1\` is not valid) * RuntimeError (Argument \`dfilter2\` is not valid) * RuntimeError (Argument \`fname1\` is not valid) * RuntimeError (Argument \`fname2\` is not valid) * RuntimeError (Argument \`frow1\` is not valid) * RuntimeError (Argument \`frow2\` is not valid) * RuntimeError (Argument \`ocols\` is not valid) * RuntimeError (Argument \`ofname\` is not valid) * RuntimeError (Column headers are not unique in file *[fname]*) * RuntimeError (File *[fname]* has no valid data) * RuntimeError (File *[fname]* is empty) * RuntimeError (Invalid column specification) * RuntimeError (Number of input and output columns are different) * RuntimeError (Number of input and replacement columns are different) * ValueError (Column *[column_identifier]* not found) * ValueError (Number of rows mismatch between input and replacement data) .. [[[end]]] """
# pylint: disable=R0913,R0914
irmm_ex = pexdoc.exh.addex(
    RuntimeError, "Number of input and replacement columns are different"
)
iomm_ex = pexdoc.exh.addex(
    RuntimeError, "Number of input and output columns are different"
)
# Read and validate input data
iobj = CsvFile(fname=fname1, dfilter=dfilter1, has_header=has_header1, frow=frow1)
# Read and validate replacement data
robj = CsvFile(fname=fname2, dfilter=dfilter2, has_header=has_header2, frow=frow2)
# Assign output data structure
ofname = fname1 if ofname is None else ofname
icfilter = iobj.header() if iobj.cfilter is None else iobj.cfilter
rcfilter = robj.header() if robj.cfilter is None else robj.cfilter
ocols = icfilter if ocols is None else ocols
# Miscellaneous data validation
irmm_ex(len(icfilter) != len(rcfilter))
iomm_ex(len(icfilter) != len(ocols))
# Replace data
iobj.replace(rdata=robj.data(filtered=True), filtered=True)
iheader_upper = [
    item.upper() if isinstance(item, str) else item for item in iobj.header()
]
icfilter_index = [
    iheader_upper.index(item.upper() if isinstance(item, str) else item)
    for item in icfilter
]
# Create new header
orow = []
if has_header1:
    for col_num, idata in enumerate(iobj.header()):
        orow.append(
            ocols[icfilter_index.index(col_num)]
            if col_num in icfilter_index
            else idata
        )
# Write (new) file
iobj.write(fname=ofname, header=orow if orow else False, append=False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def spmt(t, peak_delay=6, under_delay=16, peak_disp=1, under_disp=1, p_u_ratio=6): """Normalized SPM HRF function from sum of two gamma PDFs Parameters t : array-like vector of times at which to sample HRF Returns ------- hrf : array vector length ``len(t)`` of samples from HRF at times `t` Notes ----- [1] This is the canonical HRF function as used in SPM. It has the following defaults: - delay of response (relative to onset) : 6s - delay of undershoot (relative to onset) : 16s - dispersion of response : 1s - dispersion of undershoot : 1s - ratio of response to undershoot : 6s - onset : 0s - length of kernel : 32s References: ----- [1] http://nipy.org/ [2] https://github.com/fabianp/hrf_estimation """
return spm_hrf_compat(t, peak_delay=peak_delay, under_delay=under_delay, peak_disp=peak_disp, under_disp=under_disp, p_u_ratio=p_u_ratio, normalize=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ddspmt(t, peak_delay=6, under_delay=16, peak_disp=1, under_disp=1, p_u_ratio=6): """ SPM canonical HRF dispersion derivative, values for time values `t` Parameters t : array-like vector of times at which to sample HRF Returns ------- hrf : array vector length ``len(t)`` of samples from HRF at times `t` Notes ----- [1] This is the canonical HRF dispersion derivative function as used in SPM [2] It is the numerical difference between the HRF sampled at time `t`, and values at `t` for another HRF shape with a small change in the peak dispersion parameter (``peak_disp`` in func:`spm_hrf_compat`). References: ----- [1] http://nipy.org/ [2] https://github.com/fabianp/hrf_estimation """
_spm_dd_func = partial(spmt,
                       peak_delay=peak_delay,
                       under_delay=under_delay,
                       under_disp=under_disp,
                       p_u_ratio=p_u_ratio,
                       peak_disp=1.01)
# Pass the caller's parameters through to the baseline HRF as well; the
# original called spmt(t) with defaults, which only agrees with the
# perturbed HRF when all parameters are left at their default values
return (spmt(t, peak_delay=peak_delay, under_delay=under_delay,
             peak_disp=peak_disp, under_disp=under_disp,
             p_u_ratio=p_u_ratio) - _spm_dd_func(t)) / 0.01
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_boxcar(aryCnd, aryOns, aryDrt, varTr, varNumVol, aryExclCnd=None, varTmpOvsmpl=1000.): """ Creation of condition time courses in temporally upsampled space. Parameters aryCnd : np.array 1D array with condition identifiers (every condition has its own int) aryOns : np.array, same len as aryCnd 1D array with condition onset times in seconds. aryDrt : np.array, same len as aryCnd 1D array with condition durations of different conditions in seconds. varTr : float, positive Time to repeat (TR) of the (fMRI) experiment. varNumVol : float, positive Number of volumes of the (fMRI) data. aryExclCnd : array 1D array containing condition identifiers for conditions to be excluded varTmpOvsmpl : float, positive Factor by which the time courses should be temporally upsampled. Returns ------- aryBxCrOut : np.array, float16 Condition time courses in temporally upsampled space. References: ----- [1] https://github.com/fabianp/hrf_estimation """
if aryExclCnd is not None:
    for cond in aryExclCnd:
        aryOns = aryOns[aryCnd != cond]
        aryDrt = aryDrt[aryCnd != cond]
        aryCnd = aryCnd[aryCnd != cond]

resolution = varTr / float(varTmpOvsmpl)
aryCnd = np.asarray(aryCnd)
# The builtin float/int are used below; the np.float/np.int aliases of the
# original were removed in NumPy 1.24
aryOns = np.asarray(aryOns, dtype=float)
unique_conditions = np.sort(np.unique(aryCnd))
boxcar = []
for c in unique_conditions:
    tmp = np.zeros(int(varNumVol * varTr / resolution))
    onset_c = aryOns[aryCnd == c]
    duration_c = aryDrt[aryCnd == c]
    onset_idx = np.round(onset_c / resolution).astype(int)
    duration_idx = np.round(duration_c / resolution).astype(int)
    aux = np.arange(int(varNumVol * varTr / resolution))
    for start, dur in zip(onset_idx, duration_idx):
        lgc = np.logical_and(aux >= start, aux < start + dur)
        tmp = tmp + lgc
    assert np.all(np.less(tmp, 2))
    boxcar.append(tmp)
aryBxCrOut = np.array(boxcar).T
if aryBxCrOut.shape[1] == 1:
    aryBxCrOut = np.squeeze(aryBxCrOut)
return aryBxCrOut.astype('float16')
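A small shape check, with made-up event timings (assumes the function is in scope):

import numpy as np

aryCnd = np.array([1, 2, 1])
aryOns = np.array([0.0, 4.0, 10.0])
aryDrt = np.array([2.0, 2.0, 2.0])
aryBxCr = create_boxcar(aryCnd, aryOns, aryDrt, varTr=2.0, varNumVol=10,
                        varTmpOvsmpl=10.)
print(aryBxCr.shape)  # (100, 2): 10 volumes x 10 samples per TR, 2 conditions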
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_inputs_to_reference(job_data, input_files, input_directories): """ Creates a dictionary with the summarized information in job_data, input_files and input_directories :param job_data: The job data specifying input parameters other than files and directories. :param input_files: A dictionary describing the input files. :param input_directories: A dictionary describing the input directories. :return: A summarized dictionary containing information about all given inputs. """
return {**deepcopy(job_data), **deepcopy(input_files), **deepcopy(input_directories)}
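A minimal illustration of the merge (keys and values are hypothetical):

job = {'threshold': 0.5}
files = {'my_file': {'isArray': False, 'files': [{'basename': 'a.txt'}]}}
merged = create_inputs_to_reference(job, files, {})
print(merged['threshold'], merged['my_file']['files'][0]['basename'])  # 0.5 a.txt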
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def split_all(reference, sep): """ Splits a given string at a given separator or list of separators. :param reference: The reference to split. :param sep: Separator string or list of separator strings. :return: A list of split strings """
parts = partition_all(reference, sep)
return [p for p in parts if p not in sep]
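Assuming partition_all yields the separators interleaved with the text between them, the expected behaviour is:

parts = split_all('inputs.my_file.nameroot', '.')
print(parts)  # expected: ['inputs', 'my_file', 'nameroot']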
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _resolve_file(attributes, input_file, input_identifier, input_reference): """ Returns the attributes in demand of the input file. :param attributes: A list of attributes to get from the input_file. :param input_file: The file from which to get the attributes. :param input_identifier: The input identifier of the given file. :param input_reference: The reference string :return: The attribute in demand """
if input_file['isArray']:
    raise InvalidInputReference(
        'Input References to Arrays of input files are currently not supported.\n'
        '"{}" is an array of files and can not be resolved for input references:'
        '\n{}'.format(input_identifier, input_reference)
    )
single_file = input_file['files'][0]
try:
    return _get_dict_element(single_file, attributes)
except KeyError:
    raise InvalidInputReference(
        'Could not get attributes "{}" from input file "{}", needed in input reference:'
        '\n{}'.format(attributes, input_identifier, input_reference)
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _resolve_directory(attributes, input_directory, input_identifier, input_reference): """ Returns the attributes in demand of the input directory. :param attributes: A list of attributes to get from the input directory. :param input_directory: The directory from which to get the attributes. :param input_identifier: The input identifier of the given directory. :param input_reference: The reference string :return: The attribute in demand """
if input_directory['isArray']:
    raise InvalidInputReference(
        'Input References to Arrays of input directories are currently not supported.\n'
        'input directory "{}" is an array of directories and can not be resolved for '
        'input references:\n{}'.format(input_identifier, input_reference)
    )
single_directory = input_directory['directories'][0]
try:
    return _get_dict_element(single_directory, attributes)
except KeyError:
    raise InvalidInputReference(
        'Could not get attributes "{}" from input directory "{}", needed in input '
        'reference:\n{}'.format(attributes, input_identifier, input_reference)
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def resolve_input_reference(reference, inputs_to_reference): """ Replaces a given input_reference by a string extracted from inputs_to_reference. :param reference: The input reference to resolve. :param inputs_to_reference: A dictionary containing information about the given inputs. :raise InvalidInputReference: If the given input reference could not be resolved. :return: A string which is the resolved input reference. """
if not reference.startswith('{}inputs.'.format(INPUT_REFERENCE_START)):
    raise InvalidInputReference(
        'An input reference must have the following form '
        '"$(inputs.<input_name>[.<attribute>])".\n'
        'The invalid reference is: "{}"'.format(reference)
    )
# remove "$(" and ")"
reference = reference[2:-1]
parts = split_all(reference, ATTRIBUTE_SEPARATOR_SYMBOLS)
if len(parts) < 2:
    raise InvalidInputReference(
        'InputReference should at least contain "$(inputs.identifier)". The following '
        'input reference does not comply with it:\n{}'.format(reference)
    )
elif parts[0] != "inputs":
    raise InvalidInputReference(
        'InputReference should at least contain "$(inputs.identifier)". The following '
        'input reference does not comply with it:\n$({})'.format(reference)
    )
else:
    input_identifier = parts[1]
    input_to_reference = inputs_to_reference.get(input_identifier)
    if input_to_reference is None:
        raise InvalidInputReference(
            'Input identifier "{}" not found in inputs, but needed in input '
            'reference:\n{}'.format(input_identifier, reference)
        )
    elif isinstance(input_to_reference, dict):
        if 'files' in input_to_reference:
            return _resolve_file(parts[2:], input_to_reference,
                                 input_identifier, reference)
        elif 'directories' in input_to_reference:
            return _resolve_directory(parts[2:], input_to_reference,
                                      input_identifier, reference)
        else:
            raise InvalidInputReference(
                'Unknown input type for input identifier "{}"'.format(input_identifier)
            )
    else:
        if len(parts) > 2:
            raise InvalidInputReference(
                'Attribute "{}" of input reference "{}" could not be resolved'
                .format(parts[2], reference)
            )
        else:
            # Return the referenced value; the original returned parts[1],
            # i.e. the identifier name itself, which looks like a bug
            return input_to_reference
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def resolve_input_references(to_resolve, inputs_to_reference): """ Resolves input references given in the string to_resolve by using the inputs_to_reference. See http://www.commonwl.org/user_guide/06-params/index.html for more information. Example: "$(inputs.my_file.nameroot).md" -> "filename.md" :param to_resolve: The path to match :param inputs_to_reference: Inputs which are used to resolve input references like $(inputs.my_input_file.basename). :return: A string in which the input references are replaced with actual values. """
splitted = split_input_references(to_resolve)
result = []
for part in splitted:
    if is_input_reference(part):
        result.append(str(resolve_input_reference(part, inputs_to_reference)))
    else:
        result.append(part)
return ''.join(result)
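An end-to-end sketch, assuming INPUT_REFERENCE_START is '$(' and '.' is among ATTRIBUTE_SEPARATOR_SYMBOLS, and that the helpers split_input_references and is_input_reference behave as their names suggest (all assumptions here, not confirmed by the source):

inputs = {'my_file': {'isArray': False,
                      'files': [{'basename': 'data.csv', 'nameroot': 'data'}]}}
print(resolve_input_references('$(inputs.my_file.nameroot).md', inputs))
# expected: 'data.md'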
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def TemplateValidator(value): """Try to compile a string into a Django template"""
try:
    Template(value)
except Exception as e:
    raise ValidationError(
        _("Cannot compile template (%(exception)s)"), params={"exception": e}
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge( fname1, fname2, dfilter1=None, dfilter2=None, has_header1=True, has_header2=True, frow1=0, frow2=0, ofname=None, ocols=None, ): r""" Merge two comma-separated values files. Data columns from the second file are appended after data columns from the first file. Empty values in columns are used if the files have different number of rows :param fname1: Name of the first comma-separated values file, the file whose columns appear first in the output file :type fname1: FileNameExists_ :param fname2: Name of the second comma-separated values file, the file whose columns appear last in the output file :type fname2: FileNameExists_ :param dfilter1: Row and/or column filter for the first file. If None no data filtering is done on the file :type dfilter1: :ref:`CsvDataFilter` or None :param dfilter2: Row and/or column filter for the second file. If None no data filtering is done on the file :type dfilter2: :ref:`CsvDataFilter` or None :param has_header1: Flag that indicates whether the first comma-separated values file has column headers in its first line (True) or not (False) :type has_header1: boolean :param has_header2: Flag that indicates whether the second comma-separated values file has column headers in its first line (True) or not (False) :type has_header2: boolean :param frow1: First comma-separated values file first data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer or float) in at least one of its columns :type frow1: NonNegativeInteger_ :param frow2: Second comma-separated values file first data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer or float) in at least one of its columns :type frow2: NonNegativeInteger_ :param ofname: Name of the output comma-separated values file, the file that will contain the data from the first and second files. If None the first file is replaced "in place" :type ofname: FileName_ or None :param ocols: Column names of the output comma-separated values file. If None the column names in the first and second files are used if **has_header1** and/or **has_header2** are True. The column labels :code:`'Column [column_number]'` are used when one of the two files does not have a header, where :code:`[column_number]` is an integer representing the column number (column 0 is the leftmost column). No header is used if **has_header1** and **has_header2** are False :type ocols: list or None .. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]] ..
Auto-generated exceptions documentation for pcsv.merge.merge :raises: * OSError (File *[fname]* could not be found) * RuntimeError (Argument \`dfilter1\` is not valid) * RuntimeError (Argument \`dfilter2\` is not valid) * RuntimeError (Argument \`fname1\` is not valid) * RuntimeError (Argument \`fname2\` is not valid) * RuntimeError (Argument \`frow1\` is not valid) * RuntimeError (Argument \`frow2\` is not valid) * RuntimeError (Argument \`ocols\` is not valid) * RuntimeError (Argument \`ofname\` is not valid) * RuntimeError (Column headers are not unique in file *[fname]*) * RuntimeError (Combined columns in data files and output columns are different) * RuntimeError (File *[fname]* has no valid data) * RuntimeError (File *[fname]* is empty) * RuntimeError (Invalid column specification) * ValueError (Column *[column_identifier]* not found) .. [[[end]]] """
# pylint: disable=R0913,R0914
iomm_ex = pexdoc.exh.addex(
    RuntimeError, "Combined columns in data files and output columns are different"
)
# Read and validate file 1
obj1 = CsvFile(fname=fname1, dfilter=dfilter1, has_header=has_header1, frow=frow1)
# Read and validate file 2
obj2 = CsvFile(fname=fname2, dfilter=dfilter2, has_header=has_header2, frow=frow2)
# Assign output data structure
ofname = fname1 if ofname is None else ofname
cfilter1 = obj1.header() if obj1.cfilter is None else obj1.cfilter
# The original tested obj1.cfilter here; obj2.cfilter is the intended check
cfilter2 = obj2.header() if obj2.cfilter is None else obj2.cfilter
# Create new header
cols1 = len(cfilter1)
cols2 = len(cfilter2)
if (ocols is None) and has_header1 and has_header2:
    ocols = [cfilter1 + cfilter2]
elif (ocols is None) and has_header1 and (not has_header2):
    ocols = [
        cfilter1
        + ["Column {0}".format(item) for item in range(cols1 + 1, cols1 + cols2 + 1)]
    ]
elif (ocols is None) and (not has_header1) and has_header2:
    ocols = [["Column {0}".format(item) for item in range(1, cols1 + 1)] + cfilter2]
elif ocols is None:
    ocols = []
else:
    iomm_ex(cols1 + cols2 != len(ocols))
    ocols = [ocols]
# Even out rows
delta = obj1.rows(filtered=True) - obj2.rows(filtered=True)
data1 = obj1.data(filtered=True)
data2 = obj2.data(filtered=True)
if delta > 0:
    row = [cols2 * [None]]
    data2 += delta * row
elif delta < 0:
    row = [cols1 * [None]]
    data1 += abs(delta) * row
data = ocols
for item1, item2 in zip(data1, data2):
    data.append(item1 + item2)
write(fname=ofname, data=data, append=False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_lbry_api_function_docs(url=LBRY_API_RAW_JSON_URL): """ Scrapes the given URL to a page in JSON format to obtain the documentation for the LBRY API :param str url: URL to the documentation we need to obtain, pybry.constants.LBRY_API_RAW_JSON_URL by default :return: List of functions retrieved from the `url` given :rtype: list """
try:
    # Grab the page content
    docs_page = urlopen(url)
    # Read the contents of the actual url we grabbed and decode them into UTF-8
    contents = docs_page.read().decode("utf-8")
    # Return the contents loaded as JSON
    return loads(contents)
# If we get an exception, simply exit
except URLError as UE:
    print(UE)
except Exception as E:
    print(E)
return []
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_method_definition(func): """ Generates the body for the given function :param dict func: dict of a JSON-Formatted function as defined by the API docs :return: A String containing the definition for the function as it should be written in code :rtype: str """
indent = 4

# initial definition
method_definition = (" " * indent) + "def " + func["name"]

# Here we just create a queue and put all the parameters
# into the queue in the order that they were given,
params_required = [param for param in func["arguments"] if param["is_required"]]
params_optional = [param for param in func["arguments"] if not param["is_required"]]

# Open the parameter definitions
method_definition += "(self, "

for param in params_required:
    # Put the parameter into the queue
    method_definition += param["name"]
    method_definition += ", "

for param in params_optional:
    method_definition += param["name"]
    # Default methods not required
    method_definition += "=None, "

# Peel off the final ", " and close off the parameter definition
method_definition = method_definition.rstrip(", ") + "):\n"

indent += 4

# re-indent
method_definition += " " * indent

# Begin with description.
method_definition += '"""' + func["description"]

# re-indent
method_definition += "\n\n" + " " * indent

# Go through each parameter and insert description & type hint
for param in params_required + params_optional:
    # Add the type
    method_definition += ":param " + DTYPE_MAPPING[param["type"].lower()]
    # Add the name
    method_definition += " " + param["name"] + ": "
    # Add the description
    method_definition += param["description"]
    # Add optionality & reindent
    method_definition += "\n" if param["is_required"] else " (Optional)\n"
    method_definition += " " * indent

open_index = func["returns"].find('(')
close_index = func["returns"].find(')', (open_index if open_index > -1 else 0))
func["returns"] = func["returns"].replace("\t", " " * 4)
return_string = func["returns"].replace("\n", "")
if open_index < close_index and \
        func["returns"][open_index + 1:close_index] in DTYPE_MAPPING:
    method_definition += ":rtype: " + \
        DTYPE_MAPPING[func["returns"][open_index + 1:close_index]]
    func["returns"] = func["returns"].replace(
        func["returns"][open_index:close_index + 1], "")
    method_definition += "\n" + " " * indent

method_definition += ":return: " + return_string
for i in range(0, len(return_string) + 1, 80 - (indent + 2)):
    method_definition += return_string[i:i + (80 - (indent + 2))] + "\n" + " " * indent

# Close it off & reindent
method_definition += '"""' + "\n" + " " * indent

# Create the params map
params_map = "__params_map = {"

# Save the indent
params_indent, num_params = len(params_map), \
    len(params_required) + len(params_optional)

# Append the map to the method_definition
method_definition += params_map

# Go through the required parameters first
for i, param in enumerate(params_required + params_optional):
    # append the methods to the map
    method_definition += "'" + param["name"] + "': " + param["name"]
    if not param["is_required"]:
        # The original dropped this expression on the floor (no assignment)
        # and was missing the space before "is"; both fixed here
        method_definition += " if " + param["name"] + " is not None else None"
    # add commas or ending bracket if needed & reindent correctly
    method_definition += ",\n" + " " * indent + ' ' * params_indent \
        if i + 1 < num_params else ""

method_definition += '}\n\n' + ' ' * indent

method_definition += "return self.make_request(SERVER_ADDRESS, '" + func["name"] + "', " \
    + params_map.rstrip(" = {") + ", timeout=self.timeout)\n\n"

return method_definition
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_lbryd_wrapper(url=LBRY_API_RAW_JSON_URL, read_file=__LBRYD_BASE_FPATH__, write_file=LBRYD_FPATH): """ Generates the actual functions for lbryd_api.py based on lbry's documentation :param str url: URL to the documentation we need to obtain, pybry.constants.LBRY_API_RAW_JSON_URL by default :param str read_file: This is the path to the file from which we will be reading :param str write_file: Path from project root to the file we'll be writing to. """
functions = get_lbry_api_function_docs(url)

# Open the actual file for appending
with open(write_file, 'w') as lbry_file:
    lbry_file.write("# This file was generated at build time using the generator function\n")
    lbry_file.write("# You may edit but do so with caution\n")
    with open(read_file, 'r') as template:
        header = template.read()
    lbry_file.write(header)

    # Iterate through all the functions we retrieved
    for func in functions:
        method_definition = generate_method_definition(func)
        # Write to file
        lbry_file.write(method_definition)

try:
    from yapf.yapflib.yapf_api import FormatFile
    # Now we should format the file using the yapf formatter
    FormatFile(write_file, in_place=True)
except ImportError as IE:
    print("[Warning]: yapf is not installed, so the generated code will not "
          "follow an easy-to-read standard")
    print(IE)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_nii(strPathIn, varSzeThr=5000.0): """ Load nii file. Parameters strPathIn : str Path to nii file to load. varSzeThr : float If the nii file is larger than this threshold (in MB), the file is loaded volume-by-volume in order to prevent memory overflow. Default threshold is 5000 MB. Returns ------- aryNii : np.array Array containing nii data. 32 bit floating point precision. objHdr : header object Header of nii file. aryAff : np.array Array containing 'affine', i.e. information about spatial positioning of nii data. Notes ----- If the nii file is larger than the specified threshold (`varSzeThr`), the file is loaded volume-by-volume in order to prevent memory overflow. The reason for this is that nibabel imports data at float64 precision, which can lead to a memory overflow even for relatively small files. """
# Load nii file (this does not load the data into memory yet):
objNii = nb.load(strPathIn)

# Get size of nii file:
varNiiSze = os.path.getsize(strPathIn)

# Convert to MB:
varNiiSze = np.divide(float(varNiiSze), 1000000.0)

# Load volume-by-volume or all at once, depending on file size:
if np.greater(varNiiSze, float(varSzeThr)):

    # Load large nii file
    print(('---------Large file size (' + str(np.around(varNiiSze)) +
           ' MB), reading volume-by-volume'))

    # Get image dimensions:
    tplSze = objNii.shape

    # Create empty array for nii data:
    aryNii = np.zeros(tplSze, dtype=np.float32)

    # Loop through volumes:
    for idxVol in range(tplSze[3]):
        aryNii[..., idxVol] = np.asarray(
            objNii.dataobj[..., idxVol]).astype(np.float32)

else:

    # Load small nii file; the handle from above is reused (the original
    # redundantly reloaded the file here)
    # Load data into array:
    aryNii = np.asarray(objNii.dataobj).astype(np.float32)

# Get headers:
objHdr = objNii.header

# Get 'affine':
aryAff = objNii.affine

# Output nii data (as numpy array), header, and 'affine':
return aryNii, objHdr, aryAff
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_res_prm(lstFunc, lstFlsMsk=None): """Load result parameters from multiple nii files, with optional mask. Parameters lstFunc : list, list of str with file names of 3D or 4D nii files lstFlsMsk : list, optional list of str with paths to 3D nii files that can act as mask/s Returns ------- lstPrmAry : list The list will contain as many numpy arrays as masks were provided. Each array is 2D with shape [nr voxel in mask, nr nii files in lstFunc] objHdr : header object Header of nii file. aryAff : np.array Array containing 'affine', i.e. information about spatial positioning of nii data. """
# load parameter/functional maps into a list
lstPrm = []
for ind, path in enumerate(lstFunc):
    aryFnc = load_nii(path)[0].astype(np.float32)
    if aryFnc.ndim == 3:
        lstPrm.append(aryFnc)
    # handle cases where nii array is 4D; in this case split arrays up in
    # 3D arrays and append those
    elif aryFnc.ndim == 4:
        for indAx in range(aryFnc.shape[-1]):
            lstPrm.append(aryFnc[..., indAx])

# load mask/s if available
if lstFlsMsk is not None:
    lstMsk = [None] * len(lstFlsMsk)
    for ind, path in enumerate(lstFlsMsk):
        aryMsk = load_nii(path)[0].astype(bool)  # np.bool was removed in NumPy 1.24
        lstMsk[ind] = aryMsk
else:
    print('------------No masks were provided')

if lstFlsMsk is None:
    # if no mask was provided we just flatten all parameter arrays in list
    # and return resulting list
    lstPrmAry = [ary.flatten() for ary in lstPrm]
else:
    # if masks are available, we loop over masks and then over parameter
    # maps to extract selected voxels and parameters
    lstPrmAry = [None] * len(lstFlsMsk)
    for indLst, aryMsk in enumerate(lstMsk):
        # prepare array that will hold parameter values of selected voxels
        aryPrmSel = np.empty((np.sum(aryMsk), len(lstPrm)), dtype=np.float32)
        # loop over different parameter maps
        for indAry, aryPrm in enumerate(lstPrm):
            # get voxels specific to this mask
            aryPrmSel[:, indAry] = aryPrm[aryMsk, ...]
        # put array away in list; if only one parameter map was provided
        # the output will be squeezed
        lstPrmAry[indLst] = aryPrmSel

# also get header object and affine array
# we simply take them from the first functional nii file, because that is
# the only file that has to be provided by necessity
objHdr, aryAff = load_nii(lstFunc[0])[1:]

return lstPrmAry, objHdr, aryAff
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def map_crt_to_pol(aryXCrds, aryYrds): """Remap coordinates from cartesian to polar Parameters aryXCrds : 1D numpy array Array with x coordinate values. aryYrds : 1D numpy array Array with y coordinate values. Returns ------- aryTht : 1D numpy array Angle of coordinates aryRad : 1D numpy array Radius of coordinates. """
aryRad = np.sqrt(aryXCrds**2 + aryYrds**2)
aryTht = np.arctan2(aryYrds, aryXCrds)
return aryTht, aryRad
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def map_pol_to_crt(aryTht, aryRad): """Remap coordinates from polar to cartesian Parameters aryTht : 1D numpy array Angle of coordinates aryRad : 1D numpy array Radius of coordinates. Returns ------- aryXCrds : 1D numpy array Array with x coordinate values. aryYrds : 1D numpy array Array with y coordinate values. """
aryXCrds = aryRad * np.cos(aryTht)
aryYrds = aryRad * np.sin(aryTht)
return aryXCrds, aryYrds
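The two remapping functions are inverses of each other, e.g.:

import numpy as np

x = np.array([1.0, 0.0, -1.0])
y = np.array([0.0, 1.0, 0.0])
tht, rad = map_crt_to_pol(x, y)    # tht -> [0, pi/2, pi], rad -> [1, 1, 1]
x2, y2 = map_pol_to_crt(tht, rad)
assert np.allclose(x, x2) and np.allclose(y, y2)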
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_near_pol_ang(aryEmpPlrAng, aryExpPlrAng): """Return index of nearest expected polar angle. Parameters aryEmpPlrAng : 1D numpy array Empirically found polar angle estimates aryExpPlrAng : 1D numpy array Theoretically expected polar angle estimates Returns ------- aryIdx : 1D numpy array Indices of nearest theoretically expected polar angle. aryDst : 1D numpy array Distances to nearest theoretically expected polar angle. """
dist = np.abs(np.subtract(aryEmpPlrAng[:, None], aryExpPlrAng[None, :]))
return np.argmin(dist, axis=-1), np.min(dist, axis=-1)
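For example:

import numpy as np

emp = np.array([0.10, 1.50])
exp = np.array([0.0, np.pi / 2, np.pi])
idx, dst = find_near_pol_ang(emp, exp)
print(idx)  # [0 1]
print(dst)  # [0.1, ~0.0708]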
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rmp_rng(aryVls, varNewMin, varNewMax, varOldThrMin=None, varOldAbsMax=None): """Remap values in an array from one range to another. Parameters aryVls : 1D numpy array Array with values that need to be remapped. varNewMin : float Desired minimum value of new, remapped array. varNewMax : float Desired maximum value of new, remapped array. varOldThrMin : float Theoretical minimum of old distribution. Can be specified if this theoretical minimum does not occur in empirical distribution but should be considered nonetheless. varOldAbsMax : float Theoretical maximum of old distribution. Can be specified if this theoretical maximum does not occur in empirical distribution but should be considered nonetheless. Returns ------- aryVls : 1D numpy array Array with remapped values. """
if varOldThrMin is None:
    varOldMin = aryVls.min()
else:
    varOldMin = varOldThrMin
if varOldAbsMax is None:
    varOldMax = aryVls.max()
else:
    varOldMax = varOldAbsMax

aryNewVls = np.empty((aryVls.shape), dtype=aryVls.dtype)
for ind, val in enumerate(aryVls):
    aryNewVls[ind] = (((val - varOldMin) * (varNewMax - varNewMin)) /
                      (varOldMax - varOldMin)) + varNewMin

return aryNewVls
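For example, remapping [0, 10] onto [0, 1], with and without a theoretical old range:

import numpy as np

vals = np.array([0.0, 5.0, 10.0])
print(rmp_rng(vals, 0.0, 1.0))                                         # [0.  0.5 1. ]
print(rmp_rng(vals, 0.0, 1.0, varOldThrMin=-10.0, varOldAbsMax=10.0))  # [0.5  0.75 1. ]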
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rmp_deg_pixel_xys(vecX, vecY, vecPrfSd, tplPngSize, varExtXmin, varExtXmax, varExtYmin, varExtYmax): """Remap x, y, sigma parameters from degrees to pixel. Parameters vecX : 1D numpy array Array with possible x parameters in degree vecY : 1D numpy array Array with possible y parameters in degree vecPrfSd : 1D numpy array Array with possible sd parameters in degree tplPngSize : tuple, 2 Pixel dimensions of the visual space in pixel (width, height). varExtXmin : float Extent of visual space from centre in negative x-direction (width) varExtXmax : float Extent of visual space from centre in positive x-direction (width) varExtYmin : float Extent of visual space from centre in negative y-direction (height) varExtYmax : float Extent of visual space from centre in positive y-direction (height) Returns ------- vecX : 1D numpy array Array with possible x parameters in pixel vecY : 1D numpy array Array with possible y parameters in pixel vecPrfSd : 1D numpy array Array with possible sd parameters in pixel """
# Remap modelled x-positions of the pRFs:
vecXpxl = rmp_rng(vecX, 0.0, (tplPngSize[0] - 1), varOldThrMin=varExtXmin,
                  varOldAbsMax=varExtXmax)

# Remap modelled y-positions of the pRFs:
vecYpxl = rmp_rng(vecY, 0.0, (tplPngSize[1] - 1), varOldThrMin=varExtYmin,
                  varOldAbsMax=varExtYmax)

# We calculate the scaling factor from degrees of visual angle to
# pixels separately for the x- and the y-directions (the two should
# be the same).
varDgr2PixX = np.divide(tplPngSize[0], (varExtXmax - varExtXmin))
varDgr2PixY = np.divide(tplPngSize[1], (varExtYmax - varExtYmin))

# Check whether varDgr2PixX and varDgr2PixY are similar:
strErrMsg = 'ERROR. The ratio of X and Y dimensions in ' + \
    'stimulus space (in degrees of visual angle) and the ' + \
    'ratio of X and Y dimensions in the upsampled visual space ' + \
    'do not agree'
assert 0.5 > np.absolute((varDgr2PixX - varDgr2PixY)), strErrMsg

# Convert pRF sizes from degrees of visual angle to pixels:
vecPrfSdpxl = np.multiply(vecPrfSd, varDgr2PixX)

# Return new values in column stack.
# Since values are now in pixels, they should be integers.
return np.column_stack((vecXpxl, vecYpxl, vecPrfSdpxl)).astype(np.int32)
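For instance, on a hypothetical 200 x 200 pixel canvas covering -8 to 8 degrees in both directions:

import numpy as np

vecX = np.array([-8.0, 0.0, 8.0])
vecY = np.array([0.0, 0.0, 0.0])
vecSd = np.array([1.0, 2.0, 3.0])
print(rmp_deg_pixel_xys(vecX, vecY, vecSd, (200, 200), -8.0, 8.0, -8.0, 8.0))
# rows: [0 99 12], [99 99 25], [199 99 37]
# (12.5 px per degree; fractional values truncate in the final int cast)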
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cnvl_2D_gauss(idxPrc, aryMdlParamsChnk, arySptExpInf, tplPngSize, queOut, strCrd='crt'): """Spatially convolve input with 2D Gaussian model. Parameters idxPrc : int Process ID of the process calling this function (for CPU multi-threading). In GPU version, this parameter is 0 (just one thread on CPU). aryMdlParamsChnk : 2d numpy array, shape [n_models, n_model_params] Array with the model parameter combinations for this chunk. arySptExpInf : 3d numpy array, shape [n_x_pix, n_y_pix, n_conditions] All spatial conditions stacked along the last axis. tplPngSize : tuple, 2. Pixel dimensions of the visual space (width, height). queOut : multiprocessing.queues.Queue Queue to put the results on. If this is None, the user is not running multiprocessing but is just calling the function. strCrd : string, either 'crt' or 'pol' Whether model parameters are provided in cartesian or polar coordinates. Returns ------- data : 2d numpy array, shape [n_models, n_conditions] Convolved data. """
# Number of combinations of model parameters in the current chunk: varChnkSze = aryMdlParamsChnk.shape[0] # Number of conditions / time points of the input data varNumLstAx = arySptExpInf.shape[-1] # Output array with results of convolution: aryOut = np.zeros((varChnkSze, varNumLstAx)) # Loop through combinations of model parameters: for idxMdl in range(0, varChnkSze): # Spatial parameters of current model: if strCrd == 'pol': # Position was given in polar coordinates varTmpEcc = aryMdlParamsChnk[idxMdl, 0] varTmpPlrAng = aryMdlParamsChnk[idxMdl, 1] # Convert from polar to to cartesian coordinates varTmpX = varTmpEcc * np.cos(varTmpPlrAng) + tplPngSize[0]/2. varTmpY = varTmpEcc * np.sin(varTmpPlrAng) + tplPngSize[1]/2. elif strCrd == 'crt': varTmpX = aryMdlParamsChnk[idxMdl, 0] varTmpY = aryMdlParamsChnk[idxMdl, 1] # Standard deviation does not depend on coordinate system varTmpSd = aryMdlParamsChnk[idxMdl, 2] # Create pRF model (2D): aryGauss = crt_2D_gauss(tplPngSize[0], tplPngSize[1], varTmpX, varTmpY, varTmpSd) # Multiply pixel-time courses with Gaussian pRF models: aryCndTcTmp = np.multiply(arySptExpInf, aryGauss[:, :, None]) # Calculate sum across x- and y-dimensions - the 'area under the # Gaussian surface'. aryCndTcTmp = np.sum(aryCndTcTmp, axis=(0, 1)) # Put model time courses into function's output with 2d Gaussian # arrray: aryOut[idxMdl, :] = aryCndTcTmp if queOut is None: # if user is not using multiprocessing, return the array directly return aryOut else: # Put column with the indices of model-parameter-combinations into the # output array (in order to be able to put the pRF model time courses # into the correct order after the parallelised function): lstOut = [idxPrc, aryOut] # Put output to queue: queOut.put(lstOut)
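A minimal sketch of calling the function directly, without multiprocessing (this assumes `crt_2D_gauss` from the same package is importable; all values are illustrative):

import numpy as np

# One model in cartesian coordinates: x = 10 px, y = 12 px, SD = 3 px
aryMdl = np.array([[10.0, 12.0, 3.0]])

# Two stimulus frames on a 20 x 20 pixel grid; only the first shows a patch
arySpt = np.zeros((20, 20, 2))
arySpt[5:15, 5:15, 0] = 1.0

# With queOut=None the (1, 2) result array is returned directly
aryTc = cnvl_2D_gauss(0, aryMdl, arySpt, (20, 20), None, strCrd='crt')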
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process(self, checksum, revision=None): """ Process a new revision and detect a revert if it occurred. Note that you can pass whatever you like as `revision` and it will be returned in the case that a revert occurs. :Parameters: checksum : str Any identity-matchable string-based hash of revision content revision : `mixed` Revision metadata. Note that any data will just be returned in the case of a revert. :Returns: a :class:`~mwreverts.Revert` if one occurred or `None` """
revert = None if checksum in self: # potential revert reverteds = list(self.up_to(checksum)) if len(reverteds) > 0: # If no reverted revisions, this is a noop revert = Revert(revision, reverteds, self[checksum]) self.insert(checksum, revision) return revert
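A hypothetical round trip, assuming this method lives on a detector object providing the insert/up_to machinery it references (field names follow the mwreverts `Revert` convention; the checksums and ids are made up):

detector = Detector()  # hypothetical detector class exposing process()

detector.process("c0ffee", revision={'id': 1})
detector.process("deadbf", revision={'id': 2})
revert = detector.process("c0ffee", revision={'id': 3})

if revert is not None:
    # revision 3 restored the state of revision 1, reverting revision 2
    print(revert.reverting, revert.reverteds, revert.reverted_to)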
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def funcSmthSpt(aryFuncChnk, varSdSmthSpt): """Apply spatial smoothing to the input data. Parameters aryFuncChnk : np.array 4D or 5D numpy array with functional data; the first three dimensions are spatial and the last dimension is time. varSdSmthSpt : float Extent of smoothing (SD of the spatial Gaussian kernel, in voxels). Returns ------- aryFuncChnk : np.array Smoothed data. """
varNdim = aryFuncChnk.ndim # Number of time points in this chunk: varNumVol = aryFuncChnk.shape[-1] # Loop through volumes: if varNdim == 4: for idxVol in range(0, varNumVol): aryFuncChnk[:, :, :, idxVol] = gaussian_filter( aryFuncChnk[:, :, :, idxVol], varSdSmthSpt, order=0, mode='nearest', truncate=4.0) elif varNdim == 5: varNumMtnDrctns = aryFuncChnk.shape[3] for idxVol in range(0, varNumVol): for idxMtn in range(0, varNumMtnDrctns): aryFuncChnk[:, :, :, idxMtn, idxVol] = gaussian_filter( aryFuncChnk[:, :, :, idxMtn, idxVol], varSdSmthSpt, order=0, mode='nearest', truncate=4.0) # Output list: return aryFuncChnk
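A quick sketch (this assumes `gaussian_filter` from scipy.ndimage is imported at module level, as the body implies; shapes are illustrative):

import numpy as np

# 4D chunk: 10 x 10 x 10 voxels over 5 volumes; spatial kernel SD of 1.5 voxels
aryChnk = np.random.randn(10, 10, 10, 5)
arySmth = funcSmthSpt(aryChnk, 1.5)
# arySmth.shape == (10, 10, 10, 5); each volume is smoothed independently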
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def funcSmthTmp(aryFuncChnk, varSdSmthTmp): """Apply temporal smoothing to fMRI data & pRF time course models. Parameters aryFuncChnk : np.array 2D numpy array with the data to be smoothed; time runs along the second axis. varSdSmthTmp : float Extent of smoothing (SD of the temporal Gaussian kernel, in volumes). Returns ------- aryFuncChnk : np.array Temporally smoothed data. """
# For the filtering to perform well at the ends of the time series, we # set the method to 'nearest' and place a volume with mean intensity # (over time) at the beginning and at the end. aryFuncChnkMean = np.mean(aryFuncChnk, axis=1, keepdims=True) aryFuncChnk = np.concatenate((aryFuncChnkMean, aryFuncChnk, aryFuncChnkMean), axis=1) # In the input data, time goes from left to right. Therefore, we apply # the filter along axis=1. aryFuncChnk = gaussian_filter1d(aryFuncChnk, varSdSmthTmp, axis=1, order=0, mode='nearest', truncate=4.0) # Remove mean-intensity volumes at the beginning and at the end: aryFuncChnk = aryFuncChnk[:, 1:-1] # Output list: return aryFuncChnk
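A matching sketch for the temporal case (again assuming the scipy import at module level; the data shape is illustrative):

import numpy as np

# 100 voxels, 50 volumes; temporal kernel SD of 2 volumes
aryChnk = np.random.randn(100, 50)
arySmth = funcSmthTmp(aryChnk, 2.0)
# arySmth.shape == (100, 50); mean-padding keeps the series ends stable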
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def prep_models(aryPrfTc, varSdSmthTmp=2.0, lgcPrint=True): """ Prepare pRF model time courses. Parameters aryPrfTc : np.array 4D numpy array with pRF time course models, with following dimensions: `aryPrfTc[x-position, y-position, SD, volume]`. varSdSmthTmp : float Extent of temporal smoothing that is applied to functional data and pRF time course models, [SD of Gaussian kernel, in seconds]. If `zero`, no temporal smoothing is applied. lgcPrint : boolean Whether print statements should be executed. Returns ------- aryPrfTc : np.array 4D numpy array with prepared pRF time course models, same dimensions as input (`aryPrfTc[x-position, y-position, SD, volume]`). """
if lgcPrint: print('------Prepare pRF time course models') # Define temporal smoothing of pRF time course models def funcSmthTmp(aryPrfTc, varSdSmthTmp, lgcPrint=True): """Apply temporal smoothing to fMRI data & pRF time course models. Parameters ---------- aryPrfTc : np.array 4D numpy array with pRF time course models, with following dimensions: `aryPrfTc[x-position, y-position, SD, volume]`. varSdSmthTmp : float, positive Extent of temporal smoothing that is applied to functional data and pRF time course models, [SD of Gaussian kernel, in seconds]. If `zero`, no temporal smoothing is applied. lgcPrint : boolean Whether print statements should be executed. Returns ------- aryPrfTc : np.array 4D numpy array with prepared pRF time course models, same dimension as input (`aryPrfTc[x-position, y-position, SD, volume]`). """ # adjust the input, if necessary, such that input is 2D, with last # dim time tplInpShp = aryPrfTc.shape aryPrfTc = aryPrfTc.reshape((-1, aryPrfTc.shape[-1])) # For the filtering to perform well at the ends of the time series, we # set the method to 'nearest' and place a volume with mean intensity # (over time) at the beginning and at the end. aryPrfTcMean = np.mean(aryPrfTc, axis=-1, keepdims=True).reshape(-1, 1) aryPrfTc = np.concatenate((aryPrfTcMean, aryPrfTc, aryPrfTcMean), axis=-1) # In the input data, time goes from left to right. Therefore, we apply # the filter along axis=1. aryPrfTc = gaussian_filter1d(aryPrfTc.astype('float32'), varSdSmthTmp, axis=-1, order=0, mode='nearest', truncate=4.0) # Remove mean-intensity volumes at the beginning and at the end: aryPrfTc = aryPrfTc[..., 1:-1] # Output array: return aryPrfTc.reshape(tplInpShp).astype('float16') # Perform temporal smoothing of pRF time course models if 0.0 < varSdSmthTmp: if lgcPrint: print('---------Temporal smoothing on pRF time course models') print('------------SD tmp smooth is: ' + str(varSdSmthTmp)) aryPrfTc = funcSmthTmp(aryPrfTc, varSdSmthTmp) # Z-score the prf time course models if lgcPrint: print('---------Zscore the pRF time course models') # De-mean the prf time course models: aryPrfTc = np.subtract(aryPrfTc, np.mean(aryPrfTc, axis=-1)[..., None]) # Standardize the prf time course models: # In order to avoid devision by zero, only divide those voxels with a # standard deviation greater than zero: aryTmpStd = np.std(aryPrfTc, axis=-1) aryTmpLgc = np.greater(aryTmpStd, np.array([0.0])) aryPrfTc[aryTmpLgc, :] = np.divide(aryPrfTc[aryTmpLgc, :], aryTmpStd[aryTmpLgc, None]) return aryPrfTc
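A usage sketch with dummy models (shapes illustrative):

import numpy as np

# 10 x-positions, 10 y-positions, 5 SDs, 100 volumes
aryPrfTc = np.random.randn(10, 10, 5, 100).astype('float32')
aryPrfTc = prep_models(aryPrfTc, varSdSmthTmp=2.0, lgcPrint=False)
# Each model time course is now temporally smoothed and z-scored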
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self,style): """ what's the value of a style at the current stack level"""
level = len(self.stack) -1 while level >= 0: if style in self.stack[level]: return self.stack[level][style] else: level = level - 1 return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def enforce_type(self, attr, val): """converts a value to the attribute's type"""
if attr not in self.types: return utfstr(val) elif self.types[attr] == 'int': return int(float(val)) elif self.types[attr] == 'float': return float(val) else: return utfstr(val)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set(self, style={}): """overrides style values at the current stack level"""
_style = {} for attr in style: if attr in self.cmds and not style[attr] in self.cmds[attr]: print 'WARNING: ESC/POS PRINTING: ignoring invalid value: '+utfstr(style[attr])+' for style: '+utfstr(attr) else: self.stack[-1][attr] = self.enforce_type(attr, style[attr])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_escpos(self): """ converts the current style to an escpos command string """
cmd = '' ordered_cmds = self.cmds.keys() ordered_cmds.sort(lambda x,y: cmp(self.cmds[x]['_order'], self.cmds[y]['_order'])) for style in ordered_cmds: cmd += self.cmds[style][self.get(style)] return cmd
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start_inline(self,stylestack=None): """ starts an inline entity with an optional style definition """
self.stack.append('inline') if self.dirty: self.escpos._raw(' ') if stylestack: self.style(stylestack)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start_block(self,stylestack=None): """ starts a block entity with an optional style definition """
if self.dirty: self.escpos._raw('\n') self.dirty = False self.stack.append('block') if stylestack: self.style(stylestack)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pre(self,text): """ puts a string of text in the entity keeping the whitespace intact """
if text: self.escpos.text(text) self.dirty = True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def text(self,text): """ puts text in the entity. Whitespace and newlines are stripped to single spaces. """
if text: text = utfstr(text) text = text.strip() text = re.sub(r'\s+', ' ', text) if text: self.dirty = True self.escpos.text(text)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _check_image_size(self, size): """ Check and fix the size of the image so that its width is a multiple of 32 pixels, returning the left/right border padding required """
if size % 32 == 0: return (0, 0) else: image_border = 32 - (size % 32) if (image_border % 2) == 0: return (image_border / 2, image_border / 2) else: return (image_border / 2, (image_border / 2) + 1)
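To make the padding concrete: a 100-pixel-wide image gives 100 % 32 == 4, so a 28-pixel border is required; 28 is even, and the method returns (14, 14). A 101-pixel image leaves a 27-pixel border, which is odd, yielding (13, 14). Note the halving relies on Python 2 integer division of ints; under Python 3 the `/` would need to become `//` to keep the results integral.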
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _convert_image(self, im): """ Parse image and prepare it to a printable format """
pixels = [] pix_line = "" im_left = "" im_right = "" switch = 0 img_size = [ 0, 0 ] if im.size[0] > 512: print "WARNING: Image is wider than 512 and could be truncated at print time " if im.size[1] > 255: raise ImageSizeError() im_border = self._check_image_size(im.size[0]) for i in range(im_border[0]): im_left += "0" for i in range(im_border[1]): im_right += "0" for y in range(im.size[1]): img_size[1] += 1 pix_line += im_left img_size[0] += im_border[0] for x in range(im.size[0]): img_size[0] += 1 RGB = im.getpixel((x, y)) im_color = (RGB[0] + RGB[1] + RGB[2]) im_pattern = "1X0" pattern_len = len(im_pattern) switch = (switch - 1 ) * (-1) for x in range(pattern_len): if im_color <= (255 * 3 / pattern_len * (x+1)): if im_pattern[x] == "X": pix_line += "%d" % switch else: pix_line += im_pattern[x] break elif im_color > (255 * 3 / pattern_len * pattern_len) and im_color <= (255 * 3): pix_line += im_pattern[-1] break pix_line += im_right img_size[0] += im_border[1] return (pix_line, img_size)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def image(self,path_img): """ Open image file """
im_open = Image.open(path_img) im = im_open.convert("RGB") # Convert the RGB image in printable image pix_line, img_size = self._convert_image(im) self._print_image(pix_line, img_size)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def qr(self,text): """ Print QR Code for the provided string """
qr_code = qrcode.QRCode(version=4, box_size=4, border=1) qr_code.add_data(text) qr_code.make(fit=True) qr_img = qr_code.make_image() im = qr_img._img.convert("RGB") # Convert the RGB image into a printable image and send it to the printer, # mirroring image() above; converting alone would leave the QR unprinted pix_line, img_size = self._convert_image(im) self._print_image(pix_line, img_size)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def text(self,txt): """ Print UTF-8 encoded alphanumeric text """
if not txt: return try: txt = txt.decode('utf-8') except: try: txt = txt.decode('utf-16') except: pass self.extra_chars = 0 def encode_char(char): """ Encodes a single utf-8 character into a sequence of esc-pos code page change instructions and character declarations """ char_utf8 = char.encode('utf-8') encoded = '' encoding = self.encoding # we reuse the last encoding to prevent code page switches at every character encodings = { # TODO use ordering to prevent useless switches # TODO Support other encodings not natively supported by python ( Thai, Khazakh, Kanjis ) 'cp437': TXT_ENC_PC437, 'cp850': TXT_ENC_PC850, 'cp852': TXT_ENC_PC852, 'cp857': TXT_ENC_PC857, 'cp858': TXT_ENC_PC858, 'cp860': TXT_ENC_PC860, 'cp863': TXT_ENC_PC863, 'cp865': TXT_ENC_PC865, 'cp866': TXT_ENC_PC866, 'cp862': TXT_ENC_PC862, 'cp720': TXT_ENC_PC720, 'cp936': TXT_ENC_PC936, 'iso8859_2': TXT_ENC_8859_2, 'iso8859_7': TXT_ENC_8859_7, 'iso8859_9': TXT_ENC_8859_9, 'cp1254' : TXT_ENC_WPC1254, 'cp1255' : TXT_ENC_WPC1255, 'cp1256' : TXT_ENC_WPC1256, 'cp1257' : TXT_ENC_WPC1257, 'cp1258' : TXT_ENC_WPC1258, 'katakana' : TXT_ENC_KATAKANA, } remaining = copy.copy(encodings) if not encoding : encoding = 'cp437' while True: # Trying all encoding until one succeeds try: if encoding == 'katakana': # Japanese characters if jcconv: # try to convert japanese text to a half-katakanas kata = jcconv.kata2half(jcconv.hira2kata(char_utf8)) if kata != char_utf8: self.extra_chars += len(kata.decode('utf-8')) - 1 # the conversion may result in multiple characters return encode_str(kata.decode('utf-8')) else: kata = char_utf8 if kata in TXT_ENC_KATAKANA_MAP: encoded = TXT_ENC_KATAKANA_MAP[kata] break else: raise ValueError() else: encoded = char.encode(encoding) break except ValueError: #the encoding failed, select another one and retry if encoding in remaining: del remaining[encoding] if len(remaining) >= 1: encoding = remaining.items()[0][0] else: encoding = 'cp437' encoded = '\xb1' # could not encode, output error character break; if encoding != self.encoding: # if the encoding changed, remember it and prefix the character with # the esc-pos encoding change sequence self.encoding = encoding encoded = encodings[encoding] + encoded return encoded def encode_str(txt): buffer = '' for c in txt: buffer += encode_char(c) return buffer txt = encode_str(txt) # if the utf-8 -> codepage conversion inserted extra characters, # remove double spaces to try to restore the original string length # and prevent printing alignment issues while self.extra_chars > 0: dspace = txt.find(' ') if dspace > 0: txt = txt[:dspace] + txt[dspace+1:] self.extra_chars -= 1 else: break self._raw(txt)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set(self, align='left', font='a', type='normal', width=1, height=1): """ Set text properties """
# Align if align.upper() == "CENTER": self._raw(TXT_ALIGN_CT) elif align.upper() == "RIGHT": self._raw(TXT_ALIGN_RT) elif align.upper() == "LEFT": self._raw(TXT_ALIGN_LT) # Font if font.upper() == "B": self._raw(TXT_FONT_B) else: # DEFAULT FONT: A self._raw(TXT_FONT_A) # Type if type.upper() == "B": self._raw(TXT_BOLD_ON) self._raw(TXT_UNDERL_OFF) elif type.upper() == "U": self._raw(TXT_BOLD_OFF) self._raw(TXT_UNDERL_ON) elif type.upper() == "U2": self._raw(TXT_BOLD_OFF) self._raw(TXT_UNDERL2_ON) elif type.upper() == "BU": self._raw(TXT_BOLD_ON) self._raw(TXT_UNDERL_ON) elif type.upper() == "BU2": self._raw(TXT_BOLD_ON) self._raw(TXT_UNDERL2_ON) elif type.upper() == "NORMAL": self._raw(TXT_BOLD_OFF) self._raw(TXT_UNDERL_OFF) # Width if width == 2 and height != 2: self._raw(TXT_NORMAL) self._raw(TXT_2WIDTH) elif height == 2 and width != 2: self._raw(TXT_NORMAL) self._raw(TXT_2HEIGHT) elif height == 2 and width == 2: self._raw(TXT_2WIDTH) self._raw(TXT_2HEIGHT) else: # DEFAULT SIZE: NORMAL self._raw(TXT_NORMAL)
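A quick usage sketch (`printer` stands for a hypothetical Escpos instance; the TXT_* constants come from the module's command tables):

printer.set(align='center', type='BU', width=2, height=2)
printer.text('TOTAL')
printer.set()  # back to left-aligned font A at normal size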
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cashdraw(self, pin): """ Send pulse to kick the cash drawer """
if pin == 2: self._raw(CD_KICK_2) elif pin == 5: self._raw(CD_KICK_5) else: raise CashDrawerError()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def control(self, ctl): """ Feed control sequences """
if ctl.upper() == "LF": self._raw(CTL_LF) elif ctl.upper() == "FF": self._raw(CTL_FF) elif ctl.upper() == "CR": self._raw(CTL_CR) elif ctl.upper() == "HT": self._raw(CTL_HT) elif ctl.upper() == "VT": self._raw(CTL_VT)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def open(self): """ Search device on USB tree and set is as escpos device """
self.device = usb.core.find(idVendor=self.idVendor, idProduct=self.idProduct) if self.device is None: raise NoDeviceError() try: if self.device.is_kernel_driver_active(self.interface): self.device.detach_kernel_driver(self.interface) self.device.set_configuration() usb.util.claim_interface(self.device, self.interface) except usb.core.USBError as e: raise HandleDeviceError(e)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def open(self): """ Open TCP socket and set it as escpos device """
self.device = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.device.connect((self.host, self.port)) if self.device is None: print "Could not open socket for %s" % self.host
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _pinyin_generator(chars, format): """Generate pinyin for chars; if a char is not a Chinese character, the character itself is returned. Chars must be a unicode list. """
for char in chars: key = "%X" % ord(char) pinyin = pinyin_dict.get(key, char) tone = pinyin_tone.get(key, 0) if tone == 0 or format == "strip": pass elif format == "numerical": pinyin += str(tone) elif format == "diacritical": # Find first vowel -- where we should put the diacritical mark vowels = itertools.chain((c for c in pinyin if c in "aeo"), (c for c in pinyin if c in "iuv")) vowel = pinyin.index(next(vowels)) + 1 pinyin = pinyin[:vowel] + tonemarks[tone] + pinyin[vowel:] else: error = "Format must be one of: numerical/diacritical/strip" raise ValueError(error) yield unicodedata.normalize('NFC', pinyin)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(s, delimiter='', format="diacritical"): """Return the pinyin of a string; the string must be unicode """
return delimiter.join(_pinyin_generator(u(s), format=format))
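A couple of hedged examples; the expected outputs assume the bundled dictionary carries the usual readings for these characters:

# -*- coding: utf-8 -*-
print(get(u'你好'))                                      # nǐhǎo
print(get(u'你好', delimiter=' ', format='numerical'))   # ni3 hao3
print(get(u'Hi 你好'))                                   # Hi nǐhǎo (non-Chinese passes through)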
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_initial(s, delimiter=' '): """Return the first character of the pinyin of each character in the string; the string must be unicode """
initials = (p[0] for p in _pinyin_generator(u(s), format="strip")) return delimiter.join(initials)
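And the initials helper, under the same assumptions:

# -*- coding: utf-8 -*-
print(get_initial(u'你好'))                 # n h
print(get_initial(u'你好', delimiter=''))   # nh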
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def init(): ''' Load in the Chinese-English dictionary. This takes 1-2 seconds. It is done when the other functions are used, but this is public since preloading sometimes makes sense. ''' global dictionaries, trees dictionaries = { 'traditional': {}, 'simplified': {} } trees = { 'traditional': Tree(), 'simplified': Tree() } lines = gzip.open( os.path.join(os.path.dirname(__file__), "cedict.txt.gz"), mode='rt', encoding='utf-8' ) exp = re.compile("^([^ ]+) ([^ ]+) \[(.*)\] /(.+)/") parsed_lines = (exp.match(line).groups() for line in lines if line[0] != '#') for traditional, simplified, pinyin, meaning in parsed_lines: meaning = meaning.split('/') dictionaries['traditional'][traditional] = meaning dictionaries['simplified'][simplified] = meaning _add_to_tree(trees['traditional'], traditional, meaning) _add_to_tree(trees['simplified'], simplified, meaning)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def translate_word(word, dictionary=['simplified']): ''' Return the set of translations for a single character or word, if available. ''' if not dictionaries: init() for d in dictionary: if word in dictionaries[d]: return dictionaries[d][word] return None
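A hedged usage sketch; the exact meaning lists come straight from the bundled CC-CEDICT, so treat the outputs as indicative:

# -*- coding: utf-8 -*-
print(translate_word(u'你好'))
# e.g. ['hello', 'hi']

# Try the traditional dictionary first, then fall back to the simplified one:
print(translate_word(u'愛', dictionary=['traditional', 'simplified']))
# e.g. ['to love', 'affection', 'to be fond of', 'to like']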
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _words_at_the_beginning(word, tree, prefix=""): ''' We return all portions of the tree corresponding to the beginning of `word`. This is used recursively, so we pass the prefix so we can return meaningful words+translations. ''' l = [] if "" in tree: l.append([prefix, tree[""]]) if len(word) > 0 and word[0] in tree: l.extend(_words_at_the_beginning( word[1:], tree[word[0]], prefix=prefix+word[0] )) return l
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def all_phrase_translations(phrase): ''' Return the set of translations for all possible words in a full phrase. Chinese is sometimes ambiguous. We do not attempt to disambiguate, or handle unknown letters especially well. Full parsing is left to upstream logic. ''' if not trees: init() # split() with no argument splits on any whitespace run; passing # string.whitespace would only split on that exact six-character string. for word in phrase.split(): for x in range(len(word)): # Skip characters that start no dictionary entry. if word[x] not in trees['simplified']: continue for translation in _words_at_the_beginning( word[x+1:], trees['simplified'][word[x]], prefix=word[x]): yield translation
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_status_key(self, instance): """Generates a key used to set a status on a field"""
key_id = "inst_%s" % id(instance) if instance.pk is None else instance.pk return "%s.%s-%s-%s" % (instance._meta.app_label, get_model_name(instance), key_id, self.field.name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_status(self, instance): """Retrieves the status of a field from cache. Fields in the 'error' or 'complete' state will not retain their status after the call. """
status_key, status = self._get_status(instance) if status['state'] in ['complete', 'error']: cache.delete(status_key) return status
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_status(self, instance, status): """Sets the field status for up to 5 minutes."""
status_key = self.get_status_key(instance) cache.set(status_key, status, timeout=300)
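A hypothetical round trip through the cache (`manager` and `instance` stand in for a real field manager and model instance):

manager.set_status(instance, {'state': 'in_progress', 'progress': 40})
manager.get_status(instance)   # -> {'state': 'in_progress', 'progress': 40}

manager.set_status(instance, {'state': 'complete'})
manager.get_status(instance)   # returned once; the cache key is then deleted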
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_mode(self, old_mode=None): """Returns the output mode. If `mode` is not set, it will try to guess the best mode, or the next best mode compared to the old mode. """
if self.mode is not None: return self.mode assert self.can_write, "This format does not have a supported output mode." if old_mode is None: return self.output_modes[0] if old_mode in self.output_modes: return old_mode # now let's get best mode available from supported try: idx = PILLOW_MODES.index(old_mode) except ValueError: # maybe some unknown or uncommon mode return self.output_modes[0] for mode in PILLOW_MODES[idx+1:]: if mode in self.output_modes: return mode # since there is no better one, lets' look for closest one in opposite direction opposite = PILLOW_MODES[:idx] opposite.reverse() for mode in opposite: if mode in self.output_modes: return mode
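A rough sketch of the fallback behaviour (`fmt` is a hypothetical format object; `PILLOW_MODES` is the module's ordered list of Pillow modes):

fmt.mode = None
fmt.output_modes = ['RGB', 'L']   # modes this format can write

fmt.get_mode()                    # -> 'RGB' (no old mode: first supported)
fmt.get_mode(old_mode='L')        # -> 'L' (old mode is already supported)
fmt.get_mode(old_mode='RGBA')     # -> nearest supported neighbour in PILLOW_MODES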
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _send(self, data, msg_type='ok', silent=False): """ Send a response to the frontend and return an execute message @param data: response to send @param msg_type (str): message type: 'ok', 'raw', 'error', 'multi' @param silent (bool): suppress output @return (dict): the return value for the kernel """
# Data to send back if data is not None: # log the message try: self._klog.debug(u"msg to frontend (%d): %.160s...", silent, data) except Exception as e: self._klog.warn(u"can't log response: %s", e) # send it to the frontend if not silent: if msg_type != 'raw': data = data_msg(data, mtype=msg_type) self.send_response(self.iopub_socket, 'display_data', data) # Result message return {'status': 'error' if msg_type == 'error' else 'ok', # The base class will increment the execution count 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {} }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False): """ Method called to execute a cell """
self._klog.info("[%.30s] [%d] [%s]", code, silent, user_expressions) # Split lines and remove empty lines & comments code_noc = [line.strip() for line in code.split('\n') if line and line[0] != '#'] if not code_noc: return self._send(None) # Process try: # Detect if we've got magics magic_lines = [] for line in code_noc: if line[0] != '%': break magic_lines.append(line) # Process magics. Once done, remove them from the query buffer if magic_lines: out = [self._k.magic(line) for line in magic_lines] self._send(out, 'multi', silent=silent) code = '\n'.join(code_noc[len(magic_lines):]) # If we have a regular SPARQL query, process it now result = self._k.query(code, num=self.execution_count) if code else None # Return the result return self._send(result, 'raw', silent=silent) except Exception as e: return self._send(e, 'error', silent=silent)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def do_inspect(self, code, cursor_pos, detail_level=0): """ Method called on help requests """
self._klog.info("{%s}", code[cursor_pos:cursor_pos+10]) # Find the token for which help is requested token, start = token_at_cursor(code, cursor_pos) self._klog.debug("token={%s} {%d}", token, detail_level) # Find the help for this token if not is_magic(token, start, code): info = sparql_help.get(token.upper(), None) elif token == '%': info = magic_help else: info = magics.get(token, None) if info: info = '{} {}\n\n{}'.format(token, *info) return {'status': 'ok', 'data': {'text/plain': info}, 'metadata': {}, 'found': info is not None }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def do_complete(self, code, cursor_pos): """ Method called on autocompletion requests """
self._klog.info("{%s}", code[cursor_pos:cursor_pos+10]) token, start = token_at_cursor(code, cursor_pos) tkn_low = token.lower() if is_magic(token, start, code): matches = [k for k in magics.keys() if k.startswith(tkn_low)] else: matches = [sparql_names[k] for k in sparql_names if k.startswith(tkn_low)] self._klog.debug("token={%s} matches={%r}", token, matches) if matches: return {'status': 'ok', 'cursor_start': start, 'cursor_end': start+len(token), 'matches': matches}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def data_msglist( msglist ): """ Return a Jupyter display_data message, in both HTML & text formats, by joining together all passed messages. @param msglist (iterable): an iterable containing a list of tuples (message, css_style) Each message is either a text string, or a list. In the latter case it is assumed to be a format string + parameters. """
txt = html = u'' for msg, css in msglist: if is_collection(msg): msg = msg[0].format(*msg[1:]) html += div( escape(msg).replace('\n','<br/>'), css=css or 'msg' ) txt += msg + "\n" return { 'data': {'text/html' : div(html), 'text/plain' : txt }, 'metadata' : {} }
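A small sketch of the resulting message shape (this assumes `is_collection` treats plain strings as non-collections, as the body implies):

msg = data_msglist([(u'query sent', 'msg'),
                    ([u'{} rows in {:.2f}s', 120, 0.35], None)])
# msg['data']['text/plain'] == u'query sent\n120 rows in 0.35s\n'
# msg['data']['text/html'] wraps each message in a styled <div>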
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def copyresource( resource, filename, destdir ): """ Copy a resource file to a destination """
data = pkgutil.get_data(resource, os.path.join('resources',filename) ) #log.info( "Installing %s", os.path.join(destdir,filename) ) with open( os.path.join(destdir,filename), 'wb' ) as fp: fp.write(data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def install_kernel_resources( destdir, resource=PKGNAME, files=None ): """ Copy the resource files to the kernelspec folder. """
if files is None: files = ['logo-64x64.png', 'logo-32x32.png'] for filename in files: try: copyresource( resource, filename, destdir ) except Exception as e: sys.stderr.write(str(e))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def install_custom_css( destdir, cssfile, resource=PKGNAME ): """ Add the kernel CSS to custom.css """
ensure_dir_exists( destdir ) custom = os.path.join( destdir, 'custom.css' ) prefix = css_frame_prefix(resource) # Check if custom.css already includes it. If so, let's remove it first exists = False if os.path.exists( custom ): with io.open(custom) as f: for line in f: if line.find( prefix ) >= 0: exists = True break if exists: remove_custom_css( destdir, resource ) # Fetch the CSS file cssfile += '.css' data = pkgutil.get_data( resource, os.path.join('resources',cssfile) ) # get_data() delivers encoded data, str (Python2) or bytes (Python3) # Add the CSS at the beginning of custom.css # io.open uses unicode strings (unicode in Python2, str in Python3) with io.open(custom + '-new', 'wt', encoding='utf-8') as fout: fout.write( u'{}START ======================== */\n'.format(prefix)) fout.write( data.decode('utf-8') ) fout.write( u'{}END ======================== */\n'.format(prefix)) if os.path.exists( custom ): with io.open( custom, 'rt', encoding='utf-8' ) as fin: for line in fin: fout.write( unicode(line) ) os.rename( custom+'-new',custom)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_custom_css(destdir, resource=PKGNAME ): """ Remove the kernel CSS from custom.css """
# Remove the inclusion in the main CSS if not os.path.isdir( destdir ): return False custom = os.path.join( destdir, 'custom.css' ) copy = True found = False prefix = css_frame_prefix(resource) with io.open(custom + '-new', 'wt') as fout: with io.open(custom) as fin: for line in fin: if line.startswith( prefix + 'START' ): copy = False found = True elif line.startswith( prefix + 'END' ): copy = True elif copy: fout.write( line ) if found: os.rename( custom+'-new',custom) else: os.unlink( custom+'-new') return found
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def html_elem(e, ct, withtype=False): """ Format a result element as an HTML table cell. @param e (list): a pair \c (value,type) @param ct (str): cell type (th or td) @param withtype (bool): add an additional cell with the element type """
# Header cell if ct == 'th': return '<th>{0}</th><th>{1}</th>'.format(*e) if withtype else '<th>{}</th>'.format(e) # Content cell if e[1] in ('uri', 'URIRef'): html = u'<{0} class=val><a href="{1}" target="_other">{2}</a></{0}>'.format(ct, e[0], escape(e[0])) else: html = u'<{0} class=val>{1}</{0}>'.format(ct, escape(e[0])) # Create the optional cell for the type if withtype: html += u'<{0} class=typ>{1}</{0}>'.format(ct, e[1]) return html
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def html_table(data, header=True, limit=None, withtype=False): """ Return a double iterable as an HTML table @param data (iterable): the data to format @param header (bool): if the first row is a header row @param limit (int): maximum number of rows to render (excluding header) @param withtype (bool): if an additional cell with the element type should be added for each column @return (int,string): a pair <number-of-rendered-rows>, <html-table> """
if header and limit: limit += 1 ct = 'th' if header else 'td' rc = 'hdr' if header else 'odd' html = u'<table>' rn = -1 for rn, row in enumerate(data): html += u'<tr class={}>'.format(rc) html += '\n'.join((html_elem(c, ct, withtype) for c in row)) html += u'</tr>' rc = 'even' if rc == 'odd' else 'odd' ct = 'td' if limit: limit -= 1 if not limit: break return (0, '') if rn < 0 else (rn+1-header, html+u'</table>')
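A small illustration (the header row carries plain column names, data cells are (value, type) pairs, matching what this module's iterators yield):

rows = [['name', 'age'],
        [('alice', 'literal'), ('42', 'literal')]]
n, html = html_table(rows, header=True, limit=10)
# n == 1 rendered data row; html is the complete '<table>...</table>' string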
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def jtype(c): """ Return a string with the data type of a value, for JSON data """
ct = c['type'] return ct if ct != 'literal' else '{}, {}'.format(ct, c.get('xml:lang'))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gtype(n): """ Return a string with the data type of a value, for Graph data """
t = type(n).__name__ return str(t) if t != 'Literal' else 'Literal, {}'.format(n.language)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def lang_match_json(row, hdr, accepted_languages): '''Find if the JSON row contains acceptable language data''' if not accepted_languages: return True languages = set([row[c].get('xml:lang') for c in hdr if c in row and row[c]['type'] == 'literal']) return (not languages) or (languages & accepted_languages)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def lang_match_rdf(triple, accepted_languages): '''Find if the RDF triple contains acceptable language data''' if not accepted_languages: return True languages = set([n.language for n in triple if isinstance(n, Literal)]) return (not languages) or (languages & accepted_languages)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def lang_match_xml(row, accepted_languages): '''Find if the XML row contains acceptable language data''' if not accepted_languages: return True column_languages = set() for elem in row: lang = elem[0].attrib.get(XML_LANG, None) if lang: column_languages.add(lang) return (not column_languages) or (column_languages & accepted_languages)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def render_json(result, cfg, **kwargs): """ Render to output a result in JSON format """
result = json.loads(result.decode('utf-8')) head = result['head'] if 'results' not in result: if 'boolean' in result: r = u'Result: {}'.format(result['boolean']) else: r = u'Unsupported result: \n' + unicode(result) return {'data': {'text/plain': r}, 'metadata': {}} vars = head['vars'] nrow = len(result['results']['bindings']) if cfg.dis == 'table': j = json_iterator(vars, result['results']['bindings'], set(cfg.lan), add_vtype=cfg.typ) n, data = html_table(j, limit=cfg.lmt, withtype=cfg.typ) data += div('Total: {}, Shown: {}', nrow, n, css="tinfo") data = {'text/html': div(data)} else: result = json.dumps(result, ensure_ascii=False, indent=2, sort_keys=True) data = {'text/plain': unicode(result)} return {'data': data, 'metadata': {}}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def xml_row(row, lang): ''' Generator for an XML row ''' for elem in row: name = elem.get('name') child = elem[0] ftype = re.sub(r'\{[^}]+\}', '', child.tag) if ftype == 'literal': ftype = '{}, {}'.format(ftype, child.attrib.get(XML_LANG, 'none')) yield (name, (child.text, ftype))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def render_xml(result, cfg, **kwargs): """ Render to output a result in XML format """
# Raw mode if cfg.dis == 'raw': return {'data': {'text/plain': result.decode('utf-8')}, 'metadata': {}} # Table try: import xml.etree.cElementTree as ET except ImportError: import xml.etree.ElementTree as ET root = ET.fromstring(result) try: ns = {'ns': re.match(r'\{([^}]+)\}', root.tag).group(1)} except Exception: raise KrnlException('Invalid XML data: cannot get namespace') columns = [c.attrib['name'] for c in root.find('ns:head', ns)] results = root.find('ns:results', ns) nrow = len(results) j = xml_iterator(columns, results, set(cfg.lan), add_vtype=cfg.typ) n, data = html_table(j, limit=cfg.lmt, withtype=cfg.typ) data += div('Total: {}, Shown: {}', nrow, n, css="tinfo") return {'data': {'text/html': div(data)}, 'metadata': {}}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def render_graph(result, cfg, **kwargs): """ Render to output a result that can be parsed as an RDF graph """
# Mapping from MIME types to formats accepted by RDFlib rdflib_formats = {'text/rdf+n3': 'n3', 'text/turtle': 'turtle', 'application/x-turtle': 'turtle', 'application/rdf+xml': 'xml', 'text/rdf': 'xml'} try: got = kwargs.get('format', 'text/rdf+n3') fmt = rdflib_formats[got] except KeyError: raise KrnlException('Unsupported format for graph processing: {!s}', got) g = ConjunctiveGraph() g.load(StringInputSource(result), format=fmt) display = cfg.dis[0] if is_collection(cfg.dis) else cfg.dis if display in ('png', 'svg'): try: literal = len(cfg.dis) > 1 and cfg.dis[1].startswith('withlit') opt = {'lang': cfg.lan, 'literal': literal, 'graphviz': []} data, metadata = draw_graph(g, fmt=display, options=opt) return {'data': data, 'metadata': metadata} except Exception as e: raise KrnlException('Exception while drawing graph: {!r}', e) elif display == 'table': it = rdf_iterator(g, set(cfg.lan), add_vtype=cfg.typ) n, data = html_table(it, limit=cfg.lmt, withtype=cfg.typ) data += div('Shown: {}, Total rows: {}', n if cfg.lmt else 'all', len(g), css="tinfo") data = {'text/html': div(data)} elif len(g) == 0: data = {'text/html': div(div('empty graph', css='krn-warn'))} else: data = {'text/plain': g.serialize(format='nt').decode('utf-8')} return {'data': data, 'metadata': {}}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_logging( logfilename=None, level=None ): """ Set a logging configuration, with a rolling file appender. If passed a filename, use it as the logfile, else use a default name. The default logfile is \c sparqlkernel.log, placed in the directory given by (in that order) the \c LOGDIR environment variable, the logdir specified upon kernel installation or the default temporary directory. """
if logfilename is None: # Find the logging directory logdir = os.environ.get( 'LOGDIR' ) if logdir is None: logdir = os.environ.get( 'LOGDIR_DEFAULT', tempfile.gettempdir() ) # Define the log filename basename = __name__.split('.')[-2] logfilename = os.path.join( logdir, basename + '.log' ) LOGCONFIG['handlers']['default']['filename'] = logfilename if level is not None: LOGCONFIG['loggers']['sparqlkernel']['level'] = level dictConfig( LOGCONFIG )
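A brief usage sketch (paths illustrative; the module-level LOGCONFIG dict is assumed, as the body above implies):

import os

os.environ['LOGDIR'] = '/tmp'
set_logging(level='DEBUG')   # rolls to /tmp/<package>.log

set_logging(logfilename='/tmp/sparql.log', level='INFO')   # explicit file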
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def smartfields_get_field_status(self, field_name): """A way to find out the status of a field."""
manager = self._smartfields_managers.get(field_name, None) if manager is not None: return manager.get_status(self) return {'state': 'ready'}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_output_file(self, in_file, instance, field, **kwargs): """Creates a temporary file. With a regular `FileSystemStorage` it does not need to be deleted; instead the file is safely moved over. With other, cloud-based storages it is a good idea to set `delete=True`."""
return NamedTemporaryFile(mode='rb', suffix='_%s_%s%s' % ( get_model_name(instance), field.name, self.get_ext()), delete=False)