<SYSTEM_TASK:> Prints a prepared request to give the user info as to what they're sending <END_TASK> <USER_TASK:> Description: def print_request(request): """ Prints a prepared request to give the user info as to what they're sending :param request.PreparedRequest request: PreparedRequest object to be printed :return: Nothing """
print('{}\n{}\n{}\n\n{}'.format( '-----------START-----------', request.method + ' ' + request.url, '\n'.join('{}: {}'.format(k, v) for k, v in request.headers.items()), request.body, ))
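A minimal usage sketch, assuming the `requests` library (the source of `PreparedRequest`); the URL and payload are illustrative only:

import requests

# Hypothetical request, purely for illustration
req = requests.Request('POST', 'https://example.com/api',
                       json={'key': 'value'}).prepare()
print_request(req)  # shows method, URL, headers and body between the markers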
<SYSTEM_TASK:> This filter validates input data against the resource's <END_TASK> <USER_TASK:> Description: def filter_validate_schemas(get_response, params): """ This filter validates input data against the resource's ``request_schema`` and fill the request's ``validated`` dict. Data from ``request.params`` and ``request.body`` (when the request body is of a form type) will be converted using the schema in order to get proper lists or unique values. .. important:: The request validation is only effective when a ``request_schema`` has been provided by the resource definition. """
request_schema = params.get('request_schema') if request_schema is None: return get_response def _convert_params(schema, data): for sc in schema.fields.values(): name = sc.serialized_name or sc.name val = data.getlist(name) if val is None: continue if len(val) == 1 and not isinstance(sc, ListType): val = val[0] data[name] = val async def decorated_filter(request, *args, **kwargs): data = { 'headers': CIDict(request.headers), 'path': request.app.router.get(request)[2], 'params': RequestParameters(request.args), 'body': {} } if request.body: # Get body if we have something there if request.form: data['body'] = RequestParameters(request.form) else: # will raise 400 if cannot parse json data['body'] = deepcopy(request.json) if hasattr(request_schema, 'body') and request.form: _convert_params(request_schema.body, data['body']) if hasattr(request_schema, 'params') and data['params']: _convert_params(request_schema.params, data['params']) # Now, validate the whole thing try: model = request_schema(data, strict=False, validate=False) model.validate() request.validated = model.to_native() except BaseError as e: raise ValidationErrors(e.to_primitive()) return await get_response(request, *args, **kwargs) return decorated_filter
<SYSTEM_TASK:> Standard workflow for creating an EPUB document. <END_TASK> <USER_TASK:> Description: def make_EPUB(parsed_article, output_directory, input_path, image_directory, config_module=None, epub_version=None, batch=False):
    """
    Standard workflow for creating an EPUB document.

    make_EPUB is used to produce an EPUB file from a parsed article. In addition
    to the article it also requires a path to the appropriate image directory
    which it will insert into the EPUB file, as well as the output directory
    location for the EPUB file.

    Parameters
    ----------
    parsed_article : openaccess_epub.article.Article instance
        `parsed_article` is an Article instance for the XML document to be
        converted to EPUB.
    output_directory : str
        `output_directory` is a string path to the directory in which the EPUB
        will be produced. The name of the directory will be used as the EPUB's
        filename.
    input_path : str
        `input_path` is a string absolute path to the input XML file, used to
        locate input-relative images.
    image_directory : str
        `image_directory` is a string path indicating an explicit image
        directory. If supplied, other image input methods will not be used.
    config_module : config module, optional
        `config_module` is a pre-loaded config module for OpenAccess_EPUB; if
        not used then this function will load the global config file. Might be
        useful in certain cases to dynamically alter configuration.
    epub_version : {None, 2, 3}
        `epub_version` dictates which version of EPUB is to be created. An
        error will be raised if the specified version is not supported for the
        publisher. If left to the default, the created version will defer to
        the publisher default version.
    batch : bool, optional
        `batch` indicates that batch creation is being used (such as with the
        `oaepub batch` command). In this case, directory conflicts will be
        automatically resolved (in favor of keeping previous data, skipping
        creation of EPUB).

    Returns False in the case of a fatal error, True if successful.
    """
#command_log.info('Creating {0}.epub'.format(output_directory)) if config_module is None: config_module = openaccess_epub.utils.load_config_module() if epub_version not in (None, 2, 3): log.error('Invalid EPUB version: {0}'.format(epub_version)) raise ValueError('Invalid EPUB version. Should be 2 or 3') if epub_version is None: epub_version = parsed_article.publisher.epub_default #Handle directory output conflicts if os.path.isdir(output_directory): if batch: # No user prompt, default to protect previous data log.error('Directory conflict during batch conversion, skipping.') return False else: # User prompting openaccess_epub.utils.dir_exists(output_directory) else: try: os.makedirs(output_directory) except OSError as err: if err.errno != 17: log.exception('Unable to recursively create output directories') #Copy over the basic epub directory make_epub_base(output_directory) #Get the images, if possible, fail gracefully if not success = openaccess_epub.utils.images.get_images(output_directory, image_directory, input_path, config_module, parsed_article) if not success: log.critical('Images for the article were not located! Aborting!') return False #Instantiate Navigation and Package epub_nav = Navigation() epub_package = Package() #Process the article for navigation and package info epub_nav.process(parsed_article) epub_package.process(parsed_article) #Render the content using publisher-specific methods parsed_article.publisher.render_content(output_directory, epub_version) if epub_version == 2: epub_nav.render_EPUB2(output_directory) epub_package.render_EPUB2(output_directory) elif epub_version == 3: epub_nav.render_EPUB3(output_directory) epub_package.render_EPUB3(output_directory) #Zip the directory into EPUB epub_zip(output_directory) return True
<SYSTEM_TASK:> Creates the base structure for an EPUB file in a specified location. <END_TASK> <USER_TASK:> Description: def make_epub_base(location): """ Creates the base structure for an EPUB file in a specified location. This function creates constant components for the structure of the EPUB in a specified directory location. Parameters ---------- location : str A path string to a local directory in which the EPUB is to be built """
log.info('Making EPUB base files in {0}'.format(location))
    with open(os.path.join(location, 'mimetype'), 'w') as out:  # mimetype file
        out.write('application/epub+zip')
    # Create EPUB and META-INF directories
    os.mkdir(os.path.join(location, 'META-INF'))
    os.mkdir(os.path.join(location, 'EPUB'))
    os.mkdir(os.path.join(location, 'EPUB', 'css'))
    with open(os.path.join(location, 'META-INF', 'container.xml'), 'w') as out:
        out.write('''\
<?xml version="1.0" encoding="UTF-8"?>
<container version="1.0" xmlns="urn:oasis:names:tc:opendocument:xmlns:container">
 <rootfiles>
 <rootfile full-path="EPUB/package.opf" media-type="application/oebps-package+xml"/>
 </rootfiles>
</container>''')
    with open(os.path.join(location, 'EPUB', 'css', 'default.css'), 'wb') as out:
        out.write(bytes(DEFAULT_CSS, 'UTF-8'))
<SYSTEM_TASK:> Zips up the input file directory into an EPUB file. <END_TASK> <USER_TASK:> Description: def epub_zip(outdirect): """ Zips up the input file directory into an EPUB file. """
def recursive_zip(zipf, directory, folder=None): if folder is None: folder = '' for item in os.listdir(directory): if os.path.isfile(os.path.join(directory, item)): zipf.write(os.path.join(directory, item), os.path.join(directory, item)) elif os.path.isdir(os.path.join(directory, item)): recursive_zip(zipf, os.path.join(directory, item), os.path.join(folder, item)) log.info('Zipping up the directory {0}'.format(outdirect)) epub_filename = outdirect + '.epub' epub = zipfile.ZipFile(epub_filename, 'w') current_dir = os.getcwd() os.chdir(outdirect) epub.write('mimetype') log.info('Recursively zipping META-INF and EPUB') for item in os.listdir('.'): if item == 'mimetype': continue recursive_zip(epub, item) os.chdir(current_dir) epub.close()
<SYSTEM_TASK:> Produces a directory description. A directory description is a dictionary containing the following information. <END_TASK> <USER_TASK:> Description: def _input_directory_description(input_identifier, arg_item, input_dir): """ Produces a directory description. A directory description is a dictionary containing the following information. - 'path': An array containing the paths to the specified directories. - 'debugInfo': A field to possibly provide debug information. - 'found': A boolean that indicates, if the directory exists in the local filesystem. - 'listing': A listing that shows which files are in the given directory. This could be None. :param input_identifier: The input identifier in the cwl description file :param arg_item: The corresponding job information :param input_dir: TODO :return: A directory description :raise DirectoryError: If the given directory does not exist or is not a directory. """
description = {
        'path': None,
        'found': False,
        'debugInfo': None,
        'listing': None,
        'basename': None
    }

    try:
        path = location(input_identifier, arg_item)
        if input_dir and not os.path.isabs(path):
            path = os.path.join(os.path.expanduser(input_dir), path)
        description['path'] = path
        if not os.path.exists(path):
            raise DirectoryError('path does not exist')
        if not os.path.isdir(path):
            raise DirectoryError('path is not a directory')
        description['listing'] = arg_item.get('listing')
        description['basename'] = os.path.basename(path)
        description['found'] = True
    except Exception:
        # Record the failure in 'debugInfo' instead of propagating it
        description['debugInfo'] = exception_format()

    return description
<SYSTEM_TASK:> Raises a DirectoryError if files or directories, given in the listing, could not be found in the local filesystem. <END_TASK> <USER_TASK:> Description: def _check_input_directory_listing(base_directory, listing):
    """
    Raises a DirectoryError if files or directories, given in the listing,
    could not be found in the local filesystem.

    :param base_directory: The path to the directory to check
    :param listing: A listing given as dictionary
    :raise DirectoryError: If the given base directory does not contain all of
                           the subdirectories and subfiles given in the listing.
    """
for sub in listing:
        path = os.path.join(base_directory, sub['basename'])
        if sub['class'] == 'File':
            if not os.path.isfile(path):
                raise DirectoryError('File \'{}\' not found but specified in listing.'.format(path))
        if sub['class'] == 'Directory':
            if not os.path.isdir(path):
                raise DirectoryError('Directory \'{}\' not found but specified in listing.'.format(path))
            sub_listing = sub.get('listing')
            if sub_listing:
                _check_input_directory_listing(path, sub_listing)
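A sketch of the listing structure the function walks, inferred from the code above (each entry carries 'class' and 'basename', and directories may nest a 'listing'); the paths are hypothetical:

listing = [
    {'class': 'File', 'basename': 'data.csv'},
    {'class': 'Directory', 'basename': 'images', 'listing': [
        {'class': 'File', 'basename': 'fig1.png'},
    ]},
]

# Raises DirectoryError unless /inputs/run1/data.csv and
# /inputs/run1/images/fig1.png both exist locally
_check_input_directory_listing('/inputs/run1', listing)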
<SYSTEM_TASK:> Parses cwl type information from a cwl type string. <END_TASK> <USER_TASK:> Description: def parse_cwl_type(cwl_type_string): """ Parses cwl type information from a cwl type string. Examples: - "File[]" -> {'type': 'File', 'isArray': True, 'isOptional': False} - "int?" -> {'type': 'int', 'isArray': False, 'isOptional': True} :param cwl_type_string: The cwl type string to extract information from :return: A dictionary containing information about the parsed cwl type string """
is_optional = cwl_type_string.endswith('?') if is_optional: cwl_type_string = cwl_type_string[:-1] is_array = cwl_type_string.endswith('[]') if is_array: cwl_type_string = cwl_type_string[:-2] return {'type': cwl_type_string, 'isArray': is_array, 'isOptional': is_optional}
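The stripping order matters: the optional marker '?' is removed before the array marker '[]', so a combined suffix parses as expected:

parse_cwl_type('File[]')     # {'type': 'File', 'isArray': True, 'isOptional': False}
parse_cwl_type('int?')       # {'type': 'int', 'isArray': False, 'isOptional': True}
parse_cwl_type('string[]?')  # {'type': 'string', 'isArray': True, 'isOptional': True}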
<SYSTEM_TASK:> Searches for Directories in the cwl data and produces a dictionary containing input directory information. <END_TASK> <USER_TASK:> Description: def cwl_input_directories(cwl_data, job_data, input_dir=None):
    """
    Searches for Directories in the cwl data and produces a dictionary
    containing input directory information.

    :param cwl_data: The cwl data as dictionary
    :param job_data: The job data as dictionary
    :param input_dir: TODO
    :return: Returns a dictionary containing information about input
             directories. The keys of this dictionary are the input/output
             identifiers of the directories specified in the cwl description.
             The corresponding value is a dictionary again with the following
             keys and values:
             - 'isOptional': A bool indicating whether this input directory is optional
             - 'isArray': A bool indicating whether this could be a list of directories
             - 'directories': A list of input directory descriptions
             An input directory description is a dictionary containing the
             following information
             - 'path': The path to the specified directory
             - 'debugInfo': A field to possibly provide debug information
    """
results = {} for input_identifier, input_data in cwl_data['inputs'].items(): cwl_type = parse_cwl_type(input_data['type']) (is_optional, is_array, cwl_type) = itemgetter('isOptional', 'isArray', 'type')(cwl_type) if cwl_type == 'Directory': result = { 'isOptional': is_optional, 'isArray': is_array, 'directories': None } if input_identifier in job_data: arg = job_data[input_identifier] if is_array: result['directories'] = [_input_directory_description(input_identifier, i, input_dir) for i in arg] else: result['directories'] = [_input_directory_description(input_identifier, arg, input_dir)] results[input_identifier] = result return results
<SYSTEM_TASK:> Returns a dictionary containing information about the output files given in cwl_data. <END_TASK> <USER_TASK:> Description: def cwl_output_files(cwl_data, inputs_to_reference, output_dir=None): """ Returns a dictionary containing information about the output files given in cwl_data. :param cwl_data: The cwl data from where to extract the output file information. :param inputs_to_reference: Inputs which are used to resolve input references. :param output_dir: Path to the directory where output files are expected. :return: A dictionary containing information about every output file. """
results = {}

    for key, val in cwl_data['outputs'].items():
        cwl_type = parse_cwl_type(val['type'])
        (is_optional, is_array, cwl_type) = itemgetter('isOptional', 'isArray', 'type')(cwl_type)
        if not cwl_type == 'File':
            continue

        result = {
            'isOptional': is_optional,
            'path': None,
            'size': None,
            'debugInfo': None
        }

        glob_path = os.path.expanduser(val['outputBinding']['glob'])
        if output_dir and not os.path.isabs(glob_path):
            glob_path = os.path.join(os.path.expanduser(output_dir), glob_path)
        glob_path = resolve_input_references(glob_path, inputs_to_reference)
        matches = glob(glob_path)
        try:
            if len(matches) != 1:
                raise FileError('glob path "{}" does not match exactly one file'.format(glob_path))
            file_path = matches[0]
            result['path'] = file_path
            if not os.path.isfile(file_path):
                raise FileError('path is not a file')
            result['size'] = os.path.getsize(file_path) / (1024 * 1024)  # size in MiB
        except Exception:
            # Record the failure in 'debugInfo' instead of propagating it
            result['debugInfo'] = exception_format()

        results[key] = result

    return results
<SYSTEM_TASK:> Reads from the FIFO. <END_TASK> <USER_TASK:> Description: def read(self, length=-1):
    """
    Reads from the FIFO.

    Reads as much data as possible from the FIFO up to the specified
    length. If the length argument is negative or omitted all data
    currently available in the FIFO will be read. If there is no data
    available in the FIFO an empty string is returned.

    Args:
        length: The amount of data to read from the FIFO. Defaults to -1.
    """
if 0 <= length < len(self): newpos = self.pos + length data = self.buf[self.pos:newpos] self.pos = newpos self.__discard() return data data = self.buf[self.pos:] self.clear() return data
<SYSTEM_TASK:> Reads data from the FIFO until a token is encountered. <END_TASK> <USER_TASK:> Description: def readuntil(self, token, size=0):
    """
    Reads data from the FIFO until a token is encountered.

    If no token is encountered as much data is read from the FIFO as
    possible keeping in mind that the FIFO must retain enough data to
    perform matches for the token across writes.

    Args:
        token: The token to read until.
        size: The minimum amount of data that should be left in the FIFO.
            This is only used if it is greater than the length of the
            token. When omitted this value will default to the length of
            the token.

    Returns:
        A tuple of (found, data) where found is a boolean indicating whether
        the token was found, and data is all the data that could be read
        from the FIFO.

    Note:
        When a token is found the token is also read from the buffer and
        returned in the data.
    """
self.__append() i = self.buf.find(token, self.pos) if i < 0: index = max(len(token) - 1, size) newpos = max(len(self.buf) - index, self.pos) data = self.buf[self.pos:newpos] self.pos = newpos self.__discard() return False, data newpos = i + len(token) data = self.buf[self.pos:newpos] self.pos = newpos self.__discard() return True, data
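The retention rule is the interesting part: when the token is absent, the FIFO keeps at least len(token) - 1 trailing characters so a token split across two writes can still be matched. The arithmetic in isolation, on hypothetical data outside the class:

buf, token = 'hello wor', 'world'
keep = max(len(token) - 1, 0)      # characters that must stay behind
newpos = max(len(buf) - keep, 0)
consumed, retained = buf[:newpos], buf[newpos:]
# consumed == 'hello', retained == ' wor'; after the next write 'ld!'
# the buffer holds ' world!' and the token is found.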
<SYSTEM_TASK:> Peeks a line into the FIFO. <END_TASK> <USER_TASK:> Description: def peekline(self):
    """
    Peeks a line into the FIFO.

    Performs the same function as readline() without removing data from
    the FIFO. See readline() for further information.
    """
self.__append() i = self.buf.find(self.eol, self.pos) if i < 0: return '' newpos = i + len(self.eol) return self.buf[self.pos:newpos]
<SYSTEM_TASK:> Peeks for token into the FIFO. <END_TASK> <USER_TASK:> Description: def peekuntil(self, token, size=0): """ Peeks for token into the FIFO. Performs the same function as readuntil() without removing data from the FIFO. See readuntil() for further information. """
self.__append() i = self.buf.find(token, self.pos) if i < 0: index = max(len(token) - 1, size) newpos = max(len(self.buf) - index, self.pos) return False, self.buf[self.pos:newpos] newpos = i + len(token) return True, self.buf[self.pos:newpos]
<SYSTEM_TASK:> Converts a sequence of page-partitioned revision documents into a sequence <END_TASK> <USER_TASK:> Description: def revdocs2reverts(rev_docs, radius=defaults.RADIUS, use_sha1=False, resort=False, verbose=False): """ Converts a sequence of page-partitioned revision documents into a sequence of reverts. :Params: rev_docs : `iterable` ( `dict` ) a page-partitioned sequence of revision documents radius : `int` The maximum number of revisions that a revert can reference. use_sha1 : `bool` Use the sha1 field as the checksum for comparison. resort : `bool` If True, re-sort the revisions of each page. verbose : `bool` Print dots and stuff """
page_rev_docs = groupby(rev_docs, lambda rd: rd.get('page'))

    for page_doc, rev_docs in page_rev_docs:
        if verbose:
            sys.stderr.write(page_doc.get('title') + ": ")
            sys.stderr.flush()

        if resort:
            if verbose:
                sys.stderr.write("(sorting) ")
                sys.stderr.flush()
            rev_docs = sorted(
                rev_docs, key=lambda r: (r.get('timestamp'), r.get('id')))

        detector = Detector(radius=radius)

        for rev_doc in rev_docs:
            if not use_sha1 and 'text' not in rev_doc:
                logger.warning("Skipping {0}: 'text' field not found in {1}"
                               .format(rev_doc['id'], rev_doc))
                continue

            if use_sha1:
                checksum = rev_doc.get('sha1') or DummyChecksum()
            elif 'text' in rev_doc:
                text_bytes = bytes(rev_doc['text'], 'utf8', 'replace')
                checksum = hashlib.sha1(text_bytes).digest()

            revert = detector.process(checksum, rev_doc)
            if revert:
                yield revert.to_json()
                if verbose:
                    sys.stderr.write("r")
                    sys.stderr.flush()
            else:
                if verbose:
                    sys.stderr.write(".")
                    sys.stderr.flush()

        if verbose:
            sys.stderr.write("\n")
            sys.stderr.flush()
<SYSTEM_TASK:> SPM HRF function from sum of two gamma PDFs <END_TASK> <USER_TASK:> Description: def spm_hrf_compat(t, peak_delay=6, under_delay=16, peak_disp=1, under_disp=1, p_u_ratio=6, normalize=True, ):
    """
    SPM HRF function from sum of two gamma PDFs

    This function is designed to be partially compatible with SPM's
    `spm_hrf.m` function. The SPM HRF is a *peak* gamma PDF (with location
    `peak_delay` and dispersion `peak_disp`), minus an *undershoot* gamma PDF
    (with location `under_delay` and dispersion `under_disp`, and divided by
    the `p_u_ratio`).

    Parameters
    ----------
    t : array-like
        vector of times at which to sample HRF
    peak_delay : float, optional
        delay of peak
    under_delay : float, optional
        delay of undershoot
    peak_disp : float, optional
        width (dispersion) of peak
    under_disp : float, optional
        width (dispersion) of undershoot
    p_u_ratio : float, optional
        peak to undershoot ratio. Undershoot divided by this value before
        subtracting from peak.
    normalize : {True, False}, optional
        If True, divide HRF values by their maximum before returning, so the
        peak value is 1. SPM also normalizes its HRF by default (though by the
        sum rather than the maximum).

    Returns
    -------
    hrf : array
        vector length ``len(t)`` of samples from HRF at times `t`

    Notes
    -----
    See ``spm_hrf.m`` in the SPM distribution.
    """
if any(v <= 0 for v in (peak_delay, peak_disp, under_delay, under_disp)):
        raise ValueError("delays and dispersions must be > 0")
    # gamma.pdf only defined for t > 0
    hrf = np.zeros(t.shape, dtype=float)  # the np.float alias was removed in NumPy 1.24
    pos_t = t[t > 0]
    peak = sps.gamma.pdf(pos_t,
                         peak_delay / peak_disp,
                         loc=0,
                         scale=peak_disp)
    undershoot = sps.gamma.pdf(pos_t,
                               under_delay / under_disp,
                               loc=0,
                               scale=under_disp)
    hrf[t > 0] = peak - undershoot / p_u_ratio
    if not normalize:
        return hrf
    return hrf / np.max(hrf)
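A short sampling sketch, assuming NumPy is available as np (as in the function body) and a 2 s repetition time; the values are illustrative:

import numpy as np

t = np.arange(0, 32, 2.0)   # sample the HRF over 32 s at TR = 2 s
hrf = spm_hrf_compat(t)     # with the defaults, peaks near t = 6 s, scaled to max 1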
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def dsort(fname, order, has_header=True, frow=0, ofname=None): r""" Sort file data. :param fname: Name of the comma-separated values file to sort :type fname: FileNameExists_ :param order: Sort order :type order: :ref:`CsvColFilter` :param has_header: Flag that indicates whether the comma-separated values file to sort has column headers in its first line (True) or not (False) :type has_header: boolean :param frow: First data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer of float) in at least one of its columns :type frow: NonNegativeInteger_ :param ofname: Name of the output comma-separated values file, the file that will contain the sorted data. If None the sorting is done "in place" :type ofname: FileName_ or None .. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for pcsv.dsort.dsort :raises: * OSError (File *[fname]* could not be found) * RuntimeError (Argument \`fname\` is not valid) * RuntimeError (Argument \`frow\` is not valid) * RuntimeError (Argument \`has_header\` is not valid) * RuntimeError (Argument \`ofname\` is not valid) * RuntimeError (Argument \`order\` is not valid) * RuntimeError (Column headers are not unique in file *[fname]*) * RuntimeError (File *[fname]* has no valid data) * RuntimeError (File *[fname]* is empty) * RuntimeError (Invalid column specification) * ValueError (Column *[column_identifier]* not found) .. [[[end]]] """
ofname = fname if ofname is None else ofname obj = CsvFile(fname=fname, has_header=has_header, frow=frow) obj.dsort(order) obj.write(fname=ofname, header=has_header, append=False)
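A hypothetical call; the exact :ref:`CsvColFilter` syntax is assumed here (a bare column name for ascending order, a ``{column: 'D'}`` mapping for descending), and the file names are illustrative:

# Sort data.csv by 'Ctrl' ascending, then by 'Ref' descending, writing
# the result to sorted.csv instead of sorting in place
dsort('data.csv', order=['Ctrl', {'Ref': 'D'}], ofname='sorted.csv')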
<SYSTEM_TASK:> Makes a call to the LBRY API <END_TASK> <USER_TASK:> Description: def call(cls, method, params=None, timeout=600):
    """
    Makes a call to the LBRY API

    :param str method: Method to call from the LBRY API. See the full list of
     methods at https://lbryio.github.io/lbry/cli/
    :param dict params: Parameters to give the method selected
    :param float timeout: The number of seconds to wait for a connection until
     we time out; 600 by default
    :raises LBRYException: If the request returns an error when calling the API
    :return: A Python `dict` object containing the data requested from the API
    :rtype: dict
    """
params = [] if params is None else params return cls.make_request(SERVER_ADDRESS, method, params, timeout=timeout)
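A usage sketch; 'status' is a method in the LBRY API listing linked above, while the `LbryApi` class name is assumed here for illustration:

# Query the daemon status with a shorter timeout; returns a dict on
# success and raises LBRYException if the API reports an error
response = LbryApi.call('status', timeout=30)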
<SYSTEM_TASK:> This method may be used to create a new document for writing as xml <END_TASK> <USER_TASK:> Description: def make_document(self, titlestring): """ This method may be used to create a new document for writing as xml to the OPS subdirectory of the ePub structure. """
#root = etree.XML('''<?xml version="1.0"?>\ #<!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.1//EN' 'http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd'>\ #<html xml:lang="en-US" xmlns="http://www.w3.org/1999/xhtml" xmlns:ops="http://www.idpf.org/2007/ops">\ #</html>''') root = etree.XML('''<?xml version="1.0"?>\ <!DOCTYPE html>\ <html xmlns="http://www.w3.org/1999/xhtml">\ </html>''') document = etree.ElementTree(root) html = document.getroot() head = etree.SubElement(html, 'head') etree.SubElement(html, 'body') title = etree.SubElement(head, 'title') title.text = titlestring #The href for the css stylesheet is a standin, can be overwritten etree.SubElement(head, 'link', {'href': 'css/default.css', 'rel': 'stylesheet', 'type': 'text/css'}) return document
<SYSTEM_TASK:> This function will write a document to an XML file. <END_TASK> <USER_TASK:> Description: def write_document(self, name, document): """ This function will write a document to an XML file. """
with open(name, 'wb') as out: out.write(etree.tostring(document, encoding='utf-8', pretty_print=True))
<SYSTEM_TASK:> Receives a date_tuple object, and outputs a string <END_TASK> <USER_TASK:> Description: def format_date_string(self, date_tuple): """ Receives a date_tuple object, and outputs a string for placement in the article content. """
months = ['', 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] date_string = '' if date_tuple.season: return '{0}, {1}'.format(date_tuple.season, date_tuple.year) else: if not date_tuple.month and not date_tuple.day: return '{0}'.format(date_tuple.year) if date_tuple.month: date_string += months[int(date_tuple.month)] if date_tuple.day: date_string += ' ' + date_tuple.day return ', '.join([date_string, date_tuple.year])
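A worked example with a hypothetical namedtuple standing in for the expected date_tuple shape (season, year, month and day attributes, strings or None); `pub` stands for any object exposing this method:

from collections import namedtuple

DateTuple = namedtuple('DateTuple', 'season year month day')    # assumed shape
pub.format_date_string(DateTuple(None, '2013', '7', '16'))      # 'July 16, 2013'
pub.format_date_string(DateTuple('Spring', '2013', None, None)) # 'Spring, 2013'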
<SYSTEM_TASK:> Returns True if the article has out-of-flow tables, indicates separate <END_TASK> <USER_TASK:> Description: def has_out_of_flow_tables(self):
    """
    Returns True if the article has out-of-flow tables, which indicates a
    separate tables document.

    This method is used to indicate whether rendering this article's content
    will result in the creation of out-of-flow HTML tables. This method has
    a base class implementation representing a common logic; if an article
    has a graphic (image) representation of a table then the HTML
    representation will be placed out-of-flow if it exists; if there is no
    graphic (image) representation then the HTML representation will be
    placed in-flow.

    Returns
    -------
    bool
        True if there are out-of-flow HTML tables, False otherwise
    """
if self.article.body is None: return False for table_wrap in self.article.body.findall('.//table-wrap'): graphic = table_wrap.xpath('./graphic | ./alternatives/graphic') table = table_wrap.xpath('./table | ./alternatives/table') if graphic and table: return True return False
<SYSTEM_TASK:> Ingests an Article to create navigation structures and parse global <END_TASK> <USER_TASK:> Description: def process(self, article): """ Ingests an Article to create navigation structures and parse global metadata. """
if self.article is not None and not self.collection:
        log.warning('Could not process additional article. Navigation only '
                    'handles one article unless collection mode is set.')
        return False
    if article.publisher is None:
        log.error('Navigation cannot be generated for an Article '
                  'without a publisher!')
        return
    self.article = article
    self.article_doi = self.article.doi.split('/')[1]
    self.all_dois.append(self.article.doi)
    if not self.collection:
        self.title = self.article.publisher.nav_title()
        for author in self.article.publisher.nav_contributors():
            self.contributors.add(author)
    # Analyze the structure of the article to create internal mapping
    self.map_navigation()
<SYSTEM_TASK:> This is a wrapper for depth-first recursive analysis of the article <END_TASK> <USER_TASK:> Description: def map_navigation(self): """ This is a wrapper for depth-first recursive analysis of the article """
#All articles should have titles title_id = 'titlepage-{0}'.format(self.article_doi) title_label = self.article.publisher.nav_title() title_source = 'main.{0}.xhtml#title'.format(self.article_doi) title_navpoint = navpoint(title_id, title_label, self.play_order, title_source, []) self.nav.append(title_navpoint) #When processing a collection of articles, we will want all subsequent #navpoints for this article to be located under the title if self.collection: nav_insertion = title_navpoint.children else: nav_insertion = self.nav #If the article has a body, we'll need to parse it for navigation if self.article.body is not None: #Here is where we invoke the recursive parsing! for nav_pt in self.recursive_article_navmap(self.article.body): nav_insertion.append(nav_pt) #Add a navpoint to the references if appropriate if self.article.root.xpath('./back/ref'): ref_id = 'references-{0}'.format(self.article_doi) ref_label = 'References' ref_source = 'biblio.{0}.xhtml#references'.format(self.article_doi) ref_navpoint = navpoint(ref_id, ref_label, self.play_order, ref_source, []) nav_insertion.append(ref_navpoint)
<SYSTEM_TASK:> This function recursively traverses the content of an input article to <END_TASK> <USER_TASK:> Description: def recursive_article_navmap(self, src_element, depth=0, first=True): """ This function recursively traverses the content of an input article to add the correct elements to the NCX file's navMap and Lists. """
if depth > self.nav_depth: self.nav_depth = depth navpoints = [] tagnames = ['sec', 'fig', 'table-wrap'] for child in src_element: try: tagname = child.tag except AttributeError: continue else: if tagname not in tagnames: continue #Safely handle missing id attributes if 'id' not in child.attrib: child.attrib['id'] = self.auto_id #If in collection mode, we'll prepend the article DOI to avoid #collisions if self.collection: child_id = '-'.join([self.article_doi, child.attrib['id']]) else: child_id = child.attrib['id'] #Attempt to infer the correct text as a label #Skip the element if we cannot child_title = child.find('title') if child_title is None: continue # If there is no immediate title, skip this element label = element_methods.all_text(child_title) if not label: continue # If no text in the title, skip this element source = 'main.{0}.xhtml#{1}'.format(self.article_doi, child.attrib['id']) if tagname == 'sec': children = self.recursive_article_navmap(child, depth=depth + 1) navpoints.append(navpoint(child_id, label, self.play_order, source, children)) #figs and table-wraps do not have children elif tagname == 'fig': # Add navpoints to list_of_figures self.figures_list.append(navpoint(child.attrib['id'], label, None, source, [])) elif tagname == 'table-wrap': # Add navpoints to list_of_tables self.tables_list.append(navpoint(child.attrib['id'], label, None, source, [])) return navpoints
<SYSTEM_TASK:> Function for creating neural time course models. <END_TASK> <USER_TASK:> Description: def funcNrlTcMotPred(idxPrc, varPixX, varPixY, NrlMdlChunk, varNumTP, aryBoxCar, # aryCond path, varNumNrlMdls, varNumMtDrctn, varPar, queOut): """ Function for creating neural time course models. This function should be used to create neural models if different predictors for every motion direction are included. """
# # if hd5 method is used: open file for reading # filename = 'aryBoxCar' + str(idxPrc) + '.hdf5' # hdf5_path = os.path.join(path, filename) # fileH = tables.openFile(hdf5_path, mode='r') # Output array with pRF model time courses at all modelled standard # deviations for current pixel position: aryOut = np.empty((len(NrlMdlChunk), varNumTP, varNumMtDrctn), dtype='float32') # Prepare status indicator if this is the first of the parallel processes: if idxPrc == 1: # We create a status indicator for the time consuming pRF model finding # algorithm. Number of steps of the status indicator: varStsStpSze = 20 # Number of pRF models to fit: varNumLoops = varNumNrlMdls/varPar # Vector with pRF values at which to give status feedback: vecStatus = np.linspace(0, varNumLoops, num=(varStsStpSze+1), endpoint=True) vecStatus = np.ceil(vecStatus) vecStatus = vecStatus.astype(int) # Vector with corresponding percentage values at which to give status # feedback: vecStatusPrc = np.linspace(0, 100, num=(varStsStpSze+1), endpoint=True) vecStatusPrc = np.ceil(vecStatusPrc) vecStatusPrc = vecStatusPrc.astype(int) # Counter for status indicator: varCntSts01 = 0 varCntSts02 = 0 # Loop through all Gauss parameters that are in this chunk for idx, NrlMdlTrpl in enumerate(NrlMdlChunk): # Status indicator (only used in the first of the parallel # processes): if idxPrc == 1: # Status indicator: if varCntSts02 == vecStatus[varCntSts01]: # Prepare status message: strStsMsg = ('---------Progress: ' + str(vecStatusPrc[varCntSts01]) + ' % --- ' + str(vecStatus[varCntSts01]) + ' loops out of ' + str(varNumLoops)) print(strStsMsg) # Only increment counter if the last value has not been # reached yet: if varCntSts01 < varStsStpSze: varCntSts01 = varCntSts01 + int(1) # x pos of Gauss model: NrlMdlTrpl[0] # y pos of Gauss model: NrlMdlTrpl[1] # std of Gauss model: NrlMdlTrpl[2] # index of tng crv model: NrlMdlTrpl[3] varTmpX = int(np.around(NrlMdlTrpl[0], 0)) varTmpY = int(np.around(NrlMdlTrpl[1], 0)) # Create pRF model (2D): aryGauss = funcGauss2D(varPixX, varPixY, varTmpX, varTmpY, NrlMdlTrpl[2]) # Multiply pixel-wise box car model with Gaussian pRF models: aryNrlTcTmp = np.multiply(aryBoxCar, aryGauss[:, :, None, None]) # Calculate sum across x- and y-dimensions - the 'area under the # Gaussian surface'. This is essentially an unscaled version of the # neural time course model (i.e. not yet scaled for the size of # the pRF). aryNrlTcTmp = np.sum(aryNrlTcTmp, axis=(0, 1)) # Normalise the nrl time course model to the size of the pRF. This # gives us the ratio of 'activation' of the pRF at each time point, # or, in other words, the neural time course model. aryNrlTcTmp = np.divide(aryNrlTcTmp, np.sum(aryGauss, axis=(0, 1))) # Put model time courses into the function's output array: aryOut[idx, :, :] = aryNrlTcTmp # Status indicator (only used in the first of the parallel # processes): if idxPrc == 1: # Increment status indicator counter: varCntSts02 = varCntSts02 + 1 # Output list: lstOut = [idxPrc, aryOut, ] queOut.put(lstOut)
<SYSTEM_TASK:> Function for finding best pRF model for voxel time course. <END_TASK> <USER_TASK:> Description: def funcFindPrf(idxPrc, aryFuncChnk, aryPrfTc, aryMdls, queOut): """ Function for finding best pRF model for voxel time course. This function should be used if there is only one predictor. """
# Number of voxels to be fitted in this chunk: varNumVoxChnk = aryFuncChnk.shape[0] # Number of volumes: varNumVol = aryFuncChnk.shape[1] # Vectors for pRF finding results [number-of-voxels times one]: vecBstXpos = np.zeros(varNumVoxChnk) vecBstYpos = np.zeros(varNumVoxChnk) vecBstSd = np.zeros(varNumVoxChnk) # vecBstR2 = np.zeros(varNumVoxChnk) # Vector for best R-square value. For each model fit, the R-square value is # compared to this, and updated if it is lower than the best-fitting # solution so far. We initialise with an arbitrary, high value vecBstRes = np.add(np.zeros(varNumVoxChnk), 100000.0) # We reshape the voxel time courses, so that time goes down the column, # i.e. from top to bottom. aryFuncChnk = aryFuncChnk.T # Constant term for the model: vecConst = np.ones((varNumVol), dtype=np.float32) # Change type to float 32: aryFuncChnk = aryFuncChnk.astype(np.float32) aryPrfTc = aryPrfTc.astype(np.float32) # Number of pRF models to fit: varNumMdls = len(aryMdls) # Prepare status indicator if this is the first of the parallel processes: if idxPrc == 0: # We create a status indicator for the time consuming pRF model finding # algorithm. Number of steps of the status indicator: varStsStpSze = 20 # Vector with pRF values at which to give status feedback: vecStatPrf = np.linspace(0, varNumMdls, num=(varStsStpSze+1), endpoint=True) vecStatPrf = np.ceil(vecStatPrf) vecStatPrf = vecStatPrf.astype(int) # Vector with corresponding percentage values at which to give status # feedback: vecStatPrc = np.linspace(0, 100, num=(varStsStpSze+1), endpoint=True) vecStatPrc = np.ceil(vecStatPrc) vecStatPrc = vecStatPrc.astype(int) # Counter for status indicator: varCntSts01 = 0 varCntSts02 = 0 # Loop through pRF models: for idxMdls in range(0, varNumMdls): # Status indicator (only used in the first of the parallel # processes): if idxPrc == 0: # Status indicator: if varCntSts02 == vecStatPrf[varCntSts01]: # Prepare status message: strStsMsg = ('---------Progress: ' + str(vecStatPrc[varCntSts01]) + ' % --- ' + str(vecStatPrf[varCntSts01]) + ' pRF models out of ' + str(varNumMdls)) print(strStsMsg) # Only increment counter if the last value has not been # reached yet: if varCntSts01 < varStsStpSze: varCntSts01 = varCntSts01 + int(1) # Current pRF time course model: vecMdlTc = aryPrfTc[idxMdls, :].flatten() # We create a design matrix including the current pRF time # course model, and a constant term: aryDsgn = np.vstack([vecMdlTc, vecConst]).T # Calculation of the ratio of the explained variance (R square) # for the current model for all voxel time courses. # print('------------np.linalg.lstsq on pRF: ' + # str(idxX) + # 'x ' + # str(idxY) + # 'y ' + # str(idxSd) + # 'z --- START') # varTmeTmp01 = time.time() # Change type to float32: # aryDsgn = aryDsgn.astype(np.float32) # Calculate the least-squares solution for all voxels: vecTmpRes = np.linalg.lstsq(aryDsgn, aryFuncChnk)[1] # varTmeTmp02 = time.time() # varTmeTmp03 = np.around((varTmeTmp02 - varTmeTmp01), # decimals=2) # print('------------np.linalg.lstsq on pRF: ' + # str(idxX) + # 'x ' + # str(idxY) + # 'y ' + # str(idxSd) + # 'z --- DONE elapsed time: ' + # str(varTmeTmp03) + # 's') # Check whether current residuals are lower than previously # calculated ones: vecLgcTmpRes = np.less(vecTmpRes, vecBstRes) # Replace best x and y position values, and SD values. 
vecBstXpos[vecLgcTmpRes] = aryMdls[idxMdls][0] vecBstYpos[vecLgcTmpRes] = aryMdls[idxMdls][1] vecBstSd[vecLgcTmpRes] = aryMdls[idxMdls][2] # Replace best residual values: vecBstRes[vecLgcTmpRes] = vecTmpRes[vecLgcTmpRes] # varTmeTmp04 = time.time() # varTmeTmp05 = np.around((varTmeTmp04 - varTmeTmp02), # decimals=2) # print('------------selection of best-fitting pRF model: ' + # str(idxX) + # 'x ' + # str(idxY) + # 'y ' + # str(idxSd) + # 'z --- elapsed time: ' + # str(varTmeTmp05) + # 's') # Status indicator (only used in the first of the parallel # processes): if idxPrc == 0: # Increment status indicator counter: varCntSts02 = varCntSts02 + 1 # After finding the best fitting model for each voxel, we still have to # calculate the coefficient of determination (R-squared) for each voxel. We # start by calculating the total sum of squares (i.e. the deviation of the # data from the mean). The mean of each time course: vecFuncMean = np.mean(aryFuncChnk, axis=0) # Deviation from the mean for each datapoint: vecFuncDev = np.subtract(aryFuncChnk, vecFuncMean[None, :]) # Sum of squares: vecSsTot = np.sum(np.power(vecFuncDev, 2.0), axis=0) # Coefficient of determination: vecBstR2 = np.subtract(1.0, np.divide(vecBstRes, vecSsTot)) # Output list: lstOut = [idxPrc, vecBstXpos, vecBstYpos, vecBstSd, vecBstR2] queOut.put(lstOut)
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def concatenate( fname1, fname2, dfilter1=None, dfilter2=None, has_header1=True, has_header2=True, frow1=0, frow2=0, ofname=None, ocols=None, ): r""" Concatenate two comma-separated values file. Data rows from the second file are appended at the end of the data rows from the first file :param fname1: Name of the first comma-separated values file, the file whose data appears first in the output file :type fname1: FileNameExists_ :param fname2: Name of the second comma-separated values file, the file whose data appears last in the output file :type fname2: FileNameExists_ :param dfilter1: Row and/or column filter for the first file. If None no data filtering is done on the file :type dfilter1: :ref:`CsvDataFilter` or None :param dfilter2: Row and/or column filter for the second file. If None no data filtering is done on the file :type dfilter2: :ref:`CsvDataFilter` or None :param has_header1: Flag that indicates whether the first comma-separated values file has column headers in its first line (True) or not (False) :type has_header1: boolean :param has_header2: Flag that indicates whether the second comma-separated values file has column headers in its first line (True) or not (False) :type has_header2: boolean :param frow1: First comma-separated values file first data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer of float) in at least one of its columns :type frow1: NonNegativeInteger_ :param frow2: Second comma-separated values file first data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer of float) in at least one of its columns :type frow2: NonNegativeInteger_ :param ofname: Name of the output comma-separated values file, the file that will contain the data from the first and second files. If None the first file is replaced "in place" :type ofname: FileName_ or None :param ocols: Column names of the output comma-separated values file. If None the column names in the first file are used if **has_header1** is True or the column names in the second files are used if **has_header1** is False and **has_header2** is True, otherwise no header is used :type ocols: list or None .. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for .. pcsv.concatenate.concatenate :raises: * OSError (File *[fname]* could not be found) * RuntimeError (Argument \`dfilter1\` is not valid) * RuntimeError (Argument \`dfilter2\` is not valid) * RuntimeError (Argument \`fname1\` is not valid) * RuntimeError (Argument \`fname2\` is not valid) * RuntimeError (Argument \`frow1\` is not valid) * RuntimeError (Argument \`frow2\` is not valid) * RuntimeError (Argument \`ocols\` is not valid) * RuntimeError (Argument \`ofname\` is not valid) * RuntimeError (Column headers are not unique in file *[fname]*) * RuntimeError (File *[fname]* has no valid data) * RuntimeError (File *[fname]* is empty) * RuntimeError (Files have different number of columns) * RuntimeError (Invalid column specification) * RuntimeError (Number of columns in data files and output columns are different) * ValueError (Column *[column_identifier]* not found) .. [[[end]]] """
# pylint: disable=R0913,R0914 iro = pexdoc.exh.addex(RuntimeError, "Files have different number of columns") iom = pexdoc.exh.addex( RuntimeError, "Number of columns in data files and output columns are different" ) # Read and validate file 1 obj1 = CsvFile(fname=fname1, dfilter=dfilter1, has_header=has_header1, frow=frow1) # Read and validate file 2 obj2 = CsvFile(fname=fname2, dfilter=dfilter2, has_header=has_header2, frow=frow2) # Assign output data structure ofname = fname1 if ofname is None else ofname # Create new header if (ocols is None) and has_header1: ocols = [obj1.header()] if obj1.cfilter is None else [obj1.cfilter] elif (ocols is None) and has_header2: ocols = [obj2.header()] if obj2.cfilter is None else [obj2.cfilter] elif ocols is None: ocols = [] else: iom((obj1.cfilter is not None) and (len(obj1.cfilter) != len(ocols))) ocols = [ocols] # Miscellaneous data validation iro(_C(obj1.cfilter, obj2.cfilter) and (len(obj1.cfilter) != len(obj2.cfilter))) # Write final output data = ocols + obj1.data(filtered=True) + obj2.data(filtered=True) write(fname=ofname, data=data, append=False)
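A hypothetical call joining two quarterly files that share a column layout; headers come from the first file unless ``ocols`` overrides them, and all file names here are illustrative:

# Append q2.csv rows after q1.csv rows, writing to h1.csv rather than
# modifying q1.csv in place
concatenate('q1.csv', 'q2.csv', ofname='h1.csv')

# Same, but relabel the output columns explicitly
concatenate('q1.csv', 'q2.csv', ofname='h1.csv', ocols=['Date', 'Value'])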
<SYSTEM_TASK:> Handles the movement of images to the cache. Must gracefully handle the <END_TASK> <USER_TASK:> Description: def move_images_to_cache(source, destination):
    """
    Handles the movement of images to the cache. Must gracefully handle the
    case where the folder for this article already exists.
    """
if os.path.isdir(destination):
        log.debug('Cached images for this article already exist')
        return
    else:
        log.debug('Cache location: {0}'.format(destination))
        try:
            shutil.copytree(source, destination)
        except Exception:
            log.exception('Images could not be moved to cache')
        else:
            log.info('Moved images to cache: {0}'.format(destination))
<SYSTEM_TASK:> The method used to handle an image directory explicitly defined by the <END_TASK> <USER_TASK:> Description: def explicit_images(images, image_destination, rootname, config):
    """
    The method used to handle an image directory explicitly defined by the
    user as a parsed argument.
    """
log.info('Explicit image directory specified: {0}'.format(images))
    if '*' in images:
        images = images.replace('*', rootname)
        log.debug('Wildcard expansion for image directory: {0}'.format(images))
    try:
        shutil.copytree(images, image_destination)
    except Exception:
        # The following is basically a recipe for log.exception() but with a
        # CRITICAL level if the execution should be killed immediately
        # log.critical('Unable to copy from indicated directory', exc_info=True)
        log.exception('Unable to copy from indicated directory')
        return False
    else:
        return True
<SYSTEM_TASK:> The method used to handle Input-Relative image inclusion. <END_TASK> <USER_TASK:> Description: def input_relative_images(input_path, image_destination, rootname, config): """ The method used to handle Input-Relative image inclusion. """
log.debug('Looking for input relative images') input_dirname = os.path.dirname(input_path) for path in config.input_relative_images: if '*' in path: path = path.replace('*', rootname) log.debug('Wildcard expansion for image directory: {0}'.format(path)) images = os.path.normpath(os.path.join(input_dirname, path)) if os.path.isdir(images): log.info('Input-Relative image directory found: {0}'.format(images)) shutil.copytree(images, image_destination) return True return False
<SYSTEM_TASK:> Main logic controller for the placement of images into the output directory <END_TASK> <USER_TASK:> Description: def get_images(output_directory, explicit, input_path, config, parsed_article):
    """
    Main logic controller for the placement of images into the output directory

    Controlling logic for placement of the appropriate image files into the
    EPUB directory. This function interacts with interface arguments as well
    as the local installation config.py file. These may change behavior of
    this function in terms of how it looks for images relative to the input,
    where it finds explicit images, whether it will attempt to download
    images, and whether successfully downloaded images will be stored in the
    cache.

    Parameters
    ----------
    output_directory : str
        The directory path where the EPUB is being constructed/output
    explicit : str
        A directory path to a user-specified directory of images. Allows
        * wildcard expansion.
    input_path : str
        The absolute path to the input XML file.
    config : config module
        The imported configuration module
    parsed_article : openaccess_epub.article.Article object
        The Article instance for the article being converted to EPUB
    """
#Split the DOI journal_doi, article_doi = parsed_article.doi.split('/') log.debug('journal-doi : {0}'.format(journal_doi)) log.debug('article-doi : {0}'.format(article_doi)) #Get the rootname for wildcard expansion rootname = utils.file_root_name(input_path) #Specify where to place the images in the output img_dir = os.path.join(output_directory, 'EPUB', 'images-{0}'.format(article_doi)) log.info('Using {0} as image directory target'.format(img_dir)) #Construct path to cache for article article_cache = os.path.join(config.image_cache, journal_doi, article_doi) #Use manual image directory, explicit images if explicit: success = explicit_images(explicit, img_dir, rootname, config) if success and config.use_image_cache: move_images_to_cache(img_dir, article_cache) #Explicit images prevents all other image methods return success #Input-Relative import, looks for any one of the listed options if config.use_input_relative_images: #Prevents other image methods only if successful if input_relative_images(input_path, img_dir, rootname, config): if config.use_image_cache: move_images_to_cache(img_dir, article_cache) return True #Use cache for article if it exists if config.use_image_cache: #Prevents other image methods only if successful if image_cache(article_cache, img_dir): return True #Download images from Internet if config.use_image_fetching: os.mkdir(img_dir) if journal_doi == '10.3389': fetch_frontiers_images(article_doi, img_dir) if config.use_image_cache: move_images_to_cache(img_dir, article_cache) return True elif journal_doi == '10.1371': success = fetch_plos_images(article_doi, img_dir, parsed_article) if success and config.use_image_cache: move_images_to_cache(img_dir, article_cache) return success else: log.error('Fetching images for this publisher is not supported!') return False return False
<SYSTEM_TASK:> Initiates the image cache if it does not exist <END_TASK> <USER_TASK:> Description: def make_image_cache(img_cache): """ Initiates the image cache if it does not exist """
log.info('Initiating the image cache at {0}'.format(img_cache)) if not os.path.isdir(img_cache): utils.mkdir_p(img_cache) utils.mkdir_p(os.path.join(img_cache, '10.1371')) utils.mkdir_p(os.path.join(img_cache, '10.3389'))
<SYSTEM_TASK:> Fetch the images for a PLoS article from the internet. <END_TASK> <USER_TASK:> Description: def fetch_plos_images(article_doi, output_dir, document): """ Fetch the images for a PLoS article from the internet. PLoS images are known through the inspection of <graphic> and <inline-graphic> elements. The information in these tags are then parsed into appropriate URLs for downloading. """
log.info('Processing images for {0}...'.format(article_doi)) #A dict of URLs for PLoS subjournals journal_urls = {'pgen': 'http://www.plosgenetics.org/article/{0}', 'pcbi': 'http://www.ploscompbiol.org/article/{0}', 'ppat': 'http://www.plospathogens.org/article/{0}', 'pntd': 'http://www.plosntds.org/article/{0}', 'pmed': 'http://www.plosmedicine.org/article/{0}', 'pbio': 'http://www.plosbiology.org/article/{0}', 'pone': 'http://www.plosone.org/article/{0}', 'pctr': 'http://clinicaltrials.ploshubs.org/article/{0}'} #Identify subjournal name for base URL subjournal_name = article_doi.split('.')[1] base_url = journal_urls[subjournal_name] #Acquire <graphic> and <inline-graphic> xml elements graphics = document.document.getroot().findall('.//graphic') graphics += document.document.getroot().findall('.//inline-graphic') #Begin to download log.info('Downloading images, this may take some time...') for graphic in graphics: nsmap = document.document.getroot().nsmap xlink_href = graphic.attrib['{' + nsmap['xlink'] + '}' + 'href'] #Equations are handled a bit differently than the others #Here we decide that an image name starting with "e" is an equation if xlink_href.split('.')[-1].startswith('e'): resource = 'fetchObject.action?uri=' + xlink_href + '&representation=PNG' else: resource = xlink_href + '/largerimage' full_url = base_url.format(resource) try: image = urllib.request.urlopen(full_url) except urllib.error.HTTPError as e: if e.code == 503: # Server overload error time.sleep(1) # Wait a second try: image = urllib.request.urlopen(full_url) except: return False # Happened twice, give up else: log.error('urllib.error.HTTPError {0}'.format(e.code)) return False else: img_name = xlink_href.split('.')[-1] + '.png' img_path = os.path.join(output_dir, img_name) with open(img_path, 'wb') as output: output.write(image.read()) log.info('Downloaded image {0}'.format(img_name)) log.info('Done downloading images') return True
<SYSTEM_TASK:> Return the dict element whose numeric key is closest to a target. <END_TASK> <USER_TASK:> Description: def get_nearest_by_numeric_key(data: dict, key: int) -> Any: """Return the dict element whose numeric key is closest to a target."""
return data.get(key, data[min(data.keys(), key=lambda k: abs(k - key))])
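Exact keys are returned directly; otherwise the entry whose key has the smallest absolute distance to the target wins:

data = {1: 'low', 5: 'mid', 10: 'high'}
get_nearest_by_numeric_key(data, 5)  # 'mid' (exact hit)
get_nearest_by_numeric_key(data, 7)  # 'mid' (|5 - 7| < |10 - 7|)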
<SYSTEM_TASK:> Create a blueprint resource route from a decorated function. <END_TASK> <USER_TASK:> Description: def resource(self, uri, methods=frozenset({'GET'}), host=None, strict_slashes=None, stream=False, version=None, name=None, **kwargs): """ Create a blueprint resource route from a decorated function. :param uri: endpoint at which the route will be accessible. :param methods: list of acceptable HTTP methods. :param host: :param strict_slashes: :param version: :param name: user defined route name for url_for :return: function or class instance Accepts any keyword argument that will be passed to the app resource. """
if strict_slashes is None: strict_slashes = self.strict_slashes def decorator(handler): self.resources.append(( FutureRoute(handler, uri, methods, host, strict_slashes, stream, version, name), kwargs)) return handler return decorator
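A sketch of the decorator in use, assuming a Sanic-style blueprint object that exposes this method; the route and handler names are illustrative:

bp = Blueprint('api')  # hypothetical blueprint exposing .resource()

@bp.resource('/items', methods={'GET', 'POST'}, strict_slashes=True)
async def items(request):
    ...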
<SYSTEM_TASK:> Create a blueprint resource route from a function. <END_TASK> <USER_TASK:> Description: def add_resource(self, handler, uri, methods=frozenset({'GET'}), host=None, strict_slashes=None, version=None, name=None, **kwargs): """ Create a blueprint resource route from a function. :param uri: endpoint at which the route will be accessible. :param methods: list of acceptable HTTP methods. :param host: :param strict_slashes: :param version: :param name: user defined route name for url_for :return: function or class instance Accepts any keyword argument that will be passed to the app resource. """
self.resource(uri=uri, methods=methods, host=host, strict_slashes=strict_slashes, version=version, name=name, **kwargs)(handler)
<SYSTEM_TASK:> Returns the connection to the database using the settings. <END_TASK> <USER_TASK:> Description: def _get_db(): """ Returns the connection to the database using the settings. This function should not be called outside of this file. Use db instead. """
from .settings import settings

    mongo = settings.MONGODB

    if 'URI' in mongo and mongo['URI']:
        uri = mongo['URI']
    else:
        uri = 'mongodb://'

        if all(mongo.get(key) for key in ('USERNAME', 'PASSWORD')):
            uri += '{0}:{1}@'.format(mongo['USERNAME'], mongo['PASSWORD'])

        if 'HOSTS' in mongo and mongo['HOSTS']:
            # Note: a trailing comma after a bare generator expression inside
            # join() is a syntax error, so the generator stands alone here
            uri += ','.join(
                '{0}:{1}'.format(host, port)
                for (host, port) in zip(mongo['HOSTS'], mongo['PORTS'])
            )
        else:
            uri += '{0}:{1}'.format(mongo['HOST'], mongo.get('PORT', 27017))

        uri += '/' + mongo['DATABASE']

        if 'OPTIONS' in mongo and mongo['OPTIONS']:
            uri += '?{0}'.format('&'.join(mongo['OPTIONS']))

    client = ConnectionFailureProxy(MongoClient(uri, connect=False))
    database = client[parse_uri(uri)['database']]

    return database
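The settings shape this function expects, reconstructed from the branches above; every key and value here is illustrative:

# Either a complete URI...
MONGODB = {'URI': 'mongodb://user:pw@db1:27017/mydb'}

# ...or individual parts, optionally with host lists for replica sets
MONGODB = {
    'USERNAME': 'user', 'PASSWORD': 'pw',
    'HOSTS': ['db1', 'db2'], 'PORTS': [27017, 27018],
    'DATABASE': 'mydb',
    'OPTIONS': ['replicaSet=rs0'],
}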
<SYSTEM_TASK:> Attempts to resolve a PLoS DOI into a URL path to the XML file. <END_TASK> <USER_TASK:> Description: def plos_doi_to_xmlurl(doi_string): """ Attempts to resolve a PLoS DOI into a URL path to the XML file. """
#Create URL to request DOI resolution from http://dx.doi.org doi_url = 'http://dx.doi.org/{0}'.format(doi_string) log.debug('DOI URL: {0}'.format(doi_url)) #Open the page, follow the redirect try: resolved_page = urllib.request.urlopen(doi_url) except urllib.error.URLError as err: print('Unable to resolve DOI URL, or could not connect') raise err else: #Given the redirection, attempt to shape new request for PLoS servers resolved_address = resolved_page.geturl() log.debug('DOI resolved to {0}'.format(resolved_address)) parsed = urllib.parse.urlparse(resolved_address) xml_url = '{0}://{1}'.format(parsed.scheme, parsed.netloc) xml_url += '/article/fetchObjectAttachment.action?uri=' xml_path = parsed.path.replace(':', '%3A').replace('/', '%2F') xml_path = xml_path.split('article%2F')[1] xml_url += '{0}{1}'.format(xml_path, '&representation=XML') log.debug('Shaped PLoS request for XML {0}'.format(xml_url)) #Return this url to the calling function return xml_url
<SYSTEM_TASK:> This method accepts a DOI string and attempts to download the appropriate <END_TASK> <USER_TASK:> Description: def doi_input(doi_string, download=True): """ This method accepts a DOI string and attempts to download the appropriate xml file. If successful, it returns a path to that file. As with all URL input types, the success of this method depends on supporting per-publisher conventions and will fail on unsupported publishers """
log.debug('DOI Input - {0}'.format(doi_string))
    doi_string = doi_string[4:]  # strip the leading 'doi:' prefix
    if '10.1371' in doi_string:  # Corresponds to PLoS
        log.debug('DOI string shows PLoS')
        xml_url = plos_doi_to_xmlurl(doi_string)
    else:
        log.critical('DOI input for this publisher is not supported')
        sys.exit('This publisher is not yet supported by OpenAccess_EPUB')
    return url_input(xml_url, download)
<SYSTEM_TASK:> This method expects a direct URL link to an xml file. It will apply no <END_TASK> <USER_TASK:> Description: def url_input(url_string, download=True): """ This method expects a direct URL link to an xml file. It will apply no modifications to the received URL string, so ensure good input. """
log.debug('URL Input - {0}'.format(url_string)) try: open_xml = urllib.request.urlopen(url_string) except urllib.error.URLError as err: print('utils.input.url_input received a bad URL, or could not connect') raise err else: #Employ a quick check on the mimetype of the link if not open_xml.headers['Content-Type'] == 'text/xml': sys.exit('URL request does not appear to be XML') filename = open_xml.headers['Content-Disposition'].split('\"')[1] if download: with open(filename, 'wb') as xml_file: xml_file.write(open_xml.read()) return openaccess_epub.utils.file_root_name(filename)
<SYSTEM_TASK:> This method provides support for Frontiers production using base zipfiles <END_TASK> <USER_TASK:> Description: def frontiersZipInput(zip_path, output_prefix, download=None): """ This method provides support for Frontiers production using base zipfiles as the input for ePub creation. It expects a valid pathname for one of the two zipfiles, and that both zipfiles are present in the same directory. """
log.debug('frontiersZipInput called') #If there is a problem with the input, it should clearly describe the issue pathname, pathext = os.path.splitext(zip_path) path, name = os.path.split(pathname) if not pathext == '.zip': # Checks for a path to zipfile log.error('Pathname provided does not end with .zip') print('Invalid file path: Does not have a zip extension.') sys.exit(1) #Construct the pair of zipfile pathnames file_root = name.split('-r')[0] zipname1 = "{0}-r{1}.zip".format(file_root, '1') zipname2 = "{0}-r{1}.zip".format(file_root, '2') #Construct the pathnames for output output = os.path.join(output_prefix, file_root) if os.path.isdir(output): shutil.rmtree(output) # Delete previous output output_meta = os.path.join(output, 'META-INF') images_output = os.path.join(output, 'EPUB', 'images') with zipfile.ZipFile(os.path.join(path, zipname1), 'r') as xml_zip: zip_dir = '{0}-r1'.format(file_root) xml = '/'.join([zip_dir, '{0}.xml'.format(file_root)]) try: xml_zip.extract(xml) except KeyError: log.critical('There is no item {0} in the zipfile'.format(xml)) sys.exit('There is no item {0} in the zipfile'.format(xml)) else: if not os.path.isdir(output_meta): os.makedirs(output_meta) shutil.copy(xml, os.path.join(output_meta)) os.remove(xml) os.rmdir(zip_dir) with zipfile.ZipFile(os.path.join(path, zipname2), 'r') as image_zip: zip_dir = '{0}-r2'.format(file_root) for i in image_zip.namelist(): if 'image_m' in i: image_zip.extract(i) if not os.path.isdir(images_output): os.makedirs(images_output) unzipped_images = os.path.join(zip_dir, 'images', 'image_m') for i in os.listdir(unzipped_images): shutil.copy(os.path.join(unzipped_images, i), images_output) shutil.rmtree(zip_dir) return file_root
<SYSTEM_TASK:>
Raises a RedValidationError if the given listing does not comply with cwl_job_listing_schema.
<END_TASK>
<USER_TASK:>
Description:
def _red_listing_validation(key, listing):
    """
    Raises a RedValidationError if the given listing does not comply with cwl_job_listing_schema.
    If listing is None or an empty list, no exception is thrown.

    :param key: The input key, used to build an error message if needed.
    :param listing: The listing to validate
    :raise RedValidationError: If the given listing does not comply with cwl_job_listing_schema
    """
if listing: try: jsonschema.validate(listing, cwl_job_listing_schema) except ValidationError as e: raise RedValidationError('REDFILE listing of input "{}" does not comply with jsonschema: {}' .format(key, e.context))
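A standalone sketch of the jsonschema call used above. The schema below is a simplified stand-in for cwl_job_listing_schema, not the real schema:

import jsonschema

# Simplified stand-in schema: a list of objects with 'class' and 'basename'.
schema = {
    'type': 'array',
    'items': {
        'type': 'object',
        'required': ['class', 'basename'],
    },
}
listing = [{'class': 'File', 'basename': 'data.csv'}]

# Raises jsonschema.exceptions.ValidationError if the listing does not match.
jsonschema.validate(listing, schema)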
<SYSTEM_TASK:> Returns a list of mounting connectors <END_TASK> <USER_TASK:> Description: def red_get_mount_connectors(red_data, ignore_outputs): """ Returns a list of mounting connectors :param red_data: The red data to be searched :param ignore_outputs: If outputs should be ignored :return: A list of connectors with active mount option. """
keys = [] batches = red_data.get('batches') inputs = red_data.get('inputs') if batches: for batch in batches: keys.extend(red_get_mount_connectors_from_inputs(batch['inputs'])) elif inputs: keys.extend(red_get_mount_connectors_from_inputs(inputs)) if not ignore_outputs: outputs = red_data.get('outputs') if batches: for batch in batches: batch_outputs = batch.get('outputs') if batch_outputs: keys.extend(red_get_mount_connectors_from_outputs(batch_outputs)) elif outputs: keys.extend(red_get_mount_connectors_from_outputs(outputs)) return keys
<SYSTEM_TASK:> Invokes the cleanup functions for all inputs. <END_TASK> <USER_TASK:> Description: def cleanup(connector_manager, red_data, tmp_dir): """ Invokes the cleanup functions for all inputs. """
for key, arg in red_data['inputs'].items():
    if isinstance(arg, list):
        for index, i in enumerate(arg):
            if not isinstance(i, dict):
                continue

            # connector_class should be one of 'File' or 'Directory'
            connector_class = i['class']

            input_key = '{}_{}'.format(key, index)
            path = os.path.join(tmp_dir, input_key)
            connector_data = i['connector']
            internal = {URL_SCHEME_IDENTIFIER: path}

            if connector_class == 'File':
                connector_manager.receive_cleanup(connector_data, input_key, internal)
            elif connector_class == 'Directory':
                connector_manager.receive_directory_cleanup(connector_data, input_key, internal)
    elif isinstance(arg, dict):
        # connector_class should be one of 'File' or 'Directory'
        connector_class = arg['class']

        input_key = key
        path = os.path.join(tmp_dir, input_key)
        connector_data = arg['connector']
        internal = {URL_SCHEME_IDENTIFIER: path}

        if connector_class == 'File':
            connector_manager.receive_cleanup(connector_data, input_key, internal)
        elif connector_class == 'Directory':
            connector_manager.receive_directory_cleanup(connector_data, input_key, internal)

try:
    os.rmdir(tmp_dir)
except (OSError, FileNotFoundError):
    # Not all connectors may have cleaned up their contents correctly;
    # leave the directory in place rather than failing.
    pass
<SYSTEM_TASK:>
Executes a connector by executing the given connector_command. The given file contents are written to
temporary files, whose paths are handed to the connector cli.
<END_TASK>
<USER_TASK:>
Description:
def _execute_connector(connector_command, top_level_argument, *file_contents, listing=None):
    """
    Executes a connector by executing the given connector_command. Each entry of file_contents is written to its own
    temporary file, and the paths of these files are handed to the connector cli.

    :param connector_command: The connector command to execute.
    :param top_level_argument: The top level command line argument for the connector cli.
                               (Like 'receive' or 'send_validate')
    :param file_contents: JSON-serializable objects handed over to the connector cli, one temporary file per entry.
    :param listing: A listing to provide to the connector cli. Will be ignored if None.
    :return: A tuple containing the return code of the connector and the stderr of the command as str.
    """
# create temp_files for every file_content temp_files = [] for file_content in file_contents: if file_content is None: continue tmp_file = tempfile.NamedTemporaryFile('w') json.dump(file_content, tmp_file) tmp_file.flush() temp_files.append(tmp_file) tmp_listing_file = None if listing: tmp_listing_file = tempfile.NamedTemporaryFile('w') json.dump(listing, tmp_listing_file) tmp_listing_file.flush() command = [connector_command, top_level_argument] command.extend([t.name for t in temp_files]) if tmp_listing_file: command.append('--listing {}'.format(tmp_listing_file.name)) result = execute(' '.join(command)) # close temp_files for temp_file in temp_files: temp_file.close() if tmp_listing_file: tmp_listing_file.close() return result['returnCode'], result['stdErr']
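To make the command assembly concrete, here is a sketch of what would be executed for a hypothetical connector; the connector name and payload below are invented:

import json
import tempfile

# Invented connector payload; in practice this comes from the RED data.
access = {'url': 'http://example.com/data.csv'}

with tempfile.NamedTemporaryFile('w', suffix='.json') as tmp:
    json.dump(access, tmp)
    tmp.flush()
    # The function above would then run something like:
    #   my-connector receive /tmp/tmpab12cd.json
    print(' '.join(['my-connector', 'receive', tmp.name]))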
<SYSTEM_TASK:> Checks if a given listing is present under the given directory path. <END_TASK> <USER_TASK:> Description: def directory_listing_content_check(directory_path, listing): """ Checks if a given listing is present under the given directory path. :param directory_path: The path to the base directory :param listing: The listing to check :return: None if no errors could be found, otherwise a string describing the error """
if listing:
    for sub in listing:
        path = os.path.join(directory_path, sub['basename'])
        if sub['class'] == 'File':
            if not os.path.isfile(path):
                return 'listing contains "{}" but this file could not be found on disk.'.format(path)
        elif sub['class'] == 'Directory':
            if not os.path.isdir(path):
                return 'listing contains "{}" but this directory could not be found on disk.'.format(path)
            sub_listing = sub.get('listing')
            if sub_listing:
                # Recurse into the subdirectory, but only bail out on an
                # actual error; otherwise continue checking the remaining
                # entries of the current listing.
                error = ConnectorManager.directory_listing_content_check(path, sub_listing)
                if error is not None:
                    return error

return None
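A usage sketch with a made-up listing; the check returns None when every entry exists under the given directory (it is assumed here that the function above is reachable as a plain callable):

listing = [
    {'class': 'File', 'basename': 'data.csv'},
    {'class': 'Directory', 'basename': 'plots', 'listing': [
        {'class': 'File', 'basename': 'fig1.png'},
    ]},
]

error = directory_listing_content_check('/tmp/inputs/my_dir', listing)
if error is not None:
    print(error)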
<SYSTEM_TASK:>
Determines sufficient GPUs for the given requirements and returns a list of GPUDevices.
<END_TASK>
<USER_TASK:>
Description:
def match_gpus(available_devices, requirements):
    """
    Determines sufficient GPUs for the given requirements and returns a list of GPUDevices.
    If there aren't sufficient GPUs, an InsufficientGPUError is raised.

    :param available_devices: A list of GPUDevices
    :param requirements: A list of GPURequirements
    :return: A list of sufficient devices
    """
if not requirements: return [] if not available_devices: raise InsufficientGPUError("No GPU devices available, but {} devices required.".format(len(requirements))) available_devices = available_devices.copy() used_devices = [] for req in requirements: dev = search_device(req, available_devices) if dev: used_devices.append(dev) available_devices.remove(dev) else: raise InsufficientGPUError("Not all GPU requirements could be fulfilled.") return used_devices
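A minimal sketch of how match_gpus could be driven. The GPUDevice constructor used below is an assumption (the code above only relies on a vram attribute):

# Assumed constructor signature; adjust to the real GPUDevice API.
devices = [GPUDevice(vram=8192), GPUDevice(vram=4096)]
requirements = [GPURequirement(min_vram=6000)]

chosen = match_gpus(devices, requirements)
# chosen == [devices[0]]; raises InsufficientGPUError if no device has enough VRAM.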
<SYSTEM_TASK:>
Extracts the GPU requirements and returns them as a list of GPURequirements.
<END_TASK>
<USER_TASK:>
Description:
def get_gpu_requirements(gpus_reqs):
    """
    Extracts the GPU requirements from a dictionary or list and returns them as a list of GPURequirements.

    :param gpus_reqs: A dictionary {'count': <count>} or a list [{'minVram': <min_vram>}, {'minVram': <min_vram>}, ...]
    :return: A list of GPURequirements
    """
requirements = [] if gpus_reqs: if type(gpus_reqs) is dict: count = gpus_reqs.get('count') if count: for i in range(count): requirements.append(GPURequirement()) elif type(gpus_reqs) is list: for gpu_req in gpus_reqs: requirements.append(GPURequirement(min_vram=gpu_req['minVram'])) return requirements else: # If no requirements are supplied return []
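The two accepted input shapes, per the docstring above:

# Count style: two unconstrained requirements.
reqs = get_gpu_requirements({'count': 2})
assert len(reqs) == 2

# VRAM style: one requirement per listed constraint.
reqs = get_gpu_requirements([{'minVram': 4096}, {'minVram': 8192}])
assert len(reqs) == 2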
<SYSTEM_TASK:> Updates a dictionary containing environment variables to setup Nvidia-GPUs. <END_TASK> <USER_TASK:> Description: def set_nvidia_environment_variables(environment, gpu_ids): """ Updates a dictionary containing environment variables to setup Nvidia-GPUs. :param environment: The environment variables to update :param gpu_ids: A list of GPU ids """
if gpu_ids:
    # Join the ids with commas, without a trailing separator.
    environment["NVIDIA_VISIBLE_DEVICES"] = ",".join(str(gpu_id) for gpu_id in gpu_ids)
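Usage sketch:

environment = {}
set_nvidia_environment_variables(environment, [0, 2])
print(environment['NVIDIA_VISIBLE_DEVICES'])  # -> '0,2'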
<SYSTEM_TASK:> Returns whether the device is sufficient for this requirement. <END_TASK> <USER_TASK:> Description: def is_sufficient(self, device): """ Returns whether the device is sufficient for this requirement. :param device: A GPUDevice instance. :type device: GPUDevice :return: True if the requirement is fulfilled otherwise False """
sufficient = True if (self.min_vram is not None) and (device.vram < self.min_vram): sufficient = False return sufficient
<SYSTEM_TASK:> This function receives two strings representing system paths. The first is <END_TASK> <USER_TASK:> Description: def evaluate_relative_path(working=os.getcwd(), relative=''): """ This function receives two strings representing system paths. The first is the working directory and it should be an absolute path. The second is the relative path and it should not be absolute. This function will render an OS-appropriate absolute path, which is the normalized path from working to relative. """
# Note: the default value of ``working`` is evaluated once, at function
# definition time, so callers that change their working directory should
# pass ``working`` explicitly.
return os.path.normpath(os.path.join(working, relative))
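For example, on a POSIX system:

evaluate_relative_path('/home/user/project', '../data')  # -> '/home/user/data'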
<SYSTEM_TASK:> This function will return an appropriate absolute path for the path it is <END_TASK> <USER_TASK:> Description: def get_absolute_path(some_path): """ This function will return an appropriate absolute path for the path it is given. If the input is absolute, it will return unmodified; if the input is relative, it will be rendered as relative to the current working directory. """
if os.path.isabs(some_path): return some_path else: return evaluate_relative_path(os.getcwd(), some_path)
<SYSTEM_TASK:>
Returns the root name of a file from a full file path.
<END_TASK>
<USER_TASK:>
Description:
def file_root_name(name):
    """
    Returns the root name of a file from a full file path.

    It will not raise an error if the result is empty, but a warning will be
    issued.
    """
base = os.path.basename(name) root = os.path.splitext(base)[0] if not root: warning = 'file_root_name returned an empty root name from \"{0}\"' log.warning(warning.format(name)) return root
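For example:

file_root_name('/tmp/manuscript.xml')  # -> 'manuscript'
file_root_name('archive.tar.gz')       # -> 'archive.tar' (only the final extension is removed)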
<SYSTEM_TASK:> Generator function that will iterate over all files in the specified <END_TASK> <USER_TASK:> Description: def files_with_ext(extension, directory='.', recursive=False): """ Generator function that will iterate over all files in the specified directory and return a path to the files which possess a matching extension. You should include the period in your extension, and matching is not case sensitive: '.xml' will also match '.XML' and vice versa. An empty string passed to extension will match extensionless files. """
if recursive: log.info('Recursively searching {0} for files with extension "{1}"'.format(directory, extension)) for dirname, subdirnames, filenames in os.walk(directory): for filename in filenames: filepath = os.path.join(dirname, filename) _root, ext = os.path.splitext(filepath) if extension.lower() == ext.lower(): yield filepath else: log.info('Looking in {0} for files with extension: "{1}"'.format(directory, extension)) for name in os.listdir(directory): filepath = os.path.join(directory, name) if not os.path.isfile(filepath): # Skip non-files continue _root, ext = os.path.splitext(filepath) if extension.lower() == ext.lower(): yield filepath
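Usage sketch; the directory name below is made up:

# Find all XML files under ./input, including subdirectories.
for xml_path in files_with_ext('.xml', directory='input', recursive=True):
    print(xml_path)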
<SYSTEM_TASK:> This method takes the name of an epub file as an argument. This name is <END_TASK> <USER_TASK:> Description: def epubcheck(epubname, config=None): """ This method takes the name of an epub file as an argument. This name is the input for the java execution of a locally installed epubcheck-.jar. The location of this .jar file is configured in config.py. """
if config is None: config = load_config_module() r, e = os.path.splitext(epubname) if not e: log.warning('Missing file extension, appending ".epub"') e = '.epub' epubname = r + e elif not e == '.epub': log.warning('File does not have ".epub" extension, appending it') epubname += '.epub' subprocess.call(['java', '-jar', config.epubcheck_jarfile, epubname])
<SYSTEM_TASK:> Returns an iterator over all of the insertion points in a given file. <END_TASK> <USER_TASK:> Description: def in_file(self, fn: str) -> Iterator[InsertionPoint]: """ Returns an iterator over all of the insertion points in a given file. """
logger.debug("finding insertion points in file: %s", fn) yield from self.__file_insertions.get(fn, [])
<SYSTEM_TASK:> Returns an iterator over all of the insertion points located at a <END_TASK> <USER_TASK:> Description: def at_line(self, line: FileLine) -> Iterator[InsertionPoint]: """ Returns an iterator over all of the insertion points located at a given line. """
logger.debug("finding insertion points at line: %s", str(line)) filename = line.filename # type: str line_num = line.num # type: int for ins in self.in_file(filename): if line_num == ins.location.line: logger.debug("found insertion point at line [%s]: %s", str(line), ins) yield ins
<SYSTEM_TASK:> Load presentation order of motion directions. <END_TASK> <USER_TASK:> Description: def loadPrsOrd(vecRunLngth, strPathPresOrd, vecVslStim): """Load presentation order of motion directions. Parameters ---------- vecRunLngth : list Number of volumes in every run strPathPresOrd : str Path to the npy vector containing order of presented motion directions. vecVslStim: list Key of (stimulus) condition presented in every run Returns ------- aryPresOrdAprt : 1d numpy array, shape [n_vols] Presentation order of aperture position. aryPresOrdMtn : 1d numpy array, shape [n_vols] Presentation order of motion direction. """
print('------Load presentation order of motion directions') aryPresOrd = np.empty((0, 2)) for idx01 in range(0, len(vecRunLngth)): # reconstruct file name # ---> consider: some runs were shorter than others(replace next row) filename1 = (strPathPresOrd + str(vecVslStim[idx01]) + '.pickle') # filename1 = (strPathPresOrd + str(idx01+1) + '.pickle') # load array with open(filename1, 'rb') as handle: array1 = pickle.load(handle) tempCond = array1["Conditions"] tempCond = tempCond[:vecRunLngth[idx01], :] # add temp array to aryPresOrd aryPresOrd = np.concatenate((aryPresOrd, tempCond), axis=0) aryPresOrdAprt = aryPresOrd[:, 0].astype(int) aryPresOrdMtn = aryPresOrd[:, 1].astype(int) return aryPresOrdAprt, aryPresOrdMtn
<SYSTEM_TASK:>
Create pixel-wise boxcar functions.
<END_TASK>
<USER_TASK:>
Description:
def crtPwBoxCarFn(varNumVol, aryPngData, aryPresOrd, vecMtDrctn):
    """Create pixel-wise boxcar functions.

    Parameters
    ----------
    varNumVol : int, positive
        Number of volumes (time points).
    aryPngData : 3d numpy array, shape [width, height, n_vols]
        Pixel data of the presented stimulus frames.
    aryPresOrd : 1d numpy array, shape [n_vols]
        Presentation order of the conditions (e.g. motion directions).
    vecMtDrctn : 1d numpy array or list
        Keys of the unique conditions (e.g. motion directions).

    Returns
    -------
    aryBoxCar : 4d numpy array, shape [width, height, n_conditions, n_vols]
        Pixel-wise boxcar functions, one per condition.

    Reference
    ---------
    [1]
    """
print('------Create pixel-wise boxcar functions') aryBoxCar = np.empty(aryPngData.shape[0:2] + (len(vecMtDrctn),) + (varNumVol,), dtype='int64') for ind, num in enumerate(vecMtDrctn): aryCondTemp = np.zeros((aryPngData.shape), dtype='int64') lgcTempMtDrctn = [aryPresOrd == num][0] aryCondTemp[:, :, lgcTempMtDrctn] = np.copy( aryPngData[:, :, lgcTempMtDrctn]) aryBoxCar[:, :, ind, :] = aryCondTemp return aryBoxCar
<SYSTEM_TASK:>
Spatially convolve boxcar functions with 2D Gaussian.
<END_TASK>
<USER_TASK:>
Description:
def cnvlGauss2D(idxPrc, aryBoxCar, aryMdlParamsChnk, tplPngSize, varNumVol, queOut):
    """Spatially convolve boxcar functions with 2D Gaussian.

    Parameters
    ----------
    idxPrc : int
        Index of the parallel process (used to put the results into the
        correct order afterwards).
    aryBoxCar : 4d numpy array, shape [width, height, n_conditions, n_vols]
        Pixel-wise boxcar functions.
    aryMdlParamsChnk : 2d numpy array, shape [n_models, 4]
        Chunk of combinations of pRF model parameters (model index, x
        position, y position, sd).
    tplPngSize : tuple
        Width and height of the stimulus images, in pixels.
    varNumVol : int, positive
        Number of volumes (time points).
    queOut : multiprocessing.Queue
        Queue used to return the results to the parent process.

    Returns
    -------
    None; the list [idxPrc, aryOut] is put on queOut, where aryOut is a 3d
    numpy array of shape [n_models, n_conditions, n_vols].

    Reference
    ---------
    [1]
    """
# Number of combinations of model parameters in the current chunk:
varChnkSze = np.size(aryMdlParamsChnk, axis=0)

# Determine number of motion directions
varNumMtnDrtn = aryBoxCar.shape[2]

# Output array with pRF model time courses:
aryOut = np.zeros([varChnkSze, varNumMtnDrtn, varNumVol])

# Loop through different motion directions:
for idxMtn in range(0, varNumMtnDrtn):
    # Loop through combinations of model parameters:
    for idxMdl in range(0, varChnkSze):
        # Spatial parameters of current model:
        varTmpX = aryMdlParamsChnk[idxMdl, 1]
        varTmpY = aryMdlParamsChnk[idxMdl, 2]
        varTmpSd = aryMdlParamsChnk[idxMdl, 3]

        # Create pRF model (2D):
        aryGauss = crtGauss2D(tplPngSize[0],
                              tplPngSize[1],
                              varTmpX,
                              varTmpY,
                              varTmpSd)

        # Multiply pixel-time courses with Gaussian pRF models:
        aryPrfTcTmp = np.multiply(aryBoxCar[:, :, idxMtn, :],
                                  aryGauss[:, :, None])

        # Calculate sum across x- and y-dimensions - the 'area under the
        # Gaussian surface'. This is essentially an unscaled version of the
        # pRF time course model (i.e. not yet scaled for size of the pRF).
        aryPrfTcTmp = np.sum(aryPrfTcTmp, axis=(0, 1))

        # Put model time courses into the function's output array:
        aryOut[idxMdl, idxMtn, :] = aryPrfTcTmp

# Put the column with the indices of model-parameter-combinations into the
# output list (in order to be able to put the pRF model time courses into
# the correct order after the parallelised function):
lstOut = [idxPrc,
          aryOut]

# Put output to queue:
queOut.put(lstOut)
<SYSTEM_TASK:>
Resample pixel-time courses in high-res visual space.
<END_TASK>
<USER_TASK:>
Description:
def rsmplInHighRes(aryBoxCarConv, tplPngSize, tplVslSpcHighSze, varNumMtDrctn, varNumVol):
    """Resample pixel-time courses in high-res visual space.

    Parameters
    ----------
    aryBoxCarConv : 4d numpy array, shape [width, height, n_conditions, n_vols]
        Convolved pixel-time courses in the original stimulus space.
    tplPngSize : tuple
        Width and height of the stimulus images, in pixels.
    tplVslSpcHighSze : tuple
        Width and height of the high-resolution visual space.
    varNumMtDrctn : int, positive
        Number of motion directions (conditions).
    varNumVol : int, positive
        Number of volumes (time points).

    Returns
    -------
    aryBoxCarConvHigh : 4d numpy array, shape [high_width, high_height, n_conditions, n_vols]
        Pixel-time courses resampled in the high-resolution visual space.

    Reference
    ---------
    [1]
    """
# Array for super-sampled pixel-time courses:
aryBoxCarConvHigh = np.zeros((tplVslSpcHighSze[0],
                              tplVslSpcHighSze[1],
                              varNumMtDrctn,
                              varNumVol))

# Loop through motion directions and volumes:
for idxMtn in range(0, varNumMtDrctn):
    for idxVol in range(0, varNumVol):
        # Range for the coordinates:
        vecRange = np.arange(0, tplPngSize[0])

        # The following array describes the coordinates of the pixels in
        # the flattened array (i.e. "vecOrigPixVal"). In other words, these
        # are the row and column coordinates of the original pixel values.
        crd2, crd1 = np.meshgrid(vecRange, vecRange)
        aryOrixPixCoo = np.column_stack((crd1.flatten(),
                                         crd2.flatten()))

        # The following vector will contain the actual original pixel
        # values:
        vecOrigPixVal = aryBoxCarConv[:, :, idxMtn, idxVol]
        vecOrigPixVal = vecOrigPixVal.flatten()

        # The sampling interval for the creation of the super-sampled pixel
        # data (complex numbers are used as a convention for inclusive
        # intervals in "np.mgrid()"). Note: the built-in complex() is used
        # because np.complex was removed from recent NumPy versions.
        varStpSzeX = complex(tplVslSpcHighSze[0])
        varStpSzeY = complex(tplVslSpcHighSze[1])

        # The following grid has the coordinates of the points at which we
        # would like to re-sample the pixel data:
        aryPixGridX, aryPixGridY = np.mgrid[0:tplPngSize[0]:varStpSzeX,
                                            0:tplPngSize[1]:varStpSzeY]

        # The actual resampling:
        aryResampled = griddata(aryOrixPixCoo,
                                vecOrigPixVal,
                                (aryPixGridX, aryPixGridY),
                                method='nearest')

        # Put super-sampled pixel time courses into array:
        aryBoxCarConvHigh[:, :, idxMtn, idxVol] = aryResampled

return aryBoxCarConvHigh
<SYSTEM_TASK:>
Requires that the user is logged in and authorized to execute requests
<END_TASK>
<USER_TASK:>
Description:
def login_required(wrapped):
    """
    Requires that the user is logged in and authorized to execute requests,
    except if the method is in authorized_methods of the auth_collection,
    in which case the request can be executed even without authorization.
    """
@wraps(wrapped) def wrapper(*args, **kwargs): request = args[1] auth_collection = settings.AUTH_COLLECTION[ settings.AUTH_COLLECTION.rfind('.') + 1: ].lower() auth_document = request.environ.get(auth_collection) if auth_document and auth_document.is_authorized(request): setattr(request, auth_collection, auth_document) return wrapped(*args, **kwargs) return Response(response=serialize(UnauthorizedError()), status=401) if hasattr(wrapped, 'decorators'): wrapper.decorators = wrapped.decorators wrapper.decorators.append('login_required') else: wrapper.decorators = ['login_required'] return wrapper
<SYSTEM_TASK:> If a keyword argument 'serialize' with a True value is passed to the <END_TASK> <USER_TASK:> Description: def serializable(wrapped): """ If a keyword argument 'serialize' with a True value is passed to the Wrapped function, the return of the wrapped function will be serialized. Nothing happens if the argument is not passed or the value is not True """
@wraps(wrapped) def wrapper(*args, **kwargs): should_serialize = kwargs.pop('serialize', False) result = wrapped(*args, **kwargs) return serialize(result) if should_serialize else result if hasattr(wrapped, 'decorators'): wrapper.decorators = wrapped.decorators wrapper.decorators.append('serializable') else: wrapper.decorators = ['serializable'] return wrapper
<SYSTEM_TASK:> Deserializes a string into a PyMongo BSON <END_TASK> <USER_TASK:> Description: def deserialize(to_deserialize, *args, **kwargs): """ Deserializes a string into a PyMongo BSON """
if isinstance(to_deserialize, string_types):
    if re.match('^[0-9a-f]{24}$', to_deserialize):
        return ObjectId(to_deserialize)
    try:
        return bson_loads(to_deserialize, *args, **kwargs)
    except Exception:
        return bson_loads(bson_dumps(to_deserialize), *args, **kwargs)
else:
    return bson_loads(bson_dumps(to_deserialize), *args, **kwargs)
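Usage sketch, assuming bson_loads/bson_dumps are bson.json_util's loads/dumps (the import origin is an assumption):

deserialize('5f2b6e3a9d1c4b0007a1b2c3')
# -> ObjectId('5f2b6e3a9d1c4b0007a1b2c3')

deserialize('{"name": "alice"}')
# -> {'name': 'alice'}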
<SYSTEM_TASK:> Tries to atomically create the requested file. <END_TASK> <USER_TASK:> Description: def _doAtomicFileCreation(filePath): """Tries to atomically create the requested file."""
try: _os.close(_os.open(filePath, _os.O_CREAT | _os.O_EXCL)) return True except OSError as e: if e.errno == _errno.EEXIST: return False else: raise e
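Usage sketch; the lock-file path is made up:

if _doAtomicFileCreation('/tmp/job.lock'):
    print('created the file; this process won the race')
else:
    print('file already existed; another process created it first')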
<SYSTEM_TASK:> Finds the next available file-name in a sequence. <END_TASK> <USER_TASK:> Description: def findNextFile(folder='.', prefix=None, suffix=None, fnameGen=None, base=0, maxattempts=10): """Finds the next available file-name in a sequence. This function will create a file of zero size and will return the path to it to the caller. No files which exist will be altered in this operation and concurrent executions of this function will return separate files. In case of conflict, the function will attempt to generate a new file name up to maxattempts number of times before failing. The sequence will start from the base argument (default: 0). If used with the prefix/suffix, it will look for the next file in the sequence ignoring any gaps. Hence, if the files "a.0.txt" and "a.3.txt" exist, then the next file returned will be "a.4.txt" when called with prefix="a." and suffix=".txt". In case fnameGen is provided, the first generated filename which does not exist will be created and its path will be returned. Hence, if the files "a.0.txt" and "a.3.txt" exist, then the next file returned will be "a.1.txt" when called with fnameGen = lambda x : "a." + str(x) + ".txt" Args: folder - string which has path to the folder where the file should be created (default: '.') prefix - prefix of the file to be generated (default: '') suffix - suffix of the file to be generated (default: '') fnameGen - function which generates the filenames given a number as input (default: None) base - the first index to count (default: 0) maxattempts - number of attempts to create the file before failing with OSError (default: 10) Returns: Path of the file which follows the provided pattern and can be opened for writing. Raises: RuntimeError - If an incorrect combination of arguments is provided. OSError - If is unable to create a file (wrong path, drive full, illegal character in filename, etc.). """
expFolder = _os.path.expanduser(_os.path.expandvars(folder)) return _findNextFile(expFolder, prefix, suffix, fnameGen, base, maxattempts, 0)
<SYSTEM_TASK:> Returns the value str, truncated to MAX_ERROR_STR_LEN characters. If <END_TASK> <USER_TASK:> Description: def _errstr(value): """Returns the value str, truncated to MAX_ERROR_STR_LEN characters. If it's truncated, the returned value will have '...' on the end. """
value = str(value) # We won't make the caller convert value to a string each time. if len(value) > MAX_ERROR_STR_LEN: return value[:MAX_ERROR_STR_LEN] + '...' else: return value
<SYSTEM_TASK:> Raise ValidationException with standardExcMsg, unless customExcMsg is specified. <END_TASK> <USER_TASK:> Description: def _raiseValidationException(standardExcMsg, customExcMsg=None): """Raise ValidationException with standardExcMsg, unless customExcMsg is specified."""
if customExcMsg is None: raise ValidationException(str(standardExcMsg)) else: raise ValidationException(str(customExcMsg))
<SYSTEM_TASK:>
Returns None if the blank, strip, allowlistRegexes, and blocklistRegexes parameters are valid.
<END_TASK>
<USER_TASK:>
Description:
def _validateGenericParameters(blank, strip, allowlistRegexes, blocklistRegexes):
    """Returns None if the blank, strip, allowlistRegexes, and blocklistRegexes
    parameters are valid; these are the common parameters that all of
    PySimpleValidate's validation functions have. Raises a
    PySimpleValidateException if any of the arguments are invalid."""
# Check blank parameter.
if not isinstance(blank, bool):
    raise PySimpleValidateException('blank argument must be a bool')

# Check strip parameter.
if not isinstance(strip, (bool, str, type(None))):
    raise PySimpleValidateException('strip argument must be a bool, None, or str')

# Check allowlistRegexes parameter (including each regex in it).
if allowlistRegexes is None:
    allowlistRegexes = [] # allowlistRegexes defaults to a blank list.

try:
    len(allowlistRegexes) # Make sure allowlistRegexes is a sequence.
except:
    raise PySimpleValidateException('allowlistRegexes must be a sequence of regex_strs')
for response in allowlistRegexes:
    if not isinstance(response[0], str):
        raise PySimpleValidateException('allowlistRegexes must be a sequence of regex_strs')

# Check blocklistRegexes parameter (including each regex in it).
# NOTE: blocklistRegexes is NOT the same format as allowlistRegexes; it can
# include an "invalid input reason" string to display if the input matches
# the blocklist regex.
if blocklistRegexes is None:
    blocklistRegexes = [] # blocklistRegexes defaults to a blank list.

try:
    len(blocklistRegexes) # Make sure blocklistRegexes is a sequence of (regex_str, str) or strs.
except:
    raise PySimpleValidateException('blocklistRegexes must be a sequence of (regex_str, str) tuples or regex_strs')
for response in blocklistRegexes:
    if isinstance(response, str):
        continue
    if len(response) != 2:
        raise PySimpleValidateException('blocklistRegexes must be a sequence of (regex_str, str) tuples or regex_strs')
    if not isinstance(response[0], str) or not isinstance(response[1], str):
        raise PySimpleValidateException('blocklistRegexes must be a sequence of (regex_str, str) tuples or regex_strs')
<SYSTEM_TASK:>
Raises ValidationException if value is not a float or int.
<END_TASK>
<USER_TASK:>
Description:
def validateNum(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, _numType='num',
                min=None, max=None, lessThan=None, greaterThan=None, excMsg=None):
    """Raises ValidationException if value is not a float or int.
    Returns value, so it can be used inline in an expression:

        print(2 + validateNum(your_number))

    Note that since int() and float() ignore leading and trailing whitespace
    when converting a string to a number, so does this validateNum().

    * value (str): The value being validated as an int or float.
    * blank (bool): If True, a blank string will be accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers.
    * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
    * _numType (str): One of 'num', 'int', or 'float' for the kind of number to validate against, where 'num' means int or float.
    * min (int, float): The (inclusive) minimum value for the value to pass validation.
    * max (int, float): The (inclusive) maximum value for the value to pass validation.
    * lessThan (int, float): The (exclusive) maximum value; the value must be less than this number to pass validation.
    * greaterThan (int, float): The (exclusive) minimum value; the value must be greater than this number to pass validation.
    * excMsg (str): A custom message to use in the raised ValidationException.

    If you specify min or max, you cannot also respectively specify greaterThan
    or lessThan. Doing so will raise PySimpleValidateException.

    >>> import pysimplevalidate as pysv
    >>> pysv.validateNum('3')
    3
    >>> pysv.validateNum('3.0')
    3.0
    >>> pysv.validateNum(' 3.0 ')
    3.0
    >>> pysv.validateNum('549873259847598437598435798435793.589985743957435794357')
    5.498732598475984e+32
    >>> pysv.validateNum('4', lessThan=4)
    Traceback (most recent call last):
    ...
    pysimplevalidate.ValidationException: Number must be less than 4.
    >>> pysv.validateNum('4', max=4)
    4
    >>> pysv.validateNum('4', min=2, max=5)
    4
    """
# Validate parameters.
_validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)
_validateParamsFor_validateNum(min=min, max=max, lessThan=lessThan, greaterThan=greaterThan)

returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg)
if returnNow:
    # If we can convert value to an int/float, then do so. For example,
    # if an allowlist regex allows '42', then we should return 42/42.0.
    if (_numType == 'num' and '.' in value) or (_numType == 'float'):
        try:
            return float(value)
        except ValueError:
            return value # Return the value as is.
    if (_numType == 'num' and '.' not in value) or (_numType == 'int'):
        try:
            return int(value)
        except ValueError:
            return value # Return the value as is.

# Validate the value's type (and convert value back to a number type).
if (_numType == 'num' and '.' in value):
    # We are expecting a "num" (float or int) type and the user entered a float.
    try:
        value = float(value)
    except:
        _raiseValidationException(_('%r is not a number.') % (_errstr(value)), excMsg)
elif (_numType == 'num' and '.' not in value):
    # We are expecting a "num" (float or int) type and the user entered an int.
    try:
        value = int(value)
    except:
        _raiseValidationException(_('%r is not a number.') % (_errstr(value)), excMsg)
elif _numType == 'float':
    try:
        value = float(value)
    except:
        _raiseValidationException(_('%r is not a float.') % (_errstr(value)), excMsg)
elif _numType == 'int':
    try:
        if float(value) % 1 != 0:
            # The number is a float that doesn't end with ".0"
            _raiseValidationException(_('%r is not an integer.') % (_errstr(value)), excMsg)
        value = int(float(value))
    except:
        _raiseValidationException(_('%r is not an integer.') % (_errstr(value)), excMsg)

# Validate against min argument.
if min is not None and value < min:
    _raiseValidationException(_('Number must be at minimum %s.') % (min), excMsg)

# Validate against max argument.
if max is not None and value > max:
    _raiseValidationException(_('Number must be at maximum %s.') % (max), excMsg)

# Validate against lessThan argument.
if lessThan is not None and value >= lessThan:
    _raiseValidationException(_('Number must be less than %s.') % (lessThan), excMsg)

# Validate against greaterThan argument.
if greaterThan is not None and value <= greaterThan:
    _raiseValidationException(_('Number must be greater than %s.') % (greaterThan), excMsg)

return value
<SYSTEM_TASK:>
Raises ValidationException if value is not an int.
<END_TASK>
<USER_TASK:>
Description:
def validateInt(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, min=None,
                max=None, lessThan=None, greaterThan=None, excMsg=None):
    """Raises ValidationException if value is not an int.
    Returns value, so it can be used inline in an expression:

        print(2 + validateInt(your_number))

    Note that since int() ignores leading and trailing whitespace when
    converting a string to a number, so does this validateInt().

    * value (str): The value being validated as an int.
    * blank (bool): If True, a blank string will be accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers.
    * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
    * min (int, float): The (inclusive) minimum value for the value to pass validation.
    * max (int, float): The (inclusive) maximum value for the value to pass validation.
    * lessThan (int, float): The (exclusive) maximum value; the value must be less than this number to pass validation.
    * greaterThan (int, float): The (exclusive) minimum value; the value must be greater than this number to pass validation.
    * excMsg (str): A custom message to use in the raised ValidationException.

    If you specify min or max, you cannot also respectively specify greaterThan
    or lessThan. Doing so will raise PySimpleValidateException.

    >>> import pysimplevalidate as pysv
    >>> pysv.validateInt('42')
    42
    >>> pysv.validateInt('forty two')
    Traceback (most recent call last):
    ...
    pysimplevalidate.ValidationException: 'forty two' is not an integer.
    """
return validateNum(value=value, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes,
                   blocklistRegexes=blocklistRegexes, _numType='int', min=min, max=max,
                   lessThan=lessThan, greaterThan=greaterThan, excMsg=excMsg)
<SYSTEM_TASK:> Raises ValidationException if value is not one of the values in <END_TASK> <USER_TASK:> Description: def validateChoice(value, choices, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, numbered=False, lettered=False, caseSensitive=False, excMsg=None): """Raises ValidationException if value is not one of the values in choices. Returns the selected choice. Returns the value in choices that was selected, so it can be used inline in an expression: print('You chose ' + validateChoice(your_choice, ['cat', 'dog'])) Note that value itself is not returned: validateChoice('CAT', ['cat', 'dog']) will return 'cat', not 'CAT'. If lettered is True, lower or uppercase letters will be accepted regardless of what caseSensitive is set to. The caseSensitive argument only matters for matching with the text of the strings in choices. * value (str): The value being validated. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * numbered (bool): If True, this function will also accept a string of the choice's number, i.e. '1' or '2'. * lettered (bool): If True, this function will also accept a string of the choice's letter, i.e. 'A' or 'B' or 'a' or 'b'. * caseSensitive (bool): If True, then the exact case of the option must be entered. * excMsg (str): A custom message to use in the raised ValidationException. Returns the choice selected as it appeared in choices. That is, if 'cat' was a choice and the user entered 'CAT' while caseSensitive is False, this function will return 'cat'. >>> import pysimplevalidate as pysv >>> pysv.validateChoice('dog', ['dog', 'cat', 'moose']) 'dog' >>> pysv.validateChoice('DOG', ['dog', 'cat', 'moose']) 'dog' >>> pysv.validateChoice('2', ['dog', 'cat', 'moose'], numbered=True) 'cat' >>> pysv.validateChoice('a', ['dog', 'cat', 'moose'], lettered=True) 'dog' >>> pysv.validateChoice('C', ['dog', 'cat', 'moose'], lettered=True) 'moose' >>> pysv.validateChoice('dog', ['dog', 'cat', 'moose'], lettered=True) 'dog' >>> pysv.validateChoice('spider', ['dog', 'cat', 'moose']) Traceback (most recent call last): ... pysimplevalidate.ValidationException: 'spider' is not a valid choice. """
# Validate parameters.
_validateParamsFor_validateChoice(choices=choices, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes,
                                  blocklistRegexes=blocklistRegexes, numbered=numbered, lettered=lettered,
                                  caseSensitive=caseSensitive)

if '' in choices:
    # blank needs to be set to True here, otherwise '' won't be accepted as a choice.
    blank = True

returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg)
if returnNow:
    return value

# Validate against choices.
if value in choices:
    return value
if numbered and value.isdigit() and 0 < int(value) <= len(choices): # value must be 1 to len(choices)
    # Numbered options begin at 1, not 0.
    return choices[int(value) - 1] # -1 because the numbers are 1 to len(choices) but the indexes are 0 to len(choices) - 1
if lettered and len(value) == 1 and value.isalpha() and 0 < ord(value.upper()) - 64 <= len(choices):
    # Lettered options are always case-insensitive.
    return choices[ord(value.upper()) - 65]
if not caseSensitive and value.upper() in [choice.upper() for choice in choices]:
    # Return the original item in choices that value has a case-insensitive match with.
    return choices[[choice.upper() for choice in choices].index(value.upper())]

_raiseValidationException(_('%r is not a valid choice.') % (_errstr(value)), excMsg)
<SYSTEM_TASK:>
Raises ValidationException if value is not a time formatted in one of the formats in formats.
<END_TASK>
<USER_TASK:>
Description:
def validateTime(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None,
                 formats=('%H:%M:%S', '%H:%M', '%X'), excMsg=None):
    """Raises ValidationException if value is not a time formatted in one
    of the formats in formats. Returns a datetime.time object of value.

    * value (str): The value being validated as a time.
    * blank (bool): If True, a blank string will be accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation.
    * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
    * formats: A tuple of strings that can be passed to time.strptime, dictating the possible formats for a valid time.
    * excMsg (str): A custom message to use in the raised ValidationException.

    >>> import pysimplevalidate as pysv
    >>> pysv.validateTime('12:00:01')
    datetime.time(12, 0, 1)
    >>> pysv.validateTime('13:00:01')
    datetime.time(13, 0, 1)
    >>> pysv.validateTime('25:00:01')
    Traceback (most recent call last):
    ...
    pysimplevalidate.ValidationException: '25:00:01' is not a valid time.
    >>> pysv.validateTime('hour 12 minute 01', formats=['hour %H minute %M'])
    datetime.time(12, 1)
    """
# Reuse the logic in _validateToDateTimeFormat() for this function.
try:
    dt = _validateToDateTimeFormat(value, formats, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)
    return datetime.time(dt.hour, dt.minute, dt.second, dt.microsecond)
except ValidationException:
    _raiseValidationException(_('%r is not a valid time.') % (_errstr(value)), excMsg)
<SYSTEM_TASK:>
Raises ValidationException if value is not a date formatted in one of the formats in formats.
<END_TASK>
<USER_TASK:>
Description:
def validateDate(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None,
                 formats=('%Y/%m/%d', '%y/%m/%d', '%m/%d/%Y', '%m/%d/%y', '%x'), excMsg=None):
    """Raises ValidationException if value is not a date formatted in one
    of the formats in formats. Returns a datetime.date object of value.

    * value (str): The value being validated as a date.
    * blank (bool): If True, a blank string for value will be accepted.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation.
    * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
    * formats: A tuple of strings that can be passed to time.strptime, dictating the possible formats for a valid date.
    * excMsg (str): A custom message to use in the raised ValidationException.

    >>> import pysimplevalidate as pysv
    >>> pysv.validateDate('2/29/2004')
    datetime.date(2004, 2, 29)
    >>> pysv.validateDate('2/29/2005')
    Traceback (most recent call last):
    ...
    pysimplevalidate.ValidationException: '2/29/2005' is not a valid date.
    >>> pysv.validateDate('September 2019', formats=['%B %Y'])
    datetime.date(2019, 9, 1)
    """
# Reuse the logic in _validateToDateTimeFormat() for this function. try: dt = _validateToDateTimeFormat(value, formats, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes) return datetime.date(dt.year, dt.month, dt.day) except ValidationException: _raiseValidationException(_('%r is not a valid date.') % (_errstr(value)), excMsg)
<SYSTEM_TASK:>
Raises ValidationException if value is not a datetime formatted in one of the formats in formats.
<END_TASK>
<USER_TASK:>
Description:
def validateDatetime(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None,
                     formats=('%Y/%m/%d %H:%M:%S', '%y/%m/%d %H:%M:%S', '%m/%d/%Y %H:%M:%S',
                              '%m/%d/%y %H:%M:%S', '%x %H:%M:%S',
                              '%Y/%m/%d %H:%M', '%y/%m/%d %H:%M', '%m/%d/%Y %H:%M',
                              '%m/%d/%y %H:%M', '%x %H:%M'), excMsg=None):
    """Raises ValidationException if value is not a datetime formatted in one
    of the formats in formats. Returns a datetime.datetime object of value.

    * value (str): The value being validated as a datetime.
    * blank (bool): If True, a blank string will be accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation.
    * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
    * formats: A tuple of strings that can be passed to time.strptime, dictating the possible formats for a valid datetime.
    * excMsg (str): A custom message to use in the raised ValidationException.

    >>> import pysimplevalidate as pysv
    >>> pysv.validateDatetime('2018/10/31 12:00:01')
    datetime.datetime(2018, 10, 31, 12, 0, 1)
    >>> pysv.validateDatetime('10/31/2018 12:00:01')
    datetime.datetime(2018, 10, 31, 12, 0, 1)
    >>> pysv.validateDatetime('10/31/2018')
    Traceback (most recent call last):
    ...
    pysimplevalidate.ValidationException: '10/31/2018' is not a valid date and time.
    """
# Reuse the logic in _validateToDateTimeFormat() for this function. try: return _validateToDateTimeFormat(value, formats, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes) except ValidationException: _raiseValidationException(_('%r is not a valid date and time.') % (_errstr(value)), excMsg)
<SYSTEM_TASK:> Raises ValidationException if value is not an IPv4 or IPv6 address. <END_TASK> <USER_TASK:> Description: def validateIP(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None): """Raises ValidationException if value is not an IPv4 or IPv6 address. Returns the value argument. * value (str): The value being validated as an IP address. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * excMsg (str): A custom message to use in the raised ValidationException. >>> import pysimplevalidate as pysv >>> pysv.validateIP('127.0.0.1') '127.0.0.1' >>> pysv.validateIP('255.255.255.255') '255.255.255.255' >>> pysv.validateIP('256.256.256.256') Traceback (most recent call last): pysimplevalidate.ValidationException: '256.256.256.256' is not a valid IP address. >>> pysv.validateIP('1:2:3:4:5:6:7:8') '1:2:3:4:5:6:7:8' >>> pysv.validateIP('1::8') '1::8' >>> pysv.validateIP('fe80::7:8%eth0') 'fe80::7:8%eth0' >>> pysv.validateIP('::255.255.255.255') '::255.255.255.255' """
# Validate parameters.
_validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)

returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg)
if returnNow:
    return value

# Reuse the logic in validateRegex()
try:
    try:
        # Check if value is an IPv4 address.
        if validateRegex(value=value, regex=IPV4_REGEX, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes):
            return value
    except ValidationException:
        pass # Go on to check if it's an IPv6 address.

    # Check if value is an IPv6 address.
    if validateRegex(value=value, regex=IPV6_REGEX, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes):
        return value
except ValidationException:
    _raiseValidationException(_('%r is not a valid IP address.') % (_errstr(value)), excMsg)
<SYSTEM_TASK:> Raises ValidationException if value does not match the regular expression in regex. <END_TASK> <USER_TASK:> Description: def validateRegex(value, regex, flags=0, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None): """Raises ValidationException if value does not match the regular expression in regex. Returns the value argument. This is similar to calling inputStr() and using the allowlistRegexes keyword argument, however, validateRegex() allows you to pass regex flags such as re.IGNORECASE or re.VERBOSE. You can also pass a regex object directly. If you want to check if a string is a regular expression string, call validateRegexStr(). * value (str): The value being validated as a regular expression string. * regex (str, regex): The regular expression to match the value against. * flags (int): Identical to the flags argument in re.compile(). Pass re.VERBOSE et al here. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * excMsg (str): A custom message to use in the raised ValidationException. >>> pysv.validateRegex('cat bat rat', r'(cat)|(dog)|(moose)', re.IGNORECASE) 'cat' >>> pysv.validateRegex('He said "Hello".', r'"(.*?)"', re.IGNORECASE) '"Hello"' """
# Validate parameters. _validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes) returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg) if returnNow: return value # Search value with regex, whether regex is a str or regex object. if isinstance(regex, str): # TODO - check flags to see they're valid regex flags. mo = re.compile(regex, flags).search(value) elif isinstance(regex, REGEX_TYPE): mo = regex.search(value) else: raise PySimpleValidateException('regex must be a str or regex object') if mo is not None: return mo.group() else: _raiseValidationException(_('%r does not match the specified pattern.') % (_errstr(value)), excMsg)
<SYSTEM_TASK:>
Raises ValidationException if value can't be used as a regular expression string.
<END_TASK>
<USER_TASK:>
Description:
def validateRegexStr(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None):
    """Raises ValidationException if value can't be used as a regular expression string.
    Returns the value argument as a regex object.

    If you want to check if a string matches a regular expression, call validateRegex().

    * value (str): The value being validated as a regular expression string.
    * blank (bool): If True, a blank string will be accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation.
    * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
    * excMsg (str): A custom message to use in the raised ValidationException.

    >>> import pysimplevalidate as pysv
    >>> pysv.validateRegexStr('(cat)|(dog)')
    re.compile('(cat)|(dog)')
    >>> pysv.validateRegexStr('"(.*?)"')
    re.compile('"(.*?)"')
    >>> pysv.validateRegexStr('"(.*?"')
    Traceback (most recent call last):
    ...
    pysimplevalidate.ValidationException: '"(.*?"' is not a valid regular expression: missing ), unterminated subpattern at position 1
    """
# TODO - It'd be nice to check regexes in other languages, i.e. JS and Perl.
_validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)

returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg)
if returnNow:
    return value

try:
    return re.compile(value)
except Exception as ex:
    _raiseValidationException(_('%r is not a valid regular expression: %s') % (_errstr(value), ex), excMsg)
<SYSTEM_TASK:> Raises ValidationException if value is not a URL. <END_TASK> <USER_TASK:> Description: def validateURL(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None): """Raises ValidationException if value is not a URL. Returns the value argument. The "http" or "https" protocol part of the URL is optional. * value (str): The value being validated as a URL. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * excMsg (str): A custom message to use in the raised ValidationException. >>> import pysimplevalidate as pysv >>> pysv.validateURL('https://inventwithpython.com') 'https://inventwithpython.com' >>> pysv.validateURL('inventwithpython.com') 'inventwithpython.com' >>> pysv.validateURL('localhost') 'localhost' >>> pysv.validateURL('mailto:[email protected]') 'mailto:[email protected]' >>> pysv.validateURL('ftp://example.com') 'example.com' >>> pysv.validateURL('https://inventwithpython.com/blog/2018/02/02/how-to-ask-for-programming-help/') 'https://inventwithpython.com/blog/2018/02/02/how-to-ask-for-programming-help/' >>> pysv.validateURL('blah blah blah') Traceback (most recent call last): ... pysimplevalidate.ValidationException: 'blah blah blah' is not a valid URL. """
# Reuse the logic in validateRegex() try: result = validateRegex(value=value, regex=URL_REGEX, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes) if result is not None: return result except ValidationException: # 'localhost' is also an acceptable URL: if value == 'localhost': return value _raiseValidationException(_('%r is not a valid URL.') % (value), excMsg)
<SYSTEM_TASK:> Raises ValidationException if value is not an email address. <END_TASK> <USER_TASK:> Description: def validateEmail(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None): """Raises ValidationException if value is not an email address. Returns the value argument. * value (str): The value being validated as an email address. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * excMsg (str): A custom message to use in the raised ValidationException. >>> import pysimplevalidate as pysv >>> pysv.validateEmail('[email protected]') '[email protected]' >>> pysv.validateEmail('alinventwithpython.com') Traceback (most recent call last): ... pysimplevalidate.ValidationException: 'alinventwithpython.com' is not a valid email address. """
# Reuse the logic in validateRegex() try: result = validateRegex(value=value, regex=EMAIL_REGEX, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes) if result is not None: return result except ValidationException: _raiseValidationException(_('%r is not a valid email address.') % (value), excMsg)
<SYSTEM_TASK:>
Raises ValidationException if value is not a yes or no response.
<END_TASK>
<USER_TASK:>
Description:
def validateYesNo(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, yesVal='yes', noVal='no', caseSensitive=False, excMsg=None):
    """Raises ValidationException if value is not a yes or no response.
    Returns the yesVal or noVal argument, not value.

    Note that value can be any case (by default) and can also just match the
    first letter of yesVal or noVal.

    * value (str): The value being validated as a yes/no response.
    * blank (bool): If True, a blank string will be accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation.
    * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
    * yesVal (str): The response accepted as a yes answer. Defaults to 'yes'.
    * noVal (str): The response accepted as a no answer. Defaults to 'no'.
    * caseSensitive (bool): Determines if value must match the case of yesVal and noVal. Defaults to False.
    * excMsg (str): A custom message to use in the raised ValidationException.

    >>> import pysimplevalidate as pysv
    >>> pysv.validateYesNo('y')
    'yes'
    >>> pysv.validateYesNo('YES')
    'yes'
    >>> pysv.validateYesNo('No')
    'no'
    >>> pysv.validateYesNo('OUI', yesVal='oui', noVal='no')
    'oui'
    """
# Validate parameters. TODO - can probably improve this to remove the duplication with validateBool().
_validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)

returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg)
if returnNow:
    return value

yesVal = str(yesVal)
noVal = str(noVal)
if len(yesVal) == 0:
    raise PySimpleValidateException('yesVal argument must be a non-empty string.')
if len(noVal) == 0:
    raise PySimpleValidateException('noVal argument must be a non-empty string.')
if (yesVal == noVal) or (not caseSensitive and yesVal.upper() == noVal.upper()):
    raise PySimpleValidateException('yesVal and noVal arguments must be different.')
if (yesVal[0] == noVal[0]) or (not caseSensitive and yesVal[0].upper() == noVal[0].upper()):
    raise PySimpleValidateException('first character of yesVal and noVal arguments must be different.')

if caseSensitive:
    if value in (yesVal, yesVal[0]):
        return yesVal
    elif value in (noVal, noVal[0]):
        return noVal
else:
    if value.upper() in (yesVal.upper(), yesVal[0].upper()):
        return yesVal
    elif value.upper() in (noVal.upper(), noVal[0].upper()):
        return noVal

_raiseValidationException(_('%r is not a valid %s/%s response.') % (_errstr(value), yesVal, noVal), excMsg)
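A short usage sketch (assuming the pysimplevalidate import used in the doctests above) showing the first-letter matching and the caseSensitive flag:

import pysimplevalidate as pysv

# A bare 'n' matches the first letter of noVal and returns the full noVal.
assert pysv.validateYesNo('n') == 'no'
# Matching is case-insensitive by default.
assert pysv.validateYesNo('YeS') == 'yes'
# With caseSensitive=True, 'YES' no longer matches the default yesVal 'yes'.
try:
    pysv.validateYesNo('YES', caseSensitive=True)
except pysv.ValidationException:
    pass  # raised as expected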
<SYSTEM_TASK:> Raises ValidationException if value is not a true or false response. <END_TASK> <USER_TASK:> Description: def validateBool(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, trueVal='True', falseVal='False', caseSensitive=False, excMsg=None): """Raises ValidationException if value is not a true or false response. Returns the bool True or False, not the value argument. * value (str): The value being validated as a true/false response. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they would otherwise fail. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * trueVal (str): The value that signifies a "true" response. Defaults to 'True'. * falseVal (str): The value that signifies a "false" response. Defaults to 'False'. * caseSensitive (bool): Determines if value must match the case of trueVal and falseVal. Defaults to False. * excMsg (str): A custom message to use in the raised ValidationException. >>> import pysimplevalidate as pysv >>> pysv.validateBool('true') True >>> pysv.validateBool('F') False """
# Validate parameters. TODO - can probably improve this to remove the duplication with validateYesNo().
_validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)

returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg)
if returnNow:
    return value

# Validate trueVal and falseVal before passing them on to validateYesNo():
trueVal = str(trueVal)
falseVal = str(falseVal)
if len(trueVal) == 0:
    raise PySimpleValidateException('trueVal argument must be a non-empty string.')
if len(falseVal) == 0:
    raise PySimpleValidateException('falseVal argument must be a non-empty string.')
if (trueVal == falseVal) or (not caseSensitive and trueVal.upper() == falseVal.upper()):
    raise PySimpleValidateException('trueVal and falseVal arguments must be different.')
if (trueVal[0] == falseVal[0]) or (not caseSensitive and trueVal[0].upper() == falseVal[0].upper()):
    raise PySimpleValidateException('first character of trueVal and falseVal arguments must be different.')

result = validateYesNo(value, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes, yesVal=trueVal, noVal=falseVal, caseSensitive=caseSensitive, excMsg=excMsg)

# Return a bool value instead of a string.
if result == trueVal:
    return True
elif result == falseVal:
    return False
else:
    assert False, 'inner validateYesNo() call returned something that was not trueVal or falseVal. This should never happen.'
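Since validateBool() delegates to validateYesNo(), it inherits the same first-letter matching but returns real bool objects. A brief sketch, assuming the same pysimplevalidate import:

import pysimplevalidate as pysv

# Custom true/false words work the same way as yesVal/noVal:
assert pysv.validateBool('oui', trueVal='oui', falseVal='non') is True
assert pysv.validateBool('N', trueVal='oui', falseVal='non') is False  # first letter of falseVal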
<SYSTEM_TASK:> Raises ValidationException if value is not a USA state. <END_TASK> <USER_TASK:> Description: def validateState(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None, returnStateName=False): """Raises ValidationException if value is not a USA state. Returns the capitalized state abbreviation, unless returnStateName is True in which case it returns the titlecased state name. * value (str): The value being validated as a USA state. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they would otherwise fail. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * excMsg (str): A custom message to use in the raised ValidationException. * returnStateName (bool): If True, the full state name is returned, i.e. 'California'. Otherwise, the abbreviation, i.e. 'CA'. Defaults to False. >>> import pysimplevalidate as pysv >>> pysv.validateState('tx') 'TX' >>> pysv.validateState('california') 'CA' >>> pysv.validateState('WASHINGTON') 'WA' >>> pysv.validateState('WASHINGTON', returnStateName=True) 'Washington' """
# TODO - note that this is USA-centric. I should work on trying to make this more international. # Validate parameters. _validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes) returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg) if returnNow: return value if value.upper() in USA_STATES_UPPER.keys(): # check if value is a state abbreviation if returnStateName: return USA_STATES[value.upper()] # Return full state name. else: return value.upper() # Return abbreviation. elif value.title() in USA_STATES.values(): # check if value is a state name if returnStateName: return value.title() # Return full state name. else: return USA_STATES_REVERSED[value.title()] # Return abbreviation. _raiseValidationException(_('%r is not a state.') % (_errstr(value)), excMsg)
<SYSTEM_TASK:> Raises ValidationException if value is not a month, like 'Jan' or 'March'. <END_TASK> <USER_TASK:> Description: def validateMonth(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, monthNames=ENGLISH_MONTHS, excMsg=None): """Raises ValidationException if value is not a month, like 'Jan' or 'March'. Returns the titlecased month. * value (str): The value being validated as a month name (or a month number, 1 to 12, when the default English month names are used). * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they would otherwise fail. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * monthNames (Mapping): A mapping of uppercase month abbreviations to month names, i.e. {'JAN': 'January', ... }. The default provides English month names. * excMsg (str): A custom message to use in the raised ValidationException. >>> import pysimplevalidate as pysv >>> pysv.validateMonth('Jan') 'January' >>> pysv.validateMonth('MARCH') 'March' """
# Returns the full month name, e.g. 'January'.

# Validate parameters.
_validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)

returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg)
if returnNow:
    return value

try:
    if (monthNames == ENGLISH_MONTHS) and (1 <= int(value) <= 12):
        # This numeric check only applies to months, not when validateDayOfWeek() calls this function.
        return ENGLISH_MONTH_NAMES[int(value) - 1]
except ValueError:
    pass  # Continue if the user didn't enter a number 1 to 12.

# Both month names and month abbreviations will be at least 3 characters.
if len(value) < 3:
    _raiseValidationException(_('%r is not a month.') % (_errstr(value)), excMsg)

if value[:3].upper() in monthNames.keys():  # check if value is a month abbreviation
    return monthNames[value[:3].upper()]
elif value.title() in monthNames.values():  # check if value is a full month name
    return value.title()  # Titlecase works for all of the month names.

_raiseValidationException(_('%r is not a month.') % (_errstr(value)), excMsg)
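Because the implementation above also accepts month numbers when the default English month names are in use, both numeric and abbreviated inputs succeed (assuming the same pysimplevalidate import as in the doctests):

import pysimplevalidate as pysv

assert pysv.validateMonth('3') == 'March'        # numeric input, 1 to 12
assert pysv.validateMonth('sep') == 'September'  # three-letter abbreviation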
<SYSTEM_TASK:> Raises ValidationException if value is not a day of the week, such as 'Mon' or 'Friday'. <END_TASK> <USER_TASK:> Description: def validateDayOfWeek(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, dayNames=ENGLISH_DAYS_OF_WEEK, excMsg=None): """Raises ValidationException if value is not a day of the week, such as 'Mon' or 'Friday'. Returns the titlecased day of the week. * value (str): The value being validated as a day of the week. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * dayNames (Mapping): A mapping of uppercase day abbreviations to day names, i.e. {'SUN': 'Sunday', ...} The default provides English day names. * excMsg (str): A custom message to use in the raised ValidationException. >>> import pysimplevalidate as pysv >>> pysv.validateDayOfWeek('mon') 'Monday' >>> pysv.validateDayOfWeek('THURSday') 'Thursday' """
# TODO - reuse validateChoice for this function
# Returns the full day of the week str, e.g. 'Sunday'.

# Reuses validateMonth, passing the day names in place of the month names:
try:
    return validateMonth(value, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes, monthNames=dayNames)
except ValidationException:
    # Replace the exception message.
    _raiseValidationException(_('%r is not a day of the week.') % (_errstr(value)), excMsg)
<SYSTEM_TASK:> Raises ValidationException if value is not a day of the month, from <END_TASK> <USER_TASK:> Description: def validateDayOfMonth(value, year, month, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None): """Raises ValidationException if value is not a day of the month, from 1 to 28, 29, 30, or 31 depending on the month and year. Returns value. * value (str): The value being validated as existing as a numbered day in the given year and month. * year (int): The given year. * month (int): The given month. 1 is January, 2 is February, and so on. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * excMsg (str): A custom message to use in the raised ValidationException. >>> import pysimplevalidate as pysv >>> pysv.validateDayOfMonth('31', 2019, 10) 31 >>> pysv.validateDayOfMonth('32', 2019, 10) Traceback (most recent call last): ... pysimplevalidate.ValidationException: '32' is not a day in the month of October 2019 >>> pysv.validateDayOfMonth('29', 2004, 2) 29 >>> pysv.validateDayOfMonth('29', 2005, 2) Traceback (most recent call last): ... pysimplevalidate.ValidationException: '29' is not a day in the month of February 2005 """
try: daysInMonth = calendar.monthrange(year, month)[1] except: raise PySimpleValidateException('invalid arguments for year and/or month') try: return validateInt(value, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes, min=1, max=daysInMonth) except: # Replace the exception message. _raiseValidationException(_('%r is not a day in the month of %s %s') % (_errstr(value), ENGLISH_MONTH_NAMES[month - 1], year), excMsg)
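The day-count bound comes straight from calendar.monthrange(), which is what makes the leap-year doctests above work:

import calendar

# monthrange() returns (weekday_of_first_day, number_of_days_in_month).
assert calendar.monthrange(2004, 2)[1] == 29  # 2004 is a leap year
assert calendar.monthrange(2005, 2)[1] == 28  # 2005 is not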
<SYSTEM_TASK:> Returns an appropriate logging level integer from a string name <END_TASK> <USER_TASK:> Description: def get_level(level_string): """ Returns an appropriate logging level integer from a string name """
levels = {'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL} try: level = levels[level_string.lower()] except KeyError: sys.exit('{0} is not a recognized logging level'.format(level_string)) else: return level
<SYSTEM_TASK:> Configures and generates a Logger object, 'openaccess_epub' based on common <END_TASK> <USER_TASK:> Description: def config_logging(no_log_file, log_to, log_level, silent, verbosity): """ Configures and generates a Logger object, 'openaccess_epub', based on common parameters used for console interface script execution in OpenAccess_EPUB. These parameters are: no_log_file Boolean. Disables logging to file. If set to True, log_to and log_level become irrelevant. log_to A string name indicating a file path for logging. log_level Logging level, one of: 'debug', 'info', 'warning', 'error', 'critical' silent Boolean. Disables console output when True. verbosity Console logging level, one of: 'debug', 'info', 'warning', 'error', 'critical' When file logging is enabled, this method configures a FileHandler using the standard format; console output goes through a StreamHandler with a message-only Formatter. """
log_level = get_level(log_level) console_level = get_level(verbosity) #We want to configure our openaccess_epub as the parent log log = logging.getLogger('openaccess_epub') log.setLevel(logging.DEBUG) # Don't filter at the log level standard = logging.Formatter(STANDARD_FORMAT) message_only = logging.Formatter(MESSAGE_ONLY_FORMAT) #Only add FileHandler IF it's allowed AND we have a name for it if not no_log_file and log_to is not None: fh = logging.FileHandler(filename=log_to) fh.setLevel(log_level) fh.setFormatter(standard) log.addHandler(fh) #Add on the console StreamHandler at verbosity level if silent not set if not silent: sh_echo = logging.StreamHandler(sys.stdout) sh_echo.setLevel(console_level) sh_echo.setFormatter(message_only) log.addHandler(sh_echo)
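A minimal usage sketch; the import path is an assumption here, since the module location is not shown in this excerpt:

from openaccess_epub.utils.logs import config_logging  # path assumed
import logging

# Log DEBUG and above to 'conversion.log'; echo INFO and above to the console.
config_logging(no_log_file=False, log_to='conversion.log',
               log_level='debug', silent=False, verbosity='info')

log = logging.getLogger('openaccess_epub')
log.info('appears in the file and on the console')
log.debug('appears in the file only')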
<SYSTEM_TASK:> This utility function will remove a previous Logger FileHandler, if one <END_TASK> <USER_TASK:> Description: def replace_filehandler(logname, new_file, level=None, frmt=None): """ This utility function will remove a previous Logger FileHandler, if one exists, and add a new FileHandler. Parameters: logname The name of the log to reconfigure, 'openaccess_epub' for example new_file The file location for the new FileHandler level Optional. Level of FileHandler logging, if not used then the new FileHandler will have the same level as the old. Pass in name strings, 'INFO' for example frmt Optional string format of Formatter for the FileHandler, if not used then the new FileHandler will inherit the Formatter of the old, pass in format strings, '%(message)s' for example It is best practice to use the optional level and frmt arguments to account for the case where a previous FileHandler does not exist. In the case that they are not used and a previous FileHandler is not found, then the level will be set to logging.DEBUG and the frmt will be set to openaccess_epub.utils.logs.STANDARD_FORMAT as a matter of safety. """
#Call up the Logger to get reconfigured log = logging.getLogger(logname) #Set up defaults and whether explicit for level if level is not None: level = get_level(level) explicit_level = True else: level = logging.DEBUG explicit_level = False #Set up defaults and whether explicit for frmt if frmt is not None: frmt = logging.Formatter(frmt) explicit_frmt = True else: frmt = logging.Formatter(STANDARD_FORMAT) explicit_frmt = False #Look for a FileHandler to replace, set level and frmt if not explicit old_filehandler = None for handler in log.handlers: #I think this is an effective method of detecting FileHandler if type(handler) == logging.FileHandler: old_filehandler = handler if not explicit_level: level = handler.level if not explicit_frmt: frmt = handler.formatter break #Set up the new FileHandler new_filehandler = logging.FileHandler(new_file) new_filehandler.setLevel(level) new_filehandler.setFormatter(frmt) #Add the new FileHandler log.addHandler(new_filehandler) #Remove the old FileHandler if we found one if old_filehandler is not None: old_filehandler.close() log.removeHandler(old_filehandler)
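For example, a batch run might redirect the log to a per-article file between conversions (import path assumed, as above):

from openaccess_epub.utils.logs import replace_filehandler  # path assumed

# Swap the log file for each article; passing level and frmt explicitly
# guards against the case where no previous FileHandler exists.
replace_filehandler('openaccess_epub', 'article-1234.log',
                    level='INFO', frmt='%(message)s')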
<SYSTEM_TASK:> Remap x, y, sigma parameters from pixel to degree. <END_TASK> <USER_TASK:> Description: def rmp_pixel_deg_xys(vecX, vecY, vecPrfSd, tplPngSize, varExtXmin, varExtXmax, varExtYmin, varExtYmax): """Remap x, y, sigma parameters from pixel to degree. Parameters ---------- vecX : 1D numpy array Array with possible x parameters in pixels vecY : 1D numpy array Array with possible y parameters in pixels vecPrfSd : 1D numpy array Array with possible sd parameters in pixels tplPngSize : tuple, 2 Pixel dimensions of the visual space in pixel (width, height). varExtXmin : float Extent of visual space from centre in negative x-direction (width) varExtXmax : float Extent of visual space from centre in positive x-direction (width) varExtYmin : float Extent of visual space from centre in negative y-direction (height) varExtYmax : float Extent of visual space from centre in positive y-direction (height) Returns ------- vecX : 1D numpy array Array with possible x parameters in degrees vecY : 1D numpy array Array with possible y parameters in degrees vecPrfSd : 1D numpy array Array with possible sd parameters in degrees """
# Remap modelled x-positions of the pRFs:
vecXdgr = rmp_rng(vecX, varExtXmin, varExtXmax, varOldThrMin=0.0,
                  varOldAbsMax=(tplPngSize[0] - 1))

# Remap modelled y-positions of the pRFs:
vecYdgr = rmp_rng(vecY, varExtYmin, varExtYmax, varOldThrMin=0.0,
                  varOldAbsMax=(tplPngSize[1] - 1))

# Calculate the scaling factor from pixels to degrees of visual angle
# separately for the x- and the y-directions (the two should be the same).
varPix2DgrX = np.divide((varExtXmax - varExtXmin), tplPngSize[0])
varPix2DgrY = np.divide((varExtYmax - varExtYmin), tplPngSize[1])

# Check whether varPix2DgrX and varPix2DgrY are similar:
strErrMsg = 'ERROR. The pixel-to-degree ratios of the X and Y dimensions ' + \
    'in stimulus space do not agree'
assert 0.5 > np.absolute((varPix2DgrX - varPix2DgrY)), strErrMsg

# Convert pRF sizes from pixels to degrees of visual angle:
vecPrfSdDgr = np.multiply(vecPrfSd, varPix2DgrX)

# Return new values.
return vecXdgr, vecYdgr, vecPrfSdDgr
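The helper rmp_rng() is not shown in this excerpt; a minimal sketch consistent with the calls above, with the signature and behaviour inferred (the real helper may differ), would be a plain linear rescaling:

import numpy as np

def rmp_rng(aryVls, varNewMin, varNewMax, varOldThrMin=0.0, varOldAbsMax=1.0):
    """Sketch only: linearly remap aryVls from the range
    [varOldThrMin, varOldAbsMax] to the range [varNewMin, varNewMax]."""
    aryVls = (aryVls - varOldThrMin) / (varOldAbsMax - varOldThrMin)
    return aryVls * (varNewMax - varNewMin) + varNewMin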
<SYSTEM_TASK:> Create an array with all possible model parameter combinations <END_TASK> <USER_TASK:> Description: def crt_mdl_prms(tplPngSize, varNum1, varExtXmin, varExtXmax, varNum2, varExtYmin, varExtYmax, varNumPrfSizes, varPrfStdMin, varPrfStdMax, kwUnt='pix', kwCrd='crt'): """Create an array with all possible model parameter combinations Parameters ---------- tplPngSize : tuple, 2 Pixel dimensions of the visual space (width, height). varNum1 : int, positive Number of x-positions to model varExtXmin : float Extent of visual space from centre in negative x-direction (width) varExtXmax : float Extent of visual space from centre in positive x-direction (width) varNum2 : int, positive Number of y-positions to model. varExtYmin : float Extent of visual space from centre in negative y-direction (height) varExtYmax : float Extent of visual space from centre in positive y-direction (height) varNumPrfSizes : int, positive Number of pRF sizes to model. varPrfStdMin : float, positive Minimum pRF model size (standard deviation of 2D Gaussian) varPrfStdMax : float, positive Maximum pRF model size (standard deviation of 2D Gaussian) kwUnt: str Keyword to set the unit for model parameter combinations; model parameters can be in pixels ["pix"] or degrees of visual angle ["deg"] kwCrd: str Keyword to set the coordinate system for model parameter combinations; parameters can be in cartesian ["crt"] or polar ["pol"] coordinates Returns ------- aryMdlParams : 2d numpy array, shape [n_x_pos*n_y_pos*n_sd, 3] Model parameters (x, y, sigma) for all models. """
# Number of pRF models to be created (i.e. number of possible
# combinations of x-position, y-position, and standard deviation):
varNumMdls = varNum1 * varNum2 * varNumPrfSizes

# Array for the x-position, y-position, and standard deviations for
# which pRF model time courses are going to be created, where the
# columns correspond to: (1) the x-position, (2) the y-position, and
# (3) the standard deviation. The parameters are in units of the
# upsampled visual space.
aryMdlParams = np.zeros((varNumMdls, 3), dtype=np.float32)

# Counter for parameter array:
varCntMdlPrms = 0

if kwCrd == 'crt':

    # Vector with the modelled x-positions of the pRFs:
    vecX = np.linspace(varExtXmin, varExtXmax, varNum1, endpoint=True)

    # Vector with the modelled y-positions of the pRFs:
    vecY = np.linspace(varExtYmin, varExtYmax, varNum2, endpoint=True)

    # Vector with standard deviations of pRF models (in degrees of
    # visual angle):
    vecPrfSd = np.linspace(varPrfStdMin, varPrfStdMax, varNumPrfSizes,
                           endpoint=True)

    if kwUnt == 'deg':
        # Parameters are already in degrees of visual angle; do nothing.
        pass

    elif kwUnt == 'pix':
        # Convert parameters to pixels.
        vecX, vecY, vecPrfSd = rmp_deg_pixel_xys(vecX, vecY, vecPrfSd,
                                                 tplPngSize, varExtXmin,
                                                 varExtXmax, varExtYmin,
                                                 varExtYmax)

    else:
        print('Unknown keyword provided for unit of model parameter ' +
              'combinations: should be either pix or deg')

    # Put all combinations of x-position, y-position, and standard
    # deviations into the array:

    # Loop through x-positions:
    for idxX in range(0, varNum1):

        # Loop through y-positions:
        for idxY in range(0, varNum2):

            # Loop through standard deviations (of Gaussian pRF models):
            for idxSd in range(0, varNumPrfSizes):

                # Place index and parameters in array:
                aryMdlParams[varCntMdlPrms, 0] = vecX[idxX]
                aryMdlParams[varCntMdlPrms, 1] = vecY[idxY]
                aryMdlParams[varCntMdlPrms, 2] = vecPrfSd[idxSd]

                # Increment parameter index:
                varCntMdlPrms += 1

elif kwCrd == 'pol':

    # Vector with the radial position:
    vecRad = np.linspace(0.0, varExtXmax, varNum1, endpoint=True)

    # Vector with the angular position:
    vecTht = np.linspace(0.0, 2*np.pi, varNum2, endpoint=False)

    # Get all possible combinations on the grid, using matrix indexing ij
    # of output:
    aryRad, aryTht = np.meshgrid(vecRad, vecTht, indexing='ij')

    # Flatten arrays to be able to combine them with meshgrid:
    vecRad = aryRad.flatten()
    vecTht = aryTht.flatten()

    # Convert from polar to cartesian:
    vecX, vecY = map_pol_to_crt(vecTht, vecRad)

    # Vector with standard deviations of pRF models (in degrees of
    # visual angle):
    vecPrfSd = np.linspace(varPrfStdMin, varPrfStdMax, varNumPrfSizes,
                           endpoint=True)

    if kwUnt == 'deg':
        # Parameters are already in degrees of visual angle; do nothing.
        pass

    elif kwUnt == 'pix':
        # Convert parameters to pixels.
        vecX, vecY, vecPrfSd = rmp_deg_pixel_xys(vecX, vecY, vecPrfSd,
                                                 tplPngSize, varExtXmin,
                                                 varExtXmax, varExtYmin,
                                                 varExtYmax)

    else:
        print('Unknown keyword provided for unit of model parameter ' +
              'combinations: should be either pix or deg')

    # Put all combinations of x-position, y-position, and standard
    # deviations into the array:

    # Loop through combinations of x- and y-positions:
    for idxXY in range(0, varNum1*varNum2):

        # Loop through standard deviations (of Gaussian pRF models):
        for idxSd in range(0, varNumPrfSizes):

            # Place index and parameters in array:
            aryMdlParams[varCntMdlPrms, 0] = vecX[idxXY]
            aryMdlParams[varCntMdlPrms, 1] = vecY[idxXY]
            aryMdlParams[varCntMdlPrms, 2] = vecPrfSd[idxSd]

            # Increment parameter index:
            varCntMdlPrms += 1

else:
    print('Unknown keyword provided for coordinate system for model ' +
          'parameter combinations: should be either crt or pol')

return aryMdlParams
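A quick shape check for the cartesian case (all names taken from the function above; the grid sizes are arbitrary):

# 5 x-positions, 5 y-positions, 4 pRF sizes on a 128 x 128 pixel canvas,
# with parameters kept in degrees of visual angle.
aryMdlParams = crt_mdl_prms((128, 128), 5, -8.0, 8.0, 5, -5.0, 5.0,
                            4, 0.5, 6.0, kwUnt='deg', kwCrd='crt')
print(aryMdlParams.shape)  # (100, 3): 5 * 5 * 4 models, columns (x, y, sigma)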
<SYSTEM_TASK:> Create responses of 2D Gauss models to spatial conditions. <END_TASK> <USER_TASK:> Description: def crt_mdl_rsp(arySptExpInf, tplPngSize, aryMdlParams, varPar, strCrd='crt', lgcPrint=True): """Create responses of 2D Gauss models to spatial conditions. Parameters ---------- arySptExpInf : 3d numpy array, shape [n_x_pix, n_y_pix, n_conditions] All spatial conditions stacked along second axis. tplPngSize : tuple, 2 Pixel dimensions of the visual space (width, height). aryMdlParams : 2d numpy array, shape [n_x_pos*n_y_pos*n_sd, 3] Model parameters (x, y, sigma) for all models. varPar : int, positive Number of cores to parallelize over. strCrd, string, either 'crt' or 'pol' Whether model parameters are provided in cartesian or polar coordinates lgcPrint : boolean Whether print statements should be executed. Returns ------- aryMdlCndRsp : 2d numpy array, shape [n_x_pos*n_y_pos*n_sd, n_cond] Responses of 2D Gauss models to spatial conditions. """
if varPar == 1:
    # If the number of cores requested by the user is equal to 1, we save
    # the overhead of multiprocessing by calling cnvl_2D_gauss directly.
    aryMdlCndRsp = cnvl_2D_gauss(0, aryMdlParams, arySptExpInf, tplPngSize,
                                 None, strCrd=strCrd)

else:
    # The long array with all the combinations of model parameters is put
    # into separate chunks for parallelisation, using a list of arrays.
    lstMdlParams = np.array_split(aryMdlParams, varPar)

    # Create a queue to put the results in:
    queOut = mp.Queue()

    # Empty list for results from parallel processes (for pRF model
    # responses):
    lstMdlTc = [None] * varPar

    # Empty list for processes:
    lstPrcs = [None] * varPar

    if lgcPrint:
        print('---------Running parallel processes')

    # Create processes:
    for idxPrc in range(0, varPar):
        lstPrcs[idxPrc] = mp.Process(target=cnvl_2D_gauss,
                                     args=(idxPrc,
                                           lstMdlParams[idxPrc],
                                           arySptExpInf,
                                           tplPngSize,
                                           queOut
                                           ),
                                     kwargs={'strCrd': strCrd},
                                     )

        # Daemon (kills processes when exiting):
        lstPrcs[idxPrc].daemon = True

    # Start processes:
    for idxPrc in range(0, varPar):
        lstPrcs[idxPrc].start()

    # Collect results from queue:
    for idxPrc in range(0, varPar):
        lstMdlTc[idxPrc] = queOut.get(True)

    # Join processes:
    for idxPrc in range(0, varPar):
        lstPrcs[idxPrc].join()

    if lgcPrint:
        print('---------Collecting results from parallel processes')

    # Put output arrays from parallel processes into one big array, in
    # process order:
    lstMdlTc = sorted(lstMdlTc)
    aryMdlCndRsp = np.empty((0, arySptExpInf.shape[-1]))
    for idx in range(0, varPar):
        aryMdlCndRsp = np.concatenate((aryMdlCndRsp, lstMdlTc[idx][1]),
                                      axis=0)

    # Clean up:
    del(lstMdlParams)
    del(lstMdlTc)

return aryMdlCndRsp.astype('float16')
<SYSTEM_TASK:> Create temporally upsampled neural time courses. <END_TASK> <USER_TASK:> Description: def crt_nrl_tc(aryMdlRsp, aryCnd, aryOns, aryDrt, varTr, varNumVol, varTmpOvsmpl, lgcPrint=True): """Create temporally upsampled neural time courses. Parameters ---------- aryMdlRsp : 2d numpy array, shape [n_x_pos * n_y_pos * n_sd, n_cond] Responses of 2D Gauss models to spatial conditions. aryCnd : np.array 1D array with condition identifiers (every condition has its own int) aryOns : np.array, same len as aryCnd 1D array with condition onset times in seconds. aryDrt : np.array, same len as aryCnd 1D array with condition durations of different conditions in seconds. varTr : float, positive Time to repeat (TR) of the (fMRI) experiment varNumVol : float, positive Number of data points (volumes) in the (fMRI) data varTmpOvsmpl : float, positive Factor by which the time courses should be temporally upsampled. lgcPrint: boolean, default True Should print messages be sent to user? Returns ------- aryNrlTc : 2d numpy array, shape [n_x_pos * n_y_pos * n_sd, varNumVol*varTmpOvsmpl] Neural time course models in temporally upsampled space Notes --------- [1] This function first creates boxcar functions based on the conditions as they are specified in the temporal experiment information, provided by the user in the csv file. It then replaces the 1s in the boxcar function by predicted condition values that were previously calculated based on the overlap between the assumed 2D Gaussian for the current model and the presented stimulus aperture for that condition. Since the 2D Gaussian is normalized, the overlap value will be between 0 and 1. """
# Adjust the input, if necessary, such that input is 2D:
tplInpShp = aryMdlRsp.shape
aryMdlRsp = aryMdlRsp.reshape((-1, aryMdlRsp.shape[-1]))

# The first spatial condition might code the baseline (blank periods) with
# all zeros. If this is the case, remove the first spatial condition, since
# for temporal conditions this is removed automatically below and we need
# temporal and spatial conditions to match.
if np.all(aryMdlRsp[:, 0] == 0):
    if lgcPrint:
        print('------------Removed first spatial condition (all zeros)')
    aryMdlRsp = aryMdlRsp[:, 1:]

# Create boxcar functions in temporally upsampled space:
aryBxCarTmp = create_boxcar(aryCnd, aryOns, aryDrt, varTr, varNumVol,
                            aryExclCnd=np.array([0.]),
                            varTmpOvsmpl=varTmpOvsmpl).T

# Make sure that aryMdlRsp and aryBxCarTmp have the same number of
# conditions:
assert aryMdlRsp.shape[-1] == aryBxCarTmp.shape[0]

# Pre-allocate pixelwise boxcar array:
aryNrlTc = np.zeros((aryMdlRsp.shape[0], aryBxCarTmp.shape[-1]),
                    dtype='float16')

# Loop through boxcar functions of conditions:
for ind, vecCndOcc in enumerate(aryBxCarTmp):

    # Get response predicted by models for this specific spatial condition:
    rspValPrdByMdl = aryMdlRsp[:, ind]

    # Insert the predicted response value at every occurrence of the
    # condition, using broadcasting:
    aryNrlTc[..., vecCndOcc.astype('bool')] = rspValPrdByMdl[:, None]

# Determine output shape:
tplOutShp = tplInpShp[:-1] + (int(varNumVol*varTmpOvsmpl), )

return aryNrlTc.reshape(tplOutShp).astype('float16')
<SYSTEM_TASK:> Convolve every neural time course with HRF function. <END_TASK> <USER_TASK:> Description: def crt_prf_tc(aryNrlTc, varNumVol, varTr, varTmpOvsmpl, switchHrfSet, tplPngSize, varPar, dctPrm=None, lgcPrint=True): """Convolve every neural time course with HRF function. Parameters ---------- aryNrlTc : 4d numpy array, shape [n_x_pos, n_y_pos, n_sd, n_vol] Temporally upsampled neural time course models. varNumVol : float, positive Number of volumes of the (fMRI) data. varTr : float, positive Time to repeat (TR) of the (fMRI) experiment. varTmpOvsmpl : int, positive Factor by which the data has been temporally upsampled. switchHrfSet : int, (1, 2, 3) Switch to determine which hrf basis functions are used tplPngSize : tuple Pixel dimensions of the visual space (width, height). varPar : int, positive Number of cores for multi-processing. dctPrm : dictionary, default None Dictionary with customized hrf parameters. If this is None, default hrf parameters will be used. lgcPrint: boolean, default True Should print messages be sent to user? Returns ------- aryNrlTcConv : 5d numpy array, shape [n_x_pos, n_y_pos, n_sd, n_hrf_bases, varNumVol] Neural time courses convolved with HRF basis functions """
# Select hrf basis functions:
if switchHrfSet == 3:
    lstHrf = [spmt, dspmt, ddspmt]
elif switchHrfSet == 2:
    lstHrf = [spmt, dspmt]
elif switchHrfSet == 1:
    lstHrf = [spmt]

# If necessary, adjust the input such that input is 2D, with last dim time:
tplInpShp = aryNrlTc.shape
aryNrlTc = np.reshape(aryNrlTc, (-1, aryNrlTc.shape[-1]))

if varPar == 1:
    # If the number of cores requested by the user is equal to 1, we save
    # the overhead of multiprocessing by calling cnvl_tc directly.
    aryNrlTcConv = cnvl_tc(0, aryNrlTc, lstHrf, varTr, varNumVol,
                           varTmpOvsmpl, None, dctPrm=dctPrm)

else:
    # Put input data into chunks:
    lstNrlTc = np.array_split(aryNrlTc, varPar)

    # Create a queue to put the results in:
    queOut = mp.Queue()

    # Empty list for processes:
    lstPrcs = [None] * varPar

    # Empty list for results of parallel processes:
    lstConv = [None] * varPar

    if lgcPrint:
        print('------------Running parallel processes')

    # Create processes:
    for idxPrc in range(0, varPar):
        lstPrcs[idxPrc] = mp.Process(target=cnvl_tc,
                                     args=(idxPrc, lstNrlTc[idxPrc], lstHrf,
                                           varTr, varNumVol, varTmpOvsmpl,
                                           queOut),
                                     kwargs={'dctPrm': dctPrm},
                                     )

        # Daemon (kills processes when exiting):
        lstPrcs[idxPrc].daemon = True

    # Start processes:
    for idxPrc in range(0, varPar):
        lstPrcs[idxPrc].start()

    # Collect results from queue:
    for idxPrc in range(0, varPar):
        lstConv[idxPrc] = queOut.get(True)

    # Join processes:
    for idxPrc in range(0, varPar):
        lstPrcs[idxPrc].join()

    if lgcPrint:
        print('------------Collecting results from parallel processes')

    # Put output into correct order:
    lstConv = sorted(lstConv)

    # Concatenate convolved pixel time courses (in the original order):
    aryNrlTcConv = np.zeros((0, switchHrfSet, varNumVol), dtype=np.float32)
    for idxRes in range(0, varPar):
        aryNrlTcConv = np.concatenate((aryNrlTcConv, lstConv[idxRes][1]),
                                      axis=0)

    # Clean up:
    del(aryNrlTc)
    del(lstConv)

# Reshape results:
tplOutShp = tplInpShp[:-1] + (len(lstHrf), ) + (varNumVol, )

# Return:
return np.reshape(aryNrlTcConv, tplOutShp).astype(np.float32)
<SYSTEM_TASK:> Create all spatial x feature prf time courses. <END_TASK> <USER_TASK:> Description: def crt_prf_ftr_tc(aryMdlRsp, aryTmpExpInf, varNumVol, varTr, varTmpOvsmpl, switchHrfSet, tplPngSize, varPar, dctPrm=None, lgcPrint=True): """Create all spatial x feature prf time courses. Parameters ---------- aryMdlRsp : 2d numpy array, shape [n_x_pos * n_y_pos * n_sd, n_cond] Responses of 2D Gauss models to spatial conditions aryTmpExpInf: 2d numpy array, shape [n_events, 4] Temporal information about conditions (condition, onset, duration, feature) varNumVol : float, positive Number of volumes of the (fMRI) data. varTr : float, positive Time to repeat (TR) of the (fMRI) experiment. varTmpOvsmpl : int, positive Factor by which the data has been temporally upsampled. switchHrfSet : int, (1, 2, 3) Switch to determine which hrf basis functions are used tplPngSize : tuple Pixel dimensions of the visual space (width, height). varPar : int, positive Number of cores to parallelize over. dctPrm : dictionary, default None Dictionary with customized hrf parameters. If this is None, default hrf parameters will be used. lgcPrint: boolean, default True Should print messages be sent to user? Returns ------- aryPrfTc : 3d numpy array, shape [nr of models, nr of unique features, varNumVol] Prf time course models """
# Identify number of unique features:
vecFeat = np.unique(aryTmpExpInf[:, 3])
vecFeat = vecFeat[np.nonzero(vecFeat)[0]]

# Preallocate the output array:
aryPrfTc = np.zeros((aryMdlRsp.shape[0], 0, varNumVol), dtype=np.float32)

# Loop over unique features:
for indFtr, ftr in enumerate(vecFeat):

    if lgcPrint:
        print('---------Create prf time course model for feature ' +
              str(ftr))

    # Derive spatial conditions, onsets and durations for this specific
    # feature:
    aryTmpCnd = aryTmpExpInf[aryTmpExpInf[:, 3] == ftr, 0]
    aryTmpOns = aryTmpExpInf[aryTmpExpInf[:, 3] == ftr, 1]
    aryTmpDrt = aryTmpExpInf[aryTmpExpInf[:, 3] == ftr, 2]

    # Create temporally upsampled neural time courses:
    aryNrlTcTmp = crt_nrl_tc(aryMdlRsp, aryTmpCnd, aryTmpOns, aryTmpDrt,
                             varTr, varNumVol, varTmpOvsmpl,
                             lgcPrint=lgcPrint)

    # Convolve with hrf to create model pRF time courses:
    aryPrfTcTmp = crt_prf_tc(aryNrlTcTmp, varNumVol, varTr, varTmpOvsmpl,
                             switchHrfSet, tplPngSize, varPar,
                             dctPrm=dctPrm, lgcPrint=lgcPrint)

    # Append this feature's time courses to the array that will be
    # returned:
    aryPrfTc = np.concatenate((aryPrfTc, aryPrfTcTmp), axis=1)

return aryPrfTc
<SYSTEM_TASK:> Find unique rows in 2D array. <END_TASK> <USER_TASK:> Description: def fnd_unq_rws(A, return_index=False, return_inverse=False): """Find unique rows in 2D array. Parameters ---------- A : 2d numpy array Array for which unique rows should be identified. return_index : bool Bool to decide whether I is returned. return_inverse : bool Bool to decide whether J is returned. Returns ------- B : 1d numpy array, Unique rows I: 1d numpy array, only returned if return_index is True B = A[I,:] J: 2d numpy array, only returned if return_inverse is True A = B[J,:] """
A = np.require(A, requirements='C') assert A.ndim == 2, "array must be 2-dim'l" B = np.unique(A.view([('', A.dtype)]*A.shape[1]), return_index=return_index, return_inverse=return_inverse) if return_index or return_inverse: return (B[0].view(A.dtype).reshape((-1, A.shape[1]), order='C'),) \ + B[1:] else: return B.view(A.dtype).reshape((-1, A.shape[1]), order='C')
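A small round-trip example showing the B[J, :] reconstruction promised in the docstring:

import numpy as np

A = np.array([[1, 2], [3, 4], [1, 2]])
B, J = fnd_unq_rws(A, return_inverse=True)
assert B.shape == (2, 2)            # two unique rows: [1, 2] and [3, 4]
assert np.array_equal(A, B[J, :])   # original array reconstructed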
<SYSTEM_TASK:> This method defines how the Article tries to determine the publisher of <END_TASK> <USER_TASK:> Description: def get_publisher(self): """ This method defines how the Article tries to determine the publisher of the article. This method relies on the success of the get_DOI method to fetch the appropriate full DOI for the article. It then takes the DOI prefix which corresponds to the publisher and then uses that to attempt to load the correct publisher-specific code. This may fail; if the DOI is not mapped to a code file, if the DOI is mapped but the code file could not be located, or if the mapped code file is malformed then this method will issue/log an informative error message and return None. This method will not try to infer the publisher based on any metadata other than the DOI of the article. Returns ------- publisher : Publisher instance or None """
#For a detailed explanation of the DOI system, visit: #http://www.doi.org/hb.html #The basic syntax of a DOI is this <prefix>/<suffix> #The <prefix> specifies a unique DOI registrant, in our case, this #should correspond to the publisher. We use this information to register #the correct Publisher class with this article doi_prefix = self.doi.split('/')[0] #The import_by_doi method should raise ImportError if a problem occurred try: publisher_mod = openaccess_epub.publisher.import_by_doi(doi_prefix) except ImportError as e: log.exception(e) return None #Each publisher module should define an attribute "pub_class" pointing #to the publisher-specific class extending #openaccess_epub.publisher.Publisher return publisher_mod.pub_class(self)
<SYSTEM_TASK:> This method defines how the Article tries to detect the DOI. <END_TASK> <USER_TASK:> Description: def get_DOI(self): """ This method defines how the Article tries to detect the DOI. It attempts to determine the article DOI string by DTD-appropriate inspection of the article metadata. This method should be made as flexible as necessary to properly collect the DOI for any XML publishing specification. Returns ------- doi : str or None The full (publisher/article) DOI string for the article, or None on failure. """
if self.dtd_name == 'JPTS':
    doi = self.root.xpath("./front/article-meta/article-id[@pub-id-type='doi']")
    if doi:
        return doi[0].text
log.warning('Unable to locate DOI string for this article')
return None
<SYSTEM_TASK:> Least squares fitting in numpy without cross-validation. <END_TASK> <USER_TASK:> Description: def np_lst_sq(vecMdl, aryFuncChnk): """Least squares fitting in numpy without cross-validation. Notes ----- This is just a wrapper function for np.linalg.lstsq to keep piping consistent. """
aryTmpBts, vecTmpRes = np.linalg.lstsq(vecMdl, aryFuncChnk, rcond=-1)[:2] return aryTmpBts, vecTmpRes
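A synthetic example illustrating the output shapes when fitting several voxels at once:

import numpy as np

vecMdl = np.random.randn(100, 2)       # design matrix: 100 volumes, 2 predictors
aryFuncChnk = np.random.randn(100, 3)  # data: 100 volumes, 3 voxels
aryTmpBts, vecTmpRes = np_lst_sq(vecMdl, aryFuncChnk)
print(aryTmpBts.shape)  # (2, 3): one beta per predictor and voxel
print(vecTmpRes.shape)  # (3,): residual sum of squares per voxel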