Columns:
docstring: string, lengths 52-499
function: string, lengths 67-35.2k
__index_level_0__: int64, values 52.6k-1.16M
Try to convert item to lowercase, if it is a string. Args: item (obj): Str, unicode or any other object. Returns: obj: ``item.lower()`` if `item` is ``str`` or ``unicode``, else just `item` itself.
def _lower_if_str(item):
    # python 2 / 3 shim
    try:
        string_type = basestring
    except NameError:
        string_type = str

    if isinstance(item, string_type):
        return item.lower()

    return item
1,083,317
Raises an error if an invalid SAM format is detected. Args: entries (list): A list of SamEntry instances. line (int): Line number of the first entry. Raises: FormatError: Raised with a descriptive message when the SAM format is incorrect.
def sam_verifier(entries, line=None):
    regex = r'^[!-?A-~]{1,255}\t' \
            + r'([0-9]{1,4}|[0-5][0-9]{4}|' \
            + r'[0-9]{1,4}|[1-5][0-9]{4}|' \
            + r'6[0-4][0-9]{3}|65[0-4][0-9]{2}|' \
            + r'655[0-2][0-9]|6553[0-7])\t' \
            + r'\*|[!-()+-<>-~][!-~]*\t' \
            + r'([0-9]{1,9}|1[0-9]{9}|2(0[0-9]{8}|' \
            + r'1([0-3][0-9]{7}|4([0-6][0-9]{6}|' \
            + r'7([0-3][0-9]{5}|4([0-7][0-9]{4}|' \
            + r'8([0-2][0-9]{3}|3([0-5][0-9]{2}|' \
            + r'6([0-3][0-9]|4[0-7])))))))))\t' \
            + r'([0-9]{1,2}|1[0-9]{2}|' \
            + r'2[0-4][0-9]|25[0-5])\t' \
            + r'\*|([0-9]+[MIDNSHPX=])+\t' \
            + r'\*|=|[!-()+-<>-~][!-~]*\t' \
            + r'([0-9]{1,9}|1[0-9]{9}|2(0[0-9]{8}|' \
            + r'1([0-3][0-9]{7}|4([0-6][0-9]{6}|' \
            + r'7([0-3][0-9]{5}|4([0-7][0-9]{4}|' \
            + r'8([0-2][0-9]{3}|3([0-5][0-9]{2}|' \
            + r'6([0-3][0-9]|4[0-7])))))))))\t' \
            + r'-?([0-9]{1,9}|1[0-9]{9}|2(0[0-9]{8}|' \
            + r'1([0-3][0-9]{7}|4([0-6][0-9]{6}|' \
            + r'7([0-3][0-9]{5}|4([0-7][0-9]{4}|' \
            + r'8([0-2][0-9]{3}|3([0-5][0-9]{2}|' \
            + r'6([0-3][0-9]|4[0-7])))))))))\t' \
            + r'\*|[A-Za-z=.]+\t' \
            + r'[!-~]+{0}$'.format(os.linesep)
    delimiter = r'\t'

    for entry in entries:
        try:
            entry_verifier([entry.write()], regex, delimiter)
        except FormatError as error:
            # Format info on what entry error came from
            if line:
                intro = 'Line {0}'.format(str(line))
            elif error.part == 0:
                intro = 'An entry with reference {0}'.format(entry.rname)
            else:
                intro = 'An entry with query {0}'.format(entry.qname)

            # Generate error
            if error.part == 0:
                if len(entry.qname) == 0:
                    msg = '{0} has no query name'.format(intro)
                elif len(entry.qname) > 255:
                    msg = '{0} query name must be less than 255 ' \
                          'characters'.format(intro)
                else:
                    msg = '{0} query name contains characters not in ' \
                          '[!-?A-~]'.format(intro)
            elif error.part == 1:
                msg = '{0} flag not in range [0-(2^31-1)]'.format(intro)
            elif error.part == 2:
                if len(entry.rname) == 0:
                    msg = '{0} has no reference name'.format(intro)
                else:
                    msg = '{0} reference name has characters not in ' \
                          '[!-()+-<>-~][!-~]'.format(intro)
            elif error.part == 3:
                msg = '{0} leftmost position not in range ' \
                      '[0-(2^31-1)]'.format(intro)
            elif error.part == 4:
                msg = '{0} mapping quality not in range ' \
                      '[0-(2^8-1)]'.format(intro)
            elif error.part == 5:
                msg = '{0} CIGAR string has characters not in ' \
                      '[0-9MIDNSHPX=]'.format(intro)
            elif error.part == 6:
                msg = '{0} mate read name has characters not in ' \
                      '[!-()+-<>-~][!-~]'.format(intro)
            elif error.part == 7:
                msg = '{0} mate read position not in range ' \
                      '[0-(2^31-1)]'.format(intro)
            elif error.part == 8:
                msg = '{0} template length not in range ' \
                      '[(-2^31+1)-(2^31-1)]'.format(intro)
            elif error.part == 9:
                msg = '{0} sequence has characters not in ' \
                      '[A-Za-z=.]'.format(intro)
            elif error.part == 10:
                msg = '{0} quality scores has characters not in ' \
                      '[!-~]'.format(intro)
            else:
                msg = '{0}: Unknown Error: Likely a Bug'.format(intro)

            raise FormatError(message=msg)

        if line:
            line += 1
1,083,485
Determine if a feature's position overlaps with the entry Args: feature (class): GFF3Entry object stranded (bool): if True, features must be on the same strand to count as overlapping [default: False] Returns: bool: True if features overlap, else False
def overlap(self, feature, stranded: bool=False):
    # Allow features to overlap on different strands unless stranded is set
    feature_strand = feature.strand
    strand = self.strand
    if stranded and ((strand == '.') or
                     (strand == '+' and feature_strand in ['-', '.']) or
                     (strand == '-' and feature_strand in ['+', '.'])):
        return False

    iv_1 = set(range(feature.start, feature.end + 1))
    iv_2 = set(range(self.start, self.end + 1))
    if len(iv_1.intersection(iv_2)) > 0:
        return True
    else:
        return False
1,083,672
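The set-based check above materializes every coordinate of both intervals, so its cost grows with feature length. The same answer follows from a constant-time comparison of the endpoints; a minimal standalone sketch, using hypothetical start/end arguments rather than GFF3Entry objects:

def intervals_overlap(start1, end1, start2, end2):
    # Closed intervals [start1, end1] and [start2, end2] overlap exactly
    # when each one starts no later than the other one ends.
    return start1 <= end2 and start2 <= end1

assert intervals_overlap(5, 10, 8, 20)
assert not intervals_overlap(5, 10, 11, 20)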
Determines the executor for the code in `elem.text`. The elem attributes and classes select the executor in this order (highest to lowest):
- custom commands (cmd=...)
- runas (runas=...) takes a key for the executors
- first element class (.class) determines language and thus executor
Args: elem The AST element. doc The document. Returns: The command to execute code.
def select_executor(elem, doc):
    executor = EXECUTORS['default']
    if 'cmd' in elem.attributes.keys():
        executor = elem.attributes['cmd']
    elif 'runas' in elem.attributes.keys():
        executor = EXECUTORS[elem.attributes['runas']]
    elif elem.classes[0] != 'exec':
        executor = EXECUTORS[elem.classes[0]]
    return executor
1,083,709
Executes a code block by passing it to the executor. Args: elem The AST element. doc The document. Returns: The output of the command.
def execute_code_block(elem, doc):
    command = select_executor(elem, doc).split(' ')
    code = elem.text
    if 'plt' in elem.attributes or 'plt' in elem.classes:
        code = save_plot(code, elem)
    command.append(code)
    if 'args' in elem.attributes:
        for arg in elem.attributes['args'].split():
            command.append(arg)
    cwd = elem.attributes['wd'] if 'wd' in elem.attributes else None
    return subprocess.run(command,
                          encoding='utf8',
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT,
                          cwd=cwd).stdout
1,083,710
Executes code blocks for a python shell. Parses the code in `elem.text` into blocks and executes them. Args: elem The AST element. doc The document. Return: The code with inline results.
def execute_interactive_code(elem, doc):
    code_lines = [l[4:] for l in elem.text.split('\n')]
    code_blocks = [[code_lines[0]]]
    for line in code_lines[1:]:
        if line.startswith(' ') or line == '':
            code_blocks[-1].append(line)
        else:
            code_blocks.append([line])

    final_code = []
    try:
        child = replwrap.REPLWrapper("python", ">>> ", None)
    except NameError:
        pf.debug('Can not run interactive session. No output produced '
                 + '(Code was:\n{!s}\n)'.format(elem))
        pf.debug('Please pip install pexpect.')
        return ''
    for code_block in code_blocks:
        result = child.run_command('\n'.join(code_block) + '\n').rstrip('\r\n')
        final_code += [('>>> ' if i == 0 else '... ') + l
                       for i, l in enumerate(code_block)]
        if result:
            final_code += [r for r in result.split('\n')
                           if r.strip() not in code_block]
    return '\n'.join(final_code)
1,083,711
Reads a file which matches the pattern `filename`. Args: filename The filename pattern Returns: The file content or the empty string, if the file is not found.
def read_file(filename):
    hits = glob.glob('**/{}'.format(filename), recursive=True)
    if not len(hits):
        pf.debug('No file "{}" found.'.format(filename))
        return ''
    elif len(hits) > 1:
        pf.debug('File pattern "{}" ambiguous. Using first.'.format(filename))
    with open(hits[0], 'r') as f:
        return f.read()
1,083,712
Removes lines with import statements from the code. Args: code: The code to be stripped. Returns: The code without import statements.
def remove_import_statements(code):
    new_code = []
    for line in code.splitlines():
        if not line.lstrip().startswith('import ') and \
                not line.lstrip().startswith('from '):
            new_code.append(line)

    while new_code and new_code[0] == '':
        new_code.pop(0)
    while new_code and new_code[-1] == '':
        new_code.pop()

    return '\n'.join(new_code)
1,083,714
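A small usage sketch of the stripper above; the expected output follows from the implementation (blank lines left at the edges by removed imports are dropped as well):

code = 'import os\n\nfrom sys import path\nprint(path)\n'
print(remove_import_statements(code))
# prints just: print(path)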
Converts matplotlib plots to tikz code. If elem has either the plt attribute (format: plt=width,height) or the attributes width=width and/or height=height, the figure width and height are set accordingly. If none are given, a height of 4cm and a width of 6cm are used by default. Args: code: The matplotlib code. elem: The element. Returns: The code plus some code to invoke matplotlib2tikz.
def save_plot(code, elem):
    if 'plt' in elem.attributes:
        figurewidth, figureheight = elem.attributes['plt'].split(',')
    else:
        try:
            figureheight = elem.attributes['height']
        except KeyError:
            figureheight = '4cm'
        try:
            figurewidth = elem.attributes['width']
        except KeyError:
            figurewidth = '6cm'
    # NOTE: the original f-string template was truncated in the source; the
    # return value below is an assumed reconstruction that appends a
    # matplotlib2tikz call using the computed figure dimensions.
    return f'''{code}
from matplotlib2tikz import save as tikz_save
tikz_save('plot.tikz', figureheight='{figureheight}', figurewidth='{figurewidth}')'''
1,083,715
Simplifies the given path. If pathdepth is in attributes, the last pathdepth elements will be returned. If pathdepth is "full", the full path will be returned. Otherwise the filename only will be returned. Args: attributes: The element attributes. Returns: The trimmed path.
def trimpath(attributes):
    if 'pathdepth' in attributes:
        if attributes['pathdepth'] != 'full':
            pathelements = []
            remainder = attributes['file']
            limit = int(attributes['pathdepth'])
            while len(pathelements) < limit and remainder:
                remainder, pe = os.path.split(remainder)
                pathelements.insert(0, pe)
            return os.path.join(*pathelements)
        return attributes['file']
    return os.path.basename(attributes['file'])
1,083,716
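For illustration, assuming a POSIX path (hypothetical values, not from the original source):

attrs = {'file': 'src/pkg/module/helper.py'}
print(trimpath(attrs))                          # helper.py
print(trimpath(dict(attrs, pathdepth='2')))     # module/helper.py
print(trimpath(dict(attrs, pathdepth='full')))  # src/pkg/module/helper.py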
Embeds a possible tikz image inside a center environment. Searches for matplotlib2tikz's last comment line to detect tikz images. Args: result: The code execution result. Returns: The input result if no tikzpicture was found, otherwise a centered version.
def maybe_center_plot(result):
    begin = re.search('(% .* matplotlib2tikz v.*)', result)
    if begin:
        result = ('\\begin{center}\n'
                  + result[begin.end():]
                  + '\n\\end{center}')
    return result
1,083,719
Processes pf.CodeBlocks. For details and a specification of how each command should behave, check the example files (especially the md and pdf)! Args: elem: The element to process. doc: The document. Returns: A changed element or None.
def action(elem, doc):  # noqa
    if isinstance(elem, pf.CodeBlock):
        doc.listings_counter += 1
        elems = [elem] if 'hide' not in elem.classes else []

        if 'file' in elem.attributes:
            elem.text = read_file(elem.attributes['file'])
            filename = trimpath(elem.attributes)
            prefix = pf.Emph(pf.Str('File:'))

        if 'exec' in elem.classes:
            if 'interactive' in elem.classes or elem.text[:4] == '>>> ':
                elem.text = execute_interactive_code(elem, doc)
            else:
                result = execute_code_block(elem, doc)
                if 'hideimports' in elem.classes:
                    elem.text = remove_import_statements(elem.text)
                if 'plt' in elem.attributes or 'plt' in elem.classes:
                    doc.plot_found = True
                    result = maybe_center_plot(result)
                    block = pf.RawBlock(result, format='latex')
                else:
                    block = pf.CodeBlock(result, classes=['changelog'])
                elems += [pf.Para(pf.Emph(pf.Str('Output:'))), block]

        if 'lines' in elem.attributes:
            elem.text = filter_lines(elem.text, elem.attributes['lines'])

        label = elem.attributes.get('label', f'cl:{doc.listings_counter}')

        if 'caption' in elem.attributes.keys():
            doc.caption_found = True
            cap = pf.convert_text(elem.attributes['caption'], output_format='latex')  # noqa
            if 'shortcaption' in elem.attributes.keys():
                shortcap = pf.convert_text(elem.attributes['shortcaption'], output_format='latex')  # noqa
            else:
                shortcap = cap
            if 'file' in elem.attributes.keys():
                cap += pf.convert_text(f'&nbsp;(`{filename}`)', output_format='latex')  # noqa

            elems = make_codelisting(elems, cap, label, shortcaption=shortcap,
                                     above='capbelow' not in elem.classes)
        elif 'caption' in elem.classes:
            doc.caption_found = True
            cap = ''
            if 'file' in elem.attributes.keys():
                cap = pf.convert_text(f'`{filename}`', output_format='latex')
            elems = make_codelisting(elems, cap, label,
                                     above='capbelow' not in elem.classes)
        else:
            if 'file' in elem.attributes.keys():
                elems.insert(0, pf.Para(prefix, pf.Space, pf.Code(filename)))

        return elems
1,083,720
Parses bank CSV file and returns Transactions instance. Args: filename: Path to CSV file to read. format: CSV format; one of the entries in `elv.formats`. encoding: The CSV file encoding. Returns: A ``Transactions`` object.
def parse(filename, format=u"Jæren Sparebank", encoding="latin1"):
    Class = formats[format.lower()]

    if PY3:
        kw = {"encoding": encoding}
    else:
        kw = {}

    with open(filename, "rt", **kw) as f:
        return Class.csv_to_transactions(f)
1,083,984
Converts money amount in string to a Decimal object. With the default arguments, the format is expected to be ``-38.500,00``, where dots separate thousands and comma the decimals. Args: thousand_sep: Separator for thousands. decimal_sep: Separator for decimals. Returns: A ``Decimal`` object of the string encoded money amount.
def money(s, thousand_sep=".", decimal_sep=","):
    s = s.replace(thousand_sep, "")
    s = s.replace(decimal_sep, ".")
    return Decimal(s)
1,083,986
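A quick usage check, assuming the function above and the standard decimal module:

from decimal import Decimal

assert money('-38.500,00') == Decimal('-38500.00')
# Swapped separators handle US-style amounts as well:
assert money('1,234,567.89', thousand_sep=',', decimal_sep='.') == Decimal('1234567.89')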
Return a ``Transactions`` object in an inclusive date range. Args: start_date: A ``datetime.Date`` object that marks the inclusive start date for the range. stop_date: A ``datetime.Date`` object that marks the inclusive end date for the range. field: The field to compare start and end dates to. Default is the ``xfer`` field. Returns: A ``Transactions`` object.
def range(self, start_date=None, stop_date=None, field=lambda x: x.xfer):
    # Only compare the bounds when both are given; the defaults are None.
    if start_date is not None and stop_date is not None:
        assert start_date <= stop_date, \
            "Start date must be earlier than end date."

    out = Transactions()
    for t in self.trans:
        date = field(t)
        if (start_date is not None) and not (date >= start_date):
            continue
        if (stop_date is not None) and not (date <= stop_date):
            continue
        out.append(t)
    return out
1,084,000
Route ufunc execution intelligently to local host or remote engine(s) depending on where the inputs are, to minimize the need to move data. Args: see numpy documentation for __numpy_ufunc__
def _ufunc_dispatch(ufunc, method, i, inputs, **kwargs):
    #__print_ufunc(ufunc, method, i, inputs, **kwargs)
    if 'out' in kwargs and kwargs['out'] is not None:
        raise Error('for distributed ufuncs `out=` is not yet implemented')
    nin = 2 if ufunc is np.dot else ufunc.nin
    if nin == 1 and method == '__call__':
        return vectorize(ufunc.__call__)(inputs[0], **kwargs)
    elif nin == 2 and method == '__call__':
        from distob import engine
        here = engine.eid
        # Choose best location for the computation, possibly distributed:
        locs, weights = zip(*[_engine_affinity(a) for a in inputs])
        # for DistArrays, adjust preferred distaxis to account for broadcasting
        bshape = _broadcast_shape(*inputs)
        locs = list(locs)
        for i, loc in enumerate(locs):
            if isinstance(loc, _TupleType):
                num_new_axes = len(bshape) - inputs[i].ndim
                if num_new_axes > 0:
                    locs[i] = (locs[i][0], locs[i][1] + num_new_axes)
        if ufunc is np.dot:
            locs = [here if isinstance(m, _TupleType) else m for m in locs]
        if locs[0] == locs[1]:
            location = locs[0]
        else:
            # TODO: More accurately penalize the increased data movement if we
            # choose to distribute an axis that requires broadcasting.
            smallest = 0 if weights[0] <= weights[1] else 1
            largest = 1 - smallest
            if locs[0] == here or locs[1] == here:
                location = here if weights[0] == weights[1] else locs[largest]
            else:
                # Both inputs are on remote engines. With the current
                # implementation, data on one remote engine can only be moved
                # to another remote engine via the client. Cost accordingly:
                if weights[smallest]*2 < weights[largest] + weights[smallest]:
                    location = locs[largest]
                else:
                    location = here
        # Move both inputs to the chosen location:
        inputs = [_ufunc_move_input(a, location, bshape) for a in inputs]
        # Execute computation:
        if location == here:
            return ufunc.__call__(inputs[0], inputs[1], **kwargs)
        else:
            if isinstance(location, numbers.Integral):
                # location is a single remote engine
                return call(ufunc.__call__, inputs[0], inputs[1], **kwargs)
            else:
                # location is a tuple (list of engine ids, distaxis) implying
                # that the moved inputs are now distributed arrays (or scalar)
                engine_ids, distaxis = location
                n = len(engine_ids)
                is_dist = tuple(isinstance(a, DistArray) for a in inputs)
                assert(is_dist[0] or is_dist[1])
                for i in 0, 1:
                    if is_dist[i]:
                        ndim = inputs[i].ndim
                        assert(inputs[i]._distaxis == distaxis)
                        assert(inputs[i]._n == n)
                def _remote_ucall(inputs, **kwargs):
                    return ufunc.__call__(inputs[0], inputs[1], **kwargs)
                results = []
                kwargs = kwargs.copy()
                kwargs['block'] = False
                kwargs['prefer_local'] = False
                for j in range(n):
                    subinputs = tuple(inputs[i]._subarrays[j] if is_dist[i]
                                      else inputs[i] for i in (0, 1))
                    results.append(call(_remote_ucall, subinputs, **kwargs))
                results = [convert_result(ar) for ar in results]
                return DistArray(results, distaxis)
    elif ufunc.nin > 2:
        raise Error(u'Distributing ufuncs with >2 inputs is not yet supported')
    else:
        raise Error(u'Distributed ufunc.%s() is not yet implemented' % method)
1,084,077
Returns a view of the array with axes transposed. For a 1-D array, this has no effect. For a 2-D array, this is the usual matrix transpose. For an n-D array, if axes are given, their order indicates how the axes are permuted Args: a (array_like): Input array. axes (list of int, optional): By default, reverse the dimensions, otherwise permute the axes according to the values given.
def transpose(a, axes=None):
    if isinstance(a, np.ndarray):
        return np.transpose(a, axes)
    elif isinstance(a, RemoteArray):
        return a.transpose(*axes)
    elif isinstance(a, Remote):
        return _remote_to_array(a).transpose(*axes)
    elif isinstance(a, DistArray):
        if axes is None:
            axes = range(a.ndim - 1, -1, -1)
        axes = list(axes)
        if len(set(axes)) < len(axes):
            raise ValueError("repeated axis in transpose")
        if sorted(axes) != list(range(a.ndim)):
            raise ValueError("axes don't match array")
        distaxis = a._distaxis
        new_distaxis = axes.index(distaxis)
        new_subarrays = [ra.transpose(*axes) for ra in a._subarrays]
        return DistArray(new_subarrays, new_distaxis)
    else:
        return np.transpose(a, axes)
1,084,078
Roll the specified axis backwards, until it lies in a given position. Args: a (array_like): Input array. axis (int): The axis to roll backwards. The positions of the other axes do not change relative to one another. start (int, optional): The axis is rolled until it lies before this position. The default, 0, results in a "complete" roll. Returns: res (ndarray)
def rollaxis(a, axis, start=0):
    if isinstance(a, np.ndarray):
        return np.rollaxis(a, axis, start)
    if axis not in range(a.ndim):
        raise ValueError(
            'rollaxis: axis (%d) must be >=0 and < %d' % (axis, a.ndim))
    if start not in range(a.ndim + 1):
        raise ValueError(
            'rollaxis: start (%d) must be >=0 and < %d' % (start, a.ndim + 1))
    axes = list(range(a.ndim))
    axes.remove(axis)
    axes.insert(start, axis)
    return transpose(a, axes)
1,084,079
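For local numpy arrays the function simply delegates to numpy.rollaxis, so the shapes behave exactly as in the numpy documentation:

import numpy as np

a = np.zeros((3, 4, 5, 6))
print(np.rollaxis(a, 2).shape)     # (5, 3, 4, 6)
print(np.rollaxis(a, 1, 4).shape)  # (3, 5, 6, 4)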
Insert a new axis, corresponding to a given position in the array shape Args: a (array_like): Input array. axis (int): Position (amongst axes) where new axis is to be inserted.
def expand_dims(a, axis):
    if hasattr(a, 'expand_dims') and hasattr(type(a), '__array_interface__'):
        return a.expand_dims(axis)
    else:
        return np.expand_dims(a, axis)
1,084,080
Stack arrays in sequence vertically (row wise), handling ``RemoteArray`` and ``DistArray`` without moving data. Args: tup (sequence of array_like) Returns: res: `ndarray`, if inputs were all local `RemoteArray`, if inputs were all on the same remote engine `DistArray`, if inputs were already scattered on different engines
def vstack(tup):
    # Follow numpy.vstack behavior for 1D arrays:
    arrays = list(tup)
    for i in range(len(arrays)):
        if arrays[i].ndim == 1:
            arrays[i] = arrays[i][np.newaxis, :]
    return concatenate(arrays, axis=0)
1,084,083
Stack arrays in sequence horizontally (column wise), handling ``RemoteArray`` and ``DistArray`` without moving data. Args: tup (sequence of array_like) Returns: res: `ndarray`, if inputs were all local `RemoteArray`, if inputs were all on the same remote engine `DistArray`, if inputs were already scattered on different engines
def hstack(tup):
    # Follow numpy.hstack behavior for 1D arrays:
    if all(ar.ndim == 1 for ar in tup):
        return concatenate(tup, axis=0)
    else:
        return concatenate(tup, axis=1)
1,084,084
Stack arrays in sequence depth wise (along third dimension), handling ``RemoteArray`` and ``DistArray`` without moving data. Args: tup (sequence of array_like) Returns: res: `ndarray`, if inputs were all local `RemoteArray`, if inputs were all on the same remote engine `DistArray`, if inputs were already scattered on different engines
def dstack(tup):
    # Follow numpy.dstack behavior for 1D and 2D arrays:
    arrays = list(tup)
    for i in range(len(arrays)):
        if arrays[i].ndim == 1:
            arrays[i] = arrays[i][np.newaxis, :]
        if arrays[i].ndim == 2:
            arrays[i] = arrays[i][:, :, np.newaxis]
    return concatenate(arrays, axis=2)
1,084,085
Insert a new axis, at a given position in the array shape Args: axis (int): Position (amongst axes) where new axis is to be inserted.
def expand_dims(self, axis):
    if axis == -1:
        axis = self.ndim
    if axis <= self._distaxis:
        subaxis = axis
        new_distaxis = self._distaxis + 1
    else:
        subaxis = axis - 1
        new_distaxis = self._distaxis
    new_subarrays = [expand_dims(ra, subaxis) for ra in self._subarrays]
    return DistArray(new_subarrays, new_distaxis)
1,084,101
True constructor, which really initializes the :class:`HTMLElement`. This is the function where all the preprocessing happens. Args: tag (str): HTML tag as string.
def _init_tag(self, tag):
    self._element = tag

    self._parseIsTag()
    self._parseIsComment()

    if not self._istag or self._iscomment:
        self._tagname = self._element
    else:
        self._parseTagName()

    if self._iscomment or not self._istag:
        return

    self._parseIsEndTag()
    self._parseIsNonPairTag()

    if self._istag and (not self._isendtag) or "=" in self._element:
        self._parseParams()
1,084,205
Alternative constructor used when the tag parameters are added to the HTMLElement (HTMLElement(tag, params)). This method just creates a string and then passes it to :meth:`_init_tag`. Args: tag (str): HTML tag as string. params (dict): HTML tag parameters as dictionary.
def _init_tag_params(self, tag, params):
    self._element = tag
    self.params = params
    self._parseTagName()
    self._istag = True
    self._isendtag = False
    self._isnonpairtag = False
    self._element = self.tagToString()
1,084,206
True if element is listed in nonpair tag table (``br`` for example) or if it ends with ``/>`` (``<hr />`` for example). You can also change state from pair to nonpair if you use this as setter. Args: isnonpair (bool, default None): If set, internal nonpair state is changed. Returns: bool: True if tag is nonpair.
def isNonPairTag(self, isnonpair=None):
    if isnonpair is None:
        return self._isnonpairtag

    if not self._istag:
        return

    if isnonpair:
        self.endtag = None
        self.childs = []

    self._isnonpairtag = isnonpair
1,084,212
Retrieve Token details for a specific Token. Args: token: The identifier of the token. Returns: The token details from the API response.
def retrieve_token(self, token):
    headers = self.client._get_private_headers()
    endpoint = '/tokens/{}'.format(token)
    return self.client._get(self.client.URL_BASE + endpoint, headers=headers)
1,084,472
Perform particle swarm optimization of the given fitness function. Args: objective_function: the cost function to optimize. domain: the search domain of the problem. stopping_condition: function specifying the stopping condition. parameters: parameter dictionary for the PSO. Returns: tuple: The global best particle and the collected measurement results.
def optimize(objective_function, domain,
             stopping_condition, parameters=None,
             position_update=functions.std_position,
             velocity_update=functions.std_velocity,
             parameter_update=functions.std_parameter_update,
             measurements=(),
             measurer=dictionary_based_metrics):
    params = __init_parameters__(parameters)

    rng = np.random.RandomState(params['seed'])

    initial_swarm = [functions.initialize_particle(rng, domain,
                                                   objective_function)
                     for i in range(params['swarm_size'])]
    state = types.PSOState(rng, params, iterations=0, swarm=initial_swarm)

    topology_function = state.params['topology']
    update_fitness = functions.update_fitness
    update_particle = functions.update_particle

    results, measure = measurer(measurements)
    while not stopping_condition(state):
        n_bests = topology_function(state)

        state = state._replace(swarm=[update_particle(position_update,
                                                      velocity_update,
                                                      state, n_bests, ip)
                                      for ip in enumerate(state.swarm)])

        state = state._replace(swarm=[update_fitness(objective_function,
                                                     particle)
                                      for particle in state.swarm],
                               iterations=state.iterations + 1)

        state = parameter_update(state, objective_function)

        results = measure(results, state)

    return functions.solution(state.swarm), results
1,085,054
Look up all the lists that the user is a member of. Args: username (str): The MIT username of the user member_type(str): The type of user, "USER" or "STRING" Returns: list of strings: names of the lists that this user is a member of
def user_lists(self, username, member_type="USER"):
    return self.client.service.getUserLists(username, member_type,
                                            self.proxy_id)
1,085,064
Look up all the members of a list. Args: name (str): The name of the list type (str): The type of results to return. "USER" to get users, "LIST" to get lists. recurse (bool): Presumably, whether to recurse into member lists when retrieving users. max_results (int): Maximum number of results to return. Returns: list of strings: names of the members of the list
def list_members(self, name, type="USER", recurse=True, max_results=1000):
    results = self.client.service.getListMembership(
        name, type, recurse, max_results, self.proxy_id,
    )
    return [item["member"] for item in results]
1,085,066
Look up the attributes of a list. Args: name (str): The name of the list Returns: dict: attributes of the list
def list_attributes(self, name):
    result = self.client.service.getListAttributes(name, self.proxy_id)
    if isinstance(result, list) and len(result) == 1:
        return result[0]
    return result
1,085,067
Add a member to an existing list. Args: username (str): The username of the user to add listname (str): The name of the list to add the user to member_type (str): Normally, this should be "USER". If you are adding a list as a member of another list, set this to "LIST", instead.
def add_member_to_list(self, username, listname, member_type="USER"):
    return self.client.service.addMemberToList(
        listname, username, member_type, self.proxy_id
    )
1,085,068
Parse text in cartouche format and return a reStructuredText equivalent Args: lines: A sequence of strings representing the lines of a single docstring as read from the source by Sphinx. This string should be in a format that can be parsed by cartouche. Returns: A list of lines containing the transformed docstring as reStructuredText as produced by cartouche. Raises: RuntimeError: If the docstring cannot be parsed.
def parse_cartouche_text(lines):
    indent_lines = unindent(lines)
    indent_lines = pad_blank_lines(indent_lines)
    indent_lines = first_paragraph_indent(indent_lines)
    indent_paragraphs = gather_lines(indent_lines)
    parse_tree = group_paragraphs(indent_paragraphs)
    syntax_tree = extract_structure(parse_tree)
    result = syntax_tree.render_rst()
    ensure_terminal_blank(result)
    return result
1,085,128
Give blank (empty) lines the same indent level as the preceding line. Args: indent_texts: An iterable of tuples each containing an integer in the first element and a string in the second element. Returns: A list of tuples each containing an integer in the first element and a string in the second element.
def pad_blank_lines(indent_texts):
    current_indent = 0
    result = []
    for indent, text in indent_texts:
        if len(text) > 0:
            current_indent = indent
        result.append((current_indent, text))
    return result
1,085,130
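A short example of the padding behaviour (hypothetical input):

lines = [(4, 'Args:'), (0, ''), (8, 'x: An argument.')]
print(pad_blank_lines(lines))
# [(4, 'Args:'), (4, ''), (8, 'x: An argument.')]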
Parse the first line of a Cartouche exception description. Args: line (str): A single line Cartouche exception description. Returns: A 2-tuple containing the exception type and the first line of the description.
def parse_exception(line):
    m = RAISES_REGEX.match(line)
    if m is None:
        raise CartoucheSyntaxError('Cartouche: Invalid argument syntax '
                                   '"{line}" for Raises block'.format(line=line))
    return m.group(2), m.group(1)
1,085,143
Determine the opening indent level for a docstring. The opening indent level is the first non-zero indent level of a non-empty line in the docstring. Args: indent_texts: The lines of the docstring as an iterable over 2-tuples each containing an integer indent level as the first element and the text as the second element. Returns: The opening indent level as an integer.
def determine_opening_indent(indent_texts):
    num_lines = len(indent_texts)

    if num_lines < 1:
        return 0

    assert num_lines >= 1

    first_line_indent = indent_texts[0][0]

    if num_lines == 1:
        return first_line_indent

    assert num_lines >= 2

    second_line_indent = indent_texts[1][0]
    second_line_text = indent_texts[1][1]

    if len(second_line_text) == 0:
        return first_line_indent

    return second_line_indent
1,085,152
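Two illustrative calls (hypothetical inputs):

print(determine_opening_indent([(0, 'Summary.'), (4, 'Details.')]))  # 4
print(determine_opening_indent([(0, 'Summary.')]))                   # 0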
Test whether this element contains at least all `params`, or more. Args: params (dict/SpecialDict): Subset of parameters. Returns: bool: True if all `params` are contained in this element.
def containsParamSubset(self, params):
    for key in params.keys():
        if key not in self.params:
            return False

        if params[key] != self.params[key]:
            return False

    return True
1,085,267
Emit signal by calling all connected slots. The arguments supplied have to match the signal definition. Args: kwargs: Keyword arguments to be passed to connected slots. Raises: :exc:`InvalidEmit`: If arguments don't match signal specification.
def emit(self, **kwargs):
    self._ensure_emit_kwargs(kwargs)
    for slot in self.slots:
        slot(**kwargs)
1,085,455
Connect ``slot`` to this signal. Args: slot (callable): Callable object which accepts keyword arguments. Raises: InvalidSlot: If ``slot`` doesn't accept keyword arguments.
def connect(self, slot):
    self._ensure_slot_args(slot)

    if not self.is_connected(slot):
        self.slots.append(slot)
1,085,457
Raises an error if an invalid GFF3 format is detected. Args: entries (list): A list of GFF3Entry instances. line (int): Line number of the first entry. Raises: FormatError: Raised with a descriptive message when the GFF3 format is incorrect.
def gff3_verifier(entries, line=None):
    regex = r'^[a-zA-Z0-9.:^*$@!+_?-|]+\t.+\t.+\t\d+\t\d+\t' \
            + r'\d*\.?\d*\t[+-.]\t[.0-2]\t.+{0}$'.format(os.linesep)
    delimiter = r'\t'

    for entry in entries:
        try:
            entry_verifier([entry.write()], regex, delimiter)
        except FormatError as error:
            # Format info on what entry error came from
            if line:
                intro = 'Line {0}'.format(str(line))
            elif error.part == 0:
                intro = 'Entry with source {0}'.format(entry.source)
            else:
                intro = 'Entry with Sequence ID {0}'.format(entry.seqid)

            # Generate error
            if error.part == 0:
                msg = '{0} has no Sequence ID'.format(intro)
            elif error.part == 1:
                msg = '{0} has no source'.format(intro)
            elif error.part == 2:
                msg = '{0} has non-numerical characters in type'.format(intro)
            elif error.part == 3:
                msg = '{0} has non-numerical characters in ' \
                      'start position'.format(intro)
            elif error.part == 4:
                msg = '{0} has non-numerical characters in ' \
                      'end position'.format(intro)
            elif error.part == 5:
                msg = '{0} has non-numerical characters in score'.format(intro)
            elif error.part == 6:
                msg = '{0} strand not in [+-.]'.format(intro)
            elif error.part == 7:
                msg = '{0} phase not in [.0-2]'.format(intro)
            elif error.part == 8:
                msg = '{0} has no attributes'.format(intro)
            else:
                msg = 'Unknown Error: Likely a Bug'

            raise FormatError(message=msg)

        if line:
            line += 1
1,085,793
Extract information about spatial resources from an environment file. Arguments: filename - a string representing the path to the environment file. world_size - a tuple representing the x and y coordinates of the world. (default: 60x60) Returns a list of lists of sets indicating the set of resources available at each x,y location in the Avida grid.
def parse_environment_file(filename, world_size=(60, 60)):
    infile = open(filename)
    lines = infile.readlines()
    infile.close()

    tasks = []

    # Find all spatial resources and record which cells they're in
    res_order = []
    res_dict = {}
    for line in lines:
        if line.startswith("GRADIENT_RESOURCE"):
            name, cells = parse_gradient(line, world_size)
        elif line.startswith("CELL"):
            name, cells = parse_cell(line, world_size)
        elif line.startswith("REACTION"):
            task = parse_reaction(line)
            if task not in tasks:
                tasks.append(task)
            continue  # REACTION lines define no spatial resource to record
        else:
            continue

        dict_increment(res_dict, name, cells)
        if name not in res_order:
            res_order.append(name)

    # Create a map of niches across the environment and return it
    grid = make_niche_grid(res_dict, world_size)

    return EnvironmentFile(grid, res_order, world_size, filename, tasks)
1,086,155
Remove all non-digit and non "x" characters from given string. Args: isbn (str): isbn string, which will be cleaned. Returns: list: array of numbers (if "x" is found, it is converted to 10).
def _clean_isbn(isbn):
    if isinstance(isbn, basestring):
        isbn = list(isbn.lower())

    # filter digits and "x"
    isbn = filter(lambda x: x.isdigit() or x == "x", isbn)

    # convert ISBN to numbers
    return map(lambda x: 10 if x == "x" else int(x), isbn)
1,086,272
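The function above is Python 2 (``basestring``; ``filter``/``map`` return lists). A rough Python 3 rendering of the same cleaning step, as a sketch for comparison only:

def _clean_isbn_py3(isbn):
    if isinstance(isbn, str):
        isbn = list(isbn.lower())
    # keep digits and the check character 'x'
    isbn = [c for c in isbn if c.isdigit() or c == 'x']
    # 'x' stands for the value 10 in ISBN-10 check digits
    return [10 if c == 'x' else int(c) for c in isbn]

assert _clean_isbn_py3('80-251-0225-X') == [8, 0, 2, 5, 1, 0, 2, 2, 5, 10]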
Decorator for calling other functions from this module. The purpose of this decorator is to clean the ISBN string of garbage and pass on the resulting list of digits. Args: fn (function): function whose ``isbn`` argument will be wrapped with a :func:`_clean_isbn` call.
def _isbn_cleaner(fn):
    @wraps(fn)
    def wrapper(isbn):
        return fn(_clean_isbn(isbn))
    return wrapper
1,086,273
Validate given `isbn`. Wrapper for :func:`is_isbn10_valid`/ :func:`is_isbn13_valid`. Args: isbn (str/list): ISBN number as string or list of digits. Note: Function doesn't require `isbn` type to be specified (it can be both 10/13 isbn's versions). Returns: bool: ``True`` if ISBN is valid.
def is_valid_isbn(isbn):
    length = len(isbn)

    if length == 10:
        return is_isbn10_valid(isbn)
    elif length == 13:
        return is_isbn13_valid(isbn)

    return False
1,086,275
Guaranteed convergence velocity update. Args: particle: cipy.algorithms.pso.Particle: Particle to update the velocity for. social: cipy.algorithms.pso.Particle: The social best for the particle. state: cipy.algorithms.pso.State: The state of the PSO algorithm. Returns: numpy.ndarray: the calculated velocity.
def gc_velocity_update(particle, social, state):
    gbest = state.swarm[gbest_idx(state.swarm)].position
    if not np.array_equal(gbest, particle.position):
        return std_velocity(particle, social, state)

    rho = state.params['rho']
    inertia = state.params['inertia']
    v_max = state.params['v_max']

    size = particle.position.size
    r2 = state.rng.uniform(0.0, 1.0, size)

    velocity = __gc_velocity_equation__(inertia, rho, r2, particle, gbest)
    return __clamp__(velocity, v_max)
1,086,736
Initializes a particle within a domain. Args: rng: numpy.random.RandomState: The random number generator. domain: cipy.problems.core.Domain: The domain of the problem. fitness_function: callable used to evaluate the particle's initial position. Returns: cipy.algorithms.pso.Particle: A new, fully initialized particle.
def initialize_particle(rng, domain, fitness_function):
    position = rng.uniform(domain.lower, domain.upper, domain.dimension)
    fitness = fitness_function(position)
    return Particle(position=position,
                    velocity=np.zeros(domain.dimension),
                    fitness=fitness,
                    best_fitness=fitness,
                    best_position=position)
1,086,738
Calculates and updates the fitness and best_fitness of a particle. Fitness is calculated using the given objective function. Args: objective_function: The fitness function of the optimization problem. particle: cipy.algorithms.pso.Particle: Particle to update the fitness for. Returns: cipy.algorithms.pso.Particle: A new particle with the updated fitness.
def update_fitness(objective_function, particle):
    fitness = objective_function(particle.position)
    best_fitness = particle.best_fitness
    cmp = comparator(fitness)
    if best_fitness is None or cmp(fitness, best_fitness):
        best_position = particle.position
        return particle._replace(fitness=fitness,
                                 best_fitness=fitness,
                                 best_position=best_position)
    else:
        return particle._replace(fitness=fitness)
1,086,739
gbest Neighbourhood topology function. Args: swarm: list: The list of particles. Returns: int: The index of the gbest particle.
def gbest_idx(swarm):
    best = 0
    cmp = comparator(swarm[best].best_fitness)
    for (idx, particle) in enumerate(swarm):
        if cmp(particle.best_fitness, swarm[best].best_fitness):
            best = idx
    return best
1,086,742
lbest Neighbourhood topology function. Neighbourhood size is determined by state.params['n_s']. Args: state: cipy.algorithms.pso.State: The state of the PSO algorithm. idx: int: index of the particle in the swarm. Returns: int: The index of the lbest particle.
def lbest_idx(state, idx):
    swarm = state.swarm
    n_s = state.params['n_s']
    cmp = comparator(swarm[0].best_fitness)
    indices = __lbest_indices__(len(swarm), n_s, idx)
    best = None
    for i in indices:
        if best is None or cmp(swarm[i].best_fitness,
                               swarm[best].best_fitness):
            best = i
    return best
1,086,743
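``__lbest_indices__`` is not shown in this row; in a conventional ring topology it would yield the ``n_s`` indices centred on ``idx``, wrapping at the swarm boundaries. A hypothetical sketch of such a helper:

def ring_indices(swarm_size, n_s, idx):
    # n_s neighbour indices centred on idx, wrapping around the ends
    offset = n_s // 2
    return [(idx + i) % swarm_size for i in range(-offset, n_s - offset)]

print(ring_indices(10, 3, 0))  # [9, 0, 1]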
Determines the global best particle in the swarm. Args: swarm: iterable: an iterable that yields all particles in the swarm. Returns: cipy.algorithms.pso.Particle: The best particle in the swarm when comparing the best_fitness values of the particles.
def solution(swarm):
    best = swarm[0]
    cmp = comparator(best.best_fitness)
    for particle in swarm:
        if cmp(particle.best_fitness, best.best_fitness):
            best = particle
    return best
1,086,746
Serialize the given object into JSON. Args: obj: the object to be serialized. Returns: (str): JSON representation of the given object.
def serialize(obj):
    LOGGER.debug('serialize(%s)', obj)
    if isinstance(obj, datetime.date):
        return simplejson.dumps(obj, default=encoders.as_date)
    elif hasattr(obj, '__dict__'):
        return simplejson.dumps(obj, default=encoders.as_object)
    return simplejson.dumps(obj)
1,086,800
Deserialize a JSON string into a Python object. Args: json (str): the JSON string. cls (:py:class:`object`): if the ``json`` is deserialized into a ``dict`` and this argument is set, the ``dict`` keys are passed as keyword arguments to the given ``cls`` initializer. Returns: Python object representation of the given JSON string.
def deserialize(json, cls=None):
    LOGGER.debug('deserialize(%s)', json)
    out = simplejson.loads(json)
    if isinstance(out, dict) and cls is not None:
        return cls(**out)
    return out
1,086,801
Restore B6/M8 entry to original format Args: default (bool): output entry in default BLAST+ B6 format Returns: str: properly formatted string containing the B6/M8 entry
def write(self, default: bool=False):
    none_type = type(None)

    if default:  # Default order of format specifiers
        ordered_vals = ['query', 'subject', 'identity', 'length',
                        'mismatches', 'gaps', 'query_start', 'query_end',
                        'subject_start', 'subject_end', 'evalue', 'bitscore']
    else:  # Original order of B6 entry format specifiers
        try:
            ordered_vals = [self.custom_fs[i] if i in self.custom_fs
                            else getattr(self, i) for i in self.fs_order]
        except TypeError:
            ordered_vals = [getattr(self, i) for i in self.fs_order]

    # Format entry for writing
    fstr = "\t".join(['-' if type(i) == none_type else str(i)
                      for i in ordered_vals])
    return '{}{}'.format(fstr, os.linesep)
1,087,086
Function to verify if a file exists Args: file_name: The name of file to check file_location: The location of the file, derive from the os module Returns: returns boolean True or False
def verify_file_exists(file_name, file_location):
    return __os.path.isfile(__os.path.join(file_location, file_name))
1,087,377
Function to increment a filename with a number (1, 2, ...) until it no longer collides with an existing file. Args: file_name: The name of file to check file_location: The location of the file, derive from the os module Returns: returns a good filename.
def file_name_increase(file_name, file_location):
    add_one = 1
    file_name_temp = file_name
    while verify_file_exists(file_name_temp, file_location):
        try:
            name, file_extension = file_name.split('.')
            file_name_temp = '%s-%i.%s' % (name, add_one, file_extension)
        except Exception as e:
            LOGGER.critical('Function file_name_increase Error {error} '
                            'ignoring any errors'.format(error=e))
            name = file_name
            file_name_temp = '%s-%i' % (name, add_one)
        add_one += 1
    file_name = file_name_temp
    return file_name
1,087,378
Function to verify if a directory exists Args: directory_name: The name of directory to check directory_location: The location of the directory, derive from the os module directory_create: If you want to create the directory Returns: returns boolean True or False, but if you set directory_create to True it will create the directory
def verify_directory(directory_name, directory_location, directory_create=False):
    if not directory_create:
        return __os.path.exists(__os.path.join(directory_location,
                                               directory_name))
    elif directory_create:
        good = __os.path.exists(__os.path.join(directory_location,
                                               directory_name))
        if not good:
            __os.mkdir(__os.path.join(directory_location, directory_name))
1,087,379
Function to export a list to a text file Args: orig_list: The list you want exported file_name: The name of the exported file file_location: The location of the file, derive from the os module Returns: returns the filename info
def list_to_file(orig_list, file_name, file_location):
    file = __os.path.join(file_location, file_name)

    def add_line_break(list_line):
        list_line = ('%s\n' % (list_line,))
        return list_line

    write_file = open(file, "a")
    for orig_list_line in orig_list:
        write_file.write(add_line_break(str(orig_list_line)))
    write_file.close()
    return file_name
1,087,380
Function to import a text file to a list Args: file_name: The name of the file to be imported file_location: The location of the file, derive from the os module Returns: returns a list
def file_to_list(file_name, file_location):
    file = __os.path.join(file_location, file_name)
    read_file = open(file, "r")
    temp_list = read_file.read().splitlines()
    read_file.close()
    return temp_list
1,087,381
Function to import a csv as a dictionary Args: file_name: The name of the csv file file_location: The location of the file, derive from the os module Returns: returns a dictionary
def csv_to_dict(file_name, file_location):
    file = __os.path.join(file_location, file_name)
    try:
        csv_read = open(file, "r")
    except Exception as e:
        LOGGER.critical('Function csv_to_dict Error {error} '
                        'ignoring any errors'.format(error=e))
        print('Error {error} ignoring any errors'.format(error=e))
        csv_read = open(file, "r", errors='ignore')
    data_row = __csv.DictReader(csv_read, dialect="excel")
    dict_key = 1
    temp_dict = dict()
    for row in data_row:
        temp_dict[dict_key] = row
        dict_key += 1
    csv_read.close()
    return temp_dict
1,087,382
Function to export a dictionary to a csv file Args: orig_dict: The dictionary you want exported file_name: The name of the exported file field_names_tuple: The fieldnames in a tuple file_location: The location of the file, derive from the os module Returns: returns the filename info
def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):
    file = __os.path.join(file_location, file_name)
    csv_write = open(file, 'a')
    writer = __csv.DictWriter(csv_write, fieldnames=field_names_tuple,
                              lineterminator='\n')
    headers = dict((n, n) for n in field_names_tuple)
    writer.writerow(headers)
    for dict_key, a in list(orig_dict.items()):
        writer.writerow(orig_dict[dict_key])
    csv_write.close()
    return file_name
1,087,383
Function to store objects in a shelve Args: file_name: Shelve storage file name save_key: The name of the key to store the item to file_location: The location of the file, derive from the os module object_to_store: The object you want to store Returns:
def store_object(file_name, save_key, file_location, object_to_store=None):
    file = __os.path.join(file_location, file_name)
    try:
        shelve_store = __shelve.open(file)
    except Exception as e:
        LOGGER.critical('Function store_object Error {error} '
                        'ignoring any errors'.format(error=e))
        print('Bad storage dB, rebuilding!!')
        __os.remove(file)
        shelve_store = __shelve.open(file)
    shelve_store[save_key] = object_to_store
    shelve_store.close()
1,087,384
Function to retrieve objects from a shelve Args: file_name: Shelve storage file name save_key: The name of the key the item is stored in file_location: The location of the file, derive from the os module Returns: Returns the stored object
def retrieve_object_from_file(file_name, save_key, file_location):
    shelve_store = None
    file = __os.path.join(file_location, file_name)
    try:
        shelve_store = __shelve.open(file)
    except Exception as e:
        LOGGER.critical('Function retrieve_object_from_file Error {error} '
                        'ignoring any errors'.format(error=e))
        __sys.exit('Storage dB is not readable, closing App!!')
    stored_object = shelve_store.get(save_key)
    shelve_store.close()
    return stored_object
1,087,385
Function to delete objects from a shelve Args: file_name: Shelve storage file name save_key: The name of the key the item is stored in file_location: The location of the file, derive from the os module Returns:
def delete_object_from_file(file_name, save_key, file_location):
    file = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(file)
    del shelve_store[save_key]
    shelve_store.close()
1,087,386
Function to check for a key in a shelve Args: file_name: Shelve storage file name save_key: The name of the key the item is stored in file_location: The location of the file, derive from the os module Returns: returns true or false
def verify_key_in_shelve(file_name, save_key, file_location):
    file = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(file)
    exists = shelve_store.get(save_key)
    shelve_store.close()
    if exists:
        return True
    elif not exists:
        return False
1,087,387
Function to retrieve all keys in a shelve Args: file_name: Shelve storage file name file_location: The location of the file, derive from the os module Returns: a list of the keys
def get_keys_from_shelve(file_name, file_location):
    temp_list = list()
    file = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(file)
    for key in shelve_store:
        temp_list.append(key)
    shelve_store.close()
    return temp_list
1,087,388
Remove a symbol from a string, and replace it with a different one Args: string_item: String that you want to replace symbols in remove_symbol: Symbol to remove add_symbol: Symbol to add Returns: returns a string with symbols swapped
def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):
    string_item = add_symbol.join(string_item.split(remove_symbol))
    return string_item
1,087,389
List the files in a specified directory Args: full_directory_path: The full directory path to check, derive from the os module Returns: returns a list of files
def list_files_in_directory(full_directory_path):
    files = list()
    for file_name in __os.listdir(full_directory_path):
        if __os.path.isfile(__os.path.join(full_directory_path, file_name)):
            files.append(file_name)
    return files
1,087,390
List the directories in a specified directory Args: full_directory_path: The full directory path to check, derive from the os module Returns: returns a list of directories
def list_directories_in_directory(full_directory_path):
    directories = list()
    for directory_name in __os.listdir(full_directory_path):
        if __os.path.isdir(__os.path.join(full_directory_path,
                                          directory_name)):
            directories.append(directory_name)
    return directories
1,087,391
Function to create a line of a random string Args: chars_per_line: An integer that says how many characters to return Returns: A String
def random_line_data(chars_per_line=80):
    return ''.join(__random.choice(__string.ascii_letters)
                   for x in range(chars_per_line))
1,087,393
Function to create lines of random string data Args: line_count: An integer that says how many lines to return chars_per_line: An integer that says how many characters per line to return Returns: A String
def random_data(line_count=1, chars_per_line=80):
    divide_lines = chars_per_line * line_count
    return '\n'.join(random_line_data(chars_per_line)
                     for x in range(int(divide_lines / chars_per_line)))
1,087,394
Gets a country from its name Args: country_name: country name Returns: Country
def get_country_by_name(self, country_name: str) -> typing.Optional['Country']:
    VALID_STR.validate(country_name, 'get_country_by_name', exc=ValueError)
    if country_name not in self._countries_by_name.keys():
        for country in self.countries:
            if country.country_name == country_name:
                self._countries_by_name[country_name] = country
                return country
        raise ValueError(country_name)
    else:
        return self._countries_by_name[country_name]
1,087,723
Gets a country from its ID Args: country_id: country id Returns: Country
def get_country_by_id(self, country_id: int) -> typing.Optional['Country']:
    VALID_POSITIVE_INT.validate(country_id, 'get_country_by_id')
    if country_id not in self._countries_by_id.keys():
        for country in self.countries:
            if country.country_id == country_id:
                self._countries_by_id[country_id] = country
                return country
        raise ValueError(country_id)
    else:
        return self._countries_by_id[country_id]
1,087,724
Gets a group by id Args: group_id: group id Returns: Group
def get_group_by_id(self, group_id: int) -> typing.Optional['Group']:
    VALID_POSITIVE_INT.validate(group_id, 'get_group_by_id', exc=ValueError)
    for group in self.groups:
        if group.group_id == group_id:
            return group

    return None
1,087,725
Gets a group from its name Args: group_name: Returns: Group
def get_group_by_name(self, group_name: str) -> typing.Optional['Group']:
    VALID_STR.validate(group_name, 'get_group_by_name')
    for group in self.groups:
        if group.group_name == group_name:
            return group

    return None
1,087,727
Gets a unit from its name Args: unit_name: unit name Returns: Unit
def get_unit_by_name(self, unit_name: str) -> typing.Optional['BaseUnit']:
    VALID_STR.validate(unit_name, 'get_unit_by_name')
    for unit in self.units:
        if unit.unit_name == unit_name:
            return unit

    return None
1,087,728
Gets a unit from its ID Args: unit_id: unit id Returns: Unit
def get_unit_by_id(self, unit_id: int) -> typing.Optional['BaseUnit']:
    VALID_POSITIVE_INT.validate(unit_id, 'get_unit_by_id')
    for unit in self.units:
        if unit.unit_id == unit_id:
            return unit

    return None
1,087,729
Gets a country in this coalition by its name Args: country_name: country name Returns: Country
def get_country_by_name(self, country_name) -> 'Country':
    VALID_STR.validate(country_name, 'get_country_by_name', exc=ValueError)
    if country_name not in self._countries_by_name.keys():
        for country in self.countries:
            if country.country_name == country_name:
                return country
        raise ValueError(country_name)
    else:
        return self._countries_by_name[country_name]
1,087,750
Gets a country in this coalition by its ID Args: country_id: country Id Returns: Country
def get_country_by_id(self, country_id) -> 'Country':
    VALID_POSITIVE_INT.validate(country_id, 'get_country_by_id', exc=ValueError)
    if country_id not in self._countries_by_id.keys():
        for country in self.countries:
            if country.country_id == country_id:
                return country
        raise ValueError(country_id)
    else:
        return self._countries_by_id[country_id]
1,087,751
Prepare all iPython engines for distributed object processing. Args: client (ipyparallel.Client, optional): If None, will create a client using the default ipyparallel profile.
def setup_engines(client=None):
    if not client:
        try:
            client = ipyparallel.Client()
        except:
            # NOTE: the original message string was lost in extraction; this
            # wording is an assumed reconstruction.
            raise DistobClusterError(
                u'Could not connect to an ipyparallel cluster. Is a cluster '
                u'running? (e.g. start a local one with `ipcluster start`)')
    eids = client.ids
    if not eids:
        raise DistobClusterError(
            u'No ipyparallel compute engines are available')
    nengines = len(eids)
    dv = client[eids]
    dv.use_dill()
    with dv.sync_imports(quiet=True):
        import distob
    # create global ObjectEngine distob.engine on each engine
    ars = []
    for i in eids:
        dv.targets = i
        ars.append(dv.apply_async(_remote_setup_engine, i, nengines))
    dv.wait(ars)
    for ar in ars:
        if not ar.successful():
            raise ar.r
    # create global ObjectHub distob.engine on the client host
    if distob.engine is None:
        distob.engine = ObjectHub(-1, client)
1,088,015
Waits for and converts any AsyncResults. Converts any Ref into a Remote. Args: r: can be an ordinary object, ipyparallel.AsyncResult, a Ref, or a Sequence of objects, AsyncResults and Refs. Returns: either an ordinary object or a Remote instance
def convert_result(r):
    if (isinstance(r, collections.Sequence) and
            not isinstance(r, string_types)):
        rs = []
        for subresult in r:
            rs.append(convert_result(subresult))
        return rs
    if isinstance(r, ipyparallel.AsyncResult):
        r = r.r
    if isinstance(r, Ref):
        RemoteClass = distob.engine.proxy_types[r.type]
        r = RemoteClass(r)
    return r
1,088,020
Make an ObjectHub. Args: engine_id: ipyparallel engine id number where this Hub is located, or a negative number if it is on an ipyparallel client. client: ipyparallel.Client
def __init__(self, engine_id, client):
    self._client = client
    self._dv = client.direct_view(targets='all')
    self._dv.use_dill()
    nengines = len(client)
    super(ObjectHub, self).__init__(engine_id, nengines)
1,088,046
Set up the Remote* proxy object to access an already-existing object, which may be local or remote. Args: obj (Ref or object): either a Ref reference to the (possibly remote) object to be controlled, or else an actual (local) object to be controlled.
def __init__(self, obj):
    if distob.engine is None:
        setup_engines()
    if isinstance(obj, Ref):
        self._ref = obj
        self.is_local = (self._ref.id.engine == distob.engine.eid)
    else:
        self._ref = Ref(obj)
        self.is_local = True
    if self.is_local:
        self._dv = None
        self._obcache = distob.engine[self._ref.id]
        self._obcache_current = True
    else:
        self._dv = distob.engine._client[self._ref.id.engine]
        self._dv.use_dill()
        self._obcache = None
        self._obcache_current = False
    self._id = self._ref.id
    # preference setting: whether to give cached local results if available
    self.prefer_local = True
    # Add proxy controllers for any instance-specific methods/attributes:
    instance_methods, instance_attribs, size = call(
        _scan_instance, self, self.__class__._include_underscore,
        self.__class__._exclude, prefer_local=False)
    for name, doc in instance_methods:
        setattr(self, name, _make_proxy_method(name, doc))
    for name, doc in instance_attribs:
        setattr(self.__class__, name, _make_proxy_property(name, doc))
    self.__engine_affinity__ = (self._ref.id.engine, size)
1,088,055
Outputs error message on own logger. Also raises exceptions if need be. Args: error_msg: message to output
def error(self, error_msg):
    if self.logger is not None:
        self.logger.error(error_msg)

    if self.exc is not None:
        raise self.exc(error_msg)
1,088,353
Join geojsons into one. The spatial reference system of the output file is the same as the one of the last file in the list. Args: input_files (list): List of file name strings. output_file (str): Output file name.
def join(input_files, output_file):
    # get feature collections
    final_features = []
    for file in input_files:
        with open(file) as f:
            feat_collection = geojson.load(f)
            final_features += feat_collection['features']

    feat_collection['features'] = final_features

    # write to output file
    with open(output_file, 'w') as f:
        geojson.dump(feat_collection, f)
1,088,462
Split a geojson into two separate files. Args: input_file (str): Input filename. file_1 (str): Output file name 1. file_2 (str): Output file name 2. no_in_first_file (int): Number of features in input_file to go to file_1.
def split(input_file, file_1, file_2, no_in_first_file):
    # get feature collection
    with open(input_file) as f:
        feat_collection = geojson.load(f)

    features = feat_collection['features']
    feat_collection_1 = geojson.FeatureCollection(features[0:no_in_first_file])
    feat_collection_2 = geojson.FeatureCollection(features[no_in_first_file:])

    with open(file_1, 'w') as f:
        geojson.dump(feat_collection_1, f)

    with open(file_2, 'w') as f:
        geojson.dump(feat_collection_2, f)
1,088,463
Reads a geojson and returns a list of value tuples, each value corresponding to a property in property_names. Args: input_file (str): File name. property_names: List of strings; each string is a property name. Returns: List of value tuples.
def get_from(input_file, property_names):
    # get feature collections
    with open(input_file) as f:
        feature_collection = geojson.load(f)

    features = feature_collection['features']
    values = [tuple([feat['properties'].get(x) for x in property_names])
              for feat in features]

    return values
1,088,464
Override the vanilla delete functionality to soft-delete instead. Soft-delete is accomplished by setting the status field to "deleted" Arguments: hard <bool=False> if true, do a hard delete instead, effectively removing the object from the database
def delete(self, hard=False):
    if hard:
        return models.Model.delete(self)

    self.status = "deleted"
    self.save()
    for key in self._handleref.delete_cascade:
        q = getattr(self, key).all()

        if not hard:
            # if we are soft deleting only trigger delete on
            # objects that are not already deleted, as to avoid
            # unnecessary re-saves and overriding of updated dates
            q = q.exclude(status="deleted")

        for child in q:
            child.delete(hard=hard)
1,088,721
Add an Update object to the database Arguments: trainer - expects an int of trainer's id or a Trainer object xp time_updated - expects datetime.datetime
def create_update(self, trainer, xp, time_updated=None):
    if isinstance(trainer, Trainer):
        trainer = trainer.id

    url = api_url + 'trainers/' + str(trainer) + '/updates/'
    payload = {'trainer': int(trainer), 'xp': int(xp)}

    if time_updated is None:
        payload['update_time'] = maya.now().iso8601()
    else:
        payload['update_time'] = time_updated.iso8601()

    if self.identifier:
        payload['meta_source'] = self.identifier

    r = requests.post(url, data=json.dumps(payload), headers=self.headers)
    print(request_status(r))
    r.raise_for_status()
    return Update(r.json())
1,088,733
Replace the value in this element with values from `el`. This is useful when you don't want to change all references to the object. Args: el (obj): :class:`HTMLElement` instance.
def replaceWith(self, el):
    self.childs = el.childs
    self.params = el.params
    self.endtag = el.endtag
    self.openertag = el.openertag

    self._tagname = el.getTagName()
    self._element = el.tagToString()

    self._istag = el.isTag()
    self._isendtag = el.isEndTag()
    self._iscomment = el.isComment()
    self._isnonpairtag = el.isNonPairTag()
1,088,812
Remove subelement (`child`) specified by reference. Note: This can't be used for removing subelements by value! If you want to do such thing, try:: for e in dom.find("value"): dom.removeChild(e) Args: child (obj): :class:`HTMLElement` instance which will be removed from this element. end_tag_too (bool, default True): Remove also `child` endtag.
def removeChild(self, child, end_tag_too=True):
    # if there are multiple childs, remove them
    if _is_iterable(child):
        for x in child:
            self.removeChild(child=x, end_tag_too=end_tag_too)
        return

    if not self.childs:
        return

    end_tag = None
    if end_tag_too:
        end_tag = child.endtag

    for e in self.childs:
        if e != child:
            e.removeChild(child, end_tag_too)
            continue

        if end_tag_too and end_tag in self.childs:
            self.childs.remove(end_tag)

        self.childs.remove(e)
1,088,813
Send a GET request to the specified URL. Method directly wraps around `Session.get` and updates browser attributes. <http://docs.python-requests.org/en/master/api/#requests.get> Args: url: URL for the new `Request` object. **kwargs: Optional arguments that `Request` takes. Returns: `Response` object of a successful request.
def get(self, url, **kwargs):
    response = self.session.get(url, **kwargs)
    self._url = response.url
    self._response = response
    return response
1,089,417
Retrieve a record with a given type name and record id. Args: name (string): The name which the record is stored under. record_id (int): The id of the record requested. Returns: :class:`cinder_data.model.CinderModel`: The cached model.
def get_record(self, name, record_id):
    if name in self._cache:
        if record_id in self._cache[name]:
            return self._cache[name][record_id]
1,089,451
Return all the records for the given name in the cache. Args: name (string): The name which the required models are stored under. Returns: list: A list of :class:`cinder_data.model.CinderModel` models.
def get_records(self, name):
    if name in self._cache:
        return self._cache[name].values()
    else:
        return []
1,089,452
Save a record into the cache. Args: name (string): The name to save the model under. record_id (int): The record id. record (:class:`cinder_data.model.CinderModel`): The model
def set_record(self, name, record_id, record):
    if name not in self._cache:
        self._cache[name] = {}
    self._cache[name][record_id] = record
1,089,453
Generates a random excuse from a simple template dict. Based off of drow's generator.js (public domain). Grok it here: http://donjon.bin.sh/code/random/generator.js Args: template_dict: Dict with template strings. key: String with the starting index for the dict. (Default: 'start') Returns: Generated string.
def generate_random_string(template_dict, key='start'):
    data = template_dict.get(key)
    #if isinstance(data, list):
    result = random.choice(data)
    #else:
    #    result = random.choice(data.values())
    for match in token_regex.findall(result):
        word = generate_random_string(template_dict, match) or match
        result = result.replace('{{{0}}}'.format(match), word)
    return result
1,089,641
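A tiny template makes the recursion concrete. ``token_regex`` is not shown in this row; the pattern below is an assumed definition matching ``{token}`` placeholders:

import random
import re

token_regex = re.compile(r'\{(\w+)\}')  # assumed placeholder syntax

template = {
    'start': ['The {device} is {state}.'],
    'device': ['flux capacitor', 'token ring'],
    'state': ['on fire', 'depolarized'],
}
print(generate_random_string(template))
# e.g. 'The token ring is on fire.'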
Generate random BOFH themed technical excuses! Args: how_many: Number of excuses to generate. (Default: 1) Returns: A list of BOFH excuses.
def bofh_excuse(how_many=1):
    excuse_path = os.path.join(os.path.dirname(__file__), 'bofh_excuses.json')
    with open(excuse_path, 'r') as _f:
        excuse_dict = json.load(_f)
    return [generate_random_string(excuse_dict) for _ in range(int(how_many))]
1,089,642
Parse ``pip``-style requirements files. This is a *very* naïve parser, but very few packages make use of the more advanced features. Support for other features will be added only when packages in the wild depend on them. Args: __fname: Base file to pass Returns: Parsed dependencies
def parse_requires(__fname: str) -> List[str]:
    deps = []
    with open(__fname) as req_file:
        entries = [s.split('#')[0].strip() for s in req_file.readlines()]
    for dep in entries:
        if not dep:
            continue
        elif dep.startswith('-r '):
            include = dep.split()[1]
            if '/' not in include:
                include = path.join(path.dirname(__fname), include)
            deps.extend(parse_requires(include))
            continue
        elif ';' in dep:
            dep, marker = [s.strip() for s in dep.split(';')]
            # Support for other markers will be added when they’re actually
            # found in the wild
            # NOTE: the original raw-string pattern was lost in extraction;
            # this is an assumed equivalent accepting simple markers such as
            # python_version >= "3.5"
            match = re.fullmatch(
                r'''python_version\s*(?:<|<=|==|!=|>=|>)\s*
                    (?P<quote>['"])[\d.]+(?P=quote)''',
                marker, re.VERBOSE)
            if not match:
                raise ValueError('Invalid marker {!r}'.format(marker))
            env = {
                '__builtins__': {},
                'python_version': '{}.{}'.format(*version_info[:2]),
            }
            if not eval(marker, env):  # pylint: disable=eval-used
                continue
        deps.append(dep)
    return deps
1,089,769
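The marker handling reduces to evaluating a tiny expression against a restricted namespace; for example:

from sys import version_info

env = {
    '__builtins__': {},
    'python_version': '{}.{}'.format(*version_info[:2]),
}
print(eval('python_version >= "3.5"', env))
# True on Python 3.5+, though note the comparison is lexicographic,
# so e.g. '3.10' compares below '3.5'.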
Decorator that registers a standard mprpc exception in `_mprpc_exceptions`. Parameters: code (int): - the standard status code Return: (Callable): - the decorating function
def add_status_code(code):
    def class_decorator(cls):
        cls.status_code = code
        _mprpc_exceptions[code] = cls
        return cls
    return class_decorator
1,089,918
Create an exception from a status code. Parameters: status_code (int): - the error status code ID (str): - the task ID, usually a uuid; defaults to None. Service errors need no ID; method-call errors do exception (): - the exception stack information for the error; defaults to None. Not needed for service errors; may be needed for method-call errors message (str): - the error message Return: (MprpcException): - the standard mprpc exception for the given status code
def abort(status_code: int, ID: str=None, exception: str="",
          message: str=None):
    if message is None:
        message = STATUS_CODES.get(status_code)
    # These are stored as bytes in the STATUS_CODES dict
    mprpc_exception = _mprpc_exceptions.get(status_code, MprpcException)
    if issubclass(mprpc_exception, MethodError) or (
            mprpc_exception is MethodError):
        return mprpc_exception(
            message=message, ID=ID, exception=exception,
            status_code=status_code)
    return mprpc_exception(
        message=message, status_code=status_code)
1,089,919
Initialize the exception. Parameters: message (str): - the exception message status_code (int): - the status code
def __init__(self, message: str, status_code: int=None):
    super().__init__(message)

    if status_code is not None:
        self.status_code = status_code
1,089,920