def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
            filter_type=None, **kwds):
    delegate = self._values

    if axis is not None:
        self._get_axis_number(axis)

    if isinstance(delegate, Categorical):
        return delegate._reduce(name, numeric_only=numeric_only, **kwds)
    elif isinstance(delegate, ExtensionArray):
        return delegate._reduce(name, skipna=skipna, **kwds)
    elif is_datetime64_dtype(delegate):
        # use DatetimeIndex implementation to handle skipna correctly
        delegate = DatetimeIndex(delegate)
    elif isinstance(delegate, np.ndarray):
        if numeric_only:
            raise NotImplementedError('Series.{0} does not implement '
                                      'numeric_only.'.format(name))
        with np.errstate(all='ignore'):
            return op(delegate, skipna=skipna, **kwds)

    return delegate._reduce(op=op, name=name, axis=axis, skipna=skipna,
                            numeric_only=numeric_only,
                            filter_type=filter_type, **kwds)
Perform a reduction operation. If we have an ndarray as a value, then simply perform the operation, otherwise delegate to the object.
def distance(self, method='haversine'):
    distances = []
    for segment in self:
        if len(segment) < 2:
            distances.append([])
        else:
            distances.append(segment.distance(method))
    return distances
Calculate distances between locations in segments.

Args:
    method (str): Method used to calculate distance

Returns:
    list of list of float: Groups of distance between points in segments
def verifyUniqueWcsname(fname, wcsname):
    uniq = True
    numsci, extname = count_sci_extensions(fname)
    wnames = altwcs.wcsnames(fname, ext=(extname, 1))
    if wcsname in wnames.values():
        uniq = False
    return uniq
Report whether or not the specified WCSNAME already exists in the file
def fastq_convert_pipe_cl(in_file, data):
    cmd = _seqtk_fastq_prep_cl(data, in_file)
    if not cmd:
        cat_cmd = "zcat" if in_file.endswith(".gz") else "cat"
        cmd = cat_cmd + " " + in_file
    return "<(%s)" % cmd
Create an anonymous pipe converting Illumina 1.3-1.7 to Sanger. Uses seqtk: https://github.com/lh3/seqtk
@contextmanager  # from contextlib; required for the with-statement usage the docstring describes
def safe_temp_edit(filename):
    with temporary_file() as tmp_file:
        try:
            # back up the original before yielding it for in-place edits
            shutil.copyfile(filename, tmp_file.name)
            yield filename
        finally:
            # restore the backup, discarding any mutations
            shutil.copyfile(tmp_file.name, filename)
Safely modify a file within a context that automatically reverts any changes afterwards.

The file mutation occurs in place. The file is backed up in a temporary file before edits occur, and when the context is closed, the mutated file is discarded and replaced with the backup.

WARNING: there is a chance the file may not be restored (e.g. if the process dies before the backup is copied back), so use this method carefully with that known risk.
def ylogydu(y, u):
    mask = (np.atleast_1d(y) != 0.)
    out = np.zeros_like(u)
    out[mask] = y[mask] * np.log(y[mask] / u[mask])
    return out
tool to give desired output for the limit as y -> 0, which is 0

Parameters
----------
y : array-like of len(n)
u : array-like of len(n)

Returns
-------
np.array len(n)
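A quick numeric check of the y -> 0 limit handling, using ylogydu as defined above (a minimal sketch; the arrays are made up for illustration):

import numpy as np

y = np.array([0.0, 0.5, 1.0])
u = np.array([0.3, 0.5, 0.2])
# the y == 0 entry contributes 0 instead of the nan that 0 * np.log(0 / u) would give
print(ylogydu(y, u))  # [0.         0.         1.60943791]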
def steal(self, instr):
    instr._stolen_by = self
    for jmp in instr._target_of:
        jmp.arg = self
    self._target_of = instr._target_of
    instr._target_of = set()
    return self
Steal the jump index off of `instr`.

This makes anything that would have jumped to `instr` jump to this
Instruction instead.

Parameters
----------
instr : Instruction
    The instruction to steal the jump sources from.

Returns
-------
self : Instruction
    The instruction that owns this method.

Notes
-----
This mutates self and ``instr`` inplace.
def _runOPF(self):
    if self.decommit:
        solver = UDOPF(self.case, dc=(self.locationalAdjustment == "dc"))
    elif self.locationalAdjustment == "dc":
        solver = OPF(self.case, dc=True)
    else:
        solver = OPF(self.case, dc=False, opt={"verbose": True})
    self._solution = solver.solve()
    return self._solution["converged"]
Computes dispatch points and LMPs using OPF.
def rectangle(self):
    if self.start_point is None or self.end_point is None:
        return QgsRectangle()
    elif self.start_point.x() == self.end_point.x() or \
            self.start_point.y() == self.end_point.y():
        # degenerate rectangle (zero width or height)
        return QgsRectangle()
    return QgsRectangle(self.start_point, self.end_point)
Accessor for the rectangle.

:return: A rectangle showing the designed extent.
:rtype: QgsRectangle
def contains_container(self, path):
    path = make_path(path)
    try:
        self.get_container(path)
        return True
    except KeyError:
        return False
Returns True if a container exists at the specified path, otherwise False.

:param path: str or Path instance
:return:
:rtype: bool
:raises ValueError: A component of path is a field name.
def abi_splitext(filename):
    filename = os.path.basename(filename)
    is_ncfile = False
    if filename.endswith(".nc"):
        is_ncfile = True
        filename = filename[:-3]

    known_extensions = abi_extensions()

    # grow the suffix from the right until it matches a registered extension
    for i in range(len(filename) - 1, -1, -1):
        ext = filename[i:]
        if ext in known_extensions:
            break
    else:
        raise ValueError("Cannot find a registered extension in %s" % filename)

    root = filename[:i]
    if is_ncfile:
        ext += ".nc"

    return root, ext
Split the ABINIT extension from a filename.
"Extension" are found by searching in an internal database.

Returns "(root, ext)" where ext is the registered ABINIT extension
The final ".nc" is included (if any)

>>> assert abi_splitext("foo_WFK") == ('foo_', 'WFK')
>>> assert abi_splitext("/home/guido/foo_bar_WFK.nc") == ('foo_bar_', 'WFK.nc')
def setup():
    global _displayhooks, _excepthooks

    if _displayhooks is not None:
        return

    _displayhooks = []
    _excepthooks = []

    # preserve any hooks already installed by other code
    if sys.displayhook != sys.__displayhook__:
        _displayhooks.append(weakref.ref(sys.displayhook))
    if sys.excepthook != sys.__excepthook__:
        _excepthooks.append(weakref.ref(sys.excepthook))

    sys.displayhook = displayhook
    sys.excepthook = excepthook
Initializes the hook queues for the sys module. This method will automatically be called on the first registration for a hook to the system by either the registerDisplay or registerExcept functions.
def _add_edge(self, source, target, **kwargs):
    edge_properties = self.edge_properties
    for k, v in kwargs.items():
        edge_properties[k] = v
    self.graph.add_edge(source, target, **edge_properties)
Add an edge to the graph.
def getDayStart(self, dateTime):
    return ensure_localtime(dateTime).replace(hour=0, minute=0, second=0,
                                              microsecond=0)
Ensure local time and get the beginning of the day
def configure(self, settings_module=None, **kwargs):
    default_settings.reload()
    environment_var = self._kwargs.get(
        "ENVVAR_FOR_DYNACONF", default_settings.ENVVAR_FOR_DYNACONF
    )
    settings_module = settings_module or os.environ.get(environment_var)
    compat_kwargs(kwargs)
    kwargs.update(self._kwargs)
    self._wrapped = Settings(settings_module=settings_module, **kwargs)
    self.logger.debug("Lazy Settings configured ...")
Allows user to reconfigure settings object passing a new settings
module or separated kwargs

:param settings_module: defines the settings file
:param kwargs: override default settings
def fetch_alien(self):
    parent = self.get_parent()
    if parent:
        parentelement = parent.get_element()
    else:
        parentelement = self.get_refobjinter().get_current_element()
    if not parentelement:
        self._alien = True
        return self._alien
    element = self.get_element()
    if element == parentelement:
        self._alien = False
    elif isinstance(element, djadapter.models.Shot) \
            and (element.sequence.name == djadapter.GLOBAL_NAME
                 or (isinstance(parentelement, djadapter.models.Shot)
                     and parentelement.sequence == element.sequence
                     and element.name == djadapter.GLOBAL_NAME)):
        self._alien = False
    else:
        assets = parentelement.assets.all()
        self._alien = element not in assets
    return self._alien
Set and return whether the reftrack element is linked to the current scene.

Asks the refobj interface for the current scene.
If there is no current scene then True is returned.

:returns: whether the element is linked to the current scene
:rtype: bool
:raises: None
def get(self, instance_name):
    url = self._url + instance_name + '/'
    response = requests.get(url, **self._default_request_kwargs)
    data = self._get_response_data(response)
    return self._concrete_instance(data)
Get an ObjectRocket instance by name.

:param str instance_name: The name of the instance to retrieve.
:returns: A subclass of :py:class:`bases.BaseInstance`, or None if instance does not exist.
:rtype: :py:class:`bases.BaseInstance`
def encrypt(self, pubkey: str, nonce: Union[str, bytes],
            text: Union[str, bytes]) -> str:
    text_bytes = ensure_bytes(text)
    nonce_bytes = ensure_bytes(nonce)
    recipient_pubkey = PublicKey(pubkey)
    crypt_bytes = libnacl.public.Box(self, recipient_pubkey).encrypt(
        text_bytes, nonce_bytes)
    # strip the 24-byte nonce that libnacl prepends to the ciphertext
    return Base58Encoder.encode(crypt_bytes[24:])
Encrypt message text with the public key of the recipient and a nonce

The nonce must be a 24 character string (you can use
libnacl.utils.rand_nonce() to get one) and unique for each encrypted
message.

Return base58 encoded encrypted message

:param pubkey: Base58 encoded public key of the recipient
:param nonce: Unique nonce
:param text: Message to encrypt
:return: Base58 encoded encrypted message
def reverse_point(self, latitude, longitude, **kwargs):
    fields = ",".join(kwargs.pop("fields", []))
    point_param = "{0},{1}".format(latitude, longitude)
    response = self._req(
        verb="reverse", params={"q": point_param, "fields": fields}
    )
    if response.status_code != 200:
        return error_response(response)
    return Location(response.json())
Method for identifying an address from a geographic point
def count_relations(graph) -> Counter:
    return Counter(
        data[RELATION]
        for _, _, data in graph.edges(data=True)
    )
Return a histogram over all relationships in a graph.

:param pybel.BELGraph graph: A BEL graph
:return: A Counter from {relation type: frequency}
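A minimal usage sketch with a bare networkx graph standing in for a real pybel.BELGraph (assumption: RELATION is pybel's edge-data key for the relation name, defined locally here):

import networkx as nx
from collections import Counter

RELATION = 'relation'  # stand-in for the constant imported in the original module

g = nx.MultiDiGraph()
g.add_edge('A', 'B', relation='increases')
g.add_edge('B', 'C', relation='increases')
g.add_edge('A', 'C', relation='decreases')

print(count_relations(g))  # Counter({'increases': 2, 'decreases': 1})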
def lookup_deleted_folder(event, filesystem, journal):
    folder_events = (e for e in journal[event.parent_inode]
                     if 'DIRECTORY' in e.attributes
                     and 'FILE_DELETE' in e.changes)
    for folder_event in folder_events:
        # recurse on the first matching parent-deletion event
        path = lookup_deleted_folder(folder_event, filesystem, journal)
        return ntpath.join(path, event.name)
    return lookup_folder(event, filesystem)
Lookup the parent folder in the journal content.
def _attributeStr(self, name):
    return "{}={}".format(
        _encodeAttr(name),
        ",".join([_encodeAttr(v) for v in self.attributes[name]]))
Return name=value for a single attribute
def play(cls, file_path, on_done=None, logger=None):
    pygame.mixer.init()
    try:
        pygame.mixer.music.load(file_path)
    except pygame.error as e:
        if logger is not None:
            logger.warning(str(e))
        return
    pygame.mixer.music.play()
    # block until playback finishes
    while pygame.mixer.music.get_busy():
        time.sleep(0.1)
    if on_done:
        on_done()
Play an audio file.

:param file_path: the path to the file to play.
:param on_done: callback when audio playback completes.
def dAbr_dV(dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm, Sf, St):
    dAf_dPf = spdiag(2 * Sf.real())
    dAf_dQf = spdiag(2 * Sf.imag())
    dAt_dPt = spdiag(2 * St.real())
    dAt_dQt = spdiag(2 * St.imag())

    # chain rule: combine the P and Q sensitivities at each end
    dAf_dVa = dAf_dPf * dSf_dVa.real() + dAf_dQf * dSf_dVa.imag()
    dAt_dVa = dAt_dPt * dSt_dVa.real() + dAt_dQt * dSt_dVa.imag()
    dAf_dVm = dAf_dPf * dSf_dVm.real() + dAf_dQf * dSf_dVm.imag()
    dAt_dVm = dAt_dPt * dSt_dVm.real() + dAt_dQt * dSt_dVm.imag()

    return dAf_dVa, dAf_dVm, dAt_dVa, dAt_dVm
Partial derivatives of squared flow magnitudes w.r.t voltage. Computes partial derivatives of apparent power w.r.t active and reactive power flows. Partial derivative must equal 1 for lines with zero flow to avoid division by zero errors (1 comes from L'Hopital).
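Spelled out, the chain rule the code applies, with A_f = P_f^2 + Q_f^2 the squared 'from'-end flow magnitude (a restatement of the code above, not part of the original docstring):

    dAf/dVa = 2*Pf * dPf/dVa + 2*Qf * dQf/dVa
    dAf/dVm = 2*Pf * dPf/dVm + 2*Qf * dQf/dVm

and analogously for the 'to'-end quantity A_t.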
def get(self, path, params=None):
    r = requests.get(url=self.url + path, params=params,
                     timeout=self.timeout)
    r.raise_for_status()
    return r.json()
Perform GET request
def block(self):
    if (self.is_actinoid or self.is_lanthanoid) and self.Z not in [71, 103]:
        return "f"
    elif self.is_actinoid or self.is_lanthanoid:
        # Lu (71) and Lr (103) fall in the d-block
        return "d"
    elif self.group in [1, 2]:
        return "s"
    elif self.group in range(13, 19):
        return "p"
    elif self.group in range(3, 13):
        return "d"
    raise ValueError("unable to determine block")
Return the block character "s,p,d,f"
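A usage sketch, assuming this is the block property of pymatgen's Element (the import path below is pymatgen's public API):

from pymatgen.core.periodic_table import Element

print(Element("Na").block)  # 's'  (group 1)
print(Element("Fe").block)  # 'd'  (group 8)
print(Element("Ce").block)  # 'f'  (lanthanoid, Z not in [71, 103])
print(Element("Lu").block)  # 'd'  (Z == 71 is the d-block exception)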
def detect_timezone():
    if sys.platform == "win32":
        tz = _detect_timezone_windows()
        if tz is not None:
            return tz

    tz = _detect_timezone_environ()
    if tz is not None:
        return tz

    tz = _detect_timezone_etc_timezone()
    if tz is not None:
        return tz

    tz = _detect_timezone_etc_localtime()
    if tz is not None:
        return tz

    warnings.warn("Had to fall back to worst detection method (the 'PHP' "
                  "method).")
    tz = _detect_timezone_php()
    if tz is not None:
        return tz

    raise pytz.UnknownTimeZoneError("Unable to detect your timezone!")
Try and detect the timezone that Python is currently running in.

We have a bunch of different methods for trying to figure this out (listed in
order they are attempted).

* On Windows, use win32timezone.TimeZoneInfo.local()
* Try TZ environment variable.
* Try and find /etc/timezone file (with timezone name).
* Try and find /etc/localtime file (with timezone data).
* Try and match a TZ to the current dst/offset/shortname.

Returns:
    The detected local timezone as a tzinfo object

Raises:
    pytz.UnknownTimeZoneError: If it was unable to detect a timezone.
def loadImageData(filename, spacing=()):
    if not os.path.isfile(filename):
        colors.printc("~noentry File not found:", filename, c=1)
        return None

    if ".tif" in filename.lower():
        reader = vtk.vtkTIFFReader()
    elif ".slc" in filename.lower():
        reader = vtk.vtkSLCReader()
        if not reader.CanReadFile(filename):
            colors.printc("~prohibited Sorry bad slc file " + filename, c=1)
            exit(1)
    elif ".vti" in filename.lower():
        reader = vtk.vtkXMLImageDataReader()
    elif ".mhd" in filename.lower():
        reader = vtk.vtkMetaImageReader()

    reader.SetFileName(filename)
    reader.Update()
    image = reader.GetOutput()
    if len(spacing) == 3:
        image.SetSpacing(spacing[0], spacing[1], spacing[2])
    return image
Read and return a ``vtkImageData`` object from file.
def get_avatar_metadata(self, jid, *, require_fresh=False,
                        disable_pep=False):
    if require_fresh:
        self._metadata_cache.pop(jid, None)
    else:
        try:
            return self._metadata_cache[jid]
        except KeyError:
            pass

    if disable_pep:
        metadata = []
    else:
        metadata = yield from self._get_avatar_metadata_pep(jid)

    if not metadata and jid not in self._has_pep_avatar:
        metadata = yield from self._get_avatar_metadata_vcard(jid)

    if jid not in self._metadata_cache:
        self._update_metadata(jid, metadata)

    return self._metadata_cache[jid]
Retrieve a list of avatar descriptors.

:param jid: the JID for which to retrieve the avatar metadata.
:type jid: :class:`aioxmpp.JID`
:param require_fresh: if true, do not return results from the
    avatar metadata cache, but retrieve them again from the server.
:type require_fresh: :class:`bool`
:param disable_pep: if true, do not try to retrieve the avatar via
    pep, only try the vCard fallback. This is usually only useful
    when querying avatars via MUC, where the PEP request would be
    invalid (since it would be for a full jid).
:type disable_pep: :class:`bool`
:returns: an iterable of avatar descriptors.
:rtype: a :class:`list` of
    :class:`~aioxmpp.avatar.service.AbstractAvatarDescriptor` instances

Returning an empty list means that the avatar is not set.

We mask a :class:`XMPPCancelError` in the case that it is
``feature-not-implemented`` or ``item-not-found`` and return an empty
list of avatar descriptors, since this is semantically equivalent to
not having an avatar.

.. note::

   It is usually an error to get the avatar for a full jid; normally,
   the avatar is set for the bare jid of a user. The exception are
   vCard avatars over MUC, where the IQ requests for the vCard may be
   translated by the MUC server. It is recommended to use the
   `disable_pep` option in that case.
def is_unclaimed(work):
    if work['is_completed']:
        return False
    cutoff_time = time.time() - MAX_PROCESSING_TIME
    if (work['claimed_worker_id']
            and work['claimed_worker_start_time'] is not None
            and work['claimed_worker_start_time'] >= cutoff_time):
        # claimed recently enough to still be considered in progress
        return False
    return True
Returns True if work piece is unclaimed.
def smart_reroot(treefile, outgroupfile, outfile, format=0):
    tree = Tree(treefile, format=format)
    leaves = [t.name for t in tree.get_leaves()][::-1]
    outgroup = []
    for o in must_open(outgroupfile):
        o = o.strip()
        for leaf in leaves:
            if leaf[:len(o)] == o:
                outgroup.append(leaf)
        if outgroup:
            break

    if not outgroup:
        print("Outgroup not found. Tree {0} cannot be rerooted.".format(treefile),
              file=sys.stderr)
        return treefile

    try:
        tree.set_outgroup(tree.get_common_ancestor(*outgroup))
    except ValueError:
        assert type(outgroup) == list
        outgroup = outgroup[0]
        tree.set_outgroup(outgroup)
    tree.write(outfile=outfile, format=format)

    logging.debug("Rerooted tree printed to {0}".format(outfile))
    return outfile
simple function to reroot Newick format tree using ete2

Tree reading format options see here:
http://packages.python.org/ete2/tutorial/tutorial_trees.html#reading-newick-trees
def start_cluster_server(self, num_gpus=1, rdma=False):
    return TFNode.start_cluster_server(self, num_gpus, rdma)
Convenience function to access ``TFNode.start_cluster_server`` directly from this object instance.
def tkvrsn(item):
    item = stypes.stringToCharP(item)
    return stypes.toPythonString(libspice.tkvrsn_c(item))
Given an item such as the Toolkit or an entry point name, return
the latest version string.

http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tkvrsn_c.html

:param item: Item for which a version string is desired.
:type item: str
:return: the latest version string.
:rtype: str
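A minimal check, assuming this is SpiceyPy's tkvrsn wrapper; "TOOLKIT" is the item name documented for the full toolkit version:

import spiceypy

print(spiceypy.tkvrsn("TOOLKIT"))  # e.g. 'CSPICE_N0067' (varies by install)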
def _load_image(cls, rkey):
    v = cls._stock[rkey]
    img = None
    itype = v['type']
    if itype in ('stock', 'data'):
        img = tk.PhotoImage(format=v['format'], data=v['data'])
    elif itype == 'created':
        img = v['image']
    else:
        img = tk.PhotoImage(file=v['filename'])
    cls._cached[rkey] = img
    logger.info('Loaded resource %s.' % rkey)
    return img
Load image from file or return the cached instance.
def get_data(self):
    "Return one SNMP response list for all status OIDs, and one list for all metric OIDs."
    alarm_oids = [netsnmp.Varbind(status_mib[alarm_id]['oid'])
                  for alarm_id in self.models[self.modem_type]['alarms']]
    metric_oids = [netsnmp.Varbind(metric_mib[metric_id]['oid'])
                   for metric_id in self.models[self.modem_type]['metrics']]
    response = self.snmp_session.get(netsnmp.VarList(*alarm_oids + metric_oids))
    return (
        response[0:len(alarm_oids)],
        response[len(alarm_oids):]
    )
Return one SNMP response list for all status OIDs, and one list for all metric OIDs.
def get_bookmarks(self, folder='unread', limit=25, have=None):
    path = 'bookmarks/list'
    params = {'folder_id': folder, 'limit': limit}
    if have:
        have_concat = ','.join(str(id_) for id_ in have)
        params['have'] = have_concat

    response = self.request(path, params)
    items = response['data']

    bookmarks = []
    for item in items:
        if item.get('type') == 'error':
            raise Exception(item.get('message'))
        elif item.get('type') == 'bookmark':
            bookmarks.append(Bookmark(self, **item))

    return bookmarks
Return list of user's bookmarks.

:param str folder: Optional. Possible values are unread (default),
    starred, archive, or a folder_id value.
:param int limit: Optional. A number between 1 and 500, default 25.
:param list have: Optional. A list of IDs to exclude from results

:returns: List of user's bookmarks
:rtype: list
def exists(name, region=None, key=None, keyid=None, profile=None):
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        # describe_stacks raises if the stack does not exist
        conn.describe_stacks(name)
        log.debug('Stack %s exists.', name)
        return True
    except BotoServerError:
        log.debug('boto_cfn.exists raised an exception', exc_info=True)
        return False
Check to see if a stack exists.

CLI Example:

.. code-block:: bash

    salt myminion boto_cfn.exists mystack region=us-east-1
def build_groups(self, tokens):
    groups = {}
    for token in tokens:
        match_type = MatchType.start if token.group_end else MatchType.single
        groups[token.group_start] = (token, match_type)
        if token.group_end:
            groups[token.group_end] = (token, MatchType.end)
    return groups
Build dict of groups from list of tokens
def find_files(self):
    all_files = self._invoke('locate', '-I', '.').splitlines()
    # convert paths relative to the repo root into paths relative to self.location
    from_root = os.path.relpath(self.location, self.find_root())
    loc_rel_paths = [
        os.path.relpath(path, from_root)
        for path in all_files]
    return loc_rel_paths
Find versioned files in self.location
def parse_magmoms(self, data, lattice=None):
    if lattice is None:
        raise Exception(
            'Magmoms given in terms of crystal axes in magCIF spec.')
    try:
        magmoms = {
            data["_atom_site_moment_label"][i]: np.array(
                [str2float(data["_atom_site_moment_crystalaxis_x"][i]),
                 str2float(data["_atom_site_moment_crystalaxis_y"][i]),
                 str2float(data["_atom_site_moment_crystalaxis_z"][i])]
            )
            for i in range(len(data["_atom_site_moment_label"]))
        }
    except (ValueError, KeyError):
        return None
    return magmoms
Parse atomic magnetic moments from data dictionary
def data_url(self, image_format='png', add_quiet_zone=True):
    memory_file = io.BytesIO()
    pil_image = self.image(add_quiet_zone=add_quiet_zone)

    if image_format == 'png':
        pil_image.save(memory_file, format='png', compress_level=1)
    elif image_format == 'bmp':
        pil_image.save(memory_file, format='bmp')
    else:
        raise Code128.UnknownFormatError('Only png and bmp are supported.')

    base64_image = base64.b64encode(memory_file.getvalue()).decode('ascii')
    data_url = 'data:image/{format};base64,{base64_data}'.format(
        format=image_format, base64_data=base64_image
    )
    return data_url
Get a data URL representing the barcode.

>>> barcode = Code128('Hello!', charset='B')
>>> barcode.data_url()  # doctest: +ELLIPSIS
'data:image/png;base64,...'

:param image_format: Either 'png' or 'bmp'.
:param add_quiet_zone: Add 10 white pixels on either side of the barcode.
:raises: Code128.UnknownFormatError
:raises: Code128.MissingDependencyError
:rtype: str
:returns: A data URL with the barcode as an image.
def app_score(self):
    precisions, pct_pred_pos, taus = self.precision_pct_pred_pos_curve(
        interval=False)

    # trapezoidal integration of precision over pct-predicted-positive
    app = 0
    total = 0
    for k in range(len(precisions) - 1):
        cur_prec = precisions[k]
        cur_pp = pct_pred_pos[k]
        next_prec = precisions[k + 1]
        next_pp = pct_pred_pos[k + 1]

        mid_prec = (cur_prec + next_prec) / 2.0
        width_pp = np.abs(next_pp - cur_pp)
        app += mid_prec * width_pp
        total += width_pp

    return app
Computes the area under the app curve.
def load_all_yamls(cls, directories):
    yaml_files = []
    loaded_yamls = {}

    for d in directories:
        if d.startswith('/home') and not os.path.exists(d):
            os.makedirs(d)
        for dirname, subdirs, files in os.walk(d):
            yaml_files.extend(map(lambda x: os.path.join(dirname, x),
                                  filter(lambda x: x.endswith('.yaml'),
                                         files)))

    for f in yaml_files:
        loaded_yamls[f] = cls.load_yaml_by_path(f)

    return loaded_yamls
Loads yaml files from all given directories.

Args:
    directories: list of directories to search
Returns:
    dict of {fullpath: loaded_yaml_structure}
def is_client(self):
    return (self.args.client or self.args.browser) and not self.args.server
Return True if Glances is running in client mode.
def count_lines(fname, mode='rU'):
    i = -1  # so an empty file reports 0 lines instead of raising NameError
    with open(fname, mode) as f:
        for i, l in enumerate(f):
            pass
    return i + 1
Count the number of lines in a file

The only faster way would be to utilize multiple processor cores to
perform parallel reads.

http://stackoverflow.com/q/845058/623735
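A self-contained check of count_lines as defined above (note 'rU' is deprecated on Python 3, where 'r' gives the same universal-newline behavior):

with open('demo.txt', 'w') as f:
    f.write('one\ntwo\nthree\n')

print(count_lines('demo.txt', mode='r'))  # 3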
def timeit(method):
    import datetime

    @functools.wraps(method)
    def timed_method(self, rinput):
        time_start = datetime.datetime.utcnow()
        result = method(self, rinput)
        time_end = datetime.datetime.utcnow()
        result.time_it(time_start, time_end)
        self.logger.info('total time measured')
        return result

    return timed_method
Decorator to measure the time used by the recipe
def all():
    return [goal for _, goal in sorted(Goal._goal_by_name.items())
            if goal.active]
Returns all active registered goals, sorted alphabetically by name.

:API: public
def url_to_image(url, flag=cv2.IMREAD_COLOR):
    resp = urlopen(url)
    image = np.asarray(bytearray(resp.read()), dtype="uint8")
    image = cv2.imdecode(image, flag)
    return image
download the image, convert it to a NumPy array, and then read it into OpenCV format
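A usage sketch for url_to_image; the URL is a placeholder, and opencv-python plus numpy are required (urlopen comes from urllib.request on Python 3):

from urllib.request import urlopen
import cv2

img = url_to_image('https://example.com/photo.jpg')  # hypothetical URL
if img is not None:  # cv2.imdecode returns None if the bytes can't be decoded
    print(img.shape)  # (height, width, 3) for a color image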
def _reset_suffix_links(self):
    self._suffix_links_set = False
    for current, _parent in self.dfs():
        current.suffix = None
        current.dict_suffix = None
        current.longest_prefix = None
Reset all suffix links in all nodes in this trie.
def edit_dataset_metadata(request, dataset_id=None):
    if request.method == 'POST':
        return add_dataset(request, dataset_id)
    elif request.method == 'GET':
        if dataset_id:
            metadata_form = DatasetUploadForm(
                instance=get_object_or_404(Dataset, pk=dataset_id)
            )
        else:
            metadata_form = DatasetUploadForm()

        return render(
            request,
            'datafreezer/upload.html',
            {
                'fileUploadForm': metadata_form,
            }
        )
Renders a template to upload or edit a Dataset. Most of the heavy lifting is done by add_dataset(...).
def rssi_bars(self) -> int:
    rssi_db = self.rssi_db
    if rssi_db < 45:
        return 0
    elif rssi_db < 60:
        return 1
    elif rssi_db < 75:
        return 2
    elif rssi_db < 90:
        return 3
    return 4
Received Signal Strength Indication, from 0 to 4 bars.
def add_feature(feature,
                package=None,
                source=None,
                limit_access=False,
                enable_parent=False,
                image=None,
                restart=False):
    cmd = ['DISM',
           '/Quiet',
           '/Image:{0}'.format(image) if image else '/Online',
           '/Enable-Feature',
           '/FeatureName:{0}'.format(feature)]
    if package:
        cmd.append('/PackageName:{0}'.format(package))
    if source:
        cmd.append('/Source:{0}'.format(source))
    if limit_access:
        cmd.append('/LimitAccess')
    if enable_parent:
        cmd.append('/All')
    if not restart:
        cmd.append('/NoRestart')

    return __salt__['cmd.run_all'](cmd)
Install a feature using DISM

Args:
    feature (str): The feature to install

    package (Optional[str]): The parent package for the feature. You do
        not have to specify the package if it is the Windows Foundation
        Package. Otherwise, use package to specify the parent package of
        the feature

    source (Optional[str]): The optional source of the capability.
        Default is set by group policy and can be Windows Update

    limit_access (Optional[bool]): Prevent DISM from contacting Windows
        Update for the source package

    enable_parent (Optional[bool]): True will enable all parent features
        of the specified feature

    image (Optional[str]): The path to the root directory of an offline
        Windows image. If `None` is passed, the running operating system
        is targeted. Default is None.

    restart (Optional[bool]): Reboot the machine if required by the
        install

Returns:
    dict: A dictionary containing the results of the command

CLI Example:

.. code-block:: bash

    salt '*' dism.add_feature NetFx3
def sort(self, key, reverse=False, none_greater=False):
    # selection sort, swapping index_track entries alongside the rows
    for i in range(0, len(self.table)):
        min_idx = i
        for j in range(i + 1, len(self.table)):
            if internal.is_first_lessor(self.table[j], self.table[min_idx],
                                        key, none_greater=none_greater,
                                        reverse=reverse):
                min_idx = j
        if i != min_idx:
            self.table[i], self.table[min_idx] = \
                self.table[min_idx], self.table[i]
            self.index_track[i], self.index_track[min_idx] = \
                self.index_track[min_idx], self.index_track[i]
    return self
Sort the list in the order of the dictionary key.

Example of use:

>>> test = [
...     {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 },
...     {"name": "Larry", "age": 18, "wigs": [3, 2, 9]},
...     {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]},
...     {"name": "Bill", "age": 19, "income": 29000 },
... ]
>>> print PLOD(test).sort("name").returnString()
[
    {age: 19, income: 29000, name: 'Bill' , wigs: None     },
    {age: 18, income: 93000, name: 'Jim'  , wigs: 68       },
    {age: 20, income: 15000, name: 'Joe'  , wigs: [1, 2, 3]},
    {age: 18, income: None , name: 'Larry', wigs: [3, 2, 9]}
]
>>> print PLOD(test).sort("income").returnString()
[
    {age: 18, income: None , name: 'Larry', wigs: [3, 2, 9]},
    {age: 20, income: 15000, name: 'Joe'  , wigs: [1, 2, 3]},
    {age: 19, income: 29000, name: 'Bill' , wigs: None     },
    {age: 18, income: 93000, name: 'Jim'  , wigs: 68       }
]
>>> print PLOD(test).sort(["age", "income"]).returnString()
[
    {age: 18, income: None , name: 'Larry', wigs: [3, 2, 9]},
    {age: 18, income: 93000, name: 'Jim'  , wigs: 68       },
    {age: 19, income: 29000, name: 'Bill' , wigs: None     },
    {age: 20, income: 15000, name: 'Joe'  , wigs: [1, 2, 3]}
]

.. versionadded:: 0.0.2

:param key:
    A dictionary key (or a list of keys) that should be the basis of the
    sorting.
:param reverse:
    Defaults to False. If True, then list is sorted decrementally.
:param none_greater:
    Defaults to False. If True, then entries missing the key/value pair
    are considered be of greater value than the non-missing values.
:returns: self
def gammaVectorRDD(sc, shape, scale, numRows, numCols, numPartitions=None,
                   seed=None):
    return callMLlibFunc("gammaVectorRDD", sc._jsc, float(shape),
                         float(scale), numRows, numCols, numPartitions, seed)
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the Gamma distribution.

:param sc: SparkContext used to create the RDD.
:param shape: Shape (> 0) of the Gamma distribution
:param scale: Scale (> 0) of the Gamma distribution
:param numRows: Number of Vectors in the RDD.
:param numCols: Number of elements in each Vector.
:param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
:param seed: Random seed (default: a random long integer).
:return: RDD of Vector with vectors containing i.i.d. samples ~ Gamma(shape, scale).

>>> import numpy as np
>>> from math import sqrt
>>> shape = 1.0
>>> scale = 2.0
>>> expMean = shape * scale
>>> expStd = sqrt(shape * scale * scale)
>>> mat = np.matrix(RandomRDDs.gammaVectorRDD(sc, shape, scale, 100, 100, seed=1).collect())
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - expMean) < 0.1
True
>>> abs(mat.std() - expStd) < 0.1
True
def unicode_urlencode(query, doseq=True):
    pairs = []
    for key, value in query.items():
        if isinstance(value, list):
            value = list(map(to_utf8, value))
        else:
            value = to_utf8(value)
        pairs.append((to_utf8(key), value))
    encoded_query = dict(pairs)
    return urlencode(encoded_query, doseq)
Custom wrapper around urlencode to support unicode Python urlencode doesn't handle unicode well so we need to convert to bytestrings before using it: http://stackoverflow.com/questions/6480723/urllib-urlencode-doesnt-like-unicode-values-how-about-this-workaround
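A usage sketch; to_utf8 is not shown in the snippet, so a plausible stand-in is defined here (assumption: it encodes text to UTF-8 bytes and passes everything else through):

from urllib.parse import urlencode  # the snippet's own urlencode import is not shown

def to_utf8(value):
    # hypothetical helper matching how the wrapper uses it
    return value.encode('utf-8') if isinstance(value, str) else value

print(unicode_urlencode({'q': 'café', 'tags': ['a', 'ü']}))
# q=caf%C3%A9&tags=a&tags=%C3%BC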
def append_sint32(self, value):
    zigzag_value = wire_format.zig_zag_encode(value)
    self._stream.append_var_uint32(zigzag_value)
Appends a 32-bit integer to our buffer, zigzag-encoded and then varint-encoded.
def describe_cache_parameters(name=None, conn=None, region=None, key=None,
                              keyid=None, profile=None, **args):
    ret = {}
    generic = _describe_resource(name=name,
                                 name_param='CacheParameterGroupName',
                                 res_type='cache_parameter',
                                 info_node='Parameters', conn=conn,
                                 region=region, key=key, keyid=keyid,
                                 profile=profile, **args)
    specific = _describe_resource(name=name,
                                  name_param='CacheParameterGroupName',
                                  res_type='cache_parameter',
                                  info_node='CacheNodeTypeSpecificParameters',
                                  conn=conn, region=region, key=key,
                                  keyid=keyid, profile=profile, **args)
    if generic:
        ret['Parameters'] = generic
    if specific:
        ret['CacheNodeTypeSpecificParameters'] = specific
    return ret
Returns the detailed parameter list for a particular cache parameter group.

name
    The name of a specific cache parameter group to return details for.

CacheParameterGroupName
    The name of a specific cache parameter group to return details for.
    Generally not required, as `name` will be used if not provided.

Source
    Optionally, limit the parameter types to return. Valid values:
    - user
    - system
    - engine-default

Example:

.. code-block:: bash

    salt myminion boto3_elasticache.describe_cache_parameters name=myParamGroup Source=user
def fetch_data(self):
    choices = self.available_data
    choices.insert(0, 'All')
    selected_data_type = utils.select_item(
        choices,
        'Please select what data to fetch:',
        'Available data:',
    )
    if selected_data_type == 'All':
        selected_data_type = ','.join(self.available_data)

    utils.pending_message('Performing fetch data task...')

    fetch_data_task = self.client.data(
        account=self.account,
        data=selected_data_type,
    )
    fetch_data_task.wait_for_result(timeout=self.timeout)
    fetch_data_result = json.loads(fetch_data_task.result)

    task_id = fetch_data_task.uuid
    filepath = utils.get_or_create_filepath('%s.json' % task_id)
    with open(filepath, 'w') as out:
        json.dump(fetch_data_result, out, indent=2)

    utils.info_message('Fetch data successful. Output file: %s.json' % task_id)

    return fetch_data_result
Prompt for a data type choice and execute the `fetch_data` task. The results are saved to a file in json format.
def Unequal(*xs, simplify=True):
    xs = [Expression.box(x).node for x in xs]
    y = exprnode.not_(exprnode.eq(*xs))
    if simplify:
        y = y.simplify()
    return _expr(y)
Expression inequality operator

If *simplify* is ``True``, return a simplified expression.
def _encode_dict_as_string(value):
    if value.startswith("{\n"):
        value = "{" + value[2:]
    if value.endswith("\n}"):
        value = value[:-2] + "}"
    return value.replace('"', '\\"').replace("\\n", "\\\\n").replace("\n", "\\n")
Takes the PLIST string of a dict, and returns the same string encoded such that it can be included in the string representation of a GSNode.
def handle_login_failure(self, provider, reason):
    logger.error('Authentication Failure: {0}'.format(reason))
    messages.error(self.request, 'Authentication Failed. Please try again')
    return redirect(self.get_error_redirect(provider, reason))
Message user and redirect on error.
def update(self, cont):
    self.max_time = max(self.max_time, cont.max_time)
    if cont.items is not None:
        if self.items is None:
            self.items = cont.items
        else:
            self.items.update(cont.items)
Update this instance with the contextualize instance passed in.
def reverse_iter(self, start=None, stop=None, count=2000):
    cursor = '0'
    count = 1000
    start = start if start is not None else (-1 * count)
    stop = stop if stop is not None else -1
    _loads = self._loads
    while cursor:
        cursor = self._client.lrange(self.key_prefix, start, stop)
        for x in reversed(cursor or []):
            yield _loads(x)
        start -= count
        stop -= count
-> yields items of the list in reverse
def make_query(self, **kw):
    query = kw.pop("query", {})
    query.update(self.get_request_query())
    query.update(self.get_custom_query())
    query.update(self.get_keyword_query(**kw))
    sort_on, sort_order = self.get_sort_spec()
    if sort_on and "sort_on" not in query:
        query.update({"sort_on": sort_on})
    if sort_order and "sort_order" not in query:
        query.update({"sort_order": sort_order})
    logger.info("make_query:: query={} | catalog={}".format(
        query, self.catalog))
    return query
create a query suitable for the catalog
def load_config_from_cli_arguments(self, *args, **kwargs):
    self._load_config_from_cli_argument(key='handlers_package', **kwargs)
    self._load_config_from_cli_argument(key='auth', **kwargs)
    self._load_config_from_cli_argument(key='user_stream', **kwargs)
    self._load_config_from_cli_argument(key='min_seconds_between_errors',
                                        **kwargs)
    self._load_config_from_cli_argument(
        key='sleep_seconds_on_consecutive_errors', **kwargs)
Get config values of passed in CLI options.

:param dict kwargs: CLI options
def _save(self, name, content):
    name = self.clean_name(name)
    if not self._exists_with_etag(name, content):
        content.seek(0)
        super(StaticCloudinaryStorage, self)._save(name, content)
    return self._prepend_prefix(name)
Saves only when a file with a name and a content is not already uploaded to Cloudinary.
def list_clusters(kwargs=None, call=None):
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_clusters function must be called with '
            '-f or --function.'
        )

    return {'Clusters': salt.utils.vmware.list_clusters(_get_si())}
List all the clusters for this VMware environment

CLI Example:

.. code-block:: bash

    salt-cloud -f list_clusters my-vmware-config
def numeric(self, code, padded=False):
    code = self.alpha2(code)
    try:
        num = self.alt_codes[code][1]
    except KeyError:
        return None
    if padded:
        return "%03d" % num
    return num
Return the ISO 3166-1 numeric country code matching the provided
country code.

If no match is found, returns ``None``.

:param padded: Pass ``True`` to return a 0-padded three character
    string, otherwise an integer will be returned.
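A usage sketch, assuming this is the numeric method of django-countries' Countries registry (the import below is that package's documented entry point):

from django_countries import countries

print(countries.numeric('US'))               # 840
print(countries.numeric('US', padded=True))  # '840'
print(countries.numeric('ZZ'))               # None (no such country)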
def contracts_source_path_with_stem(stem):
    return {
        'lib': _BASE.joinpath(stem, 'lib'),
        'raiden': _BASE.joinpath(stem, 'raiden'),
        'test': _BASE.joinpath(stem, 'test'),
        'services': _BASE.joinpath(stem, 'services'),
    }
The directory remapping given to the Solidity compiler.
def load_image(name):
    image = pyglet.image.load(name).texture
    verify_dimensions(image)
    return image
Load an image
def get_mapping_format(self):
    if self.format == DataFormat.json or self.format == DataFormat.avro:
        return self.format.name
    else:
        return DataFormat.csv.name
Return the name of the mapping format corresponding to the data format (json and avro map to themselves; anything else falls back to csv).
def get_farthest_node(self, topology_only=False):
    # start from the farthest descendant leaf, then walk up through ancestors
    farthest_node, farthest_dist = self.get_farthest_leaf(
        topology_only=topology_only)
    prev = self
    cdist = 0.0 if topology_only else prev.dist
    current = prev.up
    while current is not None:
        for ch in current.children:
            if ch != prev:
                if not ch.is_leaf():
                    fnode, fdist = ch.get_farthest_leaf(
                        topology_only=topology_only)
                else:
                    fnode = ch
                    fdist = 0
                if topology_only:
                    fdist += 1.0
                else:
                    fdist += ch.dist
                if cdist + fdist > farthest_dist:
                    farthest_dist = cdist + fdist
                    farthest_node = fnode
        prev = current
        if topology_only:
            cdist += 1
        else:
            cdist += prev.dist
        current = prev.up
    return farthest_node, farthest_dist
Returns the node's farthest descendant or ancestor node, and the
distance to it.

:argument False topology_only: If set to True, distance between nodes
    will be referred to the number of nodes between them. In other
    words, topological distance will be used instead of branch length
    distances.

:return: A tuple containing the farthest node referred to the current
    node and the distance to it.
def start(context, mip_config, email, priority, dryrun, command, start_with,
          family):
    mip_cli = MipCli(context.obj['script'])
    mip_config = mip_config or context.obj['mip_config']
    email = email or environ_email()
    kwargs = dict(config=mip_config, family=family, priority=priority,
                  email=email, dryrun=dryrun, start_with=start_with)
    if command:
        mip_command = mip_cli.build_command(**kwargs)
        click.echo(' '.join(mip_command))
    else:
        try:
            mip_cli(**kwargs)
            if not dryrun:
                context.obj['store'].add_pending(family, email=email)
        except MipStartError as error:
            click.echo(click.style(error.message, fg='red'))
Start a new analysis.
def weed(self):
    _ext = [k for k in self._dict.keys() if k not in self.c_param]
    for k in _ext:
        del self._dict[k]
Get rid of key value pairs that are not standard
def run(self):
    self._wake_up()
    # drain any configuration the user queued before we started
    while not self._q.empty():
        self._config = self._q.get()
    while not self.exit.is_set():
        settings = self._read_settings()
        settings['valid'] = self._valid_config(settings)
        self._cb(settings)
        if self.cooldown.is_set():
            self._log.debug("Cool down process triggered")
            self._config['drum_motor'] = 1
            self._config['heater'] = 0
            self._config['solenoid'] = 1
            self._config['cooling_motor'] = 1
            self._config['main_fan'] = 10
        if settings['valid']:
            self._log.debug("Settings were valid, sending...")
            self._send_config()
        time.sleep(self._config['interval'])
Run the core loop of reading and writing configurations.

This is where all the roaster magic occurs. On the initial run, we
prime the roaster with some data to wake it up. Once awake, we check
our shared queue to identify whether the user has passed any updated
configuration. Once checked, we read from and write to the Hottop
roaster for as long as the exit signal has not been set. All steps are
repeated after waiting for a specific time interval.

There are also specialized routines built into this function that are
controlled via events. These events are unique to the roasting process
and pre-configure the system with a configuration, so the user doesn't
need to do it themselves.

:returns: None
def simple_separated_format(separator):
    return TableFormat(None, None, None, None,
                       headerrow=DataRow('', separator, ''),
                       datarow=DataRow('', separator, ''),
                       padding=0, with_header_hide=None)
Construct a simple TableFormat with columns separated by a separator.

>>> tsv = simple_separated_format("\\t") ; \
    tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
True
def _has_tag(version, debug=False):
    cmd = sh.git.bake('show-ref', '--verify', '--quiet',
                      "refs/tags/{}".format(version))
    try:
        util.run_command(cmd, debug=debug)
        return True
    except sh.ErrorReturnCode:
        return False
Determine whether a version is a local git tag name or not.

:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
def create_db(name, **client_args):
    if db_exists(name, **client_args):
        log.info('DB \'%s\' already exists', name)
        return False

    client = _client(**client_args)
    client.create_database(name)

    return True
Create a database.

name
    Name of the database to create.

CLI Example:

.. code-block:: bash

    salt '*' influxdb.create_db <name>
def _get_filesystem_path(self, url_path, basedir=settings.MEDIA_ROOT):
    if url_path.startswith(settings.MEDIA_URL):
        # strip the media prefix before joining with the base directory
        url_path = url_path[len(settings.MEDIA_URL):]
    return os.path.normpath(os.path.join(basedir, url2pathname(url_path)))
Makes a filesystem path from the specified URL path
def _write_plain_json(file_path, js):
    if file_path.endswith('.bz2'):
        with bz2.open(file_path, 'wt', encoding=_default_encoding) as f:
            json.dump(js, f, indent=2, ensure_ascii=False)
    else:
        with open(file_path, 'w', encoding=_default_encoding) as f:
            json.dump(js, f, indent=2, ensure_ascii=False)
Write information to a JSON file

This makes sure files are created with the proper encoding and
consistent indenting

Parameters
----------
file_path : str
    Full path to the file to write to. It will be overwritten if it exists
js : dict
    JSON information to write
def GetCompressedStreamTypeIndicators(cls, path_spec, resolver_context=None):
    if (cls._compressed_stream_remainder_list is None or
            cls._compressed_stream_store is None):
        specification_store, remainder_list = cls._GetSpecificationStore(
            definitions.FORMAT_CATEGORY_COMPRESSED_STREAM)
        cls._compressed_stream_remainder_list = remainder_list
        cls._compressed_stream_store = specification_store

    if cls._compressed_stream_scanner is None:
        cls._compressed_stream_scanner = cls._GetSignatureScanner(
            cls._compressed_stream_store)

    return cls._GetTypeIndicators(
        cls._compressed_stream_scanner, cls._compressed_stream_store,
        cls._compressed_stream_remainder_list, path_spec,
        resolver_context=resolver_context)
Determines if a file contains supported compressed stream types.

Args:
    path_spec (PathSpec): path specification.
    resolver_context (Optional[Context]): resolver context, where None
        represents the built-in context which is not multi process safe.

Returns:
    list[str]: supported format type indicators.
@contextmanager  # from contextlib; required for the with-statement usage the docstring shows
def allow_pgcodes(cr, *codes):
    try:
        with cr.savepoint():
            with core.tools.mute_logger('odoo.sql_db'):
                yield
    except (ProgrammingError, IntegrityError) as error:
        msg = "Code: {code}. Class: {class_}. Error: {error}.".format(
            code=error.pgcode,
            class_=errorcodes.lookup(error.pgcode[:2]),
            error=errorcodes.lookup(error.pgcode))
        if error.pgcode in codes or error.pgcode[:2] in codes:
            logger.info(msg)
        else:
            logger.exception(msg)
            raise
Context manager that will omit specified error codes.

E.g., suppose you expect a migration to produce unique constraint
violations and you want to ignore them. Then you could just do::

    with allow_pgcodes(cr, psycopg2.errorcodes.UNIQUE_VIOLATION):
        cr.execute("INSERT INTO me (name) SELECT name FROM you")

.. warning::
    **All** sentences inside this context will be rolled back if
    **a single error** is raised, so the above example would insert
    **nothing** if a single row violates a unique constraint.

    This would ignore duplicate files but insert the others::

        cr.execute("SELECT name FROM you")
        for row in cr.fetchall():
            with allow_pgcodes(cr, psycopg2.errorcodes.UNIQUE_VIOLATION):
                cr.execute("INSERT INTO me (name) VALUES (%s)", row[0])

:param *str codes:
    Undefined amount of error codes found in :mod:`psycopg2.errorcodes`
    that are allowed. Codes can have either 2 characters (indicating an
    error class) or 5 (indicating a concrete error). Any other errors
    will be raised.
def delete_api_stage(restApiId, stageName, region=None, key=None, keyid=None,
                     profile=None):
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        conn.delete_stage(restApiId=restApiId, stageName=stageName)
        return {'deleted': True}
    except ClientError as e:
        return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
Deletes stage identified by stageName from API identified by restApiId

CLI Example:

.. code-block:: bash

    salt myminion boto_apigateway.delete_api_stage restApiId stageName
def show_bgp_speaker(self, bgp_speaker_id, **_params):
    return self.get(self.bgp_speaker_path % (bgp_speaker_id),
                    params=_params)
Fetches information of a certain BGP speaker.
async def render_template(template_name_or_list: Union[str, List[str]],
                          **context: Any) -> str:
    await current_app.update_template_context(context)
    template = current_app.jinja_env.get_or_select_template(
        template_name_or_list)
    return await _render(template, context)
Render the template with the context given.

Arguments:
    template_name_or_list: Template name to render or a list of
        possible template names.
    context: The variables to pass to the template.
def title_has_tag(page, lang, tag):
    from .models import TitleTags

    if hasattr(tag, 'slug'):
        slug = tag.slug
    else:
        slug = tag
    try:
        return page.get_title_obj(
            language=lang, fallback=False
        ).titletags.tags.filter(slug=slug).exists()
    except TitleTags.DoesNotExist:
        return False
Check if a Title object is associated with the given tag.
This function does not use fallbacks to retrieve title object.

:param page: a Page instance
:param lang: a language code
:param tag: a Tag instance or a slug string.

:return: whether the Title instance has the given tag attached
    (False if no Title or no attached TitleTags exists)
:type: Boolean
def stop_capture(self):
    if self._capture_node:
        yield from self._capture_node["node"].post(
            "/adapters/{adapter_number}/ports/{port_number}/stop_capture".format(
                adapter_number=self._capture_node["adapter_number"],
                port_number=self._capture_node["port_number"]))
        self._capture_node = None
    yield from super().stop_capture()
Stop capture on a link
def get_mate_center(self, angle=0):
    return Mate(self, CoordSystem.from_plane(
        cadquery.Plane(
            origin=(0, 0, self.width / 2),
            xDir=(1, 0, 0),
            normal=(0, 0, 1),
        ).rotated((0, 0, angle))
    ))
Mate at ring's center rotated ``angle`` degrees.

:param angle: rotation around z-axis (unit: deg)
:type angle: :class:`float`
:return: mate in ring's center rotated about z-axis
:rtype: :class:`Mate <cqparts.constraint.Mate>`
def get_incidents(self):
    resp = requests.get(CRIME_URL, params=self._get_params(),
                        headers=self.headers)
    incidents = []
    data = resp.json()
    if ATTR_CRIMES not in data:
        return incidents
    for incident in data.get(ATTR_CRIMES):
        if _validate_incident_date_range(incident, self.days):
            if _incident_in_types(incident, self.incident_types):
                incidents.append(_incident_transform(incident))
    return incidents
Get incidents.
def update_user_label(self):
    self._user_label = _uniqueid_to_uniquetwig(self._bundle,
                                               self.unique_label)
    self._set_curly_label()
finds this parameter and gets the least_unique_twig from the bundle
def update_frequency(shell_ctx):
    frequency_path = os.path.join(shell_ctx.config.get_config_dir(),
                                  shell_ctx.config.get_frequency())
    if os.path.exists(frequency_path):
        with open(frequency_path, 'r') as freq:
            try:
                frequency = json.load(freq)
            except ValueError:
                frequency = {}
    else:
        frequency = {}

    with open(frequency_path, 'w') as freq:
        now = day_format(datetime.datetime.utcnow())
        val = frequency.get(now)
        frequency[now] = val + 1 if val else 1
        json.dump(frequency, freq)

    return frequency
updates the frequency from files
def add(self, *args, **kwargs):
    if self.start and self.start.state == 'done' \
            and kwargs.get('log_action') != 'done':
        raise ProgressLoggingError("Can't add -- process section is done")

    self.augment_args(args, kwargs)
    kwargs['log_action'] = kwargs.get('log_action', 'add')

    rec = Process(**kwargs)
    self._session.add(rec)
    self.rec = rec

    if self._logger:
        self._logger.info(self.rec.log_str)

    self._session.commit()
    self._ai_rec_id = None
    return self.rec.id
Add a new record to the section
def FilterArgsFromSemanticProtobuf(protobuf, kwargs):
    for descriptor in protobuf.type_infos:
        value = kwargs.pop(descriptor.name, None)
        if value is not None:
            setattr(protobuf, descriptor.name, value)
Assign kwargs to the protobuf, and remove them from the kwargs dict.
def clistream(reporter, *args, **kwargs):
    files = kwargs.get('files')
    encoding = kwargs.get('input_encoding', DEFAULT_ENCODING)
    processes = kwargs.get('processes')
    chunksize = kwargs.get('chunksize')

    from clitool.processor import CliHandler, Streamer

    Handler = kwargs.get('Handler')
    if Handler:
        warnings.warn('"Handler" keyword will be removed from next release.',
                      DeprecationWarning)
    else:
        Handler = CliHandler

    s = Streamer(reporter, processes=processes, *args)
    handler = Handler(s, kwargs.get('delimiter'))
    return handler.handle(files, encoding, chunksize)
Handle stream data on command line interface, and returns statistics of
success, error, and total amount.

More detailed information is available on underlying feature,
:mod:`clitool.processor`.

:param Handler: [DEPRECATED] Handler for file-like streams.
    (default: :class:`clitool.processor.CliHandler`)
:type Handler: object which supports `handle` method.
:param reporter: callback to report processed value
:type reporter: callable
:param delimiter: line delimiter [optional]
:type delimiter: string
:param args: functions to parse each item in the stream.
:param kwargs: keywords, including ``files`` and ``input_encoding``.
:rtype: list
def kill_all(self):
    logger.info('Job {0} killing all currently running tasks'.format(
        self.name))
    for task in self.tasks.itervalues():
        if task.started_at and not task.completed_at:
            task.kill()
Kill all of this job's currently running tasks.
def SensorMetatagsGet(self, sensor_id, namespace=None):
    ns = "default" if namespace is None else namespace
    if self.__SenseApiCall__('/sensors/{0}/metatags.json'.format(sensor_id),
                             'GET', parameters={'namespace': ns}):
        return True
    else:
        self.__error__ = "api call unsuccessful"
        return False
Retrieve the metatags of a sensor.

@param sensor_id (int) - Id of the sensor to retrieve metatags from
@param namespace (string) - Namespace for which to retrieve metatags.

@return (bool) - Boolean indicating whether SensorMetatagsGet was successful
def output_notebook(
        d3js_url="//d3js.org/d3.v3.min",
        requirejs_url="//cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js",
        html_template=None):
    if html_template is None:
        html_template = read_lib('html', 'setup')

    setup_html = populate_template(
        html_template,
        d3js=d3js_url,
        requirejs=requirejs_url
    )
    display_html(setup_html)
    return
Import required Javascript libraries to Jupyter Notebook.
def make_message(subject="", body="", from_email=None, to=None, bcc=None,
                 attachments=None, headers=None, priority=None):
    to = filter_recipient_list(to)
    bcc = filter_recipient_list(bcc)
    core_msg = EmailMessage(
        subject=subject,
        body=body,
        from_email=from_email,
        to=to,
        bcc=bcc,
        attachments=attachments,
        headers=headers
    )
    db_msg = Message(priority=priority)
    db_msg.email = core_msg
    return db_msg
Creates a simple message for the email parameters supplied.
The 'to' and 'bcc' lists are filtered using DontSendEntry.

If needed, the 'email' attribute can be set to any instance of EmailMessage
if e-mails with attachments etc. need to be supported.

Call 'save()' on the result when it is ready to be sent, and not before.
def _indent(x):
    lines = x.splitlines()
    for i, line in enumerate(lines):
        lines[i] = '    ' + line  # four spaces, per the docstring
    return '\n'.join(lines)
Indent a string by 4 characters.
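A quick check of _indent as defined above:

text = 'first line\nsecond line'
print(_indent(text))
#     first line
#     second line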
def next(self):
    while True:
        self.cur_idx += 1
        if self.__datasource.populate_iteration(self):
            return self
    raise StopIteration
Move to the next valid locus. Will only return valid loci or exit via StopIteration exception