Dataset columns: code (string, lengths 51 to 2.38k) · docstring (string, lengths 4 to 15.2k)
def get(self, name, acc=None, default=None):
    if acc in self.data['accounts'] and name in self.data['accounts'][acc]:
        return self.data['accounts'][acc][name]
    if name in self.data:
        return self.data[name]
    return default
Return the named config for the given account. If an account is given, first checks the account space for the name. If no account is given, or the name is not found in the account space, looks for the name in the global config space. If still not found, returns the default if given, otherwise ``None``.
def scale_joint_sfs_folded(s, n1, n2):
    out = np.empty_like(s)
    for i in range(s.shape[0]):
        for j in range(s.shape[1]):
            out[i, j] = s[i, j] * i * j * (n1 - i) * (n2 - j)
    return out
Scale a folded joint site frequency spectrum.

Parameters
----------
s : array_like, int, shape (m_chromosomes//2, n_chromosomes//2)
    Folded joint site frequency spectrum.
n1, n2 : int, optional
    The total number of chromosomes called in each population.

Returns
-------
joint_sfs_folded_scaled : ndarray, int, shape (m_chromosomes//2, n_chromosomes//2)
    Scaled folded joint site frequency spectrum.
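A quick sanity check of the scaling rule on a toy input (values here are hypothetical, and ``np`` is assumed to be NumPy): entry (i, j) is weighted by i * j * (n1 - i) * (n2 - j).

import numpy as np

s = np.array([[0, 1, 0],
              [2, 5, 1],
              [0, 3, 0]])  # toy folded joint SFS, n1 = n2 = 4
scaled = scale_joint_sfs_folded(s, 4, 4)
print(scaled[1, 1])  # 5 * 1 * 1 * 3 * 3 = 45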
def OnGridEditorCreated(self, event):
    editor = event.GetControl()
    editor.Bind(wx.EVT_KILL_FOCUS, self.OnGridEditorClosed)
    event.Skip()
Used to capture Editor close events
def atlas_peer_get_request_count(peer_hostport, peer_table=None):
    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            return 0
        count = 0
        for (t, r) in ptbl[peer_hostport]['time']:
            if r:
                count += 1
        return count
How many times have we contacted this peer?
def content(self):
    if not self._content_data:
        if is_seekable(self.file):
            with wpull.util.reset_file_offset(self.file):
                self._content_data = self.file.read()
        else:
            self._content_data = self.file.read()
    return self._content_data
Return the content of the file.

If this function is invoked, the contents of the entire file are read and cached.

Returns:
    ``bytes``: The entire content of the file.
def getall(self):
    interfaces_re = re.compile(r'(?<=^interface\s)([Et|Po].+)$', re.M)
    response = dict()
    for name in interfaces_re.findall(self.config):
        interface = self.get(name)
        if interface:
            response[name] = interface
    return response
Returns a dict object of all switchports.

This method will return all of the configured switchports as a dictionary object keyed by the interface identifier.

Returns:
    A Python dictionary object that represents all configured
    switchports in the current running configuration.
def insert(self, data):
    row = {key: self._default_entry for key in self._headers}
    row['_uid'] = self._get_new_uid()
    for key, val in data.items():
        if key in ('_uid', '_default'):
            logging.warn("Cannot manually set columns _uid or _default of a row! Given data: {0}".format(data))
            continue
        if not isinstance(val, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]]):
            raise Exception('Data type mismatch for column {0}. Expected: {1}, got: {2}'.format(
                key, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]], type(val)))
        row[key] = val
    self._table.append(row)
    self._save()
    return row['_uid']
Insert a row into the .csv file.

Parameters
----------
data : :obj:`dict`
    A dictionary mapping keys (header strings) to values.

Returns
-------
int
    The UID for the new row.

Raises
------
Exception
    If the value for a given header is not of the appropriate type.
def send_vdp_assoc(self, vsiid=None, mgrid=None, typeid=None,
                   typeid_ver=None, vsiid_frmt=vdp_const.VDP_VSIFRMT_UUID,
                   filter_frmt=vdp_const.VDP_FILTER_GIDMACVID, gid=0,
                   mac="", vlan=0, oui_id="", oui_data="", sw_resp=False):
    if sw_resp and filter_frmt == vdp_const.VDP_FILTER_GIDMACVID:
        reply = self.send_vdp_query_msg("assoc", mgrid, typeid, typeid_ver,
                                        vsiid_frmt, vsiid, filter_frmt,
                                        gid, mac, vlan, oui_id, oui_data)
        vlan_resp, fail_reason = self.get_vlan_from_query_reply(
            reply, vsiid, mac)
        if vlan_resp != constants.INVALID_VLAN:
            return vlan_resp, fail_reason
    reply = self.send_vdp_msg("assoc", mgrid, typeid, typeid_ver,
                              vsiid_frmt, vsiid, filter_frmt, gid, mac,
                              vlan, oui_id, oui_data, sw_resp)
    if sw_resp:
        vlan, fail_reason = self.get_vlan_from_associate_reply(
            reply, vsiid, mac)
        return vlan, fail_reason
    return None, None
Sends the VDP Associate Message.

Please refer to the VDP section of
http://www.ieee802.org/1/pages/802.1bg.html for more detailed information.

:param vsiid: VSI value; only UUID is supported for now
:param mgrid: MGR ID
:param typeid: Type ID
:param typeid_ver: Version of the Type ID
:param vsiid_frmt: Format of the following VSI argument
:param filter_frmt: Filter Format; only <GID,MAC,VID> is supported for now
:param gid: Group ID the vNIC belongs to
:param mac: MAC Address of the vNIC
:param vlan: VLAN of the vNIC
:param oui_id: OUI Type
:param oui_data: OUI Data
:param sw_resp: Flag indicating if a response is required from the daemon
:return vlan: VLAN value returned by vdptool, which in turn is given
    by the switch
def close(self):
    for impyla_connection in self._connections:
        impyla_connection.close()
    self._connections.clear()
    self.connection_pool.clear()
Close all open Impyla sessions
def main():
    if sys.argv[1:]:
        copy(' '.join(sys.argv[1:]))
    elif not sys.stdin.isatty():
        copy(''.join(sys.stdin.readlines()).rstrip('\n'))
    else:
        print(paste())
Entry point for cli.
def _CollectHistory_(lookupType, fromVal, toVal, using={}, pattern=''):
    histObj = {}
    if fromVal != toVal:
        histObj[lookupType] = {"from": fromVal, "to": toVal}
        if lookupType in ['deriveValue', 'deriveRegex', 'copyValue',
                          'normIncludes', 'deriveIncludes'] and using != '':
            histObj[lookupType]["using"] = using
        if lookupType in ['genericRegex', 'fieldSpecificRegex', 'normRegex',
                          'deriveRegex'] and pattern != '':
            histObj[lookupType]["pattern"] = pattern
    return histObj
Return a dictionary detailing what, if any, change was made to a record field

:param string lookupType: what cleaning rule made the change; one of:
    genericLookup, genericRegex, fieldSpecificLookup, fieldSpecificRegex,
    normLookup, normRegex, normIncludes, deriveValue, copyValue, deriveRegex
:param string fromVal: previous field value
:param string toVal: new string value
:param dict using: field values used to derive new values; only applicable
    for deriveValue, copyValue, deriveRegex
:param string pattern: which regex pattern was matched to make the change;
    only applicable for genericRegex, fieldSpecificRegex, deriveRegex
def unique(transactions):
    seen = set()
    return [x for x in transactions if not (x in seen or seen.add(x))]
Remove any duplicate entries.
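A quick illustration of the order-preserving behaviour: ``seen.add`` returns ``None``, so the ``or`` clause both records and admits each first occurrence.

txns = [3, 1, 3, 2, 1]
print(unique(txns))  # [3, 1, 2] -- first occurrences, in original order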
def _filter_defs_at_call_sites(self, defs):
    filtered_defs = LiveDefinitions()
    for variable, locs in defs.items():
        if isinstance(variable, SimRegisterVariable):
            if self.project.arch.name == 'X86':
                if variable.reg in (self.project.arch.registers['eax'][0],
                                    self.project.arch.registers['ecx'][0],
                                    self.project.arch.registers['edx'][0]):
                    continue
        filtered_defs.add_defs(variable, locs)
    return filtered_defs
If we are not tracing into the functions that are called in a real
execution, we should properly filter the defs to account for the behavior
of the skipped function at this call site.

This function is a WIP. See TODOs inside.

:param defs: the definitions to filter
:return: the filtered definitions
def footrule_dist(params1, params2=None):
    assert params2 is None or len(params1) == len(params2)
    ranks1 = rankdata(params1, method="average")
    if params2 is None:
        ranks2 = np.arange(1, len(params1) + 1, dtype=float)
    else:
        ranks2 = rankdata(params2, method="average")
    return np.sum(np.abs(ranks1 - ranks2))
r"""Compute Spearman's footrule distance between two models. This function computes Spearman's footrule distance between the rankings induced by two parameter vectors. Let :math:`\sigma_i` be the rank of item ``i`` in the model described by ``params1``, and :math:`\tau_i` be its rank in the model described by ``params2``. Spearman's footrule distance is defined by .. math:: \sum_{i=1}^N | \sigma_i - \tau_i | By convention, items with the lowest parameters are ranked first (i.e., sorted using the natural order). If the argument ``params2`` is ``None``, the second model is assumed to rank the items by their index: item ``0`` has rank 1, item ``1`` has rank 2, etc. Parameters ---------- params1 : array_like Parameters of the first model. params2 : array_like, optional Parameters of the second model. Returns ------- dist : float Spearman's footrule distance.
def variable(dims=1):
    if dims == 1:
        return Poly({(1,): 1}, dim=1, shape=())
    return Poly({
        tuple(indices): indices
        for indices in numpy.eye(dims, dtype=int)
    }, dim=dims, shape=(dims,))
Simple constructor to create single variables used to build polynomials.

Args:
    dims (int): Number of dimensions in the array.

Returns:
    (Poly): Polynomial array with unit components in each dimension.

Examples:
    >>> print(variable())
    q0
    >>> print(variable(3))
    [q0, q1, q2]
def log_learning_rates(self, model: Model, optimizer: torch.optim.Optimizer):
    if self._should_log_learning_rate:
        names = {param: name for name, param in model.named_parameters()}
        for group in optimizer.param_groups:
            if 'lr' not in group:
                continue
            rate = group['lr']
            for param in group['params']:
                effective_rate = rate * float(param.requires_grad)
                self.add_train_scalar("learning_rate/" + names[param], effective_rate)
Send current parameter specific learning rates to tensorboard
def set_section_config(self, section, content):
    if not self._config.has_section(section):
        self._config.add_section(section)
    for key in content:
        if isinstance(content[key], bool):
            content[key] = str(content[key]).lower()
        self._config.set(section, key, content[key])
    self._override_config[section] = content
Set a specific configuration section. It is not written to disk.

:param section: Section name
:param content: A dictionary with section content
def fetchAllUsers(self, rawResults=False):
    r = self.connection.session.get(self.URL)
    if r.status_code == 200:
        data = r.json()
        if rawResults:
            return data["result"]
        else:
            res = []
            for resu in data["result"]:
                u = User(self, resu)
                res.append(u)
            return res
    else:
        raise ConnectionError("Unable to get user list", r.url, r.status_code)
Returns all available users. If rawResults is True, the result will be a list of python dicts instead of User objects.
def get_string(self, origin=None):
    token = self.get().unescape()
    if not (token.is_identifier() or token.is_quoted_string()):
        raise dns.exception.SyntaxError('expecting a string')
    return token.value
Read the next token and interpret it as a string.

@raises dns.exception.SyntaxError:
@rtype: string
def render(self, fname=''):
    import qnet.visualization.circuit_pyx as circuit_visualization
    from tempfile import gettempdir
    from time import time, sleep

    if not fname:
        tmp_dir = gettempdir()
        # Hash the current time (not the function object) so that each
        # call gets a fresh temporary file name.
        fname = os.path.join(tmp_dir, "tmp_{}.png".format(hash(time())))
    if circuit_visualization.draw_circuit(self, fname):
        done = False
        for k in range(20):
            if os.path.exists(fname):
                done = True
                break
            else:
                sleep(.5)
        if done:
            return fname
    raise CannotVisualize()
Render the circuit expression and store the result in a file

Args:
    fname (str): Path to an image file to store the result in.

Returns:
    str: The path to the image file
def list_snapshots(domain=None, **kwargs):
    ret = dict()
    conn = __get_conn(**kwargs)
    for vm_domain in _get_domain(conn, *(domain and [domain] or list()), iterable=True):
        ret[vm_domain.name()] = [_parse_snapshot_description(snap)
                                 for snap in vm_domain.listAllSnapshots()] or 'N/A'
    conn.close()
    return ret
List available snapshots for certain vm or for all.

:param domain: domain name
:param connection: libvirt connection URI, overriding defaults

    .. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults

    .. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults

    .. versionadded:: 2019.2.0

.. versionadded:: 2016.3.0

CLI Example:

.. code-block:: bash

    salt '*' virt.list_snapshots
    salt '*' virt.list_snapshots <domain>
def validate_context(self, context):
    return all(
        [
            hasattr(context, attr)
            for attr in [
                "aws_request_id",
                "function_name",
                "function_version",
                "get_remaining_time_in_millis",
                "invoked_function_arn",
                "log_group_name",
                "log_stream_name",
                "memory_limit_in_mb",
            ]
        ]
    ) and callable(context.get_remaining_time_in_millis)
Checks to see if we're working with a valid lambda context object.

:returns: True if valid, False if not
:rtype: bool
def add_menu(self, name):
    if name in self._menus:
        raise exceptions.MenuAlreadyExists("Menu name {!r} already exists.".format(name))
    menu = self._menu.addMenu(name)
    self._menus[name] = menu
Add a top-level menu. The menu manager only allows one menu of the same name. However, it does not make sure that there are no pre-existing menus of that name.
def persist_experiment(experiment):
    from benchbuild.utils.schema import Experiment, Session

    session = Session()
    cfg_exp = experiment.id
    LOG.debug("Using experiment ID stored in config: %s", cfg_exp)
    exps = session.query(Experiment).filter(Experiment.id == cfg_exp)
    desc = str(CFG["experiment_description"])
    name = experiment.name
    if exps.count() == 0:
        newe = Experiment()
        newe.id = cfg_exp
        newe.name = name
        newe.description = desc
        session.add(newe)
        ret = newe
    else:
        exps.update({'name': name, 'description': desc})
        ret = exps.first()
    try:
        session.commit()
    except IntegrityError:
        session.rollback()
        persist_experiment(experiment)
    return (ret, session)
Persist this experiment in the benchbuild database.

Args:
    experiment: The experiment we want to persist.
def _get_ngrams(n, text):
    ngram_set = set()
    text_length = len(text)
    max_index_ngram_start = text_length - n
    for i in range(max_index_ngram_start + 1):
        ngram_set.add(tuple(text[i:i + n]))
    return ngram_set
Calculates n-grams.

Args:
    n: which n-grams to calculate
    text: An array of tokens

Returns:
    A set of n-grams
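For example, the bigrams of a short (hypothetical) token list:

tokens = "the cat sat on the mat".split()
print(_get_ngrams(2, tokens))
# {('the', 'cat'), ('cat', 'sat'), ('sat', 'on'), ('on', 'the'), ('the', 'mat')}
# (a set, so the display order may differ)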
def cycles(self):
    def walk_node(node, seen):
        if node in seen:
            yield (node,)
            return
        seen.add(node)
        for edge in self.edges[node]:
            for cycle in walk_node(edge, set(seen)):
                yield (node,) + cycle

    cycles = chain.from_iterable(
        (walk_node(node, set()) for node in self.nodes))
    shortest = set()
    for cycle in sorted(cycles, key=len):
        for el in shortest:
            if set(el).issubset(set(cycle)):
                break
        else:
            shortest.add(cycle)
    return shortest
Fairly expensive cycle detection algorithm. This method will return the
shortest unique cycles that were detected.

Debug usage may look something like:

    print("The following cycles were found:")
    for cycle in network.cycles():
        print("  ", " -> ".join(cycle))
def setProfile(self, name):
    if self.name or self.useBegin:
        if self.name == name:
            return
        raise VObjectError("This component already has a PROFILE or "
                           "uses BEGIN.")
    self.name = name.upper()
Assign a PROFILE to this unnamed component. Used by vCard, not by vCalendar.
def path(self):
    if isinstance(self.dir, Directory):
        return self.dir._path
    elif isinstance(self.dir, ROOT.TDirectory):
        return self.dir.GetPath()
    elif isinstance(self.dir, _FolderView):
        return self.dir.path()
    else:
        return str(self.dir)
Get the path of the wrapped folder
def get_current_venv():
    if 'VIRTUAL_ENV' in os.environ:
        venv = os.environ['VIRTUAL_ENV']
    elif os.path.exists('.python-version'):
        try:
            subprocess.check_output(['pyenv', 'help'], stderr=subprocess.STDOUT)
        except OSError:
            print("This directory seems to have pyenv's local venv, "
                  "but pyenv executable was not found.")
        with open('.python-version', 'r') as f:
            env_name = f.readline().strip()
        bin_path = subprocess.check_output(['pyenv', 'which', 'python']).decode('utf-8')
        venv = bin_path[:bin_path.rfind(env_name)] + env_name
    else:
        return None
    return venv
Returns the path to the current virtualenv
def _parse_include(self, include):
    ret = {}
    for item in include:
        if '.' in item:
            local, remote = item.split('.', 1)
        else:
            local = item
            remote = None
        ret.setdefault(local, [])
        if remote:
            ret[local].append(remote)
    return ret
Parse the querystring args or parent includes for includes.

:param include: Dict of query args or includes
def is_ipynb():
    try:
        shell = get_ipython().__class__.__name__
        if shell == 'ZMQInteractiveShell':
            return True
        elif shell == 'TerminalInteractiveShell':
            return False
        else:
            return False
    except NameError:
        return False
Return True if the module is running in IPython kernel, False if in
IPython shell or other Python shell.

Copied from: http://stackoverflow.com/a/37661854/1592810
There are other methods there too

>>> is_ipynb()
False
def sad(patch, cols, splits, clean=True):
    (spp_col, count_col), patch = \
        _get_cols(['spp_col', 'count_col'], cols, patch)
    full_spp_list = np.unique(patch.table[spp_col])
    result_list = []
    for substring, subpatch in _yield_subpatches(patch, splits):
        sad_list = []
        for spp in full_spp_list:
            this_spp = (subpatch.table[spp_col] == spp)
            count = np.sum(subpatch.table[count_col][this_spp])
            sad_list.append(count)
        subdf = pd.DataFrame({'spp': full_spp_list, 'y': sad_list})
        if clean:
            subdf = subdf[subdf['y'] > 0]
        result_list.append((substring, subdf))
    return result_list
Calculates an empirical species abundance distribution

Parameters
----------
{0}
clean : bool
    If True, all species with zero abundance are removed from SAD results.
    Default False.

Returns
-------
{1}
    Result has two columns: spp (species identifier) and y (individuals of
    that species).

Notes
-----
{2}

{3}

Examples
--------
{4}

>>> # Get the SAD of the full plot
>>> sad = meco.empirical.sad(pat, 'spp_col:spp; count_col:count', '')

>>> # Extract the SAD
>>> sad_df = sad[0][1]
>>> sad_df
       spp     y
0    arsp1     2
1     cabr    31
2   caspi1    58
3     chst     1
4    comp1     5
5     cran     4
6     crcr    65
7    crsp2    79
8     enfa     1
9     gnwe    41
10   grass  1110
11   lesp1     1
12    magl     1
13    mesp     6
14    mobe     4
15    phdi   210
16   plsp1     1
17    pypo    73
18    sasp     2
19    ticr   729
20   unsh1     1
21   unsp1    18
22   unsp3     1
23   unsp4     1

>>> # Get SAD for 4 subplots within the full plot and keep absent species
>>> # using clean = False
>>> sad_subplots = meco.empirical.sad(pat, 'spp_col:spp; count_col:count', splits = "row:2; column:2", clean=False)
>>> len(sad_subplots)
4

>>> # Look at SAD in one of the 4 cells
>>> sad_subplots[0]
('row>=-0.5; row<1.5; column>=-0.5; column<1.5',
        spp    y
 0    arsp1    0
 1     cabr    7
 2   caspi1    0
 3     chst    1
 4    comp1    1
 5     cran    3
 6     crcr   21
 7    crsp2   16
 8     enfa    0
 9     gnwe    8
 10   grass  236
 11   lesp1    0
 12    magl    0
 13    mesp    4
 14    mobe    0
 15    phdi   33
 16   plsp1    1
 17    pypo    8
 18    sasp    2
 19    ticr  317
 20   unsh1    1
 21   unsp1    0
 22   unsp3    1
 23   unsp4    1)

See http://www.macroeco.org/tutorial_macroeco.html for additional examples
and explanation
def get_statistics(self):
    return {
        'cumulative_elapsed_time': self.get_cumulative_elapsed_time(),
        'percentage': self.get_percentage(),
        'n_splits': self.get_n_splits(),
        'mean_per_split': self.get_mean_per_split(),
    }
Get all statistics as a dictionary.

Returns
-------
statistics : Dict[str, List]
def generate_slug(self, model_instance):
    queryset = model_instance.__class__._default_manager.all()
    lookup = {'%s__regex' % self.attname: r'^.{%s}$' % self.length}
    if queryset.filter(**lookup).count() >= len(self.chars) ** self.length:
        raise FieldError("No available slugs remaining.")
    slug = get_random_string(self.length, self.chars)
    if model_instance.pk:
        queryset = queryset.exclude(pk=model_instance.pk)
    kwargs = {}
    for params in model_instance._meta.unique_together:
        if self.attname in params:
            for param in params:
                kwargs[param] = getattr(model_instance, param, None)
    kwargs[self.attname] = slug
    while queryset.filter(**kwargs):
        slug = get_random_string(self.length, self.chars)
        kwargs[self.attname] = slug
    return slug
Returns a unique slug.
def aln_tree_seqs(seqs, input_handler=None, tree_type='neighborjoining',
                  params={}, add_seq_names=True,
                  WorkingDir=tempfile.gettempdir(), SuppressStderr=None,
                  SuppressStdout=None, max_hours=5.0, constructor=PhyloNode,
                  clean_up=True):
    params["-maxhours"] = max_hours
    if tree_type:
        params["-cluster2"] = tree_type
    params["-tree2"] = get_tmp_filename(WorkingDir)
    params["-out"] = get_tmp_filename(WorkingDir)
    muscle_res = muscle_seqs(seqs,
                             input_handler=input_handler,
                             params=params,
                             add_seq_names=add_seq_names,
                             WorkingDir=WorkingDir,
                             SuppressStderr=SuppressStderr,
                             SuppressStdout=SuppressStdout)
    tree = DndParser(muscle_res["Tree2Out"], constructor=constructor)
    aln = [line for line in muscle_res["MuscleOut"]]
    if clean_up:
        muscle_res.cleanUp()
    return tree, aln
Muscle align sequences and report tree from iteration2.

Unlike cluster_seqs, returns tree2, which is the tree made during the
second muscle iteration (it should be more accurate than the cluster from
the first iteration, which is made fast based on k-mer words).

seqs: either file name or list of sequence objects or list of strings or
    single multiline string containing sequences.
tree_type: can be either neighborjoining (default) or upgmb for UPGMA
clean_up: When true, will clean up output files
def update(self, global_size=None, lower_extent=None, upper_extent=None,
           description=None):
    if global_size is not None:
        self._global_size = global_size
    if lower_extent is not None:
        self._lower_extent = lower_extent
    if upper_extent is not None:
        self._upper_extent = upper_extent
    if description is not None:
        self._description = description
    self.validate()
Update the dimension properties

Parameters
----------
global_size : int
    Global dimension size (Default value = None)
lower_extent : int
    Lower dimension extent (Default value = None)
upper_extent : int
    Upper dimension extent (Default value = None)
description : str
    Dimension description (Default value = None)
def setup():
    index_template = BlogPost._index.as_template(ALIAS, PATTERN)
    index_template.save()
    if not BlogPost._index.exists():
        migrate(move_data=False)
Create the index template in elasticsearch specifying the mappings and any settings to be used. This can be run at any time, ideally at every new code deploy.
def initialize_path(self, path_num=None):
    for c in self.consumers:
        c.initialize_path(path_num)
    self.state = [c.state for c in self.consumers]
make the consumer_state ready for the next MC path :param int path_num:
def _readbin(fid, fmt='i', nwords=1, file64=False, unpack=True):
    if fmt in 'if':
        fmt += '8' if file64 else '4'
    elts = np.fromfile(fid, fmt, nwords)
    if unpack and len(elts) == 1:
        elts = elts[0]
    return elts
Read n words of 4 or 8 bytes with fmt format.

fmt: 'i' or 'f' or 'b' (integer or float or bytes)
4 or 8 bytes: depends on header

Return an array of elements if more than one element.

Default: read 1 word formatted as an integer.
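A usage sketch, assuming a hypothetical file whose layout starts with a 4-byte int count followed by that many 8-byte floats:

import numpy as np

with open("snapshot.dat", "rb") as fid:            # hypothetical file name
    nvals = _readbin(fid)                          # one 4-byte int -> scalar
    vals = _readbin(fid, 'f', nvals, file64=True)  # float64 array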
def common_package_action_options(f):
    @click.option(
        "-s",
        "--skip-errors",
        default=False,
        is_flag=True,
        help="Skip/ignore errors when copying packages.",
    )
    @click.option(
        "-W",
        "--no-wait-for-sync",
        default=False,
        is_flag=True,
        help="Don't wait for package synchronisation to complete before "
        "exiting.",
    )
    @click.option(
        "-I",
        "--wait-interval",
        default=5.0,
        type=float,
        show_default=True,
        help="The time in seconds to wait between checking synchronisation.",
    )
    @click.option(
        "--sync-attempts",
        default=3,
        type=int,
        help="Number of times to attempt package synchronisation. If the "
        "package fails the first time, the client will attempt to "
        "automatically resynchronise it.",
    )
    @click.pass_context
    @functools.wraps(f)
    def wrapper(ctx, *args, **kwargs):
        return ctx.invoke(f, *args, **kwargs)

    return wrapper
Add common options for package actions.
def get_cursors(source, spelling):
    cursors = []
    children = []
    if isinstance(source, Cursor):
        children = source.get_children()
    else:
        children = source.cursor.get_children()
    for cursor in children:
        if cursor.spelling == spelling:
            cursors.append(cursor)
        cursors.extend(get_cursors(cursor, spelling))
    return cursors
Obtain all cursors from a source object with a specific spelling. This provides a convenient search mechanism to find all cursors with specific spelling within a source. The first argument can be either a TranslationUnit or Cursor instance. If no cursors are found, an empty list is returned.
def get_available_plugins(self):
    available_plugins = []
    PluginData = namedtuple('PluginData', 'name, plugin_class, conf, is_allowed_to_fail')
    for plugin_request in self.plugins_conf:
        plugin_name = plugin_request['name']
        try:
            plugin_class = self.plugin_classes[plugin_name]
        except KeyError:
            if plugin_request.get('required', True):
                msg = ("no such plugin: '%s', did you set "
                       "the correct plugin type?") % plugin_name
                exc = PluginFailedException(msg)
                self.on_plugin_failed(plugin_name, exc)
                logger.error(msg)
                raise exc
            else:
                logger.warning("plugin '%s' requested but not available", plugin_name)
                continue
        plugin_is_allowed_to_fail = plugin_request.get(
            'is_allowed_to_fail',
            getattr(plugin_class, "is_allowed_to_fail", True))
        plugin_conf = plugin_request.get("args", {})
        plugin = PluginData(plugin_name, plugin_class, plugin_conf,
                            plugin_is_allowed_to_fail)
        available_plugins.append(plugin)
    return available_plugins
Check requested plugins' availability and handle missing plugins.

:return: list of namedtuples, runnable plugins data
def Validate(self, value):
    if value is None:
        return None
    if not isinstance(value, self.rdfclass):
        try:
            r = self.rdfclass()
            r.FromDict(value)
            return r
        except (AttributeError, TypeError, rdfvalue.InitializeError):
            raise TypeValueError("Value for arg %s should be an %s" %
                                 (self.name, self.rdfclass.__name__))
    return value
Validate the value.

Args:
    value: Value is expected to be a dict-like object that a given
        RDFStruct can be initialized from.

Raises:
    TypeValueError: If the value is not a valid dict-like object that a
        given RDFStruct can be initialized from.

Returns:
    A valid instance of self.rdfclass or None.
def create_from_name_and_dictionary(self, name, datas):
    category = ObjectCategory(name)
    self.set_common_datas(category, name, datas)
    if "order" in datas:
        category.order = int(datas["order"])
    return category
Return a populated object Category from dictionary datas
def schedule_job(date, callable_name, content_object=None, expires='7d',
                 args=(), kwargs={}):
    assert callable_name and isinstance(callable_name, basestring), callable_name
    if isinstance(date, basestring):
        date = parse_timedelta(date)
    if isinstance(date, datetime.timedelta):
        date = datetime.datetime.now() + date
    job = ScheduledJob(callable_name=callable_name, time_slot_start=date)
    if expires:
        if isinstance(expires, basestring):
            expires = parse_timedelta(expires)
        if isinstance(expires, datetime.timedelta):
            expires = date + expires
        job.time_slot_end = expires
    if content_object:
        job.content_object = content_object
    job.args = args
    job.kwargs = kwargs
    job.save()
    return job
Schedule a job.

`date` may be a datetime.datetime or a datetime.timedelta.

The callable to be executed may be specified in two ways:
- set `callable_name` to an identifier ('mypackage.myapp.some_function').
- specify an instance of a model as content_object and set `callable_name`
  to a method name ('do_job')

The scheduler will not attempt to run the job if its expiration date has
passed.
def prepare_for_reraise(error, exc_info=None):
    if not hasattr(error, "_type_"):
        if exc_info is None:
            exc_info = sys.exc_info()
        error._type_ = exc_info[0]
        error._traceback = exc_info[2]
    return error
Prepares the exception for re-raising with reraise method. This method attaches type and traceback info to the error object so that reraise can properly reraise it using this info.
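A minimal Python 3 sketch of the companion reraise step (the actual helper is not shown in this snippet and may differ), using the traceback stashed by prepare_for_reraise:

def reraise(error):
    # Re-raise with the traceback captured by prepare_for_reraise
    # (sketch only; a Python 2 variant would need six.reraise-style logic).
    raise error.with_traceback(error._traceback)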
def call(self, rs, name, user, fields):
    if name not in self._objects:
        return '[ERR: Object Not Found]'
    func = self._objects[name]
    reply = ''
    try:
        reply = func(rs, fields)
        if reply is None:
            reply = ''
    except Exception as e:
        raise PythonObjectError("Error executing Python object: " + str(e))
    return text_type(reply)
Invoke a previously loaded object.

:param RiveScript rs: the parent RiveScript instance.
:param str name: The name of the object macro to be called.
:param str user: The user ID invoking the object macro.
:param []str fields: Array of words sent as the object's arguments.

:return str: The output of the object macro.
def _register_key(fingerprint, gpg):
    for private_key in gpg.list_keys(True):
        try:
            if str(fingerprint) == private_key['fingerprint']:
                config["gpg_key_fingerprint"] = \
                    repr(private_key['fingerprint'])
        except KeyError:
            pass
Registers key in config
def lazyread(f, delimiter):
    try:
        running = f.read(0)
    except Exception as e:
        if e.__class__.__name__ == 'IncompleteReadError':
            running = b''
        else:
            raise
    while True:
        new_data = f.read(1024)
        if not new_data:
            yield running
            return
        running += new_data
        while delimiter in running:
            curr, running = running.split(delimiter, 1)
            yield curr + delimiter
Generator which continually reads ``f`` to the next instance of
``delimiter``.

This allows you to do batch processing on the contents of ``f`` without
loading the entire file into memory.

:param f: Any file-like object which has a ``.read()`` method.
:param delimiter: Delimiter on which to split up the file.
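For instance, streaming a large log file chunk by chunk (the file name here is hypothetical); note that the final chunk may lack the trailing delimiter:

with open("access.log", "rb") as f:            # hypothetical file
    for chunk in lazyread(f, delimiter=b'\n'):
        print(chunk)  # complete chunks keep their trailing b'\n'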
def getBoundsColor(self, nNumOutputColors, flCollisionBoundsFadeDistance):
    fn = self.function_table.getBoundsColor
    pOutputColorArray = HmdColor_t()
    pOutputCameraColor = HmdColor_t()
    fn(byref(pOutputColorArray), nNumOutputColors,
       flCollisionBoundsFadeDistance, byref(pOutputCameraColor))
    return pOutputColorArray, pOutputCameraColor
Get the current chaperone bounds draw color and brightness
def split_by_percent(self, spin_systems_list):
    chunk_sizes = [int((i * len(spin_systems_list)) / 100) for i in self.plsplit]
    if sum(chunk_sizes) < len(spin_systems_list):
        difference = len(spin_systems_list) - sum(chunk_sizes)
        chunk_sizes[chunk_sizes.index(min(chunk_sizes))] += difference
    assert sum(chunk_sizes) == len(spin_systems_list), \
        "sum of chunk sizes must be equal to spin systems list length."
    intervals = self.calculate_intervals(chunk_sizes)
    chunks_of_spin_systems_by_percentage = [itertools.islice(spin_systems_list, *interval)
                                            for interval in intervals]
    return chunks_of_spin_systems_by_percentage
Split list of spin systems by specified percentages.

:param list spin_systems_list: List of spin systems.
:return: List of spin systems divided into sub-lists corresponding to
    specified split percentages.
:rtype: :py:class:`list`
def port_type(arg):
    error_msg = "{0} is not a valid port".format(repr(arg))
    try:
        arg = ast.literal_eval(arg)
    except ValueError:
        raise argparse.ArgumentTypeError(error_msg)
    if arg < 0 or arg > 65535:
        raise argparse.ArgumentTypeError(error_msg)
    return arg
An argparse type representing a tcp or udp port number.
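Typical argparse wiring for this type callback:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--port", type=port_type)
args = parser.parse_args(["--port", "8080"])
print(args.port)  # 8080 (an int); out-of-range values raise an error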
def relevant_kwargs(function, exclude_keys='self', exclude_values=None,
                    extra_values=None):
    args = function_args(function)
    locals_values = function_kwargs(function_index=2, exclude_keys=exclude_keys)
    if extra_values:
        locals_values.update(extra_values)
    return {k: v for k, v in locals_values.items() if k in args}
This will return a dictionary of local variables that are parameters to
the function provided in the arg.

Example:
    function(**relevant_kwargs(function))

:param function: function to select parameters for
:param exclude_keys: str,list,func if not a function it will be converted
    into a function, defaults to excluding 'self'
:param exclude_values: obj,list,func if not a function it will be converted
    into one, defaults to excluding None
:param extra_values: dict of other values to include with local
:return: dict of local variables for the function
def list_images(self):
    response = []
    for im in self.d.images():
        try:
            i_name, tag = parse_reference(im["RepoTags"][0])
        except (IndexError, TypeError):
            i_name, tag = None, None
        d_im = DockerImage(i_name, tag=tag, identifier=im["Id"],
                           pull_policy=DockerImagePullPolicy.NEVER)
        inspect_to_metadata(d_im.metadata, im)
        response.append(d_im)
    return response
List all available docker images.

Image objects returned from this method will contain a limited amount of
metadata in property `short_metadata`. These are just a subset of
`.inspect()`, but don't require an API call against dockerd.

:return: collection of instances of :class:`conu.DockerImage`
def int_subtype(i, bits, signed):
    "returns integer i after checking that it fits in the given number of bits."
    if not isinstance(i, int):
        raise TypeError("value is not int: %s" % repr(i))
    if signed:
        lo = -1 << bits - 1
        hi = (1 << bits - 1) - 1
    else:
        lo = 0
        hi = (1 << bits) - 1
    if i < lo or i > hi:
        raise ValueError(
            "%d not in range of %s %d-bit value"
            % (i, ("unsigned", "signed")[signed], bits))
    return i
returns integer i after checking that it fits in the given number of bits.
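Quick range checks for the 8-bit cases:

print(int_subtype(255, 8, False))  # fits in unsigned 8-bit (0..255): 255
print(int_subtype(-128, 8, True))  # fits in signed 8-bit (-128..127): -128
int_subtype(256, 8, False)         # raises ValueError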
def get_multi(self, keys, missing=None, deferred=None, transaction=None,
              eventual=False):
    if not keys:
        return []
    ids = set(key.project for key in keys)
    for current_id in ids:
        if current_id != self.project:
            raise ValueError("Keys do not match project")
    if transaction is None:
        transaction = self.current_transaction
    entity_pbs = _extended_lookup(
        datastore_api=self._datastore_api,
        project=self.project,
        key_pbs=[key.to_protobuf() for key in keys],
        eventual=eventual,
        missing=missing,
        deferred=deferred,
        transaction_id=transaction and transaction.id,
    )
    if missing is not None:
        missing[:] = [
            helpers.entity_from_protobuf(missed_pb) for missed_pb in missing
        ]
    if deferred is not None:
        deferred[:] = [
            helpers.key_from_protobuf(deferred_pb) for deferred_pb in deferred
        ]
    return [helpers.entity_from_protobuf(entity_pb) for entity_pb in entity_pbs]
Retrieve entities, along with their attributes.

:type keys: list of :class:`google.cloud.datastore.key.Key`
:param keys: The keys to be retrieved from the datastore.

:type missing: list
:param missing: (Optional) If a list is passed, the key-only entities
    returned by the backend as "missing" will be copied into it. If the
    list is not empty, an error will occur.

:type deferred: list
:param deferred: (Optional) If a list is passed, the keys returned by the
    backend as "deferred" will be copied into it. If the list is not
    empty, an error will occur.

:type transaction: :class:`~google.cloud.datastore.transaction.Transaction`
:param transaction: (Optional) Transaction to use for read consistency.
    If not passed, uses current transaction, if set.

:type eventual: bool
:param eventual: (Optional) Defaults to strongly consistent (False).
    Setting True will use eventual consistency, but cannot be used inside
    a transaction or will raise ValueError.

:rtype: list of :class:`google.cloud.datastore.entity.Entity`
:returns: The requested entities.
:raises: :class:`ValueError` if one or more of ``keys`` has a project
    which does not match our project.
:raises: :class:`ValueError` if eventual is True and in a transaction.
def set_transmit_mode(self, mode):
    self.api.call_rc('port setTransmitMode {} {}'.format(mode, self.uri))
set port transmit mode

:param mode: request transmit mode
:type mode: ixexplorer.ixe_port.IxeTransmitMode
def public_url(self):
    return "{storage_base_url}/{bucket_name}/{quoted_name}".format(
        storage_base_url=_API_ACCESS_ENDPOINT,
        bucket_name=self.bucket.name,
        quoted_name=quote(self.name.encode("utf-8")),
    )
The public URL for this blob.

Use :meth:`make_public` to enable anonymous access via the returned URL.

:rtype: `string`
:returns: The public URL for this blob.
def count(self):
    sql = u'SELECT count() FROM (%s)' % self.as_sql()
    raw = self._database.raw(sql)
    return int(raw) if raw else 0
Returns the number of rows after aggregation.
def parse_yaml_node(self, y):
    if 'participant' not in y:
        raise InvalidParticipantNodeError
    self.target_component = TargetComponent().parse_yaml_node(y['participant'])
    return self
Parse a YAML specification of a participant into this object.
def get_search_results(self, request, queryset, search_term):
    def construct_search(field_name):
        if field_name.startswith('^'):
            return "%s__istartswith" % field_name[1:]
        elif field_name.startswith('='):
            return "%s__iexact" % field_name[1:]
        elif field_name.startswith('@'):
            return "%s__search" % field_name[1:]
        else:
            return "%s__icontains" % field_name

    use_distinct = False
    if self.search_fields and search_term:
        orm_lookups = [construct_search(str(search_field))
                       for search_field in self.search_fields]
        for bit in search_term.split():
            or_queries = [models.Q(**{orm_lookup: bit})
                          for orm_lookup in orm_lookups]
            queryset = queryset.filter(reduce(operator.or_, or_queries))
        if not use_distinct:
            for search_spec in orm_lookups:
                if lookup_needs_distinct(self.opts, search_spec):
                    use_distinct = True
                    break
    return queryset, use_distinct
Returns a tuple containing a queryset to implement the search, and a boolean indicating if the results may contain duplicates.
def determine_emitter(self, request, *args, **kwargs):
    em = kwargs.pop('emitter_format', None)
    if not em:
        em = request.GET.get('format', 'json')
    return em
Function for determining which emitter to use for output. It lives here so
you can easily subclass `Resource` in order to change how emission is
detected.

You could also check for the `Accept` HTTP header here, since that pretty
much makes sense. Refer to `Mimer` for that as well.
def _scheduling_block_config(num_blocks=5, start_sbi_id=0, start_pb_id=0,
                             project='sip'):
    pb_id = start_pb_id
    for sb_id, sbi_id in _scheduling_block_ids(num_blocks, start_sbi_id,
                                               project):
        sub_array_id = 'subarray-{:02d}'.format(random.choice(range(5)))
        config = dict(id=sbi_id,
                      sched_block_id=sb_id,
                      sub_array_id=sub_array_id,
                      processing_blocks=_generate_processing_blocks(pb_id))
        pb_id += len(config['processing_blocks'])
        yield config
Return a Scheduling Block Configuration dictionary
def add(self, operator):
    if not isinstance(operator, (BaseTaskTransformer, FeatureExtractor)):
        raise ParameterError('operator={} must be one of '
                             '(BaseTaskTransformer, FeatureExtractor)'
                             .format(operator))
    if operator.name in self.opmap:
        raise ParameterError('Duplicate operator name detected: '
                             '{}'.format(operator))
    super(Pump, self).add(operator)
    self.opmap[operator.name] = operator
    self.ops.append(operator)
Add an operation to this pump.

Parameters
----------
operator : BaseTaskTransformer, FeatureExtractor
    The operation to add

Raises
------
ParameterError
    if `op` is not of a correct type
def sys_call(cmd):
    p = subprocess.Popen(cmd,
                         shell=True,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         close_fds=True)
    return p.stdout.readlines(), p.stderr.readlines()
Execute cmd and capture stdout and stderr

:param cmd: command to be executed
:return: (stdout, stderr)
def egg_name(self):
    filename = "%s-%s-py%s" % (
        to_filename(self.project_name), to_filename(self.version),
        self.py_version or PY_MAJOR
    )
    if self.platform:
        filename += '-' + self.platform
    return filename
Return what this distribution's standard .egg filename should be
def flipwritable(fn, mode=None):
    if os.access(fn, os.W_OK):
        return None
    old_mode = os.stat(fn).st_mode
    os.chmod(fn, stat.S_IWRITE | old_mode)
    return old_mode
Flip the writability of a file and return the old mode. Returns None if the file is already writable.
def delta_to_str(rd):
    parts = []
    if rd.days > 0:
        parts.append("%d day%s" % (rd.days, plural(rd.days)))
    clock_parts = []
    if rd.hours > 0:
        clock_parts.append("%02d" % rd.hours)
    if rd.minutes > 0 or rd.hours > 0:
        clock_parts.append("%02d" % rd.minutes)
    if rd.seconds > 0 or rd.minutes > 0 or rd.hours > 0:
        clock_parts.append("%02d" % rd.seconds)
    if clock_parts:
        parts.append(":".join(clock_parts))
    return " ".join(parts)
Convert a relativedelta to a human-readable string
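For example (assuming the module's plural() helper returns "s" for counts other than one):

from dateutil.relativedelta import relativedelta

rd = relativedelta(days=2, hours=3, minutes=4, seconds=5)
print(delta_to_str(rd))  # "2 days 03:04:05"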
def delete_line(self):
    cursor = self.textCursor()
    if self.has_selected_text():
        self.extend_selection_to_complete_lines()
        start_pos, end_pos = cursor.selectionStart(), cursor.selectionEnd()
        cursor.setPosition(start_pos)
    else:
        start_pos = end_pos = cursor.position()
    cursor.beginEditBlock()
    cursor.setPosition(start_pos)
    cursor.movePosition(QTextCursor.StartOfBlock)
    while cursor.position() <= end_pos:
        cursor.movePosition(QTextCursor.EndOfBlock, QTextCursor.KeepAnchor)
        if cursor.atEnd():
            break
        cursor.movePosition(QTextCursor.NextBlock, QTextCursor.KeepAnchor)
    cursor.removeSelectedText()
    cursor.endEditBlock()
    self.ensureCursorVisible()
Delete current line
def brpop(self, keys, timeout=0):
    if timeout is None:
        timeout = 0
    keys = list_or_args(keys, None)
    keys.append(timeout)
    return self.execute_command('BRPOP', *keys)
RPOP a value off of the first non-empty list named in the ``keys`` list. If none of the lists in ``keys`` has a value to RPOP, then block for ``timeout`` seconds, or until a value gets pushed on to one of the lists. If timeout is 0, then block indefinitely.
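Typical client-side usage through redis-py:

import redis

r = redis.Redis()
# Block up to 5 seconds waiting on either list; returns a (key, value)
# pair, or None if the timeout expires.
item = r.brpop(['jobs:high', 'jobs:low'], timeout=5)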
def get_raw_query(self):
    query = self.base_query.copy()
    search_query = self.search_query.copy()
    query.update(search_query)
    sorting = self.resolve_sorting(query)
    query.update(sorting)
    catalog = api.get_tool(self.catalog_name)
    sort_on = query.get("sort_on", None)
    if sort_on and not self.is_sortable_index(sort_on, catalog):
        del(query["sort_on"])
    return query
Returns the raw query to use for current search, based on the base query + update query
def get_repos(path):
    p = str(path)
    ret = []
    if not os.path.exists(p):
        return ret
    for d in os.listdir(p):
        pd = os.path.join(p, d)
        if os.path.exists(pd) and is_repo(pd):
            ret.append(Local(pd))
    return ret
Returns list of found repositories.

:return: List of grit.Local objects
def alphabetize_attributes(self):
    self.attributes.sort(key=lambda name: (name == self.class_attr_name, name))
Orders attributes names alphabetically, except for the class attribute, which is kept last.
def exclude(self, col: str, val):
    try:
        self.df = self.df[self.df[col] != val]
    except Exception as e:
        self.err(e, "Can not exclude rows based on value " + str(val))
Delete rows based on value

:param col: column name
:type col: str
:param val: value to delete
:type val: any

:example: ``ds.exclude("Col 1", "value")``
def match_function_pattern(self, first, rest=None, least=1, offset=0):
    if not self.has_space(offset=offset):
        return ''
    firstchar = self.string[self.pos + offset]
    if not first(firstchar):
        return ''
    output = [firstchar]
    pattern = first if rest is None else rest
    for char in self.string[self.pos + offset + 1:]:
        if pattern(char):
            output.append(char)
        else:
            break
    if len(output) < least:
        return ''
    return ''.join(output)
Match each char sequentially from the current SourceString position until
the pattern doesn't match, and return all matches.

Integer argument least defines a minimum amount of chars that must be
matched.

This version takes functions instead of string patterns. Each function
must take one argument, a string, and return a value that can be
evaluated as True or False.

If rest is defined then first is used only to match the first char and
the rest of the chars are matched against rest.
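For example, with a hypothetical SourceString instance positioned at the start of "foo42 bar", an identifier-like token can be matched using built-in predicates:

ss = SourceString("foo42 bar")  # hypothetical instance at position 0
# First char must be alphabetic, the rest alphanumeric.
print(ss.match_function_pattern(str.isalpha, str.isalnum))  # 'foo42'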
def rename(self, req, parent, name, newparent, newname):
    self.reply_err(req, errno.EROFS)
Rename a file

Valid replies:
    reply_err
def replace_characters(self, text, characters, replacement=''):
    if not characters:
        return text
    characters = ''.join(sorted(characters))
    if characters in self._characters_regexes:
        characters_regex = self._characters_regexes[characters]
    else:
        characters_regex = re.compile("[%s]" % re.escape(characters))
        self._characters_regexes[characters] = characters_regex
    return characters_regex.sub(replacement, text)
Remove characters from text.

Removes custom characters from input text or replaces them with a string
if specified.

Args:
    text: The text to be processed.
    characters: Characters that will be replaced.
    replacement: New text that will replace the custom characters.

Returns:
    The text without the given characters.
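A usage sketch, assuming a hypothetical owner class whose instances initialise the ``_characters_regexes`` cache dict:

cleaner = TextCleaner()  # hypothetical class exposing replace_characters
print(cleaner.replace_characters("he!!o, world?", "!?,"))  # 'heo world'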
def guest_get_console_output(self, userid):
    action = "get the console output of guest '%s'" % userid
    with zvmutils.log_and_reraise_sdkbase_error(action):
        output = self._vmops.get_console_output(userid)
    return output
Get the console output of the guest virtual machine.

:param str userid: the user id of the vm
:returns: console log string
:rtype: str
def _unpack(self, record, key, expected):
    attrs = record.get(key)
    if attrs is None:
        return
    obj = unpack_from_dynamodb(
        attrs=attrs,
        expected=expected,
        model=self.model,
        engine=self.engine
    )
    object_loaded.send(self.engine, engine=self.engine, obj=obj)
    record[key] = obj
Replaces the attr dict at the given key with an instance of a Model
def removi(item, inset):
    assert isinstance(inset, stypes.SpiceCell)
    assert inset.dtype == 2
    item = ctypes.c_int(item)
    libspice.removi_c(item, ctypes.byref(inset))
Remove an item from an integer set.

http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/removi_c.html

:param item: Item to be removed.
:type item: int
:param inset: Set to be updated.
:type inset: spiceypy.utils.support_types.SpiceCell
def get_channel_info(self):
    self.request(EP_GET_CHANNEL_INFO)
    return {} if self.last_response is None else self.last_response.get('payload')
Get the current channel info.
def register(self, endpoint, scheme=None, handler=None, **kwargs):
    assert endpoint is not None, "endpoint is required"

    if endpoint is TChannel.FALLBACK:
        decorator = partial(self._handler.register, TChannel.FALLBACK)
        if handler is not None:
            return decorator(handler)
        else:
            return decorator

    if not scheme:
        if inspect.ismodule(endpoint):
            scheme = "thrift"
        else:
            scheme = "raw"
    scheme = scheme.lower()

    if scheme == 'thrift':
        decorator = partial(self._register_thrift, endpoint, **kwargs)
    else:
        decorator = partial(
            self._register_simple, endpoint, scheme, **kwargs
        )

    if handler is not None:
        return decorator(handler)
    else:
        return decorator
Register a handler with this TChannel.

This may be used as a decorator:

.. code-block:: python

    app = TChannel(name='bar')

    @app.register("hello", "json")
    def hello_handler(request, response):
        params = yield request.get_body()

Or as a function:

.. code-block:: python

    # Here we have a Thrift handler for `Foo::hello`
    app.register(Foo, "hello", hello_thrift_handler)

:param endpoint:
    Name of the endpoint being registered. This should be a reference to
    the Thrift-generated module if this is a Thrift endpoint. It may also
    be ``TChannel.FALLBACK`` if it's intended to be a catch-all endpoint.
:param scheme:
    Name of the scheme under which the endpoint is being registered. One
    of "raw", "json", and "thrift". Defaults to "raw", except if
    "endpoint" was a module, in which case this defaults to "thrift".
:param handler:
    If specified, this is the handler function. If ignored, this function
    returns a decorator that can be used to register the handler function.
:returns:
    If ``handler`` was specified, this returns ``handler``. Otherwise, it
    returns a decorator that can be applied to a function to register it
    as the handler.
def converter(input_string, block_size=2):
    sentences = textprocessing.getSentences(input_string)
    blocks = textprocessing.getBlocks(sentences, block_size)
    parse.makeIdentifiers(blocks)
The cli tool as a built-in function.

:param input_string: A string that should be converted to a set of facts.
:type input_string: str.
:param block_size: Optional block size of sentences (Default: 2).
:type block_size: int.
def _process_response(self, response):
    rsp_lines = response.rstrip('\r\n').split('\r')
    if len(rsp_lines) > 0:
        echoed_command = rsp_lines[0]
        del rsp_lines[0]
    else:
        echoed_command = None
    if len(rsp_lines) > 0 and \
            rsp_lines[0] in ('*INVALID_ADDRESS', '*INVALID_DATA',
                             '*INVALID_DATA_HIGH', '*INVALID_DATA_LOW',
                             '*UNDEFINED_LABEL'):
        err = rsp_lines[0][1:]
        del rsp_lines[0]
    else:
        err = None
    return [response, echoed_command, err, rsp_lines]
Processes a response from the drive.

Processes the response returned from the drive. It is broken down into
the echoed command (drive echoes it back), any error returned by the
drive (leading '*' is stripped), and the different lines of the response.

Parameters
----------
response : str
    The response returned by the drive.

Returns
-------
processed_response : list
    A 4-element ``list``. The elements, in order, are `response`
    (``str``), the echoed command (``str``), any error response (``None``
    if none, or the ``str`` of the error), and the lines of the response
    that are not the echo or error line (``list`` of ``str`` with
    newlines stripped).
def from_config(self, k, v):
    if k == "setup":
        return from_commandline(v, classname=to_commandline(datagen.DataGenerator()))
    return super(DataGenerator, self).from_config(k, v)
Hook method that allows converting values from the dictionary.

:param k: the key in the dictionary
:type k: str
:param v: the value
:type v: object
:return: the potentially parsed value
:rtype: object
def rollback_migration(engine, connection, path, migration_to_rollback):
    migrations_applied = get_migrations_applied(engine, connection)
    if not is_applied(migrations_applied, migration_to_rollback):
        raise RuntimeError(
            '`%s` is not in the list of previously applied migrations.'
            % (migration_to_rollback))
    file = path + migration_to_rollback + '/down.sql'
    check_exists(file)
    basename = os.path.basename(os.path.dirname(file))
    source = get_migration_source(file)
    run_migration(connection, source, engine)
    delete_migration(connection, basename)
    print(' -> Migration `%s` has been rolled back' % (basename))
    return True
Rollback a migration
def _as_dict(self) -> Dict[str, JsonTypes]:
    return {k: v._as_dict if isinstance(v, JsonObj)
            else self.__as_list(v) if isinstance(v, list)
            else v
            for k, v in self.__dict__.items()}
Convert a JsonObj into a straight dictionary

:return: dictionary that corresponds to the json object
def image_coarsen(xlevel=0, ylevel=0, image="auto", method='average'):
    if image == "auto":
        image = _pylab.gca().images[0]
    Z = _n.array(image.get_array())
    global image_undo_list
    image_undo_list.append([image, Z])
    if len(image_undo_list) > 10:
        image_undo_list.pop(0)
    image.set_array(_fun.coarsen_matrix(Z, ylevel, xlevel, method))
    _pylab.draw()
This will coarsen the image data by binning each xlevel+1 points along the
x-axis and each ylevel+1 points along the y-axis.

method can be 'average', 'min', or 'max'.
def make_type_consistent(s1, s2):
    if isinstance(s1, str) and isinstance(s2, str):
        return s1, s2
    elif isinstance(s1, unicode) and isinstance(s2, unicode):
        return s1, s2
    else:
        return unicode(s1), unicode(s2)
If the two objects are not both str instances or both unicode instances, force them both to unicode.
def write_metadata(self, handler):
    if self.metadata is not None:
        handler.write_metadata(self.cname, self.metadata)
Set the metadata.
async def read_frame(self) -> DataFrame:
    if self._data_frames.qsize() == 0 and self.closed:
        raise StreamConsumedError(self.id)
    frame = await self._data_frames.get()
    self._data_frames.task_done()
    if frame is None:
        raise StreamConsumedError(self.id)
    return frame
Read a single frame from the local buffer.

If no frames are available but the stream is still open, waits until more
frames arrive. Otherwise, raises StreamConsumedError.

When a stream is closed, a single `None` is added to the data frame Queue
to wake up any waiting `read_frame` coroutines.
def _write(self, session, openFile, replaceParamFile):
    timeSeries = self.timeSeries
    numTS = len(timeSeries)
    valList = []
    for tsNum, ts in enumerate(timeSeries):
        values = ts.values
        for value in values:
            valDict = {'time': value.simTime,
                       'tsNum': tsNum,
                       'value': value.value}
            valList.append(valDict)
    result = pivot(valList, ('time',), ('tsNum',), 'value')
    for line in result:
        valString = ''
        for n in range(0, numTS):
            val = '%.6f' % line[(n,)]
            valString = '%s%s%s' % (valString,
                                    ' ' * (13 - len(str(val))),
                                    val)
        openFile.write(' %.8f%s\n' % (line['time'], valString))
Generic Time Series Write to File Method
def destroy_volume_snapshot(volume_id, snapshot_id, profile, **libcloud_kwargs):
    conn = _get_driver(profile=profile)
    libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    volume = _get_by_id(conn.list_volumes(), volume_id)
    snapshot = _get_by_id(conn.list_volume_snapshots(volume), snapshot_id)
    return conn.destroy_volume_snapshot(snapshot, **libcloud_kwargs)
Destroy a volume snapshot.

:param volume_id: Volume ID from which the snapshot belongs
:type volume_id: ``str``

:param snapshot_id: Volume Snapshot ID from which to destroy
:type snapshot_id: ``str``

:param profile: The profile key
:type profile: ``str``

:param libcloud_kwargs: Extra arguments for the driver's
    destroy_volume_snapshot method
:type libcloud_kwargs: ``dict``

CLI Example:

.. code-block:: bash

    salt myminion libcloud_compute.destroy_volume_snapshot snap1 profile1
def group_join(self, inner_iterable, outer_key_selector=identity,
               inner_key_selector=identity,
               result_selector=lambda outer, grouping: grouping):
    if self.closed():
        raise ValueError("Attempt to call group_join() on a closed Queryable.")
    if not is_iterable(inner_iterable):
        raise TypeError("Cannot compute group_join() with inner_iterable of non-iterable {type}".format(
            type=str(type(inner_iterable))[7: -1]))
    if not is_callable(outer_key_selector):
        raise TypeError("group_join() parameter outer_key_selector={outer_key_selector} is not callable".format(
            outer_key_selector=repr(outer_key_selector)))
    if not is_callable(inner_key_selector):
        raise TypeError("group_join() parameter inner_key_selector={inner_key_selector} is not callable".format(
            inner_key_selector=repr(inner_key_selector)))
    if not is_callable(result_selector):
        raise TypeError("group_join() parameter result_selector={result_selector} is not callable".format(
            result_selector=repr(result_selector)))
    return self._create(self._generate_group_join_result(
        inner_iterable, outer_key_selector, inner_key_selector,
        result_selector))
Match elements of two sequences using keys and group the results.

The group_join() query produces a hierarchical result, with all of the
inner elements in the result grouped against the matching outer element.

The order of elements from outer is maintained. For each of these the
order of elements from inner is also preserved.

Note: This method uses deferred execution.

Args:
    inner_iterable: The sequence to join with the outer sequence.

    outer_key_selector: An optional unary function to extract keys from
        elements of the outer (source) sequence. The first positional
        argument of the function should accept outer elements and the
        result value should be the key. If omitted, the identity function
        is used.

    inner_key_selector: An optional unary function to extract keys from
        elements of the inner_iterable. The first positional argument of
        the function should accept outer elements and the result value
        should be the key. If omitted, the identity function is used.

    result_selector: An optional binary function to create a result
        element from an outer element and the Grouping of matching inner
        elements. The first positional argument is the outer elements and
        the second in the Grouping of inner elements which match the
        outer element according to the key selectors used. If omitted,
        the result elements will be the Groupings directly.

Returns:
    A Queryable over a sequence with one element for each group in the
    result as returned by the result_selector. If the default result
    selector is used, the result is a sequence of Grouping objects.

Raises:
    ValueError: If the Queryable has been closed.
    TypeError: If the inner_iterable is not in fact iterable.
    TypeError: If the outer_key_selector is not callable.
    TypeError: If the inner_key_selector is not callable.
    TypeError: If the result_selector is not callable.
def unindent(self, lines):
    indent = min(
        len(self.re.match(r'^ *', line).group())
        for line in lines)
    return [line[indent:].rstrip() for line in lines]
Removes any indentation that is common to all of the given lines.
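For instance, with a hypothetical owner instance ``obj`` (whose ``re`` attribute is the standard re module):

lines = ["    def f():", "        return 1"]
print(obj.unindent(lines))
# ['def f():', '    return 1'] -- the common 4-space indent is stripped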
def _split_scheme(expression):
    match = re.search(r'^([a-z]+):(.*)$', expression)
    if not match:
        scheme = 'plain'
        actual = expression
    else:
        scheme = match.group(1)
        actual = match.group(2)
    return scheme, actual
Splits the scheme and actual expression

:param str expression: The expression.
:rtype: (str, str)
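Two quick examples of the split (expression values here are hypothetical):

print(_split_scheme("glob:*.txt"))  # ('glob', '*.txt')
print(_split_scheme("*.txt"))       # ('plain', '*.txt')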
def listFigures(self, walkTrace=tuple(), case=None, element=None):
    if case == 'sectionmain':
        print(walkTrace, self.title)
    if case == 'figure':
        caption, fig = element
        try:
            print(walkTrace, fig._leopardref, caption)
        except AttributeError:
            fig._leopardref = next(self._reportSection._fignr)
            print(walkTrace, fig._leopardref, caption)
List section figures.
def enable_policies(zap_helper, policy_ids):
    if not policy_ids:
        policy_ids = _get_all_policy_ids(zap_helper)
    with zap_error_handler():
        zap_helper.enable_policies_by_ids(policy_ids)
Set the enabled policies to use in a scan. When you enable a selection of policies, all other policies are disabled.
def login_details(self):
    if not self.__login_details:
        self.__login_details = LoginDetails(self.__connection)
    return self.__login_details
Gets the login details

Returns:
    List of login details
def make_ring_filename(self, source_name, ring, galprop_run):
    format_dict = self.__dict__.copy()
    format_dict['sourcekey'] = self._name_factory.galprop_ringkey(
        source_name=source_name, ringkey="ring_%i" % ring)
    format_dict['galprop_run'] = galprop_run
    return self._name_factory.galprop_gasmap(**format_dict)
Make the name of a gasmap file for a single ring

Parameters
----------
source_name : str
    The galprop component, used to define path to gasmap files
ring : int
    The ring index
galprop_run : str
    String identifying the galprop parameters