def download_api(branch=None) -> str:
    habitica_github_api = 'https://api.github.com/repos/HabitRPG/habitica'
    if not branch:
        branch = requests.get(habitica_github_api + '/releases/latest').json()['tag_name']
    curl = local['curl']['-sL', habitica_github_api + '/tarball/{}'.format(branch)]
    tar = local['tar'][
        'axzf', '-', '--wildcards', '*/website/server/controllers/api-v3/*', '--to-stdout']
    grep = local['grep']['@api']
    sed = local['sed']['-e', 's/^[ */]*//g', '-e', 's/ / /g', '-']
    return (curl | tar | grep | sed)()
download API documentation from _branch_ of Habitica's repo on Github
def all_host_infos():
    output = []
    output.append(["Operating system", os()])
    output.append(["CPUID information", cpu()])
    output.append(["CC information", compiler()])
    output.append(["JDK information", from_cmd("java -version")])
    output.append(["MPI information", from_cmd("mpirun -version")])
    output.append(["Scala information", from_cmd("scala -version")])
    output.append(["OpenCL headers", from_cmd("find /usr/include|grep opencl.h")])
    output.append(["OpenCL libraries", from_cmd("find /usr/lib/ -iname '*opencl*'")])
    output.append(["NVidia SMI", from_cmd("nvidia-smi -q")])
    output.append(["OpenCL Details", opencl()])
    return output
Summarize all host information.
def get_values(self, *args, **kwargs):
    if isinstance(args[0], list):
        raise ValueError("Can only get_values() for a single tag.")
    response = self.get_datapoints(*args, **kwargs)
    for value in response['tags'][0]['results'][0]['values']:
        yield [datetime.datetime.utcfromtimestamp(value[0]/1000),
               value[1], value[2]]
Convenience method that, for simple single-tag queries, returns just the values to be iterated over.
def check_dipole(inp, name, verb):
    _check_shape(np.squeeze(inp), name, (3,))
    inp[0] = _check_var(inp[0], float, 1, name+'-x')
    inp[1] = _check_var(inp[1], float, 1, name+'-y', inp[0].shape)
    inp[2] = _check_var(inp[2], float, 1, name+'-z', (1,))
    if verb > 2:
        if name == 'src':
            longname = ' Source(s) : '
        else:
            longname = ' Receiver(s) : '
        print(longname, str(inp[0].size), 'dipole(s)')
        tname = ['x ', 'y ', 'z ']
        for i in range(3):
            text = " > " + tname[i] + " [m] : "
            _prnt_min_max_val(inp[i], text, verb)
    return inp, inp[0].size
r"""Check dipole parameters. This check-function is called from one of the modelling routines in :mod:`model`. Consult these modelling routines for a detailed description of the input parameters. Parameters ---------- inp : list of floats or arrays Pole coordinates (m): [pole-x, pole-y, pole-z]. name : str, {'src', 'rec'} Pole-type. verb : {0, 1, 2, 3, 4} Level of verbosity. Returns ------- inp : list List of pole coordinates [x, y, z]. ninp : int Number of inp-elements
def has_extensions(self, *exts):
    file_ext = splitext(self.filename)[1]
    file_ext = file_ext.lower()
    for e in exts:
        if file_ext == e:
            return True
    return False
Check if file has one of the extensions.
def search_people_by_bio(query, limit_results=DEFAULT_LIMIT, index=['onename_people_index']): from pyes import QueryStringQuery, ES conn = ES() q = QueryStringQuery(query, search_fields=['username', 'profile_bio'], default_operator='and') results = conn.search(query=q, size=20, indices=index) count = conn.count(query=q) count = count.count if(count == 0): q = QueryStringQuery(query, search_fields=['username', 'profile_bio'], default_operator='or') results = conn.search(query=q, size=20, indices=index) results_list = [] counter = 0 for profile in results: username = profile['username'] results_list.append(username) counter += 1 if(counter == limit_results): break return results_list
queries lucene index to find a nearest match, output is profile username
def set_pattern_step_setpoint(self, patternnumber, stepnumber, setpointvalue):
    _checkPatternNumber(patternnumber)
    _checkStepNumber(stepnumber)
    _checkSetpointValue(setpointvalue, self.setpoint_max)
    address = _calculateRegisterAddress('setpoint', patternnumber, stepnumber)
    self.write_register(address, setpointvalue, 1)
Set the setpoint value for a step. Args: * patternnumber (integer): 0-7 * stepnumber (integer): 0-7 * setpointvalue (float): Setpoint value
def connection_class(self, adapter): if self.adapters.get(adapter): return self.adapters[adapter] try: class_prefix = getattr( __import__('db.' + adapter, globals(), locals(), ['__class_prefix__']), '__class_prefix__') driver = self._import_class('db.' + adapter + '.connection.' + class_prefix + 'Connection') except ImportError: raise DBError("Must install adapter `%s` or doesn't support" % (adapter)) self.adapters[adapter] = driver return driver
Get connection class by adapter
def _have_pyspark():
    if _have_pyspark.flag is None:
        try:
            if PackageStore.get_parquet_lib() is ParquetLib.SPARK:
                import pyspark
                _have_pyspark.flag = True
            else:
                _have_pyspark.flag = False
        except ImportError:
            _have_pyspark.flag = False
    return _have_pyspark.flag
Check if we're running Pyspark
def _filter_nonextensions(cls, obj):
    if hasattr(obj, '__dict__') and obj.__dict__.get('__NO_EXTENSION__', False) is True:
        return False
    return True
Remove all classes marked as not extensions. This allows us to have a deeper hierarchy of classes than just one base class that is filtered by _filter_subclasses. Any class can define a class property named: __NO_EXTENSION__ = True That class will never be returned as an extension. This is useful for masking out base classes for extensions that are declared in CoreTools and would be present in module imports but should not create a second entry point.
def _get_node_groups(self): node_dict = {node['data']['id']: {'sources': [], 'targets': []} for node in self._nodes} for edge in self._edges: edge_data = (edge['data']['i'], edge['data']['polarity'], edge['data']['source']) node_dict[edge['data']['target']]['sources'].append(edge_data) edge_data = (edge['data']['i'], edge['data']['polarity'], edge['data']['target']) node_dict[edge['data']['source']]['targets'].append(edge_data) node_key_dict = collections.defaultdict(lambda: []) for node_id, node_d in node_dict.items(): key = self._get_node_key(node_d) node_key_dict[key].append(node_id) node_groups = [g for g in node_key_dict.values() if (len(g) > 1)] return node_groups
Return a list of node id lists that are topologically identical. First construct a node_dict which is keyed to the node id and has a value which is a dict with keys 'sources' and 'targets'. The 'sources' and 'targets' each contain a list of tuples (i, polarity, source) edge of the node. node_dict is then processed by _get_node_key() which returns a tuple of (s,t) where s,t are sorted tuples of the ids for the source and target nodes. (s,t) is then used as a key in node_key_dict where the values are the node ids. node_groups is restricted to groups greater than 1 node.
def _check_local_option(self, option):
    if option not in self.telnet_opt_dict:
        self.telnet_opt_dict[option] = TelnetOption()
    return self.telnet_opt_dict[option].local_option
Test the status of local negotiated Telnet options.
def raw(self, query: Any, data: Any = None): assert isinstance(query, str) input_db = self.conn['data'][self.schema_name] result = None try: query = query.replace("'", "\"") criteria = json.loads(query) for key, value in criteria.items(): input_db = self.provider._evaluate_lookup(key, value, False, input_db) items = list(input_db.values()) result = ResultSet( offset=1, limit=len(items), total=len(items), items=items) except json.JSONDecodeError: raise Exception("Query Malformed") return result
Run raw query on Repository. For this stand-in repository, the query string is a json string that contains kwargs criteria with straightforward equality checks. Individual criteria are always ANDed and the result is always a subset of the full repository. We will ignore the `data` parameter for this kind of repository.
def ffn_expert_fn(input_size, hidden_sizes, output_size, hidden_activation=tf.nn.relu): def my_fn(x): layer_sizes = [input_size] + hidden_sizes + [output_size] for i in range(1 + len(hidden_sizes)): w = tf.get_variable("w_%d" % i, layer_sizes[i:i+2], tf.float32) x = tf.matmul(x, w) if i < len(hidden_sizes): x = hidden_activation(x) if layer_sizes[i] != input_size: x *= (layer_sizes[i] / float(input_size))**-0.5 return x return my_fn
Returns a function that creates a feed-forward network. Use this function to create the expert_fn argument to distributed_moe. Args: input_size: an integer hidden_sizes: a list of integers output_size: an integer hidden_activation: a unary function. Returns: a unary function
def _sidConversion(cls, val, **kwargs):
    if isinstance(val, six.string_types):
        val = val.split(',')
    usernames = []
    for _sid in val:
        try:
            userSid = win32security.LookupAccountSid('', _sid)
            if userSid[1]:
                userSid = '{1}\\{0}'.format(userSid[0], userSid[1])
            else:
                userSid = '{0}'.format(userSid[0])
        except Exception:
            userSid = win32security.ConvertSidToStringSid(_sid)
            log.warning('Unable to convert SID "%s" to a friendly name. '
                        'The SID will be displayed instead of a user/group name.',
                        userSid)
        usernames.append(userSid)
    return usernames
converts a list of pysid objects to string representations
def rmtree(path):
    def handle_remove_readonly(func, path, exc):
        excvalue = exc[1]
        if (func in (os.rmdir, os.remove, os.unlink)
                and excvalue.errno == errno.EACCES):
            os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
            func(path)
        else:
            raise
    shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly)
On Windows, rmtree fails for read-only dirs.
def geo_length(arg, use_spheroid=None):
    op = ops.GeoLength(arg, use_spheroid)
    return op.to_expr()
Compute the length of geospatial data Parameters ---------- arg : geometry or geography use_spheroid : default None Returns ------- length : double scalar
def _handle_resps(self, root): resps, bearers = self.get_resps(root) if not resps: return root file_desc = root.xpath( '/tei:teiCorpus/tei:teiHeader/tei:fileDesc', namespaces=constants.NAMESPACES)[0] edition_stmt = etree.Element(TEI + 'editionStmt') file_desc.insert(1, edition_stmt) for index, (resp_resp, resp_name) in enumerate(resps): resp_stmt = etree.SubElement(edition_stmt, TEI + 'respStmt') xml_id = 'resp{}'.format(index+1) resp_stmt.set(constants.XML + 'id', xml_id) resp = etree.SubElement(resp_stmt, TEI + 'resp') resp.text = resp_resp name = etree.SubElement(resp_stmt, TEI + 'name') name.text = resp_name resp_data = '{{{}|{}}}'.format(resp_resp, resp_name) self._update_refs(root, bearers, 'resp', resp_data, xml_id) return root
Returns `root` with a resp list added to the TEI header and @resp values changed to references.
def total(self):
    if self._result_cache:
        return self._result_cache.total
    return self.all().total
Return the total number of records
def get(self, secret_id):
    return self.prepare_model(self.client.api.inspect_secret(secret_id))
Get a secret. Args: secret_id (str): Secret ID. Returns: (:py:class:`Secret`): The secret. Raises: :py:class:`docker.errors.NotFound` If the secret does not exist. :py:class:`docker.errors.APIError` If the server returns an error.
def forward_remote( self, remote_port, local_port=None, remote_host="127.0.0.1", local_host="localhost", ): if not local_port: local_port = remote_port tunnels = [] def callback(channel, src_addr_tup, dst_addr_tup): sock = socket.socket() sock.connect((local_host, local_port)) tunnel = Tunnel(channel=channel, sock=sock, finished=Event()) tunnel.start() tunnels.append(tunnel) try: self.transport.request_port_forward( address=remote_host, port=remote_port, handler=callback ) yield finally: for tunnel in tunnels: tunnel.finished.set() tunnel.join() self.transport.cancel_port_forward( address=remote_host, port=remote_port )
Open a tunnel connecting ``remote_port`` to the local environment. For example, say you're running a daemon in development mode on your workstation at port 8080, and want to funnel traffic to it from a production or staging environment. In most situations this isn't possible as your office/home network probably blocks inbound traffic. But you have SSH access to this server, so you can temporarily make port 8080 on that server act like port 8080 on your workstation:: from fabric import Connection c = Connection('my-remote-server') with c.forward_remote(8080): c.run("remote-data-writer --port 8080") # Assuming remote-data-writer runs until interrupted, this will # stay open until you Ctrl-C... This method is analogous to using the ``-R`` option of OpenSSH's ``ssh`` program. :param int remote_port: The remote port number on which to listen. :param int local_port: The local port number. Defaults to the same value as ``remote_port``. :param str local_host: The local hostname/interface the forwarded connection talks to. Default: ``localhost``. :param str remote_host: The remote interface address to listen on when forwarding connections. Default: ``127.0.0.1`` (i.e. only listen on the remote localhost). :returns: Nothing; this method is only useful as a context manager affecting local operating system state. .. versionadded:: 2.0
def parse_rune_links(html: str) -> dict: soup = BeautifulSoup(html, 'lxml') single_page_raw = soup.find_all('li', class_='champion') single_page = {re.split('\W+', x.a.div.div['style'])[-3].lower(): [x.a['href']] for x in single_page_raw if x.a is not None} double_page_raw = soup.find_all('div', class_='champion-modal-open') double_page_decode = [json.loads(x['data-loadouts']) for x in double_page_raw] double_page = {re.sub('[^A-Za-z0-9]+', '', x[0]['champion'].lower()): [x[0]['link'], x[1]['link']] for x in double_page_decode} champs_combined = {**single_page, **double_page} return champs_combined
A function which parses the main Runeforge website into dict format. Parameters ---------- html : str The string representation of the html obtained via a GET request. Returns ------- dict The nested rune_links champ rune pages from runeforge.
def __expire_files(self):
    self.__files = OrderedDict(
        item for item in self.__files.items() if not item[1].expired
    )
Because files are always unclean
def find_by_tooltip(browser, tooltip):
    return ElementSelector(
        world.browser,
        str('//*[@title=%(tooltip)s or @data-original-title=%(tooltip)s]' %
            dict(tooltip=string_literal(tooltip))),
        filter_displayed=True,
    )
Find elements with the given tooltip. :param browser: ``world.browser`` :param tooltip: Tooltip to search for Returns: an :class:`ElementSelector`
def plot_dop(bands, int_max, dop, hund_cu, name):
    data = ssplt.calc_z(bands, dop, np.arange(0, int_max, 0.1), hund_cu, name)
    ssplt.plot_curves_z(data, name)
Plot of the quasiparticle weight for N degenerate bands under the selected doping; it shows a transition only at half-filling, while the remaining states are metallic.
def reset(self, keep_state=False): if not keep_state: self.state = state.ManagerState(state.MANAGER_STATE_PREFIX) self.state.reset() async_to_sync(consumer.run_consumer)(timeout=1) async_to_sync(self.sync_counter.reset)()
Reset the shared state and drain Django Channels. :param keep_state: If ``True``, do not reset the shared manager state (useful in tests, where the settings overrides need to be kept). Defaults to ``False``.
def match(self, **kwargs): if kwargs: if self.definition.get('model') is None: raise ValueError("match() with filter only available on relationships with a model") output = process_filter_args(self.definition['model'], kwargs) if output: self.filters.append(output) return self
Traverse relationships with properties matching the given parameters. e.g: `.match(price__lt=10)` :param kwargs: see `NodeSet.filter()` for syntax :return: self
def for_window(cls, window):
    utcnow = datetime.datetime.utcnow()
    return cls(utcnow - window, 0)
Given a timedelta window, return a timestamp representing that time.
def color_split_position(self):
    return self.get_text_width(' ') + self.label_width + \
        int(float(self.font_width) * float(self.num_padding_chars))
The SVG x position where the color split should occur.
def kick(self, channel, nick, message=None):
    self.send("KICK", channel, nick, ":%s" % (message or self.user.nick))
Attempt to kick a user from a channel. If a message is not provided, defaults to own nick.
def itervalues(self, key_type=None): if(key_type is not None): intermediate_key = str(key_type) if intermediate_key in self.__dict__: for direct_key in self.__dict__[intermediate_key].values(): yield self.items_dict[direct_key] else: for value in self.items_dict.values(): yield value
Returns an iterator over the dictionary's values. @param key_type if specified, the iterator will return only values pointed to by keys of this type. Otherwise (if not specified) all values in this dictionary will be generated.
def extract_mean_or_value(cls, obs_or_pred, key=None): result = None if not isinstance(obs_or_pred, dict): result = obs_or_pred else: keys = ([key] if key is not None else []) + ['mean', 'value'] for k in keys: if k in obs_or_pred: result = obs_or_pred[k] break if result is None: raise KeyError(("%s has neither a mean nor a single " "value" % obs_or_pred)) return result
Extracts the mean, value, or user-provided key from an observation or prediction dictionary.
def subprocess_manager(self, exec_args):
    try:
        sp = gevent.subprocess.Popen(exec_args, stdout=gevent.subprocess.PIPE,
                                     stderr=gevent.subprocess.PIPE)
    except OSError:
        raise RuntimeError('Could not run bro executable (either not installed or not in path): %s' % (exec_args))
    out, err = sp.communicate()
    if out:
        print('standard output of subprocess: %s' % out)
    if err:
        raise RuntimeError('%s\npcap_bro had output on stderr: %s' % (exec_args, err))
    if sp.returncode:
        raise RuntimeError('%s\npcap_bro had returncode: %d' % (exec_args, sp.returncode))
Bro subprocess manager
def _whatsnd(data):
    hdr = data[:512]
    fakefile = BytesIO(hdr)
    for testfn in sndhdr.tests:
        res = testfn(hdr, fakefile)
        if res is not None:
            return _sndhdr_MIMEmap.get(res[0])
    return None
Try to identify a sound file type. sndhdr.what() has a pretty cruddy interface, unfortunately. This is why we re-do it here. It would be easier to reverse engineer the Unix 'file' command and use the standard 'magic' file, as shipped with a modern Unix.
def __set_default_ui_state(self, *args): LOGGER.debug("> Setting default View state!") if not self.model(): return self.expandAll() for column in range(len(self.model().horizontal_headers)): self.resizeColumnToContents(column)
Sets the Widget default ui state. :param \*args: Arguments. :type \*args: \*
def describe(self): desc = { 'name': self.name, 'description': self.description, 'type': self.type or 'unknown', } for attr in ['min', 'max', 'allowed', 'default']: v = getattr(self, attr) if v is not None: desc[attr] = v return desc
Information about this parameter
def get(self, name, default=None):
    session = self.__get_session_from_db()
    return session.get(name, default)
Gets the object for "name", or None if there's no such object. If "default" is provided, return it if no object is found.
def get_instance(page_to_crawl): global _instances if isinstance(page_to_crawl, basestring): uri = page_to_crawl page_to_crawl = crawlpage.get_instance(uri) elif isinstance(page_to_crawl, crawlpage.CrawlPage): uri = page_to_crawl.uri else: raise TypeError( "get_instance() expects a parker.CrawlPage " "or basestring derivative." ) try: instance = _instances[uri] except KeyError: instance = CrawlModel(page_to_crawl) _instances[uri] = instance return instance
Return an instance of CrawlModel.
def clean(self, value): if ( self.base_type is not None and value is not None and not isinstance(value, self.base_type) ): if isinstance(self.base_type, tuple): allowed_types = [typ.__name__ for typ in self.base_type] allowed_types_text = ' or '.join(allowed_types) else: allowed_types_text = self.base_type.__name__ err_msg = 'Value must be of %s type.' % allowed_types_text raise ValidationError(err_msg) if not self.has_value(value): if self.default is not None: raise StopValidation(self.default) if self.required: raise ValidationError('This field is required.') else: raise StopValidation(self.blank_value) return value
Take a dirty value and clean it.
def CaptureVariablesList(self, items, depth, empty_message, limits): v = [] for name, value in items: if (self._total_size >= self.max_size) or ( len(v) >= limits.max_list_items): v.append({ 'status': { 'refersTo': 'VARIABLE_VALUE', 'description': { 'format': ('Only first $0 items were captured. Use in an ' 'expression to see all items.'), 'parameters': [str(len(v))]}}}) break v.append(self.CaptureNamedVariable(name, value, depth, limits)) if not v: return [{'status': { 'refersTo': 'VARIABLE_NAME', 'description': {'format': empty_message}}}] return v
Captures list of named items. Args: items: iterable of (name, value) tuples. depth: nested depth of dictionaries and vectors for items. empty_message: info status message to set if items is empty. limits: Per-object limits for capturing variable data. Returns: List of formatted variable objects.
def parse_config(args): config_path = path.expanduser(args.config_file) if not path.exists(config_path): if args.config_file != DEFAULT_JOURNAL_RC: print("journal: error: config file '" + args.config_file + "' not found") sys.exit() else: return DEFAULT_JOURNAL config = ConfigParser.SafeConfigParser({ 'journal':{'default':'__journal'}, '__journal':{'location':DEFAULT_JOURNAL} }) config.read(config_path) journal_location = config.get(config.get('journal', 'default'), 'location'); if args.journal: journal_location = config.get(args.journal, 'location'); return journal_location
Try to load the config to find other journal locations; otherwise, return the default location. Returns the journal location.
def _prune_hit(hit, model): hit_id = hit["_id"] hit_index = hit["_index"] if model.objects.in_search_queryset(hit_id, index=hit_index): logger.debug( "%s with id=%s exists in the '%s' index queryset.", model, hit_id, hit_index ) return None else: logger.debug( "%s with id=%s does not exist in the '%s' index queryset and will be pruned.", model, hit_id, hit_index, ) return model(pk=hit_id)
Check whether a document should be pruned. This method uses the SearchDocumentManagerMixin.in_search_queryset method to determine whether a 'hit' (search document) should be pruned from an index, and if so it returns the hit as a Django object (id=hit_id). Args: hit: dict object that represents a document as returned from the scan_index function. (Contains object id and index.) model: the Django model (not object) from which the document was derived. Used to get the correct model manager and bulk action. Returns: an object of type model, with id=hit_id. NB this is not the object itself, which by definition may not exist in the underlying database, but a temporary object with the document id - which is enough to create a 'delete' action.
def delete_group(self, group_id, keep_non_orphans=False, keep_orphans=False): params = {'keepNonOrphans': str(keep_non_orphans).lower(), 'keepOrphans': str(keep_orphans).lower()} self._delete(self._service_url(['triggers', 'groups', group_id], params=params))
Delete a group trigger :param group_id: ID of the group trigger to delete :param keep_non_orphans: if True converts the non-orphan member triggers to standard triggers :param keep_orphans: if True converts the orphan member triggers to standard triggers
def predicates_overlap(tags1: List[str], tags2: List[str]) -> bool:
    pred_ind1 = get_predicate_indices(tags1)
    pred_ind2 = get_predicate_indices(tags2)
    return any(set.intersection(set(pred_ind1), set(pred_ind2)))
Tests whether the predicates in BIO tags1 overlap with those of tags2.
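A self-contained sketch, assuming predicates_overlap is in scope; get_predicate_indices is not shown above, so the helper below is a hypothetical stand-in that treats "-V" BIO tags as the predicate positions, and the tag sequences are made up:

from typing import List

def get_predicate_indices(tags: List[str]) -> List[int]:
    # Hypothetical helper: indices whose BIO tag marks the verb/predicate.
    return [i for i, tag in enumerate(tags) if tag.endswith("-V")]

tags1 = ["B-ARG0", "B-V", "I-V", "O"]
tags2 = ["O", "O", "B-V", "B-ARG1"]
print(predicates_overlap(tags1, tags2))  # True: both mark index 2 as predicate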
def calibrate(self): if self._driver and self._driver.is_connected(): self._driver.probe_plate() self._engaged = False
Calibration involves probing for top plate to get the plate height
def match_level(self, overlay): slice_width = len(self._pattern_spec) if slice_width > len(overlay): return None best_lvl, match_slice = (0, None) for i in range(len(overlay)-slice_width+1): overlay_slice = overlay.values()[i:i+slice_width] lvl = self._slice_match_level(overlay_slice) if lvl is None: continue if lvl > best_lvl: best_lvl = lvl match_slice = (i, i+slice_width) return (best_lvl, match_slice) if best_lvl != 0 else None
Given an overlay, return the match level and applicable slice of the overall overlay. The level an integer if there is a match or None if there is no match. The level integer is the number of matching components. Higher values indicate a stronger match.
def drop_nan(self, col: str=None, method: str="all", **kwargs): try: if col is None: self.df = self.df.dropna(how=method, **kwargs) else: self.df = self.df[self.df[col].notnull()] except Exception as e: self.err(e, "Error dropping nan values")
Drop rows with NaN values from the main dataframe :param col: name of the column, defaults to None. Drops in all columns if no value is provided :type col: str, optional :param method: ``how`` param for ``df.dropna``, defaults to "all" :type method: str, optional :param \*\*kwargs: params for ``df.dropna`` :type \*\*kwargs: optional :example: ``ds.drop_nan("mycol")``
def load(self, mdl_file): import dill as pickle mdl_file_e = op.expanduser(mdl_file) sv = pickle.load(open(mdl_file_e, "rb")) self.mdl = sv["mdl"] self.modelparams.update(sv["modelparams"]) logger.debug("loaded model from path: " + mdl_file_e)
Load a model from file. fv_type is not set by this function; it is expected to be set beforehand.
def home_wins(self):
    try:
        wins, losses = re.findall(r'\d+', self._home_record)
        return int(wins)
    except ValueError:
        return 0
Returns an ``int`` of the number of games the home team won after the conclusion of the game.
def cli_opts(): parser = argparse.ArgumentParser() parser.add_argument( "--homeassistant-config", type=str, required=False, dest="config", help="Create configuration section for home assistant",) parser.add_argument( "-f", "--filter", type=str, required=False, dest="filter", help="Ignore events related with these devices",) parser.add_argument( "-o", "--output", type=str, required=False, dest="output", help="Send output to file",) parser.add_argument( "-v", "--verbose", action="store_true", dest="verbose", help="Verbose output",) parser.add_argument('device') return parser.parse_args()
Handle the command line options
def handle_triple(self, lhs, relation, rhs): relation = relation.replace(':', '', 1) if self.is_relation_inverted(relation): source, target, inverted = rhs, lhs, True relation = self.invert_relation(relation) else: source, target, inverted = lhs, rhs, False source = _default_cast(source) target = _default_cast(target) if relation == '': relation = None return Triple(source, relation, target, inverted)
Process triples before they are added to the graph. Note that *lhs* and *rhs* are as they originally appeared, and may be inverted. Inversions are detected by is_relation_inverted() and de-inverted by invert_relation(). By default, this function: * removes initial colons on relations * de-inverts all inverted relations * sets empty relations to `None` * casts numeric string sources and targets to their numeric types (e.g. float, int) Args: lhs: the left hand side of an observed triple relation: the triple relation (possibly inverted) rhs: the right hand side of an observed triple Returns: The processed (source, relation, target) triple. By default, it is returned as a Triple object.
def list_vrf(self, auth, spec=None): if spec is None: spec = {} self._logger.debug("list_vrf called; spec: %s" % unicode(spec)) sql = "SELECT * FROM ip_net_vrf" params = list() if spec is not None and not {}: where, params = self._expand_vrf_spec(spec) if len(params) > 0: sql += " WHERE " + where sql += " ORDER BY vrf_rt_order(rt) NULLS FIRST" self._execute(sql, params) res = list() for row in self._curs_pg: res.append(dict(row)) return res
Return a list of VRFs matching `spec`. * `auth` [BaseAuth] AAA options. * `spec` [vrf_spec] A VRF specification. If omitted, all VRFs are returned. Returns a list of dicts. This is the documentation of the internal backend function. It's exposed over XML-RPC, please also see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.list_vrf` for full understanding.
def _get_internal_field_by_name(self, name):
    field = self._all_fields.get(name, self._all_fields.get('%s.%s' % (self._full_name, name)))
    if field is not None:
        return field
    for field_name in self._all_fields:
        if field_name.endswith('.%s' % name):
            return self._all_fields[field_name]
Gets the field by name, or None if not found.
def _ProcessArtifactFilesSource(self, source): if source.path_type != rdf_paths.PathSpec.PathType.OS: raise ValueError("Only supported path type is OS.") paths = [] pathspec_attribute = source.base_source.attributes.get("pathspec_attribute") for source_result_list in self._ProcessSources( source.artifact_sources, parser_factory=None): for response in source_result_list: path = _ExtractPath(response, pathspec_attribute) if path is not None: paths.append(path) file_finder_action = rdf_file_finder.FileFinderAction.Download() request = rdf_file_finder.FileFinderArgs( paths=paths, pathtype=source.path_type, action=file_finder_action) action = file_finder.FileFinderOSFromClient yield action, request
Get artifact responses, extract paths and send corresponding files.
def _read_depth_images(self, num_images): depth_images = self._ros_read_images(self._depth_image_buffer, num_images, self.staleness_limit) for i in range(0, num_images): depth_images[i] = depth_images[i] * MM_TO_METERS if self._flip_images: depth_images[i] = np.flipud(depth_images[i]) depth_images[i] = np.fliplr(depth_images[i]) depth_images[i] = DepthImage(depth_images[i], frame=self._frame) return depth_images
Reads depth images from the device
def get(self, id_):
    if self.api.queue_exists(id_):
        return Queue(self, {"queue": {"name": id_, "id_": id_}}, key="queue")
    raise exc.NotFound("The queue '%s' does not exist." % id_)
Need to customize, since Queues are not returned with normal response bodies.
def get_text(self, text): if sys.maxunicode == 0xffff: return text[self.offset:self.offset + self.length] if not isinstance(text, bytes): entity_text = text.encode('utf-16-le') else: entity_text = text entity_text = entity_text[self.offset * 2:(self.offset + self.length) * 2] return entity_text.decode('utf-16-le')
Get value of entity :param text: full text :return: part of text
def graph_from_edges(edge_list, node_prefix='', directed=False):
    if edge_list is None:
        edge_list = []
    graph_type = "digraph" if directed else "graph"
    with_prefix = functools.partial("{0}{1}".format, node_prefix)
    graph = Dot(graph_type=graph_type)
    for src, dst in edge_list:
        src = with_prefix(src)
        dst = with_prefix(dst)
        graph.add_edge(Edge(src, dst))
    return graph
Creates a basic graph out of an edge list. The edge list has to be a list of tuples representing the nodes connected by the edge. The values can be anything: bool, int, float, str. If the graph is undirected by default, it is only calculated from one of the symmetric halves of the matrix.
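A minimal usage sketch, assuming graph_from_edges and pydot's Dot/Edge classes are in scope; the edge list and node prefix are illustrative:

# Build an undirected graph from plain tuples; nodes get a textual prefix.
edges = [(1, 2), (1, 3), (2, 4)]
graph = graph_from_edges(edges, node_prefix='n', directed=False)

# Inspect the generated DOT source.
print(graph.to_string())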
def setServer(self, server):
    if server == 'live':
        self.__server__ = server
        self.__server_url__ = 'api.sense-os.nl'
        self.setUseHTTPS()
        return True
    elif server == 'dev':
        self.__server__ = server
        self.__server_url__ = 'api.dev.sense-os.nl'
        self.setUseHTTPS(False)
        return True
    elif server == 'rc':
        self.__server__ = server
        self.__server_url__ = 'api.rc.dev.sense-os.nl'
        self.setUseHTTPS(False)
        return True
    else:
        return False
Set server to interact with. @param server (string) - 'live' for live server, 'dev' for test server, 'rc' for release candidate @return (boolean) - Boolean indicating whether setServer succeeded
def _GetNextPath(self): paths = sorted(path for path in io_wrapper.ListDirectoryAbsolute(self._directory) if self._path_filter(path)) if not paths: return None if self._path is None: return paths[0] if not io_wrapper.IsCloudPath(paths[0]) and not self._ooo_writes_detected: current_path_index = bisect.bisect_left(paths, self._path) ooo_check_start = max(0, current_path_index - self._OOO_WRITE_CHECK_COUNT) for path in paths[ooo_check_start:current_path_index]: if self._HasOOOWrite(path): self._ooo_writes_detected = True break next_paths = list(path for path in paths if self._path is None or path > self._path) if next_paths: return min(next_paths) else: return None
Gets the next path to load from. This function also does the checking for out-of-order writes as it iterates through the paths. Returns: The next path to load events from, or None if there are no more paths.
def get_content(self, key): LOGGER.debug("> Retrieving '{0}' content from the cache.".format(self.__class__.__name__, key)) return self.get(key)
Gets given content from the cache. Usage:: >>> cache = Cache() >>> cache.add_content(John="Doe", Luke="Skywalker") True >>> cache.get_content("Luke") 'Skywalker' :param key: Content to retrieve. :type key: object :return: Content. :rtype: object
def construct_makeblastdb_cmd( filename, outdir, blastdb_exe=pyani_config.MAKEBLASTDB_DEFAULT ): title = os.path.splitext(os.path.split(filename)[-1])[0] outfilename = os.path.join(outdir, os.path.split(filename)[-1]) return ( "{0} -dbtype nucl -in {1} -title {2} -out {3}".format( blastdb_exe, filename, title, outfilename ), outfilename, )
Returns a single makeblastdb command. - filename - input filename - blastdb_exe - path to the makeblastdb executable
def initialize_from_sql_cursor(self, sqlcursor):
    tuples = 0
    data = sqlcursor.fetchmany()
    while 0 < len(data):
        for entry in data:
            self.add_entry(str(entry[0]), entry[1])
            tuples += 1
        data = sqlcursor.fetchmany()
    self._normalized = self._check_normalization
    return tuples
Initializes the TimeSeries's data from the given SQL cursor. You need to set the time stamp format using :py:meth:`TimeSeries.set_timeformat`. :param SQLCursor sqlcursor: Cursor that was holds the SQL result for any given "SELECT timestamp, value, ... FROM ..." SQL query. Only the first two attributes of the SQL result will be used. :return: Returns the number of entries added to the TimeSeries. :rtype: integer
def create_treeitem(self, ): p = self.get_parent() root = self.get_root() if p: pitem = p.get_treeitem() else: pitem = root.get_rootitem() idata = root.create_itemdata(self) item = TreeItem(idata, parent=pitem) return item
Create a new treeitem for this reftrack instance. .. Note:: Parent should be set, Parent should already have a treeitem. If there is no parent, the root tree item is used as parent for the treeitem. :returns: a new treeitem that contains a itemdata with the reftrack instanec. :rtype: :class:`TreeItem` :raises: None
def _ior(self, other):
    if not isinstance(other, _basebag):
        other = self._from_iterable(other)
    for elem, other_count in other.counts():
        old_count = self.count(elem)
        new_count = max(other_count, old_count)
        self._set_count(elem, new_count)
    return self
Set multiplicity of each element to the maximum of the two collections. if isinstance(other, _basebag): This runs in O(other.num_unique_elements()) else: This runs in O(len(other))
def histogram_voltage(self, timestep=None, title=True, **kwargs): data = self.network.results.v_res() if title is True: if timestep is not None: title = "Voltage histogram for time step {}".format(timestep) else: title = "Voltage histogram \nfor time steps {} to {}".format( data.index[0], data.index[-1]) elif title is False: title = None plots.histogram(data=data, title=title, timeindex=timestep, **kwargs)
Plots histogram of voltages. For more information see :func:`edisgo.tools.plots.histogram`. Parameters ---------- timestep : :pandas:`pandas.Timestamp<timestamp>` or None, optional Specifies time step histogram is plotted for. If timestep is None all time steps voltages are calculated for are used. Default: None. title : :obj:`str` or :obj:`bool`, optional Title for plot. If True title is auto generated. If False plot has no title. If :obj:`str`, the provided title is used. Default: True.
def next_sibling(self):
    if self.parent is None:
        return None
    for i, child in enumerate(self.parent.children):
        if child is self:
            try:
                return self.parent.children[i+1]
            except IndexError:
                return None
The node immediately following the invocant in their parent's children list. If the invocant does not have a next sibling, it is None
def create_move(project, resource, offset=None): if offset is None: return MoveModule(project, resource) this_pymodule = project.get_pymodule(resource) pyname = evaluate.eval_location(this_pymodule, offset) if pyname is not None: pyobject = pyname.get_object() if isinstance(pyobject, pyobjects.PyModule) or \ isinstance(pyobject, pyobjects.PyPackage): return MoveModule(project, pyobject.get_resource()) if isinstance(pyobject, pyobjects.PyFunction) and \ isinstance(pyobject.parent, pyobjects.PyClass): return MoveMethod(project, resource, offset) if isinstance(pyobject, pyobjects.PyDefinedObject) and \ isinstance(pyobject.parent, pyobjects.PyModule) or \ isinstance(pyname, pynames.AssignedName): return MoveGlobal(project, resource, offset) raise exceptions.RefactoringError( 'Move only works on global classes/functions/variables, modules and ' 'methods.')
A factory for creating Move objects Based on `resource` and `offset`, return one of `MoveModule`, `MoveGlobal` or `MoveMethod` for performing move refactoring.
def _createJobStateFile(self): jobStateFile = os.path.join(self.localTempDir, '.jobState') jobState = {'jobPID': os.getpid(), 'jobName': self.jobName, 'jobDir': self.localTempDir, 'deferredFunctions': []} with open(jobStateFile + '.tmp', 'wb') as fH: dill.dump(jobState, fH) os.rename(jobStateFile + '.tmp', jobStateFile) return jobStateFile
Create the job state file for the current job and fill in the required values. :return: Path to the job state file :rtype: str
def calc_et0_v1(self): con = self.parameters.control.fastaccess inp = self.sequences.inputs.fastaccess flu = self.sequences.fluxes.fastaccess for k in range(con.nhru): flu.et0[k] = (con.ke[k]*(((8.64*inp.glob+93.*con.kf[k]) * (flu.tkor[k]+22.)) / (165.*(flu.tkor[k]+123.) * (1.+0.00019*min(con.hnn[k], 600.)))))
Calculate reference evapotranspiration after Turc-Wendling. Required control parameters: |NHRU| |KE| |KF| |HNN| Required input sequence: |Glob| Required flux sequence: |TKor| Calculated flux sequence: |ET0| Basic equation: :math:`ET0 = KE \\cdot \\frac{(8.64 \\cdot Glob+93 \\cdot KF) \\cdot (TKor+22)} {165 \\cdot (TKor+123) \\cdot (1 + 0.00019 \\cdot min(HNN, 600))}` Example: >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> simulationstep('12h') >>> nhru(3) >>> ke(1.1) >>> kf(0.6) >>> hnn(200.0, 600.0, 1000.0) >>> inputs.glob = 200.0 >>> fluxes.tkor = 15.0 >>> model.calc_et0_v1() >>> fluxes.et0 et0(3.07171, 2.86215, 2.86215)
def update_input(filelist, ivmlist=None, removed_files=None): newfilelist = [] if removed_files == []: return filelist, ivmlist else: sci_ivm = list(zip(filelist, ivmlist)) for f in removed_files: result=[sci_ivm.remove(t) for t in sci_ivm if t[0] == f ] ivmlist = [el[1] for el in sci_ivm] newfilelist = [el[0] for el in sci_ivm] return newfilelist, ivmlist
Removes files flagged to be removed from the input filelist. Removes the corresponding ivm files if present.
def seq_md5(seq, normalize=True):
    seq = normalize_sequence(seq) if normalize else seq
    bseq = seq.encode("ascii")
    return hashlib.md5(bseq).hexdigest()
returns unicode md5 as hex digest for sequence `seq`. >>> seq_md5('') 'd41d8cd98f00b204e9800998ecf8427e' >>> seq_md5('ACGT') 'f1f8f4bf413b16ad135722aa4591043e' >>> seq_md5('ACGT*') 'f1f8f4bf413b16ad135722aa4591043e' >>> seq_md5(' A C G T ') 'f1f8f4bf413b16ad135722aa4591043e' >>> seq_md5('acgt') 'f1f8f4bf413b16ad135722aa4591043e' >>> seq_md5('acgt', normalize=False) 'db516c3913e179338b162b2476d1c23f'
def set_path(self, file_path):
    if not file_path:
        self.read_data = self.memory_read
        self.write_data = self.memory_write
    elif not is_valid(file_path):
        self.write_data(file_path, {})
    self.path = file_path
Set the path of the database. Create the file if it does not exist.
def close(self):
    self._serial.write(b"@c")
    self._serial.read()
    self._serial.close()
Closes the connection to the serial port and ensures no pending operations are left.
def get_hoisted(dct, child_name):
    child = dct[child_name]
    del dct[child_name]
    dct.update(child)
    return dct
Pulls all of a child's keys up to the parent, with the names unchanged.
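A small illustrative run, assuming get_hoisted is in scope; the dictionary contents are made up:

# The child's keys replace the child entry in the parent dict.
cfg = {"name": "svc", "options": {"retries": 3, "timeout": 10}}
hoisted = get_hoisted(cfg, "options")
print(hoisted)  # {'name': 'svc', 'retries': 3, 'timeout': 10}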
def exec_func(code, glob_vars, loc_vars=None):
    if loc_vars is None:
        exec(code, glob_vars)
    else:
        exec(code, glob_vars, loc_vars)
Wrapper around exec.
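A brief usage sketch, assuming exec_func is in scope; the code strings and variable names are illustrative:

# Run a code string against an explicit global namespace.
namespace = {}
exec_func("x = 2 + 3", namespace)
print(namespace["x"])  # 5

# With a separate local namespace.
local_vars = {}
exec_func("y = x * 2", {"x": 4}, local_vars)
print(local_vars["y"])  # 8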
def validate_root_vertex_directives(root_ast):
    directives_present_at_root = set()
    for directive_obj in root_ast.directives:
        directive_name = directive_obj.name.value
        if is_filter_with_outer_scope_vertex_field_operator(directive_obj):
            raise GraphQLCompilationError(u'Found a filter directive with an operator that is not '
                                          u'allowed on the root vertex: {}'.format(directive_obj))
        directives_present_at_root.add(directive_name)
    disallowed_directives = directives_present_at_root & VERTEX_DIRECTIVES_PROHIBITED_ON_ROOT
    if disallowed_directives:
        raise GraphQLCompilationError(u'Found prohibited directives on root vertex: '
                                      u'{}'.format(disallowed_directives))
Validate the directives that appear at the root vertex field.
def _pretty_time_delta(td):
    seconds = td.total_seconds()
    sign_string = '-' if seconds < 0 else ''
    seconds = abs(int(seconds))
    days, seconds = divmod(seconds, 86400)
    hours, seconds = divmod(seconds, 3600)
    minutes, seconds = divmod(seconds, 60)
    d = dict(sign=sign_string, days=days, hours=hours, minutes=minutes, seconds=seconds)
    if days > 0:
        return '{sign}{days}d{hours:02d}h{minutes:02d}m:{seconds:02d}s'.format(**d)
    elif hours > 0:
        return '{sign}{hours:02d}h{minutes:02d}m:{seconds:02d}s'.format(**d)
    elif minutes > 0:
        return '{sign}{minutes:02d}m:{seconds:02d}s'.format(**d)
    else:
        return '{sign}{seconds:02d}s'.format(**d)
Creates a string representation of a time delta. Parameters ---------- td : :class:`datetime.timedelta` Returns ------- pretty_formatted_datetime : str
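A quick usage sketch, assuming _pretty_time_delta is in scope:

import datetime

# Deltas are formatted starting from the largest non-zero unit; negatives get a leading '-'.
print(_pretty_time_delta(datetime.timedelta(hours=3, minutes=5, seconds=7)))  # 03h05m:07s
print(_pretty_time_delta(datetime.timedelta(days=-1)))                        # -1d00h00m:00s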
def _FlushInput(self): self.ser.flush() flushed = 0 while True: ready_r, ready_w, ready_x = select.select([self.ser], [], [self.ser], 0) if len(ready_x) > 0: logging.error("Exception from serial port.") return None elif len(ready_r) > 0: flushed += 1 self.ser.read(1) self.ser.flush() else: break
Flush all read data until no more available.
def get_from_layer(self, name, layer=None): if name not in self._children: if self._frozen: raise KeyError(name) self._children[name] = ConfigTree(layers=self._layers) child = self._children[name] if isinstance(child, ConfigNode): return child.get_value(layer) else: return child
Get a configuration value from the named layer. Parameters ---------- name : str The name of the value to retrieve layer: str The name of the layer to retrieve the value from. If it is not supplied then the outermost layer in which the key is defined will be used.
def stats_add_duration(self, key, duration): if not self._measurement: if not self.IGNORE_OOB_STATS: self.logger.warning( 'stats_add_timing invoked outside execution') return self._measurement.add_duration(key, duration)
Add a duration to the per-message measurements .. versionadded:: 3.19.0 .. note:: If this method is called when there is not a message being processed, a message will be logged at the ``warning`` level to indicate the value is being dropped. To suppress these warnings, set the :attr:`~rejected.consumer.Consumer.IGNORE_OOB_STATS` attribute to :data:`True`. :param key: The key to add the timing to :type key: :class:`str` :param duration: The timing value in seconds :type duration: :class:`int` or :class:`float`
def humanize_bytes(bytesize, precision=2):
    abbrevs = (
        (1 << 50, 'PB'),
        (1 << 40, 'TB'),
        (1 << 30, 'GB'),
        (1 << 20, 'MB'),
        (1 << 10, 'kB'),
        (1, 'bytes')
    )
    if bytesize == 1:
        return '1 byte'
    for factor, suffix in abbrevs:
        if bytesize >= factor:
            break
    if factor == 1:
        precision = 0
    return '%.*f %s' % (precision, bytesize / float(factor), suffix)
Humanize byte size figures https://gist.github.com/moird/3684595
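A short usage sketch, assuming humanize_bytes is in scope:

print(humanize_bytes(1))              # 1 byte
print(humanize_bytes(2048))           # 2.00 kB
print(humanize_bytes(5 * (1 << 30)))  # 5.00 GB
print(humanize_bytes(1234567, 1))     # 1.2 MB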
def photos_search(user_id='', auth=False, tags='', tag_mode='', text='',\ min_upload_date='', max_upload_date='',\ min_taken_date='', max_taken_date='', \ license='', per_page='', page='', sort=''): method = 'flickr.photos.search' data = _doget(method, auth=auth, user_id=user_id, tags=tags, text=text,\ min_upload_date=min_upload_date,\ max_upload_date=max_upload_date, \ min_taken_date=min_taken_date, \ max_taken_date=max_taken_date, \ license=license, per_page=per_page,\ page=page, sort=sort) photos = [] if isinstance(data.rsp.photos.photo, list): for photo in data.rsp.photos.photo: photos.append(_parse_photo(photo)) else: photos = [_parse_photo(data.rsp.photos.photo)] return photos
Returns a list of Photo objects. If auth=True then will auth the user. Can see private etc
def get_name(node):
    if isinstance(node, gast.Name):
        return node.id
    elif isinstance(node, (gast.Subscript, gast.Attribute)):
        return get_name(node.value)
    else:
        raise TypeError
Get the name of a variable. Args: node: A `Name`, `Subscript` or `Attribute` node. Returns: The name of the variable e.g. `'x'` for `x`, `x.i` and `x[i]`.
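A minimal usage sketch, assuming get_name is in scope and the gast package is installed; the expressions are illustrative:

import ast
import gast

# Resolve the underlying variable name for a name, an attribute and a subscript.
for src in ("x", "x.i", "x[i]"):
    node = gast.ast_to_gast(ast.parse(src, mode="eval")).body
    print(get_name(node))  # 'x' in all three cases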
def download_artifact_bundle(self, id_or_uri, file_path):
    uri = self.DOWNLOAD_PATH + '/' + extract_id_from_uri(id_or_uri)
    return self._client.download(uri, file_path)
Download the Artifact Bundle. Args: id_or_uri: ID or URI of the Artifact Bundle. file_path(str): Destination file path. Returns: bool: Successfully downloaded.
def update(self, data): self.name = data["name"] self.description = data['description'] self.win_index = data['win_index'] if conf.use_winpcapy: self._update_pcapdata() try: self.ip = socket.inet_ntoa(get_if_raw_addr(data['guid'])) except (KeyError, AttributeError, NameError): pass try: self.mac = data['mac'] except KeyError: pass
Update info about network interface according to given dnet dictionary
def live_scores(self, live_scores): headers = ['League', 'Home Team Name', 'Home Team Goals', 'Away Team Goals', 'Away Team Name'] result = [headers] result.extend([game['league'], game['homeTeamName'], game['goalsHomeTeam'], game['goalsAwayTeam'], game['awayTeamName']] for game in live_scores['games']) self.generate_output(result)
Store output of live scores to a CSV file
def allpathsX(args): p = OptionParser(allpathsX.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) folder, tag = args tag = tag.split(",") for p, pf in iter_project(folder): assemble_pairs(p, pf, tag)
%prog allpathsX folder tag Run assembly on a folder of paired reads and apply tag (PE-200, PE-500). Allow multiple tags separated by comma, e.g. PE-350,TT-1050
def stop_gradient(input_layer):
    if input_layer.is_sequence():
        result = [tf.stop_gradient(t) for t in input_layer.sequence]
        return input_layer.with_sequence(result)
    else:
        return tf.stop_gradient(input_layer)
Cuts off the gradient at this point. This works on both sequence and regular Pretty Tensors. Args: input_layer: The input. Returns: A new Pretty Tensor of the same type with stop_gradient applied.
def gff(args):
    p = OptionParser(gff.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    gbkfile, = args
    MultiGenBank(gbkfile)
%prog gff seq.gbk Convert Genbank file to GFF and FASTA file. The Genbank file can contain multiple records.
def validate_output(schema): location = get_callsite_location() def decorator(fn): validate_schema(schema) wrapper = wrap_response(fn, schema) record_schemas( fn, wrapper, location, response_schema=sort_schema(schema)) return wrapper return decorator
Validate the body of a response from a flask view. Like `validate_body`, this function compares a json document to a jsonschema specification. However, this function applies the schema to the view response. Instead of the view returning a flask response object, it should instead return a Python list or dictionary. For example:: from snapstore_schemas import validate_output @validate_output({ 'type': 'object', 'properties': { 'ok': {'type': 'boolean'}, }, 'required': ['ok'], 'additionalProperties': False }) def my_flask_view(): # view code here return {'ok': True} Every view response will be evaluated against the schema. Any that do not comply with the schema will cause DataValidationError to be raised.
def _should_fuzz_node(self, fuzz_node, stage): if stage == ClientFuzzer.STAGE_ANY: return True if fuzz_node.name.lower() == stage.lower(): if self._index_in_path == len(self._fuzz_path) - 1: return True else: return False
The matching stage is either the name of the last node, or ClientFuzzer.STAGE_ANY. :return: True if we are in the correct model node
def set_stop_chars(self, stop_chars):
    warnings.warn("Method set_stop_chars is deprecated, "
                  "use `set_stop_chars_left` or "
                  "`set_stop_chars_right` instead",
                  DeprecationWarning)
    self._stop_chars = set(stop_chars)
    self._stop_chars_left = self._stop_chars
    self._stop_chars_right = self._stop_chars
Set stop characters used when determining end of URL. .. deprecated:: 0.7 Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right` instead. :param list stop_chars: list of characters
def show(self, frame): if len(frame.shape) != 3: raise ValueError('frame should have shape with only 3 dimensions') if not self.is_open: self.open() self._window.clear() self._window.switch_to() self._window.dispatch_events() image = ImageData( frame.shape[1], frame.shape[0], 'RGB', frame.tobytes(), pitch=frame.shape[1]*-3 ) image.blit(0, 0, width=self._window.width, height=self._window.height) self._window.flip()
Show an array of pixels on the window. Args: frame (numpy.ndarray): the frame to show on the window Returns: None
def update(self, **kwargs): if self.condition is not None: self.result = self.do_(self.model.table.update().where(self.condition).values(**kwargs)) else: self.result = self.do_(self.model.table.update().values(**kwargs)) return self.result
Execute an UPDATE statement on the table, e.g. ``update table set field = field+1``.
def mkdir(dir_path):
    if not os.path.isdir(dir_path) or not os.path.exists(dir_path):
        os.makedirs(dir_path)
Make the directory if it does not exist.
def _remove_last(votes, fpl, cl, ranking):
    for v in votes:
        for r in v:
            if r == fpl[-1]:
                v.remove(r)
    for c in cl:
        if c == fpl[-1]:
            if c not in ranking:
                ranking.append((c, len(ranking) + 1))
Remove last candidate in IRV voting.
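A small illustrative run, assuming _remove_last is in scope; the ballots and candidate names are made up, and fpl is treated as a first-place list whose last entry is the candidate to eliminate:

votes = [["A", "B", "C"], ["B", "C", "A"], ["C", "A", "B"]]
fpl = ["A", "B", "C"]      # candidate "C" (last in fpl) is eliminated
cl = ["A", "B", "C"]
ranking = []

_remove_last(votes, fpl, cl, ranking)
print(votes)    # [['A', 'B'], ['B', 'A'], ['A', 'B']]
print(ranking)  # [('C', 1)]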
def calldata(vcf_fn, region=None, samples=None, ploidy=2, fields=None, exclude_fields=None, dtypes=None, arities=None, fills=None, vcf_types=None, count=None, progress=0, logstream=None, condition=None, slice_args=None, verbose=True, cache=False, cachedir=None, skip_cached=False, compress_cache=False, truncate=True): loader = _CalldataLoader(vcf_fn, region=region, samples=samples, ploidy=ploidy, fields=fields, exclude_fields=exclude_fields, dtypes=dtypes, arities=arities, fills=fills, vcf_types=vcf_types, count=count, progress=progress, logstream=logstream, condition=condition, slice_args=slice_args, verbose=verbose, cache=cache, cachedir=cachedir, skip_cached=skip_cached, compress_cache=compress_cache, truncate=truncate) arr = loader.load() return arr
Load a numpy 1-dimensional structured array with data from the sample columns of a VCF file. Parameters ---------- vcf_fn: string or list Name of the VCF file or list of file names. region: string Region to extract, e.g., 'chr1' or 'chr1:0-100000'. fields: list or array-like List of fields to extract from the VCF. exclude_fields: list or array-like Fields to exclude from extraction. dtypes: dict or dict-like Dictionary cotaining dtypes to use instead of the default inferred ones arities: dict or dict-like Override the amount of values to expect. fills: dict or dict-like Dictionary containing field:fillvalue mappings used to override the default fill in values in VCF fields. vcf_types: dict or dict-like Dictionary containing field:string mappings used to override any bogus type declarations in the VCF header. count: int Attempt to extract a specific number of records. progress: int If greater than 0, log parsing progress. logstream: file or file-like object Stream to use for logging progress. condition: array Boolean array defining which rows to load. slice_args: tuple or list Slice of the underlying iterator, e.g., (0, 1000, 10) takes every 10th row from the first 1000. verbose: bool Log more messages. cache: bool If True, save the resulting numpy array to disk, and load from the cache if present rather than rebuilding from the VCF. cachedir: string Manually specify the directory to use to store cache files. skip_cached: bool If True and cache file is fresh, do not load and return None. compress_cache: bool, optional If True, compress the cache file. truncate: bool, optional If True (default) only include variants whose start position is within the given region. If False, use default tabix behaviour. Examples -------- >>> from vcfnp import calldata, view2d >>> c = calldata('fixture/sample.vcf') >>> c array([ ((True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, False, [0, 1], 0, 0, b'0/1', [3, 3])), ((True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, False, [0, 1], 0, 0, b'0/1', [3, 3])), ((True, True, [0, 0], 1, 48, b'0|0', [51, 51]), (True, True, [1, 0], 8, 48, b'1|0', [51, 51]), (True, False, [1, 1], 5, 43, b'1/1', [0, 0])), ((True, True, [0, 0], 3, 49, b'0|0', [58, 50]), (True, True, [0, 1], 5, 3, b'0|1', [65, 3]), (True, False, [0, 0], 3, 41, b'0/0', [0, 0])), ((True, True, [1, 2], 6, 21, b'1|2', [23, 27]), (True, True, [2, 1], 0, 2, b'2|1', [18, 2]), (True, False, [2, 2], 4, 35, b'2/2', [0, 0])), ((True, True, [0, 0], 0, 54, b'0|0', [56, 60]), (True, True, [0, 0], 4, 48, b'0|0', [51, 51]), (True, False, [0, 0], 2, 61, b'0/0', [0, 0])), ((True, False, [0, 1], 4, 0, b'0/1', [0, 0]), (True, False, [0, 2], 2, 17, b'0/2', [0, 0]), (False, False, [-1, -1], 3, 40, b'./.', [0, 0])), ((True, False, [0, 0], 0, 0, b'0/0', [0, 0]), (True, True, [0, 0], 0, 0, b'0|0', [0, 0]), (False, False, [-1, -1], 0, 0, b'./.', [0, 0])), ((True, False, [0, -1], 0, 0, b'0', [0, 0]), (True, False, [0, 1], 0, 0, b'0/1', [0, 0]), (True, True, [0, 2], 0, 0, b'0|2', [0, 0]))], dtype=[('NA00001', [('is_called', '?'), ('is_phased', '?'), ('genotype', 'i1', (2,)), ('DP', '<u2'), ('GQ', 'u1'), ('GT', 'S3'), ('HQ', '<i4', (2,))]), ('NA00002', [('is_called', '?'), ('is_phased', '?'), ('genotype', 'i1', (2,)), ('DP', '<u2'), ('GQ', 'u1'), ('GT', 'S3'), ('HQ', '<i4', (2,))]), ('NA00003', [('is_called', '?'), ('is_phased', '?'), ('genotype', 'i1', (2,)), ('DP', '<u2'), ('GQ', 'u1'), ('GT', 'S3'), ('HQ', '<i4', (2,))])]) >>> 
c['NA00001'] array([(True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, True, [0, 0], 1, 48, b'0|0', [51, 51]), (True, True, [0, 0], 3, 49, b'0|0', [58, 50]), (True, True, [1, 2], 6, 21, b'1|2', [23, 27]), (True, True, [0, 0], 0, 54, b'0|0', [56, 60]), (True, False, [0, 1], 4, 0, b'0/1', [0, 0]), (True, False, [0, 0], 0, 0, b'0/0', [0, 0]), (True, False, [0, -1], 0, 0, b'0', [0, 0])], dtype=[('is_called', '?'), ('is_phased', '?'), ('genotype', 'i1', (2,)), ('DP', '<u2'), ('GQ', 'u1'), ('GT', 'S3'), ('HQ', '<i4', (2,))]) >>> c2d = view2d(c) >>> c2d array([[(True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, False, [0, 1], 0, 0, b'0/1', [3, 3])], [(True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, False, [0, 1], 0, 0, b'0/1', [3, 3])], [(True, True, [0, 0], 1, 48, b'0|0', [51, 51]), (True, True, [1, 0], 8, 48, b'1|0', [51, 51]), (True, False, [1, 1], 5, 43, b'1/1', [0, 0])], [(True, True, [0, 0], 3, 49, b'0|0', [58, 50]), (True, True, [0, 1], 5, 3, b'0|1', [65, 3]), (True, False, [0, 0], 3, 41, b'0/0', [0, 0])], [(True, True, [1, 2], 6, 21, b'1|2', [23, 27]), (True, True, [2, 1], 0, 2, b'2|1', [18, 2]), (True, False, [2, 2], 4, 35, b'2/2', [0, 0])], [(True, True, [0, 0], 0, 54, b'0|0', [56, 60]), (True, True, [0, 0], 4, 48, b'0|0', [51, 51]), (True, False, [0, 0], 2, 61, b'0/0', [0, 0])], [(True, False, [0, 1], 4, 0, b'0/1', [0, 0]), (True, False, [0, 2], 2, 17, b'0/2', [0, 0]), (False, False, [-1, -1], 3, 40, b'./.', [0, 0])], [(True, False, [0, 0], 0, 0, b'0/0', [0, 0]), (True, True, [0, 0], 0, 0, b'0|0', [0, 0]), (False, False, [-1, -1], 0, 0, b'./.', [0, 0])], [(True, False, [0, -1], 0, 0, b'0', [0, 0]), (True, False, [0, 1], 0, 0, b'0/1', [0, 0]), (True, True, [0, 2], 0, 0, b'0|2', [0, 0])]], dtype=[('is_called', '?'), ('is_phased', '?'), ('genotype', 'i1', (2,)), ('DP', '<u2'), ('GQ', 'u1'), ('GT', 'S3'), ('HQ', '<i4', (2,))]) >>> c2d['genotype'] array([[[ 0, 0], [ 0, 0], [ 0, 1]], [[ 0, 0], [ 0, 0], [ 0, 1]], [[ 0, 0], [ 1, 0], [ 1, 1]], [[ 0, 0], [ 0, 1], [ 0, 0]], [[ 1, 2], [ 2, 1], [ 2, 2]], [[ 0, 0], [ 0, 0], [ 0, 0]], [[ 0, 1], [ 0, 2], [-1, -1]], [[ 0, 0], [ 0, 0], [-1, -1]], [[ 0, -1], [ 0, 1], [ 0, 2]]], dtype=int8) >>> c2d['genotype'][3, :] array([[0, 0], [0, 1], [0, 0]], dtype=int8)
def flags(self, index):
    activeFlags = (Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsUserCheckable)
    item = self.item(index)
    column = index.column()
    if column > 0 and not item.childCount():
        activeFlags = activeFlags | Qt.ItemIsEditable
    return activeFlags
Return the active flags for the given index. Add editable flag to items other than the first column.
def _check_minions_directories(pki_dir):
    minions_accepted = os.path.join(pki_dir, salt.key.Key.ACC)
    minions_pre = os.path.join(pki_dir, salt.key.Key.PEND)
    minions_rejected = os.path.join(pki_dir, salt.key.Key.REJ)
    minions_denied = os.path.join(pki_dir, salt.key.Key.DEN)
    return minions_accepted, minions_pre, minions_rejected, minions_denied
Return the minion keys directory paths. This function is a copy of salt.key.Key._check_minions_directories.