def on_save(self, event):
    dlg = wx.FileDialog(None, self.settings.get_title(), '', "",
                        '*.*', wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
    if dlg.ShowModal() == wx.ID_OK:
        self.settings.save(dlg.GetPath())
Called when the save button is pressed.
def _ewp_flags_set(self, ewp_dic_subset, project_dic, flag_type, flag_dic):
    try:
        if flag_type in project_dic['misc'].keys():
            index_option = self._get_option(ewp_dic_subset, flag_dic['enable'])
            self._set_option(ewp_dic_subset[index_option], '1')
            index_option = self._get_option(ewp_dic_subset, flag_dic['set'])
            if type(ewp_dic_subset[index_option]['state']) != list:
                previous_state = ewp_dic_subset[index_option]['state']
                ewp_dic_subset[index_option]['state'] = []
                ewp_dic_subset[index_option]['state'].append(previous_state)
            for item in project_dic['misc'][flag_type]:
                ewp_dic_subset[index_option]['state'].append(item)
    except KeyError:
        return
Set flags from the project's misc section on the ewp project.
def getGenomeList():
    import rabaDB.filters as rfilt
    f = rfilt.RabaQuery(Genome_Raba)
    names = []
    for g in f.iterRun():
        names.append(g.name)
    return names
Return the names of all imported genomes
def fit(fqdn, result, *argl, **argd):
    global _machines
    out = None
    if len(argl) > 0:
        machine = argl[0]
        key = id(machine)
        _machines[key] = (machine, argl[0], argl[1])
        if isclassifier(machine):
            out = classify_fit(fqdn, result, *argl, **argd)
        elif isregressor(machine):
            out = regress_fit(fqdn, result, *argl, **argd)
    return out
Analyzes the result of a generic fit operation performed by `sklearn`.

Args:
    fqdn (str): fully-qualified name of the method that was called.
    result: result of calling the method with `fqdn`.
    argl (tuple): positional arguments passed to the method call.
    argd (dict): keyword arguments passed to the method call.
def save_assessment_offered(self, assessment_offered_form, *args, **kwargs):
    if assessment_offered_form.is_for_update():
        return self.update_assessment_offered(assessment_offered_form, *args, **kwargs)
    else:
        return self.create_assessment_offered(assessment_offered_form, *args, **kwargs)
Pass through to provider AssessmentOfferedAdminSession.update_assessment_offered
def pop_events(self, regex_pattern, timeout):
    if not self.started:
        raise IllegalStateError(
            "Dispatcher needs to be started before popping.")
    deadline = time.time() + timeout
    while True:
        results = self._match_and_pop(regex_pattern)
        if len(results) != 0 or time.time() > deadline:
            break
        time.sleep(1)
    if len(results) == 0:
        raise queue.Empty('Timeout after {}s waiting for event: {}'.format(
            timeout, regex_pattern))
    return sorted(results, key=lambda event: event['time'])
Pop events whose names match a regex pattern.

If such event(s) exist, pop one event from each event queue that
satisfies the condition. Otherwise, wait for an event that satisfies
the condition to occur, with timeout. Results are sorted by timestamp
in ascending order.

Args:
    regex_pattern: The regular expression pattern that an event name
        should match in order to be popped.
    timeout: Number of seconds to wait for events in case no event
        matching the condition exists when the function is called.

Returns:
    Events whose names match a regex pattern. Empty if none exist and
    the wait timed out.

Raises:
    IllegalStateError: Raised if pop is called before the dispatcher
        starts polling.
    queue.Empty: Raised if no event was found before time out.
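A minimal usage sketch; the dispatcher instance, its start method, and the 'name' event field are illustrative assumptions, not from the source:

dispatcher.start()
# Block up to 30s for any matching event, then iterate in time order.
for event in dispatcher.pop_events(r'onCall.*', timeout=30):
    print(event['name'], event['time'])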
def assert_looks_like(first, second, msg=None):
    first = _re.sub(r"\s+", " ", first.strip())
    second = _re.sub(r"\s+", " ", second.strip())
    if first != second:
        raise AssertionError(msg or "%r does not look like %r" % (first, second))
Compare two strings after all contiguous whitespace is coalesced.
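For example, as a standalone sketch of the intended behaviour:

assert_looks_like('a   b\n  c', 'a b c')   # passes: whitespace coalesced
try:
    assert_looks_like('a b', 'a c')
except AssertionError as err:
    print(err)                             # 'a b' does not look like 'a c'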
def _collect_infos(dirname):
    for r, _ds, fs in walk(dirname):
        if not islink(r) and r != dirname:
            i = ZipInfo()
            i.filename = join(relpath(r, dirname), "")
            i.file_size = 0
            i.compress_size = 0
            i.CRC = 0
            yield i.filename, i
        for f in fs:
            df = join(r, f)
            relfn = relpath(join(r, f), dirname)
            if islink(df):
                pass
            elif isfile(df):
                i = ZipInfo()
                i.filename = relfn
                i.file_size = getsize(df)
                i.compress_size = i.file_size
                i.CRC = file_crc32(df)
                yield i.filename, i
            else:
                pass
Utility function used by ExplodedZipFile to generate ZipInfo entries for all of the files and directories under dirname
def needs_fully_loaded(method):
    @functools.wraps(method)
    def inner(self, *args, **kwargs):
        if not self.fully_loaded:
            loaded_yaml = yaml_loader.YamlLoader.load_yaml_by_path(self.path)
            self.parsed_yaml = loaded_yaml
            self.fully_loaded = True
        return method(self, *args, **kwargs)
    return inner
Wraps all publicly callable methods of YamlAssistant. If the assistant was loaded from cache, this decorator will fully load it the first time a publicly callable method is used.
def _extract_subdomain(host):
    host = host.split(':')[0]
    try:
        socket.inet_aton(host)
    except socket.error:
        return '.'.join(host.split('.')[:-2])
    # host parsed as an IP address: no subdomain to extract
    return None
Returns a subdomain from a host. This host is typically the HTTP_HOST
request envvar. If the host is an IP address, `None` is returned.

:param host: Request's target host
def get_cur_file_size(fp, position_to_eof=False):
    if not position_to_eof:
        cur_pos = fp.tell()
    fp.seek(0, os.SEEK_END)
    cur_file_size = fp.tell()
    if not position_to_eof:
        fp.seek(cur_pos, os.SEEK_SET)
    return cur_file_size
Returns size of file, optionally leaving fp positioned at EOF.
def _get_local_files(local_dir, pattern=''):
    local_files = {}
    if pattern:
        cwd = os.getcwd()
        os.chdir(local_dir)
        patterns = pattern.split('|')
        local_list = set([])
        for p in patterns:
            local_list = local_list | set(glob(p))
        for path in local_list:
            dir, file = os.path.split(path)
            if os.path.isfile(path):
                local_files[dir] = local_files.get(dir, []) + [file]
            elif os.path.isdir(path):
                local_files[file] = local_files.get(dir, [])
        os.chdir(cwd)
    return local_files
Returns a dictionary with directories as keys, and filenames as values
for filenames matching the glob ``pattern`` under the ``local_dir``.
``pattern`` can contain the Boolean OR ``|`` to evaluate multiple
patterns into a combined set.
def _sort_policy(doc):
    if isinstance(doc, list):
        return sorted([_sort_policy(i) for i in doc])
    elif isinstance(doc, (dict, OrderedDict)):
        return dict([(k, _sort_policy(v)) for k, v in six.iteritems(doc)])
    return doc
List-type sub-items in policies don't happen to be order-sensitive, but compare operations will render them unequal, leading to non-idempotent state runs. We'll sort any list-type subitems before comparison to reduce the likelihood of false negatives.
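For instance, two policy fragments that differ only in the ordering of a string list compare equal after sorting (a sketch; the statement contents are hypothetical):

a = {'Action': ['s3:GetObject', 's3:PutObject'], 'Effect': 'Allow'}
b = {'Action': ['s3:PutObject', 's3:GetObject'], 'Effect': 'Allow'}
assert _sort_policy(a) == _sort_policy(b)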
def parse(cls, line, lineno, log, cmddict=None):
    delay = -1
    token = line.split()[0]
    start = line.find(token)
    pos = SeqPos(line, lineno, start + 1, start + len(token))
    try:
        delay = float(token)
    except ValueError:
        msg = 'String "%s" could not be interpreted as a numeric time delay.'
        log.error(msg % token, pos)
    return cls(delay, pos)
Parses the SeqDelay from a line of text. Warning and error messages are logged via the SeqMsgLog log.
def to_dict(self, data=True):
    d = self.variable.to_dict(data=data)
    d.update({'coords': {}, 'name': self.name})
    for k in self.coords:
        d['coords'][k] = self.coords[k].variable.to_dict(data=data)
    return d
Convert this xarray.DataArray into a dictionary following xarray
naming conventions.

Converts all variables and attributes to native Python objects.
Useful for converting to json. To avoid datetime incompatibility
use decode_times=False kwarg in xarray.open_dataset.

Parameters
----------
data : bool, optional
    Whether to include the actual data in the dictionary. When set to
    False, returns just the schema.

See also
--------
DataArray.from_dict
def _init_cfg_interfaces(self, cb, intf_list=None, all_intf=True):
    if not all_intf:
        self.intf_list = intf_list
    else:
        self.intf_list = sys_utils.get_all_run_phy_intf()
    self.cb = cb
    self.intf_attr = {}
    self.cfg_lldp_interface_list(self.intf_list)
Configure the interfaces during init time.
def keys(self, namespace, prefix=None, limit=None, offset=None):
    params = [namespace]
    query = 'SELECT key FROM gauged_keys WHERE namespace = %s'
    if prefix is not None:
        query += ' AND key LIKE %s'
        params.append(prefix + '%')
    if limit is not None:
        query += ' LIMIT %s'
        params.append(limit)
    if offset is not None:
        query += ' OFFSET %s'
        params.append(offset)
    cursor = self.cursor
    cursor.execute(query, params)
    return [key for key, in cursor]
Get keys from a namespace
def _split_ns_command(cmd_token):
    namespace = None
    cmd_split = cmd_token.split(".", 1)
    if len(cmd_split) == 1:
        command = cmd_split[0]
    else:
        namespace = cmd_split[0]
        command = cmd_split[1]
    if not namespace:
        namespace = ""
    return namespace.lower(), command.lower()
Extracts the namespace and the command name of the given command token.

:param cmd_token: The command token
:return: The extracted (namespace, command) tuple
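Expected behaviour, as a quick sketch:

assert _split_ns_command('shell.ls') == ('shell', 'ls')
assert _split_ns_command('ls') == ('', 'ls')          # no namespace
assert _split_ns_command('Shell.LS') == ('shell', 'ls')   # lower-cased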
def read(self, path, filename=None, offset=None, size=-1):
    storageScheme, key = self.getkey(path, filename=filename)
    if offset or (size > -1):
        if not offset:
            offset = 0
        if size > -1:
            sizeStr = offset + size - 1
        else:
            sizeStr = ""
        headers = {"Range": "bytes=%d-%s" % (offset, sizeStr)}
        return key.get_contents_as_string(headers=headers)
    else:
        return key.get_contents_as_string()
Read a file specified by path.
def load_shared_data(path: typing.Union[str, None]) -> dict:
    if path is None:
        return dict()
    if not os.path.exists(path):
        raise FileNotFoundError('No such shared data file "{}"'.format(path))
    try:
        with open(path, 'r') as fp:
            data = json.load(fp)
    except Exception:
        raise IOError('Unable to read shared data file "{}"'.format(path))
    if not isinstance(data, dict):
        raise ValueError('Shared data must load into a dictionary object')
    return data
Load shared data from a JSON file stored on disk
def drawHUD(self):
    self.win.move(self.height - 2, self.x_pad)
    self.win.clrtoeol()
    self.win.box()
    self.addstr(2, self.x_pad + 1, "Population: %i" % len(self.grid))
    self.addstr(3, self.x_pad + 1, "Generation: %s" % self.current_gen)
    self.addstr(3, self.x_grid - 21, "s: start p: pause")
    self.addstr(2, self.x_grid - 21, "r: restart q: quit")
Draw information on population size and current generation
def tile_y_size(self, zoom):
    warnings.warn(DeprecationWarning("tile_y_size is deprecated"))
    validate_zoom(zoom)
    return round(self.y_size / self.matrix_height(zoom), ROUND)
Height of a tile in SRID units at zoom level.

- zoom: zoom level
def load_fixture(fixture_file):
    utils.check_for_local_server()
    local_url = config["local_server"]["url"]
    server = Server(local_url)
    fixture = json.load(fixture_file)
    for db_name, _items in fixture.items():
        db = server[db_name]
        with click.progressbar(_items, label=db_name, length=len(_items)) as items:
            for item in items:
                item_id = item["_id"]
                if item_id in db:
                    old_item = db[item_id]
                    item["_rev"] = old_item["_rev"]
                    if item == old_item:
                        continue
                db[item_id] = item
Populate the database from a JSON file.

Reads the JSON file FIXTURE_FILE and uses it to populate the database.
Fixture files should consist of a dictionary mapping database names to
arrays of objects to store in those databases.
def print_mem(unit="MB"):
    try:
        import psutil
        B = float(psutil.Process(os.getpid()).memory_info().vms)
        KB = B / 1024
        MB = KB / 1024
        GB = MB / 1024
        result = vars()[unit]
        print_info("memory usage: %.2f(%s)" % (result, unit))
        return result
    except ImportError:
        print_info("pip install psutil first.")
Show the process memory cost with psutil; use this only for laziness.

:param unit: B, KB, MB, GB.
def item(self, name, fuzzy_threshold=100):
    match = process.extractOne(
        name,
        self._items.keys(),
        score_cutoff=(fuzzy_threshold - 1),
    )
    if match:
        exact_name = match[0]
        item = self._items[exact_name]
        item.decrypt_with(self)
        return item
    else:
        return None
Extract a password from an unlocked Keychain using fuzzy matching. ``fuzzy_threshold`` can be an integer between 0 and 100, where 100 is an exact match.
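A hedged usage sketch; the keychain object, the item name, and the item's password attribute are illustrative assumptions:

item = keychain.item('GMail', fuzzy_threshold=80)   # also matches e.g. 'Gmail'
if item is not None:
    print(item.password)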
def set_affinity(pid, cpuset):
    _cpuset = cpu_set_t()
    __CPU_ZERO(_cpuset)
    for i in cpuset:
        if i in range(0, sizeof(cpu_set_t) * 8):
            __CPU_SET(i, _cpuset)
    if libnuma.sched_setaffinity(pid, sizeof(cpu_set_t), byref(_cpuset)) < 0:
        raise RuntimeError()
Sets the CPU affinity mask of the process whose ID is pid to the value
specified by cpuset. If pid is zero, then the calling process is used.

@param pid: process PID (0 == current process)
@type pid: C{int}
@param cpuset: set of CPU ids
@type cpuset: C{set}
def _get_rnn_layer(mode, num_layers, input_size, hidden_size, dropout, weight_dropout):
    if mode == 'rnn_relu':
        rnn_block = functools.partial(rnn.RNN, activation='relu')
    elif mode == 'rnn_tanh':
        rnn_block = functools.partial(rnn.RNN, activation='tanh')
    elif mode == 'lstm':
        rnn_block = rnn.LSTM
    elif mode == 'gru':
        rnn_block = rnn.GRU
    block = rnn_block(hidden_size, num_layers, dropout=dropout, input_size=input_size)
    if weight_dropout:
        apply_weight_drop(block, '.*h2h_weight', rate=weight_dropout)
    return block
create rnn layer given specs
def get_new_term_doc_mat(self, doc_domains):
    assert len(doc_domains) == self.term_doc_matrix.get_num_docs()
    doc_domain_set = set(doc_domains)
    num_terms = self.term_doc_matrix.get_num_terms()
    num_domains = len(doc_domain_set)
    domain_mat = lil_matrix((num_domains, num_terms), dtype=int)
    X = self.term_doc_matrix.get_term_doc_mat()
    for i, domain in enumerate(doc_domain_set):
        domain_mat[i, :] = X[np.array(doc_domains == domain)].sum(axis=0)
    return domain_mat.tocsr()
Combines documents together that are in the same domain.

Parameters
----------
doc_domains : array-like

Returns
-------
scipy.sparse.csr_matrix
def _compute(self, feed_dict, shard):
    try:
        descriptor, enq = self._tfrun(self._tf_expr[shard], feed_dict=feed_dict)
        self._inputs_waiting.decrement(shard)
    except Exception as e:
        montblanc.log.exception("Compute Exception")
        raise
Call the tensorflow compute
def interface(self, value):
    self._interface = value
    if isinstance(value, int):
        self._device_number = value
    else:
        self._serial_number = value
Sets the interface used to connect to the device.

:param value: may specify either the serial number or the device index
:type value: string or int
def codemirror_field_css_assets(*args):
    manifesto = CodemirrorAssetTagRender()
    manifesto.register_from_fields(*args)
    return mark_safe(manifesto.css_html())
Tag to render CodeMirror CSS assets needed for all given fields.

Example:
::

    {% load djangocodemirror_tags %}
    {% codemirror_field_css_assets form.myfield1 form.myfield2 %}
def fix_timezone_separator(cls, timestr):
    tz_sep = cls.TIMEZONE_SEPARATOR.match(timestr)
    if tz_sep is not None:
        return tz_sep.group(1) + tz_sep.group(2) + ':' + tz_sep.group(3)
    return timestr
Replace an invalid timezone separator to prevent `dateutil.parser.parse`
from raising.

:return: the fixed string if an invalid separator was found, the
    unchanged string otherwise
def add_package(
        self,
        package,
        node_paths=None,
        type_option=PackageInstallationTypeOption.PROD,
        version_option=None):
    args = self._get_add_package_args(
        package,
        type_option=type_option,
        version_option=version_option)
    return self.run_command(args=args, node_paths=node_paths)
Returns a command that when executed will add a node package to the
current node module.

:param package: string. A valid npm/yarn package description. The
    accepted forms are package-name, package-name@version,
    package-name@tag, file:/folder, file:/path/to.tgz,
    https://url/to.tgz
:param node_paths: A list of paths that should be included in $PATH
    when running the script.
:param type_option: A value from PackageInstallationTypeOption that
    indicates the type of package to be installed. Defaults to 'prod',
    which is a production dependency.
:param version_option: A value from PackageInstallationVersionOption
    that indicates how to match version. Defaults to None, which uses
    the package manager default.
def run(self):
    with utils.ChangeDir(self.dirname):
        sys.path.insert(0, self.dirname)
        sys.argv[1:] = self.args
        runpy.run_module(self.not_suffixed(self.filename),
                         run_name='__main__', alter_sys=True)
Executes the code of the specified module.
def configure(self, args):
    for plug in self._plugins:
        plug_name = self.plugin_name(plug)
        plug.enabled = getattr(args, "plugin_%s" % plug_name, False)
        if plug.enabled and getattr(plug, "configure", None):
            if callable(getattr(plug, "configure", None)):
                plug.configure(args)
    LOG.debug("Available plugins: %s", self._plugins)
    self.plugins = [plugin for plugin in self._plugins
                    if getattr(plugin, "enabled", False)]
    LOG.debug("Enabled plugins: %s", self.plugins)
Configure the set of plugins with the given args. After configuration, disabled plugins are removed from the plugins list.
def setHandler(self, handler, cbfn):
    if handler == "async-responses":
        self.async_responses_callback = cbfn
    elif handler == "registrations-expired":
        self.registrations_expired_callback = cbfn
    elif handler == "de-registrations":
        self.de_registrations_callback = cbfn
    elif handler == "reg-updates":
        self.reg_updates_callback = cbfn
    elif handler == "registrations":
        self.registrations_callback = cbfn
    elif handler == "notifications":
        self.notifications_callback = cbfn
    else:
        self.log.warn("'%s' is not a legitimate notification channel option. "
                      "Please check your spelling.", handler)
Register a handler for a particular notification type.
These are the types of notifications that are acceptable:

| 'async-responses'
| 'registrations-expired'
| 'de-registrations'
| 'reg-updates'
| 'registrations'
| 'notifications'

:param str handler: name of the notification type
:param fnptr cbfn: function to pass the notification channel messages to
:return: Nothing
def banner(message, width=30, style='banner', out=sys.stdout):
    out.write(header([message], width=max(width, len(message)), style=style) + '\n')
    out.flush()
Prints a banner message

Parameters
----------
message : string
    The message to print in the banner
width : int
    The minimum width of the banner (Default: 30)
style : string
    A line formatting style (Default: 'banner')
out : writer
    An object that has write() and flush() methods (Default: sys.stdout)
def login_required(self, f):
    @wraps(f)
    def wrapped_f(*args, **kwargs):
        if current_user.anonymous:
            msg = ("Rejected User '%s' access to '%s' as user"
                   " could not be authenticated.")
            self.logger.warn(msg % (current_user.user_id, request.path))
            raise FlaskKeystoneUnauthorized()
        return f(*args, **kwargs)
    return wrapped_f
Require a user to be validated by Identity to access an endpoint.

:raises: FlaskKeystoneUnauthorized

This method will gate a particular endpoint to only be accessed by
:class:`FlaskKeystone.User`'s. This means that a valid token will need
to be passed to grant access. If a User is not authenticated, a
FlaskKeystoneUnauthorized will be thrown, resulting in a 401 response
to the client.
def tune(self, verbose=None):
    if not self._tune:
        return False
    else:
        self.w_tune.append(
            abs(self.stochastic.last_value - self.stochastic.value))
        self.w = 2 * (sum(self.w_tune) / len(self.w_tune))
        return True
Tuning initial slice width parameter
def on(event, *args, **kwargs):
    def wrapper(func):
        # fold extra positional args into kwargs keyed by position
        for i, arg in enumerate(args):
            kwargs[i] = arg
        func.event = Event(event, kwargs)
        return func
    return wrapper
Event method wrapper for bot mixins. When a bot is constructed, its metaclass inspects all members of all base classes, and looks for methods marked with an event attribute which is assigned via this wrapper. It then stores all the methods in a dict that maps event names to lists of these methods, which are each called when the event occurs.
def _default_format(self, occur):
    if self.text or self.children:
        return self.start_tag() + "%s" + self.end_tag()
    return self.start_tag(empty=True)
Return the default serialization format.
# The try/finally around ``yield`` implies this is meant to be used as a
# context manager; the decorator (and its import) are restored here on
# that assumption.
import contextlib

@contextlib.contextmanager
def _open(filename=None, mode='r'):
    if not filename or filename == '-':
        if not mode or 'r' in mode:
            file = sys.stdin
        elif 'w' in mode:
            file = sys.stdout
        else:
            raise ValueError('Invalid mode for file: {}'.format(mode))
    else:
        file = open(filename, mode)
    try:
        yield file
    finally:
        if file not in (sys.stdin, sys.stdout):
            file.close()
Open a file or ``sys.stdout`` depending on the provided filename.

Args:
    filename (str): The path to the file that should be opened. If
        ``None`` or ``'-'``, ``sys.stdout`` or ``sys.stdin`` is
        returned depending on the desired mode. Defaults to ``None``.
    mode (str): The mode that should be used to open the file.

Yields:
    A file handle.
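Typical use as a context manager (assuming the @contextlib.contextmanager decorator restored above):

with _open(None, 'w') as fh:        # no filename: writes to sys.stdout
    fh.write('hello\n')
with _open('out.txt', 'w') as fh:   # real file, closed on exit
    fh.write('hello\n')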
def command(self, function=None, prefix=None, unobserved=False):
    captured_f = self.capture(function, prefix=prefix)
    captured_f.unobserved = unobserved
    self.commands[function.__name__] = captured_f
    return captured_f
Decorator to define a new command for this Ingredient or Experiment.

The name of the command will be the name of the function. It can be
called from the command-line or by using the run_command function.

Commands are automatically also captured functions.

The command can be given a prefix, to restrict its configuration space
to a subtree. (see ``capture`` for more information)

A command can be made unobserved (i.e. ignoring all observers) by
passing the unobserved=True keyword argument.
def config(self, charm_id, channel=None):
    url = '{}/{}/meta/charm-config'.format(self.url, _get_path(charm_id))
    data = self._get(_add_channel(url, channel))
    return data.json()
Get the config data for a charm.

@param charm_id The charm's id.
@param channel Optional channel name.
def ApplyPluginToMultiTypeCollection(plugin, output_collection, source_urn=None):
    for chunk in plugin.Start():
        yield chunk
    for stored_type_name in sorted(output_collection.ListStoredTypes()):
        stored_cls = rdfvalue.RDFValue.classes[stored_type_name]

        def GetValues():
            for timestamp, value in output_collection.ScanByType(stored_type_name):
                _ = timestamp
                if source_urn:
                    value.source = source_urn
                yield value

        for chunk in plugin.ProcessValues(stored_cls, GetValues):
            yield chunk
    for chunk in plugin.Finish():
        yield chunk
Applies instant output plugin to a multi-type collection.

Args:
    plugin: InstantOutputPlugin instance.
    output_collection: MultiTypeCollection instance.
    source_urn: If not None, override source_urn for collection items.
        This has to be used when exporting flow results - their
        GrrMessages don't have "source" attribute set.

Yields:
    Byte chunks, as generated by the plugin.
def plot_state_histogram(self, ax):
    title = "Estimated state"
    nqc = int(round(np.log2(self.rho_est.data.shape[0])))
    labels = ut.basis_labels(nqc)
    return ut.state_histogram(self.rho_est, ax, title)
Visualize the complex matrix elements of the estimated state. :param matplotlib.Axes ax: A matplotlib Axes object to plot into.
def _build_xpath_expr(attrs):
    if 'class_' in attrs:
        attrs['class'] = attrs.pop('class_')
    s = ["@{key}={val!r}".format(key=k, val=v) for k, v in attrs.items()]
    return '[{expr}]'.format(expr=' and '.join(s))
Build an xpath expression to simulate bs4's ability to pass in kwargs
to search for attributes when using the lxml parser.

Parameters
----------
attrs : dict
    A dict of HTML attributes. These are NOT checked for validity.

Returns
-------
expr : unicode
    An XPath expression that checks for the given HTML attributes.
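A quick sketch of the output:

assert _build_xpath_expr({'id': 'main'}) == "[@id='main']"
# 'class_' is mapped to the reserved attribute name 'class':
assert _build_xpath_expr({'class_': 'wide'}) == "[@class='wide']"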
def normalize_missing(xs):
    if isinstance(xs, dict):
        for k, v in xs.items():
            xs[k] = normalize_missing(v)
    elif isinstance(xs, (list, tuple)):
        xs = [normalize_missing(x) for x in xs]
    elif isinstance(xs, six.string_types):
        if xs.lower() in ["none", "null"]:
            xs = None
        elif xs.lower() == "true":
            xs = True
        elif xs.lower() == "false":
            xs = False
    return xs
Normalize missing values to avoid string 'None' inputs.
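For example (the input record is hypothetical):

record = {'sample': 'Test1', 'phenotype': 'None', 'validated': 'true'}
assert normalize_missing(record) == {'sample': 'Test1', 'phenotype': None,
                                     'validated': True}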
def literalize(self):
    if self.isliteral:
        return self
    args = tuple(arg.literalize() for arg in self.args)
    if all(arg is self.args[i] for i, arg in enumerate(args)):
        return self
    return self.__class__(*args)
Return an expression where NOTs are only occurring as literals. Applied recursively to subexpressions.
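A hedged sketch of the effect, assuming the boolean.py-style algebra this method appears to belong to:

import boolean
algebra = boolean.BooleanAlgebra()
expr = algebra.parse('~(a&b)')
print(expr.literalize())   # ~a|~b: the NOT is pushed down onto literals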
def LoadFromFile(cls, script_path):
    _name, dev = ComponentRegistry().load_extension(script_path,
                                                    class_filter=VirtualTile,
                                                    unique=True)
    return dev
Import a virtual tile from a file rather than an installed module.

script_path must point to a python file ending in .py that contains
exactly one VirtualTile class definition. That class is loaded and
executed as if it were installed.

To facilitate development, if there is a proxy object defined in the
same file, it is also added to the HardwareManager proxy registry so
that it can be found and used with the device.

Args:
    script_path (string): The path to the script to load

Returns:
    VirtualTile: A subclass of VirtualTile that was loaded from
    script_path
def save_config(self):
    if not os.path.exists(self._conf_dir):
        os.makedirs(self._conf_dir)
    conf_file = os.path.join(self._conf_dir, "dql.json")
    with open(conf_file, "w") as ofile:
        json.dump(self.conf, ofile, indent=2)
Save the conf file
def get_broks_from_satellites(self):
    for satellites in [self.conf.brokers, self.conf.schedulers,
                       self.conf.pollers, self.conf.reactionners,
                       self.conf.receivers]:
        for satellite in satellites:
            if not satellite.reachable:
                continue
            logger.debug("Getting broks from: %s", satellite.name)
            new_broks = satellite.get_and_clear_broks()
            if new_broks:
                logger.debug("Got %d broks from: %s", len(new_broks), satellite.name)
            for brok in new_broks:
                self.add(brok)
Get broks from all my internal satellite links.

The arbiter gets the broks from ALL the known satellites.

:return: None
def get_diagonalizing_basis(list_of_pauli_terms):
    qubit_ops = set(reduce(lambda x, y: x + y,
                           [list(term._ops.items()) for term in list_of_pauli_terms]))
    qubit_ops = sorted(list(qubit_ops), key=lambda x: x[0])
    return PauliTerm.from_list(list(map(lambda x: tuple(reversed(x)), qubit_ops)))
Find the Pauli Term with the most non-identity terms.

:param list_of_pauli_terms: List of Pauli terms to check
:return: The highest weight Pauli Term
:rtype: PauliTerm
def convert_coordinates(self, points, axisorder='blr'):
    return convert_coordinates_sequence(points, self._boundary_scale,
                                        self._axis_limits, axisorder)
Convert data coordinates to simplex coordinates for plotting in the case that axis limits have been applied.
def _get_column_dtype(llwcol):
    try:
        dtype = llwcol.dtype
        if dtype is numpy.dtype('O'):
            raise AttributeError
        return dtype
    except AttributeError:
        try:
            llwtype = llwcol.parentNode.validcolumns[llwcol.Name]
        except AttributeError:
            try:
                return type(llwcol[0])
            except IndexError:
                return None
        else:
            from ligo.lw.types import (ToPyType, ToNumPyType)
            try:
                return ToNumPyType[llwtype]
            except KeyError:
                return ToPyType[llwtype]
Get the data type of a LIGO_LW `Column`

Parameters
----------
llwcol : :class:`~ligo.lw.table.Column`, `numpy.ndarray`, iterable
    a LIGO_LW column, a numpy array, or an iterable

Returns
-------
dtype : `type`, None
    the object data type for values in the given column; `None` is
    returned if ``llwcol`` is a `numpy.ndarray` with `numpy.object_`
    dtype, or no data type can be parsed (e.g. empty list)
def search_results_total(html, xpath, check, delimiter):
    for container in html.findall(xpath):
        if check in container.findtext('.'):
            text = container.findtext('.').split(delimiter)
            total = int(text[-1].strip())
            return total
Get the total number of results from the DOM of a search index.
def set_replication_enabled(status, host=None, core_name=None):
    if not _is_master() and _get_none_or_value(host) is None:
        return _get_return_dict(
            False, errors=['Only minions configured as master can run this'])
    cmd = 'enablereplication' if status else 'disablereplication'
    if _get_none_or_value(core_name) is None and _check_for_cores():
        ret = _get_return_dict()
        success = True
        for name in __opts__['solr.cores']:
            resp = set_replication_enabled(status, host, name)
            if not resp['success']:
                success = False
            data = {name: {'data': resp['data']}}
            ret = _update_return_dict(ret, success, data,
                                      resp['errors'], resp['warnings'])
        return ret
    else:
        # cmd already encodes the enable/disable choice
        return _replication_request(cmd, host=host, core_name=core_name)
MASTER ONLY
Sets the master to ignore poll requests from the slaves. Useful when
you don't want the slaves replicating during indexing or when clearing
the index.

status : boolean
    Sets the replication status to the specified state.
host : str (None)
    The solr host to query. __opts__['host'] is default.
core_name : str (None)
    The name of the solr core if using cores. Leave this blank if you
    are not using cores or if you want to set the status on all cores.

Return : dict<str,obj>::

    {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}

CLI Example:

.. code-block:: bash

    salt '*' solr.set_replication_enabled false, None, music
def eps(self, file, scale=1, module_color=(0, 0, 0), background=None,
        quiet_zone=4):
    builder._eps(self.code, self.version, file, scale, module_color,
                 background, quiet_zone)
This method writes the QR code out as an EPS document. The code is
drawn by only writing the data modules corresponding to a 1. They are
drawn using a line, such that contiguous modules in a row are drawn
with a single line.

The *file* parameter is used to specify where to write the document
to. It can either be a writable (text) stream or a file path.

The *scale* parameter sets how large to draw a single module. By
default one point (1/72 inch) is used to draw a single module. This
may make the code too small to be read efficiently. Increasing the
scale will make the code larger. This method will accept fractional
scales (e.g. 2.5).

The *module_color* parameter sets the color of the data modules. The
*background* parameter sets the background (page) color to use. They
are specified as either a triple of floats, e.g. (0.5, 0.5, 0.5), or a
triple of integers, e.g. (128, 128, 128). The default *module_color*
is black. The default *background* color is no background at all.

The *quiet_zone* parameter sets how large to draw the border around
the code. As per the standard, the default value is 4 modules.

Examples:
    >>> qr = pyqrcode.create('Hello world')
    >>> qr.eps('hello-world.eps', scale=2.5, module_color='#36C')
    >>> qr.eps('hello-world2.eps', background='#eee')
    >>> out = io.StringIO()
    >>> qr.eps(out, module_color=(.4, .4, .4))
def ec2_credentials_create(user_id=None, name=None,
                           tenant_id=None, tenant=None,
                           profile=None, **connection_args):
    kstone = auth(profile, **connection_args)
    if name:
        user_id = user_get(name=name, profile=profile,
                           **connection_args)[name]['id']
    if not user_id:
        return {'Error': 'Could not resolve User ID'}
    if tenant:
        tenant_id = tenant_get(name=tenant, profile=profile,
                               **connection_args)[tenant]['id']
    if not tenant_id:
        return {'Error': 'Could not resolve Tenant ID'}
    newec2 = kstone.ec2.create(user_id, tenant_id)
    return {'access': newec2.access,
            'secret': newec2.secret,
            'tenant_id': newec2.tenant_id,
            'user_id': newec2.user_id}
Create EC2-compatible credentials for user per tenant

CLI Examples:

.. code-block:: bash

    salt '*' keystone.ec2_credentials_create name=admin tenant=admin

    salt '*' keystone.ec2_credentials_create \
            user_id=c965f79c4f864eaaa9c3b41904e67082 \
            tenant_id=722787eb540849158668370dc627ec5f
def create_graphics(self):
    rnftools.utils.shell('"{}" "{}"'.format("gnuplot", self._gp_fn))
    if self.render_pdf_method is not None:
        svg_fn = self._svg_fn
        pdf_fn = self._pdf_fn
        svg42pdf(svg_fn, pdf_fn, method=self.render_pdf_method)
Create images related to this BAM file using GnuPlot.
def list_(prefix='', region=None, key=None, keyid=None, profile=None):
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    def extract_name(queue_url):
        return _urlparse(queue_url).path.split('/')[2]

    try:
        r = conn.list_queues(QueueNamePrefix=prefix)
        urls = r.get('QueueUrls', [])
        return {'result': [extract_name(url) for url in urls]}
    except botocore.exceptions.ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
Return a list of the names of all visible queues.

.. versionadded:: 2016.11.0

CLI Example:

.. code-block:: bash

    salt myminion boto_sqs.list region=us-east-1
def display_info(self):
    if self.moc is None:
        print('No MOC information present')
        return
    if self.moc.name is not None:
        print('Name:', self.moc.name)
    if self.moc.id is not None:
        print('Identifier:', self.moc.id)
    print('Order:', self.moc.order)
    print('Cells:', self.moc.cells)
    print('Area:', self.moc.area_sq_deg, 'square degrees')
Display basic information about the running MOC.
def open(self, print_matlab_welcome=False):
    if self.process and not self.process.returncode:
        raise MatlabConnectionError('Matlab(TM) process is still active. '
                                    'Use close to close it')
    self.process = subprocess.Popen(
        [self.matlab_process_path, '-nojvm', '-nodesktop'],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    flags = fcntl.fcntl(self.process.stdout, fcntl.F_GETFL)
    fcntl.fcntl(self.process.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    if print_matlab_welcome:
        self._sync_output()
    else:
        self._sync_output(None)
Opens the matlab process.
def createNode(self, cls, name, *args, **kw):
    m = self.findNode(name)
    if m is None:
        m = cls(name, *args, **kw)
        self.addNode(m)
    return m
Add a node of type cls to the graph if it does not already exist by the given name
def _query(self, text):
    params = (
        ('v', self.api_version),
        ('query', text),
        ('lang', self.language),
        ('sessionId', self.session_id),
        ('timezone', self.timezone),
    )
    if self.query_response:
        self.previous_query_response = self.query_response
    self.query_response = result = self.session.get(url=self.query_url,
                                                    params=params).json()
    return result
Takes natural language text and information as query parameters and returns information as JSON.
def fn_std(self, a, axis=None):
    return numpy.nanstd(self._to_ndarray(a), axis=axis)
Compute the standard deviation of an array, ignoring NaNs.

:param a: The array.
:param axis: Axis along which to compute; ``None`` for the whole array.
:return: The standard deviation of the array.
def set_level(self, level):
    for handler in self.__coloredlogs_handlers:
        handler.setLevel(level=level)
    self.logger.setLevel(level=level)
Set the logging level of this logger. :param level: must be an int or a str.
def get_clusters(self, platform, retry_contexts, all_clusters):
    possible_cluster_info = {}
    candidates = set(copy.copy(all_clusters))
    while candidates and not possible_cluster_info:
        wait_for_any_cluster(retry_contexts)
        for cluster in sorted(candidates, key=attrgetter('priority')):
            ctx = retry_contexts[cluster.name]
            if ctx.in_retry_wait:
                continue
            if ctx.failed:
                continue
            try:
                cluster_info = self.get_cluster_info(cluster, platform)
                possible_cluster_info[cluster] = cluster_info
            except OsbsException:
                ctx.try_again_later(self.find_cluster_retry_delay)
        candidates -= set([c for c in candidates if retry_contexts[c.name].failed])
    ret = sorted(possible_cluster_info.values(), key=lambda c: c.cluster.priority)
    ret = sorted(ret, key=lambda c: c.load)
    return ret
return clusters sorted by load.
def cmd_tr(self, x=None, y=None, xy=None, ch=None):
    viewer = self.get_viewer(ch)
    if viewer is None:
        self.log("No current viewer/channel.")
        return
    fx, fy, sxy = viewer.get_transforms()
    if x is None and y is None and xy is None:
        self.log("x=%s y=%s xy=%s" % (fx, fy, sxy))
    else:
        if x is None:
            x = fx
        else:
            x = (x != 0)
        if y is None:
            y = fy
        else:
            y = (y != 0)
        if xy is None:
            xy = sxy
        else:
            xy = (xy != 0)
        viewer.transform(x, y, xy)
tr x=0|1 y=0|1 xy=0|1 ch=chname

Transform the image for the given viewer/channel by flipping (x=1
and/or y=1) or swapping axes (xy=1). If no value is given, reports the
current transform settings.
def report(self, name, **kwargs):
    group_obj = Report(name, **kwargs)
    return self._group(group_obj)
Add Report data to Batch object.

Args:
    name (str): The name for this Group.
    file_name (str): The name for the attached file for this Group.
    date_added (str, kwargs): The date timestamp the Indicator was
        created.
    file_content (str;method, kwargs): The file contents or callback
        method to retrieve file content.
    publish_date (str, kwargs): The publish datetime expression for
        this Group.
    xid (str, kwargs): The external id for this Group.

Returns:
    obj: An instance of Report.
# Decorator restored on the assumption this is used as a context manager
# (it yields inside try/finally to undo the monkey-patch on exit).
@contextlib.contextmanager
def override_temp(replacement):
    pkg_resources.py31compat.makedirs(replacement, exist_ok=True)
    saved = tempfile.tempdir
    tempfile.tempdir = replacement
    try:
        yield
    finally:
        tempfile.tempdir = saved
Monkey-patch tempfile.tempdir with replacement, ensuring it exists
def checkASN(filename):
    extnType = filename[filename.rfind('_') + 1:filename.rfind('.')]
    if isValidAssocExtn(extnType):
        return True
    else:
        return False
Determine if the filename provided to the function belongs to an
association.

Parameters
----------
filename : string

Returns
-------
validASN : boolean value
def get_state(self):
    D = {}
    for key in self._state_props:
        D[key] = getattr(self, key)
    return D
Get the current view state of the camera.

Returns a dict of key-value pairs. The exact keys depend on the
camera. Can be passed to set_state() (of this or another camera of the
same type) to reproduce the state.
def __get_connection_info():
    conn_info = {}
    try:
        conn_info['hostname'] = __opts__['mysql_auth']['hostname']
        conn_info['username'] = __opts__['mysql_auth']['username']
        conn_info['password'] = __opts__['mysql_auth']['password']
        conn_info['database'] = __opts__['mysql_auth']['database']
        conn_info['auth_sql'] = __opts__['mysql_auth']['auth_sql']
    except KeyError as e:
        log.error('%s does not exist', e)
        return None
    return conn_info
Grab MySQL Connection Details
def ancestor(self, index):
    if not isinstance(index, int):
        self.log_exc(u"index is not an integer", None, True, TypeError)
    if index < 0:
        self.log_exc(u"index cannot be negative", None, True, ValueError)
    parent_node = self
    for i in range(index):
        if parent_node is None:
            break
        parent_node = parent_node.parent
    return parent_node
Return the ``index``-th ancestor.

The 0-th ancestor is the node itself, the 1-th ancestor is its parent
node, etc.

:param int index: the number of levels to go up
:rtype: :class:`~aeneas.tree.Tree`
:raises: TypeError if ``index`` is not an int
:raises: ValueError if ``index`` is negative
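Given a three-level chain root -> child -> grandchild (hypothetical Tree nodes), the method behaves like this:

assert grandchild.ancestor(0) is grandchild
assert grandchild.ancestor(1) is child
assert grandchild.ancestor(2) is root
assert grandchild.ancestor(3) is None   # walked past the root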
def delete(python_data: LdapObject, database: Optional[Database] = None) -> None:
    dn = python_data.get_as_single('dn')
    assert dn is not None
    database = get_database(database)
    connection = database.connection
    connection.delete(dn)
Delete a LdapObject from the database.
def get_key_policy(key_id, policy_name, region=None, key=None, keyid=None,
                   profile=None):
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    try:
        key_policy = conn.get_key_policy(key_id, policy_name)
        r['key_policy'] = salt.serializers.json.deserialize(
            key_policy['Policy'],
            object_pairs_hook=odict.OrderedDict
        )
    except boto.exception.BotoServerError as e:
        r['error'] = __utils__['boto.get_error'](e)
    return r
Get the policy for the specified key.

CLI example::

    salt myminion boto_kms.get_key_policy 'alias/mykey' mypolicy
def wncomd(left, right, window):
    assert isinstance(window, stypes.SpiceCell)
    assert window.dtype == 1
    left = ctypes.c_double(left)
    right = ctypes.c_double(right)
    result = stypes.SpiceCell.double(window.size)
    libspice.wncomd_c(left, right, ctypes.byref(window), result)
    return result
Determine the complement of a double precision window with respect to
a specified interval.

http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wncomd_c.html

:param left: left endpoint of the complement interval.
:type left: float
:param right: right endpoint of the complement interval.
:type right: float
:param window: Input window.
:type window: spiceypy.utils.support_types.SpiceCell
:return: Complement of window with respect to left and right.
:rtype: spiceypy.utils.support_types.SpiceCell
def decode_header(header, normalize=False):
    regex = r'"(=\?.+?\?.+?\?[^ ?]+\?=)"'
    value = re.sub(regex, r'\1', header)
    logging.debug("unquoted header: |%s|", value)
    valuelist = email.header.decode_header(value)
    decoded_list = []
    for v, enc in valuelist:
        v = string_decode(v, enc)
        decoded_list.append(string_sanitize(v))
    value = ''.join(decoded_list)
    if normalize:
        value = re.sub(r'\n\s+', r' ', value)
    return value
decode a header value to a unicode string

values are usually a mixture of different substrings encoded in quoted
printable using different encodings. This turns it into a single
unicode string

:param header: the header value
:type header: str
:param normalize: replace trailing spaces after newlines
:type normalize: bool
:rtype: str
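A hedged sketch of the effect on an RFC 2047 encoded word (string_decode and string_sanitize are the module's own helpers, assumed available):

raw = '=?utf-8?q?Liebe_Gr=C3=BC=C3=9Fe?='
print(decode_header(raw))   # Liebe Grüße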
def sitespeptidesproteins(df, site_localization_probability=0.75):
    sites = filters.filter_localization_probability(
        df, site_localization_probability)['Sequence window']
    peptides = set(df['Sequence window'])
    proteins = set([str(p).split(';')[0] for p in df['Proteins']])
    return len(sites), len(peptides), len(proteins)
Generate summary count of modified sites, peptides and proteins in a
processed dataset ``DataFrame``.

Returns the number of sites, peptides and proteins as calculated as
follows:

- `sites` (>0.75; or specified site localization probability) count of
  all sites > threshold
- `peptides` the set of `Sequence windows` in the dataset (unique
  peptides)
- `proteins` the set of unique leading proteins in the dataset

:param df: Pandas ``DataFrame`` of processed data
:param site_localization_probability: ``float`` site localization
    probability threshold (for sites calculation)
:return: ``tuple`` of ``int``, containing sites, peptides, proteins
def register(cls, *args, **kwargs):
    if cls.app is None:
        return register(*args, handler=cls, **kwargs)
    return cls.app.register(*args, handler=cls, **kwargs)
Register view to handler.
def get_merged_filter(self):
    track = set()
    follow = set()
    for handler in self.handlers:
        track.update(handler.filter.track)
        follow.update(handler.filter.follow)
    return TweetFilter(track=list(track), follow=list(follow))
Return merged filter from list of handlers.

:return: merged filter
:rtype: :class:`~responsebot.models.TweetFilter`
def parse_PISCES_output(pisces_output, path=False):
    pisces_dict = {}
    if path:
        pisces_path = Path(pisces_output)
        pisces_content = pisces_path.read_text().splitlines()[1:]
    else:
        pisces_content = pisces_output.splitlines()[1:]
    for line in pisces_content:
        pdb = line.split()[0][:4].lower()
        chain = line.split()[0][-1]
        pdb_dict = {'length': line.split()[1],
                    'method': line.split()[2],
                    'resolution': line.split()[3],
                    'R-factor': line.split()[4],
                    'R-free': line.split()[5]}
        if pdb in pisces_dict:
            pisces_dict[pdb]['chains'].append(chain)
        else:
            pdb_dict['chains'] = [chain]
            pisces_dict[pdb] = pdb_dict
    return pisces_dict
Takes the output list of a PISCES cull and returns it in a usable
dictionary.

Notes
-----
Designed for outputs of protein sequence redundancy culls conducted
using the PISCES server.
http://dunbrack.fccc.edu/PISCES.php
G. Wang and R. L. Dunbrack, Jr. PISCES: a protein sequence culling
server. Bioinformatics, 19:1589-1591, 2003.

Parameters
----------
pisces_output : str or path
    Output list of non-redundant protein chains from PISCES, or path
    to text file.
path : bool
    True if path given rather than string.

Returns
-------
pisces_dict : dict
    Data output by PISCES in dictionary form.
def get_sequence_rules(self):
    collection = JSONClientValidated('assessment_authoring',
                                     collection='SequenceRule',
                                     runtime=self._runtime)
    result = collection.find(self._view_filter()).sort('_id', DESCENDING)
    return objects.SequenceRuleList(result, runtime=self._runtime, proxy=self._proxy)
Gets all ``SequenceRules``.

return: (osid.assessment.authoring.SequenceRuleList) - the returned
    ``SequenceRule`` list
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
def shell_process(command, input_data=None, background=False, exitcode=False):
    data = None
    try:
        kwargs = {
            'shell': isinstance(command, basestring),
            'stdout': subprocess.PIPE,
            'stderr': subprocess.PIPE
        }
        if input_data is not None:
            kwargs['stdin'] = subprocess.PIPE
        proc = subprocess.Popen(command, **kwargs)
        if not background:
            output, _ = proc.communicate(input_data)
            retcode = proc.returncode
            if retcode == 0:
                data = str(output).rstrip()
        else:
            retcode = None
            if input_data:
                raise TypeError(u'Backgrounded does not support input data.')
    except OSError as exc:
        retcode = -exc.errno
    if exitcode:
        return data, retcode
    else:
        return data
Shells a process with the given shell command.

`command`
    Shell command to spawn.
`input_data`
    String to pipe to process as input.
`background`
    Set to ``True`` to fork process into background.
    NOTE: This exits immediately with no result returned.
`exitcode`
    Set to ``True`` to also return process exit status code.

If `exitcode` is ``False``, then this returns the output string from
the process or ``None`` if it failed. Otherwise, this returns a tuple
with the output string from the process (or ``None`` if it failed) and
the exit status code.

Example::

    (``None``, 1) <-- failed
    ('Some data', 0) <-- success
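Usage sketches, assuming standard POSIX tools and the Python 2 runtime the basestring check implies:

shell_process('echo hello')                        # 'hello'
shell_process('false', exitcode=True)              # (None, 1)
shell_process(['grep', 'b'], input_data='a\nb\n')  # 'b', no shell used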
def push(item, remote_addr, trg_queue, protocol=u'jsonrpc'):
    if protocol == u'jsonrpc':
        try:
            server = Server(remote_addr, encoding=_c.FSQ_CHARSET)
            return server.enqueue(item.id, trg_queue, item.item.read())
        except Exception as e:
            raise FSQPushError(e)
    raise ValueError('Unknown protocol: {0}'.format(protocol))
Enqueue an FSQWorkItem at a remote queue
def get_global_shelf_fpath(appname='default', ensure=False):
    global_cache_dir = get_global_cache_dir(appname, ensure=ensure)
    shelf_fpath = join(global_cache_dir, meta_util_constants.global_cache_fname)
    return shelf_fpath
Returns the filepath to the global shelf
def _add_plots_to_output(out, data):
    out["plot"] = {}
    diagram_plot = _add_diagram_plot(out, data)
    if diagram_plot:
        out["plot"]["diagram"] = diagram_plot
    scatter = _add_scatter_plot(out, data)
    if scatter:
        out["plot"]["scatter"] = scatter
    scatter_global = _add_global_scatter_plot(out, data)
    if scatter_global:
        out["plot"]["scatter_global"] = scatter_global
    return out
Add CNVkit plots summarizing called copy number values.
def partof(self, ns1, id1, ns2, id2):
    rel_fun = lambda node, graph: self.partof_objects(node)
    return self.directly_or_indirectly_related(ns1, id1, ns2, id2,
                                               self.partof_closure, rel_fun)
Return True if one entity is "partof" another.

Parameters
----------
ns1 : str
    Namespace code for the first entity.
id1 : str
    URI for the first entity.
ns2 : str
    Namespace code for the second entity.
id2 : str
    URI for the second entity.

Returns
-------
bool
    True if the first entity has a "partof" relationship with the
    second, either directly or through a series of intermediates;
    False otherwise.
def get_dataset(self, X, y=None):
    if is_dataset(X):
        return X
    dataset = self.dataset
    is_initialized = not callable(dataset)
    kwargs = self._get_params_for('dataset')
    if kwargs and is_initialized:
        raise TypeError("Trying to pass an initialized Dataset while "
                        "passing Dataset arguments ({}) is not "
                        "allowed.".format(kwargs))
    if is_initialized:
        return dataset
    return dataset(X, y, **kwargs)
Get a dataset that contains the input data and is passed to the
iterator.

Override this if you want to initialize your dataset differently.

Parameters
----------
X : input data, compatible with skorch.dataset.Dataset
    By default, you should be able to pass:

    * numpy arrays
    * torch tensors
    * pandas DataFrame or Series
    * scipy sparse CSR matrices
    * a dictionary of the former three
    * a list/tuple of the former three
    * a Dataset

    If this doesn't work with your data, you have to pass a
    ``Dataset`` that can deal with the data.
y : target data, compatible with skorch.dataset.Dataset
    The same data types as for ``X`` are supported. If your X is a
    Dataset that contains the target, ``y`` may be set to None.

Returns
-------
dataset
    The initialized dataset.
def get(cls, pid_value, pid_type=None, **kwargs):
    return cls(
        PersistentIdentifier.get(pid_type or cls.pid_type, pid_value,
                                 pid_provider=cls.pid_provider),
        **kwargs)
Get a persistent identifier for this provider.

:param pid_type: Persistent identifier type. (Default: configured
    :attr:`invenio_pidstore.providers.base.BaseProvider.pid_type`)
:param pid_value: Persistent identifier value.
:param kwargs: See
    :meth:`invenio_pidstore.providers.base.BaseProvider` required
    initialization properties.
:returns: A :class:`invenio_pidstore.providers.base.BaseProvider`
    instance.
def getBlocks(sentences, n):
    blocks = []
    for i in range(0, len(sentences), n):
        blocks.append(sentences[i:(i + n)])
    return blocks
Get blocks of n sentences together.

:param sentences: List of strings where each string is a sentence.
:type sentences: list
:param n: Maximum blocksize for sentences, i.e. a block will be
    composed of ``n`` sentences.
:type n: int

:returns: Blocks of n sentences.
:rtype: list-of-lists

.. code-block:: python

    import rnlp

    example = "Hello there. How are you? I am fine."
    sentences = rnlp.getSentences(example)
    # ['Hello there', 'How are you', 'I am fine']

    blocks = rnlp.getBlocks(sentences, 2)
    # with 1: [['Hello there'], ['How are you'], ['I am fine']]
    # with 2: [['Hello there', 'How are you'], ['I am fine']]
    # with 3: [['Hello there', 'How are you', 'I am fine']]
def _rel_path(self, path, basepath=None):
    basepath = basepath or self.src_dir
    return path[len(basepath) + 1:]
trim off basepath
def cal_k_bm3(p, k):
    v = cal_v_bm3(p, k)
    return cal_k_bm3_from_v(v, k)
calculate bulk modulus

:param p: pressure
:param k: [v0, k0, k0p]
:return: bulk modulus at high pressure
def LaplaceCentreWeight(self):
    sz = [1, ] * self.S.ndim
    for ax in self.axes:
        sz[ax] = self.S.shape[ax]
    lcw = 2 * len(self.axes) * np.ones(sz, dtype=self.dtype)
    for ax in self.axes:
        lcw[(slice(None),) * ax + ([0, -1],)] -= 1.0
    return lcw
Centre weighting matrix for TV Laplacian.
def new_filename(data, file_kind, ext):
    nb_key = file_kind + "number"
    if nb_key not in data.keys():
        data[nb_key] = -1
    if not data["override externals"]:
        file_exists = True
        while file_exists:
            data[nb_key] = data[nb_key] + 1
            filename, name = _gen_filename(data, nb_key, ext)
            file_exists = os.path.isfile(filename)
    else:
        data[nb_key] = data[nb_key] + 1
        filename, name = _gen_filename(data, nb_key, ext)
    if data["rel data path"]:
        rel_filepath = posixpath.join(data["rel data path"], name)
    else:
        rel_filepath = name
    return filename, rel_filepath
Returns an available filename.

:param file_kind: Name under which numbering is recorded, such as
    'img' or 'table'.
:type file_kind: str
:param ext: Filename extension.
:type ext: str
:returns: (filename, rel_filepath) where filename is a path in the
    filesystem and rel_filepath is the path to be used in the tex
    code.
def get_fallback_languages():
    lang = translation.get_language()
    fallback_list = settings.FALLBACK_LANGUAGES.get(lang, None)
    if fallback_list:
        return fallback_list
    return settings.FALLBACK_LANGUAGES.get(lang[:2], [])
Retrieve the fallback languages from the settings.py
def error(self, argparser, target, message):
    warnings.warn(
        'Runtime.error is deprecated and will be removed by calmjs-4.0.0',
        DeprecationWarning)
    details = self.get_argparser_details(argparser)
    argparser = details.subparsers[target] if details else self.argparser
    argparser.error(message)
This was used as part of the original non-recursive lookup for the target parser.
def has_changed(self, field_name: str = None) -> bool:
    changed = self._diff_with_initial.keys()
    if self._meta.get_field(field_name).get_internal_type() == 'ForeignKey':
        if not field_name.endswith('_id'):
            field_name = field_name + '_id'
    if field_name in changed:
        return True
    return False
Check if a field has changed since the model was instantiated.
def spec(self, name):
    if isinstance(name, (BaseData, Parameter)):
        name = name.name
    if name in self._param_specs:
        return self._param_specs[name]
    else:
        return self.bound_spec(name)
Returns either the input corresponding to a fileset or field spec, or
a spec or parameter that has either been passed to the study as an
input or can be derived.

Parameters
----------
name : Str | BaseData | Parameter
    A parameter, fileset or field, or the name of one