def delete_ace(self, domain=None, user=None, sid=None):
    if sid is None:
        if domain is None:
            domain = self.cifs_server.domain
        sid = UnityAclUser.get_sid(self._cli, user=user, domain=domain)
    if isinstance(sid, six.string_types):
        sid = [sid]
    ace_list = [self._make_remove_ace_entry(s) for s in sid]
    resp = self.action("setACEs", cifsShareACEs=ace_list)
    resp.raise_if_err()
    return resp

Delete ACE(s) for the share. The caller can supply either the domain and username, or the SID of the user.

:param domain: domain of the user
:param user: username
:param sid: SID of the user, or a list of SIDs
:return: REST API response
def segment_file(self, value):
    assert os.path.isfile(value), "%s is not a valid file" % value
    self._segment_file = value
Setter for _segment_file attribute
def ask_user_for_telemetry():
    answer = " "
    while answer.lower() != 'yes' and answer.lower() != 'no':
        answer = prompt(u'\nDo you agree to sending telemetry (yes/no)? Default answer is yes: ')
        if answer == '':
            answer = 'yes'
    return answer

Asks the user whether we can collect telemetry.
def user_agent_info(sdk_version, custom_user_agent):
    python_version = ".".join(str(x) for x in sys.version_info[0:3])
    user_agent = "ask-python/{} Python/{}".format(sdk_version, python_version)
    if custom_user_agent is None:
        return user_agent
    else:
        return user_agent + " {}".format(custom_user_agent)

Return the user agent info along with the SDK and Python version information.

:param sdk_version: Version of the SDK being used.
:type sdk_version: str
:param custom_user_agent: Custom User Agent string provided by the developer.
:type custom_user_agent: str
:return: User Agent Info string
:rtype: str
def create(self, data, *args, **kwargs):
    super(MambuUser, self).create(data)
    self['user'][self.customFieldName] = self['customInformation']
    self.init(attrs=self['user'])

Creates a user in Mambu.

Parameters:
-data: dictionary with data to send
def importPuppetClasses(self, smartProxyId):
    return self.api.create('{}/{}/import_puppetclasses'
                           .format(self.objName, smartProxyId), '{}')

Function importPuppetClasses
Force the reload of puppet classes

@param smartProxyId: smartProxy Id
@return RETURN: the API result
def formatMessageForBuildResults(self, mode, buildername, buildset, build,
                                 master, previous_results, blamelist):
    ss_list = buildset['sourcestamps']
    results = build['results']
    ctx = dict(results=build['results'],
               mode=mode,
               buildername=buildername,
               workername=build['properties'].get('workername', ["<unknown>"])[0],
               buildset=buildset,
               build=build,
               projects=self.getProjects(ss_list, master),
               previous_results=previous_results,
               status_detected=self.getDetectedStatus(
                   mode, results, previous_results),
               build_url=utils.getURLForBuild(
                   master, build['builder']['builderid'], build['number']),
               buildbot_url=master.config.buildbotURL,
               blamelist=blamelist,
               summary=self.messageSummary(build, results),
               sourcestamps=self.messageSourceStamps(ss_list))
    yield self.buildAdditionalContext(master, ctx)
    msgdict = self.renderMessage(ctx)
    return msgdict
Generate a buildbot mail message and return a dictionary containing the message body, type and subject.
def open_file(filename, as_text=False):
    if filename.lower().endswith('.gz'):
        if as_text:
            return gzip.open(filename, 'rt')
        else:
            return gzip.open(filename, 'rb')
    else:
        if as_text:
            return open(filename, 'rt')
        else:
            return open(filename, 'rb')

Open the file, gunzipping it if its name ends with .gz. If as_text is True, the file is opened in text mode; otherwise it is opened in binary mode.
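A short usage sketch (the path here is hypothetical); the caller gets back an ordinary file object either way:

with open_file('logs/events.txt.gz', as_text=True) as f:  # hypothetical path
    first_line = f.readline()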
def add_cmd_output(self, cmds, suggest_filename=None, root_symlink=None,
                   timeout=300, stderr=True, chroot=True, runat=None,
                   env=None, binary=False, sizelimit=None, pred=None):
    if isinstance(cmds, six.string_types):
        cmds = [cmds]
    if len(cmds) > 1 and (suggest_filename or root_symlink):
        self._log_warn("ambiguous filename or symlink for command list")
    if sizelimit is None:
        sizelimit = self.get_option("log_size")
    for cmd in cmds:
        self._add_cmd_output(cmd, suggest_filename=suggest_filename,
                             root_symlink=root_symlink, timeout=timeout,
                             stderr=stderr, chroot=chroot, runat=runat,
                             env=env, binary=binary, sizelimit=sizelimit,
                             pred=pred)
Run a program or a list of programs and collect the output
def resource_associate_permission(self, token, id, name, scopes, **kwargs):
    return self._realm.client.post(
        '{}/{}'.format(self.well_known['policy_endpoint'], id),
        data=self._get_data(name=name, scopes=scopes, **kwargs),
        headers=self.get_headers(token)
    )

Associates a permission with a Resource.

https://www.keycloak.org/docs/latest/authorization_services/index.html#_service_authorization_uma_policy_api

:param str token: client access token
:param str id: resource id
:param str name: permission name
:param list scopes: scopes access is wanted
:param str description: (optional)
:param list roles: (optional)
:param list groups: (optional)
:param list clients: (optional)
:param str condition: (optional)
:rtype: dict
def watch(args):
    " Watch directory for changes and auto pack sources "
    assert op.isdir(args.source), "Watch mode allowed only for directories."
    print 'Zeta-library v. %s watch mode' % VERSION
    print '================================'
    print 'Ctrl+C for exit\n'
    observer = Observer()
    handler = ZetaTrick(args=args)
    observer.schedule(handler, args.source, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
        print "\nWatch mode stopped."
    observer.join()
Watch directory for changes and auto pack sources
def render_category(slug):
    try:
        category = EntryCategory.objects.get(slug=slug)
    except EntryCategory.DoesNotExist:
        pass
    else:
        return {'category': category}
    return {}

Template tag to render a category with all its entries.
def factory(type, module=None, **kwargs):
    cls = type
    if module is None:
        module = __name__
    fn = lambda member: inspect.isclass(member) and member.__module__ == module
    classes = odict(inspect.getmembers(sys.modules[module], fn))
    members = odict([(k.lower(), v) for k, v in classes.items()])
    lower = cls.lower()
    if lower not in list(members.keys()):
        msg = "Unrecognized class: %s.%s" % (module, cls)
        raise KeyError(msg)
    return members[lower](**kwargs)
Factory for creating objects. Arguments are passed directly to the constructor of the chosen class.
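A usage sketch, assuming a class named Gaussian is defined at module scope in the module passed via `module` (here the calling module); note the lookup is case-insensitive:

class Gaussian(object):
    def __init__(self, sigma=1.0):
        self.sigma = sigma

kernel = factory('GAUSSIAN', module=__name__, sigma=2.0)  # -> Gaussian(sigma=2.0)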
def decode_network_packet(buf):
    off = 0
    blen = len(buf)
    while off < blen:
        ptype, plen = header.unpack_from(buf, off)
        if plen > blen - off:
            raise ValueError("Packet longer than amount of data in buffer")
        if ptype not in _decoders:
            raise ValueError("Message type %i not recognized" % ptype)
        yield ptype, _decoders[ptype](ptype, plen, buf[off:])
        off += plen
Decodes a network packet in collectd format.
def print_new(ctx, name, migration_type):
    click.echo(ctx.obj.repository.generate_migration_name(name, migration_type))
Prints filename of a new migration
def replace_in_file(self, file_path, old_exp, new_exp):
    self.term.print_info(u"Making replacement into {}"
                         .format(self.term.text_in_color(file_path, TERM_GREEN)))
    tmp_file = tempfile.NamedTemporaryFile(mode='w+t', delete=False)
    for filelineno, line in enumerate(io.open(file_path, encoding="utf-8")):
        if old_exp in line:
            line = line.replace(old_exp, new_exp)
        try:
            tmp_file.write(line.encode('utf-8'))
        except TypeError:
            tmp_file.write(line)
    name = tmp_file.name
    tmp_file.close()
    shutil.copy(name, file_path)
    os.remove(name)
In the given file, replace all 'old_exp' by 'new_exp'.
def datetime(self):
    date_string = '%s %s %s' % (self._day, self._date, self._year)
    return datetime.strptime(date_string, '%a %B %d %Y')
Returns a datetime object representing the date the game was played.
def comments(recid):
    from invenio_access.local_config import VIEWRESTRCOLL
    from invenio_access.mailcookie import \
        mail_cookie_create_authorize_action
    from .api import check_user_can_view_comments
    auth_code, auth_msg = check_user_can_view_comments(current_user, recid)
    if auth_code and current_user.is_guest:
        cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {
            'collection': g.collection})
        url_args = {'action': cookie, 'ln': g.ln, 'referer': request.referrer}
        flash(_("Authorization failure"), 'error')
        return redirect(url_for('webaccount.login', **url_args))
    elif auth_code:
        flash(auth_msg, 'error')
        abort(401)
    comments = CmtRECORDCOMMENT.query.filter(db.and_(
        CmtRECORDCOMMENT.id_bibrec == recid,
        CmtRECORDCOMMENT.in_reply_to_id_cmtRECORDCOMMENT == 0,
        CmtRECORDCOMMENT.star_score == 0
    )).order_by(CmtRECORDCOMMENT.date_creation).all()
    return render_template('comments/comments.html',
                           comments=comments,
                           option='comments')
Display comments.
def _tl15(self, data, wavenumber):
    return ((C2 * wavenumber) /
            xu.log((1.0 / data) * C1 * wavenumber ** 3 + 1.0))
Compute the L15 temperature.
def migrate_connections(new_data_path: str):
    dest_connections = os.path.join(
        new_data_path, 'lib', 'NetworkManager', 'system-connections')
    os.makedirs(dest_connections, exist_ok=True)
    with mount_state_partition() as state_path:
        src_connections = os.path.join(
            state_path, 'root-overlay', 'etc', 'NetworkManager',
            'system-connections')
        LOG.info(f"migrate_connections: moving nmcli connections from"
                 f" {src_connections} to {dest_connections}")
        found = migrate_system_connections(src_connections, dest_connections)
    if found:
        return
    LOG.info(
        "migrate_connections: No connections found in state, checking boot")
    with mount_boot_partition() as boot_path:
        src_connections = os.path.join(boot_path, 'system-connections')
        LOG.info(f"migrate_connections: moving nmcli connections from"
                 f" {src_connections} to {dest_connections}")
        found = migrate_system_connections(src_connections, dest_connections)
    if not found:
        LOG.info("migrate_connections: No connections found in boot")

Migrate wifi connection files to new locations and patch them

:param new_data_path: The path to where the new data partition is mounted
def get_price(self):
    for cond in self.conditions:
        for p in cond.parameters:
            if p.name == '_amount':
                return p.value

Return the price from the conditions parameters.

:return: Int
def _add_edges(self, ast_node, trunk=None):
    atom_indices = self._atom_indices
    for atom in ast_node.tail:
        if atom.head == 'atom':
            atom_idx = atom_indices[id(atom)]
            if atom.is_first_kid and atom.parent().head == 'branch':
                trunk_idx = atom_indices[id(trunk)]
                self.add_edge(atom_idx, trunk_idx)
            if not atom.is_last_kid:
                if atom.next_kid.head == 'atom':
                    next_idx = atom_indices[id(atom.next_kid)]
                    self.add_edge(atom_idx, next_idx)
                elif atom.next_kid.head == 'branch':
                    trunk = atom
            else:
                return
        elif atom.head == 'branch':
            self._add_edges(atom, trunk)
Add all bonds in the SMARTS string as edges in the graph.
def save_subresource(self, subresource):
    data = deepcopy(subresource._resource)
    data.pop('id', None)
    data.pop(self.resource_type + '_id', None)
    subresources = getattr(self, subresource.parent_key, {})
    subresources[subresource.id] = data
    setattr(self, subresource.parent_key, subresources)
    yield self._save()

Save the sub-resource

NOTE: Currently assumes subresources are stored within a dictionary,
keyed with the subresource's ID
def _run_atexit():
    global _atexit
    for callback, args, kwargs in reversed(_atexit):
        callback(*args, **kwargs)
    del _atexit[:]
Hook frameworks must invoke this after the main hook body has successfully completed. Do not invoke it if the hook fails.
def update(self, *args):
    "Appends any passed in byte arrays to the digest object."
    for string in args:
        self._hobj.update(string)
    self._fobj = None
Appends any passed in byte arrays to the digest object.
def process_rst_and_summaries(content_generators):
    for generator in content_generators:
        if isinstance(generator, generators.ArticlesGenerator):
            for article in (generator.articles +
                            generator.translations +
                            generator.drafts):
                rst_add_mathjax(article)
                if process_summary.mathjax_script is not None:
                    process_summary(article)
        elif isinstance(generator, generators.PagesGenerator):
            for page in generator.pages:
                rst_add_mathjax(page)
            for page in generator.hidden_pages:
                rst_add_mathjax(page)

Ensure mathjax script is applied to RST and summaries are corrected if specified in user settings.

Handles content attached to ArticleGenerator and PageGenerator objects, since the plugin doesn't know how to handle other Generator types.

For reStructuredText content, examine both articles and pages. If an article or page is reStructuredText and there is math present, append the mathjax script.

Also process summaries if present (only applies to articles) and the user wants summaries processed (via user settings).
def set_parent(self, key_name, new_parent):
    self.unbake()
    kf = self.dct[key_name]
    kf['parent'] = new_parent
    self.bake()
Sets the parent of the key.
def _key(self):
    return Key(self._schema.key_type, self._identity, self._name,
               [str(item.value) for item in self._dimension_fields.values()])
Generates the Key object based on dimension fields.
def call(self, itemMethod):
    item = itemMethod.im_self
    method = itemMethod.im_func.func_name
    return self.batchController.getProcess().addCallback(
        CallItemMethod(storepath=item.store.dbdir,
                       storeid=item.storeID,
                       method=method).do)
Invoke the given bound item method in the batch process. Return a Deferred which fires when the method has been invoked.
def _from_rest_blank(model, props):
    blank = model.get_fields_by_prop('allow_blank', True)
    for field in blank:
        try:
            if props[field] == '':
                props[field] = None
        except KeyError:
            continue

Set empty strings to None where allowed

This is done on fields with `allow_blank=True` which takes an incoming
empty string & sets it to None so validations are skipped. This is
useful on fields that aren't required with format validations like
URLType, EmailType, etc.
def respond_static(self, environ):
    path = os.path.normpath(environ["PATH_INFO"])
    if path == "/":
        content = self.index()
        content_type = "text/html"
    else:
        path = os.path.join(os.path.dirname(__file__), path.lstrip("/"))
        try:
            with open(path, "r") as f:
                content = f.read()
        except IOError:
            return 404
        content_type = guess_type(path)[0]
    return (200, [("Content-Type", content_type)], content)
Serves a static file when Django isn't being used.
def notify_attached(room, event, user):
    tpl = get_plugin_template_module('emails/attached.txt', chatroom=room,
                                     event=event, user=user)
    _send(event, tpl)

Notifies about an existing chatroom being attached to an event.

:param room: the chatroom
:param event: the event
:param user: the user performing the action
def active_classification(keywords, exposure_key):
    classifications = None
    if 'classification' in keywords:
        return keywords['classification']
    if 'layer_mode' in keywords and keywords['layer_mode'] == \
            layer_mode_continuous['key']:
        classifications = keywords['thresholds'].get(exposure_key)
    elif 'value_maps' in keywords:
        classifications = keywords['value_maps'].get(exposure_key)
    if classifications is None:
        return None
    for classification, value in list(classifications.items()):
        if value['active']:
            return classification
    return None

Helper to retrieve active classification for an exposure.

:param keywords: Hazard layer keywords.
:type keywords: dict

:param exposure_key: The exposure key.
:type exposure_key: str

:returns: The active classification key. None if there is no active one.
:rtype: str
def DbUnExportEvent(self, argin):
    self._log.debug("In DbUnExportEvent()")
    event_name = argin[0].lower()
    self.db.unexport_event(event_name)

Mark one event channel as non exported in database

:param argin: name of event channel or factory to unexport
:type: tango.DevString
:return: none
:rtype: tango.DevVoid
def _hierarchy(self):
    self.hierarchy = {}
    for rank in self.taxonomy:
        taxslice = self._slice(level=self.taxonomy.index(rank))
        self.hierarchy[rank] = self._group(taxslice)
Generate dictionary of referenced idents grouped by shared rank
def match(self, p_todo):
    children = self.todolist.children(p_todo)
    uncompleted = [todo for todo in children if not todo.is_completed()]
    return not uncompleted
Returns True when there are no children that are uncompleted yet.
def component_on_date(self, date: datetime.date) -> Optional["Interval"]:
    return self.intersection(Interval.wholeday(date))
Returns the part of this interval that falls on the date given, or ``None`` if the interval doesn't have any part during that date.
async def update_firmware(
        self,
        firmware_file: str,
        loop: asyncio.AbstractEventLoop = None,
        explicit_modeset: bool = True) -> str:
    if None is loop:
        checked_loop = self._loop
    else:
        checked_loop = loop
    return await self._backend.update_firmware(firmware_file,
                                               checked_loop,
                                               explicit_modeset)

Update the firmware on the Smoothie board.

:param firmware_file: The path to the firmware file.
:param explicit_modeset: `True` to force the smoothie into programming
    mode; `False` to assume it is already in programming mode.
:param loop: An asyncio event loop to use; if not specified, the one
    associated with this instance will be used.
:returns: The stdout of the tool used to update the smoothie
def get_grade_id(self):
    if not bool(self._my_map['gradeId']):
        raise errors.IllegalState('grade empty')
    return Id(self._my_map['gradeId'])

Gets the grade ``Id`` in this entry if the grading system is based on grades.

return: (osid.id.Id) - the grade ``Id``
raise: IllegalState - ``is_graded()`` is ``false`` or
       ``GradeSystem.isBasedOnGrades()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
def render_code(code, filetype, pygments_style):
    if filetype:
        lexer = pygments.lexers.get_lexer_by_name(filetype)
        formatter = pygments.formatters.HtmlFormatter(style=pygments_style)
        return pygments.highlight(code, lexer, formatter)
    else:
        return "<pre><code>{}</code></pre>".format(code)

Renders a piece of code into HTML. Highlights syntax if filetype is specified
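For instance, highlighting a Python snippet with the stock pygments "default" style (lexer and style names are standard pygments identifiers):

html = render_code("print('hi')", "python", "default")
plain = render_code("print('hi')", None, "default")  # falls back to <pre><code>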
def _set_cpu_throttling(self):
    if not self.is_running():
        return
    try:
        if sys.platform.startswith("win") and hasattr(sys, "frozen"):
            cpulimit_exec = os.path.join(os.path.dirname(os.path.abspath(sys.executable)),
                                         "cpulimit", "cpulimit.exe")
        else:
            cpulimit_exec = "cpulimit"
        subprocess.Popen([cpulimit_exec,
                          "--lazy",
                          "--pid={}".format(self._process.pid),
                          "--limit={}".format(self._cpu_throttling)],
                         cwd=self.working_dir)
        log.info("CPU throttled to {}%".format(self._cpu_throttling))
    except FileNotFoundError:
        raise QemuError("cpulimit could not be found, please install it or deactivate CPU throttling")
    except (OSError, subprocess.SubprocessError) as e:
        raise QemuError("Could not throttle CPU: {}".format(e))
Limits the CPU usage for current QEMU process.
def to_array(self):
    array = super(InlineQueryResultCachedMpeg4Gif, self).to_array()
    array['type'] = u(self.type)
    array['id'] = u(self.id)
    array['mpeg4_file_id'] = u(self.mpeg4_file_id)
    if self.title is not None:
        array['title'] = u(self.title)
    if self.caption is not None:
        array['caption'] = u(self.caption)
    if self.parse_mode is not None:
        array['parse_mode'] = u(self.parse_mode)
    if self.reply_markup is not None:
        array['reply_markup'] = self.reply_markup.to_array()
    if self.input_message_content is not None:
        array['input_message_content'] = self.input_message_content.to_array()
    return array
Serializes this InlineQueryResultCachedMpeg4Gif to a dictionary. :return: dictionary representation of this object. :rtype: dict
def find_transport_reactions(model):
    transport_reactions = []
    transport_rxn_candidates = set(model.reactions) - set(model.boundary) \
        - set(find_biomass_reaction(model))
    transport_rxn_candidates = set(
        [rxn for rxn in transport_rxn_candidates if len(rxn.compartments) >= 2]
    )
    sbo_matches = set([rxn for rxn in transport_rxn_candidates if
                       rxn.annotation is not None and
                       'sbo' in rxn.annotation and
                       rxn.annotation['sbo'] in TRANSPORT_RXN_SBO_TERMS])
    if len(sbo_matches) > 0:
        transport_reactions += list(sbo_matches)
    for rxn in transport_rxn_candidates:
        rxn_mets = set([met.formula for met in rxn.metabolites])
        if (None not in rxn_mets) and (len(rxn_mets) != 0):
            if is_transport_reaction_formulae(rxn):
                transport_reactions.append(rxn)
        elif is_transport_reaction_annotations(rxn):
            transport_reactions.append(rxn)
    return set(transport_reactions)

Return a list of all transport reactions.

Parameters
----------
model : cobra.Model
    The metabolic model under investigation.

Notes
-----
A transport reaction is defined as follows:
1. It contains metabolites from at least 2 compartments and
2. at least 1 metabolite undergoes no chemical reaction, i.e., the
   formula and/or annotation stays the same on both sides of the equation.

A notable exception is transport via PTS, which also contains the
following restriction:
3. The transported metabolite(s) are transported into a compartment
   through the exchange of a phosphate group.

An example of transport via PTS would be
pep(c) + glucose(e) -> glucose-6-phosphate(c) + pyr(c)

Reactions similar to transport via PTS (referred to as "modified
transport reactions") follow a similar pattern:
A(x) + B-R(y) -> A-R(y) + B(y)

Such modified transport reactions can be detected, but only when a
formula field exists for all metabolites in a particular reaction. If
this is not the case, transport reactions are identified through
annotations, which cannot detect modified transport reactions.
def change(self) -> Tuple[bool, dict]:
    next = self.next
    self.next = None
    if self.next or not self.running:
        message = "The Scene.change interface is deprecated. Use the events commands instead."
        warn(message, DeprecationWarning)
    return self.running, {"scene_class": next}
Default case, override in subclass as necessary.
def plot_lines(f, x, samples, ax=None, **kwargs):
    logZ = kwargs.pop('logZ', None)
    weights = kwargs.pop('weights', None)
    ntrim = kwargs.pop('ntrim', None)
    cache = kwargs.pop('cache', '')
    parallel = kwargs.pop('parallel', False)
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
    fsamps = compute_samples(f, x, samples, logZ=logZ,
                             weights=weights, ntrim=ntrim,
                             parallel=parallel, cache=cache,
                             tqdm_kwargs=tqdm_kwargs)
    fgivenx.plot.plot_lines(x, fsamps, ax, **kwargs)
r""" Plot a representative set of functions to sample Additionally, if a list of log-evidences are passed, along with list of functions, and list of samples, this function plots the probability mass function for all models marginalised according to the evidences. Parameters ---------- f: function function :math:`f(x;\theta)` (or list of functions for each model) with dependent variable :math:`x`, parameterised by :math:`\theta`. x: 1D array-like `x` values to evaluate :math:`f(x;\theta)` at. samples: 2D array-like :math:`\theta` samples (or list of :math:`\theta` samples) to evaluate :math:`f(x;\theta)` at. `shape = (nsamples, npars)` ax: axes object, optional :class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours onto. If unsupplied, then :func:`matplotlib.pyplot.gca()` is used to get the last axis used, or create a new one. logZ: 1D array-like, optional log-evidences of each model if multiple models are passed. Should be same length as the list `f`, and need not be normalised. Default: `numpy.ones_like(f)` weights: 1D array-like, optional sample weights (or list of weights), if desired. Should have length same as `samples.shape[0]`. Default: `numpy.ones_like(samples)` ntrim: int, optional Approximate number of samples to trim down to, if desired. Useful if the posterior is dramatically oversampled. Default: None cache: str, optional File root for saving previous calculations for re-use parallel, tqdm_args: see docstring for :func:`fgivenx.parallel.parallel_apply` kwargs: further keyword arguments Any further keyword arguments are plotting keywords that are passed to :func:`fgivenx.plot.plot_lines`.
def as_object(obj):
    LOGGER.debug('as_object(%s)', obj)
    if isinstance(obj, datetime.date):
        return as_date(obj)
    elif hasattr(obj, '__dict__'):
        out = {k: obj.__dict__[k]
               for k in obj.__dict__
               if not k.startswith('_')}
        for k, v in ((p, getattr(obj, p))
                     for p, _ in inspect.getmembers(
                         obj.__class__, lambda x: isinstance(x, property))):
            out[k] = v
        return out

Return a JSON serializable type for ``obj``.

Args:
    obj (:py:class:`object`): the object to be serialized.

Raises:
    :py:class:`AttributeError`: when ``obj`` is not a Python object.

Returns:
    (dict): JSON serializable type for the given object.
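A minimal usage sketch: register the helper as the `default` hook of `json.dumps` (the `Point` class here is hypothetical; `as_date` is assumed to be defined alongside `as_object`):

import json

class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

json.dumps(Point(1, 2), default=as_object)  # '{"x": 1, "y": 2}'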
def process_request(request):
    if 'HTTP_X_OPERAMINI_FEATURES' in request.META:
        request.mobile = True
        return None
    if 'HTTP_ACCEPT' in request.META:
        s = request.META['HTTP_ACCEPT'].lower()
        if 'application/vnd.wap.xhtml+xml' in s:
            request.mobile = True
            return None
    if 'HTTP_USER_AGENT' in request.META:
        s = request.META['HTTP_USER_AGENT'].lower()
        for ua in search_strings:
            if ua in s:
                if not ignore_user_agent(s):
                    request.mobile = True
                    if MOBI_DETECT_TABLET:
                        request.tablet = _is_tablet(s)
                    return None
    request.mobile = False
    request.tablet = False
    return None
Adds a "mobile" attribute to the request which is True or False depending on whether the request should be considered to come from a small-screen device such as a phone or a PDA
def db_list(user=None, host=None, port=None, maintenance_db=None,
            password=None, runas=None):
    ret = {}
    query = (
        'SELECT datname as "Name", pga.rolname as "Owner", '
        'pg_encoding_to_char(encoding) as "Encoding", '
        'datcollate as "Collate", datctype as "Ctype", '
        'datacl as "Access privileges", spcname as "Tablespace" '
        'FROM pg_database pgd, pg_roles pga, pg_tablespace pgts '
        'WHERE pga.oid = pgd.datdba AND pgts.oid = pgd.dattablespace'
    )
    rows = psql_query(query, runas=runas, host=host, user=user, port=port,
                      maintenance_db=maintenance_db, password=password)
    for row in rows:
        ret[row['Name']] = row
        ret[row['Name']].pop('Name')
    return ret

Return dictionary with information about databases of a Postgres server.

CLI Example:

.. code-block:: bash

    salt '*' postgres.db_list
def _get_esxcluster_proxy_details():
    det = __salt__['esxcluster.get_details']()
    return det.get('vcenter'), det.get('username'), det.get('password'), \
        det.get('protocol'), det.get('port'), det.get('mechanism'), \
        det.get('principal'), det.get('domain'), det.get('datacenter'), \
        det.get('cluster')
Returns the running esxcluster's proxy details
def unicodestr(s, encoding='utf-8', fallback='iso-8859-1'):
    if isinstance(s, unicode):
        return s
    try:
        return s.decode(encoding)
    except UnicodeError:
        return s.decode(fallback)
Convert a string to unicode if it isn't already.
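For example (Python 2 semantics, since the code relies on the `unicode` built-in; the byte strings below are illustrative):

unicodestr(b'caf\xc3\xa9')  # u'caf\xe9', decoded as UTF-8
unicodestr(b'caf\xe9')      # u'caf\xe9', via the iso-8859-1 fallback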
def waitForEvent(self, event_name, predicate, timeout=DEFAULT_TIMEOUT):
    deadline = time.time() + timeout
    while time.time() <= deadline:
        rpc_timeout = deadline - time.time()
        if rpc_timeout < 0:
            break
        rpc_timeout = min(rpc_timeout, MAX_TIMEOUT)
        try:
            event = self.waitAndGet(event_name, rpc_timeout)
        except TimeoutError:
            break
        if predicate(event):
            return event
    raise TimeoutError(
        self._ad,
        'Timed out after %ss waiting for an "%s" event that satisfies the '
        'predicate "%s".' % (timeout, event_name, predicate.__name__))

Wait for an event of a specific name that satisfies the predicate.

This call will block until the expected event has been received or time out.

The predicate function defines the condition the event is expected to
satisfy. It takes an event and returns True if the condition is satisfied,
False otherwise.

Note all events of the same name that are received but don't satisfy the
predicate will be discarded and not be available for further consumption.

Args:
    event_name: string, the name of the event to wait for.
    predicate: function, a function that takes an event (dictionary) and
        returns a bool.
    timeout: float, default is 120s.

Returns:
    dictionary, the event that satisfies the predicate if received.

Raises:
    TimeoutError: raised if no event that satisfies the predicate is
        received after timeout seconds.
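A hedged usage sketch (`handler` stands in for whatever object exposes this method, and the event payload shape is assumed):

def connected(event):
    # Assumed payload shape: events carry a 'data' dict.
    return event['data'].get('state') == 'connected'

event = handler.waitForEvent('onStateChanged', connected, timeout=30)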
def query_balance(self, asset: str, b58_address: str) -> int:
    raw_address = Address.b58decode(b58_address).to_bytes()
    contract_address = self.get_asset_address(asset)
    invoke_code = build_native_invoke_code(contract_address, b'\x00',
                                           "balanceOf", raw_address)
    tx = Transaction(0, 0xd1, int(time()), 0, 0, None, invoke_code,
                     bytearray(), list())
    response = self.__sdk.rpc.send_raw_transaction_pre_exec(tx)
    try:
        balance = ContractDataParser.to_int(response['Result'])
        return balance
    except SDKException:
        return 0

This interface is used to query the account's ONT or ONG balance.

:param asset: a string which is used to indicate which asset we want to check the balance.
:param b58_address: a base58 encode account address.
:return: account balance.
def set_forbidden_uptodate(self, uptodate):
    if self._forbidden_uptodate == uptodate:
        return
    self._forbidden_uptodate = uptodate
    self.invalidateFilter()

Set all forbidden uptodate values

:param uptodate: a list with forbidden uptodate values
:type uptodate: list
:returns: None
:rtype: None
:raises: None
def update_role(u_name, newprivilege):
    entry = TabMember.update(
        role=newprivilege
    ).where(TabMember.user_name == u_name)
    try:
        entry.execute()
        return True
    except:
        return False

Update the role of the user.
def get_version(filepath='src/birding/version.py'):
    with open(get_abspath(filepath)) as version_file:
        # The original regex literal was lost in extraction; the pattern
        # below is a plausible reconstruction that captures a named
        # 'version' group from a `__version__ = '...'` line.
        return re.search(
            r"""__version__\s*=\s*['"](?P<version>[^'"]*)['"]""",
            version_file.read()).group('version')
Get version without import, which avoids dependency issues.
def compare_checkpoints(self, attr_mean):
    if self._cmp_greater and attr_mean > self.best_checkpoint_attr_value:
        return True
    elif (not self._cmp_greater
          and attr_mean < self.best_checkpoint_attr_value):
        return True
    return False

Compares two checkpoints based on the attribute attr_mean param.
Greater than is used by default. If command-line parameter
checkpoint_score_attr starts with "min-" less than is used.

Arguments:
    attr_mean: mean of attribute value for the current checkpoint

Returns:
    True: when attr_mean is greater than previous checkpoint attr_mean
        and greater than function is selected;
        when attr_mean is less than previous checkpoint attr_mean and
        less than function is selected
    False: when attr_mean is not in alignment with selected cmp fn
def add_interface(self, interface):
    if not isinstance(interface, Interface):
        raise TypeError
    self._interfaces[interface.name] = interface

Manually add or overwrite an interface definition from an Interface object.

:param interface: an Interface() object
def update_lbaas_member(self, lbaas_member, lbaas_pool, body=None):
    return self.put(self.lbaas_member_path % (lbaas_pool, lbaas_member),
                    body=body)
Updates a lbaas_member.
def substr(self, name, start=None, size=None):
    if start is not None and size is not None:
        start = get_integer('start', start)
        size = get_integer('size', size)
        return self.execute_command('substr', name, start, size)
    elif start is not None:
        start = get_integer('start', start)
        return self.execute_command('substr', name, start)
    return self.execute_command('substr', name)

Return a substring of the string at key ``name``. ``start`` and ``size``
are 0-based integers specifying the portion of the string to return.

Like **Redis.SUBSTR**

:param string name: the key name
:param int start: Optional, the offset of first byte returned. If start
    is negative, the returned string will start at the start'th character
    from the end of string.
:param int size: Optional, number of bytes returned. If size is negative,
    then that many characters will be omitted from the end of string.
:return: The extracted part of the string.
:rtype: string

>>> ssdb.set('str_test', 'abc12345678')
True
>>> ssdb.substr('str_test', 2, 4)
'c123'
>>> ssdb.substr('str_test', -2, 2)
'78'
>>> ssdb.substr('str_test', 1, -1)
'bc1234567'
def start(self):
    if self.closed:
        raise ConnectionClosed()
    self.read_watcher.start()
    if self.write == self.buffered_write:
        self.write_watcher.start()
Start watching the socket.
def _segment_with_tokens(text, tokens):
    list_form = []
    text_ptr = 0
    for token in tokens:
        inter_token_string = []
        while not text[text_ptr:].startswith(token):
            inter_token_string.append(text[text_ptr])
            text_ptr += 1
            if text_ptr >= len(text):
                raise ValueError("Tokenization produced tokens that do not belong in string!")
        text_ptr += len(token)
        if inter_token_string:
            list_form.append(''.join(inter_token_string))
        list_form.append(token)
    if text_ptr < len(text):
        list_form.append(text[text_ptr:])
    return list_form
Segment a string around the tokens created by a passed-in tokenizer
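For example, inter-token whitespace is preserved as separate list entries:

_segment_with_tokens("a b  c", ["a", "b", "c"])
# -> ['a', ' ', 'b', '  ', 'c']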
def get_linode_id_from_name(name):
    nodes = _query('linode', 'list')['DATA']
    linode_id = ''
    for node in nodes:
        if name == node['LABEL']:
            linode_id = node['LINODEID']
            return linode_id
    if not linode_id:
        raise SaltCloudNotFound(
            'The specified name, {0}, could not be found.'.format(name)
        )

Returns the Linode ID for a VM from the provided name.

name
    The name of the Linode from which to get the Linode ID. Required.
def create(model_config, path, num_workers, batch_size,
           augmentations=None, tta=None):
    if not os.path.isabs(path):
        path = model_config.project_top_dir(path)
    train_path = os.path.join(path, 'train')
    valid_path = os.path.join(path, 'valid')
    train_ds = ImageDirSource(train_path)
    val_ds = ImageDirSource(valid_path)
    return TrainingData(
        train_ds,
        val_ds,
        num_workers=num_workers,
        batch_size=batch_size,
        augmentations=augmentations,
    )

Create train/valid ImageDirSources with the supplied arguments and wrap them in a TrainingData object
def _none_value(self):
    if self.out_type == int:
        return 0
    elif self.out_type == float:
        return 0.0
    elif self.out_type == bool:
        return False
    elif self.out_type == six.text_type:
        return u''
Get an appropriate "null" value for this field's type. This is used internally when setting the field to None.
def remove_duplicate_edges_undirected(udg):
    lookup = {}
    edges = sorted(udg.get_all_edge_ids())
    for edge_id in edges:
        e = udg.get_edge(edge_id)
        tpl_a = e['vertices']
        tpl_b = (tpl_a[1], tpl_a[0])
        if tpl_a in lookup or tpl_b in lookup:
            udg.delete_edge_by_id(edge_id)
        else:
            lookup[tpl_a] = edge_id
            lookup[tpl_b] = edge_id
Removes duplicate edges from an undirected graph.
def descendents(self, cls):
    if cls.cls == type or not hasattr(cls.cls, '__subclasses__'):
        return []
    downs = cls.cls.__subclasses__()
    return list(map(lambda c: self.find_class(c), downs))
Returns a descendent list of documentation objects for `cls`, which must be a documentation object. The list will contain objects belonging to `pydoc.Class` or `pydoc.External`. Objects belonging to the former are exported classes either in this module or in one of its sub-modules.
def _get_category_from_pars_var(template_var, context):
    cat = template_var.resolve(context)
    if isinstance(cat, basestring):
        cat = Category.objects.get_by_tree_path(cat)
    return cat
get category from template variable or from tree_path
def after(self, existing_fn, new_fn):
    self.warn_if_function_not_registered(new_fn)
    try:
        index = self._stack.index(existing_fn)
        self._stack.insert(index + 1, new_fn)
    except ValueError as e:
        six.raise_from(BaseLunrException("Cannot find existing_fn"), e)
Adds a single function after a function that already exists in the pipeline.
def generate_name(self, name_format=DEFAULT_FILE_NAME_FORMAT):
    if len(self.segments) > 0:
        return self.segments[0].points[0].time.strftime(name_format) + ".gpx"
    else:
        return "EmptyTrack"

Generates a name for the track

The name is generated based on the date of the first point of the track,
or in case it doesn't exist, "EmptyTrack"

Args:
    name_format (str, optional): Name format to give to the track, based
        on its start time. Defaults to DEFAULT_FILE_NAME_FORMAT
Returns:
    str
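For instance, with a strftime-style format (the `track` object is hypothetical):

track.generate_name("%Y-%m-%d_%H%M")  # e.g. '2019-03-14_0930.gpx'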
def make_context(self, **kwargs):
    self.check_schema()
    return Context(self.driver, self.config, **kwargs)
Create a new context for reading data
def only_manager(self):
    assert len(self.managers) == 1, MULTIPLE_MANAGERS_MESSAGE
    return list(self.managers.values())[0]

Convenience accessor for tests and contexts with a sole manager.
def reply_message(self, reply_token, messages, timeout=None):
    if not isinstance(messages, (list, tuple)):
        messages = [messages]
    data = {
        'replyToken': reply_token,
        'messages': [message.as_json_dict() for message in messages]
    }
    self._post(
        '/v2/bot/message/reply', data=json.dumps(data), timeout=timeout
    )

Call reply message API.

https://devdocs.line.me/en/#reply-message

Respond to events from users, groups, and rooms. Webhooks are used to
notify you when an event occurs. For events that you can respond to,
a replyToken is issued for replying to messages.

Because the replyToken becomes invalid after a certain period of time,
responses should be sent as soon as a message is received. Reply tokens
can only be used once.

:param str reply_token: replyToken received via webhook
:param messages: Messages. Max: 5
:type messages: T <= :py:class:`linebot.models.send_messages.SendMessage` |
    list[T <= :py:class:`linebot.models.send_messages.SendMessage`]
:param timeout: (optional) How long to wait for the server to send data
    before giving up, as a float, or a (connect timeout, read timeout)
    float tuple. Default is self.http_client.timeout
:type timeout: float | tuple(float, float)
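A typical call inside a webhook handler might look like this (sketch; `line_bot_api` and `event` come from the surrounding handler, and TextSendMessage is the SDK's text message model):

from linebot.models import TextSendMessage

line_bot_api.reply_message(event.reply_token, TextSendMessage(text='Hello!'))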
def _loadable_models():
    classes = [
        pyphi.Direction,
        pyphi.Network,
        pyphi.Subsystem,
        pyphi.Transition,
        pyphi.labels.NodeLabels,
        pyphi.models.Cut,
        pyphi.models.KCut,
        pyphi.models.NullCut,
        pyphi.models.Part,
        pyphi.models.Bipartition,
        pyphi.models.KPartition,
        pyphi.models.Tripartition,
        pyphi.models.RepertoireIrreducibilityAnalysis,
        pyphi.models.MaximallyIrreducibleCauseOrEffect,
        pyphi.models.MaximallyIrreducibleCause,
        pyphi.models.MaximallyIrreducibleEffect,
        pyphi.models.Concept,
        pyphi.models.CauseEffectStructure,
        pyphi.models.SystemIrreducibilityAnalysis,
        pyphi.models.ActualCut,
        pyphi.models.AcRepertoireIrreducibilityAnalysis,
        pyphi.models.CausalLink,
        pyphi.models.Account,
        pyphi.models.AcSystemIrreducibilityAnalysis,
    ]
    return {cls.__name__: cls for cls in classes}
A dictionary of loadable PyPhi models. These are stored in this function (instead of module scope) to resolve circular import issues.
def _get_port_profile_id(self, request):
    port_profile_id = request.path.split("/")[-1].strip()
    if uuidutils.is_uuid_like(port_profile_id):
        LOG.debug("The instance id was found in request path.")
        return port_profile_id
    LOG.debug("Failed to get the instance id from the request.")
    return None
Get the port profile ID from the request path.
def _dispatch_trigger(self, msg):
    if not msg.args[0].startswith(self.trigger_char):
        return
    split_args = msg.args[0].split()
    trigger = split_args[0].lstrip(self.trigger_char)
    if trigger in self.triggers:
        method = getattr(self, trigger)
        if msg.command == PRIVMSG:
            if msg.dst == self.irc.nick:
                if EVT_PRIVATE in self.triggers[trigger]:
                    msg.event = EVT_PRIVATE
                    method(msg)
            else:
                if EVT_PUBLIC in self.triggers[trigger]:
                    msg.event = EVT_PUBLIC
                    method(msg)
        elif (msg.command == NOTICE) and (EVT_NOTICE in self.triggers[trigger]):
            msg.event = EVT_NOTICE
            method(msg)
Dispatches the message to the corresponding method.
def apply_computation(cls,
                      state: BaseState,
                      message: Message,
                      transaction_context: BaseTransactionContext) -> 'BaseComputation':
    with cls(state, message, transaction_context) as computation:
        if message.code_address in computation.precompiles:
            computation.precompiles[message.code_address](computation)
            return computation
        show_debug2 = computation.logger.show_debug2
        for opcode in computation.code:
            opcode_fn = computation.get_opcode_fn(opcode)
            if show_debug2:
                computation.logger.debug2(
                    "OPCODE: 0x%x (%s) | pc: %s",
                    opcode,
                    opcode_fn.mnemonic,
                    max(0, computation.code.pc - 1),
                )
            try:
                opcode_fn(computation=computation)
            except Halt:
                break
    return computation
Perform the computation that would be triggered by the VM message.
def value_as_datetime(self):
    if self.value is None:
        return None
    v1, v2 = self.value
    if isinstance(v1, numbers.Number):
        d1 = datetime.utcfromtimestamp(v1 / 1000)
    else:
        d1 = v1
    if isinstance(v2, numbers.Number):
        d2 = datetime.utcfromtimestamp(v2 / 1000)
    else:
        d2 = v2
    return d1, d2
Convenience property to retrieve the value tuple as a tuple of datetime objects.
def remove(cls, id):
    api = Client.instance().api
    api.index(id).delete()

Deletes an index with id

:param id: string/document-handle
def peaks(data, method='max', axis='time', limits=None):
    idx_axis = data.index_of(axis)
    output = data._copy()
    output.axis.pop(axis)
    for trl in range(data.number_of('trial')):
        values = data.axis[axis][trl]
        dat = data(trial=trl)
        if limits is not None:
            limits = (values < limits[0]) | (values > limits[1])
            idx = [slice(None)] * len(data.list_of_axes)
            idx[idx_axis] = limits
            dat[idx] = nan
        if method == 'max':
            peak_val = nanargmax(dat, axis=idx_axis)
        elif method == 'min':
            peak_val = nanargmin(dat, axis=idx_axis)
        output.data[trl] = values[peak_val]
    return output

Return the values of an index where the data is at max or min

Parameters
----------
method : str, optional
    'max' or 'min'
axis : str, optional
    the axis where you want to detect the peaks
limits : tuple of two values, optional
    the lowest and highest limits where to search for the peaks
data : instance of Data
    one of the datatypes

Returns
-------
instance of Data
    with one dimension less than the input data. The actual values in
    the data can be non-numeric, for example, if you look for the max
    value across electrodes

Notes
-----
This function is useful when you want to find the frequency value at
which the power is the largest, or to find the time point at which the
signal is largest, or the channel at which the activity is largest.
def make_ical(self, csv_configs=None):
    csv_configs = self._generate_configs_from_default(csv_configs)
    self.cal = Calendar()
    for row in self.csv_data:
        event = Event()
        event.add('summary', row[csv_configs['CSV_NAME']])
        event.add('dtstart', row[csv_configs['CSV_START_DATE']])
        event.add('dtend', row[csv_configs['CSV_END_DATE']])
        event.add('description', row[csv_configs['CSV_DESCRIPTION']])
        event.add('location', row[csv_configs['CSV_LOCATION']])
        self.cal.add_component(event)
    return self.cal
Make iCal entries
def all_equal(arg1, arg2):
    if all(hasattr(el, '_infinitely_iterable') for el in [arg1, arg2]):
        return arg1 == arg2
    try:
        return all(a1 == a2 for a1, a2 in zip(arg1, arg2))
    except TypeError:
        return arg1 == arg2

Return a single boolean for arg1==arg2, even for numpy arrays using
element-wise comparison.

Uses all(arg1==arg2) for sequences, and arg1==arg2 otherwise.

If both objects have an '_infinitely_iterable' attribute, they are not
zipped together and are compared directly instead.
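A few illustrative calls:

import numpy as np

all_equal(np.array([1, 2]), np.array([1, 2]))  # True (element-wise)
all_equal([1, 2, 3], (1, 2, 3))                # True (zipped comparison)
all_equal(5, 5.0)                              # True (falls back to ==)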
def is_git(path):
    try:
        repo_dir = run_cmd(path, 'git', 'rev-parse', '--git-dir')
        return True if repo_dir else False
    except (OSError, RuntimeError):
        return False
Return True if this is a git repo.
def my_shared_endpoint_list(endpoint_id):
    client = get_client()
    ep_iterator = client.my_shared_endpoint_list(endpoint_id)
    formatted_print(ep_iterator, fields=ENDPOINT_LIST_FIELDS)
Executor for `globus endpoint my-shared-endpoint-list`
def find_template(self, name):
    deftemplate = lib.EnvFindDeftemplate(self._env, name.encode())
    if deftemplate == ffi.NULL:
        raise LookupError("Template '%s' not found" % name)
    return Template(self._env, deftemplate)
Find the Template by its name.
def _match_directories(self, entries, root, regex_string):
    self.log(u"Matching directory names in paged hierarchy")
    self.log([u"Matching within '%s'", root])
    self.log([u"Matching regex '%s'", regex_string])
    regex = re.compile(r"" + regex_string)
    directories = set()
    root_len = len(root)
    for entry in entries:
        if entry.startswith(root):
            self.log([u"Examining '%s'", entry])
            entry = entry[root_len + 1:]
            entry_splitted = entry.split(os.sep)
            if ((len(entry_splitted) >= 2) and
                    (re.match(regex, entry_splitted[0]) is not None)):
                directories.add(entry_splitted[0])
                self.log([u"Match: '%s'", entry_splitted[0]])
            else:
                self.log([u"No match: '%s'", entry])
    return sorted(directories)

Match directory names in paged hierarchies.

Example: ::

    root = /foo/bar
    regex_string = [0-9]+

    /foo/bar/
        1/
            bar
            baz
        2/
            bar
        3/
            foo

    => ["/foo/bar/1", "/foo/bar/2", "/foo/bar/3"]

:param list entries: the list of entries (paths) of a container
:param string root: the root directory to search within
:param string regex_string: regex string to match directory names
:rtype: list of matched directories
def _parse_response(self, response, target_object=strack):
    objects = json.loads(response.read().decode("utf-8"))
    list = []
    for obj in objects:
        list.append(target_object(obj, client=self.client))
    return list
Generic response parser method
def check_counts(self):
    if "counts" in re.split("[/.]", self.endpoint):
        logger.info("disabling tweet parsing due to counts API usage")
        self._tweet_func = lambda x: x
Disables tweet parsing if the count API is used.
def _getrsyncoptions(self):
    ignores = list(self.DEFAULT_IGNORES)
    ignores += self.config.option.rsyncignore
    ignores += self.config.getini("rsyncignore")
    return {"ignores": ignores, "verbose": self.config.option.verbose}
Get options to be passed for rsync.
def extract(dump_files, extractors=ALL_EXTRACTORS):
    def process_dump(dump, path):
        for page in dump:
            if page.namespace != 0:
                continue
            else:
                for cite in extract_cite_history(page, extractors):
                    yield cite
    return mwxml.map(process_dump, dump_files)

Extracts cites from a set of `dump_files`.

:Parameters:
    dump_files : str | `file`
        A set of MediaWiki XML dump files (expects: pages-meta-history)
    extractors : `list`(`extractor`)
        A list of extractors to apply to the text

:Returns:
    `iterable` -- a generator of extracted cites
def read(self, n):
    out = ctypes.create_string_buffer(n)
    ctypes.windll.kernel32.RtlMoveMemory(out, self.view + self.pos, n)
    self.pos += n
    return out.raw
Read n bytes from mapped view.
def get_context(namespace, context_id):
    context_obj = get_state(context_id, namespace=namespace)
    if not context_obj:
        raise ContextError("Context '{}' not found in namespace '{}'".format(
            context_id, namespace))
    return context_obj
Get stored context object.
def main_pred_type(self, value):
    if value not in operators:
        value = operator_lkup.get(value)
    if value:
        self._main_pred_type = value
        self.payload['predicate']['type'] = self._main_pred_type
    else:
        raise Exception("main predicate combiner not a valid operator")

set main predicate combination type

:param value: (character) One of ``equals`` (``=``), ``and`` (``&``),
    ``or`` (``|``), ``lessThan`` (``<``), ``lessThanOrEquals`` (``<=``),
    ``greaterThan`` (``>``), ``greaterThanOrEquals`` (``>=``), ``in``,
    ``within``, ``not`` (``!``), ``like``
def get_diff_coeff(hvec, n=1):
    hvec = np.array(hvec, dtype=np.float)
    acc = len(hvec)
    exp = np.column_stack([np.arange(acc)] * acc)
    a = np.vstack([hvec] * acc) ** exp
    b = np.zeros(acc)
    b[n] = factorial(n)
    return np.linalg.solve(a, b)

Helper function to find difference coefficients of a derivative on an
arbitrary mesh.

Args:
    hvec (1D array-like): sampling stencil
    n (int): degree of derivative to find
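For instance, on the uniform stencil [-1, 0, 1] the solver recovers the familiar central-difference weights:

get_diff_coeff([-1, 0, 1], n=1)  # -> array([-0.5,  0. ,  0.5])
get_diff_coeff([-1, 0, 1], n=2)  # -> array([ 1., -2.,  1.])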
def convert_namespaces_str(
    bel_str: str,
    api_url: str = None,
    namespace_targets: Mapping[str, List[str]] = None,
    canonicalize: bool = False,
    decanonicalize: bool = False,
) -> str:
    matches = re.findall(r'([A-Z]+:"(?:\\.|[^"\\])*"|[A-Z]+:(?:[^\),\s]+))', bel_str)
    for nsarg in matches:
        if "DEFAULT:" in nsarg:
            continue
        updated_nsarg = convert_nsarg(
            nsarg,
            api_url=api_url,
            namespace_targets=namespace_targets,
            canonicalize=canonicalize,
            decanonicalize=decanonicalize,
        )
        if updated_nsarg != nsarg:
            bel_str = bel_str.replace(nsarg, updated_nsarg)
    return bel_str

Convert namespace in string

Uses a regex expression to extract all NSArgs and replace them with the
updated NSArg from the BEL.bio API terms endpoint.

Args:
    bel_str (str): bel statement string or partial string (e.g. subject or object)
    api_url (str): BEL.bio api url to use, e.g. https://api.bel.bio/v1
    namespace_targets (Mapping[str, List[str]]): formatted as in configuration file example
    canonicalize (bool): use canonicalize endpoint/namespace targets
    decanonicalize (bool): use decanonicalize endpoint/namespace targets

Results:
    str: bel statement with namespaces converted
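A hedged call sketch (the API URL comes from the docstring's own example; the BEL fragment and namespace are illustrative, and the terms endpoint must be reachable):

converted = convert_namespaces_str(
    'p(HGNC:AKT1)', api_url='https://api.bel.bio/v1', canonicalize=True
)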
def update_script_from_item(self, item):
    script, path_to_script, script_item = item.get_script()
    dictator = list(script_item.to_dict().values())[0]
    for instrument in list(script.instruments.keys()):
        script.instruments[instrument]['settings'] = dictator[instrument]['settings']
        del dictator[instrument]
    for sub_script_name in list(script.scripts.keys()):
        sub_script_item = script_item.get_subscript(sub_script_name)
        self.update_script_from_item(sub_script_item)
        del dictator[sub_script_name]
    script.update(dictator)
    script.data_path = self.gui_settings['data_folder']

updates the script based on the information provided in item

Args:
    script: script to be updated
    item: B26QTreeItem that contains the new settings of the script
def _is_numeric(self, values):
    if len(values) > 0:
        assert isinstance(values[0], (float, int)), \
            "values must be numbers to perform math operations. Got {}".format(
                type(values[0]))
    return True
Check to be sure values are numbers before doing numerical operations.
def force_process_ordered(self):
    for instance_id, messages in self.replicas.take_ordereds_out_of_turn():
        num_processed = 0
        for message in messages:
            self.try_processing_ordered(message)
            num_processed += 1
        logger.info('{} processed {} Ordered batches for instance {} '
                    'before starting catch up'
                    .format(self, num_processed, instance_id))

Take any messages from replica that have been ordered and process them.
This should be done rarely, like before catchup starts so a more current
LedgerStatus can be sent.

Can be called either:
1. when the node is participating; this happens just before catchup
   starts, so the node can have the latest ledger status, or
2. when the node is not participating but a round of catchup is about to
   be started; here it forces all the replica-ordered messages to be
   appended to the stashed ordered requests, and the stashed ordered
   requests are processed with appropriate checks
def __get_precipfc_data(latitude, longitude):
    url = 'https://gpsgadget.buienradar.nl/data/raintext?lat={}&lon={}'
    url = url.format(
        round(latitude, 2),
        round(longitude, 2)
    )
    result = __get_url(url)
    return result
Get buienradar forecasted precipitation.
def get_results_from_passive(self, scheduler_instance_id):
    if not self.schedulers:
        logger.debug("I do not have any scheduler: %s", self.schedulers)
        return []
    scheduler_link = None
    for link in list(self.schedulers.values()):
        if scheduler_instance_id == link.instance_id:
            scheduler_link = link
            break
    else:
        logger.warning("I do not know this scheduler: %s", scheduler_instance_id)
        return []
    logger.debug("Get results for the scheduler: %s", scheduler_instance_id)
    ret, scheduler_link.wait_homerun = scheduler_link.wait_homerun, {}
    logger.debug("Results: %s" % (list(ret.values())) if ret else "No results available")
    return list(ret.values())

Get executed actions results from a passive satellite for a specific scheduler

:param scheduler_instance_id: scheduler id
:type scheduler_instance_id: int
:return: Results list
:rtype: list
def _init_idxs_int(self, usr_hdrs):
    self.idxs_int = [
        Idx for Hdr, Idx in self.hdr2idx.items()
        if Hdr in usr_hdrs and Hdr in self.int_hdrs]
List of indexes whose values will be ints.