Dataset schema (viewer header): Unnamed: 0 — int64, 0 to ~389k (row index); code — string, lengths 26 to 79.6k; docstring — string, lengths 1 to 46.9k.
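A minimal sketch of how a table with this schema might be loaded and inspected with pandas; the file name is an assumption, not part of the dataset:

import pandas as pd

# Hypothetical file name for the dump shown below.
df = pd.read_csv("code_docstring_corpus.csv")
# Each row pairs a code snippet with its docstring.
print(df.loc[384000, "code"])
print(df.loc[384000, "docstring"])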
384,000
def t_ARTICLEHEADER(self, token):
    ur'''\#\#\s+<article-(?P<number>[A-Z0-9]+)><(?P<newtag>[a-zA-Z0-9-]+)><(?P<oldtag>[a-zA-Z0-9-]+)>[ ]*(?P<name>[^\<]+?)(?P<sep>:\s|\xef\xbc\x9a)(?P<title>[^<\n]+)\n'''
    # In PLY, the raw-unicode docstring above (restored from the docstring
    # field below) is the token's regex rule.
    number = token.lexer.lexmatch.group("number").decode("utf8")
    newtag = token.lexer.lexmatch.group("newtag").decode("utf8")
    oldtag = token.lexer.lexmatch.group("oldtag").decode("utf8")
    name = token.lexer.lexmatch.group("name").decode("utf8")
    sep = token.lexer.lexmatch.group("sep").decode("utf8")
    title = token.lexer.lexmatch.group("title").decode("utf8")
    token.value = (number, newtag, oldtag, name, title, sep)
    token.lexer.lineno += 1
    return token
ur'\#\#\s+<article-(?P<number>[A-Z0-9]+)><(?P<newtag>[a-zA-Z0-9-]+)><(?P<oldtag>[a-zA-Z0-9-]+)>[ ]*(?P<name>[^\<]+?)(?P<sep>:\s|\xef\xbc\x9a)(?P<title>[^<\n]+)\n'
384,001
def _dispatch(self, textgroup, directory): self.dispatcher.dispatch(textgroup, path=directory)
The SPARQL dispatcher does not need to dispatch works, as the link is stored in the DB through the Textgroup. :param textgroup: A Textgroup object :param directory: The path in which we found the textgroup :return:
384,002
def get_page(self, target_url):
    response = self._version.domain.twilio.request(
        'GET',  # HTTP method literal was stripped in the dump; 'GET' per twilio-python
        target_url,
    )
    return SyncListPage(self._version, response, self._solution)
Retrieve a specific page of SyncListInstance records from the API. Request is executed immediately :param str target_url: API-generated URL for the requested results page :returns: Page of SyncListInstance :rtype: twilio.rest.sync.v1.service.sync_list.SyncListPage
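A hedged usage sketch for the pager above, assuming a configured twilio-python Client and a Sync service SID (both placeholders):

from twilio.rest import Client

client = Client("ACXXXXXXXX", "auth_token")               # placeholder credentials
sync_lists = client.sync.services("ISXXXXXXXX").sync_lists
page = sync_lists.page(page_size=20)                      # first page
if page.next_page_url:
    next_page = sync_lists.get_page(page.next_page_url)   # the method above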
384,003
def connections(self): if not self.__connections: self.__connections = Connections( self.__connection) return self.__connections
Gets the Connections API client. Returns: Connections:
384,004
def find_py_files(srctree, ignore=None):
    if not os.path.isdir(srctree):
        yield os.path.split(srctree)
    for srcpath, _, fnames in os.walk(srctree):
        if ignore is not None and ignore in srcpath:
            continue
        for fname in (x for x in fnames if x.endswith('.py')):  # '.py' restored; literal stripped in dump
            yield srcpath, fname
Return all the Python files in a source tree. Ignores any path that contains the ignore string. This is not used by other class methods, but is designed to be used in code that uses this class.
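For example, a short sketch of iterating the generator above over a checkout, skipping a vendored directory:

import os

# Prints every .py file under ./src, skipping paths containing "vendor".
for srcpath, fname in find_py_files("src", ignore="vendor"):
    print(os.path.join(srcpath, fname))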
384,005
def auto_newline(buffer):
    # String literals were stripped from this dump; '\n', ' pass', ':' and the
    # four spaces below are restored from the ptpython-style auto-indent logic.
    insert_text = buffer.insert_text
    if buffer.document.current_line_after_cursor:
        # In the middle of a line: just insert a newline.
        insert_text('\n')
    else:
        # At the end of a line: insert a newline plus matching indentation.
        current_line = buffer.document.current_line_before_cursor.rstrip()
        insert_text('\n')
        # Unindent when the previous line closes a block (ends with 'pass').
        unindent = current_line.rstrip().endswith(' pass')
        current_line2 = current_line[4:] if unindent else current_line
        for c in current_line2:
            if c.isspace():
                insert_text(c)
            else:
                break
        # A trailing colon opens a block: add four extra spaces.
        if current_line[-1:] == ':':
            for x in range(4):
                insert_text(' ')
r""" Insert \n at the cursor position. Also add necessary padding.
384,006
def save_weights_from_checkpoint(input_checkpoint, output_path, conv_var_names=None, conv_transpose_var_names=None): check_input_checkpoint(input_checkpoint) with tf.Session() as sess: restore_from_checkpoint(sess, input_checkpoint) save_weights(sess, output_path, conv_var_names=conv_var_names, conv_transpose_var_names=conv_transpose_var_names)
Save the weights of the trainable variables given a checkpoint, each one in a different file in output_path.
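A brief usage sketch with hypothetical paths:

# Restores ./model.ckpt into a fresh session and writes one file per
# trainable variable under ./weights (both paths are placeholders).
save_weights_from_checkpoint("model.ckpt", "./weights")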
384,007
def compile(self, source, dest, is_two_file=True, post=None, lang=None):
    makedirs(os.path.dirname(dest))
    with io.open(dest, "w+", encoding="utf8") as out_file:
        with io.open(source, "r", encoding="utf8") as in_file:
            data = in_file.read()
        data, shortcode_deps = self.compile_string(data, source, is_two_file, post, lang)
        out_file.write(data)
    if post is None:
        if shortcode_deps:
            self.logger.error(
                "Cannot save dependencies for post {0} (post unknown)", source)
    else:
        post._depfile[dest] += shortcode_deps
    return True
Compile the source file into HTML and save as dest.
384,008
def create_cookie(host, path, secure, expires, name, value):
    # The stripped literal is assumed to be '.' (leading-dot domain check).
    return http.cookiejar.Cookie(
        0, name, value, None, False,
        host, host.startswith('.'), host.startswith('.'),
        path, True, secure, expires, False, None, None, {})
Shortcut function to create a cookie
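A hedged example of feeding the shortcut into a standard CookieJar; all values are placeholders:

import http.cookiejar

jar = http.cookiejar.CookieJar()
# Arguments follow the signature above: host, path, secure, expires, name, value.
jar.set_cookie(create_cookie(".example.com", "/", True, None, "session", "abc123"))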
384,009
def ellipticity(self): eig = np.linalg.eig(self.field_hessian) eig.sort() return eig[0]/eig[1] - 1
Most meaningful for bond critical points, can be physically interpreted as e.g. degree of pi-bonding in organic molecules. Consult literature for more information. :return:
384,010
def getParameters(self, emailAddress):
    if emailAddress is not None:
        address = emailAddress.address
    else:
        address = u''
    # The Parameter's name and label literals were stripped from the dump;
    # 'email' / 'Email Address' below are plausible placeholders only.
    return [
        liveform.Parameter(
            'email', liveform.TEXT_INPUT, _normalizeWhitespace,
            'Email Address', default=address)]
Return a C{list} of one L{LiveForm} parameter for editing an L{EmailAddress}. @type emailAddress: L{EmailAddress} or C{NoneType} @param emailAddress: If not C{None}, an existing contact item from which to get the email address default value. @rtype: C{list} @return: The parameters necessary for specifying an email address.
384,011
def remove_service(self, service):
    uid = api.get_uid(service)
    services = self.getAnalyses()
    num_services = len(services)
    services = [item for item in services
                if item.get("service_uid", "") != uid]
    removed = len(services) < num_services
    self.setAnalyses(services)
    settings = self.getAnalysisServicesSettings()
    settings = [item for item in settings
                if item.get("uid", "") != uid]  # key stripped in dump; "uid" assumed
    self.setAnalysisServicesSettings(settings)
    return removed
Removes the service passed in from the services offered by the current Template. If the Analysis Service passed in is not assigned to this Analysis Template, returns False. :param service: the service to be removed from this AR Template :type service: AnalysisService :return: True if the AnalysisService has been removed successfully
384,012
def _get_on_trixel_sources_from_database_query(self):
    self.log.debug("")  # original debug message stripped in dump
    tableName = self.tableName
    raCol = self.raCol
    decCol = self.decCol
    radiusArc = self.radius
    radius = self.radius / (60. * 60.)

    # Coarsen the HTM depth if the cone search returns too many trixels.
    trixelArray = self._get_trixel_ids_that_overlap_conesearch_circles()
    if trixelArray.size > 50000 and self.htmDepth == 16:
        self.htmDepth = 13
        self.mesh = HTM(depth=self.htmDepth, log=self.log)
        trixelArray = self._get_trixel_ids_that_overlap_conesearch_circles()
    if trixelArray.size > 50000 and self.htmDepth == 13:
        self.htmDepth = 10
        self.mesh = HTM(depth=self.htmDepth, log=self.log)
        trixelArray = self._get_trixel_ids_that_overlap_conesearch_circles()

    htmLevel = "htm%sID" % self.htmDepth
    if trixelArray.size > 150000:
        self.log.info(
            "Your search radius of the `%(tableName)s` table may be too large (%(radiusArc)s arcsec)" % locals())
        minID = np.min(trixelArray)
        maxID = np.max(trixelArray)
        htmWhereClause = "where %(htmLevel)s between %(minID)s and %(maxID)s " % locals()
    else:
        thesHtmIds = ",".join(np.array(map(str, trixelArray)))
        htmWhereClause = "where %(htmLevel)s in (%(thesHtmIds)s)" % locals()

    cols = self.columns[:]
    if cols != "*" and raCol.lower() not in cols.lower():
        cols += ", " + raCol
    if cols != "*" and decCol.lower() not in cols.lower():
        cols += ", " + decCol

    # The SQL template literals were stripped from the dump; both branches
    # originally interpolated cols/tableName/htmWhereClause via locals().
    if self.distinct:
        sqlQuery = "" % locals()
    else:
        sqlQuery = "" % locals()
    if self.sqlWhere and len(self.sqlWhere):
        sqlQuery += " and " + self.sqlWhere

    self.log.debug("")  # original debug message stripped in dump
    return sqlQuery
*generate the mysql query before executing it*
384,013
def purge_bucket(context, provider, **kwargs):
    # kwargs keys and the ClientError handling below were stripped from the
    # dump; the names used here ('bucket_name', the '*_lookup' keys, the
    # head_bucket 404 check) are assumptions based on the surrounding logic.
    session = get_session(provider.region)
    if kwargs.get('bucket_name'):
        bucket_name = kwargs['bucket_name']
    else:
        if kwargs.get('bucket_output_lookup'):
            value = kwargs['bucket_output_lookup']
            handler = OutputLookup.handle
        elif kwargs.get('bucket_rxref_lookup'):
            value = kwargs['bucket_rxref_lookup']
            handler = RxrefLookup.handle
        elif kwargs.get('bucket_xref_lookup'):
            value = kwargs['bucket_xref_lookup']
            handler = XrefLookup.handle
        else:
            LOGGER.fatal('No bucket name/source provided.')
            return False
        bucket_name = handler(value, provider=provider, context=context)
    s3_resource = session.resource('s3')
    try:
        s3_resource.meta.client.head_bucket(Bucket=bucket_name)
    except ClientError as exc:
        if exc.response['Error']['Code'] == '404':
            LOGGER.info("%s S3 bucket appears to have already been deleted...",
                        bucket_name)
            return True
        raise
    bucket = s3_resource.Bucket(bucket_name)
    bucket.object_versions.delete()
    return True
Delete objects in bucket.
384,014
def code_to_session(self, js_code):
    # Endpoint and parameter names restored from the documented WeChat
    # code2Session API (literals were stripped in the dump).
    return self._get(
        'sns/jscode2session',
        params={
            'appid': self.appid,
            'secret': self.secret,
            'js_code': js_code,
            'grant_type': 'authorization_code',
        }
    )
Validate a login credential. After obtaining a temporary login code via the wx.login() interface, pass it to the developer server and call this endpoint to complete the login flow. See the Mini Program login documentation for more usage details: https://developers.weixin.qq.com/miniprogram/dev/api/code2Session.html :param js_code: :return:
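A usage sketch assuming wechatpy's client wrapper (names per the wechatpy docs; credentials and the code value are placeholders):

from wechatpy import WeChatClient

client = WeChatClient("your-appid", "your-secret")
session = client.wxa.code_to_session("code-from-wx.login")
openid, session_key = session["openid"], session["session_key"]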
384,015
def sample(self, bqm, chain_strength=1.0, chain_break_fraction=True, **parameters):
    child = self.child
    __, __, target_adjacency = child.structure
    embedding = self.embedding
    bqm_embedded = embed_bqm(bqm, embedding, target_adjacency,
                             chain_strength=chain_strength,
                             smear_vartype=dimod.SPIN)
    if 'initial_state' in parameters:  # key stripped in dump; 'initial_state' per dwave-system
        parameters['initial_state'] = _embed_state(embedding, parameters['initial_state'])
    response = child.sample(bqm_embedded, **parameters)
    return unembed_sampleset(response, embedding, source_bqm=bqm,
                             chain_break_fraction=chain_break_fraction)
Sample from the provided binary quadratic model. Also set parameters for handling a chain, the set of vertices in a target graph that represents a source-graph vertex; when a D-Wave system is the sampler, it is a set of qubits that together represent a variable of the binary quadratic model being minor-embedded. Args: bqm (:obj:`dimod.BinaryQuadraticModel`): Binary quadratic model to be sampled from. chain_strength (float, optional, default=1.0): Magnitude of the quadratic bias (in SPIN-space) applied between variables to create chains. The energy penalty of chain breaks is 2 * `chain_strength`. chain_break_fraction (bool, optional, default=True): If True, the unembedded response contains a ‘chain_break_fraction’ field that reports the fraction of chains broken before unembedding. **parameters: Parameters for the sampling method, specified by the child sampler. Returns: :class:`dimod.SampleSet`: A `dimod` :obj:`~dimod.SampleSet` object. Examples: This example submits a triangle-structured problem to a D-Wave solver, selected by the user's default :std:doc:`D-Wave Cloud Client configuration file <cloud-client:intro>`, using a specified minor-embedding of the problem’s variables to physical qubits. >>> from dwave.system.samplers import DWaveSampler >>> from dwave.system.composites import FixedEmbeddingComposite >>> import dimod ... >>> sampler = FixedEmbeddingComposite(DWaveSampler(), {'a': [0, 4], 'b': [1, 5], 'c': [2, 6]}) >>> response = sampler.sample_ising({}, {'ab': 0.5, 'bc': 0.5, 'ca': 0.5}, chain_strength=2) >>> response.first # doctest: +SKIP Sample(sample={'a': 1, 'b': -1, 'c': 1}, energy=-0.5, num_occurrences=1, chain_break_fraction=0.0) See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_ for explanations of technical terms in descriptions of Ocean tools.
384,016
def register(self, model, index_cls=AlgoliaIndex, auto_indexing=None):
    # Message literals were stripped from the dump; the strings below are
    # reconstructed to match the raised/logged intent.
    if self.is_registered(model):
        raise RegistrationError(
            '{} is already registered with Algolia engine'.format(model))
    if not issubclass(index_cls, AlgoliaIndex):
        raise RegistrationError(
            '{} should be a subclass of AlgoliaIndex'.format(index_cls))
    index_obj = index_cls(model, self.client, self.__settings)
    self.__registered_models[model] = index_obj
    if (isinstance(auto_indexing, bool) and auto_indexing) or self.__auto_indexing:
        post_save.connect(self.__post_save_receiver, model)
        pre_delete.connect(self.__pre_delete_receiver, model)
    logger.info('REGISTER %s', model)
Registers the given model with Algolia engine. If the given model is already registered with Algolia engine, a RegistrationError will be raised.
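A minimal registration sketch in the style of the algoliasearch-django README; `Article` is an assumed Django model:

import algoliasearch_django as algoliasearch
from myapp.models import Article   # hypothetical model

class ArticleIndex(algoliasearch.AlgoliaIndex):
    fields = ('title', 'body')     # model fields to index

algoliasearch.register(Article, ArticleIndex)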
384,017
def get_homepath(self, ignore_session=False, force_cookieless=False): if not ignore_session and self._session.get("session_id") is not None and self._session.get("cookieless", False): return web.ctx.homepath + "/@" + self._session.get("session_id") + "@" elif not ignore_session and force_cookieless: return web.ctx.homepath + "/@@" else: return web.ctx.homepath
:param ignore_session: Ignore the cookieless session_id that should be put in the URL :param force_cookieless: Force the cookieless session; the link will include the session_creator if needed.
384,018
def hilite(s, ok=True, bold=False):
    # ANSI attribute literals restored ('32' green, '31' red, '1' bold);
    # they were stripped in the dump.
    if not term_supports_colors():
        return s
    attr = []
    if ok is None:
        pass
    elif ok:
        attr.append('32')
    else:
        attr.append('31')
    if bold:
        attr.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), s)
Return a highlighted version of 's'.
384,019
def overwrite(self, bs, pos=None):
    bs = Bits(bs)
    if not bs.len:
        return
    if pos is None:
        try:
            pos = self._pos
        except AttributeError:
            raise TypeError("overwrite requires a bit position for this type.")
    if pos < 0:
        pos += self.len
    if pos < 0 or pos + bs.len > self.len:
        raise ValueError("Overwrite exceeds boundary of bitstring.")
    self._overwrite(bs, pos)
    try:
        self._pos = pos + bs.len
    except AttributeError:
        pass
Overwrite with bs at bit position pos. bs -- The bitstring to overwrite with. pos -- The bit position to begin overwriting from. Raises ValueError if pos < 0 or pos + bs.len > self.len
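A quick example with the bitstring package:

from bitstring import BitStream

s = BitStream('0b00000000')
s.overwrite('0b111', pos=2)   # bits 2-4 replaced
assert s.bin == '00111000'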
384,020
def managed(name,
            source=None,
            source_hash='',
            source_hash_name=None,
            keep_source=True,
            user=None,
            group=None,
            mode=None,
            attrs=None,
            template=None,
            makedirs=False,
            dir_mode=None,
            context=None,
            replace=True,
            defaults=None,
            backup='',
            show_changes=True,
            create=True,
            contents=None,
            tmp_dir='',
            tmp_ext='',
            contents_pillar=None,
            contents_grains=None,
            contents_newline=True,
            contents_delimiter=':',
            encoding=None,
            encoding_errors='strict',
            allow_empty=True,
            follow_symlinks=True,
            check_cmd=None,
            skip_verify=False,
            selinux=None,
            win_owner=None,
            win_perms=None,
            win_deny_perms=None,
            win_inheritance=True,
            win_perms_reset=False,
            **kwargs):
    # The body of salt.states.file.managed is garbled beyond faithful recovery
    # in this dump (string literals stripped, docstring fragments fused into
    # the code). In outline, the original:
    #   1. validates that `source` and the `contents*` options are not mixed,
    #      resolves contents from pillar/grains via contents_delimiter, and
    #      normalizes mode / selinux / Windows permission arguments;
    #   2. renders `contents` through the requested template engine;
    #   3. in test mode, reports would-be changes via file.check_managed_changes;
    #   4. otherwise calls file.get_managed and then file.manage_file (writing
    #      to a temp file first when check_cmd is set, and only promoting it if
    #      the check command exits zero);
    #   5. cleans up the temp file and, unless keep_source, the cached source.
    ...
r''' Manage a given file, this function allows for a file to be downloaded from the salt master and potentially run through a templating system. name The location of the file to manage, as an absolute path. source The source file to download to the minion, this source file can be hosted on either the salt master server (``salt://``), the salt minion local file system (``/``), or on an HTTP or FTP server (``http(s)://``, ``ftp://``). Both HTTPS and HTTP are supported as well as downloading directly from Amazon S3 compatible URLs with both pre-configured and automatic IAM credentials. (see s3.get state documentation) File retrieval from Openstack Swift object storage is supported via swift://container/object_path URLs, see swift.get documentation. For files hosted on the salt file server, if the file is located on the master in the directory named spam, and is called eggs, the source string is salt://spam/eggs. If source is left blank or None (use ~ in YAML), the file will be created as an empty file and the content will not be managed. This is also the case when a file already exists and the source is undefined; the contents of the file will not be changed or managed. If source is left blank or None, please also set replaced to False to make your intention explicit. If the file is hosted on a HTTP or FTP server then the source_hash argument is also required. A list of sources can also be passed in to provide a default source and a set of fallbacks. The first source in the list that is found to exist will be used and subsequent entries in the list will be ignored. Source list functionality only supports local files and remote files hosted on the salt master server or retrievable via HTTP, HTTPS, or FTP. .. code-block:: yaml file_override_example: file.managed: - source: - salt://file_that_does_not_exist - salt://file_that_exists source_hash This can be one of the following: 1. a source hash string 2. the URI of a file that contains source hash strings The function accepts the first encountered long unbroken alphanumeric string of correct length as a valid hash, in order from most secure to least secure: .. code-block:: text Type Length ====== ====== sha512 128 sha384 96 sha256 64 sha224 56 sha1 40 md5 32 **Using a Source Hash File** The file can contain several checksums for several files. Each line must contain both the file name and the hash. If no file name is matched, the first hash encountered will be used, otherwise the most secure hash with the correct source file name will be used. When using a source hash file the source_hash argument needs to be a url, the standard download urls are supported, ftp, http, salt etc: Example: .. code-block:: yaml tomdroid-src-0.7.3.tar.gz: file.managed: - name: /tmp/tomdroid-src-0.7.3.tar.gz - source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz - source_hash: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.hash The following lines are all supported formats: .. code-block:: text /etc/rc.conf ef6e82e4006dee563d98ada2a2a80a27 sha254c8525aee419eb649f0233be91c151178b30f0dff8ebbdcc8de71b1d5c8bcc06a /etc/resolv.conf ead48423703509d37c4a90e6a0d53e143b6fc268 Debian file type ``*.dsc`` files are also supported. **Inserting the Source Hash in the SLS Data** The source_hash can be specified as a simple checksum, like so: .. 
code-block:: yaml tomdroid-src-0.7.3.tar.gz: file.managed: - name: /tmp/tomdroid-src-0.7.3.tar.gz - source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz - source_hash: 79eef25f9b0b2c642c62b7f737d4f53f .. note:: Releases prior to 2016.11.0 must also include the hash type, like in the below example: .. code-block:: yaml tomdroid-src-0.7.3.tar.gz: file.managed: - name: /tmp/tomdroid-src-0.7.3.tar.gz - source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz - source_hash: md5=79eef25f9b0b2c642c62b7f737d4f53f Known issues: If the remote server URL has the hash file as an apparent sub-directory of the source file, the module will discover that it has already cached a directory where a file should be cached. For example: .. code-block:: yaml tomdroid-src-0.7.3.tar.gz: file.managed: - name: /tmp/tomdroid-src-0.7.3.tar.gz - source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz - source_hash: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz/+md5 source_hash_name When ``source_hash`` refers to a hash file, Salt will try to find the correct hash by matching the filename/URI associated with that hash. By default, Salt will look for the filename being managed. When managing a file at path ``/tmp/foo.txt``, then the following line in a hash file would match: .. code-block:: text acbd18db4cc2f85cedef654fccc4a4d8 foo.txt However, sometimes a hash file will include multiple similar paths: .. code-block:: text 37b51d194a7513e45b56f6524f2d51f2 ./dir1/foo.txt acbd18db4cc2f85cedef654fccc4a4d8 ./dir2/foo.txt 73feffa4b7f6bb68e44cf984c85f6e88 ./dir3/foo.txt In cases like this, Salt may match the incorrect hash. This argument can be used to tell Salt which filename to match, to ensure that the correct hash is identified. For example: .. code-block:: yaml /tmp/foo.txt: file.managed: - source: https://mydomain.tld/dir2/foo.txt - source_hash: https://mydomain.tld/hashes - source_hash_name: ./dir2/foo.txt .. note:: This argument must contain the full filename entry from the checksum file, as this argument is meant to disambiguate matches for multiple files that have the same basename. So, in the example above, simply using ``foo.txt`` would not match. .. versionadded:: 2016.3.5 keep_source : True Set to ``False`` to discard the cached copy of the source file once the state completes. This can be useful for larger files to keep them from taking up space in minion cache. However, keep in mind that discarding the source file will result in the state needing to re-download the source file if the state is run again. .. versionadded:: 2017.7.3 user The user to own the file, this defaults to the user salt is running as on the minion group The group ownership set for the file, this defaults to the group salt is running as on the minion. On Windows, this is ignored mode The permissions to set on this file, e.g. ``644``, ``0775``, or ``4664``. The default mode for new files and directories corresponds to the umask of the salt process. The mode of existing files and directories will only be changed if ``mode`` is specified. .. note:: This option is **not** supported on Windows. .. versionchanged:: 2016.11.0 This option can be set to ``keep``, and Salt will keep the mode from the Salt fileserver. This is only supported when the ``source`` URL begins with ``salt://``, or for files local to the minion. 
Because the ``source`` option cannot be used with any of the ``contents`` options, setting the ``mode`` to ``keep`` is also incompatible with the ``contents`` options. .. note:: keep does not work with salt-ssh. As a consequence of how the files are transferred to the minion, and the inability to connect back to the master with salt-ssh, salt is unable to stat the file as it exists on the fileserver and thus cannot mirror the mode on the salt-ssh minion attrs The attributes to have on this file, e.g. ``a``, ``i``. The attributes can be any or a combination of the following characters: ``aAcCdDeijPsStTu``. .. note:: This option is **not** supported on Windows. .. versionadded:: 2018.3.0 template If this setting is applied, the named templating engine will be used to render the downloaded file. The following templates are supported: - :mod:`cheetah<salt.renderers.cheetah>` - :mod:`genshi<salt.renderers.genshi>` - :mod:`jinja<salt.renderers.jinja>` - :mod:`mako<salt.renderers.mako>` - :mod:`py<salt.renderers.py>` - :mod:`wempy<salt.renderers.wempy>` makedirs : False If set to ``True``, then the parent directories will be created to facilitate the creation of the named file. If ``False``, and the parent directory of the destination file doesn't exist, the state will fail. dir_mode If directories are to be created, passing this option specifies the permissions for those directories. If this is not set, directories will be assigned permissions by adding the execute bit to the mode of the files. The default mode for new files and directories corresponds umask of salt process. For existing files and directories it's not enforced. replace : True If set to ``False`` and the file already exists, the file will not be modified even if changes would otherwise be made. Permissions and ownership will still be enforced, however. context Overrides default context variables passed to the template. defaults Default context passed to the template. backup Overrides the default backup mode for this specific file. See :ref:`backup_mode documentation <file-state-backups>` for more details. show_changes Output a unified diff of the old file and the new file. If ``False`` return a boolean if any changes were made. create : True If set to ``False``, then the file will only be managed if the file already exists on the system. contents Specify the contents of the file. Cannot be used in combination with ``source``. Ignores hashes and does not use a templating engine. This value can be either a single string, a multiline YAML string or a list of strings. If a list of strings, then the strings will be joined together with newlines in the resulting file. For example, the below two example states would result in identical file contents: .. code-block:: yaml /path/to/file1: file.managed: - contents: - This is line 1 - This is line 2 /path/to/file2: file.managed: - contents: | This is line 1 This is line 2 contents_pillar .. versionadded:: 0.17.0 .. versionchanged:: 2016.11.0 contents_pillar can also be a list, and the pillars will be concatenated together to form one file. Operates like ``contents``, but draws from a value stored in pillar, using the pillar path syntax used in :mod:`pillar.get <salt.modules.pillar.get>`. This is useful when the pillar value contains newlines, as referencing a pillar variable using a jinja/mako template can result in YAML formatting issues due to the newlines causing indentation mismatches. For example, the following could be used to deploy an SSH private key: .. 
code-block:: yaml /home/deployer/.ssh/id_rsa: file.managed: - user: deployer - group: deployer - mode: 600 - attrs: a - contents_pillar: userdata:deployer:id_rsa This would populate ``/home/deployer/.ssh/id_rsa`` with the contents of ``pillar['userdata']['deployer']['id_rsa']``. An example of this pillar setup would be like so: .. code-block:: yaml userdata: deployer: id_rsa: | -----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAoQiwO3JhBquPAalQF9qP1lLZNXVjYMIswrMe2HcWUVBgh+vY U7sCwx/dH6+VvNwmCoqmNnP+8gTPKGl1vgAObJAnMT623dMXjVKwnEagZPRJIxDy B/HaAre9euNiY3LvIzBTWRSeMfT+rWvIKVBpvwlgGrfgz70m0pqxu+UyFbAGLin+ GpxzZAMaFpZw4sSbIlRuissXZj/sHpQb8p9M5IeO4Z3rjkCP1cxI -----END RSA PRIVATE KEY----- .. note:: The private key above is shortened to keep the example brief, but shows how to do multiline string in YAML. The key is followed by a pipe character, and the multiline string is indented two more spaces. To avoid the hassle of creating an indented multiline YAML string, the :mod:`file_tree external pillar <salt.pillar.file_tree>` can be used instead. However, this will not work for binary files in Salt releases before 2015.8.4. contents_grains .. versionadded:: 2014.7.0 Operates like ``contents``, but draws from a value stored in grains, using the grains path syntax used in :mod:`grains.get <salt.modules.grains.get>`. This functionality works similarly to ``contents_pillar``, but with grains. For example, the following could be used to deploy a "message of the day" file: .. code-block:: yaml write_motd: file.managed: - name: /etc/motd - contents_grains: motd This would populate ``/etc/motd`` file with the contents of the ``motd`` grain. The ``motd`` grain is not a default grain, and would need to be set prior to running the state: .. code-block:: bash salt '*' grains.set motd 'Welcome! This system is managed by Salt.' contents_newline : True .. versionadded:: 2014.7.0 .. versionchanged:: 2015.8.4 This option is now ignored if the contents being deployed contain binary data. If ``True``, files managed using ``contents``, ``contents_pillar``, or ``contents_grains`` will have a newline added to the end of the file if one is not present. Setting this option to ``False`` will omit this final newline. contents_delimiter .. versionadded:: 2015.8.4 Can be used to specify an alternate delimiter for ``contents_pillar`` or ``contents_grains``. This delimiter will be passed through to :py:func:`pillar.get <salt.modules.pillar.get>` or :py:func:`grains.get <salt.modules.grains.get>` when retrieving the contents. encoding If specified, then the specified encoding will be used. Otherwise, the file will be encoded using the system locale (usually UTF-8). See https://docs.python.org/3/library/codecs.html#standard-encodings for the list of available encodings. .. versionadded:: 2017.7.0 encoding_errors : 'strict' Error encoding scheme. Default is ```'strict'```. See https://docs.python.org/2/library/codecs.html#codec-base-classes for the list of available schemes. .. versionadded:: 2017.7.0 allow_empty : True .. versionadded:: 2015.8.4 If set to ``False``, then the state will fail if the contents specified by ``contents_pillar`` or ``contents_grains`` are empty. follow_symlinks : True .. versionadded:: 2014.7.0 If the desired path is a symlink follow it and make changes to the file to which the symlink points. check_cmd .. versionadded:: 2014.7.0 The specified command will be run with an appended argument of a *temporary* file containing the new managed contents. 
If the command exits with a zero status the new managed contents will be written to the managed destination. If the command exits with a nonzero exit code, the state will fail and no changes will be made to the file. For example, the following could be used to verify sudoers before making changes: .. code-block:: yaml /etc/sudoers: file.managed: - user: root - group: root - mode: 0440 - attrs: i - source: salt://sudoers/files/sudoers.jinja - template: jinja - check_cmd: /usr/sbin/visudo -c -f **NOTE**: This ``check_cmd`` functions differently than the requisite ``check_cmd``. tmp_dir Directory for temp file created by ``check_cmd``. Useful for checkers dependent on config file location (e.g. daemons restricted to their own config directories by an apparmor profile). .. code-block:: yaml /etc/dhcp/dhcpd.conf: file.managed: - user: root - group: root - mode: 0755 - tmp_dir: '/etc/dhcp' - contents: "# Managed by Salt" - check_cmd: dhcpd -t -cf tmp_ext Suffix for temp file created by ``check_cmd``. Useful for checkers dependent on config file extension (e.g. the init-checkconf upstart config checker). .. code-block:: yaml /etc/init/test.conf: file.managed: - user: root - group: root - mode: 0440 - tmp_ext: '.conf' - contents: - 'description "Salt Minion"' - 'start on started mountall' - 'stop on shutdown' - 'respawn' - 'exec salt-minion' - check_cmd: init-checkconf -f skip_verify : False If ``True``, hash verification of remote file sources (``http://``, ``https://``, ``ftp://``) will be skipped, and the ``source_hash`` argument will be ignored. .. versionadded:: 2016.3.0 selinux : None Allows setting the selinux user, role, type, and range of a managed file .. code-block:: yaml /tmp/selinux.test file.managed: - user: root - selinux: seuser: system_u serole: object_r setype: system_conf_t seranage: s0 .. versionadded:: Neon win_owner : None The owner of the directory. If this is not passed, user will be used. If user is not passed, the account under which Salt is running will be used. .. versionadded:: 2017.7.0 win_perms : None A dictionary containing permissions to grant and their propagation. For example: ``{'Administrators': {'perms': 'full_control'}}`` Can be a single basic perm or a list of advanced perms. ``perms`` must be specified. ``applies_to`` does not apply to file objects. .. versionadded:: 2017.7.0 win_deny_perms : None A dictionary containing permissions to deny and their propagation. For example: ``{'Administrators': {'perms': 'full_control'}}`` Can be a single basic perm or a list of advanced perms. ``perms`` must be specified. ``applies_to`` does not apply to file objects. .. versionadded:: 2017.7.0 win_inheritance : True True to inherit permissions from the parent directory, False not to inherit permission. .. versionadded:: 2017.7.0 win_perms_reset : False If ``True`` the existing DACL will be cleared and replaced with the settings defined in this function. If ``False``, new entries will be appended to the existing DACL. Default is ``False``. .. versionadded:: 2018.3.0 Here's an example using the above ``win_*`` parameters: .. code-block:: yaml create_config_file: file.managed: - name: C:\config\settings.cfg - source: salt://settings.cfg - win_owner: Administrators - win_perms: # Basic Permissions dev_ops: perms: full_control # List of advanced permissions appuser: perms: - read_attributes - read_ea - create_folders - read_permissions joe_snuffy: perms: read - win_deny_perms: fred_snuffy: perms: full_control - win_inheritance: False
384,021
def size(default_chunk_size, response_time_max, response_time_actual): if response_time_actual == 0: response_time_actual = 1 scale = 1 / (response_time_actual / response_time_max) size = int(default_chunk_size * scale) return min(max(size, 1), default_chunk_size)
Determines the chunk size based on response times.
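A worked example: if the server took twice the allowed response time, the scale factor is 1 / (4.0 / 2.0) = 0.5, so a 1024-byte default chunk shrinks to 512 bytes:

# scale = 1 / (4.0 / 2.0) = 0.5 -> int(1024 * 0.5) = 512
assert size(1024, 2.0, 4.0) == 512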
384,022
def name(random=random, *args, **kwargs): if random.choice([True, True, True, False]): return firstname(random=random) + " " + lastname(random=random) elif random.choice([True, False]): return title(random=random) + " " + firstname(random=random) + " " + lastname(random=random) else: return title(random=random) + " " + lastname(random=random)
Return someone's name >>> mock_random.seed(0) >>> name(random=mock_random) 'carl poopbritches' >>> mock_random.seed(7) >>> name(random=mock_random, capitalize=True) 'Duke Testy Wonderful'
384,023
def fit(self, validation_data=None, **kwargs):
    callbacks = kwargs.pop('callbacks', [])  # key stripped in dump; 'callbacks' from context
    if validation_data is not None:
        callbacks.insert(0, InferenceRunner(
            validation_data, ScalarStats(self._stats_to_inference)))
    self.trainer.train_with_defaults(callbacks=callbacks, **kwargs)
Args: validation_data (DataFlow or InputSource): to be used for inference. The inference callback is added as the first in the callback list. If you need to use it in a different order, please write it in the callback list manually. kwargs: same arguments as :meth:`Trainer.train_with_defaults`.
384,024
def babel_extract(config, input, output, target, keywords): click.echo( click.style( "Starting Extractions config:{0} input:{1} output:{2} keywords:{3}".format( config, input, output, keywords ), fg="green", ) ) keywords = " -k ".join(keywords) os.popen( "pybabel extract -F {0} -k {1} -o {2} {3}".format( config, keywords, output, input ) ) click.echo(click.style("Starting Update target:{0}".format(target), fg="green")) os.popen("pybabel update -N -i {0} -d {1}".format(output, target)) click.echo(click.style("Finish, you can start your translations", fg="green"))
Babel: extracts and updates all messages marked for translation.
384,025
def multiply(dists):
    if not all([isinstance(k, Distribution) for k in dists]):
        raise NotImplementedError("Can only multiply Distribution objects")
    n_delta = np.sum([k.is_delta for k in dists])
    min_width = np.max([k.min_width for k in dists])
    if n_delta > 1:
        raise ArithmeticError("Cannot multiply more than one delta function!")
    elif n_delta == 1:
        delta_dist_ii = np.where([k.is_delta for k in dists])[0][0]
        delta_dist = dists[delta_dist_ii]
        new_xpos = delta_dist.peak_pos
        new_weight = np.prod([k.prob(new_xpos) for k in dists
                              if k != delta_dist_ii]) * delta_dist.weight
        res = Distribution.delta_function(new_xpos, weight=new_weight,
                                          min_width=min_width)
    else:
        new_xmin = np.max([k.xmin for k in dists])
        new_xmax = np.min([k.xmax for k in dists])
        x_vals = np.unique(np.concatenate([k.x for k in dists]))
        x_vals = x_vals[(x_vals > new_xmin - TINY_NUMBER) & (x_vals < new_xmax + TINY_NUMBER)]
        y_vals = np.sum([k.__call__(x_vals) for k in dists], axis=0)
        peak = y_vals.min()
        ind = (y_vals - peak) < BIG_NUMBER / 1000
        n_points = ind.sum()
        if n_points == 0:
            print("ERROR in distribution multiplication: Distributions do not overlap")
            x_vals = [0, 1]
            y_vals = [BIG_NUMBER, BIG_NUMBER]
            res = Distribution(x_vals, y_vals, is_log=True,
                               min_width=min_width, kind='linear')  # kind literal stripped; 'linear' assumed
        elif n_points == 1:
            res = Distribution.delta_function(x_vals[0])
        else:
            res = Distribution(x_vals[ind], y_vals[ind], is_log=True,
                               min_width=min_width, kind='linear',  # kind literal stripped; 'linear' assumed
                               assume_sorted=True)
    return res
Multiplies a list of Distribution objects and returns the resulting Distribution.
384,026
def elevations(self): resources = self.get_resource() elevations = namedtuple(, ) try: return [elevations(resource[]) for resource in resources] except KeyError: return [elevations(resource[]) for resource in resources] except TypeError: try: if isinstance(resources[][], dict): return elevations(resources[][]) except KeyError: offsets = namedtuple(, ) try: if isinstance(resources[][], dict): return offsets(resources[][]) except KeyError: print(KeyError)
Retrieves elevations/offsets from the output response Returns: elevations/offsets (namedtuple): A named tuple of list of elevations/offsets
384,027
def download_needed(self, response, outfile, quiet=True): try: remote_date = datetime.strptime(response.headers[], ) if isfile(outfile): local_date = datetime.fromtimestamp(os.path.getmtime(outfile)) if remote_date <= local_date: if not quiet: print(os.path.basename(outfile) + ) return False except: pass return True
Determine if a download is needed based on timestamps. Return True if needed (the remote file is newer) or False if the local file is newest. Parameters ========== response: the response from the API outfile: the output file to write to quiet: suppress verbose output (default is True)
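A hedged sketch of wiring this into a download loop; `api` (the client instance), the URL, and the output path are placeholders, not the library's documented surface:

import requests

url = "https://example.com/api/files/report.csv"   # placeholder
outfile = "/tmp/report.csv"                        # placeholder
response = requests.get(url, stream=True)
if api.download_needed(response, outfile, quiet=False):
    with open(outfile, "wb") as out:
        for chunk in response.iter_content(chunk_size=8192):
            out.write(chunk)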
384,028
def add_arguments(self, parser): group = parser.add_mutually_exclusive_group(required=True) group.add_argument(, , action=, help=) group.add_argument(, , action=, help=) return self.add_common_arguments(parser, False)
Adds the arguments for the firmware command. Args: self (FirmwareCommand): the ``FirmwareCommand`` instance parser (argparse.ArgumentParser): parser to add the commands to Returns: ``None``
384,029
def stop_instance(self, instance_id):
    self._restore_from_storage(instance_id)
    if self._start_failed:
        # Message literal stripped from the dump; wording assumed.
        raise Exception('start failed, refusing to stop instance %s' % instance_id)
    with self._resource_lock:
        try:
            v_m = self._qualified_name_to_vm(instance_id)
            if not v_m:
                # The dump truncates here ("stop_instance: cant ..."); the
                # original raised an error for an unknown instance and then
                # performed the actual VM deletion, which is not recoverable.
                raise Exception("stop_instance: can't find instance %s" % instance_id)
            if self._n_instances == 0:
                log.debug('last instance deleted, cleaning up global resources')  # message assumed
                self._delete_global_reqs()
                self._delete_cloud_provider_storage()
        except Exception as exc:
            log.error(traceback.format_exc())
            log.error("error stopping instance %s: %s", instance_id, exc)
            raise
    log.debug('stopped instance %s', instance_id)  # message assumed
Stops the instance gracefully. :param str instance_id: instance identifier :return: None
384,030
def _update(self, rules: list):
    self._rules = rules
    to_store = '\n'.join(rule.config_string for rule in rules)  # separator stripped in dump; newline assumed
    sftp_connection = self._sftp_connection
    with sftp_connection.open(self.RULE_PATH, mode='w') as file_handle:  # mode stripped; 'w' assumed
        file_handle.write(to_store)
Updates the given rules and stores them on the router.
384,031
def convert(self, request, response, data):
    # The format literal and the empty-case value were stripped from the
    # dump; '?%s' and '' below are plausible placeholders for a
    # query-string conversion.
    qstr = request.query_string
    return self.escape('?%s' % qstr) if qstr else ''
Performs the desired Conversion. :param request: The webob Request object describing the request. :param response: The webob Response object describing the response. :param data: The data dictionary returned by the prepare() method. :returns: A string, the results of which are the desired conversion.
384,032
def resize_thumbnail(image, size, resample=Image.LANCZOS): img_format = image.format img = image.copy() img.thumbnail((size[0], size[1]), resample) img.format = img_format return img
Resize image according to size. image: a Pillow image instance size: a list of two integers [width, height]
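For instance:

from PIL import Image

img = Image.open("photo.jpg")             # placeholder path
thumb = resize_thumbnail(img, [200, 200])
thumb.save("photo_thumb.jpg")             # aspect ratio is preserved by thumbnail()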
384,033
def direct_horizontal_irradiance(self):
    analysis_period = AnalysisPeriod(timestep=self.timestep,
                                     is_leap_year=self.is_leap_year)
    header_dhr = Header(data_type=DirectHorizontalIrradiance(),
                        unit='W/m2',  # unit literal stripped in dump; 'W/m2' per ladybug convention
                        analysis_period=analysis_period, metadata=self.metadata)
    direct_horiz = []
    sp = Sunpath.from_location(self.location)
    sp.is_leap_year = self.is_leap_year
    for dt, dnr in zip(self.datetimes, self.direct_normal_irradiance):
        sun = sp.calculate_sun_from_date_time(dt)
        direct_horiz.append(dnr * math.sin(math.radians(sun.altitude)))
    return HourlyContinuousCollection(header_dhr, direct_horiz)
Returns the direct irradiance on a horizontal surface at each timestep. Note that this is different from the direct_normal_irradiance needed to construct a Wea, which is NORMAL and not HORIZONTAL.
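The projection used above, written out for a sun altitude h in degrees (a restatement of the loop body, not new library API):

import math

def dhi_from_dni(dni, sun_altitude_deg):
    # Direct horizontal irradiance is the direct normal component
    # projected onto the horizontal plane.
    return dni * math.sin(math.radians(sun_altitude_deg))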
384,034
def get_variance(seq): m = get_mean(seq) return sum((v-m)**2 for v in seq)/float(len(seq))
Batch variance calculation.
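A worked example (population variance, dividing by n): the mean of [2, 4, 6] is 4, so the variance is ((2-4)**2 + 0 + (6-4)**2) / 3 = 8/3:

assert abs(get_variance([2, 4, 6]) - 8.0 / 3.0) < 1e-9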
384,035
def roc_auc_xlim(x_bla, y_bla, xlim=0.1): x = x_bla[:] y = y_bla[:] x.sort() y.sort() u = {} for i in x + y: u[i] = 1 vals = sorted(u.keys()) len_x = float(len(x)) len_y = float(len(y)) new_x = [] new_y = [] x_p = 0 y_p = 0 for val in vals[::-1]: while len(x) > 0 and x[-1] >= val: x.pop() x_p += 1 while len(y) > 0 and y[-1] >= val: y.pop() y_p += 1 new_y.append((len_x - x_p) / len_x) new_x.append((len_y - y_p) / len_y) new_x = 1 - np.array(new_x) new_y = 1 - np.array(new_y) x = new_x y = new_y if len(x) != len(y): raise ValueError("Unequal!") if not xlim: xlim = 1.0 auc = 0.0 bla = zip(stats.rankdata(x), range(len(x))) bla = sorted(bla, key=lambda x: x[1]) prev_x = x[bla[0][1]] prev_y = y[bla[0][1]] index = 1 while index < len(bla) and x[bla[index][1]] <= xlim: _, i = bla[index] auc += y[i] * (x[i] - prev_x) - ((x[i] - prev_x) * (y[i] - prev_y) / 2.0) prev_x = x[i] prev_y = y[i] index += 1 if index < len(bla): (rank, i) = bla[index] auc += prev_y * (xlim - prev_x) + ((y[i] - prev_y)/(x[i] - prev_x) * (xlim -prev_x) * (xlim - prev_x)/2) return auc
Computes the ROC Area Under Curve until a certain FPR value. Parameters ---------- fg_vals : array_like list of values for positive set bg_vals : array_like list of values for negative set xlim : float, optional FPR value Returns ------- score : float ROC AUC score
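A small usage sketch with made-up scores:

fg_vals = [0.9, 0.8, 0.7, 0.3]   # positive-set scores
bg_vals = [0.6, 0.4, 0.2, 0.1]   # negative-set scores
partial_auc = roc_auc_xlim(fg_vals, bg_vals, xlim=0.1)  # AUC up to FPR = 0.1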
384,036
def bag(directory, mets_basename, dest, identifier, in_place, manifestation_depth, mets, base_version_checksum, tag_file, skip_zip, processes): resolver = Resolver() workspace = Workspace(resolver, directory=directory, mets_basename=mets_basename) workspace_bagger = WorkspaceBagger(resolver) workspace_bagger.bag( workspace, dest=dest, ocrd_identifier=identifier, ocrd_manifestation_depth=manifestation_depth, ocrd_mets=mets, ocrd_base_version_checksum=base_version_checksum, processes=processes, tag_files=tag_file, skip_zip=skip_zip, in_place=in_place )
Bag workspace as OCRD-ZIP at DEST
384,037
def find_expectations(self, expectation_type=None, column=None, expectation_kwargs=None, discard_result_format_kwargs=True, discard_include_configs_kwargs=True, discard_catch_exceptions_kwargs=True, ): match_indexes = self.find_expectation_indexes( expectation_type, column, expectation_kwargs, ) return self._copy_and_clean_up_expectations_from_indexes( match_indexes, discard_result_format_kwargs, discard_include_configs_kwargs, discard_catch_exceptions_kwargs, )
Find matching expectations within _expectation_config. Args: expectation_type=None : The name of the expectation type to be matched. column=None : The name of the column to be matched. expectation_kwargs=None : A dictionary of kwargs to match against. discard_result_format_kwargs=True : In returned expectation object(s), suppress the `result_format` parameter. discard_include_configs_kwargs=True : In returned expectation object(s), suppress the `include_configs` parameter. discard_catch_exceptions_kwargs=True : In returned expectation object(s), suppress the `catch_exceptions` parameter. Returns: A list of matching expectation objects. If there are no matches, the list will be empty.
384,038
def _reset_suffix_links(self): self._suffix_links_set = False for current, _parent in self.dfs(): current.suffix = None current.dict_suffix = None current.longest_prefix = None
Reset all suffix links in all nodes in this trie.
384,039
def load(self, profile_args): for key, value in profile_args.items(): self.add(key, value)
Load provided CLI args. Args: profile_args (dict): Dictionary of args in key/value format.
384,040
def next_task(self, item, raise_exceptions=None, **kwargs): filename = os.path.basename(item) batch = self.get_batch(filename) tx_deserializer = self.tx_deserializer_cls( allow_self=self.allow_self, override_role=self.override_role ) try: tx_deserializer.deserialize_transactions( transactions=batch.saved_transactions ) except (DeserializationError, TransactionDeserializerError) as e: raise TransactionsFileQueueError(e) from e else: batch.close() self.archive(filename)
Deserializes all transactions for this batch and archives the file.
384,041
def as_languages(self):
    langs = []
    # select_related fields stripped in dump; names taken from the usage below.
    for culture_code in self.select_related('language', 'country').all():
        lang = culture_code.language
        lang.country = culture_code.country
        lang.culturecode = culture_code.code
        langs.append(lang)
    return langs
Get the Language objects associated with this queryset of CultureCodes as a list. The Language objects will have country and culturecode set. :return:
384,042
def get_prev_step(self, step=None): if step is None: step = self.steps.current form_list = self.get_form_list() key = form_list.keyOrder.index(step) - 1 if key >= 0: return form_list.keyOrder[key] return None
Returns the previous step before the given `step`. If there are no steps available, None will be returned. If the `step` argument is None, the current step will be determined automatically.
384,043
def variablename(var): s=[tpl[0] for tpl in itertools.ifilter(lambda x: var is x[1], globals().items())] s=s[0].upper() return s
Returns the name of a variable as an uppercased string.
384,044
def competition_download_leaderboard(self, id, **kwargs):
    # kwargs keys stripped in dump; '_return_http_data_only' and 'async_req'
    # follow the standard swagger-codegen client pattern.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.competition_download_leaderboard_with_http_info(id, **kwargs)
    else:
        (data) = self.competition_download_leaderboard_with_http_info(id, **kwargs)
        return data
Download competition leaderboard # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.competition_download_leaderboard(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: Competition name (required) :return: Result If the method is called asynchronously, returns the request thread.
384,045
def havdalah(self): today = HDate(gdate=self.date, diaspora=self.location.diaspora) tomorrow = HDate(gdate=self.date + dt.timedelta(days=1), diaspora=self.location.diaspora) if today.is_shabbat or today.is_yom_tov: if tomorrow.is_shabbat or tomorrow.is_yom_tov: return None return self._havdalah_datetime return None
Return the time for havdalah, or None if not applicable. If havdalah_offset is 0, uses the time for three_stars. Otherwise, adds the offset to the time of sunset and uses that. If it's currently a multi-day YomTov, and the end of the stretch is after today, the havdalah value is defined to be None (to avoid misleading the user that melacha is permitted).
384,046
def extend_substation(grid, critical_stations, grid_level): load_factor_lv_trans_lc_normal = cfg_ding0.get( , ) load_factor_lv_trans_fc_normal = cfg_ding0.get( , ) trafo_params = grid.network._static_data[.format( grid_level=grid_level)] trafo_s_max_max = max(trafo_params[]) for station in critical_stations: if station[][0] > station[][1]: case = lf_lv_trans_normal = load_factor_lv_trans_lc_normal else: case = lf_lv_trans_normal = load_factor_lv_trans_fc_normal s_max_trafos = sum([_.s_max_a for _ in station[]._transformers]) s_trafo_missing = max(station[]) - ( s_max_trafos * lf_lv_trans_normal) extendable_trafos = [_ for _ in station[]._transformers if _.s_max_a < trafo_s_max_max] while (s_trafo_missing > 0) and extendable_trafos: trafo = extendable_trafos[0] trafo_s_max_a_before = trafo.s_max_a extend_trafo_power(extendable_trafos, trafo_params) s_trafo_missing -= ((trafo.s_max_a * lf_lv_trans_normal) - trafo_s_max_a_before) extendable_trafos = [_ for _ in station[]._transformers if _.s_max_a < trafo_s_max_max] if s_trafo_missing > 0: trafo_type, trafo_cnt = select_transformers(grid, s_max={ : s_trafo_missing, : case }) for t in range(0, trafo_cnt): lv_transformer = TransformerDing0( grid=grid, id_db=id, v_level=0.4, s_max_longterm=trafo_type[], r=trafo_type[], x=trafo_type[]) grid._station.add_transformer(lv_transformer) logger.info("{stations_cnt} have been reinforced due to overloading " "issues.".format(stations_cnt=len(critical_stations)))
Reinforce MV or LV substation by exchanging the existing trafo and installing a parallel one if necessary. First, all available transformers in `critical_stations` are extended to maximum power. If this does not solve all present issues, additional transformers are built. Parameters ---------- grid: GridDing0 Ding0 grid container critical_stations : :any:`list` List of stations with overloading grid_level : str Either "LV" or "MV". Basis to select right equipment. Notes ----- Currently implemented in a straightforward way for LV stations only. Returns ------- type #TODO: Description of return. Change type in the previous line accordingly
384,047
def add_macd(self,fast_period=12,slow_period=26,signal_period=9,column=None, name=,str=None,**kwargs): if not column: column=self._d[] study={:, :name, :{:fast_period,:slow_period, :signal_period,:column, :str}, :utils.merge_dict({:False,:[,]},kwargs)} study[][]=.format(fast_period,slow_period,signal_period) self._add_study(study)
Add Moving Average Convergence Divergence (MACD) study to QuantFigure.studies Parameters: fast_period : int MACD Fast Period slow_period : int MACD Slow Period signal_period : int MACD Signal Period column :string Defines the data column name that contains the data over which the study will be applied. Default: 'close' name : string Name given to the study str : string Label factory for studies The following wildcards can be used: {name} : Name of the column {study} : Name of the study {period} : Period used Examples: 'study: {study} - period: {period}' kwargs: legendgroup : bool If true, all legend items are grouped into a single one All formatting values available on iplot()
384,048
def _load_manifest_interpret_source(manifest, source, username=None, password=None,
                                    verify_certificate=True, do_inherit=True):
    # Section/option literals were stripped from the dump; 'config', 'source'
    # and 'inherits' below are assumptions consistent with the logic.
    try:
        if isinstance(source, string_types):
            if source.startswith("http"):
                _load_manifest_from_url(manifest, source,
                                        verify_certificate=verify_certificate,
                                        username=username, password=password)
            else:
                _load_manifest_from_file(manifest, source)
            if not manifest.has_option('config', 'source'):
                manifest.set('config', 'source', str(source))
        else:
            manifest.readfp(source)
        # Inherit any missing sections/options from a parent manifest.
        if manifest.has_option('config', 'inherits') and do_inherit:
            parent_manifest = configparser.RawConfigParser()
            _load_manifest_interpret_source(parent_manifest,
                                            manifest.get('config', 'inherits'),
                                            username=username, password=password,
                                            verify_certificate=verify_certificate)
            for s in parent_manifest.sections():
                for k, v in parent_manifest.items(s):
                    if not manifest.has_option(s, k):
                        manifest.set(s, k, v)
    except configparser.Error:
        logger.debug("", exc_info=True)
        error_message = sys.exc_info()[1]
        raise ManifestException("Unable to parse manifest!: {0}".format(error_message))
Interpret the <source>, and load the results into <manifest>
384,049
def parse_signature_type_comment(type_comment):
    try:
        # filename and mode were stripped during extraction; mode must be
        # 'func_type' for signature comments, the filename is a placeholder
        result = ast3.parse(type_comment, '<func_type>', 'func_type')
    except SyntaxError:
        raise ValueError(f"invalid function signature type comment: {type_comment!r}")

    assert isinstance(result, ast3.FunctionType)
    if len(result.argtypes) == 1:  # (arg) -> ret
        argtypes = result.argtypes[0]
    else:  # (arg1, arg2) -> ret
        argtypes = result.argtypes
    return argtypes, result.returns
Parse the fugly signature type comment into AST nodes. Caveats: ASTifying **kwargs is impossible with the current grammar so we hack it into unary subtraction (to differentiate from Starred in vararg). For example from: "(str, int, *int, **Any) -> 'SomeReturnType'" To: ([ast3.Name, ast.Name, ast3.Name, ast.Name], ast3.Str)
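A quick illustration of the round trip (hedged: assumes the typed_ast package providing ast3, and the reconstructed parse mode above):

import typed_ast.ast3 as ast3

# two or more argument types come back as a list of AST nodes
argtypes, returns = parse_signature_type_comment("(str, int) -> bool")
print([ast3.dump(a) for a in argtypes])   # [Name(id='str'...), Name(id='int'...)]
print(ast3.dump(returns))                 # Name(id='bool'...)

# a single argument type comes back as a bare node, not a list
argtypes, _ = parse_signature_type_comment("(str) -> bool")
print(ast3.dump(argtypes))                # Name(id='str'...)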
384,050
def RegisterMessageHandler(self, handler, lease_time, limit=1000): self.UnregisterMessageHandler() self.handler_stop = False self.handler_thread = threading.Thread( name="message_handler", target=self._MessageHandlerLoop, args=(handler, lease_time, limit)) self.handler_thread.daemon = True self.handler_thread.start()
Leases a number of message handler requests up to the indicated limit.
384,051
def _complete_statement(self, line: str) -> Statement:
    # The ValueError handling around parse() was flattened in this record;
    # the except clause is restored here so parse_command_only() runs only
    # when parsing fails (e.g. on unclosed quotes).
    while True:
        try:
            statement = self.statement_parser.parse(line)
            if statement.multiline_command and statement.terminator:
                # we have a completed multiline command, we are done
                break
            if not statement.multiline_command:
                # it's not a multiline command, but we parsed it ok, so we are done
                break
        except ValueError:
            # we have unclosed quotation marks, parse only the command and
            # see if it's a multiline command
            statement = self.statement_parser.parse_command_only(line)
            if not statement.multiline_command:
                # not a multiline command, so re-raise the exception
                raise

        # we have a multiline command with no terminator (or with unclosed
        # quotation marks): keep prompting for more input
        try:
            self.at_continuation_prompt = True
            newline = self.pseudo_raw_input(self.continuation_prompt)
            if newline == 'eof':
                # pseudo_raw_input() returns a literal 'eof' when the input
                # pipe runs out; treat it as a blank line (a terminator)
                newline = '\n'
            self.poutput(newline)
            line = '{}\n{}'.format(statement.raw, newline)
        except KeyboardInterrupt as ex:
            if self.quit_on_sigint:
                raise ex
            else:
                self.poutput('^C')
                statement = self.statement_parser.parse('')
                break
        finally:
            self.at_continuation_prompt = False

    if not statement.command:
        raise EmptyStatement()
    return statement
Keep accepting lines of input until the command is complete. There is some pretty hacky code here to handle some quirks of self.pseudo_raw_input(). It returns a literal 'eof' if the input pipe runs out. We can't refactor it because we need to retain backwards compatibility with the standard library version of cmd.
384,052
def aging_csv(request):
    # The content-type and disposition strings were stripped during
    # extraction; standard CSV-download values are assumed here (the
    # filename is a hedged guess).
    animal_list = Animal.objects.all()
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=aging.csv'
    writer = csv.writer(response)
    writer.writerow(["Animal", "Strain", "Genotype", "Gender", "Age",
                     "Death", "Alive"])
    for animal in animal_list.iterator():
        writer.writerow([
            animal.MouseID,
            animal.Strain,
            animal.Genotype,
            animal.Gender,
            animal.age(),
            animal.Cause_of_Death,
            animal.Alive
        ])
    return response
This view generates a csv output file of all animal data for use in aging analysis. The view writes to a csv table the animal, strain, genotype, age (in days), and cause of death.
384,053
def create_graph_from_data(self, data, **kwargs):
    # The template key was stripped during extraction; '{VERBOSE}' matches
    # the placeholder convention used by the R wrappers in this package.
    self.arguments['{VERBOSE}'] = str(self.verbose).upper()
    results = self._run_ccdr(data, verbose=self.verbose)

    return nx.relabel_nodes(nx.DiGraph(results),
                            {idx: i for idx, i in enumerate(data.columns)})
Apply causal discovery on observational data using CCDr. Args: data (pandas.DataFrame): DataFrame containing the data Returns: networkx.DiGraph: Solution given by the CCDR algorithm.
384,054
def deploy_to(self, displays=None, exclude=[], lock=[]): if displays is None: signs = Sign.objects.all() else: signs = Sign.objects.filter(display__in=displays) for sign in signs.exclude(display__in=exclude): sign.pages.add(self) sign.save()
Deploys the page to the given displays (pass them via ``displays``).
If ``displays`` is None, deploys to all displays.
``exclude`` lists displays to skip and overrides the first argument.
384,055
def standard_parser(cls, verbose=True, interactive=False, no_interactive=False,
                    simulate=False, quiet=False, overwrite=False):
    # The option flag strings were stripped during extraction and are
    # reconstructed from the dest names (PasteScript conventions).
    parser = BoolOptionParser()
    if verbose:
        parser.add_option('-v', '--verbose', action='count', dest='verbose',
                          default=0)
    if quiet:
        parser.add_option('-q', '--quiet', action='count', dest='quiet',
                          default=0)
    if no_interactive:
        parser.add_option('--no-interactive', action="count",
                          dest="no_interactive", default=0)
    if interactive:
        parser.add_option('-i', '--interactive', action='count',
                          dest='interactive', default=0)
    if simulate:
        parser.add_option('-n', '--simulate', action='store_true',
                          dest='simulate', default=False)
    if overwrite:
        parser.add_option('-f', '--overwrite', dest="overwrite",
                          action="store_true",
                          help="Overwrite files (warnings will be emitted for "
                               "non-matching files otherwise)")
    return parser
Create a standard ``OptionParser`` instance. Typically used like:: class MyCommand(Command): parser = Command.standard_parser() Subclasses may redefine ``standard_parser``, so use the nearest superclass's class method.
384,056
def main(): usage = "usage: %prog [-u USER] [-p PASSWORD] [-t TITLE] [-s selection] url" parser = OptionParser(usage, version="%prog "+instapaperlib.__version__) parser.add_option("-u", "--user", action="store", dest="user", metavar="USER", help="instapaper username") parser.add_option("-p", "--password", action="store", dest="password", metavar="USER", help="instapaper password") parser.add_option("-t", "--title", action="store", dest="title", metavar="TITLE", help="title of the link to add") parser.add_option("-s", "--selection", action="store", dest="selection", metavar="SELECTION", help="short text for description") (options, args) = parser.parse_args() if not len(args) > 0: parser.error("What do you want to read later?") if not options.user: login = re.compile("(.+?):(.+)") try: config = open(os.path.expanduser("~") + "/.instapaperrc") for line in config: matches = login.match(line) if matches: user = matches.group(1).strip() password = matches.group(2).strip() except IOError: parser.error("No login information present.") sys.exit(-1) else: user = options.user if not options.password: password = getpass() else: password = options.password (status, text) = instapaperlib.add_item(user, password, args[0], options.title, options.selection) print text
main method
384,057
def open(self, user=None, repo=None):
    # a stray fragment of the stripped docstring ("s browser'") was removed
    webbrowser.open(self.format_path(repo, namespace=user, rw=False))
Open the URL of a repository in the user's browser
384,058
def parse(self, ioc_obj):
    # The XPath expression, fallback label, and warning text were stripped
    # during extraction; the values below are hedged reconstructions.
    if ioc_obj is None:
        return
    iocid = ioc_obj.iocid
    try:
        sd = ioc_obj.metadata.xpath('.//short_description/text()')[0]
    except IndexError:
        sd = 'NoName'
    if iocid in self.iocs:
        msg = 'duplicate IOC ID [{}] found (existing name [{}], new name [{}])'.format(
            iocid, self.ioc_name[iocid], sd)
        log.warning(msg)
    self.iocs[iocid] = ioc_obj
    self.ioc_name[iocid] = sd
    if self.parser_callback:
        self.parser_callback(ioc_obj)
    return True
parses an ioc to populate self.iocs and self.ioc_name :param ioc_obj: :return:
384,059
def default_cx(self): px_width = self.image.px_width horz_dpi = self.image.horz_dpi width_in_inches = px_width / horz_dpi return Inches(width_in_inches)
Native width of this image, calculated from its width in pixels and horizontal dots per inch (dpi).
384,060
def options(self): if self._options is None: self._options = Option.View(self) return self._options
Returns the options specified as argument to this command.
384,061
def addAttachment(self, filepath): proj_id = self.contextId fa = self.rtc_obj.getFiledAgainst(self.filedAgainst, projectarea_id=proj_id) fa_id = fa.url.split("/")[-1] headers = copy.deepcopy(self.rtc_obj.headers) if headers.__contains__("Content-Type"): headers.__delitem__("Content-Type") filename = os.path.basename(filepath) fileh = open(filepath, "rb") files = {"attach": (filename, fileh, "application/octet-stream")} params = {"projectId": proj_id, "multiple": "true", "category": fa_id} req_url = "".join([self.rtc_obj.url, "/service/com.ibm.team.workitem.service.", "internal.rest.IAttachmentRestService/"]) resp = self.post(req_url, verify=False, headers=headers, proxies=self.rtc_obj.proxies, params=params, files=files) raw_data = xmltodict.parse(resp.content) json_body = json.loads(raw_data["html"]["body"]["textarea"]) attachment_info = json_body["files"][0] return self._add_attachment_link(attachment_info)
Upload attachment to a workitem :param filepath: the attachment file path :return: the :class:`rtcclient.models.Attachment` object :rtype: rtcclient.models.Attachment
384,062
def onset_detect(y=None, sr=22050, onset_envelope=None, hop_length=512, backtrack=False, energy=None, units=, **kwargs): return onsets
Basic onset detector. Locate note onset events by picking peaks in an onset strength envelope. The `peak_pick` parameters were chosen by large-scale hyper-parameter optimization over the dataset provided by [1]_. .. [1] https://github.com/CPJKU/onset_db Parameters ---------- y : np.ndarray [shape=(n,)] audio time series sr : number > 0 [scalar] sampling rate of `y` onset_envelope : np.ndarray [shape=(m,)] (optional) pre-computed onset strength envelope hop_length : int > 0 [scalar] hop length (in samples) units : {'frames', 'samples', 'time'} The units to encode detected onset events in. By default, 'frames' are used. backtrack : bool If `True`, detected onset events are backtracked to the nearest preceding minimum of `energy`. This is primarily useful when using onsets as slice points for segmentation. energy : np.ndarray [shape=(m,)] (optional) An energy function to use for backtracking detected onset events. If none is provided, then `onset_envelope` is used. kwargs : additional keyword arguments Additional parameters for peak picking. See `librosa.util.peak_pick` for details. Returns ------- onsets : np.ndarray [shape=(n_onsets,)] estimated positions of detected onsets, in whichever units are specified. By default, frame indices. .. note:: If no onset strength could be detected, onset_detect returns an empty list. Raises ------ ParameterError if neither `y` nor `onsets` are provided or if `units` is not one of 'frames', 'samples', or 'time' See Also -------- onset_strength : compute onset strength per-frame onset_backtrack : backtracking onset events librosa.util.peak_pick : pick peaks from a time series Examples -------- Get onset times from a signal >>> y, sr = librosa.load(librosa.util.example_audio_file(), ... offset=30, duration=2.0) >>> onset_frames = librosa.onset.onset_detect(y=y, sr=sr) >>> librosa.frames_to_time(onset_frames, sr=sr) array([ 0.07 , 0.395, 0.511, 0.627, 0.766, 0.975, 1.207, 1.324, 1.44 , 1.788, 1.881]) Or use a pre-computed onset envelope >>> o_env = librosa.onset.onset_strength(y, sr=sr) >>> times = librosa.frames_to_time(np.arange(len(o_env)), sr=sr) >>> onset_frames = librosa.onset.onset_detect(onset_envelope=o_env, sr=sr) >>> import matplotlib.pyplot as plt >>> D = np.abs(librosa.stft(y)) >>> plt.figure() >>> ax1 = plt.subplot(2, 1, 1) >>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max), ... x_axis='time', y_axis='log') >>> plt.title('Power spectrogram') >>> plt.subplot(2, 1, 2, sharex=ax1) >>> plt.plot(times, o_env, label='Onset strength') >>> plt.vlines(times[onset_frames], 0, o_env.max(), color='r', alpha=0.9, ... linestyle='--', label='Onsets') >>> plt.axis('tight') >>> plt.legend(frameon=True, framealpha=0.75)
384,063
def download(self, url, destination_path): self._pbar_url.update_total(1) future = self._executor.submit(self._sync_download, url, destination_path) return promise.Promise.resolve(future)
Download url to given path. Returns Promise -> sha256 of downloaded file. Args: url: address of resource to download. destination_path: `str`, path to directory where to download the resource. Returns: Promise obj -> (`str`, int): (downloaded object checksum, size in bytes).
384,064
def get_cli_static_event_returns(
        self,
        jid,
        minions,
        timeout=None,
        tgt='*',
        tgt_type='glob',
        verbose=False,
        show_timeout=False,
        show_jid=False):
    # Log/event strings and option keys were stripped during extraction;
    # they are reconstructed from the Salt client conventions and may
    # differ in wording from the original.
    log.trace('entered - function get_cli_static_event_returns()')
    minions = set(minions)
    if verbose:
        msg = 'Executing job with jid {0}'.format(jid)
        print(msg)
        print('-' * len(msg) + '\n')
    elif show_jid:
        print('jid: {0}'.format(jid))

    if timeout is None:
        timeout = self.opts['timeout']

    start = int(time.time())
    timeout_at = start + timeout
    found = set()
    ret = {}
    # Check to see if the jid is real; if not, return the empty dict
    try:
        if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}:
            log.warning('jid does not exist')
            return ret
    except Exception as exc:
        raise SaltClientError(
            'Master job cache returner [{0}] failed to verify jid. '
            'Exception details: {1}'.format(self.opts['master_job_cache'], exc))

    while True:
        time_left = timeout_at - int(time.time())
        wait = max(1, time_left)
        jid_tag = 'salt/job/{0}'.format(jid)
        raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect)
        if raw is not None and 'return' in raw:
            if 'minions' in raw.get('data', {}):
                minions.update(raw['data']['minions'])
                continue
            found.add(raw['id'])
            ret[raw['id']] = {'ret': raw['return']}
            ret[raw['id']]['success'] = raw.get('success', False)
            if 'out' in raw:
                ret[raw['id']]['out'] = raw['out']
            if len(found.intersection(minions)) >= len(minions):
                # All minions have returned, break out of the loop
                break
            continue
        # The event system timeout was reached and nothing was returned
        if len(found.intersection(minions)) >= len(minions):
            # All minions have returned, break out of the loop
            break
        if int(time.time()) > timeout_at:
            if verbose or show_timeout:
                if self.opts.get('minion_data_cache', False) \
                        or tgt_type in ('glob', 'pcre', 'list'):
                    if len(found) < len(minions):
                        fail = sorted(list(minions.difference(found)))
                        for minion in fail:
                            ret[minion] = {
                                'out': 'no_return',
                                'ret': 'Minion did not return'
                            }
            break
        time.sleep(0.01)

    self._clean_up_subscriptions(jid)
    return ret
Get the returns for the command line interface via the event system
384,065
def _handle_wrong_field(cls, field_name, field_type):
    # The attribute-kind labels and message templates were stripped during
    # extraction; the wording below is a hedged reconstruction.
    if field_type == ATTR_TYPE_READ:
        field_type = 'read'
    elif field_type == ATTR_TYPE_WRITE:
        field_type = 'write'
    elif field_type == ATTR_TYPE_URL:
        field_type = 'url'
    else:
        raise AttributeError('Unexpected attribute type: {}'.format(
            field_type
        ))
    msg = '{} does not have a {} attribute named {}'.format(
        cls.__name__,
        field_type,
        field_name
    )
    _logger.error(msg)
    raise AttributeError(msg)
Raise an exception whenever an invalid attribute with the given name was attempted to be set to or retrieved from this model class. Assumes that the given field is invalid, without making any checks. Also adds an entry to the logs.
384,066
def _apply_section(self, section, hosts):
    # The dict keys ('name', 'groups', 'type') were stripped during
    # extraction and are reconstructed from the func_map keys below.
    if section['name'] is not None:
        for hostname in self._group_get_hostnames(section['name']):
            hosts[hostname]['groups'].add(section['name'])

    func_map = {
        "hosts": self._apply_section_hosts,
        "children": self._apply_section_children,
        "vars": self._apply_section_vars,
    }
    func = func_map[section['type']]
    func(section, hosts)
Recursively find all the hosts that belong in or under a section and add the section's group name and variables to every host.
384,067
def _unsorted_set(df, label, **kwargs): out = "*NSET, NSET={0}, UNSORTED\n".format(label) labels = df.index.values return out + argiope.utils.list_to_string(labels, **kwargs)
Returns a set as inp string with unsorted option.
384,068
def _get_delta(self, now, then): if now.__class__ is not then.__class__: now = datetime.date(now.year, now.month, now.day) then = datetime.date(then.year, then.month, then.day) if now < then: raise ValueError("Cannot determine moderation rules because date field is set to a value in the future") return now - then
Internal helper which will return a ``datetime.timedelta`` representing the time between ``now`` and ``then``. Assumes ``now`` is a ``datetime.date`` or ``datetime.datetime`` later than ``then``. If ``now`` and ``then`` are not of the same type due to one of them being a ``datetime.date`` and the other being a ``datetime.datetime``, both will be coerced to ``datetime.date`` before calculating the delta.
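The coercion rule is easy to demonstrate standalone; this sketch mirrors the branch above:

import datetime

now = datetime.datetime(2024, 5, 2, 12, 30)   # a datetime...
then = datetime.date(2024, 4, 30)             # ...compared against a date
if now.__class__ is not then.__class__:
    # mixed types: flatten both to plain dates before subtracting
    now = datetime.date(now.year, now.month, now.day)
print(now - then)                             # 2 days, 0:00:00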
384,069
def sff(args):
    p = OptionParser(sff.__doc__)
    p.add_option("--prefix", dest="prefix", default=None,
                 help="Output frg filename prefix")
    p.add_option("--nodedup", default=False, action="store_true",
                 help="Do not remove duplicates [default: %default]")
    p.set_size()
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(p.print_help())

    sffiles = args
    plates = [x.split(".")[0].split("_")[-1] for x in sffiles]

    mated = (opts.size != 0)
    mean, sv = get_mean_sv(opts.size)

    if len(plates) > 1:
        # hedged guess: the shared-prefix placeholder literal was stripped
        plate = plates[0][:-1] + "X"
    else:
        plate = "_".join(plates)

    if mated:
        libname = "Titan{0}Kb-".format(opts.size / 1000) + plate
    else:
        libname = "TitanFrags-" + plate

    if opts.prefix:
        libname = opts.prefix

    cmd = "sffToCA"
    cmd += " -libraryname {0} -output {0} ".format(libname)
    cmd += " -clear 454 -trim chop "
    if mated:
        cmd += " -linker titanium -insertsize {0} {1} ".format(mean, sv)
    if opts.nodedup:
        cmd += " -nodedup "

    cmd += " ".join(sffiles)

    sh(cmd)
%prog sff sffiles Convert reads formatted as 454 SFF file, and convert to CA frg file. Turn --nodedup on if another deduplication mechanism is used (e.g. CD-HIT-454). See assembly.sff.deduplicate().
384,070
def read_meminfo(): data = {} with open("/proc/meminfo", "rb") as meminfo_file: for row in meminfo_file: fields = row.split() data[fields[0].decode("ascii")[:-1]] = int(fields[1]) * 1024 return data
Returns system memory usage information. :returns: The system memory usage. :rtype: dict
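Usage is straightforward on a Linux host (the function reads /proc/meminfo, so it will not run elsewhere); note the values come back already scaled to bytes:

info = read_meminfo()
print("MemTotal: %.1f GiB" % (info["MemTotal"] / 1024 ** 3))
print("MemFree:  %.1f GiB" % (info["MemFree"] / 1024 ** 3))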
384,071
def register(self, event_type: Union[Type, _ellipsis], callback: Callable[[], Any]): if not isinstance(event_type, type) and event_type is not ...: raise TypeError(f"{type(self)}.register requires event_type to be a type.") if not callable(callback): raise TypeError(f"{type(self)}.register requires callback to be callable.") self.event_extensions[event_type].append(callback)
Register a callback to be applied to an event at time of publishing. Primarily to be used by subsystems. The callback will receive the event. Your code should modify the event in place. It does not need to return it. :param event_type: The class of an event. :param callback: A callable, must accept an event, and return no value. :return: None
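A sketch of how a subsystem might use this (hedged: `bus` stands for an instance of the class that owns this method; `BallCollision` is an illustrative event type):

class BallCollision:
    frame = None            # illustrative event type

def stamp_frame(event):
    event.frame = 42        # mutate the event in place; no return value needed

bus.register(BallCollision, stamp_frame)   # runs for BallCollision events
bus.register(..., print)                   # Ellipsis: runs for every event type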
384,072
def print_invalid_chars(invalid_chars, vargs):
    if len(invalid_chars) > 0:
        if vargs["print_invalid"]:
            print(u"".join(invalid_chars))
        if vargs["unicode"]:
            for u_char in sorted(set(invalid_chars)):
                # three fields: the character, its code point, and its Unicode
                # name (the original format string was stripped; it must have
                # had three placeholders to match the three arguments)
                print(u"'%s'\t%s\t%s" % (u_char, hex(ord(u_char)),
                                         unicodedata.name(u_char, "UNKNOWN")))
Print Unicode characters that are not IPA-valid, if requested by the user.

:param list invalid_chars: a list (possibly empty) of invalid Unicode characters
:param dict vargs: the command line parameters
384,073
def drain(iterable):
    if getattr(iterable, "popleft", False):
        def next_item(coll):
            return coll.popleft()
    elif getattr(iterable, "popitem", False):
        def next_item(coll):
            return coll.popitem()
    else:
        def next_item(coll):
            return coll.pop()
    while True:
        try:
            yield next_item(iterable)
        except (IndexError, KeyError):
            # the original raised StopIteration here; under PEP 479
            # (Python 3.7+) that becomes a RuntimeError, so return instead
            return
Helper method that empties an iterable as it is iterated over. Works for: * ``dict`` * ``collections.deque`` * ``list`` * ``set``
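Behaviour at a glance; note the source container is emptied as a side effect of iteration:

from collections import deque

print(list(drain(deque([1, 2, 3]))))   # [1, 2, 3]  (popleft: FIFO order)
print(list(drain([1, 2, 3])))          # [3, 2, 1]  (pop: LIFO order)

d = {"a": 1, "b": 2}
pairs = list(drain(d))                 # [('b', 2), ('a', 1)] or similar
print(d)                               # {} -- the dict is now empty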
384,074
def submit_msql_object_query(object_query, client=None): client = client or get_new_client() if not client.session_id: client.request_session() result = client.execute_object_query(object_query) execute_msql_result = result["body"]["ExecuteMSQLResult"] membersuite_object_list = [] if execute_msql_result["Success"]: result_value = execute_msql_result["ResultValue"] if result_value["ObjectSearchResult"]["Objects"]: membersuite_object_list = [] for obj in (result_value["ObjectSearchResult"]["Objects"] ["MemberSuiteObject"]): membersuite_object = membersuite_object_factory(obj) membersuite_object_list.append(membersuite_object) elif result_value["SingleObject"]["ClassType"]: membersuite_object = membersuite_object_factory( execute_msql_result["ResultValue"]["SingleObject"]) membersuite_object_list.append(membersuite_object) elif (result_value["ObjectSearchResult"]["Objects"] is None and result_value["SingleObject"]["ClassType"] is None): raise NoResultsError(result=execute_msql_result) return membersuite_object_list else: raise ExecuteMSQLError(result=execute_msql_result)
Submit `object_query` to MemberSuite, returning .models.MemberSuiteObjects. So this is a converter from MSQL to .models.MemberSuiteObjects. Returns query results as a list of MemberSuiteObjects.
384,075
def push_intent(self, intent):
    # Status messages reconstructed (hedged); the originals were stripped.
    if intent.id:
        print('Updating intent {}'.format(intent.name))
        self.update(intent)
    else:
        print('Registering intent {}'.format(intent.name))
        intent = self.register(intent)
    return intent
Registers or updates an intent and returns the intent_json with an ID
384,076
def bookmark(ctx):
    # The click-context keys and user-facing messages were stripped during
    # extraction; 'project'/'build' and the error wording follow the CLI's
    # conventions and are hedged reconstructions.
    user, project_name, _build = get_build_or_local(ctx.obj.get('project'),
                                                    ctx.obj.get('build'))
    try:
        PolyaxonClient().build_job.bookmark(user, project_name, _build)
    except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
        Printer.print_error('Could not bookmark build job `{}`.'.format(_build))
        Printer.print_error('Error message `{}`.'.format(e))
        sys.exit(1)
    Printer.print_success("Build job bookmarked.")
Bookmark build job. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon build bookmark ``` \b ```bash $ polyaxon build -b 2 bookmark ```
384,077
def _parse_interfaces(interface_files=None):
    if interface_files is None:
        interface_files = []
        if os.path.exists(_DEB_NETWORK_DIR):
            interface_files += ['{0}/{1}'.format(_DEB_NETWORK_DIR, dir)
                                for dir in os.listdir(_DEB_NETWORK_DIR)]
        if os.path.isfile(_DEB_NETWORK_FILE):
            interface_files.insert(0, _DEB_NETWORK_FILE)

    adapters = salt.utils.odict.OrderedDict()
    method = -1

    for interface_file in interface_files:
        with salt.utils.files.fopen(interface_file) as interfaces:
            # the line-by-line parsing of each interfaces(5) file is missing
            # from this record; only the skeleton and the return survive
            pass

    return adapters
Parse /etc/network/interfaces and return current configured interfaces
384,078
def blast_pdb(seq, outfile='', outdir='', evalue=0.0001, seq_ident_cutoff=0.0,
              link=False, force_rerun=False):
    # All string literals in this function were stripped during extraction.
    # The REST URL, XML tag names, and result-dict keys below are
    # reconstructed from the legacy RCSB PDB BLAST service conventions and
    # the surrounding parsing logic; treat them as hedged guesses.
    if len(seq) < 12:
        raise ValueError('Sequence must be at least 12 residues long')

    if link:
        page = ('http://www.rcsb.org/pdb/rest/getBlastPDB1'
                '?sequence={}&eCutOff={}&maskLowComplexity=yes'
                '&matrix=BLOSUM62&outputFormat=HTML'.format(seq, evalue))
        print(page)

    parser = etree.XMLParser(ns_clean=True)

    outfile = op.join(outdir, outfile)
    if ssbio.utils.force_rerun(force_rerun, outfile):
        page = ('http://www.rcsb.org/pdb/rest/getBlastPDB1'
                '?sequence={}&eCutOff={}&maskLowComplexity=yes'
                '&matrix=BLOSUM62&outputFormat=XML'.format(seq, evalue))
        req = requests.get(page)
        if req.status_code == 200:
            response = req.text
            if outfile:
                with open(outfile, 'w') as f:
                    f.write(response)
            tree = etree.ElementTree(etree.fromstring(response, parser))
            log.debug('Loaded BLAST results from REST server')
        else:
            log.error('BLAST request failed')
            return []
    else:
        tree = etree.parse(outfile, parser)
        log.debug('{}: loaded existing BLAST XML results'.format(outfile))

    len_orig = float(len(seq))
    root = tree.getroot()
    hit_list = []

    for hit in root.findall('BlastOutput_iterations/Iteration/Iteration_hits/Hit'):
        info = {}

        hitdef = hit.find('Hit_def')
        if hitdef is not None:
            # Hit_def looks like '1ABC:1:A,B|PDBID|CHAIN|SEQUENCE'
            info['hit_pdb'] = hitdef.text.split('|')[0].split(':')[0].lower()
            info['hit_pdb_chains'] = hitdef.text.split('|')[0].split(':')[2].split(',')

        hsp = hit.findall('Hit_hsps/Hsp')[0]

        hspi = hsp.find('Hsp_identity')
        if hspi is not None:
            info['hit_num_ident'] = int(hspi.text)
            info['hit_percent_ident'] = int(hspi.text) / len_orig
            if int(hspi.text) / len_orig < seq_ident_cutoff:
                log.debug('{}: does not meet sequence identity cutoff'.format(
                    hitdef.text.split('|')[0].split(':')[0]))
                continue

        hspp = hsp.find('Hsp_positive')
        if hspp is not None:
            info['hit_num_similar'] = int(hspp.text)
            info['hit_percent_similar'] = int(hspp.text) / len_orig

        hspg = hsp.find('Hsp_gaps')
        if hspg is not None:
            info['hit_num_gaps'] = int(hspg.text)
            info['hit_percent_gaps'] = int(hspg.text) / len_orig

        hspe = hsp.find('Hsp_evalue')
        if hspe is not None:
            info['hit_evalue'] = float(hspe.text)

        hsps = hsp.find('Hsp_score')
        if hsps is not None:
            info['hit_score'] = float(hsps.text)

        hit_list.append(info)

    log.debug("{}: Number of BLAST hits".format(len(hit_list)))
    return hit_list
Returns a list of BLAST hits of a sequence to available structures in the PDB. Args: seq (str): Your sequence, in string format outfile (str): Name of output file outdir (str, optional): Path to output directory. Default is the current directory. evalue (float, optional): Cutoff for the E-value - filters for significant hits. 0.001 is liberal, 0.0001 is stringent (default). seq_ident_cutoff (float, optional): Cutoff results based on percent coverage (in decimal form) link (bool, optional): Set to True if a link to the HTML results should be displayed force_rerun (bool, optional): If existing BLAST results should not be used, set to True. Default is False Returns: list: Rank ordered list of BLAST hits in dictionaries.
384,079
def where_before_entry(query, ref): return orm.select( e for e in query if e.local_date < ref.local_date or (e.local_date == ref.local_date and e.id < ref.id) )
Generate a where clause for prior entries ref -- The entry of reference
384,080
def has_friends(self, flt=FriendFilter.ALL): return self._iface.get_has_friend(self.user_id, flt)
Indicates whether the user has friends who meet the given criteria (filter).

:param int flt: Filter value from FriendFilter. Filters can be combined with `|`.

:rtype: bool
384,081
def run_step(context): logger.debug("started") CmdStep(name=__name__, context=context).run_step(is_shell=True) logger.debug("done")
Run shell command without shell interpolation. Context is a dictionary or dictionary-like. Context must contain the following keys: cmd: <<cmd string>> (command + args to execute.) OR, as a dict cmd: run: str. mandatory. <<cmd string>> command + args to execute. save: bool. defaults False. save output to cmdOut. Will execute command string in the shell as a sub-process. The shell defaults to /bin/sh. The context['cmd'] string must be formatted exactly as it would be when typed at the shell prompt. This includes, for example, quoting or backslash escaping filenames with spaces in them. There is an exception to this: Escape curly braces: if you want a literal curly brace, double it like {{ or }}. If save is True, will save the output to context as follows: cmdOut: returncode: 0 stdout: 'stdout str here. None if empty.' stderr: 'stderr str here. None if empty.' cmdOut.returncode is the exit status of the called process. Typically 0 means OK. A negative value -N indicates that the child was terminated by signal N (POSIX only). context['cmd'] will interpolate anything in curly braces for values found in context. So if your context looks like this: key1: value1 key2: value2 cmd: mything --arg1 {key1} The cmd passed to the shell will be "mything --arg value1"
384,082
def put(self, deviceId, measurementId):
    # Log/status strings were stripped during extraction; the literals used
    # here are hedged reconstructions, and the status comparison uses '=='
    # semantics ('in') rather than the identity check in the original.
    record = self.measurements.get(deviceId)
    if record is not None:
        measurement = record.get(measurementId)
        if measurement is not None:
            finished = [x.name for x in measurement.statuses
                        if x.name in ('COMPLETE', 'FAILED')]
            if len(finished) > 0:
                logger.info('Removing finished measurement ' + measurementId)
                measurement = None
        if measurement is None:
            logger.info('Scheduling measurement ' + measurementId)
            measurement = ScheduledMeasurement(measurementId,
                                               self.recordingDevices.get(deviceId))
            body = request.get_json()
            duration_ = body['duration']

            def _cleanup():
                logger.info('Removing completed measurement ' + measurementId +
                            ' from ' + deviceId)
                record.pop(measurementId)

            measurement.schedule(duration_, at=body.get('at'),
                                 delay=body.get('delay'), callback=_cleanup)
            record[measurementId] = measurement
            return measurement, 200
        else:
            return measurement, 400
    else:
        return 'Unknown device ' + deviceId, 400
Schedules a new measurement at the specified time. :param deviceId: the device to measure. :param measurementId: the name of the measurement. :return: 200 if it was scheduled, 400 if the device is busy, 500 if the device is bad.
384,083
def atlasdb_num_peers(con=None, path=None):
    with AtlasDBOpen(con=con, path=path) as dbcon:
        sql = "SELECT MAX(peer_index) FROM peers;"
        args = ()

        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur, sql, args)

        ret = []
        for row in res:
            tmp = {}
            tmp.update(row)
            ret.append(tmp)

        assert len(ret) == 1
        # the stripped key is the column name produced by the aggregate above
        return ret[0]['MAX(peer_index)']
How many peers are there in the db?
384,084
def object(self, infotype, key): with self.pipe as pipe: return pipe.object(infotype, self.redis_key(key))
get the key's info stats :param name: str the name of the redis key :param subcommand: REFCOUNT | ENCODING | IDLETIME :return: Future()
384,085
def compute_residuals(self):
    # Option keys reconstructed (hedged): 'RelStopTol' and the
    # ('AutoStop', 'Enabled') pair follow this solver family's option naming.
    r = self.rsdl()
    adapt_tol = self.opt['RelStopTol']
    if self.opt['AutoStop', 'Enabled']:
        adapt_tol = self.tau0 / (1. + self.k)
    return r, adapt_tol
Compute residuals and stopping thresholds.
384,086
def lane_stats_table(self):
    # Header keys, titles, and table-config strings were stripped during
    # extraction; the values below are reconstructed to match the visible
    # format() calls and dict shapes, so the exact wording is a hedged guess.
    headers = OrderedDict()
    headers['total_yield'] = {
        'title': '{} Total Yield'.format(config.base_count_prefix),
        'description': 'Number of bases ({})'.format(config.base_count_desc),
        'scale': 'Greens',
        'shared_key': 'base_count'
    }
    headers['total'] = {
        'title': '{} Total Clusters'.format(config.read_count_prefix),
        'description': 'Total number of clusters for this lane ({})'.format(
            config.read_count_desc),
        'scale': 'Blues',
        'shared_key': 'read_count'
    }
    headers['percent_Q30'] = {
        'title': '% bases &ge; Q30',
        'description': 'Percentage of bases with a Phred score of 30 or higher',
        'suffix': '%',
        'max': 100,
        'min': 0,
        'scale': 'RdYlGn'
    }
    headers['mean_qscore'] = {
        'title': 'Mean Quality',
        'description': 'Average Phred quality score',
        'min': 0,
        'scale': 'Spectral'
    }
    headers['percent_perfectIndex'] = {
        'title': '% Perfect Index',
        'description': 'Percent of reads with perfect index (0 mismatches)',
        'max': 100,
        'min': 0,
        'scale': 'RdYlGn',
        'suffix': '%'
    }
    table_config = {
        'namespace': 'bcl2fastq',
        'id': 'bcl2fastq-lane-stats-table',
        'table_title': 'bcl2fastq Lane Statistics',
        'col1_header': 'Run ID - Lane',
        'no_beeswarm': True
    }
    return table.plot(self.bcl2fastq_bylane, headers, table_config)
Return a table with overview stats for each bcl2fastq lane for a single flow cell
384,087
def airwires(board, showgui=0):
    # The temp-file suffixes and the ULP placeholder token were stripped
    # during extraction; '.txt'/'.ulp' and 'FILE_NAME' are hedged guesses.
    board = Path(board).expand().abspath()
    file_out = tempfile.NamedTemporaryFile(suffix='.txt', delete=0)
    file_out.close()
    ulp = ulp_templ.replace('FILE_NAME', file_out.name)
    file_ulp = tempfile.NamedTemporaryFile(suffix='.ulp', delete=0)
    file_ulp.write(ulp.encode())
    file_ulp.close()
    commands = ['RUN ' + file_ulp.name, 'QUIT', ]
    command_eagle(board, commands=commands, showgui=showgui)
    n = int(Path(file_out.name).text())
    Path(file_out.name).remove()
    Path(file_ulp.name).remove()
    return n
search for airwires in eagle board
384,088
def ARC4_encrypt(key, data, skip=0): algorithm = algorithms.ARC4(key) cipher = Cipher(algorithm, mode=None, backend=default_backend()) encryptor = cipher.encryptor() if skip: encryptor.update(b"\x00" * skip) return encryptor.update(data)
Encrypt data @data with key @key, skipping @skip first bytes of the keystream
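Because RC4 is a symmetric stream cipher, the same call decrypts; a round-trip check (assuming the `cryptography` package backing the function is installed):

key = b"\x01\x02\x03\x04\x05"                # 40-bit key, the smallest ARC4 allows
ct = ARC4_encrypt(key, b"hello", skip=256)   # RC4-drop[256]: discard early keystream
assert ARC4_encrypt(key, ct, skip=256) == b"hello"   # same op restores the plaintext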
384,089
def send_element(self, element): with self.lock: if self._eof or self._socket is None or not self._serializer: logger.debug("Dropping element: {0}".format( element_to_unicode(element))) return data = self._serializer.emit_stanza(element) self._write(data.encode("utf-8"))
Send an element via the transport.
384,090
def cowsay_output(message):
    # The command template was stripped during extraction; 'cowsay %s' is
    # the obvious reconstruction. Note the message is not shell-escaped.
    command = 'cowsay %s' % message
    ret = subprocess.Popen(
        command,
        shell=True,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=True)
    output, error = ret.communicate()
    return output, error
Invoke a shell command to print cowsay output. Primary replacement for os.system calls.
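Usage sketch; since the message is spliced into a shell command unescaped, untrusted input should be quoted first (e.g. with shlex.quote):

output, error = cowsay_output("moo")
if error:
    print("cowsay failed:", error.decode())
else:
    print(output.decode())      # the ASCII cow, if cowsay is installed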
384,091
def update_from_dict(self, data_dict): for k, v in data_dict.items(): setattr(self, k, v) if "item_queue_id" in data_dict: self.id = data_dict["item_queue_id"]
:param data_dict: Dictionary to be mapped into object attributes :type data_dict: dict :return:
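A small sketch (hedged: `QueueItem` is an illustrative host class; the source only shows the method):

class QueueItem:
    update_from_dict = update_from_dict    # reuse the method defined above

item = QueueItem()
item.update_from_dict({"status": "queued", "item_queue_id": 17})
print(item.status, item.id)                # queued 17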
384,092
def modify(self, max_time_out_of_sync=None, name=None,
           hourly_snap_replication_policy=None,
           daily_snap_replication_policy=None,
           src_spa_interface=None, src_spb_interface=None,
           dst_spa_interface=None, dst_spb_interface=None):
    req_body = self._cli.make_body(
        maxTimeOutOfSync=max_time_out_of_sync,
        name=name,
        hourlySnapReplicationPolicy=hourly_snap_replication_policy,
        dailySnapReplicationPolicy=daily_snap_replication_policy,
        srcSPAInterface=src_spa_interface,
        srcSPBInterface=src_spb_interface,
        dstSPAInterface=dst_spa_interface,
        dstSPBInterface=dst_spb_interface)
    # the stripped action name is restored from the method's purpose
    resp = self.action('modify', **req_body)
    resp.raise_if_err()
    return resp
Modifies properties of a replication session. :param max_time_out_of_sync: same as the one in `create` method. :param name: same as the one in `create` method. :param hourly_snap_replication_policy: same as the one in `create` method. :param daily_snap_replication_policy: same as the one in `create` method. :param src_spa_interface: same as the one in `create` method. :param src_spb_interface: same as the one in `create` method. :param dst_spa_interface: same as the one in `create` method. :param dst_spb_interface: same as the one in `create` method.
384,093
def path_expand(text): result = os.path.expandvars(os.path.expanduser(text)) if result.startswith("."): result = result.replace(".", os.getcwd(), 1) return result
Returns a string with environment variables and user home expanded.

:param text: the path to be expanded, which can include ~ and environment $ variables
:type text: string
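Quick examples of the three expansions it performs (~, $VARS, and a leading '.'):

import os

os.environ["DATA_DIR"] = "/tmp/data"
print(path_expand("$DATA_DIR/logs"))   # /tmp/data/logs
print(path_expand("~/notes.txt"))      # e.g. /home/alice/notes.txt
print(path_expand("./cache"))          # <current working dir>/cache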
384,094
def predict(self, nSteps):
    # The state-dict keys ('t', 't-1') were stripped during extraction and
    # are restored following the temporal-memory convention used elsewhere
    # in this class.
    pristineTPDynamicState = self._getTPDynamicState()

    assert (nSteps > 0)

    multiStepColumnPredictions = numpy.zeros((nSteps, self.numberOfCols),
                                             dtype="float32")

    step = 0
    while True:
        multiStepColumnPredictions[step, :] = self.topDownCompute()

        if step == nSteps - 1:
            break
        step += 1

        # shift the current state into the previous-timestep slots
        self.infActiveState['t-1'][:, :] = self.infActiveState['t'][:, :]
        self.infPredictedState['t-1'][:, :] = self.infPredictedState['t'][:, :]
        self.cellConfidence['t-1'][:, :] = self.cellConfidence['t'][:, :]

        # the predicted cells become the next step's active cells
        self.infActiveState['t'][:, :] = self.infPredictedState['t-1'][:, :]
        self.infPredictedState['t'].fill(0)
        self.cellConfidence['t'].fill(0.0)
        self._inferPhase2()

    self._setTPDynamicState(pristineTPDynamicState)

    return multiStepColumnPredictions
This function gives the future predictions for <nSteps> timesteps starting
from the current TM state. The TM is returned to its original state at the
end before returning.

1. We save the TM state.
2. Loop for nSteps:
   a. Turn on inference with lateral support from the current active cells.
   b. Set the predicted cells as the next step's active cells. (In the learn
      and infer methods, this step uses the input to correct the predictions;
      we don't use any input here.)
3. Revert the TM state back to the time before prediction.

:param nSteps: (int) The number of future time steps to be predicted
:returns: all the future predictions - a numpy array of type "float32" and
          shape (nSteps, numberOfCols). The ith row gives the tm prediction
          for each column at a future timestep (t+i+1).
384,095
def et_node_to_string(et_node, default=''):
    # default='' restored: the literal was stripped during extraction and
    # the docstring documents '' as the default.
    return str(et_node.text).strip() if et_node is not None and et_node.text else default
Simple method to get stripped text from node or ``default`` string if None is given. :param et_node: Element or None :param default: string returned if None is given, default ``''`` :type et_node: xml.etree.ElementTree.Element, None :type default: str :return: text from node or default :rtype: str
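Behaviour with present, empty, and missing nodes:

import xml.etree.ElementTree as ET

root = ET.fromstring("<item><name>  widget  </name><price/></item>")
print(et_node_to_string(root.find("name")))            # 'widget' (stripped)
print(et_node_to_string(root.find("price"), "n/a"))    # 'n/a' (empty node)
print(et_node_to_string(root.find("missing"), "n/a"))  # 'n/a' (no such node)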
384,096
def import_end_event_to_graph(diagram_graph, process_id, process_attributes, element):
    # The definition names were stripped during extraction; these six are the
    # standard BPMN end-event definitions (message, signal, escalation, error,
    # compensation, terminate) matching the docstring.
    end_event_definitions = {'messageEventDefinition',
                             'signalEventDefinition',
                             'escalationEventDefinition',
                             'errorEventDefinition',
                             'compensateEventDefinition',
                             'terminateEventDefinition'}
    BpmnDiagramGraphImport.import_flow_node_to_graph(diagram_graph, process_id,
                                                     process_attributes, element)
    BpmnDiagramGraphImport.import_event_definition_elements(diagram_graph, element,
                                                            end_event_definitions)
Adds to graph the new element that represents BPMN end event. End event inherits sequence of eventDefinitionRef from Event type. Separate methods for each event type are required since each of them has different variants (Message, Error, Signal etc.). :param diagram_graph: NetworkX graph representing a BPMN process diagram, :param process_id: string object, representing an ID of process element, :param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of imported flow node, :param element: object representing a BPMN XML 'endEvent' element.
384,097
def check(self):
    # the noun literals were stripped; 'worker' and 'datanode' are taken
    # from the docstring
    status = _checkContainerStatus(self.sparkContainerID, self.hdfsContainerID,
                                   sparkNoun='worker', hdfsNoun='datanode')
    return status
Checks to see if Spark worker and HDFS datanode are still running.
384,098
def create_fc_template(self, out_path, out_name):
    fields = self.fields
    objectIdField = self.objectIdField
    geomType = self.geometryType
    wkid = self.parentLayer.spatialReference['wkid']  # stripped key restored
    return create_feature_class(out_path, out_name, geomType, wkid, fields,
                                objectIdField)
creates a featureclass template on local disk
384,099
def suggest(alias, max=3, cutoff=0.5): aliases = matchers.keys() similar = get_close_matches(alias, aliases, n=max, cutoff=cutoff) return similar
Suggest a list of aliases which are similar enough
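An illustrative call (hedged: `matchers` is the module-level registry the function closes over; the alias names here are made up):

matchers = {"python": object(), "pylint": object(), "pytest": object()}
print(suggest("pyhton"))          # ['python', ...] -- difflib similarity >= 0.5
print(suggest("pylnt", max=1))    # ['pylint']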