Dataset preview — columns: an unnamed int64 row index (0 to 389k), "code" (string, lengths 26 to 79.6k), and "docstring" (string, lengths 1 to 46.9k).
378,800
def _expand_tasks(self, scopes):
    expanded_scopes = set(scopes)
    for scope, info in self._scope_to_info.items():
        if info.category == ScopeInfo.TASK:
            outer = enclosing_scope(scope)
            while outer != GLOBAL_SCOPE:
                if outer in expanded_scopes:
                    expanded_scopes.add(scope)
                    break
                outer = enclosing_scope(outer)
    return sorted(expanded_scopes)
Add all tasks in any requested goals. Returns the requested scopes, plus the added tasks, sorted by scope name.
378,801
def bulkImport_json(self, filename, onDuplicate="error", formatType="auto", **params):
    url = "%s/import" % self.database.URL
    params["onDuplicate"] = onDuplicate
    params["collection"] = self.name
    params["type"] = formatType
    with open(filename) as f:
        data = f.read()
    r = self.connection.session.post(url, params=params, data=data)
    try:
        errorMessage = ("At least: %d errors. The first one is: '%s'\n\n more in <this_exception>.data"
                        % (len(data), data[0]["errorMessage"]))
    except KeyError:
        raise UpdateError(data[0], data)
bulk import from a file respecting arango's key/value format
378,802
def printArchive(fileName):
    archive = CombineArchive()
    if archive.initializeFromArchive(fileName) is None:
        print("Invalid Combine Archive")
        return None
    print('*' * 80)
    print(fileName)
    print('*' * 80)
    printMetaDataFor(archive, ".")
    print("Num Entries: {0}".format(archive.getNumEntries()))
    for i in range(archive.getNumEntries()):
        entry = archive.getEntry(i)
        print(" {0}: location: {1} format: {2}".format(i, entry.getLocation(), entry.getFormat()))
        printMetaDataFor(archive, entry.getLocation())
        for j in range(entry.getNumCrossRefs()):
            print("  {0}: crossRef location {1}".format(j, entry.getCrossRef(j).getLocation()))
    archive.cleanUp()
Prints content of combine archive :param fileName: path of archive :return: None
378,803
def gameValue(self):
    allowed = type(self).ALLOWED_TYPES
    try:
        if isinstance(allowed, dict):
            return allowed.get(self.type.name)
    except Exception:
        pass
    return None
identify the corresponding internal SC2 game value for self.type's value
378,804
def sanitize_git_path(self, uri, ref=None):
    if uri.endswith(".git"):
        dir_name = uri[:-4]
    else:
        dir_name = uri
    dir_name = self.sanitize_uri_path(dir_name)
    if ref is not None:
        dir_name += "-%s" % ref
    return dir_name
Takes a git URI and ref and converts them to a directory-safe path. Args: uri (string): git URI (e.g. git@github.com:foo/bar.git) ref (string): optional git ref to be appended to the path Returns: str: Directory name for the supplied uri
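A standalone sketch of this record's behavior, with a hypothetical sanitizer standing in for sanitize_uri_path (assumed, not from the source):

def sanitize_git_path(uri, ref=None):
    # strip a trailing ".git", then make the URI path-safe
    dir_name = uri[:-4] if uri.endswith(".git") else uri
    dir_name = dir_name.replace("@", "_").replace(":", "_").replace("/", "_")  # assumed sanitizer
    if ref is not None:
        dir_name += "-%s" % ref
    return dir_name

print(sanitize_git_path("git@github.com:foo/bar.git", ref="v1"))  # git_github.com_foo_bar-v1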
378,805
def cublasDtpmv(handle, uplo, trans, diag, n, AP, x, incx):
    status = _libcublas.cublasDtpmv_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       n, int(AP), int(x), incx)
    cublasCheckStatus(status)
Matrix-vector product for real triangular-packed matrix.
378,806
def _trim_zeros_complex(str_complexes, na_rep='NaN'):
    def separate_and_trim(str_complex, na_rep):
        num_arr = str_complex.split('+')
        return (_trim_zeros_float([num_arr[0]], na_rep) + ['+'] +
                _trim_zeros_float([num_arr[1][:-1]], na_rep) + ['j'])
    return [''.join(separate_and_trim(x, na_rep)) for x in str_complexes]
Separates the real and imaginary parts from the complex number, and executes the _trim_zeros_float method on each of those.
378,807
def get_list_attribute(self, attribute):
    list_attribute = self.api.getListAttribute(self.obj_ref(), attribute)
    if list_attribute == []:
        raise TgnError(self.ref + ' ' + attribute)
    return list_attribute
:return: attribute value as Python list.
378,808
def add(self, *args, **kwargs):
    check_uniqueness = kwargs.pop('check_uniqueness', False)
    args = self.prepare_args(args)
    for index in self._indexes:
        index.add(*args, check_uniqueness=check_uniqueness and index.handle_uniqueness, **kwargs)
        if check_uniqueness and index.handle_uniqueness:
            check_uniqueness = False
Add the instance tied to the field to all the indexes. For the parameters, see BaseIndex.add
378,809
def check_dimensions(self, dataset):
    required_ctx = TestCtx(BaseCheck.HIGH, 'All geophysical variables are point feature types')
    t = util.get_time_variable(dataset)
    if not t:
        required_ctx.assert_true(False, 'A dimension representing time is required for point feature types')
        return required_ctx.to_result()
    t_dims = dataset.variables[t].dimensions
    o = None or (t_dims and t_dims[0])
    message = '{} must be a valid point feature type. It must have dimensions of ({}) and all coordinates must have dimensions of ({})'
    for variable in util.get_geophysical_variables(dataset):
        is_valid = util.is_point(dataset, variable)
        required_ctx.assert_true(
            is_valid,
            message.format(variable, o, o)
        )
    return required_ctx.to_result()
Checks that the feature types of this dataset are consistent with a point dataset
378,810
def _set_qsfpp(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=qsfpp.qsfpp, is_container='container',
                         presence=False, yang_name="qsfpp", rest_name="qsfpp",
                         parent=self, choice=(u'interface-identifier', u'qsfpp'),
                         path_helper=self._path_helper, extmethods=self._extmethods,
                         register_paths=False, extensions=None,
                         namespace='urn:brocade.com:mgmt:brocade-interface-ext',
                         defining_module='brocade-interface-ext',
                         yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': 'qsfpp must be of a type compatible with container',
            'defined-type': 'container',
            'generated-type': 'YANGDynClass(base=qsfpp.qsfpp, ...)',
        })
    self.__qsfpp = t
    if hasattr(self, '_set'):
        self._set()
Setter method for qsfpp, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfpp (container) If this variable is read-only (config: false) in the source YANG file, then _set_qsfpp is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_qsfpp() directly.
378,811
def command(state, args):
    args = parser.parse_args(args[1:])
    if not args.query:
        print()
        return
    search_query = _compile_re_query(args.query)
    results = state.titles.search(search_query)
    results = [(anime.aid, anime.main_title) for anime in results]
    state.results['anidb'].set(results)
    state.results['anidb'].print()
Search AniDB.
378,812
def ReferenceResults(self, field, allow_edit=False):
    instance = getattr(self, "instance", field.aq_parent)
    table = api.get_view("table_reference_results",
                         context=instance,
                         request=self.REQUEST)
    table.update()
    table.before_render()
    return table.ajax_contents_table()
Render Reference Results Table
378,813
def iterfollow(self):
    if self.links is None:
        return
    if self.links.get("next"):
        yield self.follow()
    else:
        # PEP 479: raising StopIteration inside a generator is an error; just return
        return
Generator for self.follow()
378,814
def get_uservar(self, user, name):
    if name == '__lastmatch__':
        return self.last_match(user)
    else:
        return self._session.get(user, name)
Get a variable about a user. :param str user: The user ID to look up a variable for. :param str name: The name of the variable to get. :return: The user variable, or ``None`` or ``"undefined"``: * If the user has no data at all, this returns ``None``. * If the user doesn't have this variable set, this returns the string ``"undefined"``. * Otherwise this returns the string value of the variable.
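A minimal sketch of the documented return values; the bot object and variable names are hypothetical:

# value = bot.get_uservar("alice", "age")
# -> None          if user "alice" has no data at all
# -> "undefined"   if "alice" exists but never set "age"
# -> "25"          otherwise: the stored string value of the variable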
378,815
def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
    params = {'path': path}
    url = self._url('/containers/{0}/archive', container)
    res = self._get(url, params=params, stream=True)
    self._raise_for_status(res)
    encoded_stat = res.headers.get('x-docker-container-path-stat')
    return (
        self._stream_raw_result(res, chunk_size, False),
        utils.decode_json_header(encoded_stat) if encoded_stat else None
    )
Retrieve a file or folder from a container in the form of a tar archive. Args: container (str): The container where the file is located path (str): Path to the file or folder to retrieve chunk_size (int): The number of bytes returned by each iteration of the generator. If ``None``, data will be streamed as it is received. Default: 2 MB Returns: (tuple): First element is a raw tar data stream. Second element is a dict containing ``stat`` information on the specified ``path``. Raises: :py:class:`docker.errors.APIError` If the server returns an error. Example: >>> c = docker.APIClient() >>> f = open('./sh_bin.tar', 'wb') >>> bits, stat = c.get_archive(container, '/bin/sh') >>> print(stat) {'name': 'sh', 'size': 1075464, 'mode': 493, 'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''} >>> for chunk in bits: ... f.write(chunk) >>> f.close()
378,816
def filter(self, table, vg_snapshots, filter_string):
    query = filter_string.lower()
    return [vg_snapshot for vg_snapshot in vg_snapshots
            if query in vg_snapshot.name.lower()]
Naive case-insensitive search.
378,817
def is_all_field_none(self):
    if self._id_ is not None:
        return False
    if self._time_responded is not None:
        return False
    if self._time_expiry is not None:
        return False
    if self._monetary_account_id is not None:
        return False
    if self._amount_inquired is not None:
        return False
    if self._amount_responded is not None:
        return False
    if self._alias is not None:
        return False
    if self._counterparty_alias is not None:
        return False
    if self._description is not None:
        return False
    if self._attachment is not None:
        return False
    if self._status is not None:
        return False
    if self._minimum_age is not None:
        return False
    if self._require_address is not None:
        return False
    if self._address_shipping is not None:
        return False
    if self._address_billing is not None:
        return False
    if self._geolocation is not None:
        return False
    if self._redirect_url is not None:
        return False
    if self._type_ is not None:
        return False
    if self._sub_type is not None:
        return False
    if self._allow_chat is not None:
        return False
    if self._eligible_whitelist_id is not None:
        return False
    return True
:rtype: bool
378,818
def extract_version(filepath='jeni.py', name='__version__'):
    context = {}
    for line in open(filepath):
        if name in line:
            exec(line, context)
            break
    else:
        raise RuntimeError('{} not found in {}'.format(name, filepath))
    return context[name]
Parse __version__ out of given Python file. Given jeni.py has dependencies, `from jeni import __version__` will fail.
378,819
def extract(binary):
    if len(binary) <= 8:
        raise Exception("Binary pyc must be greater than 8 bytes (got %i)" % len(binary))
    magic = binary[:4]
    MAGIC = get_magic()
    if magic != MAGIC:
        raise Exception("Python version mismatch (%r != %r) Is this a pyc file?" % (magic, MAGIC))
    modtime = time.asctime(time.localtime(struct.unpack('<i', binary[4:8])[0]))
    code = marshal.loads(binary[8:])
    return modtime, code
Extract a code object from a binary pyc file. :param binary: a sequence of bytes from a pyc file.
378,820
def array_split(
        ary,
        indices_or_sections=None,
        axis=None,
        tile_shape=None,
        max_tile_bytes=None,
        max_tile_shape=None,
        sub_tile_shape=None,
        halo=None
):
    "To be replaced."
    return [
        ary[slyce]
        for slyce in shape_split(
            array_shape=ary.shape,
            indices_or_sections=indices_or_sections,
            axis=axis,
            array_start=None,
            array_itemsize=ary.itemsize,
            tile_shape=tile_shape,
            max_tile_bytes=max_tile_bytes,
            max_tile_shape=max_tile_shape,
            sub_tile_shape=sub_tile_shape,
            halo=halo,
            tile_bounds_policy=ARRAY_BOUNDS
        ).flatten()
    ]
To be replaced.
378,821
def add(name, gid=None, **kwargs):
    ...
Add the specified group CLI Example: .. code-block:: bash salt '*' group.add foo 3456
378,822
def firmware_autoupgrade_params_username(self, **kwargs):
    config = ET.Element("config")
    firmware = ET.SubElement(config, "firmware", xmlns="urn:brocade.com:mgmt:brocade-firmware")
    autoupgrade_params = ET.SubElement(firmware, "autoupgrade-params")
    username = ET.SubElement(autoupgrade_params, "username")
    username.text = kwargs.pop('username')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
378,823
def tag(self, *tags):
    if not all(isinstance(tag, collections.Hashable) for tag in tags):
        raise TypeError('Tags must be hashable')
    self.tags.update(tags)
    return self
Tags the job with one or more unique identifiers. Tags must be hashable. Duplicate tags are discarded. :param tags: A unique list of ``Hashable`` tags. :return: The invoked job instance
378,824
def state_likelihood(self, beta, alpha):
    _, _, _, Q = self._ss_matrices(beta)
    residuals = alpha[0][1:] - alpha[0][:-1]
    return np.sum(ss.norm.logpdf(residuals, loc=0, scale=np.power(Q.ravel(), 0.5)))
Returns likelihood of the states given the variance latent variables Parameters ---------- beta : np.array Contains untransformed starting values for latent variables alpha : np.array State matrix Returns ---------- State likelihood
378,825
def all_subclasses(cls):
    for subclass in cls.__subclasses__():
        yield subclass
        for subc in all_subclasses(subclass):
            yield subc
Recursively generate all the subclasses of class cls.
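A quick usage sketch of the generator above (assumes all_subclasses is in scope):

class A: pass
class B(A): pass
class C(B): pass

print(list(all_subclasses(A)))  # [<class 'B'>, <class 'C'>]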
378,826
def get_jwt_decrypt_keys(self, jwt, **kwargs):
    try:
        _key_type = jwe_alg2keytype(jwt.headers['alg'])
    except KeyError:
        _key_type = ''
    try:
        _kid = jwt.headers['kid']
    except KeyError:
        logger.info('Missing kid')
        _kid = ''
    keys = self.get(key_use='enc', owner='', key_type=_key_type)
    try:
        _aud = kwargs['aud']
    except KeyError:
        _aud = ''
    if _aud:
        try:
            allow_missing_kid = kwargs['allow_missing_kid']
        except KeyError:
            allow_missing_kid = False
        try:
            nki = kwargs['no_kid_issuer']
        except KeyError:
            nki = {}
        keys = self._add_key(keys, _aud, 'enc', _key_type, _kid, nki, allow_missing_kid)
    keys = [k for k in keys if k.appropriate_for('decrypt')]
    return keys
Get decryption keys from this keyjar based on information carried in a JWE. These keys should be usable to decrypt an encrypted JWT. :param jwt: A cryptojwt.jwt.JWT instance :param kwargs: Other key word arguments :return: list of usable keys
378,827
def getSiblings(self, retracted=False):
    request = self.getRequest()
    if not request:
        return []
    siblings = []
    retracted_states = [STATE_RETRACTED, STATE_REJECTED]
    for sibling in request.getAnalyses(full_objects=True):
        if api.get_uid(sibling) == self.UID():
            continue
        if not retracted:
            if api.get_workflow_status_of(sibling) in retracted_states:
                continue
        siblings.append(sibling)
    return siblings
Returns the list of analyses of the Analysis Request this analysis belongs to, with the current analysis excluded. :param retracted: If false, retracted/rejected siblings are dismissed :type retracted: bool :return: list of siblings for this analysis :rtype: list of IAnalysis
378,828
def conditional_http_tween_factory(handler, registry):
    settings = registry.settings if hasattr(registry, 'settings') else {}
    not_cacheble_list = []
    if 'not_cacheble_list' in settings:
        not_cacheble_list = settings.get('not_cacheble_list').split()

    def conditional_http_tween(request):
        response = handler(request)
        if request.path not in not_cacheble_list:
            if response.last_modified is not None:
                response.conditional_response = True
            if response.etag is not None:
                response.conditional_response = True
            elif (isinstance(response.app_iter, Sequence) and
                  len(response.app_iter) == 1) and response.body is not None:
                response.conditional_response = True
                response.md5_etag()
        return response
    return conditional_http_tween
Tween that adds ETag headers and tells Pyramid to enable conditional responses where appropriate.
378,829
def isempty(result):
    if isinstance(result, list):
        for element in result:
            if isinstance(element, list):
                if not isempty(element):
                    return False
            else:
                if element is not None:
                    return False
    else:
        if result is not None:
            return False
    return True
Finds out if a scraping result should be considered empty.
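A usage sketch showing what counts as empty (assumes isempty is in scope): nested lists of None are empty; any non-None leaf is not.

assert isempty(None)
assert isempty([None, [None, None]])
assert not isempty([None, [None, 3]])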
378,830
def make(self, selection):
    cached = self._selector_cache.get(selection)
    if cached:
        return cached
    namespaces = self.EXSLT_NAMESPACES
    self._add_parsley_ns(namespaces)
    try:
        # CSS selector with an appended @attribute?
        m = self.REGEX_ENDING_ATTRIBUTE.match(selection)
        if m:
            cssxpath = css_to_xpath(m.group("expr"))
            attribute = m.group("attr")
            cssxpath = "%s/%s" % (cssxpath, attribute)
        else:
            cssxpath = css_to_xpath(selection)
        selector = lxml.etree.XPath(
            cssxpath,
            namespaces=self.namespaces,
            extensions=self.extensions,
            smart_strings=(self.SMART_STRINGS
                           or self._test_smart_strings_needed(selection)),
        )
    except tuple(self.CSSSELECT_SYNTAXERROR_EXCEPTIONS) as syntax_error:
        if self.DEBUG:
            print(repr(syntax_error), selection)
            print("Try interpreting as XPath selector")
        try:
            selector = lxml.etree.XPath(
                selection,
                namespaces=self.namespaces,
                extensions=self.extensions,
                smart_strings=(self.SMART_STRINGS
                               or self._test_smart_strings_needed(selection)),
            )
        except lxml.etree.XPathSyntaxError as syntax_error:
            syntax_error.msg += ": %s" % selection
            raise syntax_error
        except Exception as e:
            if self.DEBUG:
                print(repr(e), selection)
            raise
    except lxml.etree.XPathSyntaxError as syntax_error:
        syntax_error.msg += ": %s" % selection
        raise syntax_error
    except Exception as e:
        if self.DEBUG:
            print(repr(e), selection)
        raise
    self._selector_cache[selection] = Selector(selector)
    return self._selector_cache[selection]
Scopes and selectors are tested in this order: * is this a CSS selector with an appended @something attribute? * is this a regular CSS selector? * is this an XPath expression? XPath expression can also use EXSLT functions (as long as they are understood by libxslt)
378,831
def log_debug(msg, logger="TaskLogger"):
    tasklogger = get_tasklogger(logger)
    tasklogger.debug(msg)
    return tasklogger
Log a DEBUG message Convenience function to log a message to the default Logger Parameters ---------- msg : str Message to be logged logger : str, optional (default: "TaskLogger") Unique name of the logger to retrieve Returns ------- logger : TaskLogger
378,832
def read_nonblocking(self, size=1, timeout=-1):
    if self.closed:
        raise ValueError('I/O operation on closed file.')
    if timeout == -1:
        timeout = self.timeout
    # If the child is already dead, drain whatever is left, then EOF.
    if not self.isalive():
        if self.use_poll:
            r = poll_ignore_interrupts([self.child_fd], timeout)
        else:
            r, w, e = select_ignore_interrupts([self.child_fd], [], [], 0)
        if not r:
            self.flag_eof = True
            raise EOF('End Of File (EOF). Braindead platform.')
    elif self.__irix_hack:
        # Irix is slow to notice a terminated child; give it two seconds.
        if self.use_poll:
            r = poll_ignore_interrupts([self.child_fd], timeout)
        else:
            r, w, e = select_ignore_interrupts([self.child_fd], [], [], 2)
        if not r and not self.isalive():
            self.flag_eof = True
            raise EOF('End Of File (EOF). Slow platform.')
    if self.use_poll:
        r = poll_ignore_interrupts([self.child_fd], timeout)
    else:
        r, w, e = select_ignore_interrupts(
            [self.child_fd], [], [], timeout
        )
    if not r:
        if not self.isalive():
            self.flag_eof = True
            raise EOF('End of File (EOF). Very slow platform.')
        else:
            raise TIMEOUT('Timeout exceeded.')
    if self.child_fd in r:
        return super(spawn, self).read_nonblocking(size)
    raise ExceptionPexpect('Reached an unexpected state.')
This reads at most size characters from the child application. It includes a timeout. If the read does not complete within the timeout period then a TIMEOUT exception is raised. If the end of file is read then an EOF exception will be raised. If a logfile is specified, a copy is written to that log. If timeout is None then the read may block indefinitely. If timeout is -1 then the self.timeout value is used. If timeout is 0 then the child is polled and if there is no data immediately ready then this will raise a TIMEOUT exception. The timeout refers only to the amount of time to read at least one character. This is not affected by the 'size' parameter, so if you call read_nonblocking(size=100, timeout=30) and only one character is available right away then one character will be returned immediately. It will not wait for 30 seconds for another 99 characters to come in. This is a wrapper around os.read(). It uses select.select() to implement the timeout.
378,833
def read(self, size=None):
    if not self._is_open:
        raise IOError('Not opened.')
    return self._vshadow_store.read(size)
Reads a byte string from the file-like object at the current offset. The function will read a byte string of the specified size or all of the remaining data if no size was specified. Args: size (Optional[int]): number of bytes to read, where None is all remaining data. Returns: bytes: data read. Raises: IOError: if the read failed. OSError: if the read failed.
378,834
def assign_account_entitlement_for_user(self, body, user_id, dont_notify_user=None, origin=None):
    route_values = {}
    if user_id is not None:
        route_values['userId'] = self._serialize.url('user_id', user_id, 'str')
    query_parameters = {}
    if dont_notify_user is not None:
        query_parameters['dontNotifyUser'] = self._serialize.query('dont_notify_user', dont_notify_user, 'bool')
    if origin is not None:
        query_parameters['origin'] = self._serialize.query('origin', origin, 'str')
    content = self._serialize.body(body, 'AccountEntitlementUpdateModel')
    response = self._send(http_method='PUT',
                          location_id='',
                          version='5.0-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters,
                          content=content)
    return self._deserialize('AccountEntitlement', response)
AssignAccountEntitlementForUser. [Preview API] Assign an explicit account entitlement :param :class:`<AccountEntitlementUpdateModel> <azure.devops.v5_0.licensing.models.AccountEntitlementUpdateModel>` body: The update model for the entitlement :param str user_id: The id of the user :param bool dont_notify_user: :param str origin: :rtype: :class:`<AccountEntitlement> <azure.devops.v5_0.licensing.models.AccountEntitlement>`
378,835
def parse(self, generator):
    gen = iter(generator)
    for line in gen:
        block = {}
        for rule in self.rules:
            if rule[0](line):
                block = rule[1](line, gen)
                break
        yield block
Parse an iterable source of strings into a generator
378,836
def from_size(value):
    match_size = re_zfs_size.match(str(value))
    if match_size:
        v_unit = match_size.group(2).upper()[0]
        v_size = float(match_size.group(1))
        v_multiplier = math.pow(1024, zfs_size.index(v_unit) + 1)
        value = v_size * v_multiplier
        if int(value) == value:
            value = int(value)
    elif value is not None:
        value = str(value)
    return from_numeric(value)
Convert zfs size (human readable) to python int (bytes)
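A worked example of the conversion, assuming zfs_size is the unit list ['K', 'M', 'G', 'T', 'P', 'E'] so that index + 1 gives the power of 1024:

# from_size('1.50K') -> 1.5 * 1024**1 = 1536
# from_size('2M')    -> 2.0 * 1024**2 = 2097152
# from_size(None)    -> passed through to from_numeric unchanged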
378,837
def _probs(density_matrix: np.ndarray, indices: List[int],
           num_qubits: int) -> List[float]:
    all_probs = np.diagonal(
        np.reshape(density_matrix, (2 ** num_qubits, 2 ** num_qubits)))
    tensor = np.reshape(all_probs, [2] * num_qubits)
    probs = [
        np.sum(np.abs(tensor[linalg.slice_for_qubits_equal_to(indices, b)]))
        for b in range(2 ** len(indices))
    ]
    probs /= np.sum(probs)
    return probs
Returns the probabilities for a measurement on the given indices.
378,838
def run(self):
    self.timer = t.Thread(target=self.report_spans)
    self.timer.daemon = True
    self.timer.name = "Instana Span Reporting"
    self.timer.start()
Spawn a background thread to periodically report queued spans
378,839
def parentItem(self, value):
    self._parentItem = value
    self._recursiveSetNodePath(self._constructNodePath())
The parent item
378,840
def read_stats(self):
    self.statistics = TgnObjectsDict()
    for port in self.session.ports.values():
        for tpld in port.tplds.values():
            self.statistics[tpld] = tpld.read_stats()
    return self.statistics
Read current statistics from chassis. :return: dictionary {tpld full index {group name {stat name: stat value}}}
378,841
def fetch_batch(self, formatter=TableFormat):
    clone = self.copy()
    clone.update_query(type='stored')
    if not clone.fetch_size or clone.fetch_size <= 0:
        clone.request['fetch'].update(quantity=200)
    fmt = formatter(clone)
    for result in clone.fetch_raw():
        yield fmt.formatted(result)
Fetch a batch of logs and return using the specified formatter. Formatter is class type defined in :py:mod:`smc_monitoring.models.formatters`. This fetch type will be a single shot fetch (this method forces ``fetch_type='stored'``). If ``fetch_size`` is not already set on the query, the default fetch_size will be 200. :param formatter: Formatter type for data representation. Any type in :py:mod:`smc_monitoring.models.formatters`. :return: generator returning data in specified format
378,842
def _handle_request(self, request):
    response = webob.Response(request=request)
    path = request.path_info
    parsed = self._urlmap(path)
    if parsed:
        path_params, resource = parsed
    else:
        path_params, resource = {}, self.NOT_FOUND_RESOURCE
    instance = resource(request=request, response=response,
                        path_params=path_params, application=self)
    response = instance()
    if request.method == 'HEAD':
        response.body = b''
    return response
Finds the resource to which a request maps and then calls it. Instantiates, fills and returns a :class:`webob.Response` object. If no resource matches the request, a 404 status is set on the response object. :param request: Object representing the current request. :type request: :class:`webob.Request`
378,843
def remove(self, key):
    check_not_none(key, "key can't be none")
    return self._encode_invoke(transactional_map_remove_codec, key=self._to_data(key))
Transactional implementation of :func:`Map.remove(key) <hazelcast.proxy.map.Map.remove>` The object to be removed will be removed from only the current transaction context until the transaction is committed. :param key: (object), key of the mapping to be deleted. :return: (object), the previous value associated with key, or ``None`` if there was no mapping for key.
378,844
def _write_marker(self, indent_string, depth, entry, comment):
    return '%s%s%s%s%s' % (indent_string,
                           self._a_to_u('[' * depth),
                           self._quote(self._decode_element(entry), multiline=False),
                           self._a_to_u(']' * depth),
                           self._decode_element(comment))
Write a section marker line
378,845
def xpathNextParent(self, cur):
    if cur is None:
        cur__o = None
    else:
        cur__o = cur._o
    ret = libxml2mod.xmlXPathNextParent(self._o, cur__o)
    if ret is None:
        raise xpathError('xmlXPathNextParent() failed')
    __tmp = xmlNode(_obj=ret)
    return __tmp
Traversal function for the "parent" direction The parent axis contains the parent of the context node, if there is one.
378,846
def _set_character_restriction(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=character_restriction.character_restriction,
                         is_container='container', presence=False,
                         yang_name="character-restriction",
                         rest_name="character-restriction", parent=self,
                         path_helper=self._path_helper,
                         extmethods=self._extmethods, register_paths=True,
                         extensions={u'tailf-common': {u'info': u''}},
                         namespace='urn:brocade.com:mgmt:brocade-aaa',
                         defining_module='brocade-aaa',
                         yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': 'character_restriction must be of a type compatible with container',
            'defined-type': 'container',
            'generated-type': 'YANGDynClass(base=character_restriction.character_restriction, ...)',
        })
    self.__character_restriction = t
    if hasattr(self, '_set'):
        self._set()
Setter method for character_restriction, mapped from YANG variable /password_attributes/character_restriction (container) If this variable is read-only (config: false) in the source YANG file, then _set_character_restriction is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_character_restriction() directly.
378,847
def import_class(path):
    components = path.split(".")
    module = components[:-1]
    module = ".".join(module)
    mod = __import__(module, fromlist=[native_str(components[-1])])
    return getattr(mod, native_str(components[-1]))
Import a class from a string module class path
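A usage sketch for the dotted-path importer (stdlib targets, assuming import_class is in scope):

OrderedDict = import_class("collections.OrderedDict")
join = import_class("os.path.join")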
378,848
def decode_data(self, encoded):
    try:
        identifier = None
        data_format = 2
        if len(encoded) > 8:
            data_format = 4
            identifier = encoded[8:]
            encoded = encoded[:8]
        decoded = bytearray(base64.b64decode(encoded, '-_'))
        return {
            'data_format': data_format,
            'temperature': self._get_temperature(decoded),
            'humidity': self._get_humidity(decoded),
            'pressure': self._get_pressure(decoded),
            'identifier': identifier
        }
    except Exception:
        log.exception('Encoded value: %s not valid', encoded)
        return None
Decode sensor data. Returns: dict: Sensor values
378,849
def extern_equals(self, context_handle, val1, val2):
    return self._ffi.from_handle(val1[0]) == self._ffi.from_handle(val2[0])
Return true if the given Handles are __eq__.
378,850
def _wrap_thing(self, thing, kind):
    thing['created'] = self._epoch_utc_to_local(thing['created_utc'])
    thing['d_'] = copy.deepcopy(thing)
    ThingType = namedtuple(kind, thing.keys())
    thing = ThingType(**thing)
    return thing
Mimic praw.Submission and praw.Comment API
378,851
def accept_vpc_peering_connection(name=None, conn_id=None, conn_name=None,
                                  region=None, key=None, keyid=None, profile=None):
    log.debug('Called state to accept VPC peering connection')
    pending = __salt__['boto_vpc.is_peering_connection_pending'](
        conn_id=conn_id, conn_name=conn_name, region=region, key=key,
        keyid=keyid, profile=profile)
    ret = {
        'name': name,
        'result': True,
        'changes': {},
        'comment': ''
    }
    if not pending:
        ret['result'] = True
        ret['changes'].update({'old': 'No pending VPC peering connection found. Nothing to be done.'})
        return ret
    if __opts__['test']:
        ret['changes'].update({'old': 'Pending VPC peering connection found and can be accepted'})
        return ret
    fun = 'boto_vpc.accept_vpc_peering_connection'
    log.debug('Calling `%s()` to accept this VPC peering connection', fun)
    result = __salt__[fun](conn_id=conn_id, name=conn_name, region=region,
                           key=key, keyid=keyid, profile=profile)
    if 'error' in result:
        ret['comment'] = "Failed to accept VPC peering: {0}".format(result['error'])
        ret['result'] = False
        return ret
    ret['changes'].update({'old': '', 'new': result['msg']})
    return ret
Accept a VPC pending requested peering connection between two VPCs. name Name of this state conn_id The connection ID to accept. Exclusive with conn_name. String type. conn_name The name of the VPC peering connection to accept. Exclusive with conn_id. String type. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. versionadded:: 2016.11.0 Example: .. code-block:: yaml boto_vpc.accept_vpc_peering_connection: - conn_name: salt_peering_connection # usage with vpc peering connection id and region boto_vpc.accept_vpc_peering_connection: - conn_id: pbx-1873d472 - region: us-west-2
378,852
def plugin(name, module=''):
    if module:
        mod = projex.importfile(module)
        if mod:
            return getattr(mod, nstr(name), None)
    return Builder._plugins.get(nstr(name))
Returns the plugin for the given name. By default, the base Builder instance will be returned. :param name | <str>
378,853
def _map(self, from_pos, to_pos, pos, base):
    pos_i = -1
    while pos_i < len(self.cigar_op) and pos >= from_pos[pos_i + 1]:
        pos_i += 1
    if pos_i == -1 or pos_i == len(self.cigar_op):
        raise HGVSInvalidIntervalError("Position is beyond the bounds of transcript record")
    if self.cigar_op[pos_i] in "=MX":
        mapped_pos = to_pos[pos_i] + (pos - from_pos[pos_i])
        mapped_pos_offset = 0
    elif self.cigar_op[pos_i] in "DI":
        if base == "start":
            mapped_pos = to_pos[pos_i] - 1
        elif base == "end":
            mapped_pos = to_pos[pos_i]
        mapped_pos_offset = 0
    elif self.cigar_op[pos_i] == "N":
        if pos - from_pos[pos_i] + 1 <= from_pos[pos_i + 1] - pos:
            mapped_pos = to_pos[pos_i] - 1
            mapped_pos_offset = pos - from_pos[pos_i] + 1
        else:
            mapped_pos = to_pos[pos_i]
            mapped_pos_offset = -(from_pos[pos_i + 1] - pos)
    return mapped_pos, mapped_pos_offset, self.cigar_op[pos_i]
Map position between aligned sequences Positions in this function are 0-based.
378,854
def get_if_addr6(iff):
    return next((x[0] for x in in6_getifaddr()
                 if x[2] == iff and x[1] == IPV6_ADDR_GLOBAL), None)
Returns the main global unicast address associated with provided interface, in human readable form. If no global address is found, None is returned.
378,855
def parse(self, content):
    if content is None:
        return None
    feed = feedparser.parse(content)
    if feed['bozo']:
        exception_content = {
            "exception": str(type(feed['bozo_exception'])),
            "content": str(feed['bozo_exception'].getException()),
            "line": feed['bozo_exception'].getLineNumber(),
            "message": feed['bozo_exception'].getMessage(),
        }
        feed['bozo_exception'] = exception_content
        if not self.bozo_accept:
            feed = None
    return feed
Parse the fetched feed content. The dict returned by feedparser contains a 'bozo' key which is '1' if the feed is malformed. Return None if the feed is malformed and 'bozo_accept' is 'False', else return the feed content dict. If the feed is malformed but 'bozo_accept' is 'True', the feed content dict will contain the parsing error exception information in 'bozo_exception'.
378,856
def diag(A, k=0):
    if isinstance(A, Poly):
        core, core_new = A.A, {}
        for key in A.keys:
            core_new[key] = numpy.diag(core[key], k)
        return Poly(core_new, A.dim, None, A.dtype)
    return numpy.diag(A, k)
Extract or construct a diagonal polynomial array.
378,857
def get_instructions(self, cm, size, insn, idx):
    self.odex = cm.get_odex_format()
    max_idx = size * calcsize('=H')
    if max_idx > len(insn):
        max_idx = len(insn)
    while idx < max_idx:
        obj = None
        classic_instruction = True
        op_value = insn[idx]
        # payload/extended opcodes are flagged by a 16-bit opcode value
        if (op_value == 0x00 or op_value == 0xff) and ((idx + 2) < max_idx):
            op_value = unpack('=H', insn[idx:idx + 2])[0]
            if op_value in DALVIK_OPCODES_PAYLOAD:
                try:
                    obj = get_instruction_payload(op_value, insn[idx:])
                    classic_instruction = False
                except struct.error:
                    warning("error while decoding instruction ...")
            elif op_value in DALVIK_OPCODES_EXTENDED_WIDTH:
                try:
                    obj = get_extented_instruction(cm, op_value, insn[idx:])
                    classic_instruction = False
                except struct.error as why:
                    warning("error while decoding instruction ..." + why.__str__())
            elif self.odex and (op_value in DALVIK_OPCODES_OPTIMIZED):
                obj = get_optimized_instruction(cm, op_value, insn[idx:])
                classic_instruction = False
        if classic_instruction:
            op_value = insn[idx]
            obj = get_instruction(cm, op_value, insn[idx:], self.odex)
        yield obj
        idx = idx + obj.get_length()
:param cm: a ClassManager object :type cm: :class:`ClassManager` object :param size: the total size of the buffer :type size: int :param insn: a raw buffer where are the instructions :type insn: string :param idx: a start address in the buffer :type idx: int :rtype: a generator of :class:`Instruction` objects
378,858
def lookup(self, hostname):
    class SubDict(MutableMapping):
        def __init__(self, hostname, entries, hostkeys):
            self._hostname = hostname
            self._entries = entries
            self._hostkeys = hostkeys

        def __iter__(self):
            for k in self.keys():
                yield k

        def __len__(self):
            return len(self.keys())

        def __delitem__(self, key):
            for e in list(self._entries):
                if e.key.get_name() == key:
                    self._entries.remove(e)
                    break
            else:
                raise KeyError(key)

        def __getitem__(self, key):
            for e in self._entries:
                if e.key.get_name() == key:
                    return e.key
            raise KeyError(key)

        def __setitem__(self, key, val):
            for e in self._entries:
                if e.key is None:
                    continue
                if e.key.get_name() == key:
                    e.key = val
                    break
            else:
                e = HostKeyEntry([hostname], val)
                self._entries.append(e)
                self._hostkeys._entries.append(e)

        def keys(self):
            return [
                e.key.get_name() for e in self._entries if e.key is not None
            ]

    entries = []
    for e in self._entries:
        if self._hostname_matches(hostname, e):
            entries.append(e)
    if len(entries) == 0:
        return None
    return SubDict(hostname, entries, self)
Find a hostkey entry for a given hostname or IP. If no entry is found, ``None`` is returned. Otherwise a dictionary of keytype to key is returned. The keytype will be either ``"ssh-rsa"`` or ``"ssh-dss"``. :param str hostname: the hostname (or IP) to lookup :return: dict of `str` -> `.PKey` keys associated with this host (or ``None``)
378,859
def create_index_list(self, table_name, attr_names):
    self.validate_access_permission(["w", "a"])
    if typepy.is_empty_sequence(attr_names):
        return
    table_attr_set = set(self.fetch_attr_names(table_name))
    index_attr_set = set(AttrList.sanitize(attr_names))
    for attribute in list(table_attr_set.intersection(index_attr_set)):
        self.create_index(table_name, attribute)
:param str table_name: Table name that exists attribute. :param list attr_names: List of attribute names to create indices. Ignore attributes that are not existing in the table. .. seealso:: :py:meth:`.create_index`
378,860
def create_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs):
    client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
    new_tags = []
    for k, v in six.iteritems(tags):
        new_tags.append({'Key': k, 'Value': v})
    client.create_tags(FileSystemId=filesystemid, Tags=new_tags)
Creates or overwrites tags associated with a file system. Each tag is a key-value pair. If a tag key specified in the request already exists on the file system, this operation overwrites its value with the value provided in the request. filesystemid (string) - ID of the file system for whose tags will be modified. tags (dict) - The tags to add to the file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.create_tags
378,861
def prefetch_urls(self, urls):
    req = {}
    req.update({"urls": urls})
    body = json.dumps(req)
    url = '{0}/v2/tune/prefetch'.format(self.server)
    return self.__post(url, body)
Prefetch a list of file URLs. Docs: http://developer.qiniu.com/article/fusion/api/prefetch.html Args: urls: list of file URLs to prefetch Returns: a dict and a ResponseInfo object; see examples/cdn_manager.py for reference code
378,862
def get_attrs(cls):
    ignore = dir(type('dummy', (object,), {})) + []
    attrs = [
        item for item in inspect.getmembers(cls)
        if item[0] not in ignore and not isinstance(
            item[1],
            (types.FunctionType, types.MethodType,
             classmethod, staticmethod, property))
    ]
    attrs.sort(key=lambda attr: (getattr(attr[1], '_order', -1), attr[0]))
    return attrs
Get all class attributes ordered by definition
378,863
def backbone_bond_lengths(self):
    bond_lengths = dict(
        n_ca=[distance(r['N'], r['CA']) for r in self.get_monomers(ligands=False)],
        ca_c=[distance(r['CA'], r['C']) for r in self.get_monomers(ligands=False)],
        c_o=[distance(r['C'], r['O']) for r in self.get_monomers(ligands=False)],
        c_n=[distance(r1['C'], r2['N']) for r1, r2 in [
            (self[i], self[i + 1]) for i in range(len(self) - 1)]],
    )
    return bond_lengths
Dictionary containing backbone bond lengths as lists of floats. Returns ------- bond_lengths : dict Keys are `n_ca`, `ca_c`, `c_o` and `c_n`, referring to the N-CA, CA-C, C=O and C-N bonds respectively. Values are lists of floats : the bond lengths in Angstroms. The lists of n_ca, ca_c and c_o are of length k for a Polypeptide containing k Residues. The list of c_n bonds is of length k-1 for a Polypeptide containing k Residues (C-N formed between successive `Residue` pairs).
378,864
def get_unresolved_properties_by_inheritance(self, timeperiod):
    for i in timeperiod.templates:
        template = self.templates[i]
        timeperiod.unresolved.extend(template.unresolved)
Fill full properties with template if needed for the unresolved values (example: sunday ETCETC) :return: None
378,865
def get_disease(self, disease_name=None, disease_id=None, definition=None,
                parent_ids=None, tree_numbers=None, parent_tree_numbers=None,
                slim_mapping=None, synonym=None, alt_disease_id=None,
                limit=None, as_df=False):
    q = self.session.query(models.Disease)
    if disease_name:
        q = q.filter(models.Disease.disease_name.like(disease_name))
    if disease_id:
        q = q.filter(models.Disease.disease_id == disease_id)
    if definition:
        q = q.filter(models.Disease.definition.like(definition))
    if parent_ids:
        q = q.filter(models.Disease.parent_ids.like(parent_ids))
    if tree_numbers:
        q = q.filter(models.Disease.tree_numbers.like(tree_numbers))
    if parent_tree_numbers:
        q = q.filter(models.Disease.parent_tree_numbers.like(parent_tree_numbers))
    if slim_mapping:
        q = q.join(models.DiseaseSlimmapping).filter(models.DiseaseSlimmapping.slim_mapping.like(slim_mapping))
    if synonym:
        q = q.join(models.DiseaseSynonym).filter(models.DiseaseSynonym.synonym.like(synonym))
    if alt_disease_id:
        q = q.join(models.DiseaseAltdiseaseid).filter(models.DiseaseAltdiseaseid.alt_disease_id == alt_disease_id)
    return self._limit_and_df(q, limit, as_df)
Get diseases :param bool as_df: if set to True result returns as `pandas.DataFrame` :param int limit: maximum number of results :param str disease_name: disease name :param str disease_id: disease identifier :param str definition: definition of disease :param str parent_ids: parent identifiers, delimiter | :param str tree_numbers: tree numbers, delimiter | :param str parent_tree_numbers: parent tree numbers, delimiter :param str slim_mapping: term derived from the MeSH tree structure for the “Diseases” [C] branch, \ that classifies MEDIC diseases into high-level categories :param str synonym: disease synonyms :param str alt_disease_id: alternative disease identifiers :return: list of :class:`pyctd.manager.models.Disease` object .. seealso:: :class:`pyctd.manager.models.Disease` .. todo:: normalize parent_ids, tree_numbers and parent_tree_numbers in :class:`pyctd.manager.models.Disease`
378,866
def spin(self):
    if self._notification_socket:
        self._flush_notifications()
    if self._iopub_socket:
        self._flush_iopub(self._iopub_socket)
    if self._mux_socket:
        self._flush_results(self._mux_socket)
    if self._task_socket:
        self._flush_results(self._task_socket)
    if self._control_socket:
        self._flush_control(self._control_socket)
    if self._query_socket:
        self._flush_ignored_hub_replies()
Flush any registration notifications and execution results waiting in the ZMQ queue.
378,867
async def iter_all(
    self,
    direction: msg.StreamDirection = msg.StreamDirection.Forward,
    from_position: Optional[Union[msg.Position, msg._PositionSentinel]] = None,
    batch_size: int = 100,
    resolve_links: bool = True,
    require_master: bool = False,
    correlation_id: Optional[uuid.UUID] = None,
):
    cmd = convo.IterAllEvents(
        msg.Position.for_direction(direction, from_position),
        batch_size,
        resolve_links,
        require_master,
        direction,
        self.credential,
        correlation_id,
    )
    result = await self.dispatcher.start_conversation(cmd)
    iterator = await result
    async for event in iterator:
        yield event
Read through all the events in the database. Args: direction (optional): Controls whether to read forward or backward through the events. Defaults to StreamDirection.Forward from_position (optional): The position to start reading from. Defaults to photonpump.Beginning when direction is Forward, photonpump.End when direction is Backward. batch_size (optional): The maximum number of events to read at a time. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. require_master (optional): True if this command must be sent directly to the master node, otherwise False. correlation_id (optional): A unique identifier for this command. Examples: Print every event from the database. >>> with async.connect() as conn: >>> async for event in conn.iter_all() >>> print(event) Print every event from the database in reverse order >>> with async.connect() as conn: >>> async for event in conn.iter_all(direction=StreamDirection.Backward): >>> print(event) Start reading from a known commit position >>> with async.connect() as conn: >>> async for event in conn.iter_all(from_position=Position(12345)) >>> print(event)
378,868
def __cleanup_breakpoint(self, event, bp):
    "Auxiliary method."
    try:
        process = event.get_process()
        thread = event.get_thread()
        bp.disable(process, thread)
    except Exception:
        pass
    bp.set_condition(True)
    bp.set_action(None)
Auxiliary method.
378,869
def word(self):
    if self.wordtype == DigitWord.DIGIT:
        return self._word
    else:
        return [str(hex(a)).replace('0x', '') for a in self._word]
Property of the DigitWord returning (or setting) the DigitWord as a list of integers (or string representations) of DigitModel. The property is called during instantiation as the property validates the value passed and ensures that all digits are valid.
378,870
def main():
    parser = get_parser()
    subparsers = get_subparsers(parser)

    def help(return_code=0):
        version = helpme.__version__
        bot.custom(message='HelpMe Command Line Tool v%s' % version,
                   prefix='helpme', color='CYAN')
        parser.print_help()
        sys.exit(return_code)

    args, extras = parser.parse_known_args()

    if args.version is True:
        print(helpme.__version__)
        sys.exit(0)

    if args.command == "config":
        from .config import main
    if args.command == "list":
        from .list import main
    if args.command in HELPME_HELPERS:
        from .help import main

    return_code = 0
    try:
        main(args, extras)
        sys.exit(return_code)
    except UnboundLocalError:
        return_code = 1
    help(return_code)
the main entry point for the HelpMe Command line application. Currently, the user can request help or set config values for a particular helper.
378,871
def from_dict(dic):
    return ODict((k, ODict.convert_ifdic(v)) for k, v in dic.items())
Recursively convert a dict to a dictobj. :param dic: :return:
378,872
def save(self, fname):
    element = _transform.SVGFigure(self.width, self.height)
    element.append(self)
    element.save(os.path.join(CONFIG['figure.save_path'], fname))
Save figure to SVG file. Parameters ---------- fname : str Full path to file.
378,873
def specific_gains(string):
    if not string:
        return {}
    gains = {}
    for gain in string.split(','):
        amp_name, value = gain.split('=')
        gains[amp_name.strip()] = float(value.strip())
    return gains
Convert string with gains of individual amplification elements to dict
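A usage sketch, assuming the reconstructed ',' and '=' separators above are correct:

print(specific_gains("LNA=32, VGA=20, AMP=0"))
# {'LNA': 32.0, 'VGA': 20.0, 'AMP': 0.0}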
378,874
def tempfile_writer(target):
    tmp = target.parent / ('%s.tmp' % target.name)
    try:
        with tmp.open('wb') as fd:
            yield fd
    except:
        tmp.unlink()
        raise
    LOG.debug('rename %s -> %s', tmp, target)
    tmp.rename(target)
Write cache data to a temporary location. When writing is complete, rename the file to the actual location. Delete the temporary file on any error.
378,875
def get_renderer(self, with_layout=True):
    if with_layout and self.is_lti():
        return self._default_renderer_lti
    elif with_layout:
        return self._default_renderer
    else:
        return self._default_renderer_nolayout
Get the default renderer
378,876
def get_objective_requisite_assignment_session_for_objective_bank(self, objective_bank_id, proxy, *args, **kwargs):
    if not objective_bank_id:
        raise NullArgument
    if not self.supports_objective_requisite_assignment():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    proxy = self._convert_proxy(proxy)
    try:
        session = sessions.ObjectiveRequisiteAssignmentSession(
            objective_bank_id=objective_bank_id, proxy=proxy, runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
    return session
Gets the ``OsidSession`` associated with the objective sequencing service for the given objective bank. :param objective_bank_id: the ``Id`` of the objective bank :type objective_bank_id: ``osid.id.Id`` :param proxy: a proxy :type proxy: ``osid.proxy.Proxy`` :return: an ``ObjectiveRequisiteAssignmentSession`` :rtype: ``osid.learning.ObjectiveRequisiteAssignmentSession`` :raise: ``NotFound`` -- ``objective_bank_id`` not found :raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null`` :raise: ``OperationFailed`` -- ``unable to complete request`` :raise: ``Unimplemented`` -- ``supports_objective_requisite_assignment()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_objective_requisite_assignment()`` and ``supports_visible_federation()`` are ``true``.*
378,877
def irreg(self, i):
    excl = False
    for ir in self._irregs:
        if i in ir.morphos():
            return ir.grq(), ir.exclusif
    return "", excl
Returns the irregular form of morpho i. excl becomes True if it is exclusive, False otherwise. :return: Irregular form of morpho i, exclusivity :rtype: tuple.<str, bool>
378,878
def create_free_space_request_content():
    root = etree.Element('propfind', xmlns='DAV:')
    prop = etree.SubElement(root, 'prop')
    etree.SubElement(prop, 'quota-available-bytes')
    etree.SubElement(prop, 'quota-used-bytes')
    tree = etree.ElementTree(root)
    return WebDavXmlUtils.etree_to_string(tree)
Creates an XML for requesting of free space on remote WebDAV server. :return: the XML string of request content.
378,879
def reset_password_view(self, token):
    if self.call_or_get(current_user.is_authenticated):
        logout_user()
    data_items = self.token_manager.verify_token(
        token, self.USER_RESET_PASSWORD_EXPIRATION)
    user = None
    if data_items:
        user_id = data_items[0]
        user = self.db_manager.get_user_by_id(user_id)
        user_or_user_email_object = self.db_manager.get_primary_user_email_object(user)
        user_or_user_email_object.email_confirmed_at = datetime.utcnow()
        self.db_manager.save_object(user_or_user_email_object)
        self.db_manager.commit()
    if not user:
        flash(_('Your reset password token is invalid.'), 'error')
        return redirect(self._endpoint_url('user.login'))
    form = self.ResetPasswordFormClass(request.form)
    if request.method == 'POST' and form.validate():
        password_hash = self.hash_password(form.new_password.data)
        user.password = password_hash
        self.db_manager.save_object(user)
        self.db_manager.commit()
        if self.USER_ENABLE_EMAIL and self.USER_SEND_PASSWORD_CHANGED_EMAIL:
            self.email_manager.send_password_changed_email(user)
        signals.user_reset_password.send(current_app._get_current_object(), user=user)
        flash(_("Your password has been reset successfully."), 'success')
        safe_next_url = self._get_safe_next_url('next', self.USER_AFTER_RESET_PASSWORD_ENDPOINT)
        if self.USER_AUTO_LOGIN_AFTER_RESET_PASSWORD:
            return self._do_login_user(user, safe_next_url)
        else:
            return redirect(url_for('user.login') + '?next=' + quote(safe_next_url))
    self.prepare_domain_translations()
    return render_template(self.USER_RESET_PASSWORD_TEMPLATE, form=form)
Verify the password reset token, Prompt for new password, and set the user's password.
378,880
def location_path(self):
    value = []
    for option in self.options:
        if option.number == defines.OptionRegistry.LOCATION_PATH.number:
            value.append(str(option.value))
    return "/".join(value)
Return the Location-Path of the response. :rtype : String :return: the Location-Path option
378,881
def _get_translations_multi_paths():
    ctx = _request_ctx_stack.top
    if ctx is None:
        return None
    translations = getattr(ctx, "babel_translations", None)
    if translations is None:
        babel_ext = ctx.app.extensions["babel"]
        translations = None
        trs = None
        for (dirname, domain) in reversed(babel_ext._translations_paths):
            trs = Translations.load(
                dirname, locales=[flask_babel.get_locale()], domain=domain
            )
            if not trs or not hasattr(trs, "merge"):
                continue
            elif translations is not None and hasattr(translations, "merge"):
                translations.merge(trs)
            else:
                translations = trs
        if translations is None:
            translations = trs
        ctx.babel_translations = translations
    return translations
Return the correct gettext translations that should be used for this request. This will never fail and return a dummy translation object if used outside of the request or if a translation cannot be found.
378,882
def map2slim(subjects, slim, **kwargs):
    logging.info("SLIM SUBJECTS:{} SLIM:{} CAT:{}".format(subjects, slim, kwargs.get('category')))
    searchresult = search_associations(subjects=subjects,
                                       slim=slim,
                                       facet_fields=[],
                                       **kwargs)
    pmap = {}
    for a in searchresult['associations']:
        subj = a['subject']['id']
        slimmed_terms = a['slim']
        for t in slimmed_terms:
            k = (subj, t)
            if k not in pmap:
                pmap[k] = []
            pmap[k].append(a)
    results = [{'subject': subj, 'slim': t, 'assocs': assocs}
               for ((subj, t), assocs) in pmap.items()]
    return results
Maps a set of subjects (e.g. genes) to a set of slims. Result is a list of unique subject-class pairs, with a list of source associations
378,883
def append_partition_by_name(self, db_name, tbl_name, part_name):
    self.send_append_partition_by_name(db_name, tbl_name, part_name)
    return self.recv_append_partition_by_name()
Parameters: - db_name - tbl_name - part_name
378,884
def get_many(self, keys):
    d = {}
    for k in keys:
        val = self.get(k)
        if val is not None:
            d[k] = val
    return d
Fetch a bunch of keys from the cache. For certain backends (memcached, pgsql) this can be *much* faster when fetching multiple values. Return a dict mapping each key in keys to its value. If the given key is missing, it will be missing from the response dict.
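A usage sketch (the cache object is hypothetical); missing keys are simply absent from the result:

# cache.set('a', 1)
# cache.get_many(['a', 'b'])  # -> {'a': 1}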
378,885
def DoRarExtraction(rarArchive, targetFile, dstDir):
    try:
        rarArchive.extract(targetFile, dstDir)
    except BaseException as ex:
        goodlogging.Log.Info("EXTRACT", "Extract failed - Exception: {0}".format(ex))
        return False
    else:
        return True
RAR extraction with exception catching Parameters ---------- rarArchive : RarFile object RarFile object to extract. targetFile : string Target file name. dstDir : string Target directory. Returns ---------- boolean False if rar extraction failed, otherwise True.
378,886
def get_meta_regex(schema=): meta_parse = collections.OrderedDict() if schema == : meta_parse[] = [] meta_parse[] = [, , ] meta_parse[] = [, ] meta_parse[] = [] meta_parse[] = [, , ] meta_parse[] = [, ] meta_parse[] = [, ] meta_parse[] = [, ] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] elif schema == : meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] meta_parse[] = [] return meta_parse
Create a dictionary of regex for extracting the meta data for the spectra
378,887
def in_dir(
    config_dir=os.path.expanduser('~/.tmuxp'),
    extensions=['.yml', '.yaml', '.json']
):
    configs = []
    for filename in os.listdir(config_dir):
        if is_config_file(filename, extensions) and not filename.startswith('.'):
            configs.append(filename)
    return configs
Return a list of configs in ``config_dir``. Parameters ---------- config_dir : str directory to search extensions : list filetypes to check (e.g. ``['.yaml', '.json']``). Returns ------- list
378,888
def __realized_bbox(self, requested_bbox):
    realized_bbox = requested_bbox.expand_to_chunk_size(self.underlying, offset=self.voxel_offset)
    return Bbox.clamp(realized_bbox, self.bounds)
The requested bbox might not be aligned to the underlying chunk grid or even outside the bounds of the dataset. Convert the request into a bbox representing something that can be actually downloaded. Returns: Bbox
378,889
def _energy_minimize_openmm(
        self, tmp_dir, forcefield_files=None, forcefield_name=None,
        steps=1000, scale_bonds=1, scale_angles=1, scale_torsions=1,
        scale_nonbonded=1):
    foyer = import_('foyer')
    to_parmed = self.to_parmed()
    ff = foyer.Forcefield(forcefield_files=forcefield_files, name=forcefield_name)
    to_parmed = ff.apply(to_parmed)

    from simtk.openmm.app.simulation import Simulation
    from simtk.openmm.app.pdbreporter import PDBReporter
    from simtk.openmm.openmm import LangevinIntegrator
    import simtk.unit as u

    system = to_parmed.createSystem()
    integrator = LangevinIntegrator(298 * u.kelvin, 1 / u.picosecond,
                                    0.002 * u.picoseconds)
    simulation = Simulation(to_parmed.topology, system, integrator)

    # Scale the force constants of each supported force group.
    for force in system.getForces():
        if type(force).__name__ == "HarmonicBondForce":
            for bond_index in range(force.getNumBonds()):
                atom1, atom2, r0, k = force.getBondParameters(bond_index)
                force.setBondParameters(bond_index, atom1, atom2, r0, k * scale_bonds)
            force.updateParametersInContext(simulation.context)
        elif type(force).__name__ == "HarmonicAngleForce":
            for angle_index in range(force.getNumAngles()):
                atom1, atom2, atom3, r0, k = force.getAngleParameters(angle_index)
                force.setAngleParameters(angle_index, atom1, atom2, atom3, r0, k * scale_angles)
            force.updateParametersInContext(simulation.context)
        elif type(force).__name__ == "RBTorsionForce":
            for torsion_index in range(force.getNumTorsions()):
                atom1, atom2, atom3, atom4, c0, c1, c2, c3, c4, c5 = force.getTorsionParameters(torsion_index)
                force.setTorsionParameters(
                    torsion_index, atom1, atom2, atom3, atom4,
                    c0 * scale_torsions, c1 * scale_torsions,
                    c2 * scale_torsions, c3 * scale_torsions,
                    c4 * scale_torsions, c5 * scale_torsions)
            force.updateParametersInContext(simulation.context)
        elif type(force).__name__ == "NonbondedForce":
            for nb_index in range(force.getNumParticles()):
                charge, sigma, epsilon = force.getParticleParameters(nb_index)
                force.setParticleParameters(nb_index, charge, sigma, epsilon * scale_nonbonded)
            force.updateParametersInContext(simulation.context)
        elif type(force).__name__ == "CMMotionRemover":
            pass
        else:
            warn('OpenMM Force {} is not currently supported. '
                 'This Force will not be updated!'.format(type(force).__name__))

    simulation.context.setPositions(to_parmed.positions)
    simulation.minimizeEnergy(maxIterations=steps)
    reporter = PDBReporter(os.path.join(tmp_dir, 'minimized.pdb'), 1)
    reporter.report(
        simulation, simulation.context.getState(getPositions=True))
Perform energy minimization using OpenMM Converts an mBuild Compound to a Parmed Structure, applies a forcefield using Foyer, and creates an OpenMM System. Parameters ---------- forcefield_files : str or list of str, optional, default=None Forcefield files to load forcefield_name : str, optional, default=None Apply a named forcefield to the output file using the `foyer` package, e.g. 'oplsaa'. Forcefields listed here: https://github.com/mosdef-hub/foyer/tree/master/foyer/forcefields steps : int, optional, default=1000 Number of energy minimization iterations scale_bonds : float, optional, default=1 Scales the bond force constant (1 is completely on) scale_angles : float, optiona, default=1 Scales the angle force constant (1 is completely on) scale_torsions : float, optional, default=1 Scales the torsional force constants (1 is completely on) scale_nonbonded : float, optional, default=1 Scales epsilon (1 is completely on) Notes ----- Assumes a particular organization for the force groups (HarmonicBondForce, HarmonicAngleForce, RBTorsionForce, NonBondedForce) References ---------- .. [1] P. Eastman, M. S. Friedrichs, J. D. Chodera, R. J. Radmer, C. M. Bruns, J. P. Ku, K. A. Beauchamp, T. J. Lane, L.-P. Wang, D. Shukla, T. Tye, M. Houston, T. Stich, C. Klein, M. R. Shirts, and V. S. Pande. "OpenMM 4: A Reusable, Extensible, Hardware Independent Library for High Performance Molecular Simulation." J. Chem. Theor. Comput. 9(1): 461-469. (2013).
378,890
def add_search_path(*path_tokens):
    full_path = os.path.join(*path_tokens)
    if full_path not in sys.path:
        sys.path.insert(0, os.path.abspath(full_path))
Adds a new search path from where modules can be loaded. This function is provided for test applications to add locations to the search path, so any required functionality can be loaded. It helps keeping the step implementation modules simple by placing the bulk of the implementation in separate utility libraries. This function can also be used to add the application being tested to the path, so its functionality can be made available for testing. :param arglist path_tokens: Variable list of path tokens that is joined to create the full, absolute path to be added.
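A usage sketch: make a sibling source tree importable from a test module (paths and module name hypothetical):

# add_search_path('..', 'src', 'app')
# import app_module   # hypothetical module living under ../src/app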
378,891
def uint32_gt(a: int, b: int) -> bool:
    half_mod = 0x80000000
    return (((a < b) and ((b - a) > half_mod)) or
            ((a > b) and ((a - b) < half_mod)))
Return a > b, treating a and b as uint32 sequence numbers compared modulo 2**32 (wraparound-aware).
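A worked check of the wraparound comparison (assumes uint32_gt is in scope): values compare modulo 2**32, so a small number that has wrapped counts as greater than one near the top of the range.

assert uint32_gt(10, 1)              # plain case
assert not uint32_gt(1, 10)
assert uint32_gt(5, 0xFFFFFFF0)      # 5 has wrapped past 0xFFFFFFF0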
378,892
def transform(self, v3):
    if isinstance(v3, Vector3):
        t = super(Quaternion, self).transform([v3.x, v3.y, v3.z])
        return Vector3(t[0], t[1], t[2])
    elif len(v3) == 3:
        return super(Quaternion, self).transform(v3)
    else:
        raise TypeError("param v3 is not a vector type")
Calculates the vector transformed by this quaternion :param v3: Vector3 to be transformed :returns: transformed vector
378,893
def set_widths(self, estimation, widths):
    estimation[self.map_offset[1]:self.map_offset[2]] = widths.ravel()
Set estimation on widths Parameters ---------- estimation : 1D array Either prior or posterior estimation widths : 2D array, in shape [K, 1] Estimation on widths
378,894
async def check_passwd(self, identity: str, passwd: str) -> SessionIdentity:
    assert identity
    value, _ = await self._client.get(f"{self._prefix_identity}/{identity}")
    if value is None:
        logger.debug(f"no such identity: {identity}")
        raise Unauthorized("No such login identity")
    profile = json.loads(value.decode())
    user_id = profile['user_id']
    identity = profile['identity']
    hashed = profile['hashed']
    if sha256_crypt.verify(passwd, hashed):
        return SessionIdentity(user_id=user_id, identity=identity)
    else:
        raise Unauthorized("Login authentication failed")
Verify identity by password
378,895
def cleanup(self):
    for task in self.__done_registry:
        task.stop()
    self.__done_registry.clear()
    self.cleanup_event().clear()
Do cleanup (stop and remove watchdogs that are no longer needed) :return: None
378,896
def postadressen(self):
    return [h.postadres for h in self.huisnummers if h.status.id == 3]
Returns the postadressen for this Perceel. Will only take the huisnummers with status `inGebruik` into account. :rtype: list
378,897
def active_trail_nodes(self, variables, observed=None):
    if observed:
        observed_list = observed if isinstance(observed, (list, tuple)) else [observed]
    else:
        observed_list = []
    ancestors_list = self._get_ancestors_of(observed_list)
    active_trails = {}
    for start in variables if isinstance(variables, (list, tuple)) else [variables]:
        visit_list = set()
        visit_list.add((start, 'up'))
        traversed_list = set()
        active_nodes = set()
        while visit_list:
            node, direction = visit_list.pop()
            if (node, direction) not in traversed_list:
                if node not in observed_list:
                    active_nodes.add(node)
                traversed_list.add((node, direction))
                if direction == 'up' and node not in observed_list:
                    for parent in self.predecessors(node):
                        visit_list.add((parent, 'up'))
                    for child in self.successors(node):
                        visit_list.add((child, 'down'))
                elif direction == 'down':
                    if node not in observed_list:
                        for child in self.successors(node):
                            visit_list.add((child, 'down'))
                    if node in ancestors_list:
                        for parent in self.predecessors(node):
                            visit_list.add((parent, 'up'))
        active_trails[start] = active_nodes
    return active_trails
Returns a dictionary with the given variables as keys and all the nodes reachable from that respective variable as values. Parameters ---------- variables: str or array like variables whose active trails are to be found. observed : List of nodes (optional) If given the active trails would be computed assuming these nodes to be observed. Examples -------- >>> from pgmpy.base import DAG >>> student = DAG() >>> student.add_nodes_from(['diff', 'intel', 'grades']) >>> student.add_edges_from([('diff', 'grades'), ('intel', 'grades')]) >>> student.active_trail_nodes('diff') {'diff': {'diff', 'grades'}} >>> student.active_trail_nodes(['diff', 'intel'], observed='grades') {'diff': {'diff', 'intel'}, 'intel': {'diff', 'intel'}} References ---------- Details of the algorithm can be found in 'Probabilistic Graphical Model Principles and Techniques' - Koller and Friedman Page 75 Algorithm 3.1
378,898
def set_feature_flag_courses(self, feature, course_id, state=None):
    path = {}
    data = {}
    params = {}
    path["course_id"] = course_id
    path["feature"] = feature
    if state is not None:
        self._validate_enum(state, ["off", "allowed", "on"])
        data["state"] = state
    self.logger.debug("PUT /api/v1/courses/{course_id}/features/flags/{feature} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/courses/{course_id}/features/flags/{feature}".format(**path), data=data, params=params, single_item=True)
Set feature flag. Set a feature flag for a given Account, Course, or User. This call will fail if a parent account sets a feature flag for the same feature in any state other than "allowed".
378,899
def _cromwell_move_outputs(metadata, final_dir):
    sample_key = [k for k in metadata["outputs"].keys()
                  if k.endswith(("rgnames__sample", "rgnames__sample_out"))][0]
    project_dir = utils.safe_makedir(os.path.join(final_dir, "project"))
    samples = metadata["outputs"][sample_key]

    def _copy_with_secondary(f, dirname):
        if len(f["secondaryFiles"]) > 1:
            dirname = utils.safe_makedir(
                os.path.join(dirname, os.path.basename(os.path.dirname(f["location"]))))
        if not objectstore.is_remote(f["location"]):
            finalf = os.path.join(dirname, os.path.basename(f["location"]))
            if not utils.file_uptodate(finalf, f["location"]):
                shutil.copy(f["location"], dirname)
        [_copy_with_secondary(sf, dirname) for sf in f["secondaryFiles"]]

    def _write_to_dir(val, dirname):
        if isinstance(val, (list, tuple)):
            [_write_to_dir(v, dirname) for v in val]
        else:
            _copy_with_secondary(val, dirname)

    for k, vals in metadata["outputs"].items():
        if k != sample_key:
            if k.endswith(("summary__multiqc")):
                vs = [v for v in vals if v]
                assert len(vs) == 1
                _write_to_dir(vs[0], project_dir)
            elif len(vals) == len(samples):
                for s, v in zip(samples, vals):
                    if v:
                        _write_to_dir(v, utils.safe_makedir(os.path.join(final_dir, s)))
            elif len(vals) == 1:
                _write_to_dir(vals[0], project_dir)
            elif len(vals) > 0:
                raise ValueError("Unexpected sample and outputs: %s %s %s" % (k, samples, vals))
Move Cromwell outputs to the final upload directory.