[Dataset preview header] Columns: "Unnamed: 0" (int64, values 0-389k), "code" (string, lengths 26-79.6k), "docstring" (string, lengths 1-46.9k).
def __add_parameter(self, param, path_parameters, params):
    descriptor = None
    if not isinstance(param, messages.MessageField):
        name = param.name
        descriptor = self.__parameter_descriptor(param)
        descriptor['location'] = 'path' if name in path_parameters else 'query'
        if descriptor:
            params[name] = descriptor
    else:
        for subfield_list in self.__field_to_subfields(param):
            name = '.'.join(subfield.name for subfield in subfield_list)
            descriptor = self.__parameter_descriptor(subfield_list[-1])
            if name in path_parameters:
                descriptor['required'] = True
                descriptor['location'] = 'path'
            else:
                descriptor.pop('required', None)
                descriptor['location'] = 'query'
            if descriptor:
                params[name] = descriptor
Adds all parameters in a field to a method's parameter descriptor. Simple fields will only have one parameter, but a message field 'x' that corresponds to a message class with fields 'y' and 'z' will result in parameters 'x.y' and 'x.z', for example. The mapping from field to parameters is mostly handled by __field_to_subfields. Args: param: Parameter to be added to the descriptor. path_parameters: A list of parameters matched from a path for this field. For example, for the hypothetical 'x' from above, if the path was '/a/{x.z}/b/{other}' then this list would contain only the element 'x.z', since 'other' does not match this field. params: Dictionary of parameters for the method; each parameter from the field is added to it, keyed by name.
6,801
def site_config_dirs(appname):
    if WINDOWS:
        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        pathlist = [os.path.join(path, appname)]
    elif sys.platform == 'darwin':
        pathlist = [os.path.join('/Library/Application Support', appname)]
    else:
        # XDG default for $XDG_CONFIG_DIRS is /etc/xdg
        xdg_config_dirs = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
        if xdg_config_dirs:
            pathlist = [
                os.sep.join([os.path.expanduser(x), appname])
                for x in xdg_config_dirs.split(os.pathsep)
            ]
        else:
            pathlist = []
        # always look in /etc directly as well
        pathlist.append('/etc')
    return pathlist
Return a list of potential user-shared config dirs for this application. "appname" is the name of the application. Typical user config directories are: Mac OS X: /Library/Application Support/<AppName>/ Unix: /etc or $XDG_CONFIG_DIRS[i]/<AppName>/ for each value in $XDG_CONFIG_DIRS Win XP: C:\Documents and Settings\All Users\Application Data\<AppName>\ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) Win 7: Hidden, but writeable on Win 7: C:\ProgramData\<AppName>\
6,802
def chdir(self, dir, change_os_dir=0): curr=self._cwd try: if dir is not None: self._cwd = dir if change_os_dir: os.chdir(dir.get_abspath()) except OSError: self._cwd = curr raise
Change the current working directory for lookups. If change_os_dir is true, we will also change the "real" cwd to match.
6,803
def get_urls(self, order="total_clicks desc", offset=None, count=None): req_data = [ None, order, fmt_paging(offset, count) ] return self.request("query:Message_Url", req_data)
Returns a list of URLs you've included in messages. List is sorted by ``total_clicks``, starting at an optional integer ``offset``, and optionally limited to the first ``count`` items.
6,804
def _open(filename, mode='r', buffering=0):
    if mode not in ('r', 'rb', 'rU'):
        raise RuntimeError("Invalid open file mode, must be 'r', 'rb', or 'rU'")
    if buffering > MAX_OPEN_BUFFER:
        raise RuntimeError("Invalid buffering value, max buffer size is {}".format(MAX_OPEN_BUFFER))
    return open(filename, mode, buffering)
read only version of open()
6,805
def fingerprint(txt):
    raw_text = txt.upper()
    tokens = sorted(list(set(raw_text.split())))
    res = ' '.join([strip_nonalpha(t) for t in tokens])
    return res
Takes a string and reduces it to a standard form for data matching. Based on the spec at OpenRefine https://github.com/OpenRefine/OpenRefine/wiki/Clustering-In-Depth#fingerprint - remove leading and trailing whitespace - change all characters to their lowercase representation - remove all punctuation and control characters - split the string into whitespace-separated tokens - sort the tokens and remove duplicates - join the tokens back together - normalize extended western characters to their ASCII representation (for example "gödel" → "godel")
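A minimal sketch of the fingerprint above in use. strip_nonalpha is not shown in this row, so the stand-in here is an assumption (keep only alphanumeric characters); note the row's code normalizes to upper case, while the OpenRefine spec describes lower case.

def strip_nonalpha(t):
    # assumed behaviour: keep only the alphanumeric characters of a token
    return ''.join(ch for ch in t if ch.isalnum())

def fingerprint(txt):
    raw_text = txt.upper()
    tokens = sorted(list(set(raw_text.split())))
    return ' '.join([strip_nonalpha(t) for t in tokens])

# different orderings and punctuation collapse to the same key:
print(fingerprint("Tom Cruise"))   # CRUISE TOM
print(fingerprint("Cruise, Tom"))  # CRUISE TOM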
6,806
def wheregreater(self, fieldname, value): return self.mask([elem > value for elem in self[fieldname]])
Returns a new DataTable with rows only where the value at `fieldname` > `value`.
6,807
def send_audio_packet(self, data, *, encode=True):
    self.checked_add('sequence', 1, 65535)
    if encode:
        encoded_data = self.encoder.encode(data, self.encoder.SAMPLES_PER_FRAME)
    else:
        encoded_data = data
    packet = self._get_voice_packet(encoded_data)
    try:
        self.socket.sendto(packet, (self.endpoint_ip, self.voice_port))
    except BlockingIOError:
        log.warning('A packet has been dropped (seq: %s, timestamp: %s)', self.sequence, self.timestamp)
    self.checked_add('timestamp', self.encoder.SAMPLES_PER_FRAME, 4294967295)
Sends an audio packet composed of the data. You must be connected to play audio. Parameters ---------- data: bytes The :term:`py:bytes-like object` denoting PCM or Opus voice data. encode: bool Indicates if ``data`` should be encoded into Opus. Raises ------- ClientException You are not connected. OpusError Encoding the data failed.
6,808
def delete_authoring_nodes(self, editor): editor_node = foundations.common.get_first_item(self.get_editor_nodes(editor)) file_node = editor_node.parent self.unregister_editor(editor_node) self.unregister_file(file_node, raise_exception=False) return True
Deletes the Model authoring Nodes associated with given editor. :param editor: Editor. :type editor: Editor :return: Method success. :rtype: bool
6,809
def idempotency_key(self, idempotency_key): if idempotency_key is None: raise ValueError("Invalid value for `idempotency_key`, must not be `None`") if len(idempotency_key) < 1: raise ValueError("Invalid value for `idempotency_key`, length must be greater than or equal to `1`") self._idempotency_key = idempotency_key
Sets the idempotency_key of this BatchUpsertCatalogObjectsRequest. A value you specify that uniquely identifies this request among all your requests. A common way to create a valid idempotency key is to use a Universally unique identifier (UUID). If you're unsure whether a particular request was successful, you can reattempt it with the same idempotency key without worrying about creating duplicate objects. See [Idempotency](/basics/api101/idempotency) for more information. :param idempotency_key: The idempotency_key of this BatchUpsertCatalogObjectsRequest. :type: str
6,810
def bump(self, bump_part): major, minor, patch, stage, n = tuple(self) if bump_part not in {"major", "minor", "patch"}: if bump_part not in self.stages: raise ValueError(f"Unknown {bump_part} stage") if self.stage == "final" and bump_part == "final": raise ValueError(f"{self} is already in final stage.") if bump_part == self.stage: n += 1 else: new_stage_number = tuple(self.stages).index(bump_part) if new_stage_number < self._stage_number: raise ValueError(f"{bump_part} stage is previous to {self}") stage = bump_part n = 0 else: if self.stage != "final": raise ValueError( f"{self} is a pre-release version." f" Can't do a {bump_part} version bump" ) if bump_part == "major": major += 1 minor, patch = 0, 0 elif bump_part == "minor": minor += 1 patch = 0 else: patch += 1 return Version(major=major, minor=minor, patch=patch, stage=stage, n=n)
Return a new bumped version instance.
6,811
def merge_like_ops(self): cigs = [] for op, grps in groupby(self.items(), itemgetter(1)): cigs.append((sum(g[0] for g in grps), op)) return Cigar(self.string_from_elements(cigs))
>>> Cigar("1S20M").merge_like_ops() Cigar('1S20M') >>> Cigar("1S1S20M").merge_like_ops() Cigar('2S20M') >>> Cigar("1S1S1S20M").merge_like_ops() Cigar('3S20M') >>> Cigar("1S1S1S20M1S1S").merge_like_ops() Cigar('3S20M2S')
6,812
def organization_fields_reorder(self, data, **kwargs):
    api_path = "/api/v2/organization_fields/reorder.json"
    return self.call(api_path, method="PUT", data=data, **kwargs)
https://developer.zendesk.com/rest_api/docs/core/organization_fields#reorder-organization-field
6,813
def get(self, sid):
    return AssetVersionContext(
        self._version,
        service_sid=self._solution['service_sid'],
        asset_sid=self._solution['asset_sid'],
        sid=sid,
    )
Constructs an AssetVersionContext :param sid: The sid :returns: twilio.rest.serverless.v1.service.asset.asset_version.AssetVersionContext :rtype: twilio.rest.serverless.v1.service.asset.asset_version.AssetVersionContext
6,814
def to_eng(num_in): x = decimal.Decimal(str(num_in)) eng_not = x.normalize().to_eng_string() return(eng_not)
Return number in engineering notation.
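A short usage sketch of to_eng above; the outputs follow Python's decimal engineering-string rules, where the exponent is snapped to a multiple of 3.

import decimal

def to_eng(num_in):
    x = decimal.Decimal(str(num_in))
    return x.normalize().to_eng_string()

print(to_eng(1234500))  # 1.2345E+6
print(to_eng(10))       # 10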
6,815
def get_rds_instances_by_region(self, region):
    try:
        conn = self.connect_to_aws(rds, region)
        if conn:
            instances = conn.get_all_dbinstances()
            for instance in instances:
                self.add_rds_instance(instance, region)
    except boto.exception.BotoServerError as e:
        error = e.reason
        if e.error_code == 'AuthFailure':
            error = self.get_auth_error_message()
        if not e.reason == "Forbidden":
            error = "Looks like AWS RDS is down:\n%s" % e.message
        self.fail_with_error(error, 'getting RDS instances')
Makes an AWS API call to list the RDS instances in a particular region
6,816
def flushall(args): cluster = Cluster.from_node(ClusterNode.from_uri(args.cluster)) for node in cluster.masters: node.flushall()
Execute flushall in all cluster nodes.
6,817
def get_review_history_statuses(instance, reverse=False): review_history = getReviewHistory(instance, reverse=reverse) return map(lambda event: event["review_state"], review_history)
Returns a list with the statuses of the instance from the review_history
6,818
def ParseMultiple(self, stats, file_objects, knowledge_base):
    _ = knowledge_base
    found_files = self._Combine(stats, file_objects)
    # Filter to the release files we know about and try them by weight.
    weights = [w for w in self.WEIGHTS if w.path in found_files]
    weights = sorted(weights, key=lambda x: x.weight)
    for _, path, handler in weights:
        contents = found_files[path]
        obj = handler(contents)
        complete, result = obj.Parse()
        if result is None:
            continue
        elif complete:
            yield rdf_protodict.Dict({
                'os_release': result.release,
                'os_major_version': result.major,
                'os_minor_version': result.minor
            })
            return
    # Fall back to /etc/system-release.
    system_release = found_files.get('/etc/system-release', None)
    if system_release and 'Red Hat' in system_release:
        match_object = ReleaseFileParseHandler.RH_RE.search(system_release)
        if match_object and match_object.lastindex > 1:
            yield rdf_protodict.Dict({
                'os_release': 'RedHat',
                'os_major_version': int(match_object.group(1)),
                'os_minor_version': int(match_object.group(2))
            })
            return
    # Fall back to the os-release spec file.
    results_dict = self._ParseOSReleaseFile(found_files)
    if results_dict is not None:
        yield results_dict
        return
    # No successful parse, so we don't know the release.
    yield rdf_anomaly.Anomaly(
        type='PARSER_ANOMALY',
        symptom='Unable to determine distribution.')
Parse the found release files.
6,819
def strelka_somatic_variant_stats(variant, variant_metadata): sample_info = variant_metadata["sample_info"] assert len(sample_info) == 2, "More than two samples found in the somatic VCF" tumor_stats = _strelka_variant_stats(variant, sample_info["TUMOR"]) normal_stats = _strelka_variant_stats(variant, sample_info["NORMAL"]) return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
Parse out the variant calling statistics for a given variant from a Strelka VCF Parameters ---------- variant : varcode.Variant variant_metadata : dict Dictionary of sample to variant calling statistics, corresponds to the sample columns in a Strelka VCF Returns ------- SomaticVariantStats
6,820
def get_ldap_filter(ldap_filter): if ldap_filter is None: return None if isinstance(ldap_filter, (LDAPFilter, LDAPCriteria)): return ldap_filter elif is_string(ldap_filter): return _parse_ldap(ldap_filter) raise TypeError( "Unhandled filter type {0}".format(type(ldap_filter).__name__) )
Retrieves the LDAP filter object corresponding to the given filter. Parses the argument if it is a string. :param ldap_filter: An LDAP filter (LDAPFilter or string) :return: The corresponding filter, can be None :raise ValueError: Invalid filter string found :raise TypeError: Unknown filter type
6,821
async def update_bucket(self, *, chat: typing.Union[str, int, None] = None, user: typing.Union[str, int, None] = None, bucket: typing.Dict = None, **kwargs): raise NotImplementedError
Update bucket for user in chat You can use the bucket parameter and/or kwargs. Chat or user is always required. If one of them is not provided, you have to set the missing value based on the provided one. :param bucket: :param chat: :param user: :param kwargs: :return:
6,822
def SELFDESTRUCT(self, recipient): recipient = Operators.EXTRACT(recipient, 0, 160) address = self.address if issymbolic(recipient): logger.info("Symbolic recipient on self destruct") recipient = solver.get_value(self.constraints, recipient) if recipient not in self.world: self.world.create_account(address=recipient) self.world.send_funds(address, recipient, self.world.get_balance(address)) self.world.delete_account(address) raise EndTx()
Halt execution and register account for later deletion
6,823
def download_image(image_id, url, x1, y1, x2, y2, output_dir):
    output_filename = os.path.join(output_dir, image_id + '.jpg')
    if os.path.exists(output_filename):
        return True
    try:
        url_file = urlopen(url)
        if url_file.getcode() != 200:
            return False
        image_buffer = url_file.read()
        image = Image.open(BytesIO(image_buffer)).convert('RGB')
        w = image.size[0]
        h = image.size[1]
        image = image.crop((int(x1 * w), int(y1 * h), int(x2 * w), int(y2 * h)))
        image = image.resize((299, 299), resample=Image.ANTIALIAS)
        image.save(output_filename)
    except IOError:
        return False
    return True
Downloads one image, crops it, resizes it and saves it locally.
6,824
def get_service_agreements(storage_path, status='pending'):
    conn = sqlite3.connect(storage_path)
    try:
        cursor = conn.cursor()
        return [
            row for row in cursor.execute(
                # query text assumed; the original SQL was elided in the source
                'SELECT * FROM service_agreements WHERE status=?;',
                (status,))
        ]
    finally:
        conn.close()
Get service agreements pending to be executed. :param storage_path: storage path for the internal db, str :param status: status value to filter the agreements by, str :return:
6,825
def to_hex_string(data):
    if isinstance(data, int):
        return '%02X' % data
    return ':'.join([('%02X' % o) for o in data])
Convert list of integers to a hex string, separated by ":"
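A usage sketch of to_hex_string; the ':' separator comes from the docstring, while the '%02X' two-digit zero-padded format is an assumption.

def to_hex_string(data):
    if isinstance(data, int):
        return '%02X' % data
    return ':'.join([('%02X' % o) for o in data])

print(to_hex_string([222, 173, 190, 239]))  # DE:AD:BE:EF
print(to_hex_string(10))                    # 0A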
6,826
def score_n2(matrix, matrix_size): score = 0 for i in range(matrix_size - 1): for j in range(matrix_size - 1): bit = matrix[i][j] if bit == matrix[i][j + 1] and bit == matrix[i + 1][j] \ and bit == matrix[i + 1][j + 1]: score += 1 return score * 3
Implements the penalty score feature 2. ISO/IEC 18004:2015(E) -- 7.8.3 Evaluation of data masking results - Table 11 (page 54) ============================== ==================== =============== Feature Evaluation condition Points ============================== ==================== =============== Block of modules in same color Block size = m × n N2 ×(m-1)×(n-1) ============================== ==================== =============== N2 = 3 :param matrix: The matrix to evaluate :param matrix_size: The width (or height) of the matrix. :return int: The penalty score (feature 2) of the matrix.
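A quick check of score_n2 (duplicated here so the snippet runs standalone): an all-dark 3×3 matrix contains four 2×2 same-colored blocks, so the penalty is 4 × 3 = 12.

def score_n2(matrix, matrix_size):
    score = 0
    for i in range(matrix_size - 1):
        for j in range(matrix_size - 1):
            bit = matrix[i][j]
            if bit == matrix[i][j + 1] and bit == matrix[i + 1][j] \
                    and bit == matrix[i + 1][j + 1]:
                score += 1
    return score * 3

print(score_n2([[1, 1, 1], [1, 1, 1], [1, 1, 1]], 3))  # 12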
6,827
def get_instance(key, expire=None): global _instances try: instance = _instances[key] except KeyError: instance = RedisSet( key, _redis, expire=expire ) _instances[key] = instance return instance
Return an instance of RedisSet.
6,828
def cellpar_to_cell(cellpar, ab_normal=(0,0,1), a_direction=None): if a_direction is None: if np.linalg.norm(np.cross(ab_normal, (1,0,0))) < 1e-5: a_direction = (0,0,1) else: a_direction = (1,0,0) ad = np.array(a_direction) Z = unit_vector(ab_normal) X = unit_vector(ad - dot(ad, Z)*Z) Y = np.cross(Z, X) alpha, beta, gamma = 90., 90., 90. if isinstance(cellpar, (int, float)): a = b = c = cellpar elif len(cellpar) == 1: a = b = c = cellpar[0] elif len(cellpar) == 3: a, b, c = cellpar alpha, beta, gamma = 90., 90., 90. else: a, b, c, alpha, beta, gamma = cellpar alpha *= pi/180.0 beta *= pi/180.0 gamma *= pi/180.0 va = a * np.array([1, 0, 0]) vb = b * np.array([cos(gamma), sin(gamma), 0]) cx = cos(beta) cy = (cos(alpha) - cos(beta)*cos(gamma))/sin(gamma) cz = sqrt(1. - cx*cx - cy*cy) vc = c * np.array([cx, cy, cz]) abc = np.vstack((va, vb, vc)) T = np.vstack((X, Y, Z)) cell = dot(abc, T) return cell
Return a 3x3 cell matrix from `cellpar` = [a, b, c, alpha, beta, gamma]. The returned cell is orientated such that a and b are normal to `ab_normal` and a is parallel to the projection of `a_direction` in the a-b plane. Default `a_direction` is (1,0,0), unless this is parallel to `ab_normal`, in which case default `a_direction` is (0,0,1). The returned cell has the vectors va, vb and vc along the rows. The cell will be oriented such that va and vb are normal to `ab_normal` and va will be along the projection of `a_direction` onto the a-b plane. Example: >>> cell = cellpar_to_cell([1, 2, 4, 10, 20, 30], (0,1,1), (1,2,3)) >>> np.round(cell, 3) array([[ 0.816, -0.408, 0.408], [ 1.992, -0.13 , 0.13 ], [ 3.859, -0.745, 0.745]])
6,829
def list_secrets(self, path, mount_point=DEFAULT_MOUNT_POINT):
    api_path = '/v1/{mount_point}/{path}'.format(mount_point=mount_point, path=path)
    response = self._adapter.list(
        url=api_path,
    )
    return response.json()
Return a list of key names at the specified location. Folders are suffixed with /. The input must be a folder; list on a file will not return a value. Note that no policy-based filtering is performed on keys; do not encode sensitive information in key names. The values themselves are not accessible via this command. Supported methods: LIST: /{mount_point}/{path}. Produces: 200 application/json :param path: Specifies the path of the secrets to list. This is specified as part of the URL. :type path: str | unicode :param mount_point: The "path" the secret engine was mounted on. :type mount_point: str | unicode :return: The JSON response of the list_secrets request. :rtype: dict
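A hedged usage sketch against hvac's KV v1 client, which this method appears to belong to; the server address, token, and secret names below are hypothetical.

import hvac

client = hvac.Client(url='http://localhost:8200', token='...')  # hypothetical server/token
resp = client.secrets.kv.v1.list_secrets(path='myapp', mount_point='secret')
print(resp['data']['keys'])  # e.g. ['config', 'creds/'] -- folders carry a trailing '/'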
6,830
def _convert_angle_limit(angle, joint, **kwargs):
    angle_pypot = angle
    # invert the sign for indirectly oriented joints
    if joint["orientation"] == "indirect":
        angle_pypot = -1 * angle_pypot
    return angle_pypot * np.pi / 180
Converts the limit angle of the PyPot JSON file to the internal format
6,831
def detect():
    compiler = new_compiler()
    hasopenmp = hasfunction(compiler, 'omp_get_num_threads')
    needs_gomp = hasopenmp
    if not hasopenmp:
        compiler.add_library('gomp')
        hasopenmp = hasfunction(compiler, 'omp_get_num_threads')
        needs_gomp = hasopenmp
    return hasopenmp
Does this compiler support OpenMP parallelization?
6,832
def recover_from_duplicatekeyerror(self, e):
    if isinstance(e, DuplicateKeyError):
        try:
            return self.get_by_params(e.process_name, e.timeperiod, e.start_id, e.end_id)
        except LookupError as e:
            # log messages assumed; the original strings were elided in the source
            self.logger.error('Unable to recover from DuplicateKeyError: {0}'.format(e), exc_info=True)
    else:
        msg = 'Unable to recover from DuplicateKeyError: unexpected error type {0}'.format(type(e).__name__)
        self.logger.error(msg)
method tries to recover from DuplicateKeyError
6,833
def scanAllProcessesForOpenFile(searchPortion, isExactMatch=True, ignoreCase=False): pids = getAllRunningPids() mappingResults = [scanProcessForOpenFile(pid, searchPortion, isExactMatch, ignoreCase) for pid in pids] ret = {} for i in range(len(pids)): if mappingResults[i] is not None: ret[pids[i]] = mappingResults[i] return ret
scanAllProcessesForOpenFile - Scans all processes on the system for a given filename @param searchPortion <str> - Filename to check @param isExactMatch <bool> Default True - If match should be exact, otherwise a partial match is performed. @param ignoreCase <bool> Default False - If True, search will be performed case-insensitively @return - <dict> - A dictionary of pid -> mappingResults for each pid that matched the search pattern. For format of "mappingResults", @see scanProcessForOpenFile
6,834
def get_all_user_ssh_settings(application_name):
    settings = get_ssh_settings(application_name)
    settings.update(get_ssh_settings(application_name, user='nova'))
    return settings
Retrieve the known host entries and public keys for application Retrieve the known host entries and public keys for application for all units of the given application related to this application for root user and nova user. :param application_name: Name of application eg nova-compute-something :type application_name: str :returns: Public keys + host keys for all units for app + user combination. :rtype: dict
6,835
def _get_all_indexes(self): if self.parser: return [v.index for v in self.parser.get_volumes()] + [d.index for d in self.parser.disks] else: return None
Returns all indexes available in the parser
6,836
def _get_blkid_type(self):
    try:
        result = _util.check_output_(['blkid', '-p', '-O', str(self.offset), self.get_raw_path()])
        if not result:
            return None
        blkid_result = dict(re.findall(r'([A-Z]+)="(.+?)"', result))
        self.info['blkid_data'] = blkid_result
        if 'PTTYPE' in blkid_result and 'TYPE' not in blkid_result:
            return blkid_result.get('PTTYPE')
        else:
            return blkid_result.get('TYPE')
    except Exception:
        return None
Retrieves the FS type from the blkid command.
6,837
def find_noncopyable_vars(class_type, already_visited_cls_vars=None): assert isinstance(class_type, class_declaration.class_t) logger = utils.loggers.cxx_parser mvars = class_type.variables( lambda v: not v.type_qualifiers.has_static, recursive=False, allow_empty=True) noncopyable_vars = [] if already_visited_cls_vars is None: already_visited_cls_vars = [] message = ( "__contains_noncopyable_mem_var - %s - TRUE - " + "contains const member variable") for mvar in mvars: var_type = type_traits.remove_reference(mvar.decl_type) if type_traits.is_const(var_type): no_const = type_traits.remove_const(var_type) if type_traits.is_fundamental(no_const) or is_enum(no_const): logger.debug( (message + "- fundamental or enum"), var_type.decl_string) noncopyable_vars.append(mvar) if is_class(no_const): logger.debug((message + " - class"), var_type.decl_string) noncopyable_vars.append(mvar) if type_traits.is_array(no_const): logger.debug((message + " - array"), var_type.decl_string) noncopyable_vars.append(mvar) if type_traits.is_pointer(var_type): continue if class_traits.is_my_case(var_type): cls = class_traits.get_declaration(var_type) if cls in already_visited_cls_vars: continue already_visited_cls_vars.append(cls) if is_noncopyable(cls, already_visited_cls_vars): logger.debug( (message + " - class that is not copyable"), var_type.decl_string) noncopyable_vars.append(mvar) logger.debug(( "__contains_noncopyable_mem_var - %s - FALSE - doesn't " + "contain noncopyable members"), class_type.decl_string) return noncopyable_vars
Returns list of all `noncopyable` variables. If an already_visited_cls_vars list is provided as argument, the returned list will not contain these variables. This list will be extended with whatever variables pointing to classes have been found. Args: class_type (declarations.class_t): the class to be searched. already_visited_cls_vars (list): optional list of vars that should not be checked a second time, to prevent infinite recursions. Returns: list: list of all `noncopyable` variables.
6,838
def get_self_attention_bias(x): x_shape = common_layers.shape_list(x) self_attention_bias = common_attention.attention_bias_lower_triangle( x_shape[1]) return self_attention_bias
Creates masked self attention bias. Args: x: A tensor of shape [batch, length, depth] Returns: self_attention_bias: A tensor of shape [length, length, 1]
6,839
def _issubclass(subclass, superclass, bound_Generic=None, bound_typevars=None,
                bound_typevars_readonly=False, follow_fwd_refs=True, _recursion_check=None):
    if bound_typevars is None:
        bound_typevars = {}
    if superclass is Any:
        return True
    if subclass == superclass:
        return True
    if subclass is Any:
        return superclass is Any
    if isinstance(subclass, ForwardRef) or isinstance(superclass, ForwardRef):
        if not follow_fwd_refs:
            raise pytypes.ForwardRefError(
                "ForwardRef encountered, but follow_fwd_refs is False: '%s'\n%s" %
                ((subclass if isinstance(subclass, ForwardRef) else superclass).__forward_arg__,
                 "Retry with follow_fwd_refs=True."))
        # Guard against recursion once ForwardRefs are in play:
        if _recursion_check is None:
            _recursion_check = {superclass: {subclass}}
        elif superclass in _recursion_check:
            if subclass in _recursion_check[superclass]:
                return False  # recursion detected
            else:
                _recursion_check[superclass].add(subclass)
        else:
            _recursion_check[superclass] = {subclass}
        if isinstance(subclass, ForwardRef):
            if not subclass.__forward_evaluated__:
                raise pytypes.ForwardRefError(
                    "ForwardRef in subclass not evaluated: '%s'\n%s" %
                    (subclass.__forward_arg__, "Use pytypes.resolve_fw_decl"))
            else:
                return _issubclass(subclass.__forward_value__, superclass, bound_Generic,
                                   bound_typevars, bound_typevars_readonly, follow_fwd_refs,
                                   _recursion_check)
        else:  # isinstance(superclass, ForwardRef)
            if not superclass.__forward_evaluated__:
                raise pytypes.ForwardRefError(
                    "ForwardRef in superclass not evaluated: '%s'\n%s" %
                    (superclass.__forward_arg__, "Use pytypes.resolve_fw_decl"))
            else:
                return _issubclass(subclass, superclass.__forward_value__, bound_Generic,
                                   bound_typevars, bound_typevars_readonly, follow_fwd_refs,
                                   _recursion_check)
    if pytypes.apply_numeric_tower:
        if superclass is float and subclass is int:
            return True
        elif superclass is complex and (subclass is int or subclass is float):
            return True
    if superclass in _extra_dict:
        superclass = _extra_dict[superclass]
    try:
        if _issubclass_2(subclass, Empty, bound_Generic, bound_typevars,
                         bound_typevars_readonly, follow_fwd_refs, _recursion_check):
            for empty_target in [Container, Sized, Iterable]:
                try:
                    if _issubclass_2(superclass.__origin__, empty_target, bound_Generic,
                                     bound_typevars, bound_typevars_readonly, follow_fwd_refs,
                                     _recursion_check):
                        return _issubclass_2(subclass.__args__[0], superclass.__origin__,
                                             bound_Generic, bound_typevars,
                                             bound_typevars_readonly, follow_fwd_refs,
                                             _recursion_check)
                except:
                    pass
                if _issubclass_2(superclass, empty_target, bound_Generic, bound_typevars,
                                 bound_typevars_readonly, follow_fwd_refs, _recursion_check):
                    return _issubclass_2(subclass.__args__[0], superclass, bound_Generic,
                                         bound_typevars, bound_typevars_readonly,
                                         follow_fwd_refs, _recursion_check)
    except:
        pass
    try:
        if _issubclass_2(superclass, Empty, bound_Generic, bound_typevars,
                         bound_typevars_readonly, follow_fwd_refs, _recursion_check):
            for empty_target in [Container, Sized, Iterable]:
                try:
                    if _issubclass_2(subclass.__origin__, empty_target, bound_Generic,
                                     bound_typevars, bound_typevars_readonly, follow_fwd_refs,
                                     _recursion_check):
                        return _issubclass_2(subclass.__origin__, superclass.__args__[0],
                                             bound_Generic, bound_typevars,
                                             bound_typevars_readonly, follow_fwd_refs,
                                             _recursion_check)
                except:
                    pass
                if _issubclass_2(subclass, empty_target, bound_Generic, bound_typevars,
                                 bound_typevars_readonly, follow_fwd_refs, _recursion_check):
                    return _issubclass_2(subclass, superclass.__args__[0], bound_Generic,
                                         bound_typevars, bound_typevars_readonly,
                                         follow_fwd_refs, _recursion_check)
    except:
        pass
    if isinstance(superclass, TypeVar):
        # A TypeVar's bound constrains the acceptable subclass:
        if superclass.__bound__ is not None:
            if not _issubclass(subclass, superclass.__bound__, bound_Generic, bound_typevars,
                               bound_typevars_readonly, follow_fwd_refs, _recursion_check):
                return False
        if bound_typevars is not None:
            try:
                if superclass.__contravariant__:
                    return _issubclass(bound_typevars[superclass], subclass, bound_Generic,
                                       bound_typevars, bound_typevars_readonly,
                                       follow_fwd_refs, _recursion_check)
                elif superclass.__covariant__:
                    return _issubclass(subclass, bound_typevars[superclass], bound_Generic,
                                       bound_typevars, bound_typevars_readonly,
                                       follow_fwd_refs, _recursion_check)
                else:
                    return _issubclass(bound_typevars[superclass], subclass, bound_Generic,
                                       bound_typevars, bound_typevars_readonly,
                                       follow_fwd_refs, _recursion_check) and \
                           _issubclass(subclass, bound_typevars[superclass], bound_Generic,
                                       bound_typevars, bound_typevars_readonly,
                                       follow_fwd_refs, _recursion_check)
            except:
                pass
        if bound_Generic is not None:
            superclass = get_arg_for_TypeVar(superclass, bound_Generic)
            if superclass is not None:
                return _issubclass(subclass, superclass, bound_Generic, bound_typevars,
                                   bound_typevars_readonly, follow_fwd_refs, _recursion_check)
        if bound_typevars is not None:
            if bound_typevars_readonly:
                return False
            else:
                # bind the TypeVar to subclass:
                bound_typevars[superclass] = subclass
                return True
        return False
    if isinstance(subclass, TypeVar):
        if bound_typevars is not None:
            try:
                return _issubclass(bound_typevars[subclass], superclass, bound_Generic,
                                   bound_typevars, bound_typevars_readonly, follow_fwd_refs,
                                   _recursion_check)
            except:
                pass
        if bound_Generic is not None:
            subclass = get_arg_for_TypeVar(subclass, bound_Generic)
            if subclass is not None:
                return _issubclass(subclass, superclass, bound_Generic, bound_typevars,
                                   bound_typevars_readonly, follow_fwd_refs, _recursion_check)
        if subclass.__bound__ is not None:
            return _issubclass(subclass.__bound__, superclass, bound_Generic, bound_typevars,
                               bound_typevars_readonly, follow_fwd_refs, _recursion_check)
        return False
    res = _issubclass_2(subclass, superclass, bound_Generic, bound_typevars,
                        bound_typevars_readonly, follow_fwd_refs, _recursion_check)
    return res
Access this via ``pytypes.is_subtype``. Works like ``issubclass``, but supports PEP 484 style types from ``typing`` module. subclass : type The type to check for being a subtype of ``superclass``. superclass : type The type to check for being a supertype of ``subclass``. bound_Generic : Optional[Generic] A type object holding values for unbound typevars occurring in ``subclass`` or ``superclass``. Default: None If subclass or superclass contains unbound ``TypeVar``s and ``bound_Generic`` is provided, this function attempts to retrieve corresponding values for the unbound ``TypeVar``s from ``bound_Generic``. In collision case with ``bound_typevars`` the value from ``bound_Generic`` is preferred. bound_typevars : Optional[Dict[typing.TypeVar, type]] A dictionary holding values for unbound typevars occurring in ``subclass`` or ``superclass``. Default: {} Depending on ``bound_typevars_readonly`` pytypes can also bind values to typevars as needed. This is done by inserting according mappings into this dictionary. This can e.g. be useful to infer values for ``TypeVar``s or to consistently check a set of ``TypeVar``s across multiple calls, e.g. when checking all arguments of a function call. In collision case with ``bound_Generic`` the value from ``bound_Generic`` is preferred. bound_typevars_readonly : bool Defines if pytypes is allowed to write into the ``bound_typevars`` dictionary. Default: True If set to False, pytypes cannot assign values to ``TypeVar``s, but only checks regarding values already present in ``bound_typevars`` or ``bound_Generic``. follow_fwd_refs : bool Defines if ``_ForwardRef``s should be explored. Default: True If this is set to ``False`` and a ``_ForwardRef`` is encountered, pytypes aborts the check raising a ForwardRefError. _recursion_check : Optional[Dict[type, Set[type]]] Internally used for recursion checks. Default: None If ``Union``s and ``_ForwardRef``s occur in the same type, recursions can occur. As soon as a ``_ForwardRef`` is encountered, pytypes automatically creates this dictionary and continues in recursion-proof manner.
6,840
def file_system(self): if self._file_system is None: self._file_system = self.scheduler.get_file_system() return self._file_system
Gets the filesystem corresponding to the open scheduler.
6,841
def has_cell(self, s): for t in self.transitions: if len(t.lhs[s]) != 1: return False if len(t.rhs[s]) != 1: return False if t.lhs[s].position != 0: return False if t.rhs[s].position != 0: return False return True
Tests whether store `s` is a cell, that is, it uses exactly one cell, and that cell can take on only a finite number of states.
6,842
def wait_for_notification(self, handle: int, delegate, notification_timeout: float):
    if not self.is_connected():
        # exception message assumed; it was elided in the source
        raise BluetoothBackendException('Not connected to any Bluetooth device.')
    attempt = 0
    delay = 10
    _LOGGER.debug("Enter write_ble (%s)", current_thread())
    while attempt <= self.retries:
        cmd = "gatttool --device={} --addr-type={} --char-write-req -a {} -n {} --adapter={} --listen".format(
            self._mac, self.address_type, self.byte_to_handle(handle),
            self.bytes_to_string(self._DATA_MODE_LISTEN), self.adapter)
        _LOGGER.debug("Running gatttool with a timeout of %d: %s", notification_timeout, cmd)
        with Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, preexec_fn=os.setsid) as process:
            try:
                result = process.communicate(timeout=notification_timeout)[0]
                _LOGGER.debug("Finished gatttool")
            except TimeoutExpired:
                # send a signal to the whole process group to stop listening
                os.killpg(process.pid, signal.SIGINT)
                result = process.communicate()[0]
                _LOGGER.debug("Listening stopped forcefully after timeout.")
        result = result.decode("utf-8").strip()
        if "Write Request failed" in result:
            raise BluetoothBackendException('Error writing handle to device: {}'.format(result))
        _LOGGER.debug("Got %s from gatttool", result)
        if "successfully" in result:
            _LOGGER.debug("Exit write_ble with result (%s)", current_thread())
            for element in self.extract_notification_payload(result):
                delegate.handleNotification(handle, bytes([int(x, 16) for x in element.split()]))
            return True
        attempt += 1
        _LOGGER.debug("Waiting for %s seconds before retrying", delay)
        if attempt < self.retries:
            time.sleep(delay)
            delay *= 2
    raise BluetoothBackendException("Exit write_ble, no data ({})".format(current_thread()))
Listen for characteristics changes from a BLE address. @param: mac - MAC address in format XX:XX:XX:XX:XX:XX @param: handle - BLE characteristics handle in format 0xXX a value of 0x0100 is written to register for listening @param: delegate - gatttool receives the --listen argument and the delegate object's handleNotification is called for every returned row @param: notification_timeout
6,843
def iter_list(self, id, *args, **kwargs): l = partial(self.list, id) return self.service.iter_list(l, *args, **kwargs)
Get a list of attachments. Whereas ``list`` fetches a single page of attachments according to its ``limit`` and ``page`` arguments, ``iter_list`` returns all attachments by internally making successive calls to ``list``. :param id: Device ID as an int. :param args: Arguments that ``list`` takes. :param kwargs: Optional arguments that ``list`` takes. :return: :class:`attachments.Attachment <attachments.Attachment>` list
6,844
def __compare_parameters(self, width, height, zoom, parameters):
    # skip the cache entirely if caching is disabled in the config
    # (config key name assumed; it was elided in the source)
    if not global_gui_config.get_config_value('ENABLE_CACHING', True):
        return False
    if not self.__image:
        return False
    if self.__width != width or self.__height != height:
        return False
    if zoom > self.__zoom * self.__zoom_multiplicator:
        return False
    if zoom < self.__zoom / self.__zoom_multiplicator:
        return False
    for key in parameters:
        try:
            if key not in self.__last_parameters or self.__last_parameters[key] != parameters[key]:
                return False
        except (AttributeError, ValueError):
            # some values (e.g. numpy arrays) cannot be compared with != directly
            try:
                import numpy
                if isinstance(self.__last_parameters[key], numpy.ndarray):
                    return numpy.array_equal(self.__last_parameters[key], parameters[key])
            except ImportError:
                return False
            return False
    return True
Compare parameters for equality Checks whether a cached image exists, whether the dimensions agree and finally whether the properties are equal. If so, True is returned, otherwise False. :param width: The width of the image :param height: The height of the image :param zoom: The current scale/zoom factor :param parameters: The parameters used for the image :return: True if all parameters are equal, False otherwise
6,845
def tointerval(s):
    if isinstance(s, basestring):
        m = coord_re.search(s)
        # group names assumed from a chrom:start-stop[strand] coordinate regex
        if m.group('strand'):
            return pybedtools.create_interval_from_list([
                m.group('chrom'), m.group('start'), m.group('stop'), '.', '0', m.group('strand')])
        else:
            return pybedtools.create_interval_from_list([
                m.group('chrom'), m.group('start'), m.group('stop'), ])
    return s
If string, then convert to an interval; otherwise just return the input
6,846
def translate(term=None, phrase=None, api_key=GIPHY_PUBLIC_KEY, strict=False, rating=None): return Giphy(api_key=api_key, strict=strict).translate( term=term, phrase=phrase, rating=rating)
Shorthand for creating a Giphy api wrapper with the given api key and then calling the translate method.
6,847
def absent(self, name, rdtype=None): if isinstance(name, (str, unicode)): name = dns.name.from_text(name, None) if rdtype is None: rrset = self.find_rrset(self.answer, name, dns.rdataclass.NONE, dns.rdatatype.ANY, dns.rdatatype.NONE, None, True, True) else: if isinstance(rdtype, (str, unicode)): rdtype = dns.rdatatype.from_text(rdtype) rrset = self.find_rrset(self.answer, name, dns.rdataclass.NONE, rdtype, dns.rdatatype.NONE, None, True, True)
Require that an owner name (and optionally an rdata type) does not exist as a prerequisite to the execution of the update.
6,848
def get_task_df(self):
    term_time_df = self._get_term_time_df()
    # column names ('term', 'time', 'count') assumed; they were elided in the source
    terms_to_include = (
        term_time_df
        .groupby('term')['count']
        .sum()
        .sort_values(ascending=False)
        .iloc[:self.num_terms_to_include].index
    )
    task_df = (
        term_time_df[term_time_df.term.isin(terms_to_include)][['term', 'time']]
        .groupby('term')
        .apply(lambda x: pd.Series(self._find_sequences(x['time'])))
        .reset_index()
        .rename({0: 'sequence'}, axis=1)
        .reset_index()
        .assign(start=lambda x: x['sequence'].apply(lambda x: x[0]))
        .assign(end=lambda x: x['sequence'].apply(lambda x: x[1]))
        [['term', 'start', 'end']]
    )
    return task_df
Returns ------- pd.DataFrame: one row per contiguous run of a top term, with columns term, start and end.
6,849
def adjustText(self): pos = self.cursorPosition() self.blockSignals(True) super(XLineEdit, self).setText(self.formatText(self.text())) self.setCursorPosition(pos) self.blockSignals(False)
Updates the text based on the current format options.
6,850
def to_xdr_object(self): return Xdr.types.Memo(type=Xdr.const.MEMO_TEXT, text=self.text)
Creates an XDR Memo object for a transaction with MEMO_TEXT.
6,851
def write_file(self, filename):
    writer = self.__str__()[:-1].decode()
    with open(filename, 'w') as fout:
        fout.write(writer)
Write the xml data into the file. Parameters ---------- filename: Name of the file. Examples ------- >>> writer = ProbModelXMLWriter(model) >>> writer.write_file(test_file)
6,852
def waypoint_request_list_send(self): if self.mavlink10(): self.mav.mission_request_list_send(self.target_system, self.target_component) else: self.mav.waypoint_request_list_send(self.target_system, self.target_component)
wrapper for waypoint_request_list_send
6,853
def _install_packages(path, packages):
    def _filter_blacklist(package):
        blacklist = ['-i', '#', 'Python==', 'python-lambda==']
        return all(package.startswith(entry) is False for entry in blacklist)
    filtered_packages = filter(_filter_blacklist, packages)
    for package in filtered_packages:
        # strip any editable-install prefix from requirements lines
        if package.startswith('-e '):
            package = package.replace('-e ', '')
        print('Installing {package}'.format(package=package))
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', package,
                               '-t', path, '--ignore-installed'])
    print('Install directory contents are now: {directory}'.format(directory=os.listdir(path)))
Install all packages listed to the target directory. Ignores any package that includes Python itself and python-lambda as well since it's only needed for deploying and not running the code :param str path: Path to copy installed pip packages to. :param list packages: A list of packages to be installed via pip.
6,854
def isMultiContract(self, contract): if contract.m_secType == "FUT" and contract.m_expiry == "": return True if contract.m_secType in ["OPT", "FOP"] and \ (contract.m_expiry == "" or contract.m_strike == "" or contract.m_right == ""): return True tickerId = self.tickerId(contract) if tickerId in self.contract_details and \ len(self.contract_details[tickerId]["contracts"]) > 1: return True return False
Tells whether this contract has sub-contracts with multiple expiries/strikes/sides
6,855
def kill_all(job_queue, reason='None given', states=None):
    if states is None:
        states = ['STARTING', 'RUNNABLE', 'RUNNING']
    batch = boto3.client('batch')
    # cancel jobs that have not started running yet
    runnable = batch.list_jobs(jobQueue=job_queue, jobStatus='RUNNABLE')
    job_info = runnable.get('jobSummaryList')
    if job_info:
        job_ids = [job['jobId'] for job in job_info]
        for job_id in job_ids:
            batch.cancel_job(jobId=job_id, reason=reason)
    res_list = []
    for status in states:
        running = batch.list_jobs(jobQueue=job_queue, jobStatus=status)
        job_info = running.get('jobSummaryList')
        if job_info:
            job_ids = [job['jobId'] for job in job_info]
            for job_id in job_ids:
                logger.info('Killing job %s' % job_id)
                res = batch.terminate_job(jobId=job_id, reason=reason)
                res_list.append(res)
    return res_list
Terminates/cancels all RUNNING, RUNNABLE, and STARTING jobs.
6,856
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names, update_on_kvstore): for idx, param_on_devs in enumerate(param_arrays): name = param_names[idx] kvstore.init(name, arg_params[name]) if update_on_kvstore: kvstore.pull(name, param_on_devs, priority=-idx)
Initialize kvstore
6,857
def get_languages(self): languages = [] all_options = CONF.options(self.CONF_SECTION) for option in all_options: if option in [l.lower() for l in LSP_LANGUAGES]: languages.append(option) return languages
Get the list of languages we need to start servers and create clients for.
6,858
def copy_file(self):
    share_name = self._create_share()
    directory_name = self._create_directory(share_name)
    source_file_name = self._get_file_reference()
    self.service.create_file(share_name, directory_name, source_file_name, 512)
    source = self.service.make_file_url(share_name, directory_name, source_file_name)
    # destination file name assumed; it was elided in the source
    copy = self.service.copy_file(share_name, None, 'file1copy', source)
    # poll for completion of the async copy, giving up after 5 attempts
    count = 0
    while copy.status != 'success':
        count = count + 1
        if count > 5:
            print('Timed out waiting for async copy to complete.')
            break
        time.sleep(30)
        copy = self.service.get_file_properties(share_name, None, 'file1copy').properties.copy
    self.service.delete_share(share_name)
source_file_url = self.service.make_file_url( remote_share_name, remote_directory_name, remote_file_name, sas_token=remote_sas_token, ) copy = self.service.copy_file(destination_sharename, destination_directory_name, destination_file_name, source_file_url)
6,859
def load_keys(self, issuer, jwks_uri='', jwks=None, replace=False):
    logger.debug("Initiating key bundle for issuer: %s" % issuer)
    if replace or issuer not in self.issuer_keys:
        self.issuer_keys[issuer] = []
    if jwks_uri:
        self.add_url(issuer, jwks_uri)
    elif jwks:
        # jwks is only considered if no jwks_uri is given
        _keys = jwks['keys']
        self.issuer_keys[issuer].append(self.keybundle_cls(_keys))
Fetch keys from another server :param jwks_uri: A URL pointing to a site that will return a JWKS :param jwks: A dictionary representation of a JWKS :param issuer: The provider URL :param replace: If all previously gathered keys from this provider should be replace. :return: Dictionary with usage as key and keys as values
6,860
def run_cmd(cmd, remote, rootdir='', workdir='', ignore_exit_code=False, ssh='ssh'):
    logger = logging.getLogger(__name__)
    workdir = os.path.join(rootdir, workdir)
    if type(cmd) in [list, tuple]:
        use_shell = False
    else:
        cmd = str(cmd)
        use_shell = True
    try:
        if remote is None:  # run locally
            workdir = os.path.expanduser(workdir)
            if use_shell:
                logger.debug("COMMAND: %s", cmd)
            else:
                logger.debug("COMMAND: %s", " ".join([quote(part) for part in cmd]))
            if workdir == '':
                response = sp.check_output(cmd, stderr=sp.STDOUT, shell=use_shell)
            else:
                response = sp.check_output(cmd, stderr=sp.STDOUT, cwd=workdir, shell=use_shell)
        else:  # run remotely via ssh
            if not use_shell:
                cmd = " ".join(cmd)
            if workdir == '':
                cmd = [ssh, remote, cmd]
            else:
                cmd = [ssh, remote, 'cd %s && %s' % (workdir, cmd)]
            logger.debug("COMMAND: %s", " ".join([quote(part) for part in cmd]))
            response = sp.check_output(cmd, stderr=sp.STDOUT)
    except sp.CalledProcessError as e:
        if ignore_exit_code:
            response = e.output
        else:
            raise
    if sys.version_info >= (3, 0):
        response = response.decode(CMD_RESPONSE_ENCODING)
    if logger.getEffectiveLevel() <= logging.DEBUG:
        if "\n" in response:
            if len(response.splitlines()) == 1:
                logger.debug("RESPONSE: %s", response)
            else:
                logger.debug("RESPONSE: ---\n%s\n---", response)
        else:
            logger.debug("RESPONSE: %s", response)
    return response
r'''Run the given cmd in the given workdir, either locally or remotely, and return the combined stdout/stderr Parameters: cmd (list of str or str): Command to execute, as list consisting of the command, and options. Alternatively, the command can be given a single string, which will then be executed as a shell command. Only use shell commands when necessary, e.g. when the command involves a pipe. remote (None or str): If None, run command locally. Otherwise, run on the given host (via SSH) rootdir (str, optional): Local or remote root directory. The `workdir` variable is taken relative to `rootdir`. If not specified, effectively the current working directory is used as the root for local commands, and the home directory for remote commands. Note that `~` may be used to indicate the home directory locally or remotely. workdir (str, optional): Local or remote directory from which to run the command, relative to `rootdir`. If `rootdir` is empty, `~` may be used to indicate the home directory. ignore_exit_code (boolean, optional): By default, `subprocess.CalledProcessError` will be raised if the call has an exit code other than 0. This exception can be suppressed by passing `ignore_exit_code=True` ssh (str, optional): The executable to be used for ssh. If not a full path, the executable must be in ``$PATH`` Example: >>> import tempfile, os, shutil >>> tempfolder = tempfile.mkdtemp() >>> scriptfile = os.path.join(tempfolder, 'test.sh') >>> with open(scriptfile, 'w') as script_fh: ... script_fh.writelines(["#!/bin/bash\n", "echo Hello $1\n"]) >>> set_executable(scriptfile) >>> run_cmd(['./test.sh', 'World'], remote=None, workdir=tempfolder) 'Hello World\n' >>> run_cmd("./test.sh World | tr '[:upper:]' '[:lower:]'", remote=None, ... workdir=tempfolder) 'hello world\n' >>> shutil.rmtree(tempfolder)
6,861
def get_object_references(tb, source, max_string_length=1000):
    global obj_ref_regex
    referenced_attr = set()
    for line in source.split('\n'):
        referenced_attr.update(set(re.findall(obj_ref_regex, line)))
    referenced_attr = sorted(referenced_attr)
    info = []
    for attr in referenced_attr:
        v = string_variable_lookup(tb, attr)
        if v is not ValueError:
            ref_string = format_reference(v, max_string_length=max_string_length)
            info.append((attr, ref_string))
    return info
Find the values of referenced attributes of objects within the traceback scope. :param tb: traceback :return: list of tuples containing (variable name, value)
6,862
def get_context_files(data): ref_file = dd.get_ref_file(data) all_files = [] for ext in [".bed.gz"]: all_files += sorted(glob.glob(os.path.normpath(os.path.join(os.path.dirname(ref_file), os.pardir, "coverage", "problem_regions", "*", "*%s" % ext)))) return sorted(all_files)
Retrieve pre-installed annotation files for annotating genome context.
6,863
def perform_permissions_check(self, user, obj, perms): return self.request.forum_permission_handler.can_update_topics_to_announces(obj, user)
Performs the permissions check.
6,864
def add_cell_code(self, cell_str, pos=None):
    cell_str = cell_str.strip()
    logging.debug("add_cell_code: {}".format(cell_str))
    cell = nbf.v4.new_code_cell(cell_str)
    if pos is None:
        self.nb['cells'].append(cell)
    else:
        self.nb['cells'].insert(pos, cell)
Add Python cell :param cell_str: cell content :param pos: optional position at which to insert the cell (appended if None) :return:
6,865
def read_content(path: str, limit: Optional[int] = None) -> Iterator[List[str]]: with smart_open(path) as indata: for i, line in enumerate(indata): if limit is not None and i == limit: break yield list(get_tokens(line))
Returns a list of tokens for each line in path up to a limit. :param path: Path to files containing sentences. :param limit: How many lines to read from path. :return: Iterator over lists of words.
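A self-contained sketch of the same pattern; smart_open and get_tokens are not shown in this row, so plain open and whitespace splitting stand in for them (assumptions).

from typing import Iterator, List, Optional

def read_content(path: str, limit: Optional[int] = None) -> Iterator[List[str]]:
    with open(path) as indata:  # smart_open would additionally handle e.g. gzip
        for i, line in enumerate(indata):
            if limit is not None and i == limit:
                break
            yield line.split()  # get_tokens assumed to whitespace-tokenize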
6,866
def dump_json(data, indent=None):
    # separators tuple assumed; it was elided in the source
    return json.dumps(data, indent=indent, ensure_ascii=False, sort_keys=True,
                      separators=(',', ': '))
:param list | dict data: :param Optional[int] indent: :rtype: unicode
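A usage sketch; the outputs assume the separators tuple reconstructed above.

import json

def dump_json(data, indent=None):
    return json.dumps(data, indent=indent, ensure_ascii=False, sort_keys=True,
                      separators=(',', ': '))

print(dump_json({'b': 1, 'a': [1, 2]}))  # {"a": [1, 2],"b": 1}
print(dump_json('gödel'))                # "gödel" (ensure_ascii=False keeps non-ASCII)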
6,867
def connect_pores(network, pores1, pores2, labels=[], add_conns=True):
    # ensure that pores1 and pores2 are lists of lists
    try:
        len(pores1[0])
    except (TypeError, IndexError):
        pores1 = [pores1]
    try:
        len(pores2[0])
    except (TypeError, IndexError):
        pores2 = [pores2]
    if len(pores1) != len(pores2):
        raise Exception('Running in batch mode! pores1 and pores2 must be ' +
                        'of the same length.')
    arr1, arr2 = [], []
    for ps1, ps2 in zip(pores1, pores2):
        size1 = sp.size(ps1)
        size2 = sp.size(ps2)
        arr1.append(sp.repeat(ps1, size2))
        arr2.append(sp.tile(ps2, size1))
    conns = sp.vstack([sp.concatenate(arr1), sp.concatenate(arr2)]).T
    if add_conns:
        extend(network=network, throat_conns=conns, labels=labels)
    else:
        return conns
r''' Returns the possible connections between two group of pores, and optionally makes the connections. See ``Notes`` for advanced usage. Parameters ---------- network : OpenPNM Network Object pores1 : array_like The first group of pores on the network pores2 : array_like The second group of pores on the network labels : list of strings The labels to apply to the new throats. This argument is only needed if ``add_conns`` is True. add_conns : bool Indicates whether the connections should be added to the supplied network (default is True). Otherwise, the connections are returned as an Nt x 2 array that can be passed directly to ``extend``. Notes ----- (1) The method also works if ``pores1`` and ``pores2`` are list of lists, in which case it consecutively connects corresponding members of the two lists in a 1-to-1 fashion. Example: pores1 = [[0, 1], [2, 3]] and pores2 = [[5], [7, 9]] leads to creation of the following connections: 0 --> 5 2 --> 7 3 --> 7 1 --> 5 2 --> 9 3 --> 9 (2) If you want to use the batch functionality, make sure that each element within ``pores1`` and ``pores2`` are of type list or ndarray. (3) It creates the connections in a format which is acceptable by the default OpenPNM connection ('throat.conns') and either adds them to the network or returns them. Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic(shape=[5, 5, 5]) >>> pn.Nt 300 >>> op.topotools.connect_pores(network=pn, pores1=[22, 32], ... pores2=[16, 80, 68]) >>> pn.Nt 306 >>> pn['throat.conns'][300:306] array([[16, 22], [22, 80], [22, 68], [16, 32], [32, 80], [32, 68]])
6,868
def get_all_autoscaling_instances(self, instance_ids=None, max_records=None, next_token=None):
    params = {}
    if instance_ids:
        self.build_list_params(params, instance_ids, 'InstanceIds.member')
    if max_records:
        params['MaxRecords'] = max_records
    if next_token:
        params['NextToken'] = next_token
    return self.get_list('DescribeAutoScalingInstances', params, [('member', Instance)])
Returns a description of each Auto Scaling instance in the instance_ids list. If a list is not provided, the service returns the full details of all instances up to a maximum of fifty. This action supports pagination by returning a token if there are more pages to retrieve. To get the next page, call this action again with the returned token as the NextToken parameter. :type instance_ids: list :param instance_ids: List of Autoscaling Instance IDs which should be searched for. :type max_records: int :param max_records: Maximum number of results to return. :rtype: list :returns: List of :class:`boto.ec2.autoscale.instance.Instance` objects.
6,869
def dump_tables_to_tskit(pop):
    node_view = np.array(pop.tables.nodes, copy=True)
    # shift times so the most recent generation is 0 and the past is positive
    node_view['time'] -= node_view['time'].max()
    node_view['time'][np.where(node_view['time'] != 0.0)[0]] *= -1.0
    edge_view = np.array(pop.tables.edges, copy=False)
    mut_view = np.array(pop.tables.mutations, copy=False)
    tc = tskit.TableCollection(pop.tables.genome_length)
    # ... (the code filling the node/edge/site tables and building the
    # derived_state/metadata buffers is elided in the source) ...
    tc.mutations.set_columns(site=mut_view['site'], node=mut_view['node'],
                             derived_state=derived_state,
                             derived_state_offset=ancestral_state_offset,
                             metadata=md, metadata_offset=mdo)
    return tc.tree_sequence()
Converts fwdpy11.TableCollection to a tskit.TreeSequence
6,870
def entrez_batch_webhistory(record, expected, batchsize, *fnargs, **fnkwargs): results = [] for start in range(0, expected, batchsize): batch_handle = entrez_retry( Entrez.efetch, retstart=start, retmax=batchsize, webenv=record["WebEnv"], query_key=record["QueryKey"], *fnargs, **fnkwargs) batch_record = Entrez.read(batch_handle, validate=False) results.extend(batch_record) return results
Recovers the Entrez data from a prior NCBI webhistory search, in batches of defined size, using Efetch. Returns all results as a list. - record: Entrez webhistory record - expected: number of expected search returns - batchsize: how many search returns to retrieve in a batch - *fnargs: arguments to Efetch - **fnkwargs: keyword arguments to Efetch
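The batching arithmetic above in isolation: retstart walks 0, batchsize, 2·batchsize, and so on, and Efetch clips the final batch at the number of available records.

expected, batchsize = 450, 200
windows = [(start, min(start + batchsize, expected))
           for start in range(0, expected, batchsize)]
print(windows)  # [(0, 200), (200, 400), (400, 450)]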
6,871
def vnic_compose_empty(device=None): nicspec = vim.vm.device.VirtualDeviceSpec() if device: nicspec.device = device nicspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit else: nicspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add nicspec.device = vim.vm.device.VirtualVmxnet3() nicspec.device.wakeOnLanEnabled = True nicspec.device.deviceInfo = vim.Description() nicspec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo() nicspec.device.connectable.startConnected = True nicspec.device.connectable.allowGuestControl = True return nicspec
Compose empty vNIC for next attaching to a network :param device: <vim.vm.device.VirtualVmxnet3 or None> Device for which this 'spec' will be composed. If 'None', a new device will be composed. The 'operation' (edit/add) depends on whether the device existed. :return: <vim.vm.device.VirtualDeviceSpec>
6,872
def _process_key_val(self, instance, key, val):
    if instance.check_precondition(key, val):
        combined = '{k}:{v}'.format(k=key, v=val)  # stat key format assumed
        self._increment_total_stat(combined)
        self._increment_plugin_stat(
            instance.__class__.__name__,
            combined)
        instance.handle(key, val)
        self.redis_conn.delete(key)
        # clear any stale failure marker for this key
        failkey = self._get_fail_key(key)
        if self.redis_conn.exists(failkey):
            self.redis_conn.delete(failkey)
Logic to let the plugin instance process the redis key/val Split out for unit testing @param instance: the plugin instance @param key: the redis key @param val: the key value from redis
6,873
def get_package_version(self):
    # invocation arguments assumed; the literals were elided in the source
    output = subprocess.check_output([
        '{0}'.format(self.python), 'setup.py', '--version'
    ]).decode()
    return output.rstrip()
Get the version of the package :return:
6,874
def _to_args(x): if not isinstance(x, (list, tuple, np.ndarray)): x = [x] return x
Convert to args representation
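Behaviour of _to_args at a glance: scalars are wrapped into a list, existing sequences pass through unchanged.

import numpy as np

def _to_args(x):
    if not isinstance(x, (list, tuple, np.ndarray)):
        x = [x]
    return x

print(_to_args(3))        # [3]
print(_to_args((3, 4)))   # (3, 4)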
6,875
def precision_recall(y_true, y_score, ax=None):
    if any((val is None for val in (y_true, y_score))):
        raise ValueError('y_true and y_score are needed to plot precision-recall')
    if ax is None:
        ax = plt.gca()
    y_score_is_vector = is_column_vector(y_score) or is_row_vector(y_score)
    if y_score_is_vector:
        n_classes = 2
    else:
        _, n_classes = y_score.shape
    if n_classes > 2:
        y_true_bin = label_binarize(y_true, classes=np.unique(y_true))
        _precision_recall_multi(y_true_bin, y_score, ax=ax)
        for i in range(n_classes):
            _precision_recall(y_true_bin[:, i], y_score[:, i], ax=ax)
    else:
        if y_score_is_vector:
            _precision_recall(y_true, y_score, ax)
        else:
            _precision_recall(y_true, y_score[:, 1], ax)
    return ax
Plot precision-recall curve. Parameters ---------- y_true : array-like, shape = [n_samples] Correct target values (ground truth). y_score : array-like, shape = [n_samples] or [n_samples, 2] for binary classification or [n_samples, n_classes] for multiclass Target scores (estimator predictions). ax : matplotlib Axes Axes object to draw the plot onto, otherwise uses current Axes Notes ----- It is assumed that the y_score parameter columns are in order. For example, if ``y_true = [2, 2, 1, 0, 0, 1, 2]``, then the first column in y_score must contain the scores for class 0, second column for class 1 and so on. Returns ------- ax: matplotlib Axes Axes containing the plot Examples -------- .. plot:: ../../examples/precision_recall.py
6,876
def bind( self, server_name, script_name=None, subdomain=None, url_scheme="http", default_method="GET", path_info=None, query_args=None, ): server_name = server_name.lower() if self.host_matching: if subdomain is not None: raise RuntimeError("host matching enabled and a subdomain was provided") elif subdomain is None: subdomain = self.default_subdomain if script_name is None: script_name = "/" if path_info is None: path_info = "/" try: server_name = _encode_idna(server_name) except UnicodeError: raise BadHost() return MapAdapter( self, server_name, script_name, subdomain, url_scheme, path_info, default_method, query_args, )
Return a new :class:`MapAdapter` with the details specified to the call. Note that `script_name` will default to ``'/'`` if not further specified or `None`. The `server_name` at least is a requirement because the HTTP RFC requires absolute URLs for redirects and so all redirect exceptions raised by Werkzeug will contain the full canonical URL. If no path_info is passed to :meth:`match` it will use the default path info passed to bind. While this doesn't really make sense for manual bind calls, it's useful if you bind a map to a WSGI environment which already contains the path info. `subdomain` will default to the `default_subdomain` for this map if no defined. If there is no `default_subdomain` you cannot use the subdomain feature. .. versionadded:: 0.7 `query_args` added .. versionadded:: 0.8 `query_args` can now also be a string. .. versionchanged:: 0.15 ``path_info`` defaults to ``'/'`` if ``None``.
6,877
def dispatch_request(self): req = _request_ctx_stack.top.request app = current_app if req.method == "OPTIONS": return app.make_default_options_response() if req.routing_exception is not None: app.raise_routing_exception(req) rule = req.url_rule view_func = self.wrap_view_func( app, rule, req, app.view_functions[rule.endpoint], req.view_args ) return view_func(**req.view_args)
Modified version of Flask.dispatch_request to call process_view.
6,878
def autoencoder_range(rhp): rhp.set_float("dropout", 0.01, 0.3) rhp.set_float("gan_loss_factor", 0.01, 0.1) rhp.set_float("bottleneck_l2_factor", 0.001, 0.1, scale=rhp.LOG_SCALE) rhp.set_discrete("bottleneck_warmup_steps", [200, 2000]) rhp.set_float("gumbel_temperature", 0, 1) rhp.set_float("gumbel_noise_factor", 0, 0.5)
Tuning grid of the main autoencoder params.
6,879
def array_from_nested_dictionary( nested_dict, array_fn, dtype="float32", square_result=False): if square_result: outer_key_indices = inner_key_indices = flattened_nested_key_indices( nested_dict) else: outer_key_indices, inner_key_indices = nested_key_indices( nested_dict) n_rows = len(outer_key_indices) n_cols = len(inner_key_indices) shape = (n_rows, n_cols) result = array_fn(shape, dtype) for outer_key, sub_dictionary in nested_dict.items(): i = outer_key_indices[outer_key] for inner_key, value in sub_dictionary.items(): j = inner_key_indices[inner_key] result[i, j] = value outer_key_list = index_dict_to_sorted_list(outer_key_indices) inner_key_list = index_dict_to_sorted_list(inner_key_indices) return result, outer_key_list, inner_key_list
Parameters ---------- nested_dict : dict Dictionary which contains dictionaries array_fn : function Takes shape and dtype as arguments, returns empty array. dtype : dtype NumPy dtype of result array square_result : bool Combine keys from outer and inner dictionaries. Returns array and sorted lists of the outer and inner keys.
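A worked example with NumPy, assuming the helper above and its key-indexing machinery assign indices in sorted key order. np.zeros fits the array_fn contract since it accepts (shape, dtype) positionally.

import numpy as np

nested = {"a": {"x": 1.0, "y": 2.0}, "b": {"y": 3.0}}
arr, rows, cols = array_from_nested_dictionary(nested, np.zeros)
print(rows, cols)  # ['a', 'b'] ['x', 'y']
print(arr)
# [[1. 2.]
#  [0. 3.]]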
6,880
def init(): # NOTE: the original string literals were lost in extraction; the # directory name and messages below are illustrative placeholders. if not os.path.isdir('venv'): # assumed virtualenv directory print(cyan('Creating virtualenv ...')) local('virtualenv venv') # assumed fabric command print(green('Virtualenv ready.')) print(green('Init done.'))
Execute init tasks for all components (virtualenv, pip).
6,881
def post(request): # The JSON body key was lost in extraction; 'body' is an assumption. data = request.POST or json.loads(request.body)['body'] guid = data.get('guid', None) res = Result() if guid: obj = getObjectsFromGuids([guid, ])[0] comment = Comment() comment.comment = data.get('comment', '') comment.user = request.user comment.user_name = request.user.get_full_name() comment.user_email = request.user.email comment.content_object = obj comment.site_id = 1 comment.save() obj.comment_count += 1 obj.save() emailComment(comment, obj, request) res.append(commentToJson(comment)) return JsonResponse(res.asDict())
Creates a comment on the object identified by guid and returns the serialized comment.
6,882
def get_interface_name(): interface_name = None # assumed default; original literal lost interfaces = psutil.net_if_addrs() for name, details in interfaces.items(): for detail in details: if detail.family == socket.AF_INET: ip_address = ipaddress.ip_address(detail.address) if not (ip_address.is_link_local or ip_address.is_loopback): interface_name = name break if interface_name is not None: # Stop at the first match so later interfaces cannot # overwrite the result. break return interface_name
Returns the name of the first interface that is neither link-local nor loopback.
6,883
def get_content_of_file(self, project, repository, filename, at=None, markup=None): headers = self.form_token_headers # URL template reconstructed from the Bitbucket REST API; the exact # literal was lost in extraction. url = 'projects/{project}/repos/{repository}/raw/{filename}'.format( project=project, repository=repository, filename=filename) params = {} if at is not None: params['at'] = at if markup is not None: params['markup'] = markup return self.get(url, params=params, not_json_response=True, headers=headers)
Retrieve the raw content for a file path at a specified revision. The authenticated user must have REPO_READ permission for the specified repository to call this resource. :param project: :param repository: :param filename: :param at: OPTIONAL ref string :param markup: if present or "true", triggers the raw content to be markup-rendered and returned as HTML; otherwise, if not specified, or any value other than "true", the content is streamed without markup :return:
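Hedged usage with the atlassian-python-api client; the host, credentials, project key, and paths are placeholders. The raw response is returned as bytes because of not_json_response=True.

from atlassian import Bitbucket

bitbucket = Bitbucket(url='https://stash.example.com',
                      username='user', password='secret')
raw = bitbucket.get_content_of_file('PROJ', 'my-repo', 'README.md',
                                    at='refs/heads/master')
print(raw.decode('utf-8'))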
6,884
def register_identity(self, id_stmt): bst = id_stmt.search_one("base") if bst: bder = self.identity_deps.setdefault(bst.i_identity, []) bder.append(id_stmt)
Register `id_stmt` with its base identity, if any.
6,885
def auth(self, auth_key): if self._socket is None: self._socket = self._connect() # Method name assumed; the original literal was lost in extraction. return self.call('Auth', {"AuthKey": auth_key}, expect_body=False)
Performs the initial authentication on connect
6,886
def extract_zipdir(zip_file): if not os.path.exists(zip_file): # Message reconstructed; original literal lost in extraction. raise ValueError('File {} not found'.format(zip_file)) directory = os.path.dirname(zip_file) filename = os.path.basename(zip_file) # Suffix handling assumed: strip the '.zip' extension for the folder name. dirpath = os.path.join(directory, filename.replace('.zip', '')) with zipfile.ZipFile(zip_file, 'r', zipfile.ZIP_DEFLATED) as zipf: zipf.extractall(dirpath) return dirpath
Extract contents of zip file into subfolder in parent directory. Parameters ---------- zip_file : str Path to zip file Returns ------- str : folder where the zip was extracted
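Usage sketch; the path is a placeholder, and the resulting folder name depends on the suffix handling reconstructed above.

folder = extract_zipdir('/tmp/archive.zip')
print(folder)  # e.g. /tmp/archive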
6,887
def remove(name=None, pkgs=None, purge=False, **kwargs): try: # Delimiter assumed (OpenBSD flavor separator); literal lost in extraction. pkg_params = [x.split('--')[0] for x in __salt__['pkg_resource.parse_targets'](name, pkgs)[0]] except MinionError as exc: raise CommandExecutionError(exc) old = list_pkgs() targets = [x for x in pkg_params if x in old] if not targets: return {} # Command flags reconstructed; the original literals were lost in # extraction, and the purge flag is an assumption. cmd = ['pkg_delete', '-I', '-x'] if purge: cmd.append('-c') cmd.extend(targets) out = __salt__['cmd.run_all']( cmd, python_shell=False, output_loglevel='trace' ) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered removing package(s)', info={'errors': errors, 'changes': ret} ) return ret
Remove a single package with pkg_delete Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]'
6,888
def streaming_market_data_filter(fields=None, ladder_levels=None): args = locals() return { to_camel_case(k): v for k, v in args.items() if v is not None }
:param list fields: EX_BEST_OFFERS_DISP, EX_BEST_OFFERS, EX_ALL_OFFERS, EX_TRADED, EX_TRADED_VOL, EX_LTP, EX_MARKET_DEF, SP_TRADED, SP_PROJECTED :param int ladder_levels: 1->10 :return: dict
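For example, snake_case argument names are converted to camelCase keys and None values are dropped:

params = streaming_market_data_filter(
    fields=['EX_BEST_OFFERS', 'EX_TRADED'], ladder_levels=3)
print(params)  # {'fields': ['EX_BEST_OFFERS', 'EX_TRADED'], 'ladderLevels': 3}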
6,889
def _predicted(self): return np.squeeze( np.matmul(self.xwins, np.expand_dims(self.solution, axis=-1)) )
The predicted values of y ('yhat').
6,890
def get_apps_json(self, url, timeout, auth, acs_url, ssl_verify, tags, group): if self.apps_response is not None: return self.apps_response if group is None: marathon_path = urljoin(url, "v2/apps?embed=apps.counts") else: marathon_path = urljoin( url, "v2/groups/{}?embed=group.groups".format(group) + "&embed=group.apps&embed=group.apps.counts" ) self.apps_response = self.get_json(marathon_path, timeout, auth, acs_url, ssl_verify, tags) return self.apps_response
The dictionary containing the apps is cached during collection and reset at every `check()` call.
6,891
def horiz_string(*args, **kwargs): import unicodedata precision = kwargs.get('precision', None) sep = kwargs.get('sep', '') if len(args) == 1 and not isinstance(args[0], six.string_types): val_list = args[0] else: val_list = args # Normalization form assumed; the original literal was lost in extraction. val_list = [unicodedata.normalize('NFC', ensure_unicode(val)) for val in val_list] all_lines = [] hpos = 0 for sx in range(len(val_list)): val = val_list[sx] str_ = None if precision is not None: if util_type.HAVE_NUMPY: try: import numpy as np if isinstance(val, np.ndarray): str_ = np.array_str(val, precision=precision, suppress_small=True) except ImportError: pass if str_ is None: str_ = six.text_type(val_list[sx]) lines = str_.split('\n') line_diff = len(lines) - len(all_lines) if line_diff > 0: all_lines += [' ' * hpos] * line_diff for lx, line in enumerate(lines): all_lines[lx] += line hpos = max(hpos, len(all_lines[lx])) for lx in range(len(all_lines)): hpos_diff = hpos - len(all_lines[lx]) all_lines[lx] += ' ' * hpos_diff + sep all_lines = [line.rstrip() for line in all_lines] ret = '\n'.join(all_lines) return ret
Horizontally concatenates strings reprs preserving indentation Concats a list of objects ensuring that the next item in the list is all the way to the right of any previous items. Args: *args: list of strings to concat **kwargs: precision, sep CommandLine: python -m utool.util_str --test-horiz_string Example1: >>> # ENABLE_DOCTEST >>> # Pretty printing of matrices demo / test >>> import utool >>> import numpy as np >>> # Wouldn't it be nice if we could print this operation easily? >>> B = np.array(((1, 2), (3, 4))) >>> C = np.array(((5, 6), (7, 8))) >>> A = B.dot(C) >>> # Eg 1: >>> result = (utool.hz_str('A = ', A, ' = ', B, ' * ', C)) >>> print(result) A = [[19 22] = [[1 2] * [[5 6] [43 50]] [3 4]] [7 8]] Example2: >>> # Eg 2: >>> str_list = ['A = ', str(B), ' * ', str(C)] >>> horizstr = (utool.horiz_string(*str_list)) >>> result = (horizstr) >>> print(result) A = [[1 2] * [[5 6] [3 4]] [7 8]]
6,892
def thread_function(self): self.__subscribed = True url = SUBSCRIBE_ENDPOINT + "?token=" + self._session_token # Dictionary keys below ('status', 'action', 'resource') and the HTTP # method were reconstructed from the assignment targets; the original # literals were lost in extraction. data = self._session.query(url, method='get', raw=True, stream=True) if not data or not data.ok: _LOGGER.debug("Did not receive a valid response. Aborting..") return None self.__sseclient = sseclient.SSEClient(data) try: for event in self.__sseclient.events(): if not self.__subscribed: break data = json.loads(event.data) if data.get('status') == "connected": _LOGGER.debug("Successfully subscribed this base station") elif data.get('action'): action = data.get('action') resource = data.get('resource') if action == "logout": _LOGGER.debug("Logged out by some other entity") self.__subscribed = False break elif action == "is" and "subscriptions/" not in resource: self.__events.append(data) self.__event_handle.set() except TypeError as error: _LOGGER.debug("Got unexpected error: %s", error) return None return True
Thread function: subscribe to the base station's SSE event stream and dispatch incoming events.
6,893
def set_base_headers(self, hdr): # FITS keyword names and comments reconstructed (the original literals # were lost in extraction); they are assumptions following the pattern # package version / recipe name / recipe version. hdr['NUMXVER'] = (__version__, 'Numina package version') hdr['NUMRNAM'] = (self.__class__.__name__, 'Numina recipe name') hdr['NUMRVER'] = (self.__version__, 'Numina recipe version') return hdr
Set metadata in FITS headers.
6,894
def removeThing(self, thingTypeId, thingId): thingUrl = ApiClient.thingUrl % (self.host, thingTypeId, thingId) r = requests.delete(thingUrl, auth=self.credentials, verify=self.verify) status = r.status_code if status == 204: self.logger.debug("Thing was successfully removed") return True elif status == 401: raise ibmiotf.APIException(401, "The authentication token is empty or invalid", None) elif status == 403: raise ibmiotf.APIException(403, "The authentication method is invalid or the api key used does not exist", None) elif status == 404: raise ibmiotf.APIException(404, "A thing type or thing instance with the specified id does not exist.", None) elif status == 409: raise ibmiotf.APIException(409, "The thing instance is aggregated into another thing instance.", None) elif status == 500: raise ibmiotf.APIException(500, "Unexpected error", None) else: raise ibmiotf.APIException(None, "Unexpected error", None)
Delete an existing thing. It accepts thingTypeId (string) and thingId (string) as parameters. In case of failure it throws APIException.
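Hedged usage sketch; api_client is assumed to be a configured ApiClient instance, and the ids are placeholders.

try:
    api_client.removeThing('buildingType', 'building-001')
except ibmiotf.APIException as exc:
    # The raised status distinguishes e.g. 404 (missing) from 409 (aggregated).
    print(exc)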
6,895
def enc_setup(self, enc_alg, msg, auth_data=b'', key=None, iv=""): iv = self._generate_iv(enc_alg, iv) if enc_alg in ["A192GCM", "A128GCM", "A256GCM"]: aes = AES_GCMEncrypter(key=key) ctx, tag = split_ctx_and_tag(aes.encrypt(msg, iv, auth_data)) elif enc_alg in ["A128CBC-HS256", "A192CBC-HS384", "A256CBC-HS512"]: aes = AES_CBCEncrypter(key=key) ctx, tag = aes.encrypt(msg, iv, auth_data) else: raise NotSupportedAlgorithm(enc_alg) return ctx, tag, aes.key
Encrypt JWE content. :param enc_alg: The JWE "enc" value specifying the encryption algorithm :param msg: The plain text message :param auth_data: Additional authenticated data :param key: Key (CEK) :param iv: Initialization vector; generated if not provided :return: Tuple (ciphertext, tag, key), the first two as bytes
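The GCM branch above splits the encrypter output into ciphertext and tag. A standalone sketch of the same split using the cryptography package (an assumption; the snippet itself relies on the library's own AES_GCMEncrypter):

import os
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

key = AESGCM.generate_key(bit_length=256)   # CEK suitable for A256GCM
iv = os.urandom(12)                         # 96-bit GCM nonce
aad = b'protected-header'
blob = AESGCM(key).encrypt(iv, b'attack at dawn', aad)
ctx, tag = blob[:-16], blob[-16:]           # GCM tag is the final 16 bytes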
6,896
def create(cls, zmq_context, endpoint): socket = zmq_context.socket(zmq.ROUTER) socket.bind(endpoint) return cls(socket)
Create new server transport. Instead of creating the socket yourself, you can call this function and merely pass the :py:class:`zmq.core.context.Context` instance. By passing a context imported from :py:mod:`zmq.green`, you can use green (gevent) 0mq sockets as well. :param zmq_context: A 0mq context. :param endpoint: The endpoint clients will connect to.
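Usage sketch in the tinyrpc style; the import path is an assumption, and the endpoint is a placeholder.

import zmq
from tinyrpc.transports.zmq import ZmqServerTransport  # assumed import path

ctx = zmq.Context()
transport = ZmqServerTransport.create(ctx, 'tcp://127.0.0.1:5001')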
6,897
def formfield_for_dbfield(self, db_field, **kwargs): overrides = self.formfield_overrides.get(db_field.name) if overrides: kwargs.update(overrides) field = super(AbstractEntryBaseAdmin, self).formfield_for_dbfield(db_field, **kwargs) # Field name reconstructed from context; pass the request user to the # author field so its form logic can use it. if db_field.name == 'author': field.user = kwargs['request'].user return field
Allow formfield_overrides to contain field names too.
6,898
def email_action_view(self, id, action): user_email = self.db_manager.get_user_email_by_id(id=id) if not user_email or user_email.user_id != current_user.id: return self.unauthorized_view() # Action names and the redirect endpoint were reconstructed from # Flask-User conventions; the original literals were lost in extraction. if action == 'delete': if user_email.is_primary: return self.unauthorized_view() self.db_manager.delete_object(user_email) self.db_manager.commit() elif action == 'make-primary': user_emails = self.db_manager.find_user_emails(current_user) for other_user_email in user_emails: if other_user_email.is_primary: other_user_email.is_primary = False self.db_manager.save_object(other_user_email) user_email.is_primary = True self.db_manager.save_object(user_email) self.db_manager.commit() elif action == 'confirm': self._send_confirm_email_email(user_email.user, user_email) else: return self.unauthorized_view() return redirect(url_for('user.manage_emails'))
Perform action 'action' on UserEmail object 'id'
6,899
def read_table(self): self.bitcount = self.bits = 0 tlen = unpack('>I', self.input.read(4))[0] table_data = AMQPReader(self.input.read(tlen)) result = {} while table_data.input.tell() < tlen: name = table_data.read_shortstr() ftype = ord(table_data.input.read(1)) if ftype == 83: # 'S': long string val = table_data.read_longstr() elif ftype == 73: # 'I': signed 32-bit integer val = unpack('>i', table_data.input.read(4))[0] elif ftype == 68: # 'D': decimal d = table_data.read_octet() n = unpack('>i', table_data.input.read(4))[0] val = Decimal(n) / Decimal(10 ** d) elif ftype == 84: # 'T': timestamp val = table_data.read_timestamp() elif ftype == 70: # 'F': nested table val = table_data.read_table() else: raise ValueError('Unknown table item type: %s' % repr(ftype)) result[name] = val return result
Read an AMQP table, and return as a Python dictionary.
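To make the wire format concrete, here is a self-contained parse of a one-entry table containing a long string (type 'S', 0x53), mirroring the logic above with plain struct calls.

import struct

# Build the raw bytes: shortstr name (1-byte length + bytes), a type
# octet, then a longstr value (4-byte big-endian length + bytes).
payload = b'\x03key' + b'S' + struct.pack('>I', 2) + b'hi'
raw = struct.pack('>I', len(payload)) + payload  # 4-byte table length prefix

tlen = struct.unpack('>I', raw[:4])[0]
pos, result = 4, {}
while pos - 4 < tlen:
    nlen = raw[pos]; pos += 1
    name = raw[pos:pos + nlen].decode('utf-8'); pos += nlen
    ftype = raw[pos]; pos += 1
    if ftype == 83:  # 'S': long string
        slen = struct.unpack('>I', raw[pos:pos + 4])[0]; pos += 4
        result[name] = raw[pos:pos + slen].decode('utf-8'); pos += slen
print(result)  # {'key': 'hi'}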