Dataset preview (code/docstring pairs). Columns:
- "Unnamed: 0": int64 row index, 0 to ~389k
- "code": string, 26 to 79.6k characters
- "docstring": string, 1 to 46.9k characters
387,400
def press_key(self, key, mode=0):
    if isinstance(key, str):
        # assertion message reconstructed; the original literal was lost in extraction
        assert key in KEYS, "Unknown key: {}".format(key)
        key = KEYS[key]
    _LOGGER.info("Press key %s", self.__get_key_name(key))  # log text reconstructed
    # the rq() endpoint name was lost in extraction ("..." marks the gap);
    # the "key"/"mode" parameter names are inferred from the argument names
    return self.rq("...", OrderedDict([("key", key), ("mode", mode)]))
modes: 0 -> simple press 1 -> long press 2 -> release after long press
387,401
def start_vm(access_token, subscription_id, resource_group, vm_name):
    # URL fragments reconstructed from the Azure Resource Manager REST layout
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourceGroups/', resource_group,
                        '/providers/Microsoft.Compute/virtualMachines/', vm_name,
                        '/start',
                        '?api-version=', COMP_API])
    return do_post(endpoint, '', access_token)
Start a virtual machine. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vm_name (str): Name of the virtual machine. Returns: HTTP response.
387,402
def add_execution_data(self, context_id, data):
    if context_id not in self._contexts:
        LOGGER.warning("Context_id not in contexts, %s", context_id)
        return False

    context = self._contexts.get(context_id)
    context.add_execution_data(data)
    return True
Within a context, append data to the execution result. Args: context_id (str): the context id returned by create_context data (bytes): data to append Returns: (bool): True if the operation is successful, False if the context_id doesn't reference a known context.
387,403
def get_max_instances_of_storage_bus(self, chipset, bus):
    if not isinstance(chipset, ChipsetType):
        raise TypeError("chipset can only be an instance of type ChipsetType")
    if not isinstance(bus, StorageBus):
        raise TypeError("bus can only be an instance of type StorageBus")
    max_instances = self._call("getMaxInstancesOfStorageBus",
                               in_p=[chipset, bus])
    return max_instances
Returns the maximum number of storage bus instances which can be configured for each VM. This corresponds to the number of storage controllers one can have. Value may depend on chipset type used. in chipset of type :class:`ChipsetType` The chipset type to get the value for. in bus of type :class:`StorageBus` The storage bus type to get the value for. return max_instances of type int The maximum number of instances for the given storage bus.
387,404
def p_expression_sra(self, p):
    p[0] = Sra(p[1], p[3], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
expression : expression RSHIFTA expression
387,405
def get_cutout(self, token, channel, x_start, x_stop, y_start, y_stop,
               z_start, z_stop, t_start=0, t_stop=1, resolution=1,
               block_size=DEFAULT_BLOCK_SIZE, neariso=False):
    if block_size is None:
        block_size = self.get_block_size(token, resolution)

    origin = self.get_image_offset(token, resolution)

    if (z_stop - z_start) < 16:
        z_slices = 16
    else:
        z_slices = z_stop - z_start
    size = (x_stop - x_start) * (y_stop - y_start) * z_slices * 4

    if six.PY2:
        dl_func = self._get_cutout_blosc_no_chunking
    elif six.PY3:
        dl_func = self._get_cutout_no_chunking
    else:
        raise ValueError("Invalid Python version.")

    if size < self._chunk_threshold:
        vol = dl_func(token, channel, resolution, x_start, x_stop,
                      y_start, y_stop, z_start, z_stop, t_start, t_stop,
                      neariso=neariso)
        vol = numpy.rollaxis(vol, 1)
        vol = numpy.rollaxis(vol, 2)
        return vol
    else:
        from ndio.utils.parallel import block_compute
        blocks = block_compute(x_start, x_stop, y_start, y_stop,
                               z_start, z_stop, origin, block_size)

        vol = numpy.zeros(((z_stop - z_start), (y_stop - y_start),
                           (x_stop - x_start)))
        for b in blocks:
            data = dl_func(token, channel, resolution,
                           b[0][0], b[0][1], b[1][0], b[1][1],
                           b[2][0], b[2][1], 0, 1, neariso=neariso)
            if b == blocks[0]:
                vol = numpy.zeros(((z_stop - z_start), (y_stop - y_start),
                                   (x_stop - x_start)), dtype=data.dtype)
            vol[b[2][0] - z_start: b[2][1] - z_start,
                b[1][0] - y_start: b[1][1] - y_start,
                b[0][0] - x_start: b[0][1] - x_start] = data

        vol = numpy.rollaxis(vol, 1)
        vol = numpy.rollaxis(vol, 2)
        return vol
Get volumetric cutout data from the neurodata server. Arguments: token (str): Token to identify data to download channel (str): Channel resolution (int): Resolution level Q_start (int): The lower bound of dimension 'Q' Q_stop (int): The upper bound of dimension 'Q' block_size (int[3]): Block size of this dataset. If not provided, ndio uses the metadata of this token/channel to set it. If you find that your downloads are timing out or otherwise failing, it may be wise to start off by making this smaller. neariso (bool : False): Passes the 'neariso' param to the cutout. If you don't know what this means, ignore it! Returns: numpy.ndarray: Downloaded data.
387,406
def _warning_handler(self, code: int):
    if code == 300:
        warnings.warn("ExpireWarning", RuntimeWarning, stacklevel=3)
    elif code == 301:
        warnings.warn("ExpireStreamWarning", RuntimeWarning, stacklevel=3)
    else:
        if self.debug:
            print("unknown code {}".format(code))
        return False
    return True
Handle status codes in the 300-399 range and raise the corresponding warning. Parameters: (code): - the response status code Return: (bool): - True if the warning type is known, otherwise False
387,407
def WriteProtoFile(self, printer):
    self.Validate()
    extended_descriptor.WriteMessagesFile(
        self.__file_descriptor, self.__package,
        self.__client_info.version, printer)
Write the messages file to out as proto.
387,408
def get_algorithm(alg: str) -> Callable:
    if alg not in algorithms:
        # error message reconstructed; the original literal was lost in extraction
        raise ValueError("Invalid algorithm: {}".format(alg))
    return algorithms[alg]
:param alg: The name of the requested `JSON Web Algorithm <https://tools.ietf.org/html/rfc7519#ref-JWA>`_. `RFC7518 <https://tools.ietf.org/html/rfc7518#section-3.2>`_ is related. :type alg: str :return: The requested algorithm. :rtype: Callable :raises: ValueError
387,409
def get_nearest_nodes(G, X, Y, method=None):
    # string literals restored from the published osmnx implementation
    start_time = time.time()

    if method is None:
        # calculate the nearest node one at a time for each point
        nn = [get_nearest_node(G, (y, x), method='haversine')
              for x, y in zip(X, Y)]

    elif method == 'kdtree':
        if not cKDTree:
            raise ImportError('The scipy package must be installed to use this optional feature.')

        # build a k-d tree for euclidean nearest-node search
        nodes = pd.DataFrame({'x': nx.get_node_attributes(G, 'x'),
                              'y': nx.get_node_attributes(G, 'y')})
        tree = cKDTree(data=nodes[['x', 'y']], compact_nodes=True,
                       balanced_tree=True)

        # query the tree for the nearest node to each point
        points = np.array([X, Y]).T
        dist, idx = tree.query(points, k=1)
        nn = nodes.iloc[idx].index

    elif method == 'balltree':
        if not BallTree:
            raise ImportError('The scikit-learn package must be installed to use this optional feature.')

        # haversine requires [lat, lng] inputs in radians
        nodes = pd.DataFrame({'x': nx.get_node_attributes(G, 'x'),
                              'y': nx.get_node_attributes(G, 'y')})
        nodes_rad = np.deg2rad(nodes[['y', 'x']].astype(np.float))
        points = np.array([Y.astype(np.float), X.astype(np.float)]).T
        points_rad = np.deg2rad(points)

        # build a ball tree for haversine nearest-node search
        tree = BallTree(nodes_rad, metric='haversine')
        idx = tree.query(points_rad, k=1, return_distance=False)
        nn = nodes.iloc[idx[:, 0]].index

    else:
        raise ValueError('You must pass a valid method name, or None.')

    log('Found nearest nodes to {:,} points in {:,.2f} seconds'.format(
        len(X), time.time() - start_time))

    return np.array(nn)
Return the graph nodes nearest to a list of points. Pass in points as separate vectors of X and Y coordinates. The 'kdtree' method is by far the fastest with large data sets, but only finds approximate nearest nodes if working in unprojected coordinates like lat-lng (it precisely finds the nearest node if working in projected coordinates). The 'balltree' method is second fastest with large data sets, but it is precise if working in unprojected coordinates like lat-lng. Parameters ---------- G : networkx multidigraph X : list-like The vector of longitudes or x's for which we will find the nearest node in the graph Y : list-like The vector of latitudes or y's for which we will find the nearest node in the graph method : str {None, 'kdtree', 'balltree'} Which method to use for finding nearest node to each point. If None, we manually find each node one at a time using osmnx.utils.get_nearest_node and haversine. If 'kdtree' we use scipy.spatial.cKDTree for very fast euclidean search. If 'balltree', we use sklearn.neighbors.BallTree for fast haversine search. Returns ------- nn : array list of nearest node IDs
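A usage sketch for the function above (hedged: get_nearest_nodes was a top-level function in older osmnx releases; the place name and coordinates here are made up):

import numpy as np
import osmnx as ox

# build a drivable street network and two query points
G = ox.graph_from_place("Piedmont, California, USA", network_type="drive")
X = np.array([-122.243, -122.250])  # longitudes
Y = np.array([37.824, 37.820])      # latitudes

# 'balltree' does precise haversine matching on unprojected lat-lng coordinates
nearest = ox.get_nearest_nodes(G, X, Y, method="balltree")
print(nearest)  # array of the two nearest node IDs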
387,410
def remove_timedim(self, var):
    # dimension name 'time' reconstructed from context
    if self.pps and var.dims[0] == 'time':
        data = var[0, :, :]
        data.attrs = var.attrs
        var = data
    return var
Remove time dimension from dataset
387,411
def _zp_decode(self, msg):
    zone_partitions = [ord(x) - 0x31 for x in msg[4:4 + Max.ZONES.value]]
    # dict key reconstructed from the docstring
    return {'zone_partitions': zone_partitions}
ZP: Zone partitions.
387,412
def create_win32tz_map(windows_zones_xml):
    coming_comment = None
    win32_name = None
    territory = None
    parser = genshi.input.XMLParser(StringIO(windows_zones_xml))
    map_zones = {}
    zone_comments = {}

    for kind, data, _ in parser:
        if kind == genshi.core.START and str(data[0]) == "mapZone":
            attrs = data[1]
            win32_name, territory, olson_name = (
                attrs.get("other"), attrs.get("territory"),
                attrs.get("type").split(" ")[0])
            map_zones[(win32_name, territory)] = olson_name
        elif kind == genshi.core.END and str(data) == "mapZone" and win32_name:
            if coming_comment:
                zone_comments[(win32_name, territory)] = coming_comment
                coming_comment = None
            win32_name = None
        elif kind == genshi.core.COMMENT:
            coming_comment = data.strip()
        elif kind in (genshi.core.START, genshi.core.END, genshi.core.COMMENT):
            coming_comment = None

    for win32_name, territory in sorted(map_zones):
        yield (win32_name, territory, map_zones[(win32_name, territory)],
               zone_comments.get((win32_name, territory), None))
Creates a map between Windows and Olson timezone names. Args: windows_zones_xml: The CLDR XML mapping. Yields: (win32_name, territory, olson_name, comment)
387,413
def set_enumerated_subtypes(self, subtype_fields, is_catch_all):
    # NOTE: the error-message literals below were lost in extraction and
    # have been reconstructed from context.
    assert self._enumerated_subtypes is None, \
        'Enumerated subtypes already set.'
    assert isinstance(is_catch_all, bool), type(is_catch_all)
    self._is_catch_all = is_catch_all
    self._enumerated_subtypes = []
    if self.parent_type:
        raise InvalidSpec(
            "'%s' enumerates subtypes so it cannot extend another struct."
            % self.name, self._ast_node.lineno, self._ast_node.path)
    if self.parent_type and not self.parent_type.has_enumerated_subtypes():
        raise InvalidSpec(
            "'%s' cannot enumerate subtypes if parent '%s' does not."
            % (self.name, self.parent_type.name),
            self._ast_node.lineno, self._ast_node.path)

    enumerated_subtype_names = set()
    for subtype_field in subtype_fields:
        path = subtype_field._ast_node.path
        lineno = subtype_field._ast_node.lineno

        # Require that a subtype is tagged only once.
        if subtype_field.data_type.name in enumerated_subtype_names:
            raise InvalidSpec(
                "Subtype '%s' can only be specified once."
                % subtype_field.data_type.name, lineno, path)

        # Require that the subtype actually extends this struct.
        if subtype_field.data_type.parent_type != self:
            raise InvalidSpec(
                "'%s' is not a subtype of '%s'."
                % (subtype_field.data_type.name, self.name), lineno, path)

        # Check for tag names that collide with this struct's own fields
        # (the guarding `if` was lost in extraction and is reconstructed).
        if subtype_field.name in self._fields_by_name:
            orig_field = self._fields_by_name[subtype_field.name]
            raise InvalidSpec(
                "Field '%s' already defined on line %d."
                % (subtype_field.name, lineno),
                orig_field._ast_node.lineno, orig_field._ast_node.path)

        # Walk up the hierarchy checking for fields of the same name.
        cur_type = self.parent_type
        while cur_type:
            if subtype_field.name in cur_type._fields_by_name:
                orig_field = cur_type._fields_by_name[subtype_field.name]
                raise InvalidSpec(
                    "Field '%s' already defined in parent '%s' (%s:%d)."
                    % (subtype_field.name, cur_type.name,
                       orig_field._ast_node.path,
                       orig_field._ast_node.lineno),
                    lineno, path)
            cur_type = cur_type.parent_type

        self._fields_by_name[subtype_field.name] = subtype_field
        enumerated_subtype_names.add(subtype_field.data_type.name)
        self._enumerated_subtypes.append(subtype_field)

    assert len(self._enumerated_subtypes) > 0

    # Require that all subtypes are enumerated.
    for subtype in self.subtypes:
        if subtype.name not in enumerated_subtype_names:
            raise InvalidSpec(
                "'%s' does not enumerate all subtypes, missing '%s'."
                % (self.name, subtype.name),
                self._ast_node.lineno)
Sets the list of "enumerated subtypes" for this struct. This differs from regular subtyping in that each subtype is associated with a tag that is used in the serialized format to indicate the subtype. Also, this list of subtypes was explicitly defined in an "inner-union" in the specification. The list of fields must include all defined subtypes of this struct. NOTE(kelkabany): For this to work with upcoming forward references, the hierarchy of parent types for this struct must have had this method called on them already. :type subtype_fields: List[UnionField]
387,414
def mcc(y, z):
    tp, tn, fp, fn = contingency_table(y, z)
    return (tp * tn - fp * fn) / K.sqrt(
        (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
Matthews correlation coefficient
387,415
def intersection(l1, l2):
    if len(l1) == 0 or len(l2) == 0:
        return []

    out = []
    l2_pos = 0
    for l in l1:
        while l2_pos < len(l2) and l2[l2_pos].end < l.start:
            l2_pos += 1
        if l2_pos == len(l2):
            break
        while l2_pos < len(l2) and l.intersects(l2[l2_pos]):
            out.append(l.intersection(l2[l2_pos]))
            l2_pos += 1
        l2_pos = max(0, l2_pos - 1)
    return out
Returns the intersection of two lists of intervals. Assumes the lists are sorted by start position.
387,416
def init_request(self):
    builder = Request.Builder()
    builder.url(self.url)
    for k, v in self.headers.items():
        builder.addHeader(k, v)
    body = self.body
    if body:
        media_type = MediaType(
            __id__=MediaType.parse(self.content_type))
        request_body = RequestBody(
            __id__=RequestBody.create(media_type, body))
        builder.method(self.method, request_body)
    # list of bodyless method names reconstructed; originals were lost
    elif self.method in ['get', 'head', 'delete']:
        getattr(builder, self.method)()
    else:
        raise ValueError("Cannot do a '{}' request "
                         "without a body".format(self.method))
    self.request = Request(__id__=builder.build())
Init the native request using the okhttp3.Request.Builder
387,417
def _read_python_source(self, filename):
    try:
        f = open(filename, "rb")
    except IOError as err:
        self.log_error("Can't open %s: %s", filename, err)
        return None, None
    try:
        encoding = tokenize.detect_encoding(f.readline)[0]
    finally:
        f.close()
    with _open_with_encoding(filename, "r", encoding=encoding) as f:
        return _from_system_newlines(f.read()), encoding
Do our best to decode a Python source file correctly.
387,418
def named(self, name):
    name = self.serialize(name)
    return self.get_by('name', name)
Returns .get_by('name', name)
387,419
def getDarkCurrentAverages(exposuretimes, imgs):
    x, imgs_p = sortForSameExpTime(exposuretimes, imgs)
    s0, s1 = imgs[0].shape
    imgs = np.empty(shape=(len(x), s0, s1), dtype=imgs[0].dtype)
    for i, ip in zip(imgs, imgs_p):
        if len(ip) == 1:
            i[:] = ip[0]
        else:
            i[:] = averageSameExpTimes(ip)
    return x, imgs
Return exposure times and image averages for each exposure time.
387,420
def projScatter(lon, lat, **kwargs):
    hp.projscatter(lon, lat, lonlat=True, **kwargs)
Create a scatter plot on HEALPix projected axes. Inputs: lon (deg), lat (deg)
387,421
def _string_from_ip_int(self, ip_int=None):
    if not ip_int and ip_int != 0:
        ip_int = int(self._ip)

    if ip_int > self._ALL_ONES:
        raise ValueError('IPv6 address is too large')

    hex_str = '%032x' % ip_int
    hextets = []
    for x in range(0, 32, 4):
        hextets.append('%x' % int(hex_str[x:x+4], 16))

    hextets = self._compress_hextets(hextets)
    return ':'.join(hextets)
Turns a 128-bit integer into hexadecimal notation. Args: ip_int: An integer, the IP address. Returns: A string, the hexadecimal representation of the address. Raises: ValueError: The address is bigger than 128 bits of all ones.
387,422
def publish(self, rawtx):
    tx = deserialize.signedtx(rawtx)
    if not self.dryrun:
        self.service.send_tx(tx)
    return serialize.txid(tx.hash())
Publish signed <rawtx> to bitcoin network.
387,423
def pgettext(msgctxt, message):
    # msgctxt and msgid are joined by the EOT byte, per GNU gettext convention
    key = msgctxt + '\x04' + message
    translation = get_translation().gettext(key)
    return message if translation == key else translation
'Particular' gettext function. It works with 'msgctxt' .po modifiers and allows duplicate keys with different translations. Python 2 has no support for this GNU gettext function, so we reimplement it. It works by joining msgctxt and msgid with the '\x04' byte.
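A brief illustration of the '\x04' joining described above (the catalog contents are hypothetical):

# If the .po catalog defines msgctxt "menu" / msgid "Open", this looks up
# the key "menu\x04Open"; if it is untranslated, the plain msgid comes back.
label = pgettext("menu", "Open")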
387,424
def add_droplets(self, droplet):
    droplets = droplet
    if not isinstance(droplets, list):
        droplets = [droplet]

    resources = self.__extract_resources_from_droplets(droplets)
    if len(resources) > 0:
        return self.__add_resources(resources)

    return False
Add the Tag to a Droplet. Attributes accepted at creation time: droplet: array of string or array of int, or array of Droplets.
387,425
def set_webconfiguration_settings(name, settings, location=''):
    ps_cmd = []

    if not settings:
        log.warning('No settings provided')  # message reconstructed
        return False

    settings = _prepare_settings(name, settings)

    # Treat non-collection values as strings when comparing against existing
    # values (the 'Collection' literal is reconstructed from context).
    for idx, setting in enumerate(settings):
        if setting['name'].split('.')[-1] != 'Collection':
            settings[idx]['value'] = six.text_type(setting['value'])

    current_settings = get_webconfiguration_settings(
        name=name, settings=settings, location=location)

    if settings == current_settings:
        log.debug('Settings already contain the provided values.')
        return True

    for setting in settings:
        # NOTE: the block that builds ps_cmd and runs the PowerShell
        # Set-WebConfigurationProperty command was lost in extraction.
        pass

    # Verify that the settings were applied.
    new_settings = get_webconfiguration_settings(
        name=name, settings=settings, location=location)
    failed_settings = []
    for idx, setting in enumerate(settings):
        is_collection = setting['name'].split('.')[-1] == 'Collection'
        if ((not is_collection and
             six.text_type(setting['value']) !=
             six.text_type(new_settings[idx]['value'])) or
                (is_collection and
                 list(map(dict, setting['value'])) !=
                 list(map(dict, new_settings[idx]['value'])))):
            failed_settings.append(setting)
    if failed_settings:
        log.error('Failed to change settings: %s', failed_settings)
        return False
    log.debug('Settings configured successfully: %s', settings)
    return True
r''' Set the value of the setting for an IIS container. Args: name (str): The PSPath of the IIS webconfiguration settings. settings (list): A list of dictionaries containing setting name, filter and value. location (str): The location of the settings (optional) Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' win_iis.set_webconfiguration_settings name='IIS:\' settings="[{'name': 'enabled', 'filter': 'system.webServer/security/authentication/anonymousAuthentication', 'value': False}]"
387,426
def show(self):
    if self._future:
        self._job.poll_once()
        return
    if self._model_json is None:
        print("No model trained yet")
        return
    if self.model_id is None:
        print("This H2OEstimator has been removed.")
        return
    model = self._model_json["output"]
    print("Model Details")
    print("=============")
    print(self.__class__.__name__, ": ", self._model_json["algo_full_name"])
    print("Model Key: ", self._id)
    self.summary()
    print()
    tm = model["training_metrics"]
    if tm:
        tm.show()
    vm = model["validation_metrics"]
    if vm:
        vm.show()
    xm = model["cross_validation_metrics"]
    if xm:
        xm.show()
    xms = model["cross_validation_metrics_summary"]
    if xms:
        xms.show()
    if "scoring_history" in model and model["scoring_history"]:
        model["scoring_history"].show()
    if "variable_importances" in model and model["variable_importances"]:
        model["variable_importances"].show()
Print the innards of the model, without regard to type.
387,427
def _validate_class(self, cl):
    if cl not in self.schema_def.attributes_by_class:
        search_string = self._build_search_string(cl)
        err = self.err("{0} - invalid class", self._field_name_from_uri(cl),
                       search_string=search_string)
        # result-dict keys were lost in extraction; 'err', 'line' and 'num'
        # are a guess based on the ValidationWarning signature
        return ValidationWarning(ValidationResult.ERROR,
                                 err['err'], err['line'], err['num'])
Return a validation warning if class `cl` is not found in the ontology.
387,428
def git_path_valid(git_path=None):
    if git_path is None and GIT_PATH is None:
        return False
    if git_path is None:
        git_path = GIT_PATH
    try:
        # subcommand reconstructed: any cheap call works as an existence check
        call([git_path, '--version'])
        return True
    except OSError:
        return False
Check whether the git executable is found.
387,429
def _handle_consent_response(self, context):
    consent_state = context.state[STATE_KEY]
    saved_resp = consent_state["internal_resp"]
    internal_response = InternalData.from_dict(saved_resp)

    hash_id = self._get_consent_id(internal_response.requester,
                                   internal_response.subject_id,
                                   internal_response.attributes)

    try:
        consent_attributes = self._verify_consent(hash_id)
    except ConnectionError:
        satosa_logging(logger, logging.ERROR,
                       "Consent service is not reachable, no consent given.",
                       context.state)
        consent_attributes = None

    if consent_attributes is None:
        satosa_logging(logger, logging.INFO, "Consent was NOT given",
                       context.state)
        consent_attributes = []
    else:
        satosa_logging(logger, logging.INFO, "Consent was given",
                       context.state)

    internal_response.attributes = self._filter_attributes(
        internal_response.attributes, consent_attributes)
    return self._end_consent(context, internal_response)
Endpoint for handling consent service response :type context: satosa.context.Context :rtype: satosa.response.Response :param context: response context :return: response
387,430
def elements(self):
    for elem, count in iteritems(self):
        for _ in range(count):
            yield elem
Iterator over elements repeating each as many times as its count. >>> c = Counter('ABCABC') >>> sorted(c.elements()) ['A', 'A', 'B', 'B', 'C', 'C'] If an element's count has been set to zero or is a negative number, elements() will ignore it.
387,431
def div(self, key, value=2):
    # fixed: the original called uwsgi.cache_mul, which contradicts both
    # the method name and the docstring
    return uwsgi.cache_div(key, value, self.timeout, self.name)
Divides the specified key value by the specified value. :param str|unicode key: :param int value: :rtype: bool
387,432
def walklevel(path, depth=-1, **kwargs):
    # full-depth walk
    if depth < 0:
        for root, dirs, files in os.walk(path, **kwargs):
            yield root, dirs, files
        return  # reconstructed: without it the tree would be walked twice

    # depth-limited walk: prune dirs once we are `depth` levels down
    path = path.rstrip(os.path.sep)
    num_sep = path.count(os.path.sep)
    for root, dirs, files in os.walk(path, **kwargs):
        yield root, dirs, files
        num_sep_this = root.count(os.path.sep)
        if num_sep + depth <= num_sep_this:
            del dirs[:]
It works just like os.walk, but you can pass it a level parameter that indicates how deep the recursion will go. If depth is -1 (or less than 0), the full depth is walked.
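A minimal usage sketch (the path is illustrative):

import os

# walk at most two directory levels below /tmp
for root, dirs, files in walklevel("/tmp", depth=2):
    print(root, len(files))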
387,433
def on_configurationdone_request(self, py_db, request):
    self.api.run(py_db)
    configuration_done_response = pydevd_base_schema.build_response(request)
    return NetCommand(CMD_RETURN, 0, configuration_done_response, is_json=True)
:param ConfigurationDoneRequest request:
387,434
def encode_for_locale(s):
    try:
        return s.encode(LOCALE_ENCODING, 'replace')
    except (AttributeError, UnicodeDecodeError):
        # codec arguments reconstructed from the docstring's
        # "fall back to ASCII" behaviour
        return s.decode('ascii', 'ignore').encode(LOCALE_ENCODING)
Encode text items for system locale. If encoding fails, fall back to ASCII.
387,435
def new(self, *args, **kwargs):
    return self.session().add(self.model(*args, **kwargs))
Create a new instance of :attr:`model` and commit it to the backend server. This is a shortcut method for the more verbose:: instance = manager.session().add(MyModel(**kwargs))
387,436
def wrap_exceptions(callable):
    def wrapper(self, *args, **kwargs):
        try:
            return callable(self, *args, **kwargs)
        except EnvironmentError:
            err = sys.exc_info()[1]
            if err.errno in (errno.ENOENT, errno.ESRCH):
                raise NoSuchProcess(self.pid, self._process_name)
            if err.errno in (errno.EPERM, errno.EACCES):
                raise AccessDenied(self.pid, self._process_name)
            raise
    return wrapper
Wrap callable in a try/except clause, translating ENOENT and ESRCH into NoSuchProcess and EPERM/EACCES into AccessDenied exceptions.
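A sketch of how such a decorator is typically applied (the class and the /proc read are illustrative, not from the original codebase):

class Process(object):
    def __init__(self, pid, name):
        self.pid = pid
        self._process_name = name

    @wrap_exceptions
    def cmdline(self):
        # reading /proc raises ENOENT/ESRCH once the process is gone;
        # the decorator converts that into NoSuchProcess
        with open("/proc/%s/cmdline" % self.pid, "rb") as f:
            return f.read().split(b"\x00")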
387,437
def publish(self, load):
    # dict keys and option names restored from the Salt TCP transport source
    payload = {'enc': 'aes'}
    crypticle = salt.crypt.Crypticle(
        self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
    payload['load'] = crypticle.dumps(load)
    if self.opts['sign_pub_messages']:
        master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
        log.debug("Signing data packet")
        payload['sig'] = salt.crypt.sign_message(master_pem_path,
                                                 payload['load'])
    # Use the Salt IPC server
    if self.opts.get('ipc_mode', '') == 'tcp':
        pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514))
    else:
        pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
    pub_sock = salt.utils.asynchronous.SyncWrapper(
        salt.transport.ipc.IPCMessageClient,
        (pull_uri,)
    )
    pub_sock.connect()
    int_payload = {'payload': self.serial.dumps(payload)}
    # add some targeting stuff for lists only (for now)
    if load['tgt_type'] == 'list':
        if isinstance(load['tgt'], six.string_types):
            # Fetch a list of minions that match
            _res = self.ckminions.check_minions(load['tgt'],
                                                tgt_type=load['tgt_type'])
            match_ids = _res['minions']
            log.debug("Publish Side Match: %s", match_ids)
            int_payload['topic_lst'] = match_ids
        else:
            int_payload['topic_lst'] = load['tgt']
    pub_sock.send(int_payload)
Publish "load" to minions
387,438
def return_action(self, text, loc, ret):
    exshared.setpos(loc, text)
    if DEBUG > 0:
        print("RETURN:", ret)
        if DEBUG == 2:
            self.symtab.display()
        if DEBUG > 2:
            return
    if not self.symtab.same_types(self.shared.function_index, ret.exp[0]):
        raise SemanticException("Incompatible type in return")
    # NOTE: the code that moves the return value into a register (and
    # assigns `reg`) was lost in extraction.
    self.codegen.free_register(reg)
    self.codegen.unconditional_jump(
        self.codegen.label(self.shared.function_name + "_exit", True))
Code executed after recognising a return statement
387,439
def greenlet_timeouts(self):
    while True:
        now = datetime.datetime.utcnow()
        for greenlet in list(self.gevent_pool):
            job = get_current_job(id(greenlet))
            if job and job.timeout and job.datestarted:
                expires = job.datestarted + datetime.timedelta(
                    seconds=job.timeout)
                if now > expires:
                    job.kill(block=False, reason="timeout")
        time.sleep(1)
This greenlet kills jobs in other greenlets if they timeout.
387,440
def clean_existing(self, value):
    existing_pk = value[self.pk_field]
    try:
        obj = self.fetch_existing(existing_pk)
    except ReferenceNotFoundError:
        # error message reconstructed; the original literal was lost
        raise ValidationError("Referenced document does not exist.")
    orig_data = self.get_orig_data_from_existing(obj)
    value = self.schema_class(value, orig_data).full_clean()
    for field_name, field_value in value.items():
        if field_name != self.pk_field:
            setattr(obj, field_name, field_value)
    return obj
Clean the data and return an existing document with its fields updated based on the cleaned values.
387,441
def update_extent_from_rectangle(self):
    self.show()
    self.canvas.unsetMapTool(self.rectangle_map_tool)
    self.canvas.setMapTool(self.pan_tool)
    rectangle = self.rectangle_map_tool.rectangle()
    if rectangle:
        # group-box title reconstructed; the original literal was lost
        self.bounding_box_group.setTitle(
            self.tr('Bounding box from rectangle'))
        extent = rectangle_geo_array(rectangle, self.iface.mapCanvas())
        self.update_extent(extent)
Update extent value in GUI based from the QgsMapTool rectangle. .. note:: Delegates to update_extent()
387,442
def ensure_dir(path):
    dirpath = os.path.dirname(path)
    if dirpath and not os.path.exists(dirpath):
        os.makedirs(dirpath)
Ensure directory exists. Args: path(str): dir path
387,443
def _should_run(het_file):
    has_hets = False
    with open(het_file) as in_handle:
        for i, line in enumerate(in_handle):
            if i > 1:
                has_hets = True
                break
    return has_hets
Check for enough input data to proceed with analysis.
387,444
def type_alias(self):
    type_alias = self._kwargs.get(self._TYPE_ALIAS_FIELD, None)
    return type_alias if type_alias is not None else type(self).__name__
Return the type alias this target was constructed via. For a target read from a BUILD file, this will be the target alias, like 'java_library'. For a target constructed in memory, this will be the simple class name, like 'JavaLibrary'. The end result is that the type alias should be the most natural way to refer to this target's type to the author of the target instance. :rtype: string
387,445
def _pred(aclass):
    isaclass = inspect.isclass(aclass)
    return isaclass and aclass.__module__ == _pred.__module__
:param aclass:
:return: boolean
387,446
def sample(self, cursor):
    count = cursor.count()

    # special case: empty collection
    if count == 0:
        self._empty = True
        raise ValueError("Empty collection")

    # special case: take the entire collection
    if self.p >= 1 and self.max_items <= 0:
        for item in cursor:
            yield item
        return

    # calculate the target number of items to select
    if self.max_items <= 0:
        n_target = max(self.min_items, self.p * count)
    else:
        if self.p <= 0:
            n_target = max(self.min_items, self.max_items)
        else:
            n_target = max(self.min_items,
                           min(self.max_items, self.p * count))
    if n_target == 0:
        raise ValueError("No items requested")

    # yield items passing the random filter until the target is reached
    n = 0
    while n < n_target:
        try:
            item = next(cursor)
        except StopIteration:
            # keep looping through the data until n_target is reached
            cursor.rewind()
            item = next(cursor)
        if self._keep():
            yield item
            n += 1
Extract records randomly from the database. Continue until the target proportion of the items have been extracted, or until `min_items` if this is larger. If `max_items` is non-negative, do not extract more than these. This function is a generator, yielding items incrementally. :param cursor: Cursor to sample :type cursor: pymongo.cursor.Cursor :return: yields each item :rtype: dict :raise: ValueError, if max_items is valid and less than `min_items` or if target collection is empty
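A hedged usage sketch (assumes an object exposing this method with p/min_items/max_items configured; `sampler` is hypothetical, and cursor.count() only exists in older pymongo releases):

from pymongo import MongoClient

cursor = MongoClient().mydb.mycollection.find()
for doc in sampler.sample(cursor):
    print(doc["_id"])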
387,447
def down(force):
    try:
        cloud_config = CloudConfig()
        cloud_controller = CloudController(cloud_config)
        cloud_controller.down(force)
    except CloudComposeException as ex:
        print(ex)
destroys an existing cluster
387,448
def package_releases(self, package, url_fmt=lambda u: u):
    # dict keys reconstructed from the docstring ('name', 'version', 'urls')
    return [{
        'name': package,
        'version': version,
        'urls': [self.get_urlhash(f, url_fmt) for f in files],
    } for version, files in self.storage.get(package, {}).items()]
List all versions of a package Along with the version, the caller also receives the file list with all the available formats.
387,449
def focus_up(pymux):
    " Move focus up. "
    _move_focus(pymux,
                lambda wp: wp.xpos,
                lambda wp: wp.ypos - 2)
Move focus up.
387,450
def touch(self):
    assert not self._has_responded
    self.trigger(event.TOUCH, message=self)
Respond to ``nsqd`` that you need more time to process the message.
387,451
def close(self, reason=None):
    with self._closing:
        if self._closed:
            return

        if self.is_active:
            _LOGGER.debug("Stopping consumer.")
            self._consumer.stop()
        self._consumer = None

        _LOGGER.debug("Stopping scheduler.")
        self._scheduler.shutdown()
        self._scheduler = None

        _LOGGER.debug("Stopping leaser.")
        self._leaser.stop()
        self._leaser = None

        _LOGGER.debug("Stopping dispatcher.")
        self._dispatcher.stop()
        self._dispatcher = None

        _LOGGER.debug("Stopping heartbeater.")
        self._heartbeater.stop()
        self._heartbeater = None

        self._rpc = None
        self._closed = True
        _LOGGER.debug("Finished stopping manager.")

        for callback in self._close_callbacks:
            callback(self, reason)
Stop consuming messages and shutdown all helper threads. This method is idempotent. Additional calls will have no effect. Args: reason (Any): The reason to close this. If None, this is considered an "intentional" shutdown. This is passed to the callbacks specified via :meth:`add_close_callback`.
387,452
def from_string(values, separator, remove_duplicates=False):
    result = AnyValueArray()

    if values is None or len(values) == 0:
        return result

    items = str(values).split(separator)
    for item in items:
        if (item is not None and len(item) > 0) or not remove_duplicates:
            result.append(item)

    return result
Splits specified string into elements using a separator and assigns the elements to a newly created AnyValueArray. :param values: a string value to be split and assigned to AnyValueArray :param separator: a separator to split the string :param remove_duplicates: (optional) true to remove duplicated elements :return: a newly created AnyValueArray.
387,453
def _scan_block(self, cfg_job):
    addr = cfg_job.addr
    current_func_addr = cfg_job.func_addr

    if self._addr_hooked_or_syscall(addr):
        entries = self._scan_procedure(cfg_job, current_func_addr)
    else:
        entries = self._scan_soot_block(cfg_job, current_func_addr)

    return entries
Scan a basic block starting at a specific address :param CFGJob cfg_job: The CFGJob instance. :return: a list of successors :rtype: list
387,454
def constraint(self):
    constraint_arr = []
    if self._not_null:
        constraint_arr.append("PRIMARY KEY" if self._pk else "NOT NULL")
    if self._unique:
        constraint_arr.append("UNIQUE")
    return " ".join(constraint_arr)
Constraint string
387,455
def get(self, list_id, segment_id):
    # path segment 'segments' reconstructed from the MailChimp API layout
    return self._mc_client._get(
        url=self._build_path(list_id, 'segments', segment_id))
returns the specified list segment.
387,456
def _safe_sendBreak_v2_7(self):
    result = True
    try:
        self.sendBreak()
    except:  # noqa: E722 -- pyserial 2.7 raises assorted exceptions here
        try:
            self.setBreak(False)
        except:  # noqa: E722
            result = False
    return result
! pyserial 2.7 API implementation of sendBreak/setBreak @details Below API is deprecated for pyserial 3.x versions! http://pyserial.readthedocs.org/en/latest/pyserial_api.html#serial.Serial.sendBreak http://pyserial.readthedocs.org/en/latest/pyserial_api.html#serial.Serial.setBreak
387,457
def init_default(m:nn.Module, func:LayerFunc=nn.init.kaiming_normal_)->None:
    "Initialize `m` weights with `func` and set `bias` to 0."
    if func:
        if hasattr(m, 'weight'): func(m.weight)
        if hasattr(m, 'bias') and hasattr(m.bias, 'data'): m.bias.data.fill_(0.)
    return m
Initialize `m` weights with `func` and set `bias` to 0.
387,458
def ip4_address(self):
    if self._ip4_address is None and self.network is not None:
        self._ip4_address = self._get_ip_address(
            libvirt.VIR_IP_ADDR_TYPE_IPV4)
    return self._ip4_address
Returns the IPv4 address of the network interface. If multiple interfaces are provided, the address of the first found is returned.
387,459
def _exec_loop(self, a, bd_all, mask):
    npt = bd_all.shape[0]
    n = self.X_ADJUSTED.shape[0]
    kvalues = np.zeros(npt)
    sigmasq = np.zeros(npt)

    a_inv = scipy.linalg.inv(a)

    for j in np.nonzero(~mask)[0]:
        bd = bd_all[j]
        if np.any(np.absolute(bd) <= self.eps):
            zero_value = True
            zero_index = np.where(np.absolute(bd) <= self.eps)
        else:
            zero_value = False
            zero_index = None

        b = np.zeros((n+1, 1))
        b[:n, 0] = - self.variogram_function(
            self.variogram_model_parameters, bd)
        if zero_value:
            b[zero_index[0], 0] = 0.0
        b[n, 0] = 1.0

        x = np.dot(a_inv, b)
        kvalues[j] = np.sum(x[:n, 0] * self.VALUES)
        sigmasq[j] = np.sum(x[:, 0] * -b[:, 0])

    return kvalues, sigmasq
Solves the kriging system by looping over all specified points. Less memory-intensive, but involves a Python-level loop.
387,460
def _flip_kron_order(mat4x4: np.ndarray) -> np.ndarray:
    result = np.array([[0] * 4] * 4, dtype=np.complex128)
    order = [0, 2, 1, 3]
    for i in range(4):
        for j in range(4):
            result[order[i], order[j]] = mat4x4[i, j]
    return result
Given M = sum(kron(a_i, b_i)), returns M' = sum(kron(b_i, a_i)).
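A quick numerical check of the property stated above, using numpy:

import numpy as np

a = np.array([[0, 1], [1, 0]], dtype=np.complex128)
b = np.array([[1, 0], [0, -1]], dtype=np.complex128)
# flipping the Kron order of kron(a, b) must give kron(b, a)
assert np.allclose(_flip_kron_order(np.kron(a, b)), np.kron(b, a))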
387,461
def _push_new_tag_to_git(self):
    print("Pushing new version to git")
    subprocess.call(["git", "add", self.release_file])
    subprocess.call(["git", "add", self.init_file])
    subprocess.call([
        "git", "commit", "-m",
        "Updating {}/__init__.py to version {}".format(self.package, self.tag)])
    subprocess.call(["git", "push", "origin", self.branch])

    if self.deploy:
        subprocess.call([
            "git", "tag", "-a", self.tag,
            "-m", "Updating version to {}".format(self.tag),
        ])
        subprocess.call(["git", "push", "origin"])
tags a new release and pushes to origin/master
387,462
def get(self, byte_sig: str, online_timeout: int = 2) -> List[str]:
    byte_sig = self._normalize_byte_sig(byte_sig)

    # try the local prelinked Solidity signatures first
    text_sigs = self.solidity_sigs.get(byte_sig)
    if text_sigs is not None:
        return text_sigs

    # then try the local database
    with SQLiteDB(self.path) as cur:
        cur.execute("SELECT text_sig FROM signatures WHERE byte_sig=?",
                    (byte_sig,))
        text_sigs = cur.fetchall()

    if text_sigs:
        return [t[0] for t in text_sigs]

    # finally try online lookup, unless disabled, flagged as a known miss,
    # or currently in a backoff window
    if (
        not self.enable_online_lookup
        or byte_sig in self.online_lookup_miss
        or time.time() < self.online_lookup_timeout
    ):
        return []

    try:
        text_sigs = self.lookup_online(byte_sig=byte_sig,
                                       timeout=online_timeout)
        if not text_sigs:
            self.online_lookup_miss.add(byte_sig)
            return []
        else:
            for resolved in text_sigs:
                self.add(byte_sig, resolved)
            return text_sigs
    except FourByteDirectoryOnlineLookupError as fbdole:
        self.online_lookup_timeout = int(time.time()) + 2 * 60
        log.warning("Online lookup failed, not retrying for 2min: %s", fbdole)
        return []
Get a function text signature for a byte signature 1) try local cache 2) try online lookup (if enabled; if not flagged as unavailable) :param byte_sig: function signature hash as hexstr :param online_timeout: online lookup timeout :return: list of matching function text signatures
387,463
def inject_code(self, payload, lpParameter=0):
    lpStartAddress = self.malloc(len(payload))
    try:
        self.write(lpStartAddress, payload)
        aThread = self.start_thread(lpStartAddress, lpParameter,
                                    bSuspended=False)
        aThread.pInjectedMemory = lpStartAddress
    except Exception:
        self.free(lpStartAddress)
        raise
    return aThread, lpStartAddress
Injects relocatable code into the process memory and executes it. @warning: Don't forget to free the memory when you're done with it! Otherwise you'll be leaking memory in the target process. @see: L{inject_dll} @type payload: str @param payload: Relocatable code to run in a new thread. @type lpParameter: int @param lpParameter: (Optional) Parameter to be pushed in the stack. @rtype: tuple( L{Thread}, int ) @return: The injected Thread object and the memory address where the code was written. @raise WindowsError: An exception is raised on error.
387,464
def calc_bhhh_hessian_approximation_mixed_logit(params,
                                                design_3d,
                                                alt_IDs,
                                                rows_to_obs,
                                                rows_to_alts,
                                                rows_to_mixers,
                                                choice_vector,
                                                utility_transform,
                                                ridge=None,
                                                weights=None):
    if weights is None:
        weights = np.ones(design_3d.shape[0])
    weights_per_obs = \
        np.max(rows_to_mixers.toarray() * weights[:, None], axis=0)

    prob_array = general_calc_probabilities(params,
                                            design_3d,
                                            alt_IDs,
                                            rows_to_obs,
                                            rows_to_alts,
                                            utility_transform,
                                            return_long_probs=True)

    prob_results = calc_choice_sequence_probs(prob_array,
                                              choice_vector,
                                              rows_to_mixers,
                                              return_type="all")
    sequence_prob_array = prob_results[1]
    simulated_probs = prob_results[0]

    long_sequence_prob_array = rows_to_mixers.dot(sequence_prob_array)
    long_simulated_probs = rows_to_mixers.dot(simulated_probs)

    scaled_sequence_probs = (long_sequence_prob_array /
                             long_simulated_probs[:, None])
    scaled_error = ((choice_vector[:, None] - prob_array) *
                    scaled_sequence_probs)

    gradient = (scaled_error[:, :, None] * design_3d).mean(axis=1)
    gradient_per_obs = rows_to_mixers.T.dot(gradient)

    bhhh_matrix = \
        gradient_per_obs.T.dot(weights_per_obs[:, None] * gradient_per_obs)

    if ridge is not None:
        bhhh_matrix -= 2 * ridge

    return -1 * bhhh_matrix
Parameters ---------- params : 1D ndarray. All elements should by ints, floats, or longs. Should have 1 element for each utility coefficient being estimated (i.e. num_features + num_coefs_being_mixed). design_3d : 3D ndarray. All elements should be ints, floats, or longs. Should have one row per observation per available alternative. The second axis should have as many elements as there are draws from the mixing distributions of the coefficients. The last axis should have one element per index coefficient being estimated. alt_IDs : 1D ndarray. All elements should be ints. There should be one row per obervation per available alternative for the given observation. Elements denote the alternative corresponding to the given row of the design matrix. rows_to_obs : 2D scipy sparse array. All elements should be zeros and ones. Should have one row per observation per available alternative and one column per observation. This matrix maps the rows of the design matrix to the unique observations (on the columns). rows_to_alts : 2D scipy sparse array. All elements should be zeros and ones. Should have one row per observation per available alternative and one column per possible alternative. This matrix maps the rows of the design matrix to the possible alternatives for this dataset. rows_to_mixers : 2D scipy sparse array. All elements should be zeros and ones. Will map the rows of the design matrix to the particular units that the mixing is being performed over. Note that in the case of panel data, this matrix will be different from `rows_to_obs`. choice_vector : 1D ndarray. All elements should be either ones or zeros. There should be one row per observation per available alternative for the given observation. Elements denote the alternative which is chosen by the given observation with a 1 and a zero otherwise. utility_transform : callable. Should accept a 1D array of systematic utility values, a 1D array of alternative IDs, and miscellaneous args and kwargs. Should return a 2D array whose elements contain the appropriately transformed systematic utility values, based on the current model being evaluated and the given draw of the random coefficients. There should be one column for each draw of the random coefficients. There should have one row per individual per choice situation per available alternative. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a float is passed, then that float determines the ridge penalty for the optimization. Default = None. weights : 1D ndarray or None, optional. Allows for the calculation of weighted log-likelihoods. The weights can represent various things. In stratified samples, the weights may be the proportion of the observations in a given strata for a sample in relation to the proportion of observations in that strata in the population. In latent class models, the weights may be the probability of being a particular class. Default == None. Returns ------- bhhh_matrix : 2D ndarray of shape `(design.shape[1], design.shape[1])`. The returned array is the BHHH approximation of the Fisher Information Matrix. I.e it is the negative of the sum of the outer product of each individual's gradient with itself.
387,465
def check_spelling(spelling_lang, txt):
    if os.name == "nt":
        assert(not "check_spelling() not available on Windows")
        return
    with _ENCHANT_LOCK:
        words_dict = enchant.request_dict(spelling_lang)
        try:
            tknzr = enchant.tokenize.get_tokenizer(spelling_lang)
        except enchant.tokenize.TokenizerNotFoundError:
            # fall back to the default tokenizer
            tknzr = enchant.tokenize.get_tokenizer()

        score = 0
        offset = 0
        for (word, word_pos) in tknzr(txt):
            if len(word) < _MIN_WORD_LEN:
                continue
            if words_dict.check(word):
                score += 100
                continue
            suggestions = words_dict.suggest(word)
            if (len(suggestions) <= 0):
                score -= 10
                continue
            main_suggestion = suggestions[0]
            lv_dist = Levenshtein.distance(word, main_suggestion)
            if (lv_dist > _MAX_LEVENSHTEIN_DISTANCE):
                # reconstructed: words too far from any suggestion are
                # skipped rather than replaced (the `continue` was lost
                # in extraction)
                continue

            # replace the word by its main suggestion
            pre_txt = txt[:word_pos + offset]
            post_txt = txt[word_pos + len(word) + offset:]
            txt = pre_txt + main_suggestion + post_txt
            offset += (len(main_suggestion) - len(word))
            score += 5

        return (txt, score)
Check the spelling in the text, and compute a score. The score is the number of words correctly (or almost correctly) spelled, minus the number of misspelled words. Words that are "almost" correct remain neutral (they are not included in the score). Returns: A tuple: (fixed text, score)
387,466
def get_date_datetime_param(self, request, param):
    if param in request.GET:
        param_value = request.GET.get(param, None)
        date_match = dateparse.date_re.match(param_value)
        if date_match:
            return timezone.datetime.combine(
                dateparse.parse_date(date_match.group(0)),
                timezone.datetime.min.time()
            )
        datetime_match = dateparse.datetime_re.match(param_value)
        if datetime_match:
            return timezone.datetime.combine(
                dateparse.parse_datetime(datetime_match.group(0)).date(),
                timezone.datetime.min.time()
            )
    return None
Check the request for the provided query parameter and returns a rounded value. :param request: WSGI request object to retrieve query parameter data. :param param: the name of the query parameter.
387,467
def get_traceback_data(self):
    # NOTE: most of this Django debug-view method was lost in extraction;
    # only the fragment below survived. The 'vars' key and codec arguments
    # are reconstructed, and `c` (the context dict) is built in lost code.
    default_template_engine = None
    if default_template_engine is None:
        template_loaders = []

    frames = self.get_traceback_frames()
    for i, frame in enumerate(frames):
        if 'vars' in frame:
            frame_vars = []
            for k, v in frame['vars']:
                v = pformat(v)
                if isinstance(v, six.binary_type):
                    v = v.decode('utf-8', 'replace')

    return c
Return a dictionary containing traceback information.
387,468
def add_source_get_correlated(gta, name, src_dict, correl_thresh=0.25,
                              non_null_src=False):
    if gta.roi.has_source(name):
        gta.zero_source(name)
        gta.update_source(name)
        test_src_name = "%s_test" % name
    else:
        test_src_name = name

    gta.add_source(test_src_name, src_dict)
    gta.free_norm(test_src_name)
    gta.free_shape(test_src_name, free=False)
    fit_result = gta.fit(covar=True)

    # dict keys reconstructed; the original literals were lost in extraction
    mask = fit_result['is_norm']
    src_names = np.array(fit_result['src_names'])[mask]
    idx = (src_names == test_src_name).argmax()
    correl_vals = fit_result['correlation'][idx][mask]

    cdict = {}
    for src_name, correl_val in zip(src_names, correl_vals):
        if src_name == name:
            continue
        # fixed: the original compared against a hard-coded 0.25 instead of
        # the correl_thresh parameter documented below
        if np.fabs(correl_val) > correl_thresh:
            cdict[src_name] = correl_val

    if not non_null_src:
        gta.zero_source(test_src_name)
        gta.fit(covar=True)

    return cdict, test_src_name
Add a source and get the set of correlated sources Parameters ---------- gta : `fermipy.gtaanalysis.GTAnalysis` The analysis object name : str Name of the source we are adding src_dict : dict Dictionary of the source parameters correl_thresh : float Threshold for considering a source to be correlated non_null_src : bool If True, don't zero the source Returns ------- cdict : dict Dictionary with names and correlation factors of correlated sources test_src_name : bool Name of the test source
387,469
def get_info(self, wiki=None, show=True, proxy=None, timeout=0):
    if wiki:
        self.params.update({'wiki': wiki})  # key reconstructed
    # action names reconstructed from the docstring
    self._get('siteinfo', show=False, proxy=proxy, timeout=timeout)
    self._get('siteviews', show, proxy, timeout)
    return self
GET site info (general, statistics, siteviews, mostviewed) via https://www.mediawiki.org/wiki/API:Siteinfo, and https://www.mediawiki.org/wiki/Extension:PageViewInfo Optional arguments: - [wiki]: <str> alternate wiki site (default=en.wikipedia.org) - [show]: <bool> echo page data if true - [proxy]: <str> use this HTTP proxy - [timeout]: <int> timeout in seconds (0=wait forever) Data captured: - info: <dict> API:Siteinfo - mostviewed: <list> mostviewed articles {ns=0, title, count} - site: <str> sitename, e.g. 'enwiki' - siteviews: <int> sitewide pageview totals over last WEEK - visitors: <int> sitewide unique visitor total over last WEEK - various counts: activeusers, admins, articles, edits, images jobs, pages, queued-massmessages, siteviews, users, visitors
387,470
def elliptical_arc_to(x1, y1, rx, ry, phi, large_arc_flag, sweep_flag, x2, y2):
    rx = abs(rx)
    ry = abs(ry)
    phi = phi % 360

    # Identical endpoints: the arc segment is omitted entirely (SVG notes).
    if x1 == x2 and y1 == y2:
        return []

    # Zero radius: treated as a straight line to the endpoint (SVG notes).
    if rx == 0 or ry == 0:
        return [(x2, y2)]

    rphi = radians(phi)
    cphi = cos(rphi)
    sphi = sin(rphi)

    # Step 1: compute (x1', y1') in the rotated frame.
    dx = 0.5 * (x1 - x2)
    dy = 0.5 * (y1 - y2)
    x1p = cphi * dx + sphi * dy
    y1p = -sphi * dx + cphi * dy

    # Ensure the radii are large enough.
    lam = (x1p / rx) ** 2 + (y1p / ry) ** 2
    if lam > 1.0:
        scale = sqrt(lam)
        rx *= scale
        ry *= scale

    # Step 2: compute the transformed center (cx', cy').
    num = max((rx * ry) ** 2 - (rx * y1p) ** 2 - (ry * x1p) ** 2, 0.0)
    den = ((rx * y1p) ** 2 + (ry * x1p) ** 2)
    a = sqrt(num / den)
    cxp = a * rx * y1p / ry
    cyp = -a * ry * x1p / rx
    if large_arc_flag == sweep_flag:
        cxp = -cxp
        cyp = -cyp

    # Step 3: midpoint used to map back from the rotated frame.
    mx = 0.5 * (x1 + x2)
    my = 0.5 * (y1 + y2)

    # Step 4: compute theta1 and dtheta.
    dx = (x1p - cxp) / rx
    dy = (y1p - cyp) / ry
    dx2 = (-x1p - cxp) / rx
    dy2 = (-y1p - cyp) / ry
    theta1 = angle(1, 0, dx, dy)
    dtheta = angle(dx, dy, dx2, dy2)

    if not sweep_flag and dtheta > 0:
        dtheta -= 360
    elif sweep_flag and dtheta < 0:
        dtheta += 360

    p = []
    control_points = bezier_arc(cxp - rx, cyp - ry, cxp + rx, cyp + ry,
                                theta1, dtheta)
    for x1p, y1p, x2p, y2p, x3p, y3p, x4p, y4p in control_points:
        p.append((
            transform_from_local(x2p, y2p, cphi, sphi, mx, my) +
            transform_from_local(x3p, y3p, cphi, sphi, mx, my) +
            transform_from_local(x4p, y4p, cphi, sphi, mx, my)
        ))
    return p
An elliptical arc approximated with Bezier curves or a line segment. Algorithm taken from the SVG 1.1 Implementation Notes: http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes
387,471
def main():
    # NOTE: command-line flags, file modes, and format strings below were
    # lost in extraction and have been reconstructed from the docstring.
    firstline, itilt, igeo, linecnt, key = 1, 0, 0, 0, ""
    out = ""
    data, k15 = [], []
    dir = './'
    ofile = ""
    if '-WD' in sys.argv:
        ind = sys.argv.index('-WD')
        dir = sys.argv[ind+1] + '/'
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-i' in sys.argv:
        file = input("Input file name [.k15 format]: ")
        f = open(file, 'r')
        data = f.readlines()
        f.close()
        file = input("Output file name [.s format]: ")
        out = open(file, 'w')
        print(" [g]eographic, [t]ilt corrected, ")
        tg = input(" [return for specimen coordinates]: ")
        if tg == 'g':
            igeo = 1
        elif tg == 't':
            igeo, itilt = 1, 1
    elif '-f' in sys.argv:
        ind = sys.argv.index('-f')
        file = dir + sys.argv[ind+1]
        f = open(file, 'r')
        data = f.readlines()
        f.close()
    else:
        data = sys.stdin.readlines()
    if len(data) == 0:
        print(main.__doc__)
        sys.exit()
    if '-F' in sys.argv:
        ind = sys.argv.index('-F')
        ofile = dir + sys.argv[ind+1]
        out = open(ofile, 'w')
    if '-crd' in sys.argv:
        ind = sys.argv.index('-crd')
        tg = sys.argv[ind+1]
        if tg == 'g': igeo = 1
        if tg == 't': igeo, itilt = 1, 1
    for line in data:
        rec = line.split()
        if firstline == 1:
            firstline = 0
            nam = rec[0]
            if igeo == 1: az, pl = float(rec[1]), float(rec[2])
            if itilt == 1: bed_az, bed_dip = 90. + float(rec[3]), float(rec[4])
        else:
            linecnt += 1
            for i in range(5):
                k15.append(float(rec[i]))
            if linecnt == 3:
                sbar, sigma, bulk = pmag.dok15_s(k15)
                if igeo == 1: sbar = pmag.dosgeo(sbar, az, pl)
                if itilt == 1: sbar = pmag.dostilt(sbar, bed_az, bed_dip)
                outstring = ""
                for s in sbar: outstring += '%10.8f ' % (s)  # format reconstructed
                outstring += '%10.8f' % (sigma)
                if out == "":
                    print(outstring)
                else:
                    out.write(outstring + '\n')
                linecnt, firstline, k15 = 0, 1, []
    if ofile != "": print('Output saved in ', ofile)
NAME k15_s.py DESCRIPTION converts .k15 format data to .s format. assumes Jelinek Kappabridge measurement scheme SYNTAX k15_s.py [-h][-i][command line options][<filename] OPTIONS -h prints help message and quits -i allows interactive entry of options -f FILE, specifies input file, default: standard input -F FILE, specifies output file, default: standard output -crd [g, t] specifies [g]eographic rotation, or geographic AND tectonic rotation INPUT name [az,pl,strike,dip], followed by 3 rows of 5 measurements for each specimen OUTPUT least squares matrix elements and sigma: x11,x22,x33,x12,x23,x13,sigma
387,472
def contains_some_of(self, elements):
    if all(e not in self._subject for e in elements):
        raise self._error_factory(_format("Expected {} to have some of {}",
                                          self._subject, elements))
    return ChainInspector(self._subject)
Ensures :attr:`subject` contains at least one of *elements*, which must be an iterable.
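Illustrative calls (`expect` stands in for however the inspector is constructed in this library):

expect([1, 2, 3]).contains_some_of([2, 9])   # passes: 2 is present
expect([1, 2, 3]).contains_some_of([8, 9])   # raises: no element present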
387,473
def random_state(state=None):
    if is_integer(state):
        return np.random.RandomState(state)
    elif isinstance(state, np.random.RandomState):
        return state
    elif state is None:
        return np.random
    else:
        raise ValueError("random_state must be an integer, a numpy "
                         "RandomState, or None")
Helper function for processing random_state arguments. Parameters ---------- state : int, np.random.RandomState, None. If receives an int, passes to np.random.RandomState() as seed. If receives an np.random.RandomState object, just returns object. If receives `None`, returns np.random. If receives anything else, raises an informative ValueError. Default None. Returns ------- np.random.RandomState
387,474
def write_sampler_metadata(self, sampler):
    # attribute keys reconstructed from context
    self.attrs['sampler'] = sampler.name
    self[self.sampler_group].attrs['nwalkers'] = sampler.nwalkers
    sampler.model.write_metadata(self)
Writes the sampler's metadata.
387,475
def get_dist(dist):
    from scipy import stats
    dc = getattr(stats, dist, None)
    if dc is None:
        e = "Statistical distribution `{}` is not in scipy.stats.".format(dist)
        raise ValueError(e)
    return dc
Return a distribution object from scipy.stats.
387,476
def _get_prefixes(self, metric_type):
    prefixes = []
    if self._prepend_metric_type:
        prefixes.append(self.METRIC_TYPES[metric_type])
    return prefixes
Get prefixes where applicable Add metric prefix counters, timers respectively if :attr:`prepend_metric_type` flag is True. :param str metric_type: The metric type :rtype: list
387,477
def cleanTempDirs(job):
    # fixed: `job is CWLJob` compared the instance to the class and was
    # always False; an isinstance check matches the evident intent
    if isinstance(job, CWLJob) and job._succeeded:
        for tempDir in job.openTempDirs:
            if os.path.exists(tempDir):
                shutil.rmtree(tempDir)
        job.openTempDirs = []
Remove temporarily created directories.
387,478
def drop_if_exists(self, table):
    blueprint = self._create_blueprint(table)
    blueprint.drop_if_exists()
    self._build(blueprint)
Drop a table from the schema. :param table: The table :type table: str
387,479
def get_events(fd, timeout=None):
    (rlist, _, _) = select([fd], [], [], timeout)
    if not rlist:
        return []

    events = []
    while True:
        buf = os.read(fd, _BUF_LEN)
        i = 0
        while i < len(buf):
            (wd, mask, cookie, len_) = struct.unpack_from(_EVENT_FMT, buf, i)
            name = None
            if len_ > 0:
                start = i + _EVENT_SIZE
                end = start + len_
                # the name field is NUL-padded; b'\0' strip reconstructed
                name = buf[start:end].rstrip(b'\0').decode(ENCODING)
            events.append(InotifyEvent(wd, mask, cookie, name))
            i += _EVENT_SIZE + len_
        (rlist, _, _) = select([fd], [], [], 0)
        if not rlist:
            break
    return events
get_events(fd[, timeout]) Return a list of InotifyEvent instances representing events read from inotify. If timeout is None, this will block forever until at least one event can be read. Otherwise, timeout should be an integer or float specifying a timeout in seconds. If get_events times out waiting for events, an empty list will be returned. If timeout is zero, get_events will not block. This version of get_events() will only block the current greenlet.
387,480
def encode_dict(dynamizer, value):
    encoded_dict = {}
    for k, v in six.iteritems(value):
        encoded_type, encoded_value = dynamizer.raw_encode(v)
        encoded_dict[k] = {
            encoded_type: encoded_value,
        }
    # 'M' is DynamoDB's map type tag (reconstructed)
    return 'M', encoded_dict
Encode a dict for the DynamoDB format
387,481
def _count_spaces_startswith(line):
    if line.split()[0].strip() == "":
        return None
    spaces = 0
    for i in line:
        if i.isspace():
            spaces += 1
        else:
            return spaces
Count the number of spaces before the first character
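Two example calls (note that a whitespace-only line would hit an IndexError on line.split()[0]):

_count_spaces_startswith("    key: value")  # -> 4
_count_spaces_startswith("flush")           # -> 0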
387,482
def _get_mro(cls):
    # Old-style classes have no __mro__; derive one by creating a temporary
    # new-style subclass, then drop that subclass from the result.
    if not isinstance(cls, type):
        class cls(cls, object):
            pass
        return cls.__mro__[1:]
    return cls.__mro__
Get an mro for a type or classic class
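Example on a new-style class:

class Base(object): pass
class Child(Base): pass

_get_mro(Child)  # -> (Child, Base, object)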
387,483
def plot(self, joints, ax, target=None, show=False):
    from . import plot_utils

    if ax is None:
        ax = plot_utils.init_3d_figure()
    plot_utils.plot_chain(self, joints, ax)
    plot_utils.plot_basis(ax, self._length)

    if target is not None:
        plot_utils.plot_target(target, ax)

    if show:
        plot_utils.show_figure()
Plots the Chain using Matplotlib Parameters ---------- joints: list The list of the positions of each joint ax: matplotlib.axes.Axes A matplotlib axes target: numpy.array An optional target show: bool Display the axe. Defaults to False
387,484
def img_from_vgg(x):
    x = x.transpose((1, 2, 0))
    # add back the per-channel means subtracted during preconditioning
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    x = x[:, :, ::-1]  # BGR -> RGB
    return x
Decondition an image from the VGG16 model.
387,485
def _parse_byte_data(self, byte_data):
    # the struct format string was lost in extraction; "..." marks the gap
    chunks = unpack("...", byte_data[:self.size])
    det_id, run, time_slice, time_stamp, ticks = chunks
    self.det_id = det_id
    self.run = run
    self.time_slice = time_slice
    self.time_stamp = time_stamp
    self.ticks = ticks
Extract the values from byte string.
387,486
def is_contained_in(pe_pe, root):
    if not pe_pe:
        return False
    # type-name literal 'PE_PE' reconstructed from context
    if type(pe_pe).__name__ != 'PE_PE':
        pe_pe = one(pe_pe).PE_PE[8001]()

    ep_pkg = one(pe_pe).EP_PKG[8000]()
    c_c = one(pe_pe).C_C[8003]()

    if root in [ep_pkg, c_c]:
        return True
    elif is_contained_in(ep_pkg, root):
        return True
    elif is_contained_in(c_c, root):
        return True
    else:
        return False
Determine if a PE_PE is contained within a EP_PKG or a C_C.
387,487
def _get_signed_predecessors(im, node, polarity):
    for pred in im.predecessors(node):
        pred_edge = (pred, node)
        yield (pred, _get_edge_sign(im, pred_edge) * polarity)
Get upstream nodes in the influence map. Return the upstream nodes along with the overall polarity of the path to that node, by accounting for the polarity of the path to the given node and the polarity of the edge between the given node and its immediate predecessors. Parameters ---------- im : networkx.MultiDiGraph Graph containing the influence map. node : str The node (rule name) in the influence map to get predecessors (upstream nodes) for. polarity : int Polarity of the overall path to the given node. Returns ------- generator of tuples, (node, polarity) Each tuple returned contains two elements, a node (string) and the polarity of the overall path (int) to that node.
387,488
def zoneToRegion(zone):
    from toil.lib.context import Context
    return Context.availability_zone_re.match(zone).group(1)
Get a region (e.g. us-west-2) from a zone (e.g. us-west-1c).
387,489
def add_user_to_group(user_name, group_name, region=None, key=None,
                      keyid=None, profile=None):
    user = get_user(user_name, region, key, keyid, profile)
    if not user:
        # log message reconstructed
        log.error('Username %s does not exist.', user_name)
        return False
    if user_exists_in_group(user_name, group_name, region=region, key=key,
                            keyid=keyid, profile=profile):
        return True
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        info = conn.add_user_to_group(group_name, user_name)
        if not info:
            return False
        return info
    except boto.exception.BotoServerError as e:
        log.debug(e)
        # log message reconstructed
        log.error('Failed to add IAM user %s to group %s.',
                  user_name, group_name)
        return False
Add user to group. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt myminion boto_iam.add_user_to_group myuser mygroup
387,490
def maxsize(self, size):
    if size < 0:
        # error message reconstructed; the original literal was lost
        raise ValueError("maxsize must be non-negative")
    with self._lock:
        self._enforce_size_limit(size)
        self._maxsize = size
Resize the cache, evicting the oldest items if necessary.
387,491
def time_report(self, include_overhead=False, header=None,
                include_server=True, digits=4):
    # NOTE: many literals (timestamp keys, column names, and the arguments
    # to time_report_item) were lost in extraction. "..." marks the gaps;
    # the column names are inferred from the surviving 'Start'/'End' keys.
    try:
        self._timestamps.setdefault('...', time.time())
        header = header or ['Name', 'Start', 'End', 'Duration', 'Count']
        ret = []
        if include_overhead:
            ret.append(self.time_report_item('...', '...'))
        ret.append(self.time_report_item('...', '...'))
        if '...' in self._timestamps:
            ret.append(self.time_report_item('...', '...'))
        ret.append(self.time_report_item('...', '...'))
        if self._server_timing and include_server:
            send_start = ret[-1]['Start']
            delta = max(0, ret[-1]['End'] - (
                self._server_timing['...'] - self._server_timing['...']))
            if include_overhead:
                ret.append(
                    {'Name': "Internet overhead", "Start": send_start,
                     "End": ret[0]['End'] + delta,
                     'Duration': delta, 'Count': 1})
            if '...' in self._server_timing:
                ret.append({'Name': "Time profile overhead",
                            "Start": send_start + delta,
                            'Duration': self._server_timing['...'],
                            'End': send_start + delta +
                                   self._server_timing['...'] -
                                   self._server_timing['...'],
                            'Count': sum(
                                [len(msg.get('...', []))
                                 for msg in self._server_timing['...']]) + 1})
                for msg in self._server_timing['...']:
                    ret.append(msg.copy())
                    ret[-1]['Start'] = ret[-1].setdefault(
                        'Start', 0) + delta + send_start
                    ret[-1]['End'] = ret[-1].setdefault(
                        'End', ret[-1]['Start']) + delta + send_start
            else:
                ret += self._server_timing['...']
        if '...' in self._timestamps:
            ret.append(self.time_report_item('...', '...'))
        if include_overhead:
            ret.append(self.time_report_item('...', '...'))
        return '...' % (
            round(self.timedelta.total_seconds(), digits),
            reformat_date(self._timestamps.get('...', '...')),
            str(ReprListDict(ret, col_names=header,
                             digits=digits).list_of_list()))
    except Exception as ex:
        return "Exception creating time report with %s" % ex.message
Returns a str table of the times for this api call :param include_overhead: bool if True include information from overhead, such as the time for this class code :param header: bool if True includes the column header :param include_server: bool if True includes times reported by the server in the header :param digits: int of the number of significant digits :return: str table of the times for the api call
387,492
def get_byte(self, i):
    value = []
    for x in range(2):
        c = next(i)
        if c.lower() in _HEX:
            value.append(c)
        else:
            # error message reconstructed; the original literal was lost
            raise SyntaxError('Invalid byte character at %d!' % (i.index - 1))
    return ''.join(value)
Get byte.
387,493
def pi_zoom_origin(self, viewer, event, msg=True):
    origin = (event.data_x, event.data_y)
    return self._pinch_zoom_rotate(viewer, event.state, event.rot_deg,
                                   event.scale, msg=msg, origin=origin)
Like pi_zoom(), but pans the image as well to keep the coordinate under the cursor in that same position relative to the window.
387,494
def get_netloc(self):
    if self.proxy:
        scheme = self.proxytype
        host = self.proxyhost
        port = self.proxyport
    else:
        scheme = self.scheme
        host = self.host
        port = self.port
    return (scheme, host, port)
Determine scheme, host and port for this connection taking proxy data into account. @return: tuple (scheme, host, port) @rtype: tuple(string, string, int)
387,495
def deserialize(self, value, **kwargs):
    for validator in self.validators:
        validator.validate(value, **kwargs)
    return value
Deserialization of value. :return: Deserialized value. :raises: :class:`halogen.exception.ValidationError` exception if value is not valid.
387,496
def _reciprocal_condition_number(lu_mat, one_norm):
    if _scipy_lapack is None:
        raise OSError("This function requires SciPy for calling into LAPACK.")

    rcond, info = _scipy_lapack.dgecon(lu_mat, one_norm)
    if info != 0:
        raise RuntimeError(
            "The reciprocal 1-norm condition number could not be computed."
        )
    return rcond
r"""Compute reciprocal condition number of a matrix. Args: lu_mat (numpy.ndarray): A 2D array of a matrix :math:`A` that has been LU-factored, with the non-diagonal part of :math:`L` stored in the strictly lower triangle and :math:`U` stored in the upper triangle. one_norm (float): The 1-norm of the original matrix :math:`A`. Returns: float: The reciprocal condition number of :math:`A`. Raises: OSError: If SciPy is not installed. RuntimeError: If the reciprocal 1-norm condition number could not be computed.
387,497
def set_user_attribute(self, user_name, key, value):
    # HTTP verb, URL fragment, and form-field names reconstructed from the
    # ownCloud OCS provisioning API
    res = self._make_ocs_request(
        'PUT',
        self.OCS_SERVICE_CLOUD,
        'users/' + parse.quote(user_name),
        data={'key': self._encode_string(key),
              'value': self._encode_string(value)}
    )
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree, [100])
        return True
    raise HTTPResponseError(res)
Sets a user attribute :param user_name: name of user to modify :param key: key of the attribute to set :param value: value to set :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned
387,498
def get_content(self, obj):
    serializer = ContentSerializer(
        instance=obj.contentitem_set.all(),
        many=True,
        context=self.context,
    )
    return serializer.data
Obtain the QuerySet of content items. :param obj: Page object. :return: List of rendered content items.
387,499
def p_elseif_list(p):
    if len(p) == 2:
        p[0] = []
    else:
        p[0] = p[1] + [ast.ElseIf(p[4], p[6], lineno=p.lineno(2))]
elseif_list : empty | elseif_list ELSEIF LPAREN expr RPAREN statement