Dataset columns: Unnamed: 0 (int64, values 0 to 389k), code (string, lengths 26 to 79.6k), docstring (string, lengths 1 to 46.9k).
380,100
def get_labels(self, depth=None):
    if not isinstance(self.ref_cell, Cell):
        return []
    if self.rotation is not None:
        ct = numpy.cos(self.rotation * numpy.pi / 180.0)
        st = numpy.sin(self.rotation * numpy.pi / 180.0)
        st = numpy.array([-st, st])
    if self.magnification is not None:
        mag = numpy.array([self.magnification, self.magnification])
    if self.origin is not None:
        orgn = numpy.array(self.origin)
    if self.x_reflection:
        xrefl = numpy.array([1, -1], dtype=int)
    cell_labels = self.ref_cell.get_labels(depth=depth)
    labels = []
    for ii in range(self.columns):
        for jj in range(self.rows):
            spc = numpy.array([self.spacing[0] * ii, self.spacing[1] * jj])
            for clbl in cell_labels:
                lbl = libCopy.deepcopy(clbl)
                if self.magnification:
                    lbl.position = lbl.position * mag + spc
                else:
                    lbl.position = lbl.position + spc
                if self.x_reflection:
                    lbl.position = lbl.position * xrefl
                if self.rotation is not None:
                    lbl.position = lbl.position * ct + lbl.position[::-1] * st
                if self.origin is not None:
                    lbl.position = lbl.position + orgn
                labels.append(lbl)
    return labels
Returns a list of labels created by this reference. Parameters ---------- depth : integer or ``None`` If not ``None``, defines how many levels of references to retrieve labels from. Returns ------- out : list of ``Label`` List containing the labels in this cell and its references.
380,101
def show_compatibility_message(self, message):
    messageBox = QMessageBox(self)
    messageBox.setWindowModality(Qt.NonModal)
    messageBox.setAttribute(Qt.WA_DeleteOnClose)
    messageBox.setWindowTitle('Compatibility Check')
    messageBox.setText(message)
    messageBox.setStandardButtons(QMessageBox.Ok)
    messageBox.show()
Show compatibility message.
380,102
def pop_configuration(self):
    if len(self.__configurations) == 1:
        raise IndexError('Can not pop the last configuration on the stack.')
    self.__configurations.pop()
    self.__mapped_attr_cache.clear()
Pops the currently active configuration from the stack of configurations managed by this mapping. :raises IndexError: If there is only one configuration in the stack.
380,103
def get_plaintext_document_body(fpath, keep_layout=False):
    textbody = []
    mime_type = magic.from_file(fpath, mime=True)
    if mime_type == "text/plain":
        with open(fpath, "r") as f:
            textbody = [line.decode("utf-8") for line in f.readlines()]
    elif mime_type == "application/pdf":
        textbody = convert_PDF_to_plaintext(fpath, keep_layout)
    else:
        raise UnknownDocumentTypeError(mime_type)
    return textbody
Given a file-path to a full-text, return a list of unicode strings whereby each string is a line of the fulltext. In the case of a plain-text document, this simply means reading the contents in from the file. In the case of a PDF however, this means converting the document to plaintext. It raises UnknownDocumentTypeError if the document is not a PDF or plain text. @param fpath: (string) - the path to the fulltext file @return: (list) of strings - each string being a line in the document.
380,104
def unsubscribe_from_candles(self, pair, timeframe=None, **kwargs):
    valid_tfs = ['1m', '5m', '15m', '30m', '1h', '3h', '6h', '12h',
                 '1D', '7D', '14D', '1M']
    if timeframe:
        if timeframe not in valid_tfs:
            raise ValueError("timeframe must be any of %s" % valid_tfs)
    else:
        timeframe = '1m'
    identifier = ('candles', pair, timeframe)
    pair = 't' + pair if not pair.startswith('t') else pair
    key = 'trade:' + timeframe + ':' + pair
    self._unsubscribe('candles', identifier, key=key, **kwargs)
Unsubscribe from the passed pair's OHLC data channel. :param timeframe: str, {1m, 5m, 15m, 30m, 1h, 3h, 6h, 12h, 1D, 7D, 14D, 1M} :param kwargs: :return:
380,105
def _invert(self):
    result = defaultdict(dict)
    for test_context, src_context in six.iteritems(self.data):
        for src, lines in six.iteritems(src_context):
            result[src][test_context] = lines
    return result
Invert coverage data from {test_context: {file: line}} to {file: {test_context: line}}
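A minimal sketch of the same inversion on toy data (plain ``dict.items()`` standing in for ``six.iteritems``; the sample values are invented):

from collections import defaultdict

# {test_context: {file: lines}}
data = {"test_a": {"src.py": {1, 2}}, "test_b": {"src.py": {2, 3}}}
result = defaultdict(dict)
for test_context, src_context in data.items():
    for src, lines in src_context.items():
        result[src][test_context] = lines
# {file: {test_context: lines}}
assert result["src.py"] == {"test_a": {1, 2}, "test_b": {2, 3}}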
380,106
def decompress(databasepath, database_name, compression, compressed_file):
    if os.path.isfile(compressed_file):
        if compression == 'tar':
            logging.info('Extracting {dbname} from archives'.format(dbname=database_name))
            with tarfile.open(compressed_file, 'r') as tar:
                tar.extractall(path=databasepath)
        elif compression == 'gz':
            with gzip.open(compressed_file, 'rb') as gz:
                file_name = os.path.basename(os.path.splitext(compressed_file)[0])
                output_file = os.path.join(databasepath, database_name, file_name)
                logging.info('Extracting {file_name}'.format(file_name=file_name))
                with open(output_file, 'wb') as output:
                    shutil.copyfileobj(gz, output)
        else:
            logging.info('Extracting {dbname} from archives'.format(dbname=database_name))
            with zipfile.ZipFile(compressed_file, 'r') as zip_file:
                zip_file.extractall(path=databasepath)
        os.remove(compressed_file)
Decompress the provided file using the appropriate library :param databasepath: Name and path of where the database files are to be downloaded :param database_name: Name of the database e.g. sipprverse :param compression: STR MOB-suite databases are .zip files, while OLC databases are .tar.gz :param compressed_file: Compressed file to process
380,107
def annual_heating_design_day_990(self):
    if bool(self._winter_des_day_dict) is True:
        return DesignDay.from_ashrae_dict_heating(
            self._winter_des_day_dict, self.location, True,
            self._stand_press_at_elev)
    else:
        return None
A design day object representing the annual 99.0% heating design day.
380,108
def delete_namespaced_limit_range(self, name, namespace, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.delete_namespaced_limit_range_with_http_info(name, namespace, **kwargs)
    else:
        (data) = self.delete_namespaced_limit_range_with_http_info(name, namespace, **kwargs)
        return data
delete a LimitRange This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_limit_range(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the LimitRange (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread.
380,109
def get_mockup_motor(self, motor):
    return next((m for m in self.robot.motors if m.name == motor.name), None)
Gets the equivalent :class:`~pypot.primitive.primitive.MockupMotor`.
380,110
def run_container(image,
                  name=None,
                  skip_translate=None,
                  ignore_collisions=False,
                  validate_ip_addrs=True,
                  client_timeout=salt.utils.docker.CLIENT_TIMEOUT,
                  bg=False,
                  replace=False,
                  force=False,
                  networks=None,
                  **kwargs):
    if kwargs.pop('pull', True) and not resolve_image_id(image):
        pull(image, client_timeout=client_timeout)
    removed_ids = None
    if name is not None:
        try:
            pre_state = __salt__['docker.state'](name)
        except CommandExecutionError:
            pass
        else:
            if pre_state == 'running' and not (replace and force):
                raise CommandExecutionError(
                    'Container \'{0}\' exists and is running. Run with '
                    'replace=True and force=True to force removal of the '
                    'existing container.'.format(name))
            elif not replace:
                raise CommandExecutionError(
                    'Container \'{0}\' exists. Run with replace=True to '
                    'remove the existing container.'.format(name))
    # ... (container create/start logic not preserved in this record) ...
    try:
        try:
            for line in _client_wrapper('logs', ret['Id'], stream=True,
                                        timestamps=False):
                output.append(salt.utils.stringutils.to_unicode(line))
        except CommandExecutionError:
            msg = 'Failed to get logs from container'
            _append_warning(ret, msg)
        ret['Time_Elapsed'] = time.time() - time_started
        _clear_context()
        if not bg:
            ret['Logs'] = ''.join(output)
            if not auto_remove:
                try:
                    cinfo = inspect_container(ret['Id'])
                except CommandExecutionError:
                    _append_warning(
                        ret, 'Failed to inspect container after running')
                else:
                    cstate = cinfo.get('State', {})
                    cstatus = cstate.get('Status')
                    if cstatus != 'exited':
                        _append_warning(ret, 'Container state is not \'exited\'')
                    ret['ExitCode'] = cstate.get('ExitCode')
    except CommandExecutionError as exc:
        try:
            exc_info.update(exc.info)
        except (TypeError, ValueError):
            pass
        raise CommandExecutionError(exc.__str__(), info=exc_info)
    return ret
.. versionadded:: 2018.3.0 Equivalent to ``docker run`` on the Docker CLI. Runs the container, waits for it to exit, and returns the container's logs when complete. .. note:: Not to be confused with :py:func:`docker.run <salt.modules.dockermod.run>`, which provides a :py:func:`cmd.run <salt.modules.cmdmod.run>`-like interface for executing commands in a running container. This function accepts the same arguments as :py:func:`docker.create <salt.modules.dockermod.create>`, with the exception of ``start``. In addition, it accepts the arguments from :py:func:`docker.logs <salt.modules.dockermod.logs>`, with the exception of ``follow``, to control how logs are returned. Finally, the ``bg`` argument described below can be used to optionally run the container in the background (the default behavior is to block until the container exits). bg : False If ``True``, this function will not wait for the container to exit and will not return its logs. It will however return the container's name and ID, allowing for :py:func:`docker.logs <salt.modules.dockermod.logs>` to be used to view the logs. .. note:: The logs will be inaccessible once the container exits if ``auto_remove`` is set to ``True``, so keep this in mind. replace : False If ``True``, and if the named container already exists, this will remove the existing container. The default behavior is to return a ``False`` result when the container already exists. force : False If ``True``, and the named container already exists, *and* ``replace`` is also set to ``True``, then the container will be forcibly removed. Otherwise, the state will not proceed and will return a ``False`` result. networks Networks to which the container should be connected. If automatic IP configuration is being used, the networks can be a simple list of network names. If custom IP configuration is being used, then this argument must be passed as a dictionary. CLI Examples: .. code-block:: bash salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh # Run container in the background salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh bg=True # Connecting to two networks using automatic IP configuration salt myminion docker.run_container myuser/myimage command='perl /scripts/sync.py' networks=net1,net2 # net1 using automatic IP, net2 using static IPv4 address salt myminion docker.run_container myuser/myimage command='perl /scripts/sync.py' networks='{"net1": {}, "net2": {"ipv4_address": "192.168.27.12"}}'
380,111
def _dict_subset(keys, master_dict):
    return dict([(k, v) for k, v in six.iteritems(master_dict) if k in keys])
Return a dictionary of only the subset of keys/values specified in keys
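For illustration, a standalone run of the same comprehension (the sample dict is invented):

import six

master = {"host": "db1", "port": 5432, "debug": True}
subset = dict([(k, v) for k, v in six.iteritems(master) if k in ("host", "port")])
assert subset == {"host": "db1", "port": 5432}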
380,112
def commit_transaction(self):
    self._check_ended()
    retry = False
    state = self._transaction.state
    if state is _TxnState.NONE:
        raise InvalidOperation("No transaction started")
    elif state in (_TxnState.STARTING, _TxnState.COMMITTED_EMPTY):
        # Server transaction was never started, nothing to commit.
        self._transaction.state = _TxnState.COMMITTED_EMPTY
        return
    elif state is _TxnState.ABORTED:
        raise InvalidOperation(
            "Cannot call commitTransaction after calling abortTransaction")
    elif state is _TxnState.COMMITTED:
        # Explicitly retrying the commit; move the state back so that
        # the transaction counts as in progress.
        self._transaction.state = _TxnState.IN_PROGRESS
        retry = True
    try:
        self._finish_transaction_with_retry("commitTransaction", retry)
    except ConnectionFailure as exc:
        exc._remove_error_label("TransientTransactionError")
        _reraise_with_unknown_commit(exc)
    except WTimeoutError as exc:
        _reraise_with_unknown_commit(exc)
    except OperationFailure as exc:
        if exc.code not in _UNKNOWN_COMMIT_ERROR_CODES:
            raise
        _reraise_with_unknown_commit(exc)
    finally:
        self._transaction.state = _TxnState.COMMITTED
Commit a multi-statement transaction. .. versionadded:: 3.7
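A hedged PyMongo usage sketch (assumes a MongoDB replica set reachable at the default URI; multi-statement transactions require one):

from pymongo import MongoClient

client = MongoClient()
with client.start_session() as session:
    with session.start_transaction():
        client.test.coll.insert_one({"x": 1}, session=session)
        # commit_transaction() runs automatically when the block exits cleanly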
380,113
def next(self):
    x, y = next(self.scan)
    xr = -x if self.rx else x
    yr = -y if self.ry else y
    return xr, yr
Next point in iteration
380,114
def hostinterface_update(interfaceid, **kwargs):
    conn_args = _login(**kwargs)
    ret = False
    try:
        if conn_args:
            method = 'hostinterface.update'
            params = {"interfaceid": interfaceid}
            params = _params_extend(params, **kwargs)
            ret = _query(method, params, conn_args['url'], conn_args['auth'])
            return ret['result']['interfaceids'][0]
        else:
            raise KeyError
    except KeyError:
        return ret
.. versionadded:: 2016.3.0 Update host interface .. note:: This function accepts all standard hostinterface properties: keyword argument names differ depending on your zabbix version, see here__. .. __: https://www.zabbix.com/documentation/2.4/manual/api/reference/hostinterface/object#host_interface :param interfaceid: ID of the hostinterface to update :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) :return: ID of the updated host interface, False on failure. CLI Example: .. code-block:: bash salt '*' zabbix.hostinterface_update 6 ip_=0.0.0.2
380,115
def masks(list_of_index_lists, n):
    for il, l in enumerate(list_of_index_lists):
        mask = np.zeros(n, dtype=bool)
        mask[l] = True
        list_of_index_lists[il] = mask
    masks = np.array(list_of_index_lists)
    return masks
Make an array in which rows store 1d mask arrays from list of index lists. Parameters ---------- n : int Maximal index / number of samples.
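A worked example of the construction (toy indices invented):

import numpy as np

index_lists = [[0, 2], [1]]
n = 4
rows = []
for l in index_lists:
    mask = np.zeros(n, dtype=bool)
    mask[l] = True
    rows.append(mask)
print(np.array(rows))
# [[ True False  True False]
#  [False  True False False]]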
380,116
def list_open_buffers(self): active_eb = self.active_editor_buffer visible_ebs = self.active_tab.visible_editor_buffers() def make_info(i, eb): return OpenBufferInfo( index=i, editor_buffer=eb, is_active=(eb == active_eb), is_visible=(eb in visible_ebs)) return [make_info(i, eb) for i, eb in enumerate(self.editor_buffers)]
Return a `OpenBufferInfo` list that gives information about the open buffers.
380,117
async def create_new_pump_async(self, partition_id, lease): loop = asyncio.get_event_loop() partition_pump = EventHubPartitionPump(self.host, lease) loop.create_task(partition_pump.open_async()) self.partition_pumps[partition_id] = partition_pump _logger.info("Created new partition pump %r %r", self.host.guid, partition_id)
Create a new pump thread with a given lease. :param partition_id: The partition ID. :type partition_id: str :param lease: The lease to be used. :type lease: ~azure.eventprocessorhost.lease.Lease
380,118
def main():
    parser = argparse.ArgumentParser(description='Generate a TPIP report.')
    parser.add_argument('output_filename', type=str, metavar='output-filename',
                        help='path of the output file', nargs='?')
    parser.add_argument('--only', type=str, help='only report packages whose name contains this string')
    args = parser.parse_args()
    output_path = os.path.abspath(args.output_filename) if args.output_filename else None
    skips = []
    tpip_pkgs = []
    for pkg_name, pkg_item in sorted(pkg_resources.working_set.by_key.items()):
        if args.only and args.only not in pkg_name.lower():
            continue
        if pkg_name in EXCLUDED_PACKAGES:
            skips.append(pkg_name)
            continue
        metadata_lines = get_metadata(pkg_item)
        tpip_pkg = process_metadata(pkg_name, metadata_lines)
        tpip_pkgs.append(force_ascii_values(tpip_pkg))
    print(json.dumps(tpip_pkgs, indent=2, sort_keys=True))
    print('%s packages processed, output written to %s (skipped: %s)' % (
        len(tpip_pkgs),
        output_path,
        ', '.join(skips),
    ))
    output_path and write_csv_file(output_path, tpip_pkgs)
Generate a TPIP report.
380,119
def _get_user_class(self, name):
    self._user_classes.setdefault(name, _make_user_class(self, name))
    return self._user_classes[name]
Get or create a user class of the given type.
380,120
def get_coordination_symmetry_measures_optim(self, only_minimum=True, all_csms=True,
                                             nb_set=None, optimization=None):
    cn = len(self.local_geometry.coords)
    test_geometries = self.allcg.get_implemented_geometries(cn)
    if all([cg.algorithms[0].algorithm_type == EXPLICIT_PERMUTATIONS
            for cg in test_geometries]):
        return self.get_coordination_symmetry_measures(
            only_minimum=only_minimum, all_csms=all_csms, optimization=optimization)
    if not all([all([algo.algorithm_type == SEPARATION_PLANE
                     for algo in cg.algorithms]) for cg in test_geometries]):
        raise ValueError('All algorithms should be EXPLICIT_PERMUTATIONS or SEPARATION_PLANE')
    result_dict = {}
    for geometry in test_geometries:
        self.perfect_geometry = AbstractGeometry.from_cg(
            cg=geometry,
            centering_type=self.centering_type,
            include_central_site_in_centroid=self.include_central_site_in_centroid)
        points_perfect = self.perfect_geometry.points_wcs_ctwcc()
        cgsm = self.coordination_geometry_symmetry_measures_sepplane_optim(
            geometry, points_perfect=points_perfect, nb_set=nb_set,
            optimization=optimization)
        result, permutations, algos, local2perfect_maps, perfect2local_maps = cgsm
        if only_minimum:
            if len(result) > 0:
                imin = np.argmin([rr['symmetry_measure'] for rr in result])
                if geometry.algorithms is not None:
                    algo = algos[imin]
                else:
                    algo = algos
                result_dict[geometry.mp_symbol] = {
                    'csm': result[imin]['symmetry_measure'],
                    'indices': permutations[imin],
                    'algo': algo,
                    'local2perfect_map': local2perfect_maps[imin],
                    'perfect2local_map': perfect2local_maps[imin],
                    'scaling_factor': 1.0 / result[imin]['scaling_factor'],
                    'rotation_matrix': np.linalg.inv(result[imin]['rotation_matrix']),
                    'translation_vector': result[imin]['translation_vector']}
                if all_csms:
                    self._update_results_all_csms(result_dict, permutations, imin, geometry)
    return result_dict
Returns the continuous symmetry measures of the current local geometry in a dictionary. :return: the continuous symmetry measures of the current local geometry in a dictionary.
380,121
def check_exports(mod, specs, renamings): functions = {renamings.get(k, k): v for k, v in specs.functions.items()} mod_functions = {node.name: node for node in mod.body if isinstance(node, ast.FunctionDef)} for fname, signatures in functions.items(): try: fnode = mod_functions[fname] except KeyError: raise PythranSyntaxError( "Invalid spec: exporting undefined function `{}`" .format(fname)) for signature in signatures: args_count = len(fnode.args.args) if len(signature) > args_count: raise PythranSyntaxError( "Too many arguments when exporting `{}`" .format(fname)) elif len(signature) < args_count - len(fnode.args.defaults): raise PythranSyntaxError( "Not enough arguments when exporting `{}`" .format(fname))
Does nothing but raise a PythranSyntaxError if specs reference an undefined global
380,122
def seek(self, rev): if not self: return if type(rev) is not int: raise TypeError("rev must be int") past = self._past future = self._future if future: appender = past.append popper = future.pop future_start = future[-1][0] while future_start <= rev: appender(popper()) if future: future_start = future[-1][0] else: break if past: popper = past.pop appender = future.append past_end = past[-1][0] while past_end > rev: appender(popper()) if past: past_end = past[-1][0] else: break
Arrange the caches to help look up the given revision.
380,123
def deleteFeatures(self, objectIds="", where="", geometryFilter=None,
                   gdbVersion=None, rollbackOnFailure=True):
    dURL = self._url + "/deleteFeatures"
    params = {"f": "json",
              "rollbackOnFailure": rollbackOnFailure}
    if gdbVersion is not None:
        params['gdbVersion'] = gdbVersion
    if geometryFilter is not None and \
            isinstance(geometryFilter, filters.GeometryFilter):
        gfilter = geometryFilter.filter
        params['geometry'] = gfilter['geometry']
        params['geometryType'] = gfilter['geometryType']
        params['spatialRel'] = gfilter['spatialRel']
        params['inSR'] = gfilter['inSR']
    if where is not None and \
            where != "":
        params['where'] = where
    if objectIds is not None and \
            objectIds != "":
        params['objectIds'] = objectIds
    result = self._post(url=dURL, param_dict=params,
                        securityHandler=self._securityHandler,
                        proxy_port=self._proxy_port,
                        proxy_url=self._proxy_url)
    self.__init()
    return result
removes 1:n features based on a sql statement Input: objectIds - The object IDs of this layer/table to be deleted where - A where clause for the query filter. Any legal SQL where clause operating on the fields in the layer is allowed. Features conforming to the specified where clause will be deleted. geometryFilter - a filters.GeometryFilter object to limit deletion by a geometry. gdbVersion - Geodatabase version to apply the edits. This parameter applies only if the isDataVersioned property of the layer is true rollbackOnFailure - parameter to specify if the edits should be applied only if all submitted edits succeed. If false, the server will apply the edits that succeed even if some of the submitted edits fail. If true, the server will apply the edits only if all edits succeed. The default value is true. Output: JSON response as dictionary
380,124
def add_user( self, user, first_name=None, last_name=None, email=None, password=None ): self.project_service.set_auth(self._token_project) self.project_service.add_user( user, first_name, last_name, email, password)
Add a new user. Args: user (string): User name. first_name (optional[string]): User's first name. Defaults to None. last_name (optional[string]): User's last name. Defaults to None. email: (optional[string]): User's email address. Defaults to None. password: (optional[string]): User's password. Defaults to None. Raises: requests.HTTPError on failure.
380,125
def get_coordinate_systems( self, token: dict = None, srs_code: str = None, prot: str = "https" ) -> dict: if isinstance(srs_code, str): specific_srs = "/{}".format(srs_code) else: specific_srs = "" req_url = "{}://v1.{}.isogeo.com/coordinate-systems{}".format( prot, self.api_url, specific_srs ) req = self.get( req_url, headers=self.header, proxies=self.proxies, verify=self.ssl ) checker.check_api_response(req) return req.json()
Get available coordinate systems in Isogeo API. :param str token: API auth token :param str srs_code: code of a specific coordinate system :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs).
380,126
def _autoinsert_quotes(self, key):
    char = {Qt.Key_QuoteDbl: '"', Qt.Key_Apostrophe: '\''}[key]
    line_text = self.editor.get_text('sol', 'eol')
    line_to_cursor = self.editor.get_text('sol', 'cursor')
    cursor = self.editor.textCursor()
    last_three = self.editor.get_text('sol', 'cursor')[-3:]
    last_two = self.editor.get_text('sol', 'cursor')[-2:]
    trailing_text = self.editor.get_text('cursor', 'eol').strip()
    if (len(trailing_text) > 0 and
            trailing_text[0] not in (',', ':', ';', ')', ']', '}')):
        self.editor.insert_text(char)
    elif (unmatched_quotes_in_line(line_text) and
            (not last_three == 3*char)):
        self.editor.insert_text(char)
    elif self.editor.next_char() == char:
        cursor.movePosition(QTextCursor.NextCharacter,
                            QTextCursor.KeepAnchor, 1)
        cursor.clearSelection()
        self.editor.setTextCursor(cursor)
    elif last_three == 3*char:
        self.editor.insert_text(3*char)
        cursor = self.editor.textCursor()
        cursor.movePosition(QTextCursor.PreviousCharacter,
                            QTextCursor.KeepAnchor, 3)
        cursor.clearSelection()
        self.editor.setTextCursor(cursor)
    elif last_two == 2*char:
        self.editor.insert_text(char)
        self.editor.delayed_popup_docstring()
    else:
        self.editor.insert_text(2*char)
        cursor = self.editor.textCursor()
        cursor.movePosition(QTextCursor.PreviousCharacter)
        self.editor.setTextCursor(cursor)
Control how to automatically insert quotes in various situations.
380,127
def run_model(self, op_list, num_steps, feed_vars=(), feed_data=None,
              print_every=100, allow_initialize=True):
    feed_data = feed_data or itertools.repeat(())
    ops = [bookkeeper.global_step()]
    ops.extend(op_list)
    sess = tf.get_default_session()
    self.prepare_model(sess, allow_initialize=allow_initialize)
    results = []
    try:
        if num_steps is None:
            counter = itertools.count(0)
        elif num_steps >= 0:
            counter = xrange(num_steps)
        else:
            raise ValueError('num_steps cannot be negative: %s' % num_steps)
        for i, data in zip(counter, feed_data):
            log_this_time = print_every and i % print_every == 0
            if len(data) != len(feed_vars):
                raise ValueError('feed_data and feed_vars must be the same length: %d vs %d' % (
                    len(data), len(feed_vars)))
            if self._coord.should_stop():
                print('Coordinator stopped')
                sys.stdout.flush()
                self.stop_queues()
                break
            if len(feed_vars) != len(data):
                raise ValueError('feed_vars must be the same length as feed_data')
            if log_this_time and self._summary_writer:
                results = sess.run(ops + [self._summaries],
                                   dict(zip(feed_vars, data)))
                self._summary_writer.add_summary(results[-1], results[0])
                results = results[:-1]
            else:
                results = sess.run(ops, dict(zip(feed_vars, data)))
            if log_this_time:
                self._log_and_save(sess, results)
    except:
        sys.stdout.flush()
        self.stop_queues()
        raise
    return results
Runs `op_list` for `num_steps`. Args: op_list: A list of ops to run. num_steps: Number of steps to run this for. If feeds are used, this is a maximum. `None` can be used to signal "forever". feed_vars: The variables to feed. feed_data: An iterator that feeds data tuples. print_every: Print a log line and checkpoint every so many steps. allow_initialize: If True, the model will be initialized if any variable is uninitialized, if False the model will not be initialized. Returns: The final run result as a list. Raises: ValueError: If feed_data doesn't match feed_vars.
380,128
def set_cmap(self, cmap, callback=True): self.cmap = cmap with self.suppress_changed: self.calc_cmap() self.t_.set(color_map=cmap.name, callback=False)
Set the color map used by this RGBMapper. `cmap` specifies a ColorMap object. If `callback` is True, then any callbacks associated with this change will be invoked.
380,129
def countthai(text: str, ignore_chars: str = _DEFAULT_IGNORE_CHARS) -> float:
    if not text or not isinstance(text, str):
        return 0
    if not ignore_chars:
        ignore_chars = ""
    num_thai = 0
    num_ignore = 0
    for ch in text:
        if ch in ignore_chars:
            num_ignore += 1
        elif isthaichar(ch):
            num_thai += 1
    num_count = len(text) - num_ignore
    return (num_thai / num_count) * 100
:param str text: input text :return: float, proportion of characters in the text that are Thai characters
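A hedged doctest-style example (assuming the function is importable from pythainlp.util; with ignore_chars="" every character counts):

>>> countthai("สวัสดีabcd", ignore_chars="")
60.0

"สวัสดี" is six Thai characters out of ten total, hence 60.0.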
380,130
def string2identifier(s):
    if len(s) == 0:
        return "_"
    if s[0] not in string.ascii_letters:
        s = "_" + s
    valids = string.ascii_letters + string.digits + "_"
    out = ""
    for i, char in enumerate(s):
        if char in valids:
            out += char
        else:
            out += "_"
    return out
Turn a string into a valid python identifier. Currently only allows ASCII letters and underscore. Illegal characters are replaced with underscore. This is slightly more opinionated than python 3 itself, and may be refactored in future (see PEP 3131). Parameters ---------- s : string string to convert Returns ------- str valid python identifier.
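Doctest-style examples of the mapping (inputs invented):

>>> string2identifier("2nd order-fit!")
'_2nd_order_fit_'
>>> string2identifier("")
'_'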
380,131
def quantile(data, num_breaks): def scipy_mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None, limit=()): def _quantiles1D(data,m,p): x = numpy.sort(data.compressed()) n = len(x) if n == 0: return numpy.ma.array(numpy.empty(len(p), dtype=float), mask=True) elif n == 1: return numpy.ma.array(numpy.resize(x, p.shape), mask=numpy.ma.nomask) aleph = (n*p + m) k = numpy.floor(aleph.clip(1, n-1)).astype(int) gamma = (aleph-k).clip(0,1) return (1.-gamma)*x[(k-1).tolist()] + gamma*x[k.tolist()] data = numpy.ma.array(a, copy=False) if data.ndim > 2: raise TypeError("Array should be 2D at most !") if limit: condition = (limit[0] < data) & (data < limit[1]) data[~condition.filled(True)] = numpy.ma.masked p = numpy.array(prob, copy=False, ndmin=1) m = alphap + p*(1.-alphap-betap) if (axis is None): return _quantiles1D(data, m, p) return numpy.ma.apply_along_axis(_quantiles1D, axis, data, m, p) return scipy_mquantiles(data, numpy.linspace(1.0 / num_breaks, 1, num_breaks))
Calculate quantile breaks. Arguments: data -- Array of values to classify. num_breaks -- Number of breaks to perform.
380,132
def visit_AugAssign(self, node):
    args = (self.naming[get_variable(node.target).id],
            self.visit(node.value))
    merge_dep = list({frozenset.union(*x) for x in itertools.product(*args)})
    self.naming[get_variable(node.target).id] = merge_dep
An AugAssigned value depends on the r-value's type dependencies. This also holds for subscripts: `a[i] += foo()` means the type of `a` depends on `foo`'s return type as well as on `a`'s previous types.
380,133
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_rx_vlan_disc_req(self, **kwargs):
    config = ET.Element("config")
    fcoe_get_interface = ET.Element("fcoe_get_interface")
    config = fcoe_get_interface
    output = ET.SubElement(fcoe_get_interface, "output")
    fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list")
    fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id")
    fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id')
    fcoe_intf_rx_vlan_disc_req = ET.SubElement(fcoe_intf_list, "fcoe-intf-rx-vlan-disc-req")
    fcoe_intf_rx_vlan_disc_req.text = kwargs.pop('fcoe_intf_rx_vlan_disc_req')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
380,134
def readFLOAT16(self):
    self.reset_bits_pending()
    word = self.readUI16()
    sign = -1 if ((word & 0x8000) != 0) else 1
    exponent = (word >> 10) & 0x1f
    significand = word & 0x3ff
    if exponent == 0:
        if significand == 0:
            return 0.0
        else:
            # subnormal number
            return sign * math.pow(2, 1 - SWFStream.FLOAT16_EXPONENT_BASE) * (significand / 1024.0)
    if exponent == 31:
        if significand == 0:
            return float('-inf') if sign < 0 else float('inf')
        else:
            return float('nan')
    # normal number
    return sign * math.pow(2, exponent - SWFStream.FLOAT16_EXPONENT_BASE) * (1 + significand / 1024.0)
Read a 2 byte float
380,135
def _configure_io_handler(self, handler): if self.check_events(): return if handler in self._unprepared_handlers: old_fileno = self._unprepared_handlers[handler] prepared = self._prepare_io_handler(handler) else: old_fileno = None prepared = True fileno = handler.fileno() if old_fileno is not None and fileno != old_fileno: del self._handlers[old_fileno] try: self.poll.unregister(old_fileno) except KeyError: pass if not prepared: self._unprepared_handlers[handler] = fileno if not fileno: return self._handlers[fileno] = handler events = 0 if handler.is_readable(): logger.debug(" {0!r} readable".format(handler)) events |= select.POLLIN if handler.is_writable(): logger.debug(" {0!r} writable".format(handler)) events |= select.POLLOUT if events: logger.debug(" registering {0!r} handler fileno {1} for" " events {2}".format(handler, fileno, events)) self.poll.register(fileno, events)
Register an io-handler at the polling object.
380,136
def search(self, pattern, minAddr=None, maxAddr=None):
    if isinstance(pattern, str):
        return self.search_bytes(pattern, minAddr, maxAddr)
    if isinstance(pattern, compat.unicode):
        return self.search_bytes(pattern.encode("utf-16le"), minAddr, maxAddr)
    if isinstance(pattern, Pattern):
        return Search.search_process(self, pattern, minAddr, maxAddr)
    raise TypeError("Unknown pattern type: %r" % type(pattern))
Search for the given pattern within the process memory. @type pattern: str, compat.unicode or L{Pattern} @param pattern: Pattern to search for. It may be a byte string, a Unicode string, or an instance of L{Pattern}. The following L{Pattern} subclasses are provided by WinAppDbg: - L{BytePattern} - L{TextPattern} - L{RegExpPattern} - L{HexPattern} You can also write your own subclass of L{Pattern} for customized searches. @type minAddr: int @param minAddr: (Optional) Start the search at this memory address. @type maxAddr: int @param maxAddr: (Optional) Stop the search at this memory address. @rtype: iterator of tuple( int, int, str ) @return: An iterator of tuples. Each tuple contains the following: - The memory address where the pattern was found. - The size of the data that matches the pattern. - The data that matches the pattern. @raise WindowsError: An error occurred when querying or reading the process memory.
380,137
def url2domain(url):
    parsed_uri = urlparse.urlparse(url)
    domain = '{uri.netloc}'.format(uri=parsed_uri)
    domain = re.sub("^.+@", "", domain)
    domain = re.sub(":.+$", "", domain)
    return domain
extract domain from url
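Doctest-style example (URL invented; relies on the netloc-based extraction above):

>>> url2domain("https://user@www.example.com:8080/path?q=1")
'www.example.com'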
380,138
def _detect_xerial_stream(payload):
    if len(payload) > 16:
        header = struct.unpack('!' + _XERIAL_V1_FORMAT, bytes(payload)[:16])
        return header == _XERIAL_V1_HEADER
    return False
Detects if the data given might have been encoded with the blocking mode of the xerial snappy library. This mode writes a magic header of the format: +--------+--------------+------------+---------+--------+ | Marker | Magic String | Null / Pad | Version | Compat | +--------+--------------+------------+---------+--------+ | byte | c-string | byte | int32 | int32 | +--------+--------------+------------+---------+--------+ | -126 | 'SNAPPY' | \0 | | | +--------+--------------+------------+---------+--------+ The pad appears to be to ensure that SNAPPY is a valid cstring The version is the version of this format as written by xerial, in the wild this is currently 1 as such we only support v1. Compat is there to claim the minimum supported version that can read a xerial block stream, presently in the wild this is 1.
380,139
def queries(self, request): queries = self.get_queries(request) worlds = [] with self.mapper.begin() as session: for _ in range(queries): world = session.query(World).get(randint(1, MAXINT)) worlds.append(self.get_json(world)) return Json(worlds).http_response(request)
Multiple Database Queries
380,140
def read(self, auth, resource, options, defer=False):
    return self._call('read', auth, [resource, options], defer)
Read value(s) from a dataport. Calls a function that builds a request to read the dataport specified by an alias or rid and returns timeseries data as defined by the options. Args: auth: Takes the device cik resource: Takes the dataport alias or rid. options: Takes a list of options for what to return.
380,141
def get_pic(self, playingsong, tempfile_path):
    url = playingsong['picture'].replace('\\/', '/')
    for _ in range(3):
        try:
            urllib.urlretrieve(url, tempfile_path)
            logger.debug('Fetched album cover')
            return True
        except (IOError, urllib.ContentTooShortError):
            pass
    logger.error('Failed to fetch album cover')
    return False
Fetch the album cover art.
380,142
def get_balance(self):
    xml_root = self.__init_xml()
    response = clockwork_http.request(BALANCE_URL, etree.tostring(xml_root, encoding='utf-8'))
    data_etree = etree.fromstring(response['data'])
    err_desc = data_etree.find('ErrDesc')
    if err_desc is not None:
        raise clockwork_exceptions.ApiException(err_desc.text, data_etree.find('ErrNo').text)
    result = {}
    result['account_type'] = data_etree.find('AccountType').text
    result['balance'] = data_etree.find('Balance').text
    result['currency'] = data_etree.find('Currency').text
    return result
Check the balance for this account. Returns a dictionary containing: account_type: The account type balance: The balance remaining on the account currency: The currency used for the account balance. Assume GBP if not set
380,143
def _get_padded(data, start, end):
    if start < 0 and end > data.shape[0]:
        raise RuntimeError('Cannot pad on both sides of the data at once')
    if start < 0:
        start_zeros = np.zeros((-start, data.shape[1]), dtype=data.dtype)
        return np.vstack((start_zeros, data[:end]))
    elif end > data.shape[0]:
        end_zeros = np.zeros((end - data.shape[0], data.shape[1]), dtype=data.dtype)
        return np.vstack((data[start:], end_zeros))
    else:
        return data[start:end]
Return `data[start:end]` filling in with zeros outside array bounds Assumes that either `start<0` or `end>len(data)` but not both.
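A worked example of the padding behaviour (toy array invented):

import numpy as np

data = np.arange(6, dtype=float).reshape(3, 2)
print(_get_padded(data, -2, 2))  # start < 0: two zero rows are prepended
# [[0. 0.]
#  [0. 0.]
#  [0. 1.]
#  [2. 3.]]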
380,144
def _resolve_paths(self, *paths): result = set() for path in paths: if os.path.isdir(path): for dirpath, _, filenames in os.walk(path): for filename in filenames: path = os.path.join(dirpath, filename) if path.startswith(): path = path[1:].lstrip() if not self._should_ignore(path): result.add(path) else: result.add(path) return result
Resolve paths into a set of filenames (no directories) to check. External tools will handle directories as arguments differently, so for consistency we just want to pass them filenames. This method will recursively walk all directories and filter out any paths that match self.options.ignores.
380,145
def init(cls, path=None, mkdir=True, odbt=GitCmdObjectDB, expand_vars=True, **kwargs):
    if path:
        path = expand_path(path, expand_vars)
    if mkdir and path and not osp.exists(path):
        os.makedirs(path, 0o755)
    git = Git(path)
    git.init(**kwargs)
    return cls(path, odbt=odbt)
Initialize a git repository at the given path if specified :param path: is the full path to the repo (traditionally ends with /<name>.git) or None in which case the repository will be created in the current working directory :param mkdir: if specified will create the repository directory if it doesn't already exists. Creates the directory with a mode=0755. Only effective if a path is explicitly given :param odbt: Object DataBase type - a type which is constructed by providing the directory containing the database objects, i.e. .git/objects. It will be used to access all object data :param expand_vars: if specified, environment variables will not be escaped. This can lead to information disclosure, allowing attackers to access the contents of environment variables :param kwargs: keyword arguments serving as additional options to the git-init command :return: ``git.Repo`` (the newly created repo)
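A hedged GitPython usage sketch (paths invented; ``bare`` is forwarded to ``git init`` via ``**kwargs``):

from git import Repo

repo = Repo.init("/tmp/scratch-repo")            # working repository
bare = Repo.init("/tmp/scratch.git", bare=True)  # bare repository, as used on servers
assert bare.bare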
380,146
def exists(config): exists = ( pathlib.Path(config.cache_path).exists() and pathlib.Path(config.cache_path).is_dir() ) if not exists: return False index_path = pathlib.Path(config.cache_path) / "index.json" if index_path.exists(): with open(index_path, "r") as out: index = json.load(out) if index["version"] != __version__: logger.warning( "Wily cache is old, you may incur errors until you rebuild the cache." ) else: logger.warning( "Wily cache was not versioned, you may incur errors until you rebuild the cache." ) create_index(config) return True
Check whether the .wily/ directory exists. :param config: The configuration :type config: :class:`wily.config.WilyConfig` :return: Whether the .wily directory exists :rtype: ``boolean``
380,147
def DynamicCmd(name, plugins): exec( % name) plugin_objects = [] for plugin in plugins: classprefix = plugin[] plugin_list = plugin[] plugin_objects = plugin_objects + \ load_plugins(classprefix, plugin_list) exec_command = make_cmd_class(name, *plugin_objects)() return (exec_command, plugin_objects)
Returns a cmd with the added plugins, :param name: TODO: :param plugins: list of plugins
380,148
def get_hubs(self):
    output = helm('list', '-q')
    if output.returncode != 0:
        print("Something went wrong!")
        print(output.stderr)
    else:
        hubs = output.stdout.split()
        return hubs
Get a list of hubs names. Returns ------- hubs : list List of hub names
380,149
def insert_file(self, file):
    if type(file) is bytes:
        file = open(file, 'r')
    self.insert_string(file.read())
insert_file(file) Load resource entries from FILE, and insert them into the database. FILE can be a filename (a string) or a file object.
380,150
def set_dhw_on(self, until=None):
    if until is None:
        data = {"Mode": "PermanentOverride", "State": "On", "UntilTime": None}
    else:
        data = {"Mode": "TemporaryOverride", "State": "On",
                "UntilTime": until.strftime('%Y-%m-%dT%H:%M:%SZ')}
    self._set_dhw(data)
Sets the DHW on until a given time, or permanently.
380,151
async def _upload_chunks(
        cls, rfile: BootResourceFile, content: io.IOBase,
        chunk_size: int, progress_callback=None):
    content.seek(0, io.SEEK_SET)
    upload_uri = urlparse(
        cls._handler.uri)._replace(path=rfile._data['upload_uri']).geturl()
    uploaded_size = 0
    insecure = cls._handler.session.insecure
    connector = aiohttp.TCPConnector(verify_ssl=(not insecure))
    session = aiohttp.ClientSession(connector=connector)
    async with session:
        while True:
            buf = content.read(chunk_size)
            length = len(buf)
            if length > 0:
                uploaded_size += length
                await cls._put_chunk(session, upload_uri, buf)
                if progress_callback is not None:
                    progress_callback(uploaded_size / rfile.size)
            if length != chunk_size:
                break
Upload the `content` to `rfile` in chunks using `chunk_size`.
380,152
def predicate_type(self, pred: URIRef) -> URIRef:
    return self._o.value(pred, RDFS.range)
Return the type of pred :param pred: predicate to map :return:
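The lookup is a plain rdflib range query; a minimal sketch with an invented namespace:

from rdflib import Graph, Namespace, RDFS

EX = Namespace("http://example.org/")
g = Graph()
g.add((EX.age, RDFS.range, EX.Integer))
print(g.value(EX.age, RDFS.range))  # http://example.org/Integer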
380,153
def plot_poles(map_axis, plon, plat, A95, label='', color='k', edgecolor='k',
               marker='o', markersize=20, legend='no'):
    map_axis.scatter(plon, plat, marker=marker, color=color,
                     edgecolors=edgecolor, s=markersize, label=label,
                     zorder=101, transform=ccrs.Geodetic())
    if isinstance(color, str) == True:
        for n in range(0, len(A95)):
            A95_km = A95[n] * 111.32
            equi(map_axis, plon[n], plat[n], A95_km, color)
    else:
        for n in range(0, len(A95)):
            A95_km = A95[n] * 111.32
            equi(map_axis, plon[n], plat[n], A95_km, color[n])
    if legend == 'yes':
        plt.legend(loc=2)
This function plots paleomagnetic poles and A95 error ellipses on a cartopy map axis. Before this function is called, a plot needs to be initialized with code such as that in the make_orthographic_map function. Examples ------- >>> plons = [200, 180, 210] >>> plats = [60, 40, 35] >>> A95 = [6, 3, 10] >>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30) >>> ipmag.plot_poles(map_axis, plons, plats, A95s, color='red', markersize=40) >>> plons = [200, 180, 210] >>> plats = [60, 40, 35] >>> A95 = [6, 3, 10] >>> colors = ['red','green','blue'] >>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30) >>> ipmag.plot_poles(map_axis, plons, plats, A95s, color=colors, markersize=40) Required Parameters ----------- map_axis : the name of the current map axis that has been developed using cartopy plon : the longitude of the paleomagnetic pole being plotted (in degrees E) plat : the latitude of the paleomagnetic pole being plotted (in degrees) A95 : the A_95 confidence ellipse of the paleomagnetic pole (in degrees) Optional Parameters (defaults are used if not specified) ----------- color : the default color is black. Other colors can be chosen (e.g. 'r') a list of colors can also be given so that each pole has a distinct color edgecolor : the default edgecolor is black. Other colors can be chosen (e.g. 'r') marker : the default is a circle. Other symbols can be chosen (e.g. 's') markersize : the default is 20. Other size can be chosen label : the default is no label. Labels can be assigned. legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
380,154
def _get(self, url, params={}): req = self._session.get(self._api_prefix + url, params=params) return self._action(req)
Wrapper around request.get() to use the API prefix. Returns a JSON response.
380,155
def _get_argv(index, default=None):
    return _sys.argv[index] if len(_sys.argv) > index else default
Get the argv input argument defined by index. Return the default value if that argument does not exist.
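A behaviour sketch (the simulated command line is invented):

import sys as _sys

def _get_argv(index, default=None):
    return _sys.argv[index] if len(_sys.argv) > index else default

_sys.argv = ["tool.py", "input.txt"]  # simulate: python tool.py input.txt
assert _get_argv(1) == "input.txt"
assert _get_argv(2, "out.txt") == "out.txt"  # missing, so the default is returned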
380,156
def load_plume_package(package, plume_dir, accept_defaults): from canari.commands.load_plume_package import load_plume_package load_plume_package(package, plume_dir, accept_defaults)
Loads a canari package into Plume.
380,157
def is_attacked_by(self, color: Color, square: Square) -> bool:
    return bool(self.attackers_mask(color, square))
Checks if the given side attacks the given square. Pinned pieces still count as attackers. Pawns that can be captured en passant are **not** considered attacked.
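A hedged python-chess usage sketch, using the standard starting position:

import chess

board = chess.Board()
assert board.is_attacked_by(chess.WHITE, chess.E3)      # covered by the d2/f2 pawns
assert not board.is_attacked_by(chess.WHITE, chess.E4)  # nothing reaches e4 yet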
380,158
def svm_train(arg1, arg2=None, arg3=None):
    prob, param = None, None
    if isinstance(arg1, (list, tuple)) or (scipy and isinstance(arg1, scipy.ndarray)):
        assert isinstance(arg2, (list, tuple)) or \
            (scipy and isinstance(arg2, (scipy.ndarray, sparse.spmatrix)))
        y, x, options = arg1, arg2, arg3
        param = svm_parameter(options)
        prob = svm_problem(y, x, isKernel=(param.kernel_type == PRECOMPUTED))
    elif isinstance(arg1, svm_problem):
        prob = arg1
        if isinstance(arg2, svm_parameter):
            param = arg2
        else:
            param = svm_parameter(arg2)
    if prob == None or param == None:
        raise TypeError("Wrong types for the arguments")

    if param.kernel_type == PRECOMPUTED:
        for i in range(prob.l):
            xi = prob.x[i]
            idx, val = xi[0].index, xi[0].value
            if idx != 0:
                raise ValueError('Wrong input format: first column must be 0:sample_serial_number')
            if val <= 0 or val > prob.n:
                raise ValueError('Wrong input format: sample_serial_number out of range')

    if param.gamma == 0 and prob.n > 0:
        param.gamma = 1.0 / prob.n
    libsvm.svm_set_print_string_function(param.print_func)
    err_msg = libsvm.svm_check_parameter(prob, param)
    if err_msg:
        raise ValueError('Error: %s' % err_msg)

    if param.cross_validation:
        l, nr_fold = prob.l, param.nr_fold
        target = (c_double * l)()
        libsvm.svm_cross_validation(prob, param, nr_fold, target)
        ACC, MSE, SCC = evaluations(prob.y[:l], target[:l])
        if param.svm_type in [EPSILON_SVR, NU_SVR]:
            print("Cross Validation Mean squared error = %g" % MSE)
            print("Cross Validation Squared correlation coefficient = %g" % SCC)
            return MSE
        else:
            print("Cross Validation Accuracy = %g%%" % ACC)
            return ACC
    else:
        m = libsvm.svm_train(prob, param)
        m = toPyModel(m)
        m.x_space = prob.x_space
        return m
svm_train(y, x [, options]) -> model | ACC | MSE y: a list/tuple/ndarray of l true labels (type must be int/double). x: 1. a list/tuple of l training instances. Feature vector of each training instance is a list/tuple or dictionary. 2. an l * n numpy ndarray or scipy spmatrix (n: number of features). svm_train(prob [, options]) -> model | ACC | MSE svm_train(prob, param) -> model | ACC| MSE Train an SVM model from data (y, x) or an svm_problem prob using 'options' or an svm_parameter param. If '-v' is specified in 'options' (i.e., cross validation) either accuracy (ACC) or mean-squared error (MSE) is returned. options: -s svm_type : set type of SVM (default 0) 0 -- C-SVC (multi-class classification) 1 -- nu-SVC (multi-class classification) 2 -- one-class SVM 3 -- epsilon-SVR (regression) 4 -- nu-SVR (regression) -t kernel_type : set type of kernel function (default 2) 0 -- linear: u'*v 1 -- polynomial: (gamma*u'*v + coef0)^degree 2 -- radial basis function: exp(-gamma*|u-v|^2) 3 -- sigmoid: tanh(gamma*u'*v + coef0) 4 -- precomputed kernel (kernel values in training_set_file) -d degree : set degree in kernel function (default 3) -g gamma : set gamma in kernel function (default 1/num_features) -r coef0 : set coef0 in kernel function (default 0) -c cost : set the parameter C of C-SVC, epsilon-SVR, and nu-SVR (default 1) -n nu : set the parameter nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5) -p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1) -m cachesize : set cache memory size in MB (default 100) -e epsilon : set tolerance of termination criterion (default 0.001) -h shrinking : whether to use the shrinking heuristics, 0 or 1 (default 1) -b probability_estimates : whether to train a SVC or SVR model for probability estimates, 0 or 1 (default 0) -wi weight : set the parameter C of class i to weight*C, for C-SVC (default 1) -v n: n-fold cross validation mode -q : quiet mode (no outputs)
380,159
def places_photo(client, photo_reference, max_width=None, max_height=None): if not (max_width or max_height): raise ValueError("a max_width or max_height arg is required") params = {"photoreference": photo_reference} if max_width: params["maxwidth"] = max_width if max_height: params["maxheight"] = max_height response = client._request("/maps/api/place/photo", params, extract_body=lambda response: response, requests_kwargs={"stream": True}) return response.iter_content()
Downloads a photo from the Places API. :param photo_reference: A string identifier that uniquely identifies a photo, as provided by either a Places search or Places detail request. :type photo_reference: string :param max_width: Specifies the maximum desired width, in pixels. :type max_width: int :param max_height: Specifies the maximum desired height, in pixels. :type max_height: int :rtype: iterator containing the raw image data, which typically can be used to save an image file locally. For example: ``` f = open(local_filename, 'wb') for chunk in client.places_photo(photo_reference, max_width=100): if chunk: f.write(chunk) f.close() ```
380,160
def fetch(**kwargs): pre = post = run_args = {} if float(__grains__[]) >= 10.2: post += else: pre += run_args[] = True return _wrapper(, pre=pre, post=post, run_args=run_args, **kwargs)
.. versionadded:: 2016.3.4 freebsd-update fetch wrapper. Based on the currently installed world and the configuration options set, fetch all available binary updates. kwargs: Parameters of freebsd-update command.
380,161
def quad_info(name, quad, pretty): cl = clientv1() mosaic, = cl.get_mosaic_by_name(name).items_iter(1) echo_json_response(call_and_wrap(cl.get_quad_by_id, mosaic, quad), pretty)
Get information for a specific mosaic quad
380,162
def from_vertices_and_edges(vertices, edges, vertex_name_key='name',
                            vertex_id_key='id',
                            edge_foreign_keys=('source', 'target'),
                            directed=True):
    vertex_data = _dicts_to_columns(vertices)
    edge_data = _dicts_to_columns(edges)
    n = len(vertices)
    vertex_index = dict(zip(vertex_data[vertex_id_key], range(n)))
    edge_list = list(map(lambda source, target: (vertex_index[source], vertex_index[target]),
                         edge_data[edge_foreign_keys[0]],
                         edge_data[edge_foreign_keys[1]]))
    g = IGraph(n=n, edges=edge_list, directed=directed,
               vertex_attrs=vertex_data, edge_attrs=edge_data)
    g.vs['label'] = g.vs[vertex_name_key]
    g.vs['indegree'] = g.degree(mode="in")
    g.vs['outdegree'] = g.degree(mode="out")
    g.vs['name'] = g.vs[vertex_name_key]
    if 'group' not in g.vs.attributes():
        g.vs['group'] = labels_to_groups(g.vs['label'])
    return g
This representation assumes that vertices and edges are encoded in two lists, each list containing a Python dict for each vertex and each edge, respectively. A distinguished element of the vertex dicts contain a vertex ID which is used in the edge dicts to refer to source and target vertices. All the remaining elements of the dicts are considered vertex and edge attributes. @param vertices: a list of dicts for the vertices. @param edges: a list of dicts for the edges. @param vertex_name_key: the name of the distinguished key in the dicts in the vertex data source that contains the vertex names. Will also be used as vertex label. @param vertex_id_key: the name of the distinguished key in the dicts in the vertex data source that contains a unique identifier for the vertex. @param edge_foreign_keys: the name of the attributes in the dicts in C{edges} that contain the source and target vertex names. @return: IGraph instance with integers for vertex ids, edge sources, and edge targets.
380,163
def render(obj): def get_v(v): return v % env if isinstance(v, basestring) else v if isinstance(obj, types.StringType): return obj % env elif isinstance(obj, types.TupleType) or isinstance(obj, types.ListType): rv = [] for v in obj: rv.append(get_v(v)) elif isinstance(obj, types.DictType): rv = {} for k, v in obj.items(): rv[k] = get_v(v) return rv
Conveniently render strings with the fabric context
380,164
def get_relevant_policy_section(self, policy_name, group=None):
    policy_bundle = self._operation_policies.get(policy_name)
    if not policy_bundle:
        self._logger.warning(
            "The '{0}' policy does not exist.".format(policy_name))
        return None
    if group:
        groups_policy_bundle = policy_bundle.get('groups')
        if not groups_policy_bundle:
            self._logger.debug(
                "The '{0}' policy does not support groups.".format(
                    policy_name))
            return None
        else:
            group_policy = groups_policy_bundle.get(group)
            if not group_policy:
                self._logger.debug(
                    "The '{0}' policy does not support group '{1}'.".format(
                        policy_name, group))
                return None
            else:
                return group_policy
    else:
        return policy_bundle.get('default')
Look up the policy corresponding to the provided policy name and group (optional). Log any issues found during the look up.
380,165
def validate(self, key, value): if self._validator is not None: self._validator(key, value)
Validation function run before setting. Uses function from __init__.
380,166
def assert_satisfies(v, cond, message=None):
    if not cond:
        vname, vexpr = _retrieve_assert_arguments()
        if not message:
            message = "Argument `{var}` (= {val!r}) does not satisfy the condition {expr}" \
                      .format(var=vname, val=v, expr=vexpr)
        raise H2OValueError(message=message, var_name=vname, skip_frames=1)
Assert that variable satisfies the provided condition. :param v: variable to check. Its value is only used for error reporting. :param bool cond: condition that must be satisfied. Should be somehow related to the variable ``v``. :param message: message string to use instead of the default.
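A hedged usage sketch inside h2o-py (the import path is assumed; the error text follows the default template above):

>>> from h2o.utils.typechecks import assert_satisfies
>>> x = -3
>>> assert_satisfies(x, x >= 0)
H2OValueError: Argument `x` (= -3) does not satisfy the condition x >= 0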
380,167
def close(self):
    if not self._closed:
        if self.protocol_version >= 3:
            log_debug("[#%04X]  C: GOODBYE", self.local_port)
            self._append(b"\x02", ())
            try:
                self.send()
            except ServiceUnavailable:
                pass
        log_debug("[#%04X]  C: <CLOSE>", self.local_port)
        try:
            self.socket.close()
        except IOError:
            pass
        finally:
            self._closed = True
Close the connection.
380,168
def invert_projection(self, X, identities): distances = self.transform(X) if len(distances) != len(identities): raise ValueError("X and identities are not the same length: " "{0} and {1}".format(len(X), len(identities))) node_match = [] for d in distances.__getattribute__(self.argfunc)(0): node_match.append(identities[d]) return np.array(node_match)
Calculate the inverted projection. The inverted projection of a SOM is created by associating each weight with the input which matches it the most, thus giving a good approximation of the "influence" of each input item. Works best for symbolic (instead of continuous) input data. Parameters ---------- X : numpy array Input data identities : list A list of names for each of the input data. Must be the same length as X. Returns ------- m : numpy array An array with the same shape as the map
380,169
def plotRatePSD(include=['allCells', 'eachPop'], timeRange=None, binSize=5, maxFreq=100,
                NFFT=256, noverlap=128, smooth=0, overlay=True, ylim=None, popColors={},
                fontSize=12, figSize=(10, 8), saveData=None, saveFig=None, showFig=True):
    from .. import sim

    print('Plotting firing rate power spectral density (PSD) ...')

    # Replace 'eachPop' with the list of all population labels
    if 'eachPop' in include:
        include.remove('eachPop')
        for pop in sim.net.allPops:
            include.append(pop)

    if timeRange is None:
        timeRange = [0, sim.cfg.duration]

    histData = []

    fig, ax1 = plt.subplots(figsize=figSize)
    fontsiz = fontSize
    plt.rcParams.update({'font.size': fontSize})

    allPower, allSignal, allFreqs = [], [], []
    for iplot, subset in enumerate(include):
        cells, cellGids, netStimLabels = getCellsInclude([subset])
        numNetStims = 0

        # Select spikes from the included cells
        if len(cellGids) > 0:
            try:
                spkinds, spkts = list(zip(*[(spkgid, spkt) for spkgid, spkt
                    in zip(sim.allSimData['spkid'], sim.allSimData['spkt'])
                    if spkgid in cellGids]))
            except:
                spkinds, spkts = [], []
        else:
            spkinds, spkts = [], []

        # Add NetStim spikes
        spkts, spkinds = list(spkts), list(spkinds)
        numNetStims = 0
        if 'stims' in sim.allSimData:
            for netStimLabel in netStimLabels:
                netStimSpks = [spk for cell, stims in sim.allSimData['stims'].items()
                               for stimLabel, stimSpks in stims.items()
                               for spk in stimSpks if stimLabel == netStimLabel]
                if len(netStimSpks) > 0:
                    lastInd = max(spkinds) if len(spkinds) > 0 else 0
                    spktsNew = netStimSpks
                    spkindsNew = [lastInd + 1 + i for i in range(len(netStimSpks))]
                    spkts.extend(spktsNew)
                    spkinds.extend(spkindsNew)
                    numNetStims += 1

        histo = np.histogram(spkts, bins=np.arange(timeRange[0], timeRange[1], binSize))
        histoT = histo[1][:-1] + binSize / 2
        histoCount = histo[0]
        histoCount = histoCount * (1000.0 / binSize) / (len(cellGids) + numNetStims)
        histData.append(histoCount)

        color = popColors[subset] if isinstance(subset, (str, tuple)) and subset in popColors \
            else colorList[iplot % len(colorList)]

        if not overlay:
            plt.subplot(len(include), 1, iplot + 1)
            title(str(subset), fontsize=fontsiz)
            color = 'blue'

        Fs = 1000.0 / binSize
        power = mlab.psd(histoCount, Fs=Fs, NFFT=NFFT, detrend=mlab.detrend_none,
                         window=mlab.window_hanning, noverlap=noverlap, pad_to=None,
                         sides='default', scale_by_freq=None)

        if smooth:
            signal = _smooth1d(10 * np.log10(power[0]), smooth)
        else:
            signal = 10 * np.log10(power[0])
        freqs = power[1]

        allFreqs.append(freqs)
        allPower.append(power)
        allSignal.append(signal)

        plt.plot(freqs[freqs < maxFreq], signal[freqs < maxFreq], linewidth=1.5, color=color)
        plt.xlabel('Frequency (Hz)', fontsize=fontsiz)
        plt.ylabel('Power Spectral Density (dB/Hz)', fontsize=fontsiz)
        plt.xlim([0, maxFreq])
        if ylim:
            plt.ylim(ylim)

    if overlay:
        for i, subset in enumerate(include):
            color = popColors[subset] if isinstance(subset, basestring) and subset in popColors \
                else colorList[i % len(colorList)]
            plt.plot(0, 0, color=color, label=str(subset))
        plt.legend(fontsize=fontsiz, loc=1)
        maxLabelLen = min(10, max([len(str(l)) for l in include]))

    if saveData:
        figData = {'histData': histData, 'histT': histoT, 'include': include,
                   'timeRange': timeRange, 'binSize': binSize, 'saveData': saveData,
                   'saveFig': saveFig, 'showFig': showFig}
        _saveFigData(figData, saveData, 'spikePSD')

    if saveFig:
        if isinstance(saveFig, basestring):
            filename = saveFig
        else:
            filename = sim.cfg.filename + '_' + 'spikePSD.png'
        plt.savefig(filename)

    if showFig:
        _showFigure()

    return fig, {'allSignal': allSignal, 'allPower': allPower, 'allFreqs': allFreqs}
Plot firing rate power spectral density (PSD) - include (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]): List of data series to include. Note: one line per item, not grouped (default: ['allCells', 'eachPop']) - timeRange ([start:stop]): Time range of spikes shown; if None shows all (default: None) - binSize (int): Size in ms of spike bins (default: 5) - maxFreq (float): Maximum frequency to show in plot (default: 100) - NFFT (float): The number of data points used in each block for the FFT (power of 2) (default: 256) - smooth (int): Window size for smoothing; no smoothing if 0 (default: 0) - overlay (True|False): Whether to overlay the data lines or plot in separate subplots (default: True) - graphType ('line'|'bar'): Type of graph to use (line graph or bar plot) (default: 'line') - yaxis ('rate'|'count'): Units of y axis (firing rate in Hz, or spike count) (default: 'rate') - popColors (dict): Dictionary with color (value) used for each population (key) (default: None) - figSize ((width, height)): Size of figure (default: (10,8)) - saveData (None|True|'fileName'): File name where to save the final data used to generate the figure; if set to True uses filename from simConfig (default: None) - saveFig (None|True|'fileName'): File name where to save the figure; if set to True uses filename from simConfig (default: None) - showFig (True|False): Whether to show the figure or not (default: True) - Returns figure handle
380,170
def convert_la_to_rgba(self, row, result):
    # Each source pixel is a (luminance, alpha) pair, so the row holds
    # 2 values per pixel; the target holds 4 (R, G, B, A).
    for i in range(len(row) // 2):
        for j in range(3):
            result[(4 * i) + j] = row[2 * i]
        result[(4 * i) + 3] = row[(2 * i) + 1]
Convert a grayscale image with alpha to RGBA.
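A small worked example of the channel expansion, assuming flat bytearray rows as PNG decoders use; this standalone helper restates the method's logic for illustration.

def la_to_rgba(row):
    # row holds (L, A) pairs; expand each to (L, L, L, A).
    result = bytearray(len(row) // 2 * 4)
    for i in range(len(row) // 2):
        for j in range(3):
            result[4 * i + j] = row[2 * i]
        result[4 * i + 3] = row[2 * i + 1]
    return result

print(list(la_to_rgba(bytearray([10, 255, 20, 128]))))
# [10, 10, 10, 255, 20, 20, 20, 128]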
380,171
def PostRegistration(method): if not isinstance(method, types.FunctionType): raise TypeError("@PostRegistration can only be applied on functions") validate_method_arity(method, "service_reference") _append_object_entry( method, constants.IPOPO_METHOD_CALLBACKS, constants.IPOPO_CALLBACK_POST_REGISTRATION, ) return method
The service post-registration callback decorator is called after a service of the component has been registered to the framework. The decorated method must accept the :class:`~pelix.framework.ServiceReference` of the registered service as argument:: @PostRegistration def callback_method(self, service_reference): ''' service_reference: The ServiceReference of the provided service ''' # ... :param method: The decorated method :raise TypeError: The decorated element is not a valid function
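A sketch of how the decorator is typically combined with iPOPO's component decorators; the factory and service names below are made up for illustration.

from pelix.ipopo.decorators import ComponentFactory, Provides, PostRegistration

@ComponentFactory("demo-factory")     # hypothetical factory name
@Provides("demo.service")             # hypothetical service specification
class DemoComponent(object):

    @PostRegistration
    def registered(self, service_reference):
        # Called once the framework has registered the provided service.
        print("Service registered:", service_reference)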
380,172
def content(): message = m.Message() paragraph = m.Paragraph( m.Image( % resources_path()), style_class= ) message.add(paragraph) body = tr( structure\ highway\ ) tips = m.BulletedList() tips.add(tr( select on map\ )) tips.add(tr( )) tips.add(tr( padang-\ padang-roads.shp\padang-buildings.shp\ -\_\ )) tips.add(tr( )) tips.add(tr( )) tips.add(m.Link( , text=tr( ) )) message.add(m.Paragraph(body)) message.add(tips) message.add(m.Paragraph( tr( ) % ( m.ImportantText(tr()).to_html(), ))) message.add(m.Paragraph( m.ImportantText(tr()), tr( ), m.Link( , text=tr( )))) return message
Helper method that returns just the content. This method was added so that the text could be reused in the dock_help module. .. versionadded:: 3.2.2 :returns: A message object without brand element. :rtype: safe.messaging.message.Message
380,173
def check_dupl_sources(self):
    dd = collections.defaultdict(list)
    for src_group in self.src_groups:
        for src in src_group:
            try:
                srcid = src.source_id
            except AttributeError:  # source stored as a node, not an object
                srcid = src['id']
            dd[srcid].append(src)
    dupl = []
    for srcid, srcs in sorted(dd.items()):
        if len(srcs) > 1:
            _assert_equal_sources(srcs)
            dupl.append(srcs)
    return dupl
Extracts duplicated sources, i.e. sources with the same source_id in different source groups. Raise an exception if there are sources with the same ID which are not duplicated. :returns: a list of list of sources, ordered by source_id
380,174
def get_data(self):
    "Get SNMP values from host"
    alarm_oids = [netsnmp.Varbind(alarms[alarm_id]['oid'])
                  for alarm_id in self.models[self.modem_type]['alarms']]
    metric_oids = [netsnmp.Varbind(metrics[metric_id]['oid'])
                   for metric_id in self.models[self.modem_type]['metrics']]
    response = self.snmp_session.get(netsnmp.VarList(*alarm_oids + metric_oids))
    return (
        response[0:len(alarm_oids)],
        response[len(alarm_oids):]
    )
Get SNMP values from host
380,175
def start(self): if not self.auto_retry: self.curl() return while not self.is_finished: try: self.curl() except pycurl.error as e: if e.args[0] == pycurl.E_PARTIAL_FILE: pass elif e.args[0] == pycurl.E_HTTP_RANGE_ERROR: break elif e.args[0] == pycurl.E_RECV_ERROR: if self._rst_retries < self.max_rst_retries: pass else: raise e self._rst_retries += 1 else: raise e self._move_path() self._done()
Start downloading, handling auto retry, download resume and path moving
380,176
def remove_target(self, target_name: str): if target_name in self.targets: del self.targets[target_name] build_module = split_build_module(target_name) if build_module in self.targets_by_module: self.targets_by_module[build_module].remove(target_name)
Remove (unregister) a `target` from this build context. Removes the target instance with the given name, if it exists, from both the `targets` map and the `targets_by_module` map. Doesn't do anything if no target with that name is found. Doesn't touch the target graph, if it exists.
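A self-contained illustration of the bookkeeping, using a stub context carrying only the two maps; `split_build_module` is assumed to map a target name like '//app:lib' to its module '//app'.

class StubContext:
    def __init__(self):
        self.targets = {'//app:lib': object()}
        self.targets_by_module = {'//app': {'//app:lib'}}

    def remove_target(self, target_name):
        # Same logic as above, inlined for a self-contained sketch.
        if target_name in self.targets:
            del self.targets[target_name]
        build_module = target_name.split(':')[0]  # assumed split_build_module behaviour
        if build_module in self.targets_by_module:
            self.targets_by_module[build_module].remove(target_name)

ctx = StubContext()
ctx.remove_target('//app:lib')
print(ctx.targets, ctx.targets_by_module)   # {} {'//app': set()}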
380,177
def subn_filter(s, find, replace, count=0):
    # The stdlib has no re.gsub; re.sub provides the intended substitution.
    return re.sub(find, replace, s, count=count)
A non-optimal implementation of a regex filter
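With the stdlib call corrected to re.sub (Python's re module has no gsub), the filter performs a global substitution, or at most `count` replacements; a quick check:

import re

def subn_filter(s, find, replace, count=0):
    return re.sub(find, replace, s, count=count)

print(subn_filter('a-b-c', '-', '+'))      # a+b+c
print(subn_filter('a-b-c', '-', '+', 1))   # a+b-c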
380,178
def remove_last(ol, value, **kwargs):
    if 'mode' in kwargs:
        mode = kwargs["mode"]
    else:
        mode = "new"
    new = copy.deepcopy(ol)
    new.reverse()
    new.remove(value)
    new.reverse()
    if mode == "new":
        return new
    else:
        ol.clear()
        ol.extend(new)
        return ol
from elist.elist import * ol = [1,'a',3,'a',5,'a'] id(ol) new = remove_last(ol,'a') ol new id(ol) id(new) #### ol = [1,'a',3,'a',5,'a'] id(ol) rslt = remove_last(ol,'a',mode="original") ol rslt id(ol) id(rslt)
380,179
def _adjust_auto(self, real_wave_mfcc, algo_parameters): self.log(u"Called _adjust_auto") self.log(u"Nothing to do, return unchanged")
AUTO (do not modify)
380,180
def start(self): self.log.info("Starting Insecure Session for Monitor %s" % self.monitor_id) if self.socket is not None: raise Exception("Socket already established for %s." % self) try: self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.socket.connect((self.client.hostname, PUSH_OPEN_PORT)) self.socket.setblocking(0) except socket.error as exception: self.socket.close() self.socket = None raise self.send_connection_request()
Creates a TCP connection to Device Cloud and sends a ConnectionRequest message
380,181
def after_unassign(duplicate_analysis):
    analysis_events.after_unassign(duplicate_analysis)
    parent = duplicate_analysis.aq_parent
    logger.info("Removing duplicate '{}' from '{}'"
                .format(duplicate_analysis.getId(), parent.getId()))
    parent.manage_delObjects([duplicate_analysis.getId()])
Removes the duplicate from the system
380,182
def create_oracle(username, password, host, port, database, **kwargs): return create_engine( _create_oracle(username, password, host, port, database), **kwargs )
Create an engine connected to an Oracle database using cx_Oracle.
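An equivalent built by hand, assuming the private _create_oracle helper formats the standard SQLAlchemy 'oracle+cx_oracle' URL; all connection details below are placeholders.

from sqlalchemy import create_engine, text

engine = create_engine('oracle+cx_oracle://scott:tiger@db.example.com:1521/orcl',
                       pool_size=5)
with engine.connect() as conn:
    print(conn.execute(text('SELECT 1 FROM dual')).scalar())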
380,183
def payload_class_for_element_name(element_name): logger.debug(" looking up payload class for element: {0!r}".format( element_name)) logger.debug(" known: {0!r}".format(STANZA_PAYLOAD_CLASSES)) if element_name in STANZA_PAYLOAD_CLASSES: return STANZA_PAYLOAD_CLASSES[element_name] else: return XMLPayload
Return a payload class for given element name.
380,184
def _set_gbc(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=gbc.gbc, is_container='container', presence=False, yang_name="gbc", rest_name="gbc", parent=self, choice=(u'interface-identifier', u'gbc'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """gbc must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=gbc.gbc, is_container='container', yang_name="gbc", rest_name="gbc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='container', is_config=True)""",
        })
    self.__gbc = t
    if hasattr(self, '_set'):
        self._set()
Setter method for gbc, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/gbc (container) If this variable is read-only (config: false) in the source YANG file, then _set_gbc is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_gbc() directly.
380,185
def get_workflow(self): extra_context = self.get_initial() entry_point = self.request.GET.get("step", None) workflow = self.workflow_class(self.request, context_seed=extra_context, entry_point=entry_point) return workflow
Returns the instantiated workflow class.
380,186
def preferred_width(self, cli, max_available_width): if cli.current_buffer.complete_state: state = cli.current_buffer.complete_state return 2 + max(get_cwidth(c.display_meta) for c in state.current_completions) else: return 0
Report the width of the longest meta text as the preferred width of this control. It could be that we use less width, but this way, we're sure that the layout doesn't change when we select another completion (E.g. that completions are suddenly shown in more or fewer columns.)
380,187
def _get_adjusted_merge_area(self, attrs, insertion_point, no_to_insert, axis): assert axis in range(2) if "merge_area" not in attrs or attrs["merge_area"] is None: return top, left, bottom, right = attrs["merge_area"] selection = Selection([(top, left)], [(bottom, right)], [], [], []) selection.insert(insertion_point, no_to_insert, axis) __top, __left = selection.block_tl[0] __bottom, __right = selection.block_br[0] rows, cols, tabs = self.shape if __top < 0 and __bottom < 0 or __top >= rows and __bottom >= rows or\ __left < 0 and __right < 0 or __left >= cols and __right >= cols: return if __top < 0: __top = 0 if __top >= rows: __top = rows - 1 if __bottom < 0: __bottom = 0 if __bottom >= rows: __bottom = rows - 1 if __left < 0: __left = 0 if __left >= cols: __left = cols - 1 if __right < 0: __right = 0 if __right >= cols: __right = cols - 1 return __top, __left, __bottom, __right
Returns updated merge area

Parameters
----------
attrs: Dict
\tCell attribute dictionary that shall be adjusted
insertion_point: Integer
\tPoint on axis, before which insertion takes place
no_to_insert: Integer >= 0
\tNumber of rows/cols/tabs that shall be inserted
axis: Integer in range(2)
\tSpecifies the dimension, i.e. 0 == row, 1 == col
380,188
def get_variants(data, include_germline=False): data = utils.deepish_copy(data) supported = ["precalled", "vardict", "vardict-java", "vardict-perl", "freebayes", "octopus", "strelka2"] if include_germline: supported.insert(1, "gatk-haplotype") out = [] if isinstance(data.get("variants"), dict) and "samples" in data["variants"]: cur_vs = [] if (isinstance(data["variants"]["samples"], (list, tuple)) and len(data["variants"]["samples"]) == 1 and isinstance(data["variants"]["samples"][0], (list, tuple))): data["variants"]["samples"] = data["variants"]["samples"][0] for fname in data["variants"]["samples"]: variantcaller = utils.splitext_plus(os.path.basename(fname))[0] variantcaller = variantcaller.replace(dd.get_sample_name(data) + "-", "") for batch in dd.get_batches(data): variantcaller = variantcaller.replace(batch + "-", "") cur_vs.append({"vrn_file": fname, "variantcaller": variantcaller}) data["variants"] = cur_vs for v in data.get("variants", []): if v["variantcaller"] in supported and v.get("vrn_file"): out.append((supported.index(v["variantcaller"]), v)) out.sort() return [xs[1] for xs in out]
Retrieve set of variant calls to use for heterogeneity analysis.
380,189
def _count_leading(line, ch): i, n = 0, len(line) while i < n and line[i] == ch: i += 1 return i
Return number of `ch` characters at the start of `line`. Example: >>> _count_leading(' abc', ' ') 3
380,190
def create(self): if self._track is None: self._track = self.db[self.tracking_collection_name]
Create tracking collection. Does nothing if tracking collection already exists.
380,191
def from_file(filepath, delimiter='', blanklines=False):
    data = []
    try:
        with open(filepath, 'r') as f:
            for line in f:
                if blanklines and line.strip() == '':
                    continue
                data.append(line)
    except IOError:
        raise IOError('Error opening or reading file: {}'.format(filepath))
    return Base64(Join(delimiter, data))
Imports userdata from a file.

:type filepath: string
:param filepath: The absolute path to the file.

:type delimiter: string
:param delimiter: Delimiter to use with the troposphere.Join().

:type blanklines: boolean
:param blanklines: If blank lines should be ignored.

:rtype: troposphere.Base64
:return: The base64 representation of the file.
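Typical use in a troposphere template, attaching a shell script as instance user data; the script name, AMI, and instance properties are illustrative.

from troposphere import Template, ec2
from troposphere.helpers import userdata

t = Template()
instance = ec2.Instance(
    'WebServer',
    ImageId='ami-12345678',                       # placeholder AMI
    InstanceType='t3.micro',
    UserData=userdata.from_file('bootstrap.sh'),  # file read by the helper above
)
t.add_resource(instance)
print(t.to_json())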
380,192
def map_(input_layer, fn):
    if not input_layer.is_sequence():
        raise ValueError('Can only map a sequence.')
    return [fn(x) for x in input_layer]
Maps the given function across this sequence. To map an entire template across the sequence, use the `as_fn` method on the template. Args: input_layer: The input tensor. fn: A function of 1 argument that is applied to each item in the sequence. Returns: A new sequence Pretty Tensor. Raises: ValueError: If the input_layer does not hold a sequence.
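A hedged sketch of calling this on a sequence Pretty Tensor; it assumes Pretty Tensor's convention of registering map_ as a .map method (trailing underscores stripped) and that wrap_sequence wraps a list of tensors.

import tensorflow as tf
import prettytensor as pt

# Assumed API: pt.wrap_sequence wraps a list of tensors into a sequence
# Pretty Tensor, and map_ above is exposed as the .map method.
images = [tf.constant([[1.0, 2.0]]), tf.constant([[3.0, 4.0]])]
seq = pt.wrap_sequence(images)
doubled = seq.map(lambda t: t * 2.0)   # fn applied to every element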
380,193
async def send_from_directory( directory: FilePath, file_name: str, *, mimetype: Optional[str]=None, as_attachment: bool=False, attachment_filename: Optional[str]=None, add_etags: bool=True, cache_timeout: Optional[int]=None, conditional: bool=True, last_modified: Optional[datetime]=None, ) -> Response: file_path = safe_join(directory, file_name) if not file_path.is_file(): raise NotFound() return await send_file( file_path, mimetype=mimetype, as_attachment=as_attachment, attachment_filename=attachment_filename, add_etags=add_etags, cache_timeout=cache_timeout, conditional=conditional, last_modified=last_modified, )
Send a file from a given directory. Arguments: directory: Directory that when combined with file_name gives the file path. file_name: File name that when combined with directory gives the file path. See :func:`send_file` for the other arguments.
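A typical Quart route built on this helper; the directory layout is made up.

from quart import Quart, send_from_directory

app = Quart(__name__)

@app.route('/downloads/<path:name>')
async def download(name):
    # Serves files from ./files; safe_join raises 404 for paths outside it.
    return await send_from_directory('files', name, as_attachment=True)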
380,194
def build_global(self, global_node): config_block_lines = self.__build_config_block( global_node.config_block) return config.Global(config_block=config_block_lines)
parse `global` section, and return the config.Global Args: global_node (TreeNode): `global` section treenode Returns: config.Global: an object
380,195
def get_first_node( node, node_not_to_step_past ): ingoing = None i = 0 current_node = node while current_node.ingoing: i = random.randrange(len(current_node.ingoing)) if current_node.ingoing[i] == node_not_to_step_past: break ingoing = current_node.ingoing current_node = current_node.ingoing[i] if ingoing: return ingoing[i] return current_node
This is a super hacky way of getting the first node after a statement. We do this because we visit a statement and keep on visiting and get something in return that is rarely the first node. So we loop and loop backwards until we hit the statement or there is nothing to step back to.
380,196
def get_reviews(self, user_id): url = _REVIEWS_USER.format(c_api=_C_API_BEGINNING, api=_API_VERSION, user_id=user_id, at=self.access_token) return _get_request(url)
Get reviews for a particular user
380,197
def conn_aws(cred, crid):
    driver = get_driver(Provider.EC2)
    try:
        aws_obj = driver(cred['aws_access_key_id'],
                         cred['aws_secret_access_key'],
                         region=cred['aws_default_region'])
    except SSLError as e:
        abort_err("\r SSL Error with AWS: {}".format(e))
    except InvalidCredsError as e:
        abort_err("\r Error with AWS Credentials: {}".format(e))
    return {crid: aws_obj}
Establish connection to AWS service.
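A sketch of the credential mapping the function appears to expect; the key names are assumptions recovered from context, not confirmed by the source.

# Hypothetical credential dict; key names are assumptions.
cred = {'aws_access_key_id': 'AKIA...',
        'aws_secret_access_key': 'secret...',
        'aws_default_region': 'us-east-1'}
conn_objs = {}
conn_objs.update(conn_aws(cred, 'aws'))   # {'aws': <libcloud EC2 driver>}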
380,198
def uninstall(self, auto_confirm=False):
    if not self.check_if_exists():
        raise UninstallationError("Cannot uninstall requirement %s, not installed" % (self.name,))
    dist = self.satisfied_by or self.conflicts_with
    paths_to_remove = UninstallPathSet(dist)

    pip_egg_info_path = os.path.join(dist.location, dist.egg_name()) + '.egg-info'
    debian_egg_info_path = pip_egg_info_path.replace('-py%s' % pkg_resources.PY_MAJOR, '')
    easy_install_egg = dist.egg_name() + '.egg'
    develop_egg_link = egg_link_path(dist)

    pip_egg_info_exists = os.path.exists(pip_egg_info_path)
    debian_egg_info_exists = os.path.exists(debian_egg_info_path)
    if pip_egg_info_exists or debian_egg_info_exists:
        # package installed by pip
        if pip_egg_info_exists:
            egg_info_path = pip_egg_info_path
        else:
            egg_info_path = debian_egg_info_path
        paths_to_remove.add(egg_info_path)
        if dist.has_metadata('installed-files.txt'):
            for installed_file in dist.get_metadata('installed-files.txt').splitlines():
                path = os.path.normpath(os.path.join(egg_info_path, installed_file))
                paths_to_remove.add(path)
        elif dist.has_metadata('top_level.txt'):
            if dist.has_metadata('namespace_packages.txt'):
                namespaces = dist.get_metadata('namespace_packages.txt')
            else:
                namespaces = []
            for top_level_pkg in [p for p in dist.get_metadata('top_level.txt').splitlines() if p and p not in namespaces]:
                path = os.path.join(dist.location, top_level_pkg)
                paths_to_remove.add(path)
                paths_to_remove.add(path + '.py')
                paths_to_remove.add(path + '.pyc')
    elif dist.location.endswith(easy_install_egg):
        # package installed by easy_install
        paths_to_remove.add(dist.location)
        easy_install_pth = os.path.join(os.path.dirname(dist.location), 'easy-install.pth')
        paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
    elif develop_egg_link:
        # develop egg
        fh = open(develop_egg_link, 'r')
        link_pointer = os.path.normcase(fh.readline().strip())
        fh.close()
        assert (link_pointer == dist.location), 'Egg-link %s does not match installed location of %s (at %s)' % (link_pointer, self.name, dist.location)
        paths_to_remove.add(develop_egg_link)
        easy_install_pth = os.path.join(os.path.dirname(develop_egg_link), 'easy-install.pth')
        paths_to_remove.add_pth(easy_install_pth, dist.location)

    # find distutils scripts= scripts
    if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
        for script in dist.metadata_listdir('scripts'):
            paths_to_remove.add(os.path.join(bin_py, script))
            if sys.platform == 'win32':
                paths_to_remove.add(os.path.join(bin_py, script) + '.bat')

    # find console_scripts entry points
    if dist.has_metadata('entry_points.txt'):
        config = ConfigParser.SafeConfigParser()
        config.readfp(FakeFile(dist.get_metadata_lines('entry_points.txt')))
        if config.has_section('console_scripts'):
            for name, value in config.items('console_scripts'):
                paths_to_remove.add(os.path.join(bin_py, name))
                if sys.platform == 'win32':
                    paths_to_remove.add(os.path.join(bin_py, name) + '.exe')
                    paths_to_remove.add(os.path.join(bin_py, name) + '.exe.manifest')
                    paths_to_remove.add(os.path.join(bin_py, name) + '-script.py')

    paths_to_remove.remove(auto_confirm)
    self.uninstalled = paths_to_remove
Uninstall the distribution currently satisfying this requirement. Prompts before removing or modifying files unless ``auto_confirm`` is True. Refuses to delete or modify files outside of ``sys.prefix`` - thus uninstallation within a virtual environment can only modify that virtual environment, even if the virtualenv is linked to global site-packages.
380,199
def inbox(request, template_name='django_messages/inbox.html'):
    message_list = Message.objects.inbox_for(request.user)
    return render(request, template_name, {
        'message_list': message_list,
    })
Displays a list of received messages for the current user. Optional Arguments: ``template_name``: name of the template to use.
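Wiring the view into a URLconf, including the optional template override; the URL patterns and override path are illustrative.

from django.urls import path
from django_messages.views import inbox

urlpatterns = [
    path('messages/inbox/', inbox, name='messages_inbox'),
    # Optionally override the template via the extra-kwargs dict:
    path('mail/', inbox, {'template_name': 'mail/inbox.html'}),
]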