28,800
def make_error_redirect(self, authorization_error=None): if not self.redirect_uri: return HttpResponseRedirect(self.missing_redirect_uri) authorization_error = (authorization_error or AccessDenied()) response_params = get_error_details(authorization_error) if self.state is not None: response_params['state'] = self.state return HttpResponseRedirect( update_parameters(self.redirect_uri, response_params))
Return a Django ``HttpResponseRedirect`` describing the request failure. If the :py:meth:`validate` method raises an error, the authorization endpoint should return the result of calling this method like so: >>> auth_code_generator = ( >>> AuthorizationCodeGenerator('/oauth2/missing_redirect_uri/')) >>> try: >>> auth_code_generator.validate(request) >>> except AuthorizationError as authorization_error: >>> return auth_code_generator.make_error_redirect(authorization_error) If there is no known Client ``redirect_uri`` (because it is malformed, or the Client is invalid, or if the supplied ``redirect_uri`` does not match the registered value, or some other request failure) then the response will redirect to the ``missing_redirect_uri`` passed to the :py:meth:`__init__` method. Also used to signify user denial; call this method without passing in the optional ``authorization_error`` argument to return a generic :py:class:`AccessDenied` message. >>> if not user_accepted_request: >>> return auth_code_generator.make_error_redirect()
28,801
def option(default_value): def _option(f): (args, varargs, kwargs, dflts) = getargspec_py27like(f) if varargs is not None or kwargs is not None or dflts: raise ValueError('option functions may not accept variadic, keyword, or default arguments') if len(args) != 1: raise ValueError('option functions must accept exactly one argument') f._pimms_immutable_data_ = {} f._pimms_immutable_data_['is_option'] = True f._pimms_immutable_data_['default_value'] = default_value f._pimms_immutable_data_['name'] = f.__name__ f = staticmethod(f) return f return _option
The @option(x) decorator, usable in an immutable class (see immutable), is identical to the @param decorator except that the parameter is not required and instead takes on the default value x when the immutable is created.
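A minimal usage sketch (hypothetical names; assumes a pimms-style ``immutable`` class decorator, where the option function receives the supplied value as its single argument):

>>> @immutable
... class Circle(object):
...     @option(1.0)
...     def radius(r):
...         return float(r)
>>> Circle().radius  # takes the default value 1.0 unless overridden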
28,802
def nginx_web_ssl_config(self): dt = [self.nginx_web_dir, self.nginx_ssl_dir] return nginx_conf_string.simple_ssl_web_conf.format(dt=dt)
Nginx web ssl config
28,803
def send_game(self, *args, **kwargs): return send_game(*args, **self._merge_overrides(**kwargs)).run()
See :func:`send_game`
28,804
def sequence_diversity(pos, ac, start=None, stop=None, is_accessible=None): if not isinstance(pos, SortedIndex): pos = SortedIndex(pos, copy=False) ac = asarray_ndim(ac, 2) is_accessible = asarray_ndim(is_accessible, 1, allow_none=True) if start is not None or stop is not None: loc = pos.locate_range(start, stop) pos = pos[loc] ac = ac[loc] if start is None: start = pos[0] if stop is None: stop = pos[-1] mpd = mean_pairwise_difference(ac, fill=0) mpd_sum = np.sum(mpd) if is_accessible is None: n_bases = stop - start + 1 else: n_bases = np.count_nonzero(is_accessible[start-1:stop]) pi = mpd_sum / n_bases return pi
Estimate nucleotide diversity within a given region, which is the average proportion of sites (including monomorphic sites not present in the data) that differ between randomly chosen pairs of chromosomes. Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. ac : array_like, int, shape (n_variants, n_alleles) Allele counts array. start : int, optional The position at which to start (1-based). Defaults to the first position. stop : int, optional The position at which to stop (1-based). Defaults to the last position. is_accessible : array_like, bool, shape (len(contig),), optional Boolean array indicating accessibility status for all positions in the chromosome/contig. Returns ------- pi : ndarray, float, shape (n_windows,) Nucleotide diversity. Notes ----- If start and/or stop are not provided, uses the difference between the last and the first position as a proxy for the total number of sites, which can overestimate the sequence diversity. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 0], [0, 1]], ... [[0, 0], [1, 1]], ... [[0, 1], [1, 1]], ... [[1, 1], [1, 1]], ... [[0, 0], [1, 2]], ... [[0, 1], [1, 2]], ... [[0, 1], [-1, -1]], ... [[-1, -1], [-1, -1]]]) >>> ac = g.count_alleles() >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27] >>> pi = allel.sequence_diversity(pos, ac, start=1, stop=31) >>> pi 0.13978494623655915
28,805
def reqHistoricalData(self, id, contract, endDateTime, durationStr, barSizeSetting, whatToShow, useRTH, formatDate, chartOptions): return _swigibpy.EClientSocketBase_reqHistoricalData(self, id, contract, endDateTime, durationStr, barSizeSetting, whatToShow, useRTH, formatDate, chartOptions)
reqHistoricalData(EClientSocketBase self, TickerId id, Contract contract, IBString const & endDateTime, IBString const & durationStr, IBString const & barSizeSetting, IBString const & whatToShow, int useRTH, int formatDate, TagValueListSPtr const & chartOptions)
28,806
def _get_qgrams(self, src, tar, qval=0, skip=0): if isinstance(src, Counter) and isinstance(tar, Counter): return src, tar if qval > 0: return QGrams(src, qval, '$#', skip), QGrams(tar, qval, '$#', skip) return Counter(src.strip().split()), Counter(tar.strip().split())
Return the Q-Grams in src & tar. Parameters ---------- src : str Source string (or QGrams/Counter objects) for comparison tar : str Target string (or QGrams/Counter objects) for comparison qval : int The length of each q-gram; 0 for non-q-gram version skip : int The number of characters to skip (only works when src and tar are strings) Returns ------- tuple of Counters Q-Grams Examples -------- >>> pe = _TokenDistance() >>> pe._get_qgrams('AT', 'TT', qval=2) (QGrams({'$A': 1, 'AT': 1, 'T#': 1}), QGrams({'$T': 1, 'TT': 1, 'T#': 1}))
28,807
def top_commenters(self, num): num = min(num, len(self.commenters)) if num <= 0: return top_commenters = sorted( iteritems(self.commenters), key=lambda x: (-sum(y.score for y in x[1]), -len(x[1]), str(x[0])))[:num] retval = self.post_header.format('Top Commenters') for author, comments in top_commenters: retval += '1. {}: {} ({} comment{})\n'.format( self._user(author), self._points(sum(x.score for x in comments)), len(comments), 's' if len(comments) != 1 else '') return '{}\n'.format(retval)
Return a markdown representation of the top commenters.
28,808
def salt_ssh(): import salt.cli.ssh if '' in sys.path: sys.path.remove('') try: client = salt.cli.ssh.SaltSSH() _install_signal_handlers(client) client.run() except SaltClientError as err: trace = traceback.format_exc() try: hardcrash = client.options.hard_crash except (AttributeError, KeyError): hardcrash = False _handle_interrupt( SystemExit(err), err, hardcrash, trace=trace)
Execute the salt-ssh system
28,809
def attach_events(*args): def wrapper(cls): for name, fn in cls.__dict__.items(): if not name.startswith('_') and hasattr(fn, _SQLAlchemyEvent.ATTR): e = getattr(fn, _SQLAlchemyEvent.ATTR) if e.field_name: event.listen(getattr(cls, e.field_name), e.event_name, fn, **e.listen_kwargs) else: event.listen(cls, e.event_name, fn, **e.listen_kwargs) return cls if args and callable(args[0]): return wrapper(args[0]) return wrapper
Class decorator for SQLAlchemy models to attach listeners on class methods decorated with :func:`.on` Usage:: @attach_events class User(Model): email = Column(String(50)) @on('email', 'set') def lowercase_email(self, new_value, old_value, initiating_event): self.email = new_value.lower()
28,810
def set_description(self): if self.device_info[] == : self.node[] = % (self.device_info[], self.device_info[]) else: self.node[] = self.device_info[]
Set the node description
28,811
def piper(self, in_sock, out_sock, out_addr, onkill): "Worker thread for data reading" try: while True: written = in_sock.recv(32768) if not written: try: out_sock.shutdown(socket.SHUT_WR) except socket.error: self.threads[onkill].kill() break try: out_sock.sendall(written) except socket.error: pass self.data_handled += len(written) except greenlet.GreenletExit: return
Worker thread for data reading
28,812
def eigb(A, y0, eps, rmax=150, nswp=20, max_full_size=1000, verb=1): ry = y0.r.copy() lam = tt_eigb.tt_block_eig.tt_eigb(y0.d, A.n, A.m, A.tt.r, A.tt.core, y0.core, ry, eps, rmax, ry[y0.d], 0, nswp, max_full_size, verb) y = tensor() y.d = y0.d y.n = A.n.copy() y.r = ry y.core = tt_eigb.tt_block_eig.result_core.copy() tt_eigb.tt_block_eig.deallocate_result() y.get_ps() return y, lam
Approximate computation of minimal eigenvalues in tensor train format This function uses the alternating least-squares algorithm for the computation of several minimal eigenvalues. If you want maximal eigenvalues, just send -A to the function. :Reference: S. V. Dolgov, B. N. Khoromskij, I. V. Oseledets, and D. V. Savostyanov. Computation of extreme eigenvalues in higher dimensions using block tensor train format. Computer Phys. Comm., 185(4):1207-1216, 2014. http://dx.doi.org/10.1016/j.cpc.2013.12.017 :param A: Matrix in the TT-format :type A: matrix :param y0: Initial guess in the block TT-format, r(d+1) is the number of eigenvalues sought :type y0: tensor :param eps: Accuracy required :type eps: float :param rmax: Maximal rank :type rmax: int :param kickrank: Additional rank; the larger, the more robust the method :type kickrank: int :rtype: A tuple (ev, tensor), where ev is a list of eigenvalues, tensor is an approximation to eigenvectors. :Example: >>> import tt >>> import tt.eigb >>> d = 8; f = 3 >>> r = [8] * (d * f + 1); r[d * f] = 8; r[0] = 1 >>> x = tt.rand(n, d * f, r) >>> a = tt.qlaplace_dd([8, 8, 8]) >>> sol, ev = tt.eigb.eigb(a, x, 1e-6, verb=0) Solving a block eigenvalue problem Looking for 8 eigenvalues with accuracy 1E-06 swp: 1 er = 35.93 rmax:19 swp: 2 er = 4.51015E-04 rmax:18 swp: 3 er = 1.87584E-12 rmax:17 Total number of matvecs: 0 >>> print ev [ 0.00044828 0.00089654 0.00089654 0.00089654 0.0013448 0.0013448 0.0013448 0.00164356]
28,813
def get_nameserver_detail_output_show_nameserver_nameserver_xlatedomain(self, **kwargs): config = ET.Element("config") get_nameserver_detail = ET.Element("get_nameserver_detail") config = get_nameserver_detail output = ET.SubElement(get_nameserver_detail, "output") show_nameserver = ET.SubElement(output, "show-nameserver") nameserver_portid_key = ET.SubElement(show_nameserver, "nameserver-portid") nameserver_portid_key.text = kwargs.pop('nameserver_portid') nameserver_xlatedomain = ET.SubElement(show_nameserver, "nameserver-xlatedomain") nameserver_xlatedomain.text = kwargs.pop('nameserver_xlatedomain') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
28,814
def run(self): vertices_resources = { component: {Cores: 1, SDRAM: component._get_config_size()} for component in self._components } vertices_applications = {component: component._get_kernel() for component in self._components} net_keys = {Net(wire.source, wire.sinks): (wire.routing_key, 0xFFFFFFFF) for wire in self._wires} nets = list(net_keys) mc = MachineController(self._hostname) mc.boot() system_info = mc.get_system_info() placements, allocations, application_map, routing_tables = \ place_and_route_wrapper(vertices_resources, vertices_applications, nets, net_keys, system_info) with mc.application(): memory_allocations = sdram_alloc_for_vertices(mc, placements, allocations) for component, memory in memory_allocations.items(): component._write_config(memory) mc.load_routing_tables(routing_tables) mc.load_application(application_map) mc.wait_for_cores_to_reach_state("sync0", len(self._components)) mc.send_signal("sync0") time.sleep(self.length * 0.001) mc.wait_for_cores_to_reach_state("exit", len(self._components)) for component, memory in memory_allocations.items(): component._read_results(memory)
Run the simulation.
28,815
def regroup_vectorized(srccat, eps, far=None, dist=norm_dist): if far is None: far = 0.5 order = np.argsort(srccat.dec, kind='mergesort')[::-1] groups = [[order[0]]] for idx in order[1:]: rec = srccat[idx] decmin = rec.dec - far for group in reversed(groups): if srccat.dec[group[-1]] < decmin: groups.append([idx]) rafar = far / np.cos(np.radians(rec.dec)) group_recs = np.take(srccat, group, mode='clip') group_recs = group_recs[abs(rec.ra - group_recs.ra) <= rafar] if len(group_recs) and dist(rec, group_recs).min() < eps: group.append(idx) break else: groups.append([idx]) return groups
Regroup the islands of a catalog according to their normalised distance. Assumes srccat is recarray-like for efficiency. Return a list of island groups. Parameters ---------- srccat : np.rec.array or pd.DataFrame Should have the following fields[units]: ra[deg],dec[deg], a[arcsec],b[arcsec],pa[deg], peak_flux[any] eps : float maximum normalised distance within which sources are considered to be grouped far : float (degrees) sources that are further than this distance apart will not be grouped, and will not be tested. Default = 0.5. dist : func a function that calculates the distance between a source and each element of an array of sources. Default = :func:`AegeanTools.cluster.norm_dist` Returns ------- islands : list of lists Each island contains integer indices for members from srccat (in descending dec order).
28,816
def start(self, wait_for_completion=True, operation_timeout=None, status_timeout=None): result = self.manager.session.post( self.uri + '/operations/start', wait_for_completion=wait_for_completion, operation_timeout=operation_timeout) if wait_for_completion: statuses = ["active", "degraded"] self.wait_for_status(statuses, status_timeout) return result
Start (activate) this Partition, using the HMC operation "Start Partition". This HMC operation has deferred status behavior: If the asynchronous job on the HMC is complete, it takes a few seconds until the partition status has reached the desired value (it still may show status "paused"). If `wait_for_completion=True`, this method repeatedly checks the status of the partition after the HMC operation has completed, and waits until the status is in one of the desired states "active" or "degraded". TODO: Describe what happens if the maximum number of active partitions is exceeded. Authorization requirements: * Object-access permission to this Partition. * Object-access permission to the CPC containing this Partition. * Task permission to the "Start Partition" task. Parameters: wait_for_completion (bool): Boolean controlling whether this method should wait for completion of the requested asynchronous HMC operation, as follows: * If `True`, this method will wait for completion of the asynchronous job performing the operation. * If `False`, this method will return immediately once the HMC has accepted the request to perform the operation. operation_timeout (:term:`number`): Timeout in seconds, for waiting for completion of the asynchronous job performing the operation. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.OperationTimeout` is raised. status_timeout (:term:`number`): Timeout in seconds, for waiting that the status of the partition has reached the desired status, after the HMC operation has completed. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.StatusTimeout` is raised. Returns: :class:`py:dict` or :class:`~zhmcclient.Job`: If `wait_for_completion` is `True`, returns an empty :class:`py:dict` object. If `wait_for_completion` is `False`, returns a :class:`~zhmcclient.Job` object representing the asynchronously executing job on the HMC. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for completion of the operation. :exc:`~zhmcclient.StatusTimeout`: The timeout expired while waiting for the desired partition status.
28,817
def get_hull_energy(self, comp): e = 0 for k, v in self.get_decomposition(comp).items(): e += k.energy_per_atom * v return e * comp.num_atoms
Args: comp (Composition): Input composition Returns: Energy of lowest energy equilibrium at desired composition. Not normalized by atoms, i.e. E(Li4O2) = 2 * E(Li2O)
28,818
def write_xsd(cls) -> None: with open(cls.filepath_source) as file_: template = file_.read() template = template.replace( '<!--include model sequence groups-->', cls.get_insertion()) template = template.replace( '<!--include exchange items-->', cls.get_exchangeinsertion()) with open(cls.filepath_target, 'w') as file_: file_.write(template)
Write the complete base schema file `HydPyConfigBase.xsd` based on the template file `HydPyConfigBase.xsdt`. Method |XSDWriter.write_xsd| adds model specific information to the general information of template file `HydPyConfigBase.xsdt` regarding reading and writing of time series data and exchanging parameter and sequence values e.g. during calibration. The following example shows that after writing a new schema file, method |XMLInterface.validate_xml| does not raise an error when either applied on the XML configuration files `single_run.xml` or `multiple_runs.xml` of the `LahnH` example project: >>> import os >>> from hydpy.auxs.xmltools import XSDWriter, XMLInterface >>> if os.path.exists(XSDWriter.filepath_target): ... os.remove(XSDWriter.filepath_target) >>> os.path.exists(XSDWriter.filepath_target) False >>> XSDWriter.write_xsd() >>> os.path.exists(XSDWriter.filepath_target) True >>> from hydpy import data >>> for configfile in ('single_run.xml', 'multiple_runs.xml'): ... XMLInterface(configfile, data.get_path('LahnH')).validate_xml()
28,819
def freeze(self): self.target.disable() self.filter.configure(state='disable') self.prog_ob.configure(state='disable') self.pi.configure(state='disable') self.observers.configure(state='disable') self.comment.configure(state='disable')
Freeze all settings so that they can't be altered
28,820
def merge(self, config): if isinstance(config, ConfigParser): self.update(config) elif isinstance(config, str): self.read(config)
Load configuration from given configuration. :param config: config to load. If config is a string type, then it's treated as .ini filename :return: None
28,821
def isPow2(num) -> bool: if not isinstance(num, int): num = int(num) return num != 0 and ((num & (num - 1)) == 0)
Check if number or constant is power of two
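The trick ``num & (num - 1)`` clears the lowest set bit, so it is zero exactly for powers of two; for example:

>>> isPow2(8)
True
>>> isPow2(12)
False
>>> isPow2(0)
False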
28,822
def _clean_key_type(key_name, escape_char=ESCAPE_SEQ): for i in (2, 1): if len(key_name) < i: return None, key_name type_v = key_name[-i:] if type_v in _KEY_SPLIT: if len(key_name) <= i: return _KEY_SPLIT[type_v], '' esc_cnt = 0 for pos in range(-i - 1, -len(key_name) - 1, -1): if key_name[pos] == escape_char: esc_cnt += 1 else: break if esc_cnt % 2 == 0: return _KEY_SPLIT[type_v], key_name[:-i] else: return None, key_name return None, key_name
Removes type specifier returning detected type and a key name without type specifier. :param str key_name: A key name containing type postfix. :rtype: tuple[type|None, str] :returns: Type definition and cleaned key name.
28,823
def _str_replace(txt): txt = txt.replace(",", "") txt = txt.replace(" ", "_") txt = txt.replace(":", "") txt = txt.replace(".", "") txt = txt.replace("/", "") txt = txt.replace("", "") return txt
Makes a small text amenable to being used in a filename.
28,824
def combine(self, members, output_file, dimension=None, start_index=None, stop_index=None, stride=None): nco = None try: nco = Nco() except BaseException: raise ImportError("NCO not found. The NCO python bindings are required to use 'Collection.combine'.") if len(members) > 0 and hasattr(members[0], 'path'): members = [ m.path for m in members ] options = ['-4'] options += ['-L', '3'] options += ['-h'] nco.ncrcat(input=members, output=output_file, options=options)
Combine many files into a single file on disk. Defaults to using the 'time' dimension.
28,825
def dead(name, enable=None, sig=None, init_delay=None, **kwargs): ret = {: name, : {}, : True, : } if in kwargs: return _enabled_used_error(ret) if isinstance(enable, six.string_types): enable = salt.utils.data.is_true(enable) try: if not _available(name, ret): if __opts__.get(): ret[] = None ret[] = .format(name) else: before_toggle_status = __salt__[](name, sig) if in __salt__: if salt.utils.platform.is_windows(): before_toggle_enable_status = __salt__[](name)[] in [, ] else: before_toggle_enable_status = __salt__[](name) else: before_toggle_enable_status = True if not before_toggle_status: ret[] = .format(name) if enable is True and not before_toggle_enable_status: ret.update(_enable(name, None, skip_verify=False, **kwargs)) elif enable is False and before_toggle_enable_status: ret.update(_disable(name, None, skip_verify=False, **kwargs)) return ret if __opts__[]: ret[] = None ret[] = .format(name) return ret stop_kwargs, warnings = _get_systemd_only(__salt__[], kwargs) if warnings: ret.setdefault(, []).extend(warnings) if salt.utils.platform.is_windows(): for arg in [, , ]: if kwargs.get(arg, False): stop_kwargs.update({arg: kwargs.get(arg)}) func_ret = __salt__[](name, **stop_kwargs) if not func_ret: ret[] = False ret[] = .format(name) if enable is True: ret.update(_enable(name, True, result=False, skip_verify=False, **kwargs)) elif enable is False: ret.update(_disable(name, True, result=False, skip_verify=False, **kwargs)) return ret if init_delay: time.sleep(init_delay) after_toggle_status = __salt__[](name) if in __salt__: after_toggle_enable_status = __salt__[](name) else: after_toggle_enable_status = True if ( (before_toggle_enable_status != after_toggle_enable_status) or (before_toggle_status != after_toggle_status) ) and not ret.get(, {}): ret[][name] = after_toggle_status if after_toggle_status: ret[] = False ret[] = .format(name) else: ret[] = .format(name) if enable is True: ret.update(_enable(name, after_toggle_status, result=not after_toggle_status, skip_verify=False, **kwargs)) elif enable is False: ret.update(_disable(name, after_toggle_status, result=not after_toggle_status, skip_verify=False, **kwargs)) return ret
Ensure that the named service is dead by stopping the service if it is running name The name of the init or rc script used to manage the service enable Set the service to be enabled at boot time, ``True`` sets the service to be enabled, ``False`` sets the named service to be disabled. The default is ``None``, which does not enable or disable anything. sig The string to search for when looking for the service process with ps init_delay Add a sleep command (in seconds) before the check to make sure service is killed. .. versionadded:: 2017.7.0 no_block : False **For systemd minions only.** Stops the service using ``--no-block``. .. versionadded:: 2017.7.0
28,826
def linear_interpolation(first, last, steps): result = [] for step in xrange(0, steps): fpart = (steps - step) * first lpart = (step + 1) * last value = (fpart + lpart) / float(steps + 1) result.append(value) return result
Interpolates all missing values using linear interpolation. :param numeric first: Start value for the interpolation. :param numeric last: End Value for the interpolation :param integer steps: Number of missing values that have to be calculated. :return: Returns a list of floats containing only the missing values. :rtype: list :todo: Define a more general interface!
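For example, interpolating four missing values between 0 and 10 with the weighting formula above:

>>> linear_interpolation(0, 10, 4)
[2.0, 4.0, 6.0, 8.0]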
28,827
def move_images(self, image_directory): image_paths = glob(image_directory + "/**/*.png", recursive=True) for image_path in image_paths: destination = image_path.replace("\\image\\", "\\") shutil.move(image_path, destination) image_folders = glob(image_directory + "/**/image", recursive=True) for image_folder in image_folders: os.removedirs(image_folder)
Moves png-files one directory up from path/image/*.png -> path/*.png
28,828
def transform_metadata(blob): o = {} for e in blob: i = e[u'id'] o[i] = e return o
Transforms metadata types about channels / users / bots / etc. into a dict rather than a list in order to enable faster lookup.
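Assuming the lookup key is the Slack-style ``u'id'`` field (as restored above), usage looks like:

users = [{u'id': u'U1', u'name': u'alice'}, {u'id': u'U2', u'name': u'bob'}]
by_id = transform_metadata(users)
by_id[u'U1']  # -> {u'id': u'U1', u'name': u'alice'}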
28,829
def cli(ctx, project_dir): exit_code = SCons(project_dir).sim() ctx.exit(exit_code)
Launch the verilog simulation.
28,830
def get_consul(self, resource_type): consul = self.CONSUL_MAP.get(resource_type) if consul: return consul(self)
Returns an object that a :class:`~bang.deployers.deployer.Deployer` uses to control resources of :attr:`resource_type`. :param str resource_type: Any of the resources defined in :mod:`bang.resources`.
28,831
def check_mq_connection(self): import pika from zengine.client_queue import BLOCKING_MQ_PARAMS from pika.exceptions import ProbableAuthenticationError, ConnectionClosed try: connection = pika.BlockingConnection(BLOCKING_MQ_PARAMS) channel = connection.channel() if channel.is_open: print(__(u"{0}RabbitMQ is working{1}").format(CheckList.OKGREEN, CheckList.ENDC)) elif channel.is_closed or channel.is_closing: print(__(u"{0}RabbitMQ is not working!{1}").format(CheckList.FAIL, CheckList.ENDC)) except ConnectionClosed as e: print(__(u"{0}RabbitMQ is not working!{1}").format(CheckList.FAIL, CheckList.ENDC), e) except ProbableAuthenticationError as e: print(__(u"{0}RabbitMQ username and password wrong{1}").format(CheckList.FAIL, CheckList.ENDC))
Checks the RabbitMQ connection and prints to the screen whether or not a connection could be established.
28,832
def import_activity_to_graph(diagram_graph, process_id, process_attributes, element): BpmnDiagramGraphImport.import_flow_node_to_graph(diagram_graph, process_id, process_attributes, element) element_id = element.getAttribute(consts.Consts.id) diagram_graph.node[element_id][consts.Consts.default] = element.getAttribute(consts.Consts.default) \ if element.hasAttribute(consts.Consts.default) else None
Method that adds the new element that represents BPMN activity. Should not be used directly, only as a part of method, that imports an element which extends Activity element (task, subprocess etc.) :param diagram_graph: NetworkX graph representing a BPMN process diagram, :param process_id: string object, representing an ID of process element, :param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of imported flow node, :param element: object representing a BPMN XML element which extends 'activity'.
28,833
def _getInstrumentsVoc(self): cfilter = {'portal_type': 'Instrument', 'is_active': True} if self.getMethod(): cfilter['getMethodUIDs'] = {"query": self.getMethod().UID(), "operator": "or"} bsc = getToolByName(self, 'bika_setup_catalog') items = [('', '')] + [ (o.UID, o.Title) for o in bsc(cfilter)] o = self.getInstrument() if o and o.UID() not in [i[0] for i in items]: items.append((o.UID(), o.Title())) items.sort(lambda x, y: cmp(x[1], y[1])) return DisplayList(list(items))
This function returns the registered instruments in the system as a vocabulary. The instruments are filtered by the selected method.
28,834
def create_replication_interface(self, sp, ip_port, ip_address, netmask=None, v6_prefix_length=None, gateway=None, vlan_id=None): return UnityReplicationInterface.create( self._cli, sp, ip_port, ip_address, netmask=netmask, v6_prefix_length=v6_prefix_length, gateway=gateway, vlan_id=vlan_id)
Creates a replication interface. :param sp: `UnityStorageProcessor` object. Storage processor on which the replication interface is running. :param ip_port: `UnityIpPort` object. Physical port or link aggregation on the storage processor on which the interface is running. :param ip_address: IP address of the replication interface. :param netmask: IPv4 netmask for the replication interface, if it uses an IPv4 address. :param v6_prefix_length: IPv6 prefix length for the interface, if it uses an IPv6 address. :param gateway: IPv4 or IPv6 gateway address for the replication interface. :param vlan_id: VLAN identifier for the interface. :return: the newly create replication interface.
28,835
def to_json(self): event_as_dict = copy.deepcopy(self.event_body) if self.timestamp: if "keen" in event_as_dict: event_as_dict["keen"]["timestamp"] = self.timestamp.isoformat() else: event_as_dict["keen"] = {"timestamp": self.timestamp.isoformat()} return json.dumps(event_as_dict)
Serializes the event to JSON. :returns: a string
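A hedged usage sketch (``event_body`` and ``timestamp`` are the only attributes assumed here; the event class itself is not shown in this snippet):

event.event_body = {"purchase": {"price": 5}}
event.timestamp = datetime.datetime(2020, 1, 1)
event.to_json()
# -> '{"purchase": {"price": 5}, "keen": {"timestamp": "2020-01-01T00:00:00"}}'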
28,836
def post_message2(consumers, lti_key, url, body, method='POST', content_type='application/xml'): (response, _) = _post_patched_request( consumers, lti_key, body, url, method, content_type, ) is_success = response.status == 200 log.debug("is success %s", is_success) return is_success
Posts a signed message to LTI consumer using LTI 2.0 format :param: consumers: consumers from config :param: lti_key: key to find appropriate consumer :param: url: post url :param: body: xml body :return: success
28,837
def group_and_sort_nodes(self): if self.node_grouping and not self.node_order: if self.group_order == "alphabetically": self.nodes = [ n for n, d in sorted( self.graph.nodes(data=True), key=lambda x: x[1][self.node_grouping], ) ] elif self.group_order == "default": grp = [ d[self.node_grouping] for _, d in self.graph.nodes(data=True) ] grp_name = list(unique_everseen(grp)) nodes = [] for key in grp_name: nodes.extend( [ n for n, d in self.graph.nodes(data=True) if key in d.values() ] ) self.nodes = nodes elif self.node_order and not self.node_grouping: self.nodes = [ n for n, _ in sorted( self.graph.nodes(data=True), key=lambda x: x[1][self.node_order], ) ] elif self.node_grouping and self.node_order: if self.group_order == "alphabetically": self.nodes = [ n for n, d in sorted( self.graph.nodes(data=True), key=lambda x: ( x[1][self.node_grouping], x[1][self.node_order], ), ) ] elif self.group_order == "default": grp = [ d[self.node_grouping] for _, d in self.graph.nodes(data=True) ] grp_name = list(unique_everseen(grp)) nodes = [] for key in grp_name: nodes.extend( [ n for n, d in sorted( self.graph.nodes(data=True), key=lambda x: x[1][self.node_order], ) if key in d.values() ] ) self.nodes = nodes
Groups and then sorts the nodes according to the criteria passed into the Plot constructor.
28,838
def consumer(cfg_uri, queue, logger=None, fetchsize=1): from stompest.protocol import StompSpec _info, _exception = _deal_logger(logger) cfg_uri = _build_uri(cfg_uri) def decorator(function): def _build_conn(): client = _conn(cfg_uri, queue, _info) client.subscribe(queue, { StompSpec.ACK_HEADER: StompSpec.ACK_CLIENT_INDIVIDUAL, 'activemq.prefetchSize': fetchsize }) return client @wraps(function) def wapper(): client = _build_conn() while True: try: frame = client.receiveFrame() _info('received frame: %s' % frame) param = loads(frame.body) code, msg = function(param) _info('task %s finished: code=%s, msg=%s' % (frame.body, code, msg)) except (KeyboardInterrupt, AssertionError, ConsumerFatalError), e: _exception(e) break except Exception, e: _exception(e) finally: try: client.ack(frame) except Exception, e: _exception(e) client.close() client = _build_conn() client.disconnect() _info('consumer %s disconnected' % cfg_uri) return wapper return decorator
Consumer side of a distributed crawler (the part that does the actual crawling). The wrapped function must satisfy the following requirements: 1. it takes exactly one argument 2. for each task it returns two values: code, message :param cfg_uri: URI from which tasks are read :param queue: name of the queue :param logger: logging utility :param fetchsize: number of messages fetched at a time
28,839
def pckw02(handle, classid, frname, first, last, segid, intlen, n, polydg, cdata, btime): handle = ctypes.c_int(handle) classid = ctypes.c_int(classid) frame = stypes.stringToCharP(frname) first = ctypes.c_double(first) last = ctypes.c_double(last) segid = stypes.stringToCharP(segid) intlen = ctypes.c_double(intlen) n = ctypes.c_int(n) polydg = ctypes.c_int(polydg) cdata = stypes.toDoubleVector(cdata) btime = ctypes.c_double(btime) libspice.pckw02_c(handle, classid, frame, first, last, segid, intlen, n, polydg, cdata, btime)
Write a type 2 segment to a PCK binary file given the file handle, frame class ID, base frame, time range covered by the segment, and the Chebyshev polynomial coefficients. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pckw02_c.html :param handle: Handle of binary PCK file open for writing. :type handle: int :param classid: Frame class ID of body-fixed frame. :type classid: int :param frname: Name of base reference frame. :type frname: str :param first: Start time of interval covered by segment. :type first: float :param last: End time of interval covered by segment. :type last: float :param segid: Segment identifier. :type segid: str :param intlen: Length of time covered by logical record. :type intlen: float :param n: Number of logical records in segment. :type n: int :param polydg: Chebyshev polynomial degree. :type polydg: int :param cdata: Array of Chebyshev coefficients. :type cdata: N-Element Array of floats :param btime: Begin time of first logical record. :type btime: float
28,840
def is_all_field_none(self): if self._id_ is not None: return False if self._created is not None: return False if self._updated is not None: return False if self._year is not None: return False if self._alias_user is not None: return False return True
:rtype: bool
28,841
def load(stream, container=dict): ret = container() for line in stream.readlines(): line = line.rstrip() if line is None or not line: continue (key, val) = _parseline(line) if key is None: LOGGER.warning("Empty val in the line: %s", line) continue ret[key] = val return ret
Load and parse a file or file-like object 'stream' provides simple shell variables' definitions. :param stream: A file or file like object :param container: Factory function to create a dict-like object to store properties :return: Dict-like object holding shell variables' definitions >>> from anyconfig.compat import StringIO as to_strm >>> load(to_strm('')) {} >>> load(to_strm("# ")) {} >>> load(to_strm("aaa=")) {'aaa': ''} >>> load(to_strm("aaa=bbb")) {'aaa': 'bbb'} >>> load(to_strm("aaa=bbb # ...")) {'aaa': 'bbb'}
28,842
def _handle_socket(self, event: int, fd: int, multi: Any, data: bytes) -> None: event_map = { pycurl.POLL_NONE: ioloop.IOLoop.NONE, pycurl.POLL_IN: ioloop.IOLoop.READ, pycurl.POLL_OUT: ioloop.IOLoop.WRITE, pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE, } if event == pycurl.POLL_REMOVE: if fd in self._fds: self.io_loop.remove_handler(fd) del self._fds[fd] else: ioloop_event = event_map[event] if fd in self._fds: self.io_loop.remove_handler(fd) self.io_loop.add_handler(fd, self._handle_events, ioloop_event) self._fds[fd] = ioloop_event
Called by libcurl when it wants to change the file descriptors it cares about.
28,843
def stretch(arr, fields=None, return_indices=False): dtype = [] len_array = None flatten = False if fields is None: fields = arr.dtype.names elif isinstance(fields, string_types): fields = [fields] flatten = True for field in fields: dt = arr.dtype[field] if dt == 'O' or len(dt.shape): if dt == 'O': lengths = VLEN(arr[field]) else: lengths = np.repeat(dt.shape[0], arr.shape[0]) if len_array is None: len_array = lengths elif not np.array_equal(lengths, len_array): raise ValueError( "inconsistent lengths of array columns in input") if dt == 'O': dtype.append((field, arr[field][0].dtype)) else: dtype.append((field, arr[field].dtype, dt.shape[1:])) else: dtype.append((field, dt)) if len_array is None: raise RuntimeError("no array column in input") ret = np.empty(np.sum(len_array), dtype=dtype) for field in fields: dt = arr.dtype[field] if dt == 'O' or len(dt.shape) == 1: ret[field] = np.hstack(arr[field]) elif len(dt.shape): ret[field] = np.vstack(arr[field]) else: ret[field] = np.repeat(arr[field], len_array) if flatten: ret = ret[fields[0]] if return_indices: idx = np.concatenate(list(map(np.arange, len_array))) return ret, idx return ret
Stretch an array. Stretch an array by ``hstack()``-ing multiple array fields while preserving column names and record array structure. If a scalar field is specified, it will be stretched along with array fields. Parameters ---------- arr : NumPy structured or record array The array to be stretched. fields : list of strings or string, optional (default=None) A list of column names or a single column name to stretch. If ``fields`` is a string, then the output array is a one-dimensional unstructured array containing only the stretched elements of that field. If None, then stretch all fields. return_indices : bool, optional (default=False) If True, the array index of each stretched array entry will be returned in addition to the stretched array. This changes the return type of this function to a tuple consisting of a structured array and a numpy int64 array. Returns ------- ret : A NumPy structured array The stretched array. Examples -------- >>> import numpy as np >>> from root_numpy import stretch >>> arr = np.empty(2, dtype=[('scalar', np.int), ('array', 'O')]) >>> arr[0] = (0, np.array([1, 2, 3], dtype=np.float)) >>> arr[1] = (1, np.array([4, 5, 6], dtype=np.float)) >>> stretch(arr, ['scalar', 'array']) array([(0, 1.0), (0, 2.0), (0, 3.0), (1, 4.0), (1, 5.0), (1, 6.0)], dtype=[('scalar', '<i8'), ('array', '<f8')])
28,844
def get(ctx, job): def get_experiment(): try: response = PolyaxonClient().experiment.get_experiment(user, project_name, _experiment) cache.cache(config_manager=ExperimentManager, response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error(.format(_experiment)) Printer.print_error(.format(e)) sys.exit(1) get_experiment_details(response) def get_experiment_job(): try: response = PolyaxonClient().experiment_job.get_job(user, project_name, _experiment, _job) cache.cache(config_manager=ExperimentJobManager, response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error(.format(_job)) Printer.print_error(.format(e)) sys.exit(1) if response.resources: get_resources(response.resources.to_dict(), header="Job resources:") response = Printer.add_status_color(response.to_light_dict( humanize_values=True, exclude_attrs=[, , , , ] )) Printer.print_header("Job info:") dict_tabulate(response) user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get(), ctx.obj.get()) if job: _job = get_experiment_job_or_local(job) get_experiment_job() else: get_experiment()
Get experiment or experiment job. Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting an experiment: \b ```bash $ polyaxon experiment get # if experiment is cached ``` \b ```bash $ polyaxon experiment --experiment=1 get ``` \b ```bash $ polyaxon experiment -xp 1 --project=cats-vs-dogs get ``` \b ```bash $ polyaxon experiment -xp 1 -p alain/cats-vs-dogs get ``` Examples for getting an experiment job: \b ```bash $ polyaxon experiment get -j 1 # if experiment is cached ``` \b ```bash $ polyaxon experiment --experiment=1 get --job=10 ``` \b ```bash $ polyaxon experiment -xp 1 --project=cats-vs-dogs get -j 2 ``` \b ```bash $ polyaxon experiment -xp 1 -p alain/cats-vs-dogs get -j 2 ```
28,845
def vrrpe_vip(self, **kwargs): int_type = kwargs.pop().lower() name = kwargs.pop(,) vip = kwargs.pop(, ) get = kwargs.pop(, False) delete = kwargs.pop(, False) callback = kwargs.pop(, self._callback) valid_int_types = [, , , , , ] if vip != : ipaddress = ip_interface(unicode(vip)) version = ipaddress.version else: version = 4 if int_type not in valid_int_types: raise ValueError( % repr(valid_int_types)) if delete: vrid = kwargs.pop() rbridge_id = kwargs.pop(, ) vrrpe_args = dict(rbridge_id=rbridge_id, name=name, vrid=vrid, virtual_ipaddr=vip) elif get: rbridge_id = kwargs.pop(, ) vrrpe_args = dict(name=name, vrid=, virtual_ipaddr=) else: vrid = kwargs.pop() ipaddress = ip_interface(unicode(vip)) if int_type == : rbridge_id = kwargs.pop(, ) vrrpe_args = dict(name=name, vrid=vrid, virtual_ipaddr=str(ipaddress.ip)) method_name = None method_class = self._interface if version == 4: vrrpe_args[] = method_name = \ % int_type elif version == 6: method_name = \ % int_type if int_type == : method_name = % method_name if version == 6: method_name = method_name.replace(, ) method_class = self._rbridge vrrpe_args[] = rbridge_id if not pynos.utilities.valid_vlan_id(name): raise InvalidVlanId("`name` must be between `1` and `8191`") elif not pynos.utilities.valid_interface(int_type, name): raise ValueError( ) vrrpe_vip = getattr(method_class, method_name) config = vrrpe_vip(**vrrpe_args) result = [] if delete: config.find().set(, ) if get: output = callback(config, handler=) for item in output.data.findall(): vrid = item.find().text if item.find() is not None: vip = item.find().text else: vip = tmp = {"vrid": vrid, "vip": vip} result.append(tmp) else: result = callback(config) return result
Set vrrpe VIP. Args: int_type (str): Type of interface. (gigabitethernet, tengigabitethernet, ve, etc). name (str): Name of interface. (1/0/5, 1/0/10, VE name etc). vrid (str): vrrpev3 ID. get (bool): Get config instead of editing config. (True, False) delete (bool): True, the VIP address is added and False if its to be deleted (True, False). Default value will be False if not specified. vip (str): IPv4/IPv6 Virtual IP Address. rbridge_id (str): rbridge-id for device. Only required when type is `ve`. callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Raises: KeyError: if `int_type`, `name`, `vrid`, or `vip` is not passed. ValueError: if `int_type`, `name`, `vrid`, or `vip` is invalid. Returns: Return value of `callback`. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output =dev.interface.vrrpe_vip(int_type='ve', ... name='89', rbridge_id = '1', ... vrid='11', vip='10.0.1.10') ... output = dev.interface.vrrpe_vip(get=True, ... int_type='ve', name='89', rbridge_id = '1') ... output =dev.interface.vrrpe_vip(delete=True, ... int_type='ve', name='89', rbridge_id = '1',vrid='1', ... vip='10.0.0.10')
28,846
def lt(lt_value): def validate(value): if value >= lt_value: return e("{} is not less than {}", value, lt_value) return validate
Validates that a field value is less than the value given to this validator.
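Usage sketch: build the validator once, then apply it per value; it returns None on success and an error message (via the module's ``e`` helper) on failure:

validate = lt(10)
validate(5)    # passes: returns None
validate(12)   # fails: returns the error built by e(...)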
28,847
def lock_excl(self, timeout='default'): timeout = self.timeout if timeout == 'default' else timeout timeout = self._cleanup_timeout(timeout) self.visalib.lock(self.session, constants.AccessModes.exclusive_lock, timeout, None)
Establish an exclusive lock to the resource. :param timeout: Absolute time period (in milliseconds) that a resource waits to get unlocked by the locking session before returning an error. (Defaults to self.timeout)
28,848
def backup(self, paths=None): if not paths: paths = self._get_paths() try: self._backup_compresslevel(paths) except TypeError: try: self._backup_pb_gui(paths) except ImportError: self._backup_pb_tqdm(paths) if self.delete_source: shutil.rmtree(self.source) return self.zip_filename
Backup method driver.
28,849
def write_xml(self): key = None if self. language is not None: lang = {} lang[] = self.language key = etree.Element(, attrib=lang) else: key = etree.Element() name = etree.Element() name.text = self.name key.append(name) if self.family is not None: family = etree.Element() family.text = self.family key.append(family) if self.version is not None: version = etree.Element() version.text = self.version key.append(version) if self.code_value is not None: code_value = etree.Element() code_value.text = self.code_value key.append(code_value) return key
Writes a VocabularyKey Xml as per Healthvault schema. :returns: lxml.etree.Element representing a single VocabularyKey
28,850
def yesterday(hour=None, minute=None): if hour is None: hour = datetime.now().hour if minute is None: minute = datetime.now().minute yesterday_date = date.today() + timedelta(days=-1) return datetime(yesterday_date.year, yesterday_date.month, yesterday_date.day, hour, minute, 0)
Gives the ``datetime.datetime`` object corresponding to yesterday. The default value for optional parameters is the current value of hour and minute. I.e: when called without specifying values for parameters, the resulting object will refer to the time = now - 24 hours; when called with only hour specified, the resulting object will refer to yesterday at the specified hour and at the current minute. :param hour: the hour for yesterday, in the format *0-23* (defaults to ``None``) :type hour: int :param minute: the minute for yesterday, in the format *0-59* (defaults to ``None``) :type minute: int :returns: a ``datetime.datetime`` object :raises: *ValueError* when hour or minute have bad values
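For instance, assuming today is 2021-06-15:

>>> yesterday(hour=9, minute=30)
datetime.datetime(2021, 6, 14, 9, 30)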
28,851
def _build_kwargs(keys, input_dict): kwargs = {} for key in keys: try: kwargs[key] = input_dict[key] except KeyError: pass return kwargs
Parameters ---------- keys : iterable Typically a list of strings. input_dict : dict-like A dictionary from which to attempt to pull each key. Returns ------- kwargs : dict A dictionary with only the keys that were in input_dict
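For example, keys absent from ``input_dict`` are silently skipped:

>>> _build_kwargs(['a', 'c'], {'a': 1, 'b': 2})
{'a': 1}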
28,852
def _linux_stp(br, state): brctl = _tool_path('brctl') return __salt__['cmd.run']('{0} stp {1} {2}'.format(brctl, br, state), python_shell=False)
Internal, sets STP state
28,853
def _take_screenshot(self, screenshot=False, name_prefix='screenshot'): if isinstance(screenshot, bool): if not screenshot: return return self._save_screenshot(name_prefix=name_prefix) if isinstance(screenshot, Image.Image): return self._save_screenshot(screen=screenshot, name_prefix=name_prefix) raise TypeError("invalid type for func _take_screenshot: " + str(type(screenshot)))
This is different from _save_screenshot. The return value may be None or the screenshot path. Args: screenshot: bool or PIL image
28,854
def _start_thread(self): self._stopping_event = Event() self._enqueueing_thread = Thread(target=self._enqueue_batches, args=(self._stopping_event,)) self._enqueueing_thread.start()
Start an enqueueing thread.
28,855
def qteDefVar(self, varName: str, value, module=None, doc: str=None): if module is None: module = qte_global if not hasattr(module, '_qte__variable__docstring__dictionary__'): module._qte__variable__docstring__dictionary__ = {} setattr(module, varName, value) module._qte__variable__docstring__dictionary__[varName] = doc return True
Define and document ``varName`` in an arbitrary name space. If ``module`` is **None** then ``qte_global`` will be used. .. warning: If the ``varName`` was already defined in ``module`` then its value and documentation are overwritten without warning. |Args| * ``varName`` (**str**): variable name. * ``value`` (**object**): arbitrary data to store. * ``module`` (**Python module**): the module in which the variable should be defined. * ``doc`` (**str**): documentation string for variable. |Returns| **bool**: **True** if ``varName`` could be defined in ``module``. |Raises| * **QtmacsArgumentError** if at least one argument has an invalid type.
28,856
def collect_diagnostic_data_45(self, end_datetime, bundle_size_bytes, cluster_name=None, roles=None, collect_metrics=False, start_datetime=None): args = { 'endTime': end_datetime.isoformat(), 'bundleSizeBytes': bundle_size_bytes, 'clusterName': cluster_name } if self._get_resource_root().version >= 10: args['roles'] = roles if self._get_resource_root().version >= 13: args['enableMonitorMetricsCollection'] = collect_metrics if start_datetime is not None: args['startTime'] = start_datetime.isoformat() return self._cmd('collectDiagnosticData', data=args)
Issue the command to collect diagnostic data. If start_datetime is specified, diagnostic data is collected for the entire period between start_datetime and end_datetime provided that bundle size is less than or equal to bundle_size_bytes. Diagnostics data collection fails if the bundle size is greater than bundle_size_bytes. If start_datetime is not specified, diagnostic data is collected starting from end_datetime and collecting backwards upto a maximum of bundle_size_bytes. @param end_datetime: The end of the collection period. Type datetime. @param bundle_size_bytes: The target size for the support bundle in bytes @param cluster_name: The cluster to collect or None for all clusters @param roles: Role ids of roles to restrict log and metric collection to. Valid since v10. @param collect_metrics: Whether to collect metrics for viewing as charts. Valid since v13. @param start_datetime: The start of the collection period. Type datetime. Valid since v13.
28,857
def unregisterWalkthrough(self, walkthrough): if type(walkthrough) in (str, unicode): walkthrough = self.findWalkthrough(walkthrough) try: self._walkthroughs.remove(walkthrough) except ValueError: pass
Unregisters the input walkthrough from the application walkthrough list. :param walkthrough | <XWalkthrough>
28,858
def run_script(self, script, identifier=_DEFAULT_SCRIPT_NAME): assert isinstance(script, six.text_type) or _is_utf_8(script) assert isinstance(identifier, six.text_type) or _is_utf_8(identifier) if isinstance(script, six.text_type): script = script.encode() if isinstance(identifier, six.text_type): identifier = identifier.encode() with _String() as output: with _String() as error: code = lib.v8cffi_run_script( self._c_context[0], script, len(script), identifier, len(identifier), output.string_ptr, output.len_ptr, error.string_ptr, error.len_ptr) if code != lib.E_V8_OK: raise exceptions.get_exception(code)(six.text_type(error)) return six.text_type(output)
Run a JS script within the context.\ All code is ran synchronously,\ there is no event loop. It's thread-safe :param script: utf-8 encoded or unicode string :type script: bytes or str :param identifier: utf-8 encoded or unicode string.\ This is used as the name of the script\ (ie: in stack-traces) :type identifier: bytes or str :return: Result of running the JS script :rtype: str :raises V8Error: if there was\ an error running the JS script
28,859
def _interpret_as_minutes(sval, mdict): if ( sval.count(':') == 1 and '.' not in sval and (('hours' not in mdict) or (mdict['hours'] is None)) and (('days' not in mdict) or (mdict['days'] is None)) and (('weeks' not in mdict) or (mdict['weeks'] is None)) ): mdict['hours'] = mdict['mins'] mdict['mins'] = mdict['secs'] mdict.pop('secs') return mdict
Times like "1:22" are ambiguous; do they represent minutes and seconds or hours and minutes? By default, timeparse assumes the latter. Call this function after parsing out a dictionary to change that assumption. >>> import pprint >>> pprint.pprint(_interpret_as_minutes('1:24', {'secs': '24', 'mins': '1'})) {'hours': '1', 'mins': '24'}
28,860
def retrieve_version(self, obj, version): current_version = getattr(obj, VERSION_ID, None) if current_version is None: return obj if str(current_version) == str(version): return obj pr = api.get_tool("portal_repository") result = pr._retrieve( obj, selector=version, preserve=(), countPurged=True) return result.object
Retrieve the version of the object
28,861
def _build_input_args(input_filepath_list, input_format_list): if len(input_format_list) != len(input_filepath_list): raise ValueError( "input_format_list & input_filepath_list are not the same size" ) input_args = [] zipped = zip(input_filepath_list, input_format_list) for input_file, input_fmt in zipped: input_args.extend(input_fmt) input_args.append(input_file) return input_args
Builds input arguments by stitching input filepaths and input formats together.
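For example, with illustrative per-file format-flag lists (each entry is itself a list of argument tokens):

>>> _build_input_args(['in1.wav', 'in2.wav'], [['-t', 'wav'], ['-t', 'wav']])
['-t', 'wav', 'in1.wav', '-t', 'wav', 'in2.wav']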
28,862
def add(self, response, condition=None): self._log.info('Adding a response with priority {priority}'.format(priority=response.priority)) if response.priority not in self._responses: self.sorted = False self._responses[response.priority] = [] if condition: self._log.debug('Attaching a conditional statement to the response') self._conditionals[response] = condition self._responses[response.priority].append(response)
Add a new Response object :param response: The Response object :type response: parser.trigger.response.Response :param condition: An optional Conditional statement for the Response :type condition: parser.condition.Condition or None
28,863
def execute(self): self.print_info() linters = [ l for l in [ self._config.lint, self._config.verifier.lint, self._config.provisioner.lint, ] if l ] for l in linters: l.execute()
Execute the actions necessary to perform a `molecule lint` and returns None. :return: None
28,864
def _pop_params(cls, kwargs): params = cls.params if not isinstance(params, Mapping): params = {k: NotSpecified for k in params} param_values = [] for key, default_value in params.items(): try: value = kwargs.pop(key, default_value) if value is NotSpecified: raise KeyError(key) hash(value) except KeyError: raise TypeError( "{typename} expected a keyword parameter {name!r}.".format( typename=cls.__name__, name=key ) ) except TypeError: raise TypeError( "{typename} expected a hashable value for parameter " "{name!r}, but got {value!r} instead.".format( typename=cls.__name__, name=key, value=value, ) ) param_values.append((key, value)) return tuple(param_values)
Pop entries from the `kwargs` passed to cls.__new__ based on the values in `cls.params`. Parameters ---------- kwargs : dict The kwargs passed to cls.__new__. Returns ------- params : list[(str, object)] A list of string, value pairs containing the entries in cls.params. Raises ------ TypeError Raised if any parameter values are not passed or not hashable.
28,865
def set_cache_implementation(self, cache_name, impl_name, maxsize, **kwargs): self._get_cache(cache_name).set_cache_impl(impl_name, maxsize, **kwargs)
Changes the cache implementation for the named cache
28,866
def extract_yaml(yaml_files): loaded_yaml = [] for yaml_file in yaml_files: try: with open(yaml_file, 'r') as fd: loaded_yaml.append(yaml.safe_load(fd)) except IOError as e: print('Error reading file:', yaml_file) raise e except yaml.YAMLError as e: print('Error parsing yaml in file:', yaml_file) raise e except Exception as e: print('Unexpected error while loading yaml') raise e return loaded_yaml
Take a list of yaml_files and load them to return back to the testing program
28,867
def per_chunk(iterable, n=1, fillvalue=None): args = [iter(iterable)] * n return zip_longest(*args, fillvalue=fillvalue)
From http://stackoverflow.com/a/8991553/610569 >>> list(per_chunk('abcdefghi', n=2)) [('a', 'b'), ('c', 'd'), ('e', 'f'), ('g', 'h'), ('i', None)] >>> list(per_chunk('abcdefghi', n=3)) [('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'h', 'i')]
28,868
def argument_types(self): class ArgumentsIterator(collections.Sequence): def __init__(self, parent): self.parent = parent self.length = None def __len__(self): if self.length is None: self.length = conf.lib.clang_getNumArgTypes(self.parent) return self.length def __getitem__(self, key): if not isinstance(key, int): raise TypeError("Must supply a non-negative int.") if key < 0: raise IndexError("Only non-negative indexes are accepted.") if key >= len(self): raise IndexError("Index greater than container length: " "%d > %d" % ( key, len(self) )) result = conf.lib.clang_getArgType(self.parent, key) if result.kind == TypeKind.INVALID: raise IndexError("Argument could not be retrieved.") return result assert self.kind == TypeKind.FUNCTIONPROTO return ArgumentsIterator(self)
Retrieve a container for the non-variadic arguments for this type. The returned object is iterable and indexable. Each item in the container is a Type instance.
28,869
def create(self, data): if 'id' not in data: raise KeyError('The store must have an id') if 'list_id' not in data: raise KeyError('The store must have a list_id') if 'name' not in data: raise KeyError('The store must have a name') if 'currency_code' not in data: raise KeyError('The store must have a currency_code') if not re.match(r"^[A-Z]{3}$", data['currency_code']): raise ValueError('The currency_code must be a valid 3-letter ISO 4217 currency code') response = self._mc_client._post(url=self._build_path(), data=data) if response is not None: self.store_id = response['id'] else: self.store_id = None return response
Add a new store to your MailChimp account. Error checking on the currency code verifies that it is in the correct three-letter, all-caps format as specified by ISO 4217 but does not check that it is a valid code as the list of valid codes changes over time. :param data: The request body parameters :type data: :py:class:`dict` data = { "id": string*, "list_id": string*, "name": string*, "currency_code": string* }
28,870
def plot(self): plt.rcParams[] = plt.rcParams[] = plt.rcParams[] = 2 npoint = 1000 xs = np.linspace(0, 1, npoint) xs_reverse_converted = InterfacialReactivity._reverse_convert( xs, self.factor1, self.factor2) energies = [self._get_energy(x) for x in xs_reverse_converted] plt.plot(xs, energies, ) kinks = self.get_kinks() _, x_kink, energy_kink, _, _ = zip(*kinks) plt.scatter(x_kink, energy_kink, marker=, c=, s=20) plt.scatter(self.minimum()[0], self.minimum()[1], marker=, c=, s=300) for index, x, energy, _, _ in kinks: plt.annotate( index, xy=(x, energy), xytext=(5, 30), textcoords=, ha=, va=, arrowprops=dict(arrowstyle=, connectionstyle=)).draggable() plt.xlim([-0.05, 1.05]) if self.norm: plt.ylabel() else: plt.ylabel() plt.xlabel(.format( self.c1.reduced_formula, self.c2.reduced_formula)) return plt
Plots reaction energy as a function of mixing ratio x in self.c1 - self.c2 tie line using pylab. Returns: Pylab object that plots reaction energy as a function of mixing ratio x.
28,871
def watch_and_wait(self, poll_interval=10, idle_log_timeout=None, kill_on_timeout=False, stash_log_method=None, tag_instances=False, **kwargs): return wait_for_complete(self._job_queue, job_list=self.job_list, job_name_prefix=self.basename, poll_interval=poll_interval, idle_log_timeout=idle_log_timeout, kill_on_log_timeout=kill_on_timeout, stash_log_method=stash_log_method, tag_instances=tag_instances, **kwargs)
This provides shortcut access to the wait_for_complete_function.
28,872
def _translate_response(self, response, state):
    if self.encryption_keys:
        response.parse_assertion(self.encryption_keys)
    authn_info = response.authn_info()[0]
    auth_class_ref = authn_info[0]
    timestamp = response.assertion.authn_statement[0].authn_instant
    issuer = response.response.issuer.text
    auth_info = AuthenticationInformation(
        auth_class_ref, timestamp, issuer,
    )
    subject = response.get_subject()
    name_id = subject.text if subject else None
    name_id_format = subject.format if subject else None
    attributes = self.converter.to_internal(
        self.attribute_profile, response.ava,
    )
    internal_resp = InternalData(
        auth_info=auth_info,
        attributes=attributes,
        subject_type=name_id_format,
        subject_id=name_id,
    )
    satosa_logging(logger, logging.DEBUG,
                   "backend received attributes:\n%s" %
                   json.dumps(response.ava, indent=4), state)
    return internal_resp
Translates a saml authorization response to an internal response :type response: saml2.response.AuthnResponse :rtype: satosa.internal.InternalData :param response: The saml authorization response :return: A translated internal response
28,873
def remove(self, experiment):
    # NOTE: the key, directory, and file-suffix literals below were lost in
    # extraction; the values used are plausible reconstructions.
    try:
        project_path = self.projects[self[experiment]['project']]['root']
    except KeyError:
        return
    config_path = osp.join(project_path, '.project', experiment + '.yml')
    for f in [config_path, config_path + '~', config_path + '.bak']:
        if os.path.exists(f):
            os.remove(f)
    del self[experiment]
Remove the configuration of an experiment
28,874
def mapping(self, struct, key_depth=1000, tree_depth=1, update_callable=None):
    if not tree_depth:
        return self._map_type()
    _struct = struct()
    add_struct = _struct.update if not update_callable \
        else getattr(_struct, update_callable)
    for x in range(key_depth):
        add_struct({
            self.randstr: self.mapping(
                struct, key_depth, tree_depth - 1, update_callable)
        })
    return _struct
Generates random values for dict-like objects

@struct: the dict-like type you want to fill with random data (it is
    called with no arguments to create each level of the tree)
@key_depth: #int number of random keys to include at each @tree_depth level
@tree_depth: #int dict tree dimensions size, i.e.
    1=|{key: value}|
    2=|{key: {key: value}, key2: {key2: value2}}|
@update_callable: #str name of the method which updates data in your
    dict-like structure - e.g. 'update' for :meth:builtins.dict.update

-> random @struct
..
    from collections import UserDict
    from vital.debug import RandData

    class MyDict(UserDict):
        pass

    rd = RandData(int)
    my_dict = rd.mapping(MyDict, 3, 1, 'update')
    # -> {
    #   'SE0ZNy0F6O': 42078648993195761,
    #   'pbK': 70822820981335987,
    #   '0A5Aa7': 17503122029338459}
..
28,875
def forwards(self, orm):
    "Write your forwards methods here."
    # NOTE: the model label, field values, and defaults keys below were lost
    # in extraction; the placeholders are illustrative only.
    orm['<app.Model>'].objects.get_or_create(
        app_name='<app_name>',
        model_name='<ModelName>',
        field_name='<field_name>',
        defaults={'<key>': '<value>', '<flag>': True})
Write your forwards methods here.
28,876
def as_binary(s, encoding='utf-8'):
    if isinstance(s, six.text_type):
        return s.encode(encoding)
    elif isinstance(s, six.binary_type):
        # Round-trip to validate/normalize the encoding of the input bytes.
        return s.decode(encoding).encode(encoding)
    else:
        # Error message reconstructed; the original literal was lost in extraction.
        raise ValueError('Can only convert {} or {} to binary.'.format(
            six.text_type, six.binary_type))
Force conversion of given string to binary type. Binary is ``bytes`` type for Python 3.x and ``str`` for Python 2.x . If the string is already in binary, then no conversion is done and the same string is returned and ``encoding`` argument is ignored. Parameters ---------- s: str or bytes (Python3), str or unicode (Python2) The string to convert to binary. encoding: str The encoding of the resulting binary string (default: utf-8) Raises ------ ValueError In case an input of invalid type was passed to the function. Returns ------- ``bytes`` for Python3 or ``str`` for Python 2.
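Minimal checks, assuming as_binary is importable from its module:

assert as_binary('tere') == b'tere'               # text gets encoded
assert as_binary(b'tere') == b'tere'              # binary passes through
assert as_binary('é', encoding='latin-1') == b'\xe9'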
28,877
def setup_pilotpoints_grid(ml=None, sr=None, ibound=None, prefix_dict=None,
                           every_n_cell=4, use_ibound_zones=False,
                           pp_dir='.', tpl_dir='.', shapename="pp.shp"):
    from . import pp_utils
    warnings.warn("setup_pilotpoint_grid has moved to pp_utils...", PyemuWarning)
    return pp_utils.setup_pilotpoints_grid(ml=ml, sr=sr, ibound=ibound,
                                           prefix_dict=prefix_dict,
                                           every_n_cell=every_n_cell,
                                           use_ibound_zones=use_ibound_zones,
                                           pp_dir=pp_dir, tpl_dir=tpl_dir,
                                           shapename=shapename)
setup regularly-spaced (gridded) pilot point parameterization

Parameters
----------
ml : flopy.mbase
    a flopy mbase derived type. If None, sr must not be None.
sr : flopy.utils.reference.SpatialReference
    a spatial reference used to locate the model grid in space. If None,
    ml must not be None. Default is None
ibound : numpy.ndarray
    the modflow ibound integer array. Used to set pilot points only in
    active areas. If None and ml is None, then pilot points are set in all
    rows and columns according to every_n_cell. Default is None.
prefix_dict : dict
    a dictionary of pilot point parameter prefix, layer pairs.
    example : {"hk":[0,1,2,3]} would setup pilot points with the prefix
    "hk" for model layers 1 - 4 (zero based). If None, a generic set of
    pilot points with the "pp" prefix are setup for a generic nrowXncol
    grid. Default is None
use_ibound_zones : bool
    a flag to use the greater-than-zero values in the ibound as pilot
    point zones. If False, ibound values greater than zero are treated as
    a single zone. Default is False.
pp_dir : str
    directory to write pilot point files to. Default is '.'
tpl_dir : str
    directory to write pilot point template file to. Default is '.'
shapename : str
    name of shapefile to write that contains pilot point information.
    Default is "pp.shp"

Returns
-------
pp_df : pandas.DataFrame
    a dataframe summarizing pilot point information (same information
    written to shapename)
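A hedged usage sketch; the model/file names are made up, and the import path for pp_utils is an assumption about the package layout:

import flopy
from pyemu.utils import pp_utils

ml = flopy.modflow.Modflow.load("model.nam", model_ws="template")
# "hk" pilot points in layer 0, one every 5 cells
pp_df = pp_utils.setup_pilotpoints_grid(
    ml=ml, prefix_dict={"hk": [0]}, every_n_cell=5,
    pp_dir="template", tpl_dir="template", shapename="pp.shp")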
28,878
def job_count_enabled(self):
    enabled = 0
    for job_desc in self._jobs.values():
        if job_desc['enabled']:
            enabled += 1
    return enabled
Return the number of enabled jobs. :return: The number of jobs that are enabled. :rtype: int
28,879
def upload(self, remote, reader):
    fd = self.open(remote, 'w')  # mode literal lost in extraction; a write mode is assumed
    while True:
        chunk = reader.read(512 * 1024)
        if chunk == b'':
            break
        self.write(fd, chunk)
    self.close(fd)
Uploads a file :param remote: remote file name :param reader: an object that implements the read(size) method (typically a file descriptor) :return:
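A usage sketch; `client` stands for an instance of the class defining upload(), and the paths are illustrative:

with open('report.pdf', 'rb') as reader:   # any object with read(size) works
    client.upload('/remote/report.pdf', reader)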
28,880
def restore_all_edges(self):
    # Iterate over a copy: restore_edge() mutates hidden_edges.
    for edge in list(self.hidden_edges.keys()):
        try:
            self.restore_edge(edge)
        except GraphError:
            pass
Restores all hidden edges.
28,881
def nodata(self):
    if self._nodata is None:
        self._nodata = self[0].GetNoDataValue()
    return self._nodata
Read-only property for the band nodata value, assuming single-band rasters for now.
28,882
def msg_filter(self):
    if self.query:
        msg_filter = None
    else:
        # NOTE: the parameter name, the set of valid filters, and the default
        # below were lost in extraction; these are plausible reconstructions
        # (the docstring names 'fuzzy' and 'untranslated').
        msg_filter = self._request_request('msg_filter', 'all')
        available_msg_filters = {'all', 'translated', 'untranslated', 'fuzzy'}
        if msg_filter not in available_msg_filters:
            msg_filter = 'all'
    return msg_filter
Validate/return msg_filter from request (e.g. 'fuzzy', 'untranslated'), or a default. If a query is also specified in the request, then return None.
28,883
def spec_from_thresh(thresh, labels, scores, *args, **kwargs):
    df = pd.DataFrame(list(zip(labels, np.array(scores > thresh).astype(int))))
    c = Confusion(df, *args, **kwargs)
    return c._binary_specificity
r"""Compute the specifity that a particular threshold on the scores can acheive specificity = Num_True_Negative / (Num_True_Negative + Num_False_Positive) >>> scores = np.arange(0, 1, 0.1) >>> spec_from_thresh(0.5, labels=(scores > .5).astype(int), scores=scores) 1.0 >>> spec_from_thresh(0.5, labels=(scores > .4).astype(int), scores=scores) 1.0 >>> spec_from_thresh(0.5, labels=(scores > .9).astype(int), scores=scores) 0.6
28,884
def put(self, bucket=None, key=None, upload_id=None):
    if upload_id is not None:
        return self.multipart_uploadpart(bucket, key, upload_id)
    else:
        return self.create_object(bucket, key)
Update a new object or upload a part of a multipart upload. :param bucket: The bucket (instance or id) to get the object from. (Default: ``None``) :param key: The file key. (Default: ``None``) :param upload_id: The upload ID. (Default: ``None``) :returns: A Flask response.
28,885
def ipv4_reassembly(packet, *, count=NotImplemented):
    if 'IP' in packet:
        ipv4 = packet['IP']
        if ipv4.flags.DF:
            # Don't Fragment flag set -- nothing to reassemble
            return False, None
        data = dict(
            bufid=(
                ipaddress.ip_address(ipv4.src),
                ipaddress.ip_address(ipv4.dst),
                ipv4.id,
                TP_PROTO.get(ipv4.proto).name,
            ),
            num=count,
            fo=ipv4.frag,
            ihl=ipv4.ihl,
            mf=bool(ipv4.flags.MF),
            tl=ipv4.len,
            header=bytearray(ipv4.raw_packet_cache),
            payload=bytearray(bytes(ipv4.payload)),
        )
        return True, data
    return False, None
Make data for IPv4 reassembly.
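A demonstration sketch using scapy (assumed installed); it also assumes this module's imports (ipaddress, TP_PROTO) are in scope. fragment() splits a large datagram so each piece can be fed to ipv4_reassembly():

from scapy.all import IP, fragment

frags = fragment(IP(dst="192.0.2.1") / (b"x" * 3000), fragsize=500)
for i, pkt in enumerate(frags):
    pkt = IP(bytes(pkt))   # re-parse so raw_packet_cache is populated
    ok, data = ipv4_reassembly(pkt, count=i)
    if ok:
        print(data['bufid'], 'fo =', data['fo'], 'mf =', data['mf'])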
28,886
def find_pore_to_pore_distance(network, pores1=None, pores2=None):
    # The index-normalization lines were garbled in extraction; this is a
    # reconstruction consistent with the coords[p1]/coords[p2] usage below.
    p1 = np.array(pores1, ndmin=1)
    p2 = np.array(pores2, ndmin=1)
    coords = network['pore.coords']
    return cdist(coords[p1], coords[p2])
r''' Find the distance between all pores on set one to each pore in set 2

Parameters
----------
network : OpenPNM Network Object
    The network object containing the pore coordinates
pores1 : array_like
    The pore indices of the first set
pores2 : array_like
    The pore indices of the second set. It's OK if these indices are
    partially or completely duplicating ``pores1``.

Returns
-------
A distance matrix with ``len(pores1)`` rows and ``len(pores2)`` columns.
The distance between pore *i* in ``pores1`` and *j* in ``pores2`` is
located at *(i, j)* and *(j, i)* in the distance matrix.
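A self-contained illustration of the underlying computation, with a toy array standing in for network['pore.coords']:

import numpy as np
from scipy.spatial.distance import cdist

coords = np.array([[0., 0., 0.], [1., 0., 0.], [0., 3., 4.]])
pores1, pores2 = [0, 1], [2]
print(cdist(coords[pores1], coords[pores2]))
# [[5.        ]
#  [5.09901951]]   # row i, column j holds the pores1[i] -> pores2[j] distance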
28,887
def _convert_bundle(bundle):
    meta = bundle.dataset.config.metadata
    notes = ''
    for f in bundle.dataset.files:
        # Path suffix reconstructed; the original literal was lost in extraction.
        if f.path.endswith('documentation.md'):
            contents = f.unpacked_contents
            if isinstance(contents, six.binary_type):
                contents = contents.decode()
            notes = json.dumps(contents)
            break
    # NOTE: the dict keys below were lost in extraction; they are reconstructed
    # from the values and the CKAN package_create schema, and two keys whose
    # values were also elided are left as placeholders.
    ret = {
        'name': bundle.dataset.vid.lower(),
        'title': meta.about.title,
        'author': meta.contacts.wrangler.name,
        'author_email': meta.contacts.wrangler.email,
        'maintainer': meta.contacts.maintainer.name,
        'maintainer_email': meta.contacts.maintainer.email,
        '<elided>': '',
        'notes': notes,
        'url': meta.identity.source,
        'version': bundle.dataset.version,
        '<elided>': '',
        'owner_org': CKAN_CONFIG['organization'],
    }
    return ret
Converts ambry bundle to dict ready to send to CKAN API. Args: bundle (ambry.bundle.Bundle): bundle to convert. Returns: dict: dict to send to CKAN to create dataset. See http://docs.ckan.org/en/latest/api/#ckan.logic.action.create.package_create
28,888
def save_session(self, sid, session, namespace=None):
    namespace = namespace or '/'
    eio_session = self.eio.get_session(sid)
    eio_session[namespace] = session
Store the user session for a client. :param sid: The session id of the client. :param session: The session dictionary. :param namespace: The Socket.IO namespace. If this argument is omitted the default namespace is used.
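Typical use from a python-socketio event handler; `sio` stands for the Server instance whose save_session() is shown above:

@sio.on('login')
def on_login(sid, data):
    session = sio.get_session(sid)
    session['username'] = data['username']
    sio.save_session(sid, session)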
28,889
def abbreviate(labels, rfill=' '):  # fill character reconstructed; a space is assumed
    max_len = max(len(l) for l in labels)
    # range end bumped to max_len + 1 so unique labels always disambiguate,
    # even when the full length is needed (the original stopped one short).
    for i in range(1, max_len + 1):
        abbrev = [l[:i].ljust(i, rfill) for l in labels]
        if len(abbrev) == len(set(abbrev)):
            break
    return abbrev
Abbreviate labels without introducing ambiguities.
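The function above is self-contained, so its behavior can be checked directly:

print(abbreviate(['apple', 'apricot', 'banana']))
# -> ['app', 'apr', 'ban']  (3 chars is the first unambiguous prefix length)
print(abbreviate(['ab', 'abc'], rfill='_'))
# -> ['ab_', 'abc']  (short labels get padded with rfill)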
28,890
def duplicate(self):
    instance = self.__class__(name=self.name, description=self.description)
    for line in self.lines:
        instance.lines.append(line.duplicate())
    return instance
Returns a copy of the current group, including its lines. @returns: Group
28,891
def detect_mean_shift(self, ts, B=1000):
    x = np.arange(0, len(ts))
    stat_ts_func = self.compute_balance_mean_ts
    null_ts_func = self.shuffle_timeseries
    stats_ts, pvals, nums = self.get_ts_stats_significance(
        x, ts, stat_ts_func, null_ts_func, B=B, permute_fast=True)
    return stats_ts, pvals, nums
Detect mean shift in a time series. B is number of bootstrapped samples to draw.
28,892
def maintainer(self):
    maintainer = namedtuple('Maintainer', 'name email')
    return maintainer(name=self._package['maintainer'],
                      email=self._package['maintainer_email'])
>>> package = yarg.get('yarg') >>> package.maintainer Maintainer(name=u'Kura', email=u'[email protected]')
28,893
def process_climis_livestock_data(data_dir: str):
    # NOTE: several string literals in this function were lost in extraction;
    # tokens marked <elided> are unrecoverable placeholders, and the split('_')
    # separators are plausible reconstructions.
    records = []
    livestock_data_dir = f"{data_dir}/Climis South Sudan Livestock Data"

    for filename in glob(f"{livestock_data_dir}/Livestock Body Condition/*2017.csv"):
        records += process_file_with_single_table(
            filename,
            lambda ind: f"Percentage of {filename.split('_')[-3].lower()} "
                        f"with body condition {ind.lower()}",
            lambda f: f.split("_")[-2],
        )

    for filename in glob(f"{livestock_data_dir}/Livestock Production/*2017.csv"):
        records += process_file_with_single_table(
            filename,
            lambda ind: "Percentage of householding at least milking one of their livestocks",
            lambda f: f.split("_")[1],
        )

    disease_acronym_dict = {
        "FMD": "Foot and Mouth Disease (FMD)",
        "LSD": "Lumpy Skin Disease (LSD)",
        "CBPP": "Contagious Bovine Pleuropneumonia (CBPP)",
        "CCPP": "Contagious Caprine Pleuropneumonia (CCPP)",
        "NC": "NC",
        "PPR": "Peste des Petits Ruminants (PPR)",
        "Others": "Other diseases",
    }
    func = (lambda k, i:
            f"Percentage of livestock with {disease_acronym_dict[k]} "
            f"that are {i.lower().strip()}")
    livestock_disease_header_dict = {
        k: partial(func, k) for k in disease_acronym_dict
    }

    livestock_migration_header_dict = {
        "Livestock migration":
            lambda i: f"Percentage of livestock migrating {i.split()[-1].lower()}",
        "Distance covered":
            lambda i: "Distance covered by migrating livestock",
        "Proportion of livestock that migrated":
            lambda i: "Percentage of livestock that migrated",
        "Migration normal at this time of the year":
            lambda i: f"Migration normal at this time of year, {i}",
        "Duration in months when the migrated animals are expected to be back after":
            lambda i: "Duration in months when the migrated animals are expected to be back after",
        "Reasons for livestock migration":
            lambda i: f"Percentage of livestock migrating due to {i.lower()}",
    }

    def process_directory(dirname, header_dict):
        return pd.concat([
            df for df in [
                process_file_with_multiple_tables(f, header_dict)
                for f in glob(f"{livestock_data_dir}/{dirname}/*2017.csv")
            ] if df is not None
        ])

    # The replace() targets were lost in extraction; the intent appears to be
    # splicing the livestock type i into each ownership header template.
    func2 = (lambda k, i: k.replace('<elided>', i.lower())
                           .replace('<elided>', ' ' + i.lower())
                           .replace('<elided>', i.lower()))
    livestock_ownership_headers = [
        "Average current stock per household",
        "Average number of animals born per household during last 4 weeks",
        "Average number of animals acquired per household during last 4 weeks (dowry, purchase, gift)",
        "Average number of animals given out as bride price/gift per household during last 4 weeks per household",
        "Average number of animals sold per household during last 4 weeks household",
        "Average price of animal sold (SSP)",
        "Average number of animals exchanged for grain per household during last 4 weeks",
        "Average number of animals died/slaughtered/lost per household during last 4 weeks",
    ]
    livestock_ownership_header_dict = {
        k: partial(func2, k) for k in livestock_ownership_headers
    }

    ownership_df = process_directory("Livestock Ownership", livestock_ownership_header_dict)
    disease_df = process_directory("Livestock Diseases", livestock_disease_header_dict)
    livestock_migration_df = process_directory("Livestock Migration", livestock_migration_header_dict)

    livestock_pasture_header_dict = {
        "Pasture condtion":  # (sic) key matches the source CSV header
            lambda i: f"Percentage of livestock pasture in {i.lower()} condition",
        "Pasture condition compared to similar time in a normal year":
            lambda i: f"Percentage of livestock pasture in {i.lower()} condition compared to a similar time in a normal year",
        "Browse condition":
            lambda i: f"Percentage of livestock pasture in {i.lower()} browse condition",
        "Browse condition compared to similar time in a normal year":
            lambda i: f"Percentage of livestock pasture in {i.lower()} browse condition compared to a similar time in a normal year",
        "Presence of constraints in accessing forage":
            lambda i: f"Percentage reporting the {('presence' if i == 'Yes' else 'absence')} of constraints in accessing forage",
        "Main forage constraints":
            lambda i: f"Percentage reporting {i.lower()} as the main forage constraint",
    }
    livestock_pasture_df = process_directory("Livestock Pasture", livestock_pasture_header_dict)

    livestock_water_sources_header_dict = {
        "Main water sources":
            lambda i: f"Percentage of livestock whose main water source is {i.lower()}",
        "Number of days livestock have been watered in the last 7 days":
            lambda i: f"Number of days {i.lower()} have been watered in the last 7 days",
    }
    livestock_water_sources_df = process_directory(
        "Livestock Water Sources", livestock_water_sources_header_dict)

    for filename in glob(f"{livestock_data_dir}/Livestock Loss/*2017.csv"):
        records += process_file_with_single_table(
            filename,
            lambda ind: f"Percentage of {filename.split('_')[-3].lower()} "
                        f"loss accounted for by {ind.lower()}",
            lambda f: f.split("_")[-2],
        )

    for record in records:
        if isinstance(record["Value"], str):
            record["Value"] = record["Value"].replace("%", "")

    livestock_prices_df = pd.concat([
        make_livestock_prices_table(f)
        for f in glob(f"{livestock_data_dir}/Livestock Market Prices/*2017.csv")
    ])

    climis_livestock_data_df = pd.concat([
        pd.DataFrame(records),
        disease_df,
        ownership_df,
        livestock_prices_df,
        livestock_migration_df,
        livestock_pasture_df,
        livestock_water_sources_df,
    ])
    return climis_livestock_data_df
Process CliMIS livestock data.
28,894
def read(self, filepath):
    # Mode and encoding literals reconstructed; 'r'/'utf-8' are assumed.
    fp = codecs.open(filepath, 'r', encoding='utf-8')
    try:
        self.read_file(fp)
    finally:
        fp.close()
Read the metadata values from a file path.
28,895
def down_alpha_beta(returns, factor_returns, **kwargs):
    return down(returns, factor_returns, function=alpha_beta_aligned, **kwargs)
Computes alpha and beta for periods when the benchmark return is negative. Parameters ---------- see documentation for `alpha_beta`. Returns ------- alpha : float beta : float
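A usage sketch with synthetic daily returns; it assumes this function and its helpers (down, alpha_beta_aligned) are importable from the same module:

import pandas as pd

idx = pd.date_range('2020-01-01', periods=6, freq='D')
factor_returns = pd.Series([0.01, -0.02, 0.005, -0.01, 0.02, -0.005], index=idx)
returns = 0.5 * factor_returns + 0.001   # synthetic portfolio
alpha, beta = down_alpha_beta(returns, factor_returns)
print(beta)   # ~0.5: the portfolio's sensitivity on down-market days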
28,896
def add_toc_entry(self, title, level, slide_number):
    # Dict keys reconstructed from the parameter names.
    self.__toc.append({'title': title, 'slide': slide_number, 'level': level})
Adds a new entry to current presentation Table of Contents.
28,897
def arp_suppression(self, **kwargs):
    name = kwargs.pop('name')
    enable = kwargs.pop('enable', True)
    get = kwargs.pop('get', False)
    callback = kwargs.pop('callback', self._callback)
    method_class = self._interface
    arp_args = dict(name=name)
    if name:
        if not pynos.utilities.valid_vlan_id(name):
            raise InvalidVlanId("`name` must be between `1` and `8191`")
    # NOTE: the generated-method name, XPath, and attribute literals below
    # were lost in extraction; the values are placeholders/reconstructions.
    arp_suppression = getattr(method_class, '<generated_arp_suppress_method>')
    config = arp_suppression(**arp_args)
    if get:
        return callback(config, handler='get_config')
    if not enable:
        config.find('<elided-xpath>').set('operation', 'delete')
    return callback(config)
Enable ARP Suppression on a VLAN.

Args:
    name: VLAN name on which ARP suppression needs to be enabled.
    enable (bool): If ARP suppression should be enabled or disabled.
        Default: ``True``.
    get (bool): Get config instead of editing config. (True, False)
    callback (function): A function executed upon completion of the
        method. The only parameter passed to `callback` will be the
        ``ElementTree`` `config`.

Returns:
    Return value of `callback`.

Raises:
    KeyError: if `name` is not passed.
    ValueError: if `name` is invalid.

Examples:
    >>> import pynos.device
    >>> switches = ['10.24.39.211', '10.24.39.203']
    >>> auth = ('admin', 'password')
    >>> for switch in switches:
    ...     conn = (switch, '22')
    ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
    ...         output = dev.interface.arp_suppression(name='89')
    ...         output = dev.interface.arp_suppression(get=True, name='89')
    ...         output = dev.interface.arp_suppression(enable=False, name='89')
    ...         # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    KeyError
28,898
def _check_events(tk):
    # NOTE: the debug-print string, the Exception-handler body, and the
    # `finally:` keyword were lost in extraction; they are reconstructed here
    # following the usual mtTkinter pattern (assumes `import sys`).
    used = False
    try:
        while True:
            try:
                method, args, kwargs, response_queue = tk.tk._event_queue.get_nowait()
            except queue.Empty:
                break
            else:
                used = True
                if tk.tk._debug >= 2:
                    print('Calling event from main thread:',
                          method.__name__, args, kwargs)
                try:
                    response_queue.put((False, method(*args, **kwargs)))
                except SystemExit:
                    raise
                except Exception:
                    # Hand the exception back to the calling thread.
                    response_queue.put((True, sys.exc_info()))
    finally:
        if used:
            tk.after_idle(_check_events, tk)
        else:
            tk.after(tk.tk._check_period, _check_events, tk)
Checks events in the queue on a given Tk instance
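A sketch of the producer half of this queue protocol: a worker thread posts a call and blocks on its private response queue until _check_events services it (names mirror the code above; the helper itself is illustrative):

import queue

def call_in_gui_thread(tk, method, *args, **kwargs):
    response_queue = queue.Queue(maxsize=1)
    tk.tk._event_queue.put((method, args, kwargs, response_queue))
    is_error, value = response_queue.get()   # filled by _check_events
    if is_error:
        raise RuntimeError(value)
    return value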
28,899
def raise_thread_exception(thread_id, exception):
    if current_platform == "CPython":
        _raise_thread_exception_cpython(thread_id, exception)
    else:
        message = "Setting thread exceptions (%s) is not supported for your current platform (%r)."
        exctype = (exception if inspect.isclass(exception) else type(exception)).__name__
        logger.critical(message, exctype, current_platform)
Raise an exception in a thread. Currently, this is only available on CPython. Note: This works by setting an async exception in the thread. This means that the exception will only get called the next time that thread acquires the GIL. Concretely, this means that this middleware can't cancel system calls.
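The CPython branch typically relies on the PyThreadState_SetAsyncExc C-API call; a sketch of what _raise_thread_exception_cpython is assumed to do (not the library's verbatim implementation):

import ctypes
import inspect

def _raise_thread_exception_cpython(thread_id, exception):
    exctype = exception if inspect.isclass(exception) else type(exception)
    count = ctypes.pythonapi.PyThreadState_SetAsyncExc(
        ctypes.c_long(thread_id), ctypes.py_object(exctype))
    if count > 1:
        # Hit more than one thread state: undo by clearing the pending exception.
        ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread_id), None)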