def msvc14_get_vc_env(plat_spec):
    try:
        return get_unpatched(msvc14_get_vc_env)(plat_spec)
    except distutils.errors.DistutilsPlatformError:
        pass

    try:
        return EnvironmentInfo(plat_spec, vc_min_ver=14.0).return_env()
    except distutils.errors.DistutilsPlatformError as exc:
        _augment_exception(exc, 14.0)
        raise
Patched "distutils._msvccompiler._get_vc_env" for support extra compilers. Set environment without use of "vcvarsall.bat". Known supported compilers ------------------------- Microsoft Visual C++ 14.0: Microsoft Visual C++ Build Tools 2015 (x86, x64, arm) Microsoft Visual Studio 2017 (x86, x64, arm, arm64) Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64) Parameters ---------- plat_spec: str Target architecture. Return ------ environment: dict
def get_request_message(cls, remote_info):
    if remote_info in cls.__remote_info_cache:
        return cls.__remote_info_cache[remote_info]
    else:
        return remote_info.request_type()
Gets request message or container from remote info.

Args:
    remote_info: Instance of protorpc.remote._RemoteMethodInfo corresponding
        to a method.

Returns:
    Either an instance of the request type from the remote or the
    ResourceContainer that was cached with the remote method.
def recalc_M(S, D_cba, Y, nr_sectors):
    Y_diag = ioutil.diagonalize_blocks(Y.values, blocksize=nr_sectors)
    Y_inv = np.linalg.inv(Y_diag)
    M = D_cba.dot(Y_inv)
    if type(D_cba) is pd.DataFrame:
        M.columns = D_cba.columns
        M.index = D_cba.index
    return M
Calculate Multipliers based on footprints.

Parameters
----------
D_cba : pandas.DataFrame or numpy array
    Footprint per sector and country
Y : pandas.DataFrame or numpy array
    Final demand: aggregated across categories or just one category, one
    column per country. This will be diagonalized per country block.
    The diagonalized form must be invertible for this method to work.
nr_sectors : int
    Number of sectors in the MRIO

Returns
-------
pandas.DataFrame or numpy.array
    Multipliers M
    The type is determined by the type of D_cba.
    If DataFrame index/columns as D_cba
def _read_para_hip_transport_mode(self, code, cbit, clen, *, desc, length, version):
    if clen % 2 != 0:
        raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid format')

    _port = self._read_unpack(2)
    _mdid = list()
    for _ in range((clen - 2) // 2):
        _mdid.append(_TP_MODE_ID.get(self._read_unpack(2), 'Unassigned'))

    hip_transport_mode = dict(
        type=desc,
        critical=cbit,
        length=clen,
        port=_port,
        id=tuple(_mdid),
    )

    _plen = length - clen
    if _plen:
        self._read_fileng(_plen)

    return hip_transport_mode
Read HIP HIP_TRANSPORT_MODE parameter.

Structure of HIP HIP_TRANSPORT_MODE parameter [RFC 6261]:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |             Type              |             Length            |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |             Port              |           Mode ID #1          |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |          Mode ID #2           |           Mode ID #3          |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |          Mode ID #n           |            Padding            |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    Octets      Bits        Name                            Description
      0           0     hip_transport_mode.type         Parameter Type
      1          15     hip_transport_mode.critical     Critical Bit
      2          16     hip_transport_mode.length       Length of Contents
      4          32     hip_transport_mode.port         Port
      6          48     hip_transport_mode.id           Mode ID
      ?           ?     -                               Padding
def get_root_objective_ids(self):
    url_path = construct_url('rootids', bank_id=self._catalog_idstr)
    id_list = list()
    for identifier in self._get_request(url_path)['ids']:
        id_list.append(Id(idstr=identifier))
    return id_objects.IdList(id_list)
Gets the root objective Ids in this hierarchy.

return: (osid.id.IdList) - the root objective Ids
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented.
def _canonicalize_fraction(cls, non_repeating, repeating):
    if repeating == []:
        return (non_repeating, repeating)

    repeat_len = len(repeating)

    # strip all exact copies of the repeating part from the end
    # of the non-repeating part
    indices = range(len(non_repeating), -1, -repeat_len)
    end = next(
        i for i in indices if non_repeating[(i - repeat_len):i] != repeating
    )

    # absorb a partial copy of the repeating part by rotating the cycle
    indices = range(min(repeat_len - 1, end), 0, -1)
    index = next(
        (i for i in indices if repeating[-i:] == non_repeating[(end - i):end]),
        0
    )

    return (
        non_repeating[:(end - index)],
        repeating[-index:] + repeating[:-index]
    )
If the same fractional value can be represented by stripping repeating
part from ``non_repeating``, do it.

:param non_repeating: non repeating part of fraction
:type non_repeating: list of int
:param repeating: repeating part of fraction
:type repeating: list of int
:returns: new non_repeating and repeating parts
:rtype: tuple of list of int * list of int

Complexity: O(len(non_repeating))
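A worked example of the stripping behavior (the enclosing class is omitted in the snippet, so ``cls`` is passed as ``None`` here; it is unused by the body above). 0.123(3)... and 0.12(3)... denote the same value, so the trailing repeated digit is absorbed into the cycle:

_canonicalize_fraction(None, [1, 2, 3], [3])
# -> ([1, 2], [3])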
def use_plenary_hierarchy_view(self):
    self._hierarchy_view = PLENARY
    for session in self._get_provider_sessions():
        try:
            session.use_plenary_hierarchy_view()
        except AttributeError:
            pass
Pass through to provider HierarchyLookupSession.use_plenary_hierarchy_view
def _chk_docopt_exit(self, args, exp_letters):
    if args is None:
        args = sys.argv[1:]
    keys_all = self.exp_keys.union(self.exp_elems)
    if exp_letters:
        keys_all |= exp_letters
    unknown_args = self._chk_docunknown(args, keys_all)
    if unknown_args:
        raise RuntimeError("{USAGE}\n **FATAL: UNKNOWN ARGS: {UNK}".format(
            USAGE=self.doc, UNK=" ".join(unknown_args)))
Check if docopt exit was for an unknown argument.
def iterable(self, iterable_name, *, collection, attribute, word, func=None,
             operation=None):
    if func is None and operation is None:
        raise ValueError('Provide a function or an operation to apply')
    elif func is not None and operation is not None:
        raise ValueError(
            'Provide either a function or an operation but not both')

    current_att = self._attribute
    self._attribute = iterable_name

    word = self._parse_filter_word(word)
    collection = self._get_mapping(collection)
    attribute = self._get_mapping(attribute)

    if func is not None:
        sentence = self._prepare_function(func, attribute, word)
    else:
        sentence = self._prepare_sentence(attribute, operation, word)

    filter_str, attrs = sentence
    filter_data = '{}/{}(a:a/{})'.format(collection, iterable_name,
                                         filter_str), attrs
    self._add_filter(*filter_data)

    self._attribute = current_att

    return self
Performs a filter with the OData 'iterable_name' keyword on the collection

For example:
q.iterable('any', collection='email_addresses', attribute='address',
operation='eq', word='[email protected]')

will transform to a filter such as:
emailAddresses/any(a:a/address eq '[email protected]')

:param str iterable_name: the OData name of the iterable
:param str collection: the collection to apply the any keyword on
:param str attribute: the attribute of the collection to check
:param str word: the word to check
:param str func: the logical function to apply to the attribute inside
    the collection
:param str operation: the logical operation to apply to the attribute
    inside the collection
:rtype: Query
def _plot_filter(filters, n, eigenvalues, sum, title, ax, **kwargs):
    if eigenvalues is None:
        eigenvalues = (filters.G._e is not None)
    if sum is None:
        sum = filters.n_filters > 1
    if title is None:
        title = repr(filters)
    return _plt_plot_filter(filters, n=n, eigenvalues=eigenvalues, sum=sum,
                            title=title, ax=ax, **kwargs)
r"""Plot the spectral response of a filter bank. Parameters ---------- n : int Number of points where the filters are evaluated. eigenvalues : boolean Whether to show the eigenvalues of the graph Laplacian. The eigenvalues should have been computed with :meth:`~pygsp.graphs.Graph.compute_fourier_basis`. By default, the eigenvalues are shown if they are available. sum : boolean Whether to plot the sum of the squared magnitudes of the filters. Default True if there is multiple filters. title : str Title of the figure. ax : :class:`matplotlib.axes.Axes` Axes where to draw the graph. Optional, created if not passed. Only available with the matplotlib backend. kwargs : dict Additional parameters passed to the matplotlib plot function. Useful for example to change the linewidth, linestyle, or set a label. Only available with the matplotlib backend. Returns ------- fig : :class:`matplotlib.figure.Figure` The figure the plot belongs to. Only with the matplotlib backend. ax : :class:`matplotlib.axes.Axes` The axes the plot belongs to. Only with the matplotlib backend. Notes ----- This function is only implemented for the matplotlib backend at the moment. Examples -------- >>> import matplotlib >>> G = graphs.Logo() >>> mh = filters.MexicanHat(G) >>> fig, ax = mh.plot()
def _restore(self, builder):
    builder.with_trashed()

    return builder.update(
        {builder.get_model().get_deleted_at_column(): None})
The restore extension.

:param builder: The query builder
:type builder: orator.orm.builder.Builder
def convert_numeric_id_to_id36(numeric_id):
    if not isinstance(numeric_id, six.integer_types) or numeric_id < 0:
        raise ValueError("must supply a positive int/long")

    alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
    alphabet_len = len(alphabet)

    current_number = numeric_id
    base36 = []

    if 0 <= current_number < alphabet_len:
        return alphabet[current_number]
    while current_number != 0:
        current_number, rem = divmod(current_number, alphabet_len)
        base36.append(alphabet[rem])
    return ''.join(reversed(base36))
Convert an integer into its base36 string representation.

This method has been cleaned up slightly to improve readability. For more
info see:

https://github.com/reddit/reddit/blob/master/r2/r2/lib/utils/_utils.pyx
https://www.reddit.com/r/redditdev/comments/n624n/submission_ids_question/
https://en.wikipedia.org/wiki/Base36
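A quick sanity check of the encoding (the values below are standard base36, independent of this library):

convert_numeric_id_to_id36(0)        # '0'
convert_numeric_id_to_id36(35)       # 'z'
convert_numeric_id_to_id36(36)       # '10'
convert_numeric_id_to_id36(1234567)  # 'qglj'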
def update_ledger(self, ledger_id, description=None):
    arguments = {'description': description}
    return self.do_req('PUT',
                       self.merchant_api_base_url + '/ledger/'
                       + ledger_id + '/', arguments)
Update ledger info

Arguments:
    ledger_id: Ledger id assigned by mCASH
    description: Description of the Ledger and its usage
def _remove_ordered_from_queue(self, last_caught_up_3PC=None):
    to_remove = []
    for i, msg in enumerate(self.outBox):
        if isinstance(msg, Ordered) and \
                (not last_caught_up_3PC or
                 compare_3PC_keys((msg.viewNo, msg.ppSeqNo),
                                  last_caught_up_3PC) >= 0):
            to_remove.append(i)

    self.logger.trace('{} going to remove {} Ordered messages from outbox'
                      .format(self, len(to_remove)))

    removed = []
    for i in reversed(to_remove):
        removed.insert(0, self.outBox[i])
        del self.outBox[i]
    return removed
Remove any Ordered messages that the replica might be sending to the node
which are less than or equal to `last_caught_up_3PC`, if
`last_caught_up_3PC` is passed; otherwise remove all Ordered messages.
Needed during catchup.
def create_hierarchy(self, *args, **kwargs):
    return Hierarchy(
        self._provider_manager,
        self._get_provider_session(
            'hierarchy_admin_session').create_hierarchy(*args, **kwargs),
        self._runtime,
        self._proxy)
Pass through to provider HierarchyAdminSession.create_hierarchy
def eval(self, iteration, feval):
    return self.bst.eval_set(self.watchlist, iteration, feval)
Evaluate the CVPack for one iteration.
def ParseFromUnicode(self, value):
    precondition.AssertType(value, Text)
    value = value.strip()

    super(ClientURN, self).ParseFromUnicode(value)

    match = self.CLIENT_ID_RE.match(self._string_urn)
    if not match:
        raise type_info.TypeValueError("Client urn malformed: %s" % value)

    clientid = match.group("clientid")
    clientid_correctcase = "".join((clientid[0].upper(),
                                    clientid[1:].lower()))

    self._string_urn = self._string_urn.replace(clientid,
                                                clientid_correctcase, 1)
Parse a string into a client URN.

Convert case so that all URNs are of the form C.[0-9a-f].

Args:
    value: string value to parse
def depends(self, d):
    # note: list.sort() returns None, so sort via sorted() to keep the
    # merged dependency list instead of discarding it
    self.dependencies_ = sorted(unique(self.dependencies_ + d))
Adds additional instances of 'VirtualTarget' that this one depends on.
def get_min_distance(self, mesh):
    dists = [surf.get_min_distance(mesh) for surf in self.surfaces]
    return numpy.min(dists, axis=0)
For each point in ``mesh`` compute the minimum distance to each surface element and return the smallest value. See :meth:`superclass method <.base.BaseSurface.get_min_distance>` for spec of input and result values.
def set_model(self, model):
    self._model = model

    self._query.from_(model.get_table())

    return self
Set a model instance for the model being queried.

:param model: The model instance
:type model: orator.orm.Model

:return: The current Builder instance
:rtype: Builder
def delete_policy(name, policy_name, region=None, key=None, keyid=None,
                  profile=None):
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    if not exists(name, region, key, keyid, profile):
        return True
    try:
        conn.delete_lb_policy(name, policy_name)
        log.info('Deleted policy %s on ELB %s', policy_name, name)
        return True
    except boto.exception.BotoServerError as e:
        log.error('Failed to delete policy %s on ELB %s: %s',
                  policy_name, name, e.message,
                  exc_info_on_loglevel=logging.DEBUG)
        return False
Delete an ELB policy.

.. versionadded:: 2016.3.0

CLI example:

.. code-block:: bash

    salt myminion boto_elb.delete_policy myelb mypolicy
def get_layout(self, name):
    layout_chain = []

    try:
        json_data = self.json_files[self.layout_names[name]]
    except KeyError:
        log.error('Could not find layout: %s', name)
        log.error('Layouts path: %s', self.layout_path)
        raise
    layout_chain.append(Layout(name, json_data))

    # walk up the chain of parent layouts
    parent = layout_chain[-1].parent()
    while parent is not None:
        parent_path = None
        for path in self.json_file_paths:
            if os.path.normcase(os.path.normpath(parent)) in os.path.normcase(path):
                parent_path = path
        if parent_path is None:
            # report the parent we failed to resolve, not the unset path
            raise UnknownLayoutPathException('Could not find: {}'.format(parent))
        json_data = self.json_files[parent_path]
        layout_chain.append(Layout(parent_path, json_data))
        parent = layout_chain[-1].parent()

    # merge the chain into a single layout
    layout = self.squash_layouts(layout_chain)

    return layout
Returns the layout with the given name
def use_active_composition_view(self):
    self._operable_views['composition'] = ACTIVE
    for session in self._get_provider_sessions():
        try:
            session.use_active_composition_view()
        except AttributeError:
            pass
Pass through to provider CompositionLookupSession.use_active_composition_view
def get_rendering_cache_key(placeholder_name, contentitem):
    if not contentitem.pk:
        return None
    return "contentitem.@{0}.{1}.{2}".format(
        placeholder_name,
        contentitem.plugin.type_name,
        contentitem.pk,
    )
Return a cache key for the content item output.

.. seealso::

    The :func:`ContentItem.clear_cache() <fluent_contents.models.ContentItem.clear_cache>`
    function can be used to remove the cache keys of a retrieved object.
def strip_path_prefix(ipath, prefix):
    if prefix is None:
        return ipath
    return ipath[len(prefix):] if ipath.startswith(prefix) else ipath
Strip prefix from path.

Args:
    ipath: input path
    prefix: the prefix to remove, if it is found in :ipath:

Examples:
    >>> strip_path_prefix("/foo/bar", "/bar")
    '/foo/bar'
    >>> strip_path_prefix("/foo/bar", "/")
    'foo/bar'
    >>> strip_path_prefix("/foo/bar", "/foo")
    '/bar'
    >>> strip_path_prefix("/foo/bar", "None")
    '/foo/bar'
def startswith_field(field, prefix):
    if prefix.startswith("."):
        return True
    if field.startswith(prefix):
        if len(field) == len(prefix) or field[len(prefix)] == ".":
            return True
    return False
RETURN True IF field PATH STRING STARTS WITH prefix PATH STRING
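A few illustrative calls, following the dot-delimited path semantics the docstring implies:

startswith_field("a.b.c", "a.b")  # True  - "a.b" is a path prefix
startswith_field("a.bc", "a.b")   # False - next character is not "."
startswith_field("a.b", "a.b")    # True  - exact match counts
startswith_field("a.b.c", ".")    # True  - "." prefixes every path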
def v_type_extension(ctx, stmt):
    (modulename, identifier) = stmt.keyword
    revision = stmt.i_extension_revision
    module = modulename_to_module(stmt.i_module, modulename, revision)
    if module is None:
        return
    if identifier not in module.i_extensions:
        if module.i_modulename == stmt.i_orig_module.i_modulename:
            if identifier not in stmt.i_orig_module.i_extensions:
                err_add(ctx.errors, stmt.pos, 'EXTENSION_NOT_DEFINED',
                        (identifier, module.arg))
                return
            else:
                stmt.i_extension = stmt.i_orig_module.i_extensions[identifier]
        else:
            err_add(ctx.errors, stmt.pos, 'EXTENSION_NOT_DEFINED',
                    (identifier, module.arg))
            return
    else:
        stmt.i_extension = module.i_extensions[identifier]
    ext_arg = stmt.i_extension.search_one('argument')
    if stmt.arg is not None and ext_arg is None:
        err_add(ctx.errors, stmt.pos, 'EXTENSION_ARGUMENT_PRESENT',
                identifier)
    elif stmt.arg is None and ext_arg is not None:
        err_add(ctx.errors, stmt.pos, 'EXTENSION_NO_ARGUMENT_PRESENT',
                identifier)
verify that the extension matches the extension definition
def content(self, output=None, str_output=None, **kwargs):
    if self.response.mimetype != "text/html":
        raise Failure(_("expected request to return HTML, but it returned {}").format(
            self.response.mimetype))

    # suppress parser warnings while building the soup
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        content = BeautifulSoup(self.response.data, "html.parser")

    return self._search_page(
        output,
        str_output,
        content,
        lambda regex, content: any(
            regex.search(str(tag)) for tag in content.find_all(**kwargs)))
Searches for `output` regex within HTML page. kwargs are passed to BeautifulSoup's find function to filter for tags.
def create(handler, item_document):
    data = {'operation': 'create', 'item': json.load(item_document)}
    handler.invoke(data)
Create a new item from a JSON document
def set_vm_status(self, device='FLOPPY', boot_option='BOOT_ONCE',
                  write_protect='YES'):
    return self._call_method('set_vm_status', device, boot_option,
                             write_protect)
Sets the Virtual Media drive status and allows the boot options for booting from the virtual media.
def Parse(self, cmd, args, stdout, stderr, return_val, time_taken,
          knowledge_base):
    _ = stderr, time_taken, args, knowledge_base  # Unused.
    self.CheckReturn(cmd, return_val)

    plist = biplist.readPlist(io.BytesIO(stdout))

    if len(plist) > 1:
        raise parser.ParseError("SPHardwareDataType plist has too many items.")

    hardware_list = plist[0]["_items"][0]
    serial_number = hardware_list.get("serial_number", None)
    system_product_name = hardware_list.get("machine_model", None)
    bios_version = hardware_list.get("boot_rom_version", None)

    yield rdf_client.HardwareInfo(
        serial_number=serial_number,
        bios_version=bios_version,
        system_product_name=system_product_name)
Parse the system profiler output. We get it in the form of a plist.
def inserir(self, id_brand, name):
    model_map = dict()
    model_map['name'] = name
    model_map['id_brand'] = id_brand

    code, xml = self.submit({'model': model_map}, 'POST', 'model/')

    return self.response(code, xml)
Inserts a new Model and returns its identifier

:param id_brand: Identifier of the Brand. Integer value and greater than zero.
:param name: Model name. String with a minimum 3 and maximum of 100 characters

:return: Dictionary with the following structure:

::

    {'model': {'id': < id_model >}}

:raise InvalidParameterError: The identifier of Brand or name is null or invalid.
:raise NomeMarcaModeloDuplicadoError: There is already a registered Model with the value of name and brand.
:raise MarcaNaoExisteError: Brand not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
def coverage_interval_from_bed(bed_file, per_chrom=True):
    total_starts = {}
    total_ends = {}
    bed_bases = collections.defaultdict(int)
    with utils.open_gzipsafe(bed_file) as in_handle:
        for line in in_handle:
            parts = line.split()
            if len(parts) >= 3:
                chrom, start, end = parts[:3]
                if chromhacks.is_autosomal(chrom):
                    start = int(start)
                    end = int(end)
                    bed_bases[chrom] += (end - start)
                    total_starts[chrom] = min([start, total_starts.get(chrom, sys.maxsize)])
                    total_ends[chrom] = max([end, total_ends.get(chrom, 0)])
    if per_chrom:
        freqs = [float(bed_bases[c]) / float(total_ends[c] - total_starts[c])
                 for c in sorted(bed_bases.keys())]
    elif len(bed_bases) > 0:
        freqs = [sum([bed_bases[c] for c in sorted(bed_bases.keys())]) /
                 sum([float(total_ends[c] - total_starts[c])
                      for c in sorted(bed_bases.keys())])]
    else:
        freqs = []
    if any([f >= 0.40 for f in freqs]):
        return "genome"
    else:
        return "targeted"
Calculate a coverage interval for the current region BED.

This helps correctly work with cases of uneven coverage across an analysis
genome. strelka2 and other model based callers have flags for targeted and
non-targeted runs, which depend on the local context.

Checks coverage per chromosome, avoiding non-standard chromosomes, if
per_chrom is set. Otherwise does a global check over all regions. The
global check performs better for strelka2 but not for DeepVariant:

https://github.com/bcbio/bcbio_validations/tree/master/deepvariant#deepvariant-v06-release-strelka2-stratification-and-initial-gatk-cnn
def download_shared_files(job, config):
    job.fileStore.logToMaster('Downloading shared reference files')
    shared_files = {'genome_fasta', 'genome_fai', 'genome_dict'}
    nonessential_files = {'genome_fai', 'genome_dict'}

    # extend the file set based on the configured pipeline stages
    if config.run_bwa:
        shared_files |= {'amb', 'ann', 'bwt', 'pac', 'sa', 'alt'}
        nonessential_files.add('alt')
    if config.preprocess:
        shared_files |= {'g1k_indel', 'mills', 'dbsnp'}
    if config.run_vqsr:
        shared_files |= {'g1k_snp', 'mills', 'dbsnp', 'hapmap', 'omni'}
    if config.run_oncotator:
        shared_files.add('oncotator_db')

    for name in shared_files:
        try:
            url = getattr(config, name, None)
            if url is None:
                continue
            setattr(config, name, job.addChildJobFn(download_url_job,
                                                    url,
                                                    name=name,
                                                    s3_key_path=config.ssec,
                                                    disk='15G').rv())
        finally:
            if getattr(config, name, None) is None and name not in nonessential_files:
                raise ValueError("Necessary configuration parameter is missing:\n{}".format(name))
    return job.addFollowOnJobFn(reference_preprocessing, config).rv()
Downloads shared reference files for Toil Germline pipeline

:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Pipeline configuration options
:return: Updated config with shared fileStoreIDS
:rtype: Namespace
def clean_docstring(docstring):
    docstring = docstring.strip()
    if '\n' in docstring:
        # multi-line docstring
        if docstring[0].isspace():
            # whole docstring is indented
            return textwrap.dedent(docstring)
        else:
            # first line is flush left, rest is indented
            first, _, rest = docstring.partition('\n')
            return first + '\n' + textwrap.dedent(rest)
    return docstring
Dedent docstring, special casing the first line.
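To see the special-casing of the first line (a sketch; the output assumes standard ``textwrap.dedent`` behavior):

doc = "Summary line.\n\n        Indented body line.\n    "
clean_docstring(doc)
# -> 'Summary line.\n\nIndented body line.'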
def _get_next_assessment_section(self, assessment_section_id):
    if self._my_map['sections'][-1] == str(assessment_section_id):
        # given section is the last known one, so create a new section
        section = self._get_assessment_section(assessment_section_id)
        next_part_id, level = get_next_part_id(section._assessment_part_id,
                                               runtime=self._runtime,
                                               proxy=self._proxy,
                                               sequestered=True)
        next_section = self._create_section(next_part_id)
        self._my_map['sections'].append(str(next_section.get_id()))
        self._save()
        return next_section
    else:
        return self._get_assessment_section(
            Id(self._my_map['sections'][
                self._my_map['sections'].index(str(assessment_section_id)) + 1]))
Gets the next section following section_id. Assumes that section list exists in taken and section_id is in section list. Assumes that Section parts only exist as children of Assessments
def _remove_none_values(dictionary):
    return list(map(dictionary.pop,
                    [i for i in dictionary if dictionary[i] is None]))
Remove dictionary keys whose value is None
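Note the side effect: the dictionary is mutated in place, and the return value is just the list of popped values (all None). For example:

d = {'a': 1, 'b': None, 'c': None}
_remove_none_values(d)  # returns [None, None]
# d is now {'a': 1}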
def periodic_callback(self):
    if self.stopped:
        return

    if not self.scanning and len(self._connections) == 0 and self.connecting_count == 0:
        self._logger.info("Restarting scan for devices")
        self.start_scan(self._active_scan)
        self._logger.info("Finished restarting scan for devices")
Periodic cleanup tasks to maintain this adapter, should be called every second
def daemon_connection_init(self, s_link, set_wait_new_conf=False):
    logger.debug("Daemon connection initialization: %s %s",
                 s_link.type, s_link.name)

    if not s_link.active:
        logger.warning("%s '%s' is not active, do not initialize its connection!",
                       s_link.type, s_link.name)
        return False

    s_link.create_connection()

    logger.debug("[%s] Getting running identifier for '%s'",
                 self.name, s_link.name)

    # assume the daemon is alive and reachable while initializing
    s_link.alive = True
    s_link.reachable = True

    got_a_running_id = None
    for _ in range(0, s_link.max_check_attempts):
        got_a_running_id = s_link.get_running_id()
        if got_a_running_id:
            s_link.last_connection = time.time()
            if set_wait_new_conf:
                s_link.wait_new_conf()
            break
        time.sleep(0.3)

    return got_a_running_id
Initialize a connection with the daemon for the provided satellite link

Initialize the connection (HTTP client) to the daemon and get its running
identifier. Returns True if it succeeds; if any error occurs or the daemon
is inactive, it returns False.

Assume the daemon should be reachable because we are initializing the
connection... as such, force set the link reachable property.

If set_wait_new_conf is set, the daemon is requested to wait for a new
configuration if we get a running identifier. This is used by the arbiter
when a new configuration must be dispatched.

NB: if the daemon is configured as passive, or if it is a daemon link
that is inactive, then it returns False without trying a connection.

:param s_link: link of the daemon to connect to
:type s_link: SatelliteLink
:param set_wait_new_conf: if the daemon must get the wait new configuration state
:type set_wait_new_conf: bool
:return: True if the connection is established, else False
def get_rm_conf(self):
    if not os.path.isfile(self.remove_file):
        return None

    parsedconfig = ConfigParser.RawConfigParser()
    parsedconfig.read(self.remove_file)
    rm_conf = {}
    for item, value in parsedconfig.items('remove'):
        if six.PY3:
            rm_conf[item] = value.strip().encode('utf-8').decode('unicode-escape').split(',')
        else:
            rm_conf[item] = value.strip().decode('string-escape').split(',')
    return rm_conf
Get excluded files config from remove_file.
def set_menu(self, menu):
    self.menu = menu
    wx_menu = menu.wx_menu()
    self.frame.SetMenuBar(wx_menu)
    self.frame.Bind(wx.EVT_MENU, self.on_menu)
add a menu from the parent
def stop(self, timeout=5):
    # ask every worker to shut down
    for worker in self._threads:
        self._queue.put(_SHUTDOWNREQUEST)

    # don't join the current thread (stop may be called from a worker)
    current = threading.currentThread()
    if timeout is not None and timeout >= 0:
        endtime = time.time() + timeout
    while self._threads:
        worker = self._threads.pop()
        if worker is not current and worker.isAlive():
            try:
                if timeout is None or timeout < 0:
                    worker.join()
                else:
                    remaining_time = endtime - time.time()
                    if remaining_time > 0:
                        worker.join(remaining_time)
                    if worker.isAlive():
                        # unblock a worker stuck on a read by shutting
                        # down its connection socket, then join again
                        c = worker.conn
                        if c and not c.rfile.closed:
                            try:
                                c.socket.shutdown(socket.SHUT_RD)
                            except TypeError:
                                c.socket.shutdown()
                        worker.join()
            except (AssertionError, KeyboardInterrupt):
                pass
Terminate all worker threads.

Args:
    timeout (int): time to wait for threads to stop gracefully
def run_simulations(self, param_list, show_progress=True):
    if self.runner is None:
        raise Exception("No runner was ever specified"
                        " for this CampaignManager.")

    if param_list == []:
        return

    # verify the passed parameter combinations match the campaign's
    desired_params = self.db.get_params()
    for p in param_list:
        passed = list(p.keys())
        available = ['RngRun'] + desired_params
        if set(passed) != set(available):
            raise ValueError("Specified parameter combination does not "
                             "match the supported parameters:\n"
                             "Passed: %s\nSupported: %s"
                             % (sorted(passed), sorted(available)))

    if self.check_repo:
        self.check_repo_ok()
    self.runner.configure_and_build(skip_configuration=True)

    shuffle(param_list)

    results = self.runner.run_simulations(param_list,
                                          self.db.get_data_dir())

    if show_progress:
        result_generator = tqdm(results, total=len(param_list),
                                unit='simulation',
                                desc='Running simulations')
    else:
        result_generator = results

    for result in result_generator:
        self.db.insert_result(result)
Run several simulations specified by a list of parameter combinations.

Note: this function does not verify whether we already have the required
simulations in the database - it just runs all the parameter combinations
that are specified in the list.

Args:
    param_list (list): list of parameter combinations to execute.
        Items of this list are dictionaries, with one key for each
        parameter, and a value specifying the parameter value (which
        can be either a string or a number).
    show_progress (bool): whether or not to show a progress bar with
        percentage and expected remaining time.
def run_friedman_smooth(x, y, span):
    N = len(x)
    weight = numpy.ones(N)
    results = numpy.zeros(N)
    residuals = numpy.zeros(N)
    mace.smooth(x, y, weight, span, 1, 1e-7, results, residuals)
    return results, residuals
Run the FORTRAN smoother.
def bipartition_indices(N):
    result = []
    if N <= 0:
        return result

    for i in range(2**(N - 1)):
        part = [[], []]
        for n in range(N):
            bit = (i >> n) & 1
            part[bit].append(n)
        result.append((tuple(part[1]), tuple(part[0])))
    return result
Return indices for undirected bipartitions of a sequence.

Args:
    N (int): The length of the sequence.

Returns:
    list: A list of tuples containing the indices for each of the two
    parts.

Example:
    >>> N = 3
    >>> bipartition_indices(N)
    [((), (0, 1, 2)), ((0,), (1, 2)), ((1,), (0, 2)), ((0, 1), (2,))]
def get_roles(username):
    info = sendline('show user-account {0}'.format(username))
    roles = re.search(r'^\s*roles:(.*)$', info, re.MULTILINE)
    if roles:
        roles = roles.group(1).strip().split(' ')
    else:
        roles = []
    return roles
Get roles that the username is assigned from switch

.. code-block:: bash

    salt '*' onyx.cmd get_roles username=admin
def seat_slot(self):
    if self.type == EventType.TOUCH_FRAME:
        raise AttributeError(_wrong_prop.format(self.type))
    return self._libinput.libinput_event_touch_get_seat_slot(self._handle)
The seat slot of the touch event.

A seat slot is a non-negative seat wide unique identifier of an active
touch point.

Events from single touch devices will be represented as one individual
touch point per device.

For events not of type :attr:`~libinput.constant.EventType.TOUCH_DOWN`,
:attr:`~libinput.constant.EventType.TOUCH_UP`,
:attr:`~libinput.constant.EventType.TOUCH_MOTION` or
:attr:`~libinput.constant.EventType.TOUCH_CANCEL`, this property
raises :exc:`AttributeError`.

Returns:
    int: The seat slot of the touch event.

Raises:
    AttributeError
def put(self, url: StrOrURL, *, data: Any = None,
        **kwargs: Any) -> '_RequestContextManager':
    return _RequestContextManager(
        self._request(hdrs.METH_PUT, url, data=data, **kwargs))
Perform HTTP PUT request.
def stop(ctx, description, f):
    description = ' '.join(description)

    try:
        timesheet_collection = get_timesheet_collection_for_context(ctx, f)
        current_timesheet = timesheet_collection.latest()
        current_timesheet.continue_entry(
            datetime.date.today(),
            datetime.datetime.now().time(),
            description
        )
    except ParseError as e:
        ctx.obj['view'].err(e)
    except NoActivityInProgressError as e:
        ctx.obj['view'].err(e)
    except StopInThePastError as e:
        ctx.obj['view'].err(e)
    else:
        current_timesheet.save()
Use it when you stop working on the current task. You can add a description to what you've done.
def feed(self, data, o: int = 0, l: int = -1):
    if l == -1:
        l = len(data) - o
    if o < 0 or l < 0:
        raise ValueError("negative input")
    if o + l > len(data):
        raise ValueError("input is larger than buffer size")
    self._parser.buf.extend(data[o:o + l])
Feed data to parser.
def stdin_readable():
    if not WINDOWS:
        try:
            return bool(select([sys.stdin], [], [], 0)[0])
        except Exception:
            logger.log_exc()
    try:
        return not sys.stdin.isatty()
    except Exception:
        logger.log_exc()
    return False
Determine whether stdin has any data to read.
def to_array(self):
    array = super(PassportFile, self).to_array()
    array['file_id'] = u(self.file_id)
    array['file_size'] = int(self.file_size)
    array['file_date'] = int(self.file_date)
    return array
Serializes this PassportFile to a dictionary.

:return: dictionary representation of this object.
:rtype: dict
def get_field_schema(name, field):
    field_schema = {
        'type': 'string',
    }

    if field.label:
        field_schema['title'] = str(field.label)
    if field.help_text:
        field_schema['description'] = str(field.help_text)

    if isinstance(field, (fields.URLField, fields.FileField)):
        field_schema['format'] = 'uri'
    elif isinstance(field, fields.EmailField):
        field_schema['format'] = 'email'
    elif isinstance(field, fields.DateTimeField):
        field_schema['format'] = 'date-time'
    elif isinstance(field, fields.DateField):
        field_schema['format'] = 'date'
    elif isinstance(field, (fields.DecimalField, fields.FloatField)):
        field_schema['type'] = 'number'
    elif isinstance(field, fields.IntegerField):
        field_schema['type'] = 'integer'
    elif isinstance(field, fields.NullBooleanField):
        field_schema['type'] = 'boolean'
    elif isinstance(field.widget, widgets.CheckboxInput):
        field_schema['type'] = 'boolean'

    if getattr(field, 'choices', []):
        field_schema['enum'] = sorted([choice[0] for choice in field.choices])

    if isinstance(field.widget, (widgets.Select, widgets.ChoiceWidget)):
        if field.widget.allow_multiple_selected:
            field_schema['items'] = {
                'type': field_schema['type'],
            }
            if 'enum' in field_schema:
                field_schema['items']['enum'] = field_schema.pop('enum')
            field_schema['type'] = 'array'

    return field_schema
Returns a JSON Schema representation of a form field.
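For instance, assuming ``fields``/``widgets`` here come from Django's ``django.forms`` (illustrative output only):

from django import forms

field = forms.EmailField(label="Email", help_text="Work address")
get_field_schema("email", field)
# -> {'type': 'string', 'title': 'Email',
#     'description': 'Work address', 'format': 'email'}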
def _maybe_create_new_template(self):
    ii = 0
    while ii < len(self._possible_formats):
        number_format = self._possible_formats[ii]
        pattern = number_format.pattern
        if self._current_formatting_pattern == pattern:
            return False
        if self._create_formatting_template(number_format):
            self._current_formatting_pattern = pattern
            if number_format.national_prefix_formatting_rule is None:
                self._should_add_space_after_national_prefix = False
            else:
                self._should_add_space_after_national_prefix = bool(
                    _NATIONAL_PREFIX_SEPARATORS_PATTERN.search(
                        number_format.national_prefix_formatting_rule))
            self._last_match_position = 0
            return True
        else:
            # remove the unusable number format from _possible_formats
            del self._possible_formats[ii]
            ii -= 1
        ii += 1
    self._able_to_format = False
    return False
Returns True if a new template is created as opposed to reusing the existing template. When there are multiple available formats, the formatter uses the first format where a formatting template could be created.
def stop(self):
    self._hw_virtualization = False
    yield from self._stop_remote_console()
    yield from self._stop_ubridge()
    try:
        if (yield from self.is_running()):
            if self.acpi_shutdown:
                # use ACPI to shut down the VM
                yield from self._control_vm("stop", "soft")
            else:
                yield from self._control_vm("stop")
    finally:
        self._started = False
        self.status = "stopped"

        self._read_vmx_file()
        self._vmnets.clear()
        for adapter_number in range(0, self._adapters):
            vnet = "ethernet{}.vnet".format(adapter_number)
            if self._get_vmx_setting(vnet) or \
                    self._get_vmx_setting("ethernet{}.connectiontype".format(adapter_number)) is None:
                if vnet in self._vmx_pairs:
                    vmnet = os.path.basename(self._vmx_pairs[vnet])
                    if not self.manager.is_managed_vmnet(vmnet):
                        continue
                log.debug("removing adapter {}".format(adapter_number))
                self._vmx_pairs[vnet] = "vmnet1"
                self._vmx_pairs["ethernet{}.connectiontype".format(adapter_number)] = "custom"

        for adapter_number in range(self._adapters, self._maximum_adapters):
            if self._get_vmx_setting("ethernet{}.present".format(adapter_number), "TRUE"):
                log.debug("enabling remaining adapter {}".format(adapter_number))
                self._vmx_pairs["ethernet{}.startconnected".format(adapter_number)] = "TRUE"
        self._write_vmx_file()

    yield from super().stop()
    log.info("VMware VM '{name}' [{id}] stopped".format(name=self.name, id=self.id))
Stops this VMware VM.
def eval_now(self, code):
    result = eval(self.reformat(code))
    if result is None or isinstance(result, (bool, int, float, complex)):
        return repr(result)
    elif isinstance(result, bytes):
        return "b" + self.wrap_str_of(result)
    elif isinstance(result, str):
        return self.wrap_str_of(result)
    else:
        return None
Reformat and evaluate a code snippet and return code for the result.
def get_plugin_class(self, plugin_type, plugin_name):
    plugin = self._get_plugin_type(plugin_type)
    return plugin.get_plugin_class(plugin_name)
Return the class registered under the given plugin name.
def key_from_file(filename, passphrase):
    hexdigest = sha256_file(filename)
    if passphrase is None:
        passphrase = DEFAULT_HMAC_PASSPHRASE
    return keyed_hash(hexdigest, passphrase)
Calculate convergent encryption key.

This takes a filename and an optional passphrase.
If no passphrase is given, a default is used.
Using the default passphrase means you will be vulnerable to confirmation
attacks and learn-partial-information attacks.

:param filename: The filename you want to create a key for.
:type filename: str
:param passphrase: The passphrase you want to use to encrypt the file.
:type passphrase: str or None
:returns: A convergent encryption key.
:rtype: str
def set_position(self, key, latlon, layer=None, rotation=0):
    self.object_queue.put(SlipPosition(key, latlon, layer, rotation))
move an object on the map
def swo_flush(self, num_bytes=None):
    if num_bytes is None:
        num_bytes = self.swo_num_bytes()

    buf = ctypes.c_uint32(num_bytes)
    res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.FLUSH,
                                         ctypes.byref(buf))
    if res < 0:
        raise errors.JLinkException(res)

    return None
Flushes data from the SWO buffer.

After this method is called, the flushed part of the SWO buffer is empty.

If ``num_bytes`` is not present, flushes all data currently in the SWO
buffer.

Args:
    self (JLink): the ``JLink`` instance
    num_bytes (int): the number of bytes to flush

Returns:
    ``None``

Raises:
    JLinkException: on error
def last_commit():
    try:
        root = subprocess.check_output(
            ['hg', 'parent', '--template={node}'],
            stderr=subprocess.STDOUT).strip()
        return root.decode('utf-8')
    except subprocess.CalledProcessError:
        return None
Returns the SHA1 of the last commit.
def close(self):
    if not self.is_open:
        return
    super(IndexCreator, self).close()
    self.fidx.close()
Closes the record and index files.
def organizations(self, organization, include=None):
    return self._query_zendesk(self.endpoint.organizations, 'ticket',
                               id=organization, include=include)
Retrieve the tickets for this organization.

:param include: list of objects to sideload. `Side-loading API Docs
    <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
:param organization: Organization object or id
def write_playlist_file(self, localdir):
    path = "{0}/playlists".format(localdir)
    if not os.path.exists(path):
        os.makedirs(path)
    filepath = "{0}/{1}".format(path, self.gen_filename())
    playlist = open(filepath, "w")
    for track in self.get_tracks():
        playlist.write("{0}/{1}.mp3\n".format(
            os.path.abspath(track.gen_localdir(localdir)),
            track.gen_filename()))
    playlist.close()
Write the playlist to a file in the local directory's playlists folder,
creating the folder if it does not exist.
def _ParseFSMState(self, template):
    if not template:
        return

    state_name = ''
    # skip blank lines and comments until the state name is found
    for line in template:
        self._line_num += 1
        line = line.rstrip()
        if line and not self.comment_regex.match(line):
            if (not self.state_name_re.match(line) or
                    len(line) > self.MAX_NAME_LEN or
                    line in TextFSMRule.LINE_OP or
                    line in TextFSMRule.RECORD_OP):
                raise TextFSMTemplateError("Invalid state name: '%s'. Line: %s"
                                           % (line, self._line_num))

            state_name = line
            if state_name in self.states:
                raise TextFSMTemplateError("Duplicate state name: '%s'. Line: %s"
                                           % (line, self._line_num))
            self.states[state_name] = []
            self.state_list.append(state_name)
            break

    # the remaining lines are the rules of this state
    for line in template:
        self._line_num += 1
        line = line.rstrip()
        if not line:
            break
        if self.comment_regex.match(line):
            continue
        if not (line.startswith(' ^') or line.startswith('\t^')):
            raise TextFSMTemplateError(
                "Missing white space or carat ('^') before rule. Line: %s"
                % self._line_num)
        self.states[state_name].append(
            TextFSMRule(line, self._line_num, self.value_map))

    return state_name
Extracts State and associated Rules from body of template file.

After the Value definitions the remainder of the template is state
definitions. The routine is expected to be called iteratively until no
more states remain - indicated by returning None.

The routine checks that the state names are a well formed string, do not
clash with reserved names and are unique.

Args:
    template: Valid template file after Value definitions have already
        been read.

Returns:
    Name of the state parsed from file. None otherwise.

Raises:
    TextFSMTemplateError: If any state definitions are invalid.
def nltk_stemmer(stemmer, token, i=None, tokens=None):
    def wrapped_stem(token, metadata=None):
        return stemmer.stem(token)

    return token.update(wrapped_stem)
Wrapper around a NLTK SnowballStemmer, which includes stop words for
each language.

Args:
    stemmer (SnowballStemmer): Stemmer instance that performs the stemming.
    token (lunr.Token): The token to stem.
    i (int): The index of the token in a set.
    tokens (list): A list of tokens representing the set.
def get_summary_metrics(self, align_metrics, dup_metrics,
                        insert_metrics=None, hybrid_metrics=None,
                        vrn_vals=None, rnaseq_metrics=None):
    with open(align_metrics) as in_handle:
        align_vals = self._parse_align_metrics(in_handle)
    if dup_metrics:
        with open(dup_metrics) as in_handle:
            dup_vals = self._parse_dup_metrics(in_handle)
    else:
        dup_vals = {}
    (insert_vals, hybrid_vals, rnaseq_vals) = (None, None, None)
    if insert_metrics and file_exists(insert_metrics):
        with open(insert_metrics) as in_handle:
            insert_vals = self._parse_insert_metrics(in_handle)
    if hybrid_metrics and file_exists(hybrid_metrics):
        with open(hybrid_metrics) as in_handle:
            hybrid_vals = self._parse_hybrid_metrics(in_handle)
    if rnaseq_metrics and file_exists(rnaseq_metrics):
        with open(rnaseq_metrics) as in_handle:
            rnaseq_vals = self._parse_rnaseq_metrics(in_handle)

    return self._tabularize_metrics(align_vals, dup_vals, insert_vals,
                                    hybrid_vals, vrn_vals, rnaseq_vals)
Retrieve a high level summary of interesting metrics.
def increment_error(self, error: Exception):
    _logger.debug('Increment error %s', error)

    for error_class in ERROR_PRIORITIES:
        if isinstance(error, error_class):
            self.errors[error_class] += 1
            return

    self.errors[type(error)] += 1
Increment the error counter preferring base exceptions.
def _post_tags(self, fileobj):
    page = OggPage.find_last(fileobj, self.serial, finishing=True)
    if page is None:
        raise OggVorbisHeaderError
    self.length = page.position / float(self.sample_rate)
Raises ogg.error
def _check_item_type(item, field_name, allowed_types, expect_list=False,
                     required_channels='all'):
    if expect_list:
        if not isinstance(item, list):
            raise TypeError('Field `%s` must be a list.' % field_name)

        # 'all' means every channel of the list is required
        if required_channels == 'all':
            required_channels = list(range(len(item)))

        for ch in range(len(item)):
            # optional channels may also be None
            if ch in required_channels:
                allowed_types_ch = allowed_types
            else:
                allowed_types_ch = allowed_types + (type(None),)

            if not isinstance(item[ch], allowed_types_ch):
                raise TypeError('Channel %d of field `%s` must be one of the following types:' % (ch, field_name),
                                allowed_types_ch)
    else:
        if not isinstance(item, allowed_types):
            raise TypeError('Field `%s` must be one of the following types:',
                            allowed_types)
Check the item's type against a set of allowed types.

Vary the print message regarding whether the item can be None. Helper to
`BaseRecord.check_field`.

Parameters
----------
item : any
    The item to check.
field_name : str
    The field name.
allowed_types : iterable
    Iterable of types the item is allowed to be.
expect_list : bool, optional
    Whether the item is expected to be a list.
required_channels : list, optional
    List of integers specifying which channels of the item must be
    present. May be set to 'all' to indicate all channels. Only used if
    `expect_list` is True, ie. item is a list, and its subelements are
    to be checked.

Notes
-----
This is called by `check_field`, which determines whether the item
should be a list or not. This function should generally not be called
by the user directly.
async def run_async(self):
    try:
        await self.run_loop_async()
    except Exception as err:
        _logger.error("Run loop failed %r", err)

    try:
        _logger.info("Shutting down all pumps %r", self.host.guid)
        await self.remove_all_pumps_async("Shutdown")
    except Exception as err:
        raise Exception("Failed to remove all pumps {!r}".format(err))
Starts the run loop and manages exceptions and cleanup.
def _broadcast_compat_variables(*variables):
    dims = tuple(_unified_dims(variables))
    return tuple(var.set_dims(dims) if var.dims != dims else var
                 for var in variables)
Create broadcast compatible variables, with the same dimensions.

Unlike the result of broadcast_variables(), some variables may have
dimensions of size 1 instead of the size of the broadcast dimension.
def get_tri_area(pts):
    a, b, c = pts[0], pts[1], pts[2]
    v1 = np.array(b) - np.array(a)
    v2 = np.array(c) - np.array(a)
    area_tri = abs(sp.linalg.norm(sp.cross(v1, v2)) / 2)
    return area_tri
Given a list of coords for 3 points,
Compute the area of this triangle.

Args:
    pts: [a, b, c] three points
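A quick sanity check with a unit right triangle in the xy-plane (assuming ``np``/``sp`` are the numpy/scipy imports the snippet relies on):

get_tri_area([[0, 0, 0], [1, 0, 0], [0, 1, 0]])
# -> 0.5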
def resp_json(resp):
    if isinstance(resp, flask.Response):
        if 400 <= resp.status_code < 600:
            msg = resp.status
            try:
                result = loads(resp.data.decode("utf-8"))
                if isinstance(result, str):
                    msg = "%s, %s" % (resp.status, result)
                else:
                    msg = "%s %s, %s" % (
                        resp.status_code, result["error"], result["message"])
            except Exception:
                pass
            raise requests.HTTPError(msg, response=resp)
        else:
            return loads(resp.data.decode("utf-8"))
    else:
        try:
            resp.raise_for_status()
        except requests.HTTPError as ex:
            # the exception may be raised by werkzeug's routing shortcut
            # for resp.json()
            try:
                result = resp.json()
                ex.args += (result["error"], result["message"])
            except (ValueError, KeyError):
                pass
            raise
        return resp.json()
Get JSON from response if success, raise requests.HTTPError otherwise.

Args:
    resp: requests.Response or flask.Response

Returns:
    JSON value
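Typical use with a ``requests`` response (the URL is a placeholder):

import requests

resp = requests.get("https://api.example.com/items")
items = resp_json(resp)  # raises requests.HTTPError on 4xx/5xx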
def get_gradebook_column(self, gradebook_column_id):
    collection = JSONClientValidated('grading',
                                     collection='GradebookColumn',
                                     runtime=self._runtime)
    result = collection.find_one(
        dict({'_id': ObjectId(self._get_id(gradebook_column_id, 'grading').get_identifier())},
             **self._view_filter()))
    return objects.GradebookColumn(osid_object_map=result,
                                   runtime=self._runtime,
                                   proxy=self._proxy)
Gets the ``GradebookColumn`` specified by its ``Id``.

In plenary mode, the exact ``Id`` is found or a ``NotFound`` results.
Otherwise, the returned ``GradebookColumn`` may have a different ``Id``
than requested, such as the case where a duplicate ``Id`` was assigned
to a ``GradebookColumn`` and retained for compatibility.

arg:    gradebook_column_id (osid.id.Id): ``Id`` of the
        ``GradebookColumn``
return: (osid.grading.GradebookColumn) - the gradebook column
raise:  NotFound - ``gradebook_column_id`` not found
raise:  NullArgument - ``gradebook_column_id`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
def has_table(table_name):
    return db.engine.dialect.has_table(
        db.engine.connect(),
        table_name
    )
Return True if table exists, False otherwise.
def measures(*measurements, **kwargs):
    def _maybe_make(meas):
        if isinstance(meas, Measurement):
            return meas
        elif isinstance(meas, six.string_types):
            return Measurement(meas, **kwargs)
        raise InvalidMeasurementType('Expected Measurement or string', meas)

    # when kwargs are given, a single measurement name must be constructed
    if kwargs and len(measurements) != 1:
        raise InvalidMeasurementType(
            'If @measures kwargs are provided, a single measurement name must be '
            'provided as a positional arg first.')

    if 'outcome' in kwargs:
        raise ValueError('Cannot specify outcome in measurement declaration!')

    measurements = [_maybe_make(meas) for meas in measurements]

    def decorate(wrapped_phase):
        phase = phase_descriptor.PhaseDescriptor.wrap_or_copy(wrapped_phase)
        duplicate_names = (set(m.name for m in measurements) &
                           set(m.name for m in phase.measurements))
        if duplicate_names:
            raise DuplicateNameError('Measurement names duplicated',
                                     duplicate_names)

        phase.measurements.extend(measurements)
        return phase

    return decorate
Decorator-maker used to declare measurements for phases.

See the measurements module docstring for examples of usage.

Args:
    measurements: Measurement objects to declare, or a string name from
        which to create a Measurement.
    kwargs: Keyword arguments to pass to Measurement constructor if we're
        constructing one. Note that if kwargs are provided, the length of
        measurements must be 1, and that value must be a string containing
        the measurement name. For valid kwargs, see the definition of the
        Measurement class.

Returns:
    A decorator that declares the measurement(s) for the decorated phase.
def get_storage(obj):
    annotation = IAnnotations(obj)
    if annotation.get(SNAPSHOT_STORAGE) is None:
        annotation[SNAPSHOT_STORAGE] = PersistentList()
    return annotation[SNAPSHOT_STORAGE]
Get or create the audit log storage for the given object

:param obj: Content object
:returns: PersistentList
def add_resources(self, resources):
    new_resources = self._build_resource_dictionary(resources)
    for key in new_resources:
        self._resources[key] = new_resources[key]
    self._dirty_attributes.add(u'resources')
Adds new resources to the event. *resources* can be a list of email addresses or :class:`ExchangeEventAttendee` objects.
def zoom_blur(x, severity=1):
    c = [
        np.arange(1, 1.11, 0.01),
        np.arange(1, 1.16, 0.01),
        np.arange(1, 1.21, 0.02),
        np.arange(1, 1.26, 0.02),
        np.arange(1, 1.31, 0.03)
    ][severity - 1]

    x = (np.array(x) / 255.).astype(np.float32)
    out = np.zeros_like(x)
    for zoom_factor in c:
        out += clipped_zoom(x, zoom_factor)
    x = (x + out) / (len(c) + 1)
    x_clip = np.clip(x, 0, 1) * 255
    return around_and_astype(x_clip)
Zoom blurring to images.

Applying zoom blurring to images by zooming the central part of the images.

Args:
    x: numpy array, uncorrupted image, assumed to have uint8 pixel in
        [0,255].
    severity: integer, severity of corruption.

Returns:
    numpy array, image with uint8 pixels in [0,255]. Applied zoom blur.
def transform_login(config):
    verify = True
    if config.pop('smc_ssl', None):
        scheme = 'https'
        verify = config.pop('ssl_cert_file', None)
        if config.pop('verify_ssl', None):
            if not verify:  # verify_ssl set but no ssl_cert_file provided
                verify = False
        else:
            verify = False
    else:
        scheme = 'http'
        config.pop('verify_ssl', None)
        config.pop('ssl_cert_file', None)
        verify = False

    transformed = {}
    url = '{}://{}:{}'.format(
        scheme,
        config.pop('smc_address', None),
        config.pop('smc_port', None))

    timeout = config.pop('timeout', None)
    if timeout:
        try:
            timeout = int(timeout)
        except ValueError:
            timeout = None

    api_version = config.pop('api_version', None)
    if api_version:
        try:
            float(api_version)
        except ValueError:
            api_version = None

    transformed.update(
        url=url,
        api_key=config.pop('smc_apikey', None),
        api_version=api_version,
        verify=verify,
        timeout=timeout,
        domain=config.pop('domain', None))

    if config:  # any remaining entries pass through as kwargs
        transformed.update(kwargs=config)
    return transformed
Parse login data as dict. Called from load_from_file and also can be
used when collecting information from other sources as well.

:param dict config: data representing the valid key/value pairs from smcrc
:return: dict of settings that can be sent into session.login
def _generate_corpus_table(self, labels, ngrams):
    html = []
    for label in labels:
        html.append(self._render_corpus_row(label, ngrams))
    return '\n'.join(html)
Returns an HTML table containing data on each corpus' n-grams.
def _create_extractors(col_params):
    result = []
    for col_param in col_params:
        result.append(_create_extractor(col_param))
    return result
Creates extractors to extract properties corresponding to 'col_params'.

Args:
    col_params: List of ListSessionGroupsRequest.ColParam protobufs.

Returns:
    A list of extractor functions. The ith element in the returned list
    extracts the column corresponding to the ith element of
    _request.col_params
def download(name, options):
    dire = os.path.dirname(name)
    fName = os.path.basename(name)
    fNameOnly, fExt = os.path.splitext(fName)
    dwn = 0
    if fileExists(fName, dire) and not fileExists((fNameOnly + '.srt'), dire):
        if file_downloaded(download_file(fName, options.timeout, dire),
                           fName, options.verbose):
            dwn += 1
    elif dirExists(name):
        for filename in os.listdir(name):
            if options.recursive:
                dwn += download(os.path.join(name, filename), options)
            else:
                if file_downloaded(download_file(filename, options.timeout, name),
                                   filename, options.verbose):
                    dwn += 1
    return dwn
download a file or all files in a directory
def strace_data_access_event(self, operation, address, data, data_mask=None,
                             access_width=4, address_range=0):
    cmd = enums.JLinkStraceCommand.TRACE_EVENT_SET
    event_info = structs.JLinkStraceEventInfo()
    event_info.Type = enums.JLinkStraceEvent.DATA_ACCESS
    event_info.Op = operation
    event_info.AccessSize = int(access_width)
    event_info.Addr = int(address)
    event_info.Data = int(data)
    event_info.DataMask = int(data_mask or 0)
    event_info.AddrRangeSize = int(address_range)
    handle = self._dll.JLINK_STRACE_Control(cmd, ctypes.byref(event_info))
    if handle < 0:
        raise errors.JLinkException(handle)
    return handle
Sets an event to trigger trace logic when data access is made.

Data access corresponds to either a read or write.

Args:
    self (JLink): the ``JLink`` instance.
    operation (int): one of the operations in ``JLinkStraceOperation``.
    address (int): the address of the load/store data.
    data (int): the data to be compared the event data to.
    data_mask (int): optional bitmask specifying bits to ignore in
        comparison.
    access_width (int): optional access width for the data.
    address_range (int): optional range of address to trigger event on.

Returns:
    An integer specifying the trace event handle. This handle should be
    retained in order to clear the event at a later time.

Raises:
    JLinkException: on error.
def spawn_thread(func, *args, **kwargs):
    thread = threading.Thread(target=func, args=args, kwargs=kwargs)
    thread.daemon = True
    thread.start()
    return thread
Spawns a daemon thread.
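Typical usage (the worker function is purely illustrative):

import time

def heartbeat(interval):
    while True:
        print("alive")
        time.sleep(interval)

t = spawn_thread(heartbeat, 1.0)
# daemon threads do not keep the process alive at interpreter exit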
def _get_net_runner_opts():
    runner_opts = __opts__.get('runners', {}).get('net.find', {})
    return {
        'target': runner_opts.get('target', _DEFAULT_TARGET),
        'expr_form': runner_opts.get('expr_form', _DEFAULT_EXPR_FORM),
        'ignore_interfaces': runner_opts.get('ignore_interfaces', _DEFAULT_IGNORE_INTF),
        'display': runner_opts.get('display', _DEFAULT_DISPLAY),
        'outputter': runner_opts.get('outputter', _DEFAULT_OUTPUTTER),
    }
Return the net.find runner options.
def get_primary_or_secondary(self, component, return_ind=False):
    parent = self.get_parent_of(component)
    if parent is None:
        # single component, not in a binary
        return 'primary'

    children_of_parent = self.get_children_of(parent)

    ind = children_of_parent.index(component)

    if ind > 1:
        return None

    if return_ind:
        return ind + 1

    return ['primary', 'secondary'][ind]
return whether a given component is the 'primary' or 'secondary' component in its parent orbit
def current_app(self):
    current_focus = self.adb_shell(CURRENT_APP_CMD)
    if current_focus is None:
        return None

    current_focus = current_focus.replace("\r", "")
    matches = WINDOW_REGEX.search(current_focus)

    if matches:
        (pkg, activity) = matches.group("package", "activity")
        return {"package": pkg, "activity": activity}

    logging.warning("Couldn't get current app, reply was %s", current_focus)
    return None
Return the current app.
def install_package_command(package_name):
    if sys.platform == "win32":
        cmds = 'python -m pip install --user {0}'.format(package_name)
    else:
        cmds = 'python3 -m pip install --user {0}'.format(package_name)
    call(cmds, shell=True)
install python package from pip
def child_added(self, child):
    super(AndroidViewPager, self).child_added(child)
    self._notify_count += 1
    self.get_context().timed_call(self._notify_delay, self._notify_change)
When a child is added, schedule a data changed notification
def hook_inform(self, inform_name, callback):
    if callback not in self._inform_hooks[inform_name]:
        self._inform_hooks[inform_name].append(callback)
Hookup a function to be called when an inform is received.

Useful for interface-changed and sensor-status informs.

Parameters
----------
inform_name : str
    The name of the inform.
callback : function
    The function to be called.
def _get_script_args(cls, type_, name, header, script_text):
    if type_ == 'gui':
        launcher_type = 'gui'
        ext = '-script.pyw'
        old = ['.pyw']
    else:
        launcher_type = 'cli'
        ext = '-script.py'
        old = ['.py', '.pyc', '.pyo']
    hdr = cls._adjust_header(type_, header)
    blockers = [name + x for x in old]
    yield (name + ext, hdr + script_text, 't', blockers)
    yield (
        name + '.exe', get_win_launcher(launcher_type),
        'b'  # write the launcher in binary mode
    )
    if not is_64bit():
        # install a manifest for the launcher
        m_name = name + '.exe.manifest'
        yield (m_name, load_launcher_manifest(name), 't')
For Windows, add a .py extension and an .exe launcher
def positional_encoding(inputs,
                        num_units=None,
                        zero_pad=True,
                        scale=True,
                        scope="positional_encoding",
                        reuse=None):
    Shape = tf.shape(inputs)
    N = Shape[0]
    T = Shape[1]
    num_units = Shape[2]
    with tf.variable_scope(scope, reuse=reuse):
        position_ind = tf.tile(tf.expand_dims(tf.range(T), 0), [N, 1])

        X = tf.expand_dims(tf.cast(tf.range(T), tf.float32), axis=1)
        Y = tf.expand_dims(
            tf.cast(10000 ** -(2 * tf.range(num_units) / num_units),
                    tf.float32), axis=0)
        h1 = tf.cast((tf.range(num_units) + 1) % 2, tf.float32)
        h2 = tf.cast((tf.range(num_units) % 2), tf.float32)
        position_enc = tf.multiply(X, Y)
        # sine on even indices, cosine on odd indices
        position_enc = tf.sin(position_enc) * tf.multiply(tf.ones_like(X), h1) + \
            tf.cos(position_enc) * tf.multiply(tf.ones_like(X), h2)

        lookup_table = position_enc

        if zero_pad:
            lookup_table = tf.concat((tf.zeros(shape=[1, num_units]),
                                      lookup_table[1:, :]), 0)
        outputs = tf.nn.embedding_lookup(lookup_table, position_ind)

        if scale:
            outputs = outputs * tf.sqrt(tf.cast(num_units, tf.float32))

        return outputs
Return positional embedding.
def table_qfiltered(self):
    description = 'MinIONQC statistics for quality filtered reads. ' + \
                  'Quality threshold used: {}.'.format(', '.join(list(self.q_threshold_list)))
    if len(self.q_threshold_list) > 1:
        # log.warning() returns None, so log the warning instead of
        # appending its result to the description
        log.warning('More than one quality thresholds were present. Thresholds: {}.'.format(
            ', '.join(list(self.q_threshold_list))))

    self.add_section(
        name='Stats: Quality filtered reads',
        anchor='minionqc-stats-qFilt',
        description=description,
        plot=table.plot(
            self.qfilt_data,
            self.headers_to_use(),
            {
                'namespace': 'MinIONQC',
                'id': 'minionqc-stats-qFilt-table',
                'table_title': 'MinIONQC Stats: Quality filtered reads'
            }
        )
    )
Table showing stats for q-filtered reads
def static_url(redis, path):
    file_hash = get_cache_buster(redis, path)
    return "%s/%s?v=%s" % (oz.settings["static_host"], path, file_hash)
Gets the static path for a file
def __impl_read_chain(self, start, read_sector_f, read_fat_f):
    sector = start
    check = [sector]  # sectors already visited, to detect loops
    buffer = StringIO()
    while sector != ENDOFCHAIN:
        buffer.write(read_sector_f(sector))
        next = read_fat_f(sector)
        if next in check:
            # `start` is the chain's first sector (the original referenced
            # an undefined `sector_start` here)
            logging.error('infinite loop detected at {0} to {1} starting at {2}'.format(
                sector, next, start))
            return buffer.getvalue()
        check.append(next)
        sector = next
    return buffer.getvalue()
Returns the entire contents of a chain starting at the given sector.
def transform_position(data, trans_x=None, trans_y=None):
    # aesthetics that map onto the x and y scales
    X = {'x', 'xmin', 'xmax', 'xend', 'xintercept'}
    Y = {'y', 'ymin', 'ymax', 'yend', 'yintercept'}

    if trans_x:
        xs = [name for name in data.columns if name in X]
        data[xs] = data[xs].apply(trans_x)

    if trans_y:
        ys = [name for name in data.columns if name in Y]
        data[ys] = data[ys].apply(trans_y)

    return data
Transform all the variables that map onto the x and y scales.

Parameters
----------
data : dataframe
trans_x : function
    Transforms x scale mappings
    Takes one argument, either a scalar or an array-type
trans_y : function
    Transforms y scale mappings
    Takes one argument, either a scalar or an array-type
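For example, to put both position scales on a log10 axis (a sketch assuming a pandas DataFrame input):

import numpy as np
import pandas as pd

df = pd.DataFrame({'x': [1, 10, 100], 'y': [2, 20, 200]})
transform_position(df, trans_x=np.log10, trans_y=np.log10)
#      x         y
# 0  0.0  0.301030
# 1  1.0  1.301030
# 2  2.0  2.301030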
def validate(self):
    for header in self._requiredHeaders:
        if not self.headers.get(header, False):
            raise errors.ParseError('Missing Registration Header: ' + header)

    for notice in self.notifications:
        for header in self._requiredNotificationHeaders:
            if not notice.get(header, False):
                raise errors.ParseError('Missing Notification Header: ' + header)
Validate required headers and validate notification headers
def duration(
    days=0,
    seconds=0,
    microseconds=0,
    milliseconds=0,
    minutes=0,
    hours=0,
    weeks=0,
    years=0,
    months=0,
):
    return Duration(
        days=days,
        seconds=seconds,
        microseconds=microseconds,
        milliseconds=milliseconds,
        minutes=minutes,
        hours=hours,
        weeks=weeks,
        years=years,
        months=months,
    )
Create a Duration instance.
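Usage sketch, assuming the ``Duration`` class wrapped above (pendulum-style, including calendar units):

d = duration(days=1, hours=2, minutes=30)
# a Duration spanning 1 day, 2 hours and 30 minutes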