def multilayer_fully_connected(images, labels):
    images = pt.wrap(images)
    with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
        return (images.flatten()
                .fully_connected(100)
                .fully_connected(100)
                .softmax_classifier(10, labels))
Creates a multi-layer network of fully_connected layers. Each layer has 100 neurons. Change this to experiment with architectures. Args: images: The input images. labels: The labels as dense one-hot vectors. Returns: A softmax result.
def pare(text, size, etc='...'):
    size = int(size)
    text = text.strip()
    if len(text) > size:
        to_be_stripped = not whitespace_re.findall(text[size - 1:size + 2])
        text = text[:size]
        if to_be_stripped:
            half = size // 2
            last = None
            for mo in whitespace_re.finditer(text[half:]):
                last = mo
            if last is not None:
                text = text[:half + last.start() + 1]
        return text.rstrip() + etc
    else:
        return text
Pare text to have maximum size and add etc to the end if it's changed
def wait_for_close( raiden: 'RaidenService', payment_network_id: PaymentNetworkID, token_address: TokenAddress, channel_ids: List[ChannelID], retry_timeout: float, ) -> None: return wait_for_channel_in_states( raiden=raiden, payment_network_id=payment_network_id, token_address=token_address, channel_ids=channel_ids, retry_timeout=retry_timeout, target_states=CHANNEL_AFTER_CLOSE_STATES, )
Wait until all channels are closed. Note: This does not time out, use gevent.Timeout.
def contains_duplicates(values: Iterable[Any]) -> bool:
    for v in Counter(values).values():
        if v > 1:
            return True
    return False
Does the iterable contain any duplicate values?
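A minimal, self-contained sketch of the behaviour described above, using any() as an equivalent of the explicit loop; the Counter/Iterable imports are assumed to come from collections and typing in the original module.

from collections import Counter
from typing import Any, Iterable

def contains_duplicates(values: Iterable[Any]) -> bool:
    # any() short-circuits on the first count greater than one
    return any(count > 1 for count in Counter(values).values())

print(contains_duplicates([1, 2, 3, 2]))  # True
print(contains_duplicates("abcd"))        # False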
def srun_nodes(self): count = self.execution.get('srun_nodes', 0) if isinstance(count, six.string_types): tag = count count = 0 elif isinstance(count, SEQUENCES): return count else: assert isinstance(count, int) tag = self.tag nodes = self._srun_nodes(tag, count) if 'srun_nodes' in self.execution: self.execution['srun_nodes'] = nodes self.execution['srun_nodes_count'] = len(nodes) return nodes
Get list of nodes where to execute the command
def reassign_ids(doc, verbose = False): for n, elem in enumerate(doc.childNodes): if verbose: print >>sys.stderr, "reassigning row IDs: %.1f%%\r" % (100.0 * (n + 1) / len(doc.childNodes)), if elem.tagName == ligolw.LIGO_LW.tagName: table.reassign_ids(elem) if verbose: print >>sys.stderr, "reassigning row IDs: 100.0%" return doc
Assign new IDs to all rows in all LSC tables in doc so that there are no collisions when the LIGO_LW elements are merged.
def _get_manager(cluster_info, host, executor_id): for node in cluster_info: if node['host'] == host and node['executor_id'] == executor_id: addr = node['addr'] authkey = node['authkey'] TFSparkNode.mgr = TFManager.connect(addr, authkey) break if TFSparkNode.mgr is None: msg = "No TFManager found on this node, please ensure that:\n" + \ "1. Spark num_executors matches TensorFlow cluster_size\n" + \ "2. Spark cores/tasks per executor is 1.\n" + \ "3. Spark dynamic allocation is disabled." raise Exception(msg) logging.info("Connected to TFSparkNode.mgr on {0}, executor={1}, state={2}".format(host, executor_id, str(TFSparkNode.mgr.get('state')))) return TFSparkNode.mgr
Returns this executor's "singleton" instance of the multiprocessing.Manager, reconnecting per python-worker if needed. Args: :cluster_info: cluster node reservations :host: host IP address :executor_id: unique id per executor (created during initial call to run()) Returns: TFManager instance for this executor/python-worker
def flatten(self, redact=False): od = OrderedDict() for key, view in self.items(): if redact and view.redact: od[key] = REDACTED_TOMBSTONE else: try: od[key] = view.flatten(redact=redact) except ConfigTypeError: od[key] = view.get() return od
Create a hierarchy of OrderedDicts containing the data from this view, recursively reifying all views to get their represented values. If `redact` is set, then sensitive values are replaced with the string "REDACTED".
def check_incoming(self, message, addr, protocol): component = protocol.local_candidate.component remote_candidate = None for c in self._remote_candidates: if c.host == addr[0] and c.port == addr[1]: remote_candidate = c assert remote_candidate.component == component break if remote_candidate is None: remote_candidate = Candidate( foundation=random_string(10), component=component, transport='udp', priority=message.attributes['PRIORITY'], host=addr[0], port=addr[1], type='prflx') self._remote_candidates.append(remote_candidate) self.__log_info('Discovered peer reflexive candidate %s', remote_candidate) pair = self._find_pair(protocol, remote_candidate) if pair is None: pair = CandidatePair(protocol, remote_candidate) pair.state = CandidatePair.State.WAITING self._check_list.append(pair) self.sort_check_list() if pair.state in [CandidatePair.State.WAITING, CandidatePair.State.FAILED]: pair.handle = asyncio.ensure_future(self.check_start(pair)) if 'USE-CANDIDATE' in message.attributes and not self.ice_controlling: pair.remote_nominated = True if pair.state == CandidatePair.State.SUCCEEDED: pair.nominated = True self.check_complete(pair)
Handle a successful incoming check.
def set_default_calibrator(self, parameter, type, data): req = mdb_pb2.ChangeParameterRequest() req.action = mdb_pb2.ChangeParameterRequest.SET_DEFAULT_CALIBRATOR if type: _add_calib(req.defaultCalibrator, type, data) url = '/mdb/{}/{}/parameters/{}'.format( self._instance, self._processor, parameter) response = self._client.post_proto(url, data=req.SerializeToString())
Apply a calibrator while processing raw values of the specified parameter. If there is already a default calibrator associated to this parameter, that calibrator gets replaced. .. note:: Contextual calibrators take precedence over the default calibrator See :meth:`set_calibrators` for setting contextual calibrators. Two types of calibrators can be applied: * Polynomial calibrators apply a polynomial expression of the form: `y = a + bx + cx^2 + ...`. The `data` argument must be an array of floats ``[a, b, c, ...]``. * Spline calibrators interpolate the raw value between a set of points which represent a linear curve. The `data` argument must be an array of ``[x, y]`` points. :param str parameter: Either a fully-qualified XTCE name or an alias in the format ``NAMESPACE/NAME``. :param str type: One of ``polynomial`` or ``spline``. :param data: Calibration definition for the selected type.
def to_disk(self, path, exclude=tuple(), disable=None): if disable is not None: deprecation_warning(Warnings.W014) exclude = disable path = util.ensure_path(path) serializers = OrderedDict() serializers["tokenizer"] = lambda p: self.tokenizer.to_disk(p, exclude=["vocab"]) serializers["meta.json"] = lambda p: p.open("w").write(srsly.json_dumps(self.meta)) for name, proc in self.pipeline: if not hasattr(proc, "name"): continue if name in exclude: continue if not hasattr(proc, "to_disk"): continue serializers[name] = lambda p, proc=proc: proc.to_disk(p, exclude=["vocab"]) serializers["vocab"] = lambda p: self.vocab.to_disk(p) util.to_disk(path, serializers, exclude)
Save the current state to a directory. If a model is loaded, this will include the model. path (unicode or Path): Path to a directory, which will be created if it doesn't exist. exclude (list): Names of components or serialization fields to exclude. DOCS: https://spacy.io/api/language#to_disk
def diffuse_advanced(self, heatColumnName=None, time=None, verbose=False): PARAMS=set_param(["heatColumnName","time"],[heatColumnName,time]) response=api(url=self.__url+"/diffuse_advanced", PARAMS=PARAMS, method="POST", verbose=verbose) return response
Diffusion will send the selected network view and its selected nodes to a web-based REST service to calculate network propagation. Results are returned and represented by columns in the node table. Columns are created for each execution of Diffusion and their names are returned in the response. :param heatColumnName (string, optional): A node column name intended to override the default table column 'diffusion_input'. This represents the query vector and corresponds to h in the diffusion equation. = ['HEKScore', 'JurkatScore', '(Use selected nodes)'] :param time (string, optional): The extent of spread over the network. This corresponds to t in the diffusion equation. :param verbose: print more
def save(filepath=None, **kwargs):
    if filepath is None:
        filepath = os.path.join('.env')
    # Text mode: the generator yields formatted str lines, not bytes.
    with open(filepath, 'w') as file_handle:
        file_handle.writelines(
            '{0}={1}\n'.format(key.upper(), val)
            for key, val in kwargs.items()
        )
Saves keyword arguments as environment variables to a file. If no filepath is given, it defaults to the `.env` file.
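A small illustration of the line format save() writes out, using a hypothetical pair of keyword arguments; only the string formatting from the function body is reproduced here.

kwargs = {'debug': 'true', 'port': 8000}
lines = ['{0}={1}\n'.format(key.upper(), val) for key, val in kwargs.items()]
print(''.join(lines))
# DEBUG=true
# PORT=8000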
def write_representative_sequences_file(self, outname, outdir=None, set_ids_from_model=True): if not outdir: outdir = self.data_dir if not outdir: raise ValueError('Output directory must be specified') outfile = op.join(outdir, outname + '.faa') tmp = [] for x in self.genes_with_a_representative_sequence: repseq = x.protein.representative_sequence copied_seq_record = copy(repseq) if set_ids_from_model: copied_seq_record.id = x.id tmp.append(copied_seq_record) SeqIO.write(tmp, outfile, "fasta") log.info('{}: wrote all representative sequences to file'.format(outfile)) self.genome_path = outfile return self.genome_path
Write all the model's sequences as a single FASTA file. By default, sets IDs to model gene IDs. Args: outname (str): Name of the output FASTA file without the extension outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories were not created initially set_ids_from_model (bool): If the gene ID source should be the model gene IDs, not the original sequence ID
def get_thumbnail(self, file_, geometry_string, **options): logger.debug('Getting thumbnail for file [%s] at [%s]', file_, geometry_string) if file_: source = ImageFile(file_) else: raise ValueError('falsey file_ argument in get_thumbnail()') if settings.THUMBNAIL_PRESERVE_FORMAT: options.setdefault('format', self._get_format(source)) for key, value in self.default_options.items(): options.setdefault(key, value) for key, attr in self.extra_options: value = getattr(settings, attr) if value != getattr(default_settings, attr): options.setdefault(key, value) name = self._get_thumbnail_filename(source, geometry_string, options) thumbnail = ImageFile(name, default.storage) cached = default.kvstore.get(thumbnail) if cached: return cached if settings.THUMBNAIL_FORCE_OVERWRITE or not thumbnail.exists(): try: source_image = default.engine.get_image(source) except IOError as e: logger.exception(e) if settings.THUMBNAIL_DUMMY: return DummyImageFile(geometry_string) else: logger.warning( 'Remote file [%s] at [%s] does not exist', file_, geometry_string, ) return thumbnail image_info = default.engine.get_image_info(source_image) options['image_info'] = image_info size = default.engine.get_image_size(source_image) source.set_size(size) try: self._create_thumbnail(source_image, geometry_string, options, thumbnail) self._create_alternative_resolutions(source_image, geometry_string, options, thumbnail.name) finally: default.engine.cleanup(source_image) default.kvstore.get_or_set(source) default.kvstore.set(thumbnail, source) return thumbnail
Returns thumbnail as an ImageFile instance for file with geometry and options given. First it will try to get it from the key value store, secondly it will create it.
def set_include_entities(self, include):
    if not isinstance(include, bool):
        raise TwitterSearchException(1008)
    self.arguments.update(
        {'include_entities': 'true' if include else 'false'}
    )
Sets 'include entities' parameter to either include or exclude the entities node within the results :param include: Boolean to trigger the 'include entities' parameter :raises: TwitterSearchException
def compute_checksum(filename, max_length=2**20):
    fid = open(filename, 'rb')
    crcval = safe_crc(fid.read(max_length))
    fid.close()
    return crcval
Compute the CRC32 checksum for specified file Optional parameter max_length sets the maximum number of bytes used to limit time used with large files. Default = 2**20 (1MB)
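A minimal sketch of the same idea using only the standard library, assuming safe_crc is essentially a wrapper around zlib.crc32; the context manager replaces the explicit open/close and the function name here is illustrative, not the original API.

import zlib

def crc32_of_file(filename, max_length=2**20):
    # Read at most max_length bytes and CRC32 them; the mask keeps the result unsigned.
    with open(filename, 'rb') as fid:
        return zlib.crc32(fid.read(max_length)) & 0xFFFFFFFF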
def remove_user_from_group(self, username, group_name): log.warning('Removing user from a group...') url = 'rest/api/2/group/user' params = {'groupname': group_name, 'username': username} return self.delete(url, params=params)
Remove given user from a group :param username: str :param group_name: str :return:
def smart_search(cls, query_string, search_options=None, extra_query = None): if search_options is None: search_options = {} xmlrpc = XMLRPCConnection() try: smart_result = xmlrpc.connection.smart_search_prefix( { 'query_string': query_string, 'search_options': search_options, 'auth': AuthOptions().options, 'extra_query': extra_query }) except xmlrpclib.Fault as xml_fault: raise _fault_to_exception(xml_fault) result = dict() result['interpretation'] = smart_result['interpretation'] result['search_options'] = smart_result['search_options'] result['error'] = smart_result['error'] if 'error_message' in smart_result: result['error_message'] = smart_result['error_message'] result['result'] = list() for prefix in smart_result['result']: p = Prefix.from_dict(prefix) result['result'].append(p) return result
Perform a smart prefix search. Maps to the function :py:func:`nipap.backend.Nipap.smart_search_prefix` in the backend. Please see the documentation for the backend function for information regarding input arguments and return values.
def init(confdir="/etc/cslbot"): multiprocessing.set_start_method('spawn') parser = argparse.ArgumentParser() parser.add_argument('-d', '--debug', help='Enable debug logging.', action='store_true') parser.add_argument('--validate', help='Initialize the db and perform other sanity checks.', action='store_true') args = parser.parse_args() loglevel = logging.DEBUG if args.debug else logging.INFO logging.basicConfig(level=loglevel, format="%(asctime)s %(levelname)s:%(module)s:%(message)s") logging.getLogger("requests").setLevel(logging.WARNING) cslbot = IrcBot(confdir) if args.validate: cslbot.shutdown_mp() print("Everything is ready to go!") return try: cslbot.start() except KeyboardInterrupt: cslbot.disconnect('Bot received a Ctrl-C') cslbot.shutdown_mp() sys.exit(0) except Exception as ex: cslbot.shutdown_mp(False) logging.error("The bot died! %s", ex) output = "".join(traceback.format_exc()).strip() for line in output.split('\n'): logging.error(line) sys.exit(1)
The bot's main entry point. Initialize the bot and start processing messages.
def delete_dcnm_out_nwk(self, tenant_id, fw_dict, is_fw_virt=False): tenant_name = fw_dict.get('tenant_name') ret = self._delete_service_nwk(tenant_id, tenant_name, 'out') if ret: res = fw_const.DCNM_OUT_NETWORK_DEL_SUCCESS LOG.info("out Service network deleted for tenant %s", tenant_id) else: res = fw_const.DCNM_OUT_NETWORK_DEL_FAIL LOG.info("out Service network deleted failed for tenant %s", tenant_id) self.update_fw_db_result(tenant_id, dcnm_status=res) return ret
Delete the DCNM OUT network and update the result.
def end_task(self, name, success=True):
    self._running.remove(name)
    if success:
        self._completed.add(name)
        self._graph.remove(name, strategy=Strategy.orphan)
    else:
        self._cascade_failure(name)
End a running task. Raises an exception if the task isn't running. name: The name of the task to complete. success: (optional, True) Whether the task was successful.
def cdf(data, mode='continuous', **kwargs):
    return (
        np.linspace(0.0, 1.0, len(data)),
        np.sort(data)
    )
Return cumulative density. :arguments: **data** (``<numpy.ndarray>``) Input data, to plot the distribution for. :returns: **P** (``<numpy.ndarray>``) Cumulative probability. **x** (``<numpy.ndarray>``) Data points.
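A short usage sketch: the returned pair can be plotted directly as an empirical CDF. Only numpy is assumed; the two lines mirror what cdf() returns.

import numpy as np

data = np.random.randn(1000)
P = np.linspace(0.0, 1.0, len(data))  # cumulative probability, as returned by cdf()
x = np.sort(data)                     # sorted data points, as returned by cdf()
# e.g. with matplotlib, plt.plot(x, P) would draw the empirical CDF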
def add_path(self, path):
    if not os.path.exists(path):
        raise RuntimeError('Path does not exist: %s.' % path)
    self.paths.append(path)
Load translations from an existing path.
async def _wrap_http(self, handler, *args, **kwargs): try: method = self.request_method() if method == 'OPTIONS': return self.build_http_response(None, status=NO_CONTENT) data = await handler(self, *args, **kwargs) formatted = self.format(method, data) status = self.responses.get(method, OK) return self.build_http_response(formatted, status=status) except Exception as ex: return self.dispatch_error(ex)
wraps a handler with an HTTP request-response cycle
def run_apidoc(_): import os dirname = os.path.dirname(__file__) ignore_paths = [os.path.join(dirname, '../../aaf2/model'),] argv = [ '--force', '--no-toc', '--separate', '--module-first', '--output-dir', os.path.join(dirname, 'api'), os.path.join(dirname, '../../aaf2'), ] + ignore_paths from sphinx.ext import apidoc apidoc.main(argv)
This method is required by the setup method below.
def do_raise(self, arg):
    self.do_continue(arg)
    _, exc_value, _ = self.exc_info
    exc_value._ipdbugger_let_raise = True
    raise_(*self.exc_info)
Raise the last exception caught.
def shamelessly_promote(): click.echo("Need " + click.style("help", fg='green', bold=True) + "? Found a " + click.style("bug", fg='green', bold=True) + "? Let us " + click.style("know", fg='green', bold=True) + "! :D") click.echo("File bug reports on " + click.style("GitHub", bold=True) + " here: " + click.style("https://github.com/Miserlou/Zappa", fg='cyan', bold=True)) click.echo("And join our " + click.style("Slack", bold=True) + " channel here: " + click.style("https://slack.zappa.io", fg='cyan', bold=True)) click.echo("Love!,") click.echo(" ~ Team " + click.style("Zappa", bold=True) + "!")
Shamelessly promote our little community.
def get_processing_block(block_id):
    identifiers = block_id.split(':')
    scheduling_block_id = identifiers[0]
    scheduling_block_config = get_scheduling_block(scheduling_block_id)
    for processing_block in scheduling_block_config['processing_blocks']:
        if processing_block['id'] == block_id:
            return processing_block
    raise KeyError('Unknown Processing Block id: {} ({})'
                   .format(identifiers[-1], block_id))
Return the Processing Block Configuration for the specified ID
def _get_cubic_root(self): assert_array = [ tf.Assert( tf.logical_not(tf.is_nan(self._dist_to_opt_avg)), [self._dist_to_opt_avg,]), tf.Assert( tf.logical_not(tf.is_nan(self._h_min)), [self._h_min,]), tf.Assert( tf.logical_not(tf.is_nan(self._grad_var)), [self._grad_var,]), tf.Assert( tf.logical_not(tf.is_inf(self._dist_to_opt_avg)), [self._dist_to_opt_avg,]), tf.Assert( tf.logical_not(tf.is_inf(self._h_min)), [self._h_min,]), tf.Assert( tf.logical_not(tf.is_inf(self._grad_var)), [self._grad_var,]) ] with tf.control_dependencies(assert_array): p = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0 w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0/3.0) y = w - p / 3.0 / w x = y + 1 return x
Get the cubic root.
def absolute_extreme_coefficient_ratio(model): s_matrix, _, _ = con_helpers.stoichiometry_matrix( model.metabolites, model.reactions ) abs_matrix = np.abs(s_matrix) return abs_matrix.max(), abs_matrix[abs_matrix > 0].min()
Return the maximum and minimum absolute, non-zero coefficients. Parameters ---------- model : cobra.Model The metabolic model under investigation.
def save_snapshot(self, si, logger, vm_uuid, snapshot_name, save_memory): vm = self.pyvmomi_service.find_by_uuid(si, vm_uuid) snapshot_path_to_be_created = SaveSnapshotCommand._get_snapshot_name_to_be_created(snapshot_name, vm) save_vm_memory_to_snapshot = SaveSnapshotCommand._get_save_vm_memory_to_snapshot(save_memory) SaveSnapshotCommand._verify_snapshot_uniquness(snapshot_path_to_be_created, vm) task = self._create_snapshot(logger, snapshot_name, vm, save_vm_memory_to_snapshot) self.task_waiter.wait_for_task(task=task, logger=logger, action_name='Create Snapshot') return snapshot_path_to_be_created
Creates a snapshot of the current state of the virtual machine :param vim.ServiceInstance si: py_vmomi service instance :type si: vim.ServiceInstance :param logger: Logger :type logger: cloudshell.core.logger.qs_logger.get_qs_logger :param vm_uuid: UUID of the virtual machine :type vm_uuid: str :param snapshot_name: Snapshot name to save the snapshot to :type snapshot_name: str :param save_memory: Snapshot the virtual machine's memory. Lookup, Yes / No :type save_memory: str
def dispatch_event(self, event_, **kwargs): if self.settings.hooks_enabled: result = self.hooks.dispatch_event(event_, **kwargs) if result is not None: return result if self.section: return self.section.dispatch_event(event_, **kwargs) elif self.section: self.section.dispatch_event(event_, **kwargs)
Dispatch section event. Notes: You MUST NOT call event.trigger() directly because it will circumvent the section settings as well as ignore the section tree. If hooks are disabled somewhere up in the tree, and enabled down below, events will still be dispatched down below because that's where they originate.
def __addLocationsToURL(self, locations): for l in self.__locations: self.__urlLocations += "+location:\""\ + str(quote(l)) + "\""
Format all locations to GitHub's URL API. :param locations: locations where to search users. :type locations: list(str).
def add_current_user_is_applied_representation(func): @wraps(func) def _impl(self, instance): ret = func(self, instance) user = self.context["request"].user applied = False if not user.is_anonymous(): try: applied = models.Apply.objects.filter(user=user, project=instance).count() > 0 except: pass ret["current_user_is_applied"] = applied return ret return _impl
Used to decorate Serializer.to_representation method. It sets the field "current_user_is_applied" if the user is applied to the project
def _p_iteration(self, P, Bp_solver, Vm, Va, pvpq):
    dVa = -Bp_solver.solve(P)
    Va[pvpq] = Va[pvpq] + dVa
    V = Vm * exp(1j * Va)
    return V, Vm, Va
Performs a P iteration, updates Va.
def stop(self):
    self.keep_reading = False
    if self.readthread is not None:
        self.readthread.join()
        self.readthread = None
Stops and joins readthread. :return: Nothing
def context_new(zap_helper, name): console.info('Creating context with name: {0}'.format(name)) res = zap_helper.zap.context.new_context(contextname=name) console.info('Context "{0}" created with ID: {1}'.format(name, res))
Create a new context.
def with_source(self, lease): super().with_source(lease) self.offset = lease.offset self.sequence_number = lease.sequence_number
Init Azure Blob Lease from existing.
def run(self, stim, merge=True, **merge_kwargs): results = list(chain(*[self.run_node(n, stim) for n in self.roots])) results = list(flatten(results)) self._results = results return merge_results(results, **merge_kwargs) if merge else results
Executes the graph by calling all Transformers in sequence. Args: stim (str, Stim, list): One or more valid inputs to any Transformer's 'transform' call. merge (bool): If True, all results are merged into a single pandas DataFrame before being returned. If False, a list of ExtractorResult objects is returned (one per Extractor/Stim combination). merge_kwargs: Optional keyword arguments to pass onto the merge_results() call.
def add(self, left_column, right_column, indexes=None):
    left_list, right_list = self._get_lists(left_column, right_column, indexes)
    return [l + r for l, r in zip(left_list, right_list)]
Math helper method that adds element-wise two columns. If indexes are not None then will only perform the math on that sub-set of the columns. :param left_column: first column name :param right_column: second column name :param indexes: list of index values or list of booleans. If a list of booleans then the list must be the same length as the DataFrame :return: list
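The core of the method is plain element-wise addition over two equal-length lists; the _get_lists lookup and index filtering belong to the surrounding DataFrame class and are not reproduced in this tiny standalone illustration.

left_list = [1, 2, 3]
right_list = [10, 20, 30]
print([l + r for l, r in zip(left_list, right_list)])  # [11, 22, 33]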
def require_debian_packages(packages: List[str]) -> None: present = are_debian_packages_installed(packages) missing_packages = [k for k, v in present.items() if not v] if missing_packages: missing_packages.sort() msg = ( "Debian packages are missing, as follows. Suggest:\n\n" "sudo apt install {}".format(" ".join(missing_packages)) ) log.critical(msg) raise ValueError(msg)
Ensure specific packages are installed under Debian. Args: packages: list of packages Raises: ValueError: if any are missing
def set_avatar(self, asset_id): if self.get_avatar_metadata().is_read_only(): raise errors.NoAccess() if not self._is_valid_id(asset_id): raise errors.InvalidArgument() self._my_map['avatarId'] = str(asset_id)
Sets the avatar asset. arg: asset_id (osid.id.Id): an asset ``Id`` raise: InvalidArgument - ``asset_id`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
def elements(self):
    offset = self.EXTRA_DIGITS
    if offset:
        return (self._id[:offset], self.company_prefix,
                self._reference, self.check_digit)
    else:
        return (self.company_prefix, self._reference, self.check_digit)
Return the identifier's elements as tuple.
def _method_call(self, method, category, **kwargs): session = requests.Session() try: response = session.get("http://" + self._api_address) except requests.exceptions.ConnectionError: raise FantasyDataError('Error: Cannot connect to the FantasyData API') method = method.format(format=self._response_format, **kwargs) request_url = "/v3/{game_type}/{category}/{format}/{method}?{get_params}".format( game_type=self.game_type, category=category, format=self._response_format, method=method, get_params=self._get_params) response = session.get(self._api_schema + self._api_address + request_url, headers=self._headers) result = response.json() if isinstance(result, dict) and response.status_code: if response.status_code == 401: raise FantasyDataError('Error: Invalid API key') elif response.status_code == 200: pass else: raise FantasyDataError('Error: Failed to get response') return result
Call API method. Generate request. Parse response. Process errors `method` str API method url for request. Contains parameters `params` dict parameters for method url
def _build_session(username, password, trans_label=None): bigip = requests.session() bigip.auth = (username, password) bigip.verify = False bigip.headers.update({'Content-Type': 'application/json'}) if trans_label: trans_id = __salt__['grains.get']('bigip_f5_trans:{label}'.format(label=trans_label)) if trans_id: bigip.headers.update({'X-F5-REST-Coordination-Id': trans_id}) else: bigip.headers.update({'X-F5-REST-Coordination-Id': None}) return bigip
Create a session to be used when connecting to iControl REST.
def _import(func): func_name = func.__name__ if func_name in globals(): return func_name module_name = func.__module__ submodules = module_name.split('.') if submodules[0] in globals(): return module_name + '.' + func_name for i in range(len(submodules)): m = submodules[i] if m in globals(): return '.'.join(submodules[i:]) + '.' + func_name module_ref = sys.modules[func.__module__] all_globals = globals() for n in all_globals: if all_globals[n] == module_ref: return n + '.' + func_name return func_name
Return the namespace path to the function
def ground_motion_fields(rupture, sites, imts, gsim, truncation_level, realizations, correlation_model=None, seed=None): cmaker = ContextMaker(rupture.tectonic_region_type, [gsim]) gc = GmfComputer(rupture, sites, [str(imt) for imt in imts], cmaker, truncation_level, correlation_model) res, _sig, _eps = gc.compute(gsim, realizations, seed) return {imt: res[imti] for imti, imt in enumerate(gc.imts)}
Given an earthquake rupture, the ground motion field calculator computes ground shaking over a set of sites, by randomly sampling a ground shaking intensity model. A ground motion field represents a possible 'realization' of the ground shaking due to an earthquake rupture. .. note:: This calculator is using random numbers. In order to reproduce the same results numpy random numbers generator needs to be seeded, see http://docs.scipy.org/doc/numpy/reference/generated/numpy.random.seed.html :param openquake.hazardlib.source.rupture.Rupture rupture: Rupture to calculate ground motion fields radiated from. :param openquake.hazardlib.site.SiteCollection sites: Sites of interest to calculate GMFs. :param imts: List of intensity measure type objects (see :mod:`openquake.hazardlib.imt`). :param gsim: Ground-shaking intensity model, instance of subclass of either :class:`~openquake.hazardlib.gsim.base.GMPE` or :class:`~openquake.hazardlib.gsim.base.IPE`. :param truncation_level: Float, number of standard deviations for truncation of the intensity distribution, or ``None``. :param realizations: Integer number of GMF realizations to compute. :param correlation_model: Instance of correlation model object. See :mod:`openquake.hazardlib.correlation`. Can be ``None``, in which case non-correlated ground motion fields are calculated. Correlation model is not used if ``truncation_level`` is zero. :param int seed: The seed used in the numpy random number generator :returns: Dictionary mapping intensity measure type objects (same as in parameter ``imts``) to 2d numpy arrays of floats, representing different realizations of ground shaking intensity for all sites in the collection. First dimension represents sites and second one is for realizations.
def get_context_data(self): self.get_context() self.context_data.update(self.get_extra_context()) return self.context_data
Context data is equal to context + extra_context. Merge the dicts context_data and extra_context and update the state.
def cache_file(self, template): saltpath = salt.utils.url.create(template) self.file_client().get_file(saltpath, '', True, self.saltenv)
Cache a file from the salt master
def image_resources(package=None, directory='resources'): if not package: package = calling_package() package_dir = '.'.join([package, directory]) images = [] for i in resource_listdir(package, directory): if i.startswith('__') or i.endswith('.egg-info'): continue fname = resource_filename(package_dir, i) if resource_isdir(package_dir, i): images.extend(image_resources(package_dir, i)) elif what(fname): images.append(fname) return images
Returns all images under the directory relative to a package path. If no directory or package is specified then the resources module of the calling package will be used. Images are recursively discovered. :param package: package name in dotted format. :param directory: path relative to package path of the resources directory. :return: a list of images under the specified resources path.
def imsave(filename, data, maxval=None, pam=False): try: netpbm = NetpbmFile(data, maxval=maxval) netpbm.write(filename, pam=pam) finally: netpbm.close()
Write image data to Netpbm file. Examples -------- >>> image = numpy.array([[0, 1],[65534, 65535]], dtype=numpy.uint16) >>> imsave('_tmp.pgm', image)
def get_expiration_seconds_v4(expiration):
    if not isinstance(expiration, _EXPIRATION_TYPES):
        raise TypeError(
            "Expected an integer timestamp, datetime, or "
            "timedelta. Got %s" % type(expiration)
        )
    now = NOW().replace(tzinfo=_helpers.UTC)
    if isinstance(expiration, six.integer_types):
        seconds = expiration
    if isinstance(expiration, datetime.datetime):
        if expiration.tzinfo is None:
            expiration = expiration.replace(tzinfo=_helpers.UTC)
        expiration = expiration - now
    if isinstance(expiration, datetime.timedelta):
        seconds = int(expiration.total_seconds())
    if seconds > SEVEN_DAYS:
        raise ValueError(
            "Max allowed expiration interval is seven days ({} seconds)".format(
                SEVEN_DAYS
            )
        )
    return seconds
Convert 'expiration' to a number of seconds offset from the current time. :type expiration: Union[Integer, datetime.datetime, datetime.timedelta] :param expiration: Point in time when the signed URL should expire. :raises: :exc:`TypeError` when expiration is not a valid type. :raises: :exc:`ValueError` when expiration is too large. :rtype: Integer :returns: seconds in the future when the signed URL will expire
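A quick check of the timedelta branch and the seven-day cap, using only the standard library; the SEVEN_DAYS value below is an assumed definition of the module-level constant (seconds in a week).

import datetime

SEVEN_DAYS = 7 * 24 * 3600  # assumed definition of the module-level constant
expiration = datetime.timedelta(days=2)
seconds = int(expiration.total_seconds())
print(seconds, seconds <= SEVEN_DAYS)  # 172800 True -> accepted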
def magenta(cls): "Make the text foreground color magenta." wAttributes = cls._get_text_attributes() wAttributes &= ~win32.FOREGROUND_MASK wAttributes |= win32.FOREGROUND_MAGENTA cls._set_text_attributes(wAttributes)
Make the text foreground color magenta.
def pack_header_extensions(extensions: List[Tuple[int, bytes]]) -> Tuple[int, bytes]: extension_profile = 0 extension_value = b'' if not extensions: return extension_profile, extension_value one_byte = True for x_id, x_value in extensions: x_length = len(x_value) assert x_id > 0 and x_id < 256 assert x_length >= 0 and x_length < 256 if x_id > 14 or x_length == 0 or x_length > 16: one_byte = False if one_byte: extension_profile = 0xBEDE extension_value = b'' for x_id, x_value in extensions: x_length = len(x_value) extension_value += pack('!B', (x_id << 4) | (x_length - 1)) extension_value += x_value else: extension_profile = 0x1000 extension_value = b'' for x_id, x_value in extensions: x_length = len(x_value) extension_value += pack('!BB', x_id, x_length) extension_value += x_value extension_value += b'\x00' * padl(len(extension_value)) return extension_profile, extension_value
Serialize header extensions according to RFC 5285.
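The one-byte header form packs the extension id in the high nibble and (length - 1) in the low nibble; a minimal demonstration of that encoding for a single hypothetical extension id/value pair.

from struct import pack

x_id, x_value = 1, b'\xbe\xef'                          # hypothetical extension id/value
header = pack('!B', (x_id << 4) | (len(x_value) - 1))   # 0x11: id=1, length=2
print((header + x_value).hex())                         # '11beef'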
def sasutil(self) -> 'SASutil': if not self._loaded_macros: self._loadmacros() self._loaded_macros = True return SASutil(self)
This method creates a SASutil object which you can use to run various analytics. See the sasutil.py module. :return: sasutil object
def display_ioc(self, width=120, sep=' ', params=False): s = 'Name: {}\n'.format(self.metadata.findtext('short_description', default='No Name')) s += 'ID: {}\n'.format(self.root.attrib.get('id')) s += 'Created: {}\n'.format(self.metadata.findtext('authored_date', default='No authored_date')) s += 'Updated: {}\n\n'.format(self.root.attrib.get('last-modified', default='No last-modified attrib')) s += 'Author: {}\n'.format(self.metadata.findtext('authored_by', default='No authored_by')) desc = self.metadata.findtext('description', default='No Description') desc = textwrap.wrap(desc, width=width) desc = '\n'.join(desc) s += 'Description:\n{}\n\n'.format(desc) links = self.link_text() if links: s += '{}'.format(links) content_text = self.criteria_text(sep=sep, params=params) s += '\nCriteria:\n{}'.format(content_text) return s
Get a string representation of an IOC. :param width: Width to wrap the description to. :param sep: Separator used for displaying the contents of the criteria nodes. :param params: Boolean, set to True in order to display node parameters. :return:
def stop(self, reason=''): key = '/status/sessions/terminate?sessionId=%s&reason=%s' % (self.session[0].id, quote_plus(reason)) return self._server.query(key)
Stop playback for a media item.
def use_plenary_log_view(self): self._log_view = PLENARY for session in self._get_provider_sessions(): try: session.use_plenary_log_view() except AttributeError: pass
Pass through to provider LogEntryLogSession.use_plenary_log_view
def get_cache_key(path):
    try:
        path_hash = hashlib.md5(path).hexdigest()
    except TypeError:
        path_hash = hashlib.md5(path.encode('utf-8')).hexdigest()
    return settings.cache_key_prefix + path_hash
Create a cache key by concatenating the prefix with a hash of the path.
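Only the hashing step is shown here with a hypothetical path; the prefix comes from the surrounding settings object.

import hashlib

path = '/static/css/site.css'                          # hypothetical path
print(hashlib.md5(path.encode('utf-8')).hexdigest())   # stable 32-character key suffix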
def add_fields(self, form, index): super(ColumnFormSet, self).add_fields(form, index) form.fields['column'].choices = self.get_choices()
Filter the form's column choices. This is done at the formset level as there's no other way I could find to get the parent object (stored in self.instance) and the form at the same time.
def print_single_instruction_callback(self, address, size, branch_delay_insn, insn_type, target, target2, disassembly): print "0x%X SZ=%d BD=%d IT=%d\t%s" % \ (address, size, branch_delay_insn, insn_type, disassembly) return PYBFD_DISASM_CONTINUE
Callback invoked on each disassembled instruction to print its information.
def form_valid(self, redirect_to=None): session = db.session() with session.no_autoflush: self.before_populate_obj() self.form.populate_obj(self.obj) session.add(self.obj) self.after_populate_obj() try: session.flush() self.send_activity() session.commit() except ValidationError as e: rv = self.handle_commit_exception(e) if rv is not None: return rv session.rollback() flash(str(e), "error") return self.get() except sa.exc.IntegrityError as e: rv = self.handle_commit_exception(e) if rv is not None: return rv session.rollback() logger.error(e) flash(_("An entity with this name already exists in the system."), "error") return self.get() else: self.commit_success() flash(self.message_success(), "success") if redirect_to: return redirect(redirect_to) else: return self.redirect_to_view()
Save object. Called when form is validated. :param redirect_to: real url (created with url_for) to redirect to, instead of the view by default.
def _BlobToChunks(blob_id, blob):
    chunk_begins = list(range(0, len(blob), BLOB_CHUNK_SIZE)) or [0]
    chunks = []
    for i, chunk_begin in enumerate(chunk_begins):
        chunks.append({
            "blob_id": blob_id,
            "chunk_index": i,
            "blob_chunk": blob[chunk_begin:chunk_begin + BLOB_CHUNK_SIZE]
        })
    return chunks
Splits a Blob into chunks of size BLOB_CHUNK_SIZE.
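The slicing logic reduces to range(0, len(blob), BLOB_CHUNK_SIZE); a toy demonstration with an illustrative chunk size (the real module defines its own constant).

BLOB_CHUNK_SIZE = 4  # illustrative value only
blob = b'abcdefghij'
begins = list(range(0, len(blob), BLOB_CHUNK_SIZE)) or [0]
print([blob[b:b + BLOB_CHUNK_SIZE] for b in begins])  # [b'abcd', b'efgh', b'ij']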
def setSeed(self, value):
    self.seed = value
    random.seed(self.seed)
    if self.verbosity >= 0:
        print("Conx using seed:", self.seed)
Sets the seed to value.
def qtiling(fseries, qrange, frange, mismatch=0.2): qplane_tile_dict = {} qs = list(_iter_qs(qrange, deltam_f(mismatch))) for q in qs: qtilefreq = _iter_frequencies(q, frange, mismatch, fseries.duration) qplane_tile_dict[q] = numpy.array(list(qtilefreq)) return qplane_tile_dict
Iterable constructor of QTile tuples Parameters ---------- fseries: 'pycbc FrequencySeries' frequency-series data set qrange: upper and lower bounds of q range frange: upper and lower bounds of frequency range mismatch: percentage of desired fractional mismatch Returns ------- qplane_tile_dict: 'dict' dictionary containing Q-tile tuples for a set of Q-planes
def get_title(self): def fformat(x): if isinstance(x, (list, tuple)): return '[{0}]'.format(', '.join(map(fformat, x))) if isinstance(x, Quantity): x = x.value elif isinstance(x, str): warnings.warn('WARNING: fformat called with a' + ' string. This has ' + 'been depricated and may disappear ' + 'in a future release.') x = float(x) return '{0:.2f}'.format(x) bits = [('Q', fformat(self.result.q))] bits.append(('tres', '{:.3g}'.format(self.qxfrm_args['tres']))) if self.qxfrm_args.get('qrange'): bits.append(('q-range', fformat(self.qxfrm_args['qrange']))) if self.qxfrm_args['whiten']: bits.append(('whitened',)) bits.extend([ ('f-range', fformat(self.result.yspan)), ('e-range', '[{:.3g}, {:.3g}]'.format(self.result.min(), self.result.max())), ]) return ', '.join([': '.join(bit) for bit in bits])
Default title for plot
def _analyze_case(model_dir, bench_dir, config): bundle = livvkit.verification_model_module model_out = functions.find_file(model_dir, "*"+config["output_ext"]) bench_out = functions.find_file(bench_dir, "*"+config["output_ext"]) model_config = functions.find_file(model_dir, "*"+config["config_ext"]) bench_config = functions.find_file(bench_dir, "*"+config["config_ext"]) model_log = functions.find_file(model_dir, "*"+config["logfile_ext"]) el = [ bit_for_bit(model_out, bench_out, config), diff_configurations(model_config, bench_config, bundle, bundle), bundle.parse_log(model_log) ] return el
Runs all of the verification checks on a particular case
def create_model(self, model_server_workers=None, role=None, vpc_config_override=VPC_CONFIG_DEFAULT): role = role or self.role return ChainerModel(self.model_data, role, self.entry_point, source_dir=self._model_source_dir(), enable_cloudwatch_metrics=self.enable_cloudwatch_metrics, name=self._current_job_name, container_log_level=self.container_log_level, code_location=self.code_location, py_version=self.py_version, framework_version=self.framework_version, model_server_workers=model_server_workers, image=self.image_name, sagemaker_session=self.sagemaker_session, vpc_config=self.get_vpc_config(vpc_config_override), dependencies=self.dependencies)
Create a SageMaker ``ChainerModel`` object that can be deployed to an ``Endpoint``. Args: role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during transform jobs. If not specified, the role from the Estimator will be used. model_server_workers (int): Optional. The number of worker processes used by the inference server. If None, server will use one worker per vCPU. vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on the model. Default: use subnets and security groups from this Estimator. * 'Subnets' (list[str]): List of subnet ids. * 'SecurityGroupIds' (list[str]): List of security group ids. Returns: sagemaker.chainer.model.ChainerModel: A SageMaker ``ChainerModel`` object. See :func:`~sagemaker.chainer.model.ChainerModel` for full details.
def _check_lr(name, optimizer, lr):
    n = len(optimizer.param_groups)
    if not isinstance(lr, (list, tuple)):
        return lr * np.ones(n)
    if len(lr) != n:
        raise ValueError("{} lr values were passed for {} but there are "
                         "{} param groups.".format(len(lr), name, n))
    return np.array(lr)
Return one learning rate for each param group.
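The broadcasting behaviour in the scalar case is just lr * np.ones(n); a small sketch assuming an optimizer with three param groups.

import numpy as np

n_param_groups = 3
print(0.01 * np.ones(n_param_groups))    # [0.01 0.01 0.01] -> one lr per group
print(np.array([0.01, 0.005, 0.001]))    # a per-group list must have length 3, else ValueError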
def GET_conditionitemvalues(self) -> None: for item in state.conditionitems: self._outputs[item.name] = item.value
Get the values of all |ChangeItem| objects handling |StateSequence| or |LogSequence| objects.
def resample_data(self, data, freq, resampler='mean'):
    if resampler == 'mean':
        data = data.resample(freq).mean()
    elif resampler == 'max':
        data = data.resample(freq).max()
    else:
        raise ValueError('Resampler can be \'mean\' or \'max\' only.')
    return data
Resample dataframe. Note ---- 1. Figure out how to apply different functions to different columns using .apply() 2. This theoretically works in upsampling too, check docs http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.resample.html Parameters ---------- data : pd.DataFrame() Dataframe to resample freq : str Resampling frequency i.e. d, h, 15T... resampler : str Resampling type i.e. mean, max. Returns ------- pd.DataFrame() Dataframe containing resampled data
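A minimal pandas sketch of what the 'mean' branch does, with hypothetical daily data resampled to two-day means.

import pandas as pd

idx = pd.date_range('2024-01-01', periods=6, freq='D')
df = pd.DataFrame({'load': range(6)}, index=idx)
print(df.resample('2D').mean())   # what resample_data(data=df, freq='2D') would compute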
def getConnectionStats(self): cur = self._conn.cursor() cur.execute() rows = cur.fetchall() if rows: return dict(rows) else: return {}
Returns dictionary with number of connections for each database. @return: Dictionary of database connection statistics.
def convert_to_foldable(input_filename: str, output_filename: str, slice_horiz: int, slice_vert: int, overwrite: bool = False, longedge: bool = False, latex_paper_size: str = LATEX_PAPER_SIZE_A4) -> bool: if not os.path.isfile(input_filename): log.warning("Input file does not exist or is not a file") return False if not overwrite and os.path.isfile(output_filename): log.error("Output file exists; not authorized to overwrite (use " "--overwrite if you are sure)") return False log.info("Processing {!r}", input_filename) with tempfile.TemporaryDirectory() as tmpdir: log.debug("Using temporary directory {!r}", tmpdir) intermediate_num = 0 def make_intermediate() -> str: nonlocal intermediate_num intermediate_num += 1 return os.path.join(tmpdir, "intermediate_{}.pdf".format(intermediate_num)) input_filename = slice_pdf( input_filename=input_filename, output_filename=make_intermediate(), slice_horiz=slice_horiz, slice_vert=slice_vert ) input_filename = booklet_nup_pdf( input_filename=input_filename, output_filename=make_intermediate(), latex_paper_size=latex_paper_size ) if longedge: input_filename = rotate_even_pages_180( input_filename=input_filename, output_filename=make_intermediate(), ) log.info("Writing to {!r}", output_filename) shutil.move(input_filename, output_filename) return True
Runs a chain of tasks to convert a PDF to a useful booklet PDF.
def __setuptools_version(self): f = open(self.script_path(), "r") try: matcher = re.compile(r'\s*DEFAULT_VERSION\s*=\s*"([^"]*)"\s*$') for i, line in enumerate(f): if i > 50: break match = matcher.match(line) if match: return match.group(1) finally: f.close() self.__error("error parsing setuptools installation script '%s'" % ( self.script_path(),))
Read setuptools version from the underlying ez_setup script.
def eye(ax, p0, size=1.0, alpha=0, format=None, **kwds): r if format is None: format = 'k-' N = 100 ang0 = pi-3*pi/16; angf = pi+3*pi/16 angstep = (angf-ang0)/(N-1) x1 = [size*(cos(i*angstep+ang0)+1) for i in range(N)] y1 = [size*sin(i*angstep+ang0) for i in range(N)] ang2 = ang0+pi/16 x2 = [size, size*(1.2*cos(ang2)+1)] y2 = [0, 1.2*size*(sin(ang2))] y3 = [0, -1.2*size*(sin(ang2))] N = 100 ang0 = ang2; angf = ang2+4*pi/16 angstep = (angf-ang0)/(N-1) x4 = [size*(0.85*cos(i*angstep+ang0)+1) for i in range(N)] y4 = [size*0.85*sin(i*angstep+ang0) for i in range(N)] cur_list = [(x1, y1), (x2, y2), (x2, y3), (x4, y4)] cur_list = rotate_and_traslate(cur_list, alpha, p0) for curi in cur_list: ax.plot(curi[0], curi[1], format, **kwds)
r"""Draw an eye.
def PrintGroup(name, codes): range16 = MakeRanges([c for c in codes if c < 65536]) range32 = MakeRanges([c for c in codes if c >= 65536]) global n16 global n32 n16 += len(range16) n32 += len(range32) ugroup = "{ \"%s\", +1" % (name,) if len(range16) > 0: PrintRanges("URange16", name+"_range16", range16) ugroup += ", %s_range16, %d" % (name, len(range16)) else: ugroup += ", 0, 0" if len(range32) > 0: PrintRanges("URange32", name+"_range32", range32) ugroup += ", %s_range32, %d" % (name, len(range32)) else: ugroup += ", 0, 0" ugroup += " }" return ugroup
Print the data structures for the group of codes. Return a UGroup literal for the group.
def query_metadata_pypi(self): if self.version and self.version in self.all_versions: metadata = self.pypi.release_data(self.project_name, self.version) else: metadata = self.pypi.release_data(self.project_name, \ self.all_versions[0]) if metadata: for key in metadata.keys(): if not self.options.fields or (self.options.fields and \ self.options.fields==key): print("%s: %s" % (key, metadata[key])) return 0
Show pkg metadata queried from PyPI @returns: 0
async def create_lease_if_not_exists_async(self, partition_id): return_lease = None try: return_lease = AzureBlobLease() return_lease.partition_id = partition_id serializable_lease = return_lease.serializable() json_lease = json.dumps(serializable_lease) _logger.info("Creating Lease %r %r %r", self.lease_container_name, partition_id, json.dumps({k:v for k, v in serializable_lease.items() if k != 'event_processor_context'})) await self.host.loop.run_in_executor( self.executor, functools.partial( self.storage_client.create_blob_from_text, self.lease_container_name, partition_id, json_lease)) except Exception: try: return_lease = await self.get_lease_async(partition_id) except Exception as err: _logger.error("Failed to create lease %r", err) raise err return return_lease
Create in the store the lease info for the given partition, if it does not exist. Do nothing if it does exist in the store already. :param partition_id: The ID of a given partition. :type partition_id: str :return: the existing or newly-created lease info for the partition. :rtype: ~azure.eventprocessorhost.lease.Lease
def as_dict(self): tags_dict = dict(self) tags_dict['@module'] = self.__class__.__module__ tags_dict['@class'] = self.__class__.__name__ return tags_dict
Dict representation. Returns: Dictionary of parameters from fefftags object
def window(self, begin, end=None): if self._name_parts.decorator != '': raise Exception("Cannot use window() on an already decorated table") start = Table._convert_decorator_time(begin) if end is None: if isinstance(begin, datetime.timedelta): end = datetime.timedelta(0) else: end = datetime.datetime.utcnow() stop = Table._convert_decorator_time(end) if (start > 0 >= stop) or (stop > 0 >= start): raise Exception("window: Between arguments must both be absolute or relative: %s, %s" % (str(begin), str(end))) if start > stop: raise Exception("window: Between arguments: begin must be before end: %s, %s" % (str(begin), str(end))) return Table("%s@%s-%s" % (self._full_name, str(start), str(stop)), context=self._context)
Return a new Table limited to the rows added to this Table during the specified time range. Args: begin: the start time of the window. This can be a Python datetime (absolute) or timedelta (relative to current time). The result must be after the table was created and no more than seven days in the past. Note that using a relative value will provide a varying snapshot, not a fixed snapshot; any queries issued against such a Table will be done against a snapshot that has an age relative to the execution time of the query. end: the end time of the snapshot; if None, then the current time is used. The types and interpretation of values is as for start. Returns: A new Table object referencing the window. Raises: An exception if this Table is already decorated, or if the time specified is invalid.
def BatchNorm(x, params, axis=(0, 1, 2), epsilon=1e-5, center=True, scale=True,
              **unused_kwargs):
    mean = np.mean(x, axis, keepdims=True)
    m1 = np.mean(x**2, axis, keepdims=True)
    var = m1 - mean**2
    z = (x - mean) / np.sqrt(var + epsilon)
    beta, gamma = params
    ed = tuple(None if i in axis else slice(None) for i in range(np.ndim(x)))
    beta = beta[ed]
    gamma = gamma[ed]
    if center and scale:
        return gamma * z + beta
    if center:
        return z + beta
    if scale:
        return gamma * z
    return z
Layer construction function for a batch normalization layer.
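The normalization itself can be checked with plain NumPy; this mirrors the mean/variance computation in the layer body (beta/gamma omitted) on a hypothetical NHWC batch.

import numpy as np

x = np.random.randn(8, 4, 4, 3).astype(np.float32)
axis, epsilon = (0, 1, 2), 1e-5
mean = x.mean(axis=axis, keepdims=True)
var = (x ** 2).mean(axis=axis, keepdims=True) - mean ** 2
z = (x - mean) / np.sqrt(var + epsilon)
print(np.round(z.mean(axis=axis), 5), np.round(z.std(axis=axis), 3))  # ~0 and ~1 per channel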
def summary(self, h): _, losses, _ = self.run(h=h) df = pd.DataFrame(losses) df.index = ['Ensemble'] + self.model_names df.columns = [self.loss_name] return df
Summarize the results for each model for h steps of the algorithm Parameters ---------- h : int How many steps to run the aggregating algorithm on Returns ---------- - pd.DataFrame of losses for each model
def crop_image(image, label_image=None, label=1): inpixeltype = image.pixeltype ndim = image.dimension if image.pixeltype != 'float': image = image.clone('float') if label_image is None: label_image = get_mask(image) if label_image.pixeltype != 'float': label_image = label_image.clone('float') libfn = utils.get_lib_fn('cropImageF%i' % ndim) itkimage = libfn(image.pointer, label_image.pointer, label, 0, [], []) return iio.ANTsImage(pixeltype='float', dimension=ndim, components=image.components, pointer=itkimage).clone(inpixeltype)
Use a label image to crop a smaller ANTsImage from within a larger ANTsImage ANTsR function: `cropImage` Arguments --------- image : ANTsImage image to crop label_image : ANTsImage image with label values. If not supplied, estimated from data. label : integer the label value to use Returns ------- ANTsImage Example ------- >>> import ants >>> fi = ants.image_read( ants.get_ants_data('r16') ) >>> cropped = ants.crop_image(fi) >>> cropped = ants.crop_image(fi, fi, 100 )
def _condense(self, data): if data: data = filter(None,data.values()) if data: return data[-1] return None
Condense by returning the last real value of the gauge.
def _updateNamespace(item, new_namespace): temp_item = '' i = item.tag.find('}') if i >= 0: temp_item = item.tag[i+1:] else: temp_item = item.tag item.tag = '{{{0}}}{1}'.format(new_namespace, temp_item) for child in item.getiterator(): if isinstance(child.tag, six.string_types): temp_item = '' i = child.tag.find('}') if i >= 0: temp_item = child.tag[i+1:] else: temp_item = child.tag child.tag = '{{{0}}}{1}'.format(new_namespace, temp_item) return item
helper function to recursively update the namespaces of an item
def verify_path(self, mold_id_path): try: path = self.lookup_path(mold_id_path) if not exists(path): raise KeyError except KeyError: raise_os_error(ENOENT) return path
Lookup and verify path.
def is_micropython_usb_port(portName): for port in serial.tools.list_ports.comports(): if port.device == portName: return is_micropython_usb_device(port) return False
Checks to see if the indicated portname is a MicroPython device or not.
def disassemble(nex): rev_opcodes = {} for op in interpreter.opcodes: rev_opcodes[interpreter.opcodes[op]] = op r_constants = 1 + len(nex.signature) r_temps = r_constants + len(nex.constants) def getArg(pc, offset): if sys.version_info[0] < 3: arg = ord(nex.program[pc + offset]) op = rev_opcodes.get(ord(nex.program[pc])) else: arg = nex.program[pc + offset] op = rev_opcodes.get(nex.program[pc]) try: code = op.split(b'_')[1][offset - 1] except IndexError: return None if sys.version_info[0] > 2: code = bytes([code]) if arg == 255: return None if code != b'n': if arg == 0: return b'r0' elif arg < r_constants: return ('r%d[%s]' % (arg, nex.input_names[arg - 1])).encode('ascii') elif arg < r_temps: return ('c%d[%s]' % (arg, nex.constants[arg - r_constants])).encode('ascii') else: return ('t%d' % (arg,)).encode('ascii') else: return arg source = [] for pc in range(0, len(nex.program), 4): if sys.version_info[0] < 3: op = rev_opcodes.get(ord(nex.program[pc])) else: op = rev_opcodes.get(nex.program[pc]) dest = getArg(pc, 1) arg1 = getArg(pc, 2) arg2 = getArg(pc, 3) source.append((op, dest, arg1, arg2)) return source
Given a NumExpr object, return a list which is the program disassembled.
def __write_to_character_device(self, event_list, timeval=None): pos = self._character_device.tell() self._character_device.seek(0, 2) for event in event_list: self._character_device.write(event) sync = self.create_event_object("Sync", 0, 0, timeval) self._character_device.write(sync) self._character_device.seek(pos)
Emulate the Linux character device on other platforms such as Windows.
def get_alignak_macros(self): macros = self.get_alignak_configuration(macros=True) sections = self._search_sections('pack.') for name, _ in list(sections.items()): section_macros = self.get_alignak_configuration(section=name, macros=True) macros.update(section_macros) return macros
Get the Alignak macros. :return: a dict containing the Alignak macros
def get(self, url, ignore_access_time=False): key = hashlib.md5(url).hexdigest() accessed = self._cache_meta_get(key) if not accessed: self.debug("From inet {}".format(url)) return None, None if isinstance(accessed, dict): cached = CacheInfo.from_dict(accessed) else: cached = CacheInfo(accessed) now = now_utc() if now - cached.access_time > self.duration and not ignore_access_time: self.debug("From inet (expired) {}".format(url)) return None, cached try: res = self._cache_get(key) except: self.exception("Failed to read cache") self.debug("From inet (failure) {}".format(url)) return None, None self.debug("From cache {}".format(url)) return res, cached
Try to retrieve url from cache if available :param url: Url to retrieve :type url: str | unicode :param ignore_access_time: Should ignore the access time :type ignore_access_time: bool :return: (data, CacheInfo) None, None -> not found in cache None, CacheInfo -> found, but is expired data, CacheInfo -> found in cache :rtype: (None | str | unicode, None | floscraper.models.CacheInfo)
def publish(self): publisher_doc = self.get_publisher_doc() with self.published_context(): published = self.one(Q._uid == self._uid) if not published: published = self.__class__() for field, value in publisher_doc.items(): setattr(published, field, value) published.upsert() now = datetime.now() with self.draft_context(): self.get_collection().update( {'_uid': self._uid}, {'$set': {'revision': now}} ) with self.published_context(): self.get_collection().update( {'_uid': self._uid}, {'$set': {'revision': now}} )
Publish the current document. NOTE: You must have saved any changes to the draft version of the document before publishing; unsaved changes won't be published.
def copy(self, empty=False): newobject = self.__new__(self.__class__) if empty: return for prop in ["_properties","_side_properties", "_derived_properties","_build_properties" ]: if prop not in dir(self): continue try: newobject.__dict__[prop] = copy.deepcopy(self.__dict__[prop]) except: newobject.__dict__[prop] = copy.copy(self.__dict__[prop]) newobject._update_() return newobject
returns an independent copy of the current object.
def organization_fields(self, organization): return self._query_zendesk(self.endpoint.organization_fields, 'organization_field', id=organization)
Retrieve the organization fields for this organization. :param organization: Organization object or id
def _compress_tokens(tokens): recorder = None def _edge_case_stray_end_quoted(tokens, index): tokens[index] = Token(type=TokenType.UnquotedLiteral, content=tokens[index].content, line=tokens[index].line, col=tokens[index].col) tokens_len = len(tokens) index = 0 with _EdgeCaseStrayParens() as edge_case_stray_parens: edge_cases = [ (_is_paren_type, edge_case_stray_parens), (_is_end_quoted_type, _edge_case_stray_end_quoted), ] while index < tokens_len: recorder = _find_recorder(recorder, tokens, index) if recorder is not None: result = recorder.consume_token(tokens, index, tokens_len) if result is not None: (index, tokens_len, tokens) = result recorder = None else: for matcher, handler in edge_cases: if matcher(tokens[index].type): handler(tokens, index) index += 1 return tokens
Paste multi-line strings, comments, RST, etc. together. This function works by iterating over each of the _RECORDERS to determine if we should start recording a token sequence for pasting together. If it finds one, then we keep recording until that recorder is done and returns a pasted together token sequence. Keep going until we reach the end of the sequence. The sequence is modified in place, so any function that modifies it must return its new length. This is also why we use a while loop here.
def update_value(self, workspace_id, entity, value, new_value=None, new_metadata=None, new_value_type=None, new_synonyms=None, new_patterns=None, **kwargs): if workspace_id is None: raise ValueError('workspace_id must be provided') if entity is None: raise ValueError('entity must be provided') if value is None: raise ValueError('value must be provided') headers = {} if 'headers' in kwargs: headers.update(kwargs.get('headers')) sdk_headers = get_sdk_headers('conversation', 'V1', 'update_value') headers.update(sdk_headers) params = {'version': self.version} data = { 'value': new_value, 'metadata': new_metadata, 'type': new_value_type, 'synonyms': new_synonyms, 'patterns': new_patterns } url = '/v1/workspaces/{0}/entities/{1}/values/{2}'.format( *self._encode_path_vars(workspace_id, entity, value)) response = self.request( method='POST', url=url, headers=headers, params=params, json=data, accept_json=True) return response
Update entity value. Update an existing entity value with new or modified data. You must provide component objects defining the content of the updated entity value. This operation is limited to 1000 requests per 30 minutes. For more information, see **Rate limiting**. :param str workspace_id: Unique identifier of the workspace. :param str entity: The name of the entity. :param str value: The text of the entity value. :param str new_value: The text of the entity value. This string must conform to the following restrictions: - It cannot contain carriage return, newline, or tab characters. - It cannot consist of only whitespace characters. - It must be no longer than 64 characters. :param dict new_metadata: Any metadata related to the entity value. :param str new_value_type: Specifies the type of entity value. :param list[str] new_synonyms: An array of synonyms for the entity value. A value can specify either synonyms or patterns (depending on the value type), but not both. A synonym must conform to the following restrictions: - It cannot contain carriage return, newline, or tab characters. - It cannot consist of only whitespace characters. - It must be no longer than 64 characters. :param list[str] new_patterns: An array of patterns for the entity value. A value can specify either synonyms or patterns (depending on the value type), but not both. A pattern is a regular expression no longer than 512 characters. For more information about how to specify a pattern, see the [documentation](https://cloud.ibm.com/docs/services/assistant/entities.html#entities-create-dictionary-based). :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse
def reshape(self, input_shapes): indptr = [0] sdata = [] keys = [] for k, v in input_shapes.items(): if not isinstance(v, tuple): raise ValueError("Expect input_shapes to be dict str->tuple") keys.append(c_str(k)) sdata.extend(v) indptr.append(len(sdata)) new_handle = PredictorHandle() _check_call(_LIB.MXPredReshape( mx_uint(len(indptr) - 1), c_array(ctypes.c_char_p, keys), c_array(mx_uint, indptr), c_array(mx_uint, sdata), self.handle, ctypes.byref(new_handle))) _check_call(_LIB.MXPredFree(self.handle)) self.handle = new_handle
Change the input shape of the predictor. Parameters ---------- input_shapes : dict of str to tuple The new shape of input data. Examples -------- >>> predictor.reshape({'data':data_shape_tuple})
def check_return_types(self, method): mn = method.__name__ retanno = method.__annotations__.get('return', None) if not retanno: return ['Missing return types for method "{}"'.format(mn)] if not isinstance(retanno, (list, tuple)): msg = 'Return annotation for method "{}" not tuple nor list' return [msg.format(mn)] if (any(map(lambda t: not isinstance(t, (list, tuple)), retanno)) or any(map(lambda t: not (2 <= len(t) <= 3), retanno))): msg = ('Return values series for "{}" should be composed of ' '2 or 3-items tuples (code, msg, type).') return [msg.format(mn)] errors = [] declared = set([t[0] for t in retanno]) actual = set(int(s) for s in HTTP_STATUSES_REGEX.findall(method.source)) if declared != actual: if declared.issubset(actual): msg = 'Method {} returns undeclared codes: {}.' errors.append(msg.format(mn, actual - declared)) elif actual.issubset(declared): msg = 'Method {} declares codes {} that are never used.' errors.append(msg.format(mn, declared - actual)) else: msg = 'Declared {} and Used {} codes mismatch.' errors.append(msg.format(declared, actual)) ret_with_types = filter(lambda t: len(t) == 3, retanno) msg = 'Method {} return type for code {} must be class (not instance).' msg_mod = 'Method {} return type for code {} must subclass from Model.' for code, _, type_ in ret_with_types: try: if Model not in type_.__bases__: errors.append(msg_mod.format(mn, code)) except AttributeError: errors.append(msg.format(mn, code)) return errors
Return types must be correct, their codes must match actual use.
def _check_for_problem_somatic_batches(items, config): to_check = [] for data in items: data = copy.deepcopy(data) data["config"] = config_utils.update_w_custom(config, data) to_check.append(data) data_by_batches = collections.defaultdict(list) for data in to_check: batches = dd.get_batches(data) if batches: for batch in batches: data_by_batches[batch].append(data) for batch, items in data_by_batches.items(): if vcfutils.get_paired(items): vcfutils.check_paired_problems(items) elif len(items) > 1: vcs = vcfutils.get_somatic_variantcallers(items) if "vardict" in vcs: raise ValueError("VarDict does not support pooled non-tumor/normal calling, in batch %s: %s" % (batch, [dd.get_sample_name(data) for data in items])) elif "mutect" in vcs or "mutect2" in vcs: raise ValueError("MuTect and MuTect2 require a 'phenotype: tumor' sample for calling, " "in batch %s: %s" % (batch, [dd.get_sample_name(data) for data in items]))
Identify problem batch setups for somatic calling. We do not support multiple tumors in a single batch and VarDict(Java) does not handle pooled calling, only tumor/normal.