Columns: code (string, 51–2.38k characters) · docstring (string, 4–15.2k characters)
def main():
    parser, args = parser_factory()
    try:
        action_runner(parser, args)
    except Exception as uncaught:
        unhandled(uncaught, args)
        sys.exit(1)
Entrypoint, sweet Entrypoint
def encode_setid(uint128):
    hi, lo = divmod(uint128, 2**64)
    return b32encode(struct.pack('<QQ', lo, hi))[:-6].lower()
Encode uint128 setid as stripped b32encoded string
def __to_browser(self, message_no):
    filename = self.__to_file(message_no)
    try:
        command = self.config.get('General', 'browser_command')
    except (ConfigParser.NoOptionError, AttributeError):
        print 'Incorrect or missing .ini file. See --help.'
        sys.exit(5)
    command = str(command).format(filename)
    command_list = command.split(' ')
    try:
        subprocess.Popen(command_list,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    except OSError:
        print 'Unable to execute the browsercommand:'
        print command
        print 'Exiting!'
        sys.exit(21)
Write a single message to file and open the file in a browser
def run(self):
    salt.utils.process.appendproctitle(self.__class__.__name__)
    halite.start(self.hopts)
Fire up halite!
def show_env(self, env):
    self.dialog_manager.show(RemoteEnvDialog(env, parent=self))
Show environment variables.
def delete(self, alias_name, timeout=-1):
    uri = self.URI + "/" + alias_name
    return self._client.delete(uri, timeout=timeout)
Revokes a certificate signed by the internal CA. If client certificate to be revoked is RabbitMQ_readonly, then the internal CA root certificate, RabbitMQ client certificate and RabbitMQ server certificate will be regenerated. This will invalidate the previous version of RabbitMQ client certificate and the RabbitMQ server will be restarted to read the latest certificates. Args: alias_name (str): Alias name. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stop waiting for its completion.
def block(self, warn_only=False):
    self._state = "not finished"
    if self._return_code is None:
        proc = subprocess.Popen(
            self.cmd, cwd=self.cwd, env=self.env,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        self._stdout, self._stderr = proc.communicate(timeout=self.timeout)
        self._return_code = proc.returncode
        self._state = FINISHED
    if not warn_only:
        self.raise_for_error()
Blocking execution.
def config_read():
    config_file = (u"{0}config.ini".format(CONFIG_DIR))
    if not os.path.isfile(config_file):
        config_make(config_file)
    config = configparser.ConfigParser(allow_no_value=True)
    try:
        config.read(config_file, encoding='utf-8')
    except IOError:
        print("Error reading config file: {}".format(config_file))
        sys.exit()
    providers = config_prov(config)
    (cred, to_remove) = config_cred(config, providers)
    for item in to_remove:
        providers.remove(item)
    return cred, providers
Read config info from config file.
def get_calling_file(file_path=None, result='name'):
    if file_path is None:
        path = inspect.stack()[1][1]
    else:
        path = file_path
    name = path.split('/')[-1].split('.')[0]
    if result == 'name':
        return name
    elif result == 'path':
        return path
    else:
        return path, name
Retrieve file_name or file_path of calling Python script
def download(self):
    if not self.can_update():
        self._tcex.handle_error(910, [self.type])
    return self.tc_requests.download(self.api_type, self.api_sub_type, self.unique_id)
Downloads the signature. Returns: The download response from the ThreatConnect API.
def _time_query_parms(begin_time, end_time):
    query_parms = []
    if begin_time is not None:
        begin_ts = timestamp_from_datetime(begin_time)
        qp = 'begin-time={}'.format(begin_ts)
        query_parms.append(qp)
    if end_time is not None:
        end_ts = timestamp_from_datetime(end_time)
        qp = 'end-time={}'.format(end_ts)
        query_parms.append(qp)
    query_parms_str = '&'.join(query_parms)
    if query_parms_str:
        query_parms_str = '?' + query_parms_str
    return query_parms_str
Return the URI query parameter string for the specified begin time and end time.
def event_date(self, event_date):
    self._group_data['eventDate'] = self._utils.format_datetime(
        event_date, date_format='%Y-%m-%dT%H:%M:%SZ'
    )
Set the Events "event date" value.
def version(self, event):
    name = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
    return "%s [%s]" % (settings.GNOTTY_VERSION_STRING, name)
Shows version information.
def init_app(self, app):
    self._key = app.config.get(CONF_KEY) or getenv(CONF_KEY)
    if not self._key:
        return
    self._endpoint_uri = app.config.get(CONF_ENDPOINT_URI)
    sender = AsynchronousSender(self._endpoint_uri)
    queue = AsynchronousQueue(sender)
    self._channel = TelemetryChannel(None, queue)
    self._init_request_logging(app)
    self._init_trace_logging(app)
    self._init_exception_logging(app)
Initializes the extension for the provided Flask application. Args: app (flask.Flask): the Flask application for which to initialize the extension.
def load(file_object, *transformers, **kwargs):
    ignore_remaining_data = kwargs.get("ignore_remaining_data", False)
    marshaller = JavaObjectUnmarshaller(
        file_object, kwargs.get("use_numpy_arrays", False)
    )
    for transformer in transformers:
        marshaller.add_transformer(transformer)
    marshaller.add_transformer(DefaultObjectTransformer())
    return marshaller.readObject(ignore_remaining_data=ignore_remaining_data)
Deserializes Java primitive data and objects serialized using ObjectOutputStream from a file-like object. :param file_object: A file-like object :param transformers: Custom transformers to use :param ignore_remaining_data: If True, don't log an error when unused trailing bytes are remaining :return: The deserialized object
def image_feature_engineering(features, feature_tensors_dict):
    engineered_features = {}
    for name, feature_tensor in six.iteritems(feature_tensors_dict):
        if name in features and features[name]['transform'] == IMAGE_TRANSFORM:
            with tf.name_scope(name, 'Wx_plus_b'):
                hidden = tf.contrib.layers.fully_connected(
                    feature_tensor, IMAGE_HIDDEN_TENSOR_SIZE)
                engineered_features[name] = hidden
        else:
            engineered_features[name] = feature_tensor
    return engineered_features
Add a hidden layer on image features. Args: features: features dict feature_tensors_dict: dict of feature-name: tensor
def save(self, path=None):
    if path == None:
        if self._autosettings_path == None:
            return self
        gui_settings_dir = _os.path.join(_cwd, 'egg_settings')
        if not _os.path.exists(gui_settings_dir):
            _os.mkdir(gui_settings_dir)
        path = _os.path.join(gui_settings_dir, self._autosettings_path)
    d = _d.databox()
    keys, dictionary = self.get_dictionary()
    for k in keys:
        d.insert_header(k, dictionary[k])
    try:
        d.save_file(path, force_overwrite=True, header_only=True)
    except:
        print('Warning: could not save ' + path.__repr__() +
              ' once. Could be that this is being called too rapidly.')
    return self
Saves all the parameters to a text file using the databox functionality. If path=None, saves to self._autosettings_path. If self._autosettings_path=None, does not save.
def _modify_eni_properties(eni_id, properties=None, vm_=None):
    if not isinstance(properties, dict):
        raise SaltCloudException(
            'ENI properties must be a dictionary'
        )
    params = {'Action': 'ModifyNetworkInterfaceAttribute',
              'NetworkInterfaceId': eni_id}
    for k, v in six.iteritems(properties):
        params[k] = v
    result = aws.query(params,
                       return_root=True,
                       location=get_location(vm_),
                       provider=get_provider(),
                       opts=__opts__,
                       sigver='4')
    if isinstance(result, dict) and result.get('error'):
        raise SaltCloudException(
            'Could not change interface <{0}> attributes <\'{1}\'>'.format(
                eni_id, properties
            )
        )
    else:
        return result
Change properties of the interface with id eni_id to the values in properties dict
def is_system_rpm(self):
    sys_rpm_paths = [
        '/usr/bin/rpm',
        '/bin/rpm',
    ]
    matched = False
    for sys_rpm_path in sys_rpm_paths:
        if self.rpm_path.startswith(sys_rpm_path):
            matched = True
            break
    return matched
Check if the RPM is system RPM.
def reset(self):
    logger.debug('StackInABoxService ({0}): Reset'
                 .format(self.__id, self.name))
    self.base_url = '/{0}'.format(self.name)
    logger.debug('StackInABoxService ({0}): Hosting Service {1}'
                 .format(self.__id, self.name))
Reset the service to its initial state.
def find_gsc_offset(image, input_catalog='GSC1', output_catalog='GAIA'):
    serviceType = "GSCConvert/GSCconvert.aspx"
    spec_str = "TRANSFORM={}-{}&IPPPSSOOT={}"
    if 'rootname' in pf.getheader(image):
        ippssoot = pf.getval(image, 'rootname').upper()
    else:
        ippssoot = fu.buildNewRootname(image).upper()
    spec = spec_str.format(input_catalog, output_catalog, ippssoot)
    serviceUrl = "{}/{}?{}".format(SERVICELOCATION, serviceType, spec)
    rawcat = requests.get(serviceUrl)
    if not rawcat.ok:
        log.info("Problem accessing service with:\n{}".format(serviceUrl))
        raise ValueError
    delta_ra = delta_dec = None
    tree = BytesIO(rawcat.content)
    for _, element in etree.iterparse(tree):
        if element.tag == 'deltaRA':
            delta_ra = float(element.text)
        elif element.tag == 'deltaDEC':
            delta_dec = float(element.text)
    return delta_ra, delta_dec
Find the GSC to GAIA offset based on guide star coordinates Parameters ---------- image : str Filename of image to be processed. Returns ------- delta_ra, delta_dec : tuple of floats Offset in decimal degrees of image based on correction to guide star coordinates relative to GAIA.
def get_init_args(inst_type, init_args: dict, add_kwargs=False) -> Tuple[List, Dict]:
    try:
        parameters = signature(inst_type).parameters.values()
        args_keys = [p.name for p in parameters
                     if p.kind in [Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD]
                     and p.default == Parameter.empty]
        args = [init_args[key] for key in args_keys]
        kwargs = _get_var_kwargs(parameters, args_keys, init_args) \
            if add_kwargs else \
            _get_kwargs(parameters, init_args)
    except KeyError as key_error:
        msg_format = 'parameter with key "{0}" is not found in node args'
        raise RenderingError(msg_format.format(key_error.args[0]))
    return (args, kwargs)
Returns tuple with args and kwargs to pass it to inst_type constructor
def jwt_grant(request, token_uri, assertion):
    body = {
        'assertion': assertion,
        'grant_type': _JWT_GRANT_TYPE,
    }
    response_data = _token_endpoint_request(request, token_uri, body)
    try:
        access_token = response_data['access_token']
    except KeyError as caught_exc:
        new_exc = exceptions.RefreshError(
            'No access token in response.', response_data)
        six.raise_from(new_exc, caught_exc)
    expiry = _parse_expiry(response_data)
    return access_token, expiry, response_data
Implements the JWT Profile for OAuth 2.0 Authorization Grants. For more details, see `rfc7523 section 4`_. Args: request (google.auth.transport.Request): A callable used to make HTTP requests. token_uri (str): The OAuth 2.0 authorizations server's token endpoint URI. assertion (str): The OAuth 2.0 assertion. Returns: Tuple[str, Optional[datetime], Mapping[str, str]]: The access token, expiration, and additional data returned by the token endpoint. Raises: google.auth.exceptions.RefreshError: If the token endpoint returned an error. .. _rfc7523 section 4: https://tools.ietf.org/html/rfc7523#section-4
def limit(self, v):
    if not (v is None or isinstance(v, six.integer_types)):
        raise TypeError
    if v == self._limit:
        return self
    if v < 0:
        raise QueryException("Negative limit is not allowed")
    clone = copy.deepcopy(self)
    clone._limit = v
    return clone
Sets the limit on the number of results returned. CQL has a default limit of 10,000.
def rank_member(self, member, score, member_data=None):
    self.rank_member_in(self.leaderboard_name, member, score, member_data)
Rank a member in the leaderboard. @param member [String] Member name. @param score [float] Member score. @param member_data [String] Optional member data.
def mutate_json_record(self, json_record):
    for attr_name in json_record:
        attr = json_record[attr_name]
        if isinstance(attr, datetime):
            json_record[attr_name] = attr.isoformat()
    return json_record
Override it to convert fields of `json_record` to needed types. Default implementation converts `datetime` to string in ISO8601 format.
def _chk_docopts(self, kws):
    outfile = kws['outfile']
    if len(kws) == 2 and os.path.basename(kws['obo']) == "go-basic.obo" and \
            kws['outfile'] == self.dflt_outfile:
        self._err("NO GO IDS SPECIFIED", err=False)
    if 'obo' in outfile:
        self._err("BAD outfile({O})".format(O=outfile))
    if 'gaf' in kws and 'gene2go' in kws:
        self._err("SPECIFY ANNOTATIONS FROM ONE FILE")
    if 'gene2go' in kws:
        if 'taxid' not in kws:
            self._err("SPECIFY taxid WHEN READING NCBI'S gene2go FILE")
Check for common user command-line errors.
def save_matpower(self, fd):
    from pylon.io import MATPOWERWriter
    MATPOWERWriter(self).write(fd)
Serialize the case as a MATPOWER data file.
def validate_digit(value, start, end):
    if not str(value).isdigit() or int(value) < start or int(value) > end:
        raise ValueError('%s must be a digit from %s to %s' % (value, start, end))
Validate that a value is a digit within the range from start to end.
def remove_folder(self, tree, prefix):
    while True:
        child = tree
        tree = tree.parent
        if not child.folders and not child.files:
            del self.cache[tuple(prefix)]
            if tree:
                del tree.folders[prefix.pop()]
        if not tree or tree.folders or tree.files:
            break
Used to remove any empty folders If this folder is empty then it is removed. If the parent is empty as a result, then the parent is also removed, and so on.
def debug(self, i: int = None) -> str:
    head = "[" + colors.yellow("debug") + "]"
    if i is not None:
        head = str(i) + " " + head
    return head
Returns a debug message
def insert_child(self, child_pid):
    self._check_child_limits(child_pid)
    try:
        with db.session.begin_nested():
            if not isinstance(child_pid, PersistentIdentifier):
                child_pid = resolve_pid(child_pid)
            return PIDRelation.create(
                self._resolved_pid, child_pid, self.relation_type.id, None
            )
    except IntegrityError:
        raise PIDRelationConsistencyError("PID Relation already exists.")
Add the given PID to the list of children PIDs.
async def list_state(self, request):
    paging_controls = self._get_paging_controls(request)
    head, root = await self._head_to_root(request.url.query.get('head', None))
    validator_query = client_state_pb2.ClientStateListRequest(
        state_root=root,
        address=request.url.query.get('address', None),
        sorting=self._get_sorting_message(request, "default"),
        paging=self._make_paging_message(paging_controls))
    response = await self._query_validator(
        Message.CLIENT_STATE_LIST_REQUEST,
        client_state_pb2.ClientStateListResponse,
        validator_query)
    return self._wrap_paginated_response(
        request=request,
        response=response,
        controls=paging_controls,
        data=response.get('entries', []),
        head=head)
Fetches list of data entries, optionally filtered by address prefix. Request: query: - head: The id of the block to use as the head of the chain - address: Return entries whose addresses begin with this prefix Response: data: An array of leaf objects with address and data keys head: The head used for this query (most recent if unspecified) link: The link to this exact query, including head block paging: Paging info and nav, like total resources and a next link
def context(self):
    parent = _ACTION_CONTEXT.set(self)
    try:
        yield self
    finally:
        _ACTION_CONTEXT.reset(parent)
Create a context manager that ensures code runs within action's context. The action does NOT finish when the context is exited.
def parse_alert(output):
    for x in output.splitlines():
        match = ALERT_PATTERN.match(x)
        if match:
            rec = {'timestamp': datetime.strptime(match.group('timestamp'),
                                                  '%m/%d/%y-%H:%M:%S.%f'),
                   'sid': int(match.group('sid')),
                   'revision': int(match.group('revision')),
                   'priority': int(match.group('priority')),
                   'message': match.group('message'),
                   'source': match.group('src'),
                   'destination': match.group('dest'),
                   'protocol': match.group('protocol'),
                   }
            if match.group('classtype'):
                rec['classtype'] = match.group('classtype')
            yield rec
Parses the supplied output and yields any alerts. Example alert format: 01/28/14-22:26:04.885446 [**] [1:1917:11] INDICATOR-SCAN UPnP service discover attempt [**] [Classification: Detection of a Network Scan] [Priority: 3] {UDP} 10.1.1.132:58650 -> 239.255.255.250:1900 :param output: A string containing the output of running snort :returns: Generator of snort alert dicts
def set_row_gap(self, value):
    value = str(value) + 'px'
    value = value.replace('pxpx', 'px')
    self.style['grid-row-gap'] = value
Sets the gap value between rows Args: value (int or str): gap value (i.e. 10 or "10px")
def conditional_expected_average_profit(self, frequency=None, monetary_value=None):
    if monetary_value is None:
        monetary_value = self.data["monetary_value"]
    if frequency is None:
        frequency = self.data["frequency"]
    p, q, v = self._unload_params("p", "q", "v")
    individual_weight = p * frequency / (p * frequency + q - 1)
    population_mean = v * p / (q - 1)
    return (1 - individual_weight) * population_mean + individual_weight * monetary_value
Conditional expectation of the average profit. This method computes the conditional expectation of the average profit per transaction for a group of one or more customers. Parameters ---------- frequency: array_like, optional a vector containing the customers' frequencies. Defaults to the whole set of frequencies used for fitting the model. monetary_value: array_like, optional a vector containing the customers' monetary values. Defaults to the whole set of monetary values used for fitting the model. Returns ------- array_like: The conditional expectation of the average profit per transaction
def _ParseDistributedTrackingIdentifier(self, parser_mediator, uuid_object, origin):
    if uuid_object.version == 1:
        event_data = windows_events.WindowsDistributedLinkTrackingEventData(
            uuid_object, origin)
        date_time = dfdatetime_uuid_time.UUIDTime(timestamp=uuid_object.time)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_CREATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)
    return '{{{0!s}}}'.format(uuid_object)
Extracts data from a Distributed Tracking identifier. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. uuid_object (uuid.UUID): UUID of the Distributed Tracking identifier. origin (str): origin of the event (event source). Returns: str: UUID string of the Distributed Tracking identifier.
def allow_port(port, proto='tcp', direction='both'):
    ports = get_ports(proto=proto, direction=direction)
    direction = direction.upper()
    _validate_direction_and_proto(direction, proto)
    directions = build_directions(direction)
    results = []
    for direction in directions:
        _ports = ports[direction]
        _ports.append(port)
        results += allow_ports(_ports, proto=proto, direction=direction)
    return results
Like allow_ports, but it will append to the existing entry instead of replacing it. Takes a single port instead of a list of ports. CLI Example: .. code-block:: bash salt '*' csf.allow_port 22 proto='tcp' direction='in'
def _save(self):
    if self.__modified_flag:
        self.__filename_rep.update_id_counter()
        indexfilename = os.path.join(self.__dir, "index.dat")
        self._write_file(indexfilename, (self.__index, self.__filename_rep))
        self.__modified_flag = False
save the cache index, in case it was modified. Saves the index table and the file name repository in the file `index.dat`
def order_by(self, *field_names):
    obj = self._clone()
    obj._clear_ordering()
    self._insert_ordering(obj, *field_names)
    return obj
Returns a new QuerySet instance with the ordering changed. Supports the special field "_random" for random ordering.
def check_updates(self, startup=False):
    from spyder.workers.updates import WorkerUpdates
    self.check_updates_action.setDisabled(True)
    if self.thread_updates is not None:
        self.thread_updates.terminate()
    self.thread_updates = QThread(self)
    self.worker_updates = WorkerUpdates(self, startup=startup)
    self.worker_updates.sig_ready.connect(self._check_updates_ready)
    self.worker_updates.sig_ready.connect(self.thread_updates.quit)
    self.worker_updates.moveToThread(self.thread_updates)
    self.thread_updates.started.connect(self.worker_updates.start)
    self.thread_updates.start()
Check for spyder updates on github releases using a QThread.
def add_metadata_defaults(md):
    defaults = {"batch": None, "phenotype": ""}
    for k, v in defaults.items():
        if k not in md:
            md[k] = v
    return md
Central location for defaults for algorithm inputs.
def add_batch_parser(subparsers, parent_parser):
    parser = subparsers.add_parser(
        'batch',
        help='Displays information about batches and submit new batches',
        description='Provides subcommands to display Batch information and '
                    'submit Batches to the validator via the REST API.')
    grand_parsers = parser.add_subparsers(title='subcommands', dest='subcommand')
    grand_parsers.required = True
    add_batch_list_parser(grand_parsers, parent_parser)
    add_batch_show_parser(grand_parsers, parent_parser)
    add_batch_status_parser(grand_parsers, parent_parser)
    add_batch_submit_parser(grand_parsers, parent_parser)
Adds arguments parsers for the batch list, batch show and batch status commands Args: subparsers: Add parsers to this subparser object parent_parser: The parent argparse.ArgumentParser object
def _warn_on_old_config(self):
    old_sections = ['Allowed Applications', 'Ignored Applications']
    for old_section in old_sections:
        if self._parser.has_section(old_section):
            error("Old config file detected. Aborting.\n"
                  "\n"
                  "An old section (e.g. [Allowed Applications]"
                  " or [Ignored Applications] has been detected"
                  " in your {} file.\n"
                  "I'd rather do nothing than do something you"
                  " do not want me to do.\n"
                  "\n"
                  "Please read the up to date documentation on"
                  " <https://github.com/lra/mackup> and migrate"
                  " your configuration file."
                  .format(MACKUP_CONFIG_FILE))
Warn the user if an old config format is detected.
def update_cache(force=False, cache_file=None):
    if not cache_file:
        cache_file = find_config()
    cache_config = devpipeline_configure.parser.read_config(cache_file)
    cache = devpipeline_configure.cache._CachedConfig(cache_config, cache_file)
    if force or _is_outdated(cache_file, cache):
        cache = devpipeline_configure.config.process_config(
            cache_config.get("DEFAULT", "dp.build_config"),
            os.path.dirname(cache_file),
            "build.cache",
            profiles=cache_config.get("DEFAULT", "dp.profile_name", fallback=None),
            overrides=cache_config.get("DEFAULT", "dp.overrides", fallback=None),
        )
        devpipeline_core.sanitizer.sanitize(
            cache, lambda n, m: print("{} [{}]".format(m, n))
        )
    return cache
Load a build cache, updating it if necessary. A cache is considered outdated if any of its inputs have changed. Arguments force -- Consider a cache outdated regardless of whether its inputs have been modified.
def load_data():
    digits = load_digits()
    X_train, X_test, y_train, y_test = train_test_split(
        digits.data, digits.target, random_state=99, test_size=0.25)
    ss = StandardScaler()
    X_train = ss.fit_transform(X_train)
    X_test = ss.transform(X_test)
    return X_train, X_test, y_train, y_test
Load dataset; uses the scikit-learn digits dataset.
def _vertical_x(axis, ticks=None, max_width=5):
    if ticks is None:
        ticks = axis.get_xticks()
    if (np.array(ticks) == np.rint(ticks)).all():
        ticks = np.rint(ticks).astype(np.int)
    if max([len(str(tick)) for tick in ticks]) > max_width:
        axis.set_xticklabels(ticks, rotation='vertical')
Switch labels to vertical if they are long.
def read_dataset(args, dataset):
    path = os.path.join(vars(args)[dataset])
    logger.info('reading data from {}'.format(path))
    examples = [line.strip().split('\t') for line in open(path)]
    if args.max_num_examples > 0:
        examples = examples[:args.max_num_examples]
    dataset = gluon.data.SimpleDataset([(e[0], e[1], LABEL_TO_IDX[e[2]])
                                        for e in examples])
    dataset = dataset.transform(lambda s1, s2, label: (
        ['NULL'] + s1.lower().split(),
        ['NULL'] + s2.lower().split(),
        label), lazy=False)
    logger.info('read {} examples'.format(len(dataset)))
    return dataset
Read dataset from tokenized files.
def writeline(self, data, crlf="\r\n"):
    if self.read_thread:
        if self.read_thread.has_error():
            raise RuntimeError("Error writing PIPE")
    if self.proc.poll() is not None:
        raise RuntimeError("Process stopped")
    if self.__print_io:
        self.logger.info(data, extra={'type': '-->'})
    self.proc.stdin.write(bytearray(data + crlf, 'ascii'))
    self.proc.stdin.flush()
Writeline implementation. :param data: Data to write :param crlf: Line end characters, default is \r\n :return: Nothing :raises: RuntimeError if errors happen while writing to PIPE or process stops.
def ngram_similarity(samegrams, allgrams, warp=1.0):
    if abs(warp - 1.0) < 1e-9:
        similarity = float(samegrams) / allgrams
    else:
        diffgrams = float(allgrams - samegrams)
        similarity = ((allgrams ** warp - diffgrams ** warp) /
                      (allgrams ** warp))
    return similarity
Similarity for two sets of n-grams. :note: ``similarity = (a**e - d**e)/a**e`` where `a` is \ "all n-grams", `d` is "different n-grams" and `e` is the warp. :param samegrams: number of n-grams shared by the two strings. :param allgrams: total of the distinct n-grams across the two strings. :return: similarity in the range 0.0 to 1.0. >>> from ngram import NGram >>> NGram.ngram_similarity(5, 10) 0.5 >>> NGram.ngram_similarity(5, 10, warp=2) 0.75 >>> NGram.ngram_similarity(5, 10, warp=3) 0.875 >>> NGram.ngram_similarity(2, 4, warp=2) 0.75 >>> NGram.ngram_similarity(3, 4) 0.75
def register_endpoint(self, path, app=None):
    if is_running_from_reloader() and not os.environ.get('DEBUG_METRICS'):
        return
    if app is None:
        app = self.app or current_app

    @app.route(path)
    @self.do_not_track()
    def prometheus_metrics():
        from prometheus_client import multiprocess, CollectorRegistry
        if 'prometheus_multiproc_dir' in os.environ:
            registry = CollectorRegistry()
        else:
            registry = self.registry
        if 'name[]' in request.args:
            registry = registry.restricted_registry(request.args.getlist('name[]'))
        if 'prometheus_multiproc_dir' in os.environ:
            multiprocess.MultiProcessCollector(registry)
        headers = {'Content-Type': CONTENT_TYPE_LATEST}
        return generate_latest(registry), 200, headers
Register the metrics endpoint on the Flask application. :param path: the path of the endpoint :param app: the Flask application to register the endpoint on (by default it is the application registered with this class)
def setOverlayWidthInMeters(self, ulOverlayHandle, fWidthInMeters):
    fn = self.function_table.setOverlayWidthInMeters
    result = fn(ulOverlayHandle, fWidthInMeters)
    return result
Sets the width of the overlay quad in meters. By default overlays are rendered on a quad that is 1 meter across
def _norm_squared(args: Dict[str, Any]) -> float:
    state = _state_shard(args)
    return np.sum(np.abs(state) ** 2)
Returns the squared norm of the state shard.
def open(self):
    if self.is_active:
        raise ValueError("Can not open an already open stream.")
    request_generator = _RequestQueueGenerator(
        self._request_queue, initial_request=self._initial_request
    )
    call = self._start_rpc(iter(request_generator), metadata=self._rpc_metadata)
    request_generator.call = call
    if hasattr(call, "_wrapped"):
        call._wrapped.add_done_callback(self._on_call_done)
    else:
        call.add_done_callback(self._on_call_done)
    self._request_generator = request_generator
    self.call = call
Opens the stream.
def randomize(length=6, choices=None):
    if type(choices) == str:
        choices = list(choices)
    choices = choices or ascii_lowercase
    return "".join(choice(choices) for _ in range(length))
Returns a random string of the given length.
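A brief usage sketch of the helper above (outputs are random, so only their shape is indicated; assumes `choice` and `ascii_lowercase` are imported from `random` and `string` as in the source module):

    randomize()                 # e.g. 'qzrkbm' -- 6 random lowercase letters
    randomize(4)                # 4 random lowercase letters
    randomize(8, choices='01')  # 8 random characters drawn from '0' and '1'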
def delete(self, force=False):
    if self.model is None:
        raise WorkflowsMissingModel()
    with db.session.begin_nested():
        db.session.delete(self.model)
    return self
Delete a workflow object. If `force` is ``False``, the record is soft-deleted, i.e. the record stays in the database. This ensures e.g. that the same record identifier cannot be used twice, and that you can still retrieve the history of an object. If `force` is True, the record is completely removed from the database. :param force: Completely remove record from database.
def to_dict(self):
    if self.version < VERSION_3:
        if len(self._caveat_data) > 0:
            raise ValueError('cannot serialize pre-version3 macaroon with '
                             'external caveat data')
        return json.loads(self._macaroon.serialize(
            json_serializer.JsonSerializer()))
    serialized = {
        'm': json.loads(self._macaroon.serialize(
            json_serializer.JsonSerializer())),
        'v': self._version,
    }
    if self._namespace is not None:
        serialized['ns'] = self._namespace.serialize_text().decode('utf-8')
    caveat_data = {}
    for id in self._caveat_data:
        key = base64.b64encode(id).decode('utf-8')
        value = base64.b64encode(self._caveat_data[id]).decode('utf-8')
        caveat_data[key] = value
    if len(caveat_data) > 0:
        serialized['cdata'] = caveat_data
    return serialized
Return a dict representation of the macaroon data in JSON format. @return a dict
def data_shape(self):
    if not self.header:
        return -1
    try:
        nx = self.header['nx']['value']
        ny = self.header['ny']['value']
        nz = self.header['nz']['value']
    except KeyError:
        return -1
    else:
        return tuple(int(n) for n in (nx, ny, nz))
Shape tuple of the whole data block as determined from `header`. If no header is available (i.e., before it has been initialized), or any of the header entries ``'nx', 'ny', 'nz'`` is missing, -1 is returned, which makes reshaping a no-op. Otherwise, the returned shape is ``(nx, ny, nz)``. Note: this is the shape of the data as defined by the header. For a non-trivial axis ordering, the shape of actual data will be different. See Also -------- data_storage_shape data_axis_order
def output_row(self, name):
    "Output a scoring row."
    print("%10s %4d %0.3f %0.3f %0.3f" % (
        name, self.gold, self.precision(), self.recall(), self.fscore()))
Output a scoring row.
def get_mutations(study_id, gene_list, mutation_type=None, case_id=None):
    genetic_profile = get_genetic_profiles(study_id, 'mutation')[0]
    gene_list_str = ','.join(gene_list)
    data = {'cmd': 'getMutationData',
            'case_set_id': study_id,
            'genetic_profile_id': genetic_profile,
            'gene_list': gene_list_str,
            'skiprows': -1}
    df = send_request(**data)
    if case_id:
        df = df[df['case_id'] == case_id]
    res = _filter_data_frame(df, ['gene_symbol', 'amino_acid_change'],
                             'mutation_type', mutation_type)
    mutations = {'gene_symbol': list(res['gene_symbol'].values()),
                 'amino_acid_change': list(res['amino_acid_change'].values())}
    return mutations
Return mutations as a list of genes and list of amino acid changes. Parameters ---------- study_id : str The ID of the cBio study. Example: 'cellline_ccle_broad' or 'paad_icgc' gene_list : list[str] A list of genes with their HGNC symbols. Example: ['BRAF', 'KRAS'] mutation_type : Optional[str] The type of mutation to filter to. mutation_type can be one of: missense, nonsense, frame_shift_ins, frame_shift_del, splice_site case_id : Optional[str] The case ID within the study to filter to. Returns ------- mutations : tuple[list] A tuple of two lists, the first one containing a list of genes, and the second one a list of amino acid changes in those genes.
def url_for(obj, **kw):
    if isinstance(obj, str):
        return flask_url_for(obj, **kw)
    try:
        return current_app.default_view.url_for(obj, **kw)
    except KeyError:
        if hasattr(obj, "_url"):
            return obj._url
        elif hasattr(obj, "url"):
            return obj.url
    raise BuildError(repr(obj), kw, "GET")
Polymorphic variant of Flask's `url_for` function. Behaves like the original function when the first argument is a string. When it's an object, it looks up the application's default view for that object, falling back to the object's `_url` or `url` attribute.
def compare_lists(old=None, new=None):
    ret = dict()
    for item in new:
        if item not in old:
            ret['new'] = item
    for item in old:
        if item not in new:
            ret['old'] = item
    return ret
Compare before and after results from various salt functions, returning a dict describing the changes that were made
def _configure_buffer_sizes():
    global PIPE_BUF_BYTES
    global OS_PIPE_SZ
    PIPE_BUF_BYTES = 65536
    OS_PIPE_SZ = None
    if not hasattr(fcntl, 'F_SETPIPE_SZ'):
        import platform
        if platform.system() == 'Linux':
            fcntl.F_SETPIPE_SZ = 1031
    try:
        with open('/proc/sys/fs/pipe-max-size', 'r') as f:
            OS_PIPE_SZ = min(int(f.read()), 1024 * 1024)
            PIPE_BUF_BYTES = max(OS_PIPE_SZ, PIPE_BUF_BYTES)
    except Exception:
        pass
Set up module globals controlling buffer sizes
def get_dimension_by_name(dimension_name, **kwargs):
    try:
        if dimension_name is None:
            dimension_name = ''
        dimension = db.DBSession.query(Dimension).filter(
            func.lower(Dimension.name) == func.lower(dimension_name.strip())).one()
        return get_dimension(dimension.id)
    except NoResultFound:
        raise ResourceNotFoundError("Dimension %s not found" % (dimension_name))
Given a dimension name returns all its data. Used in convert functions
def show_driver(devname):
    try:
        module = ethtool.get_module(devname)
    except IOError:
        log.error('Driver information not implemented on %s', devname)
        return 'Not implemented'
    try:
        businfo = ethtool.get_businfo(devname)
    except IOError:
        log.error('Bus information not available on %s', devname)
        return 'Not available'
    ret = {
        'driver': module,
        'bus_info': businfo,
    }
    return ret
Queries the specified network device for associated driver information CLI Example: .. code-block:: bash salt '*' ethtool.show_driver <devname>
def _notify_fn(self):
    self._notifyrunning = True
    while self._notifyrunning:
        try:
            with IHCController._mutex:
                if self._newnotifyids:
                    self.client.enable_runtime_notifications(
                        self._newnotifyids)
                    self._newnotifyids = []
            changes = self.client.wait_for_resource_value_changes()
            if changes is False:
                self.re_authenticate(True)
                continue
            for ihcid in changes:
                value = changes[ihcid]
                if ihcid in self._ihcevents:
                    for callback in self._ihcevents[ihcid]:
                        callback(ihcid, value)
        except Exception as exp:
            self.re_authenticate(True)
The notify thread function.
def inMicrolensRegion_main(args=None):
    import argparse
    parser = argparse.ArgumentParser(
        description="Check if a celestial coordinate is "
                    "inside the K2C9 microlensing superstamp.")
    parser.add_argument('ra', nargs=1, type=float,
                        help="Right Ascension in decimal degrees (J2000).")
    parser.add_argument('dec', nargs=1, type=float,
                        help="Declination in decimal degrees (J2000).")
    args = parser.parse_args(args)
    if inMicrolensRegion(args.ra[0], args.dec[0]):
        print("Yes! The coordinate is inside the K2C9 superstamp.")
    else:
        print("Sorry, the coordinate is NOT inside the K2C9 superstamp.")
Exposes inMicrolensRegion to the command line.
def create_tarfile(files: List[str], tar_path: str) -> None:
    with tarfile.open(tar_path, "w:gz") as tar:
        for f in files:
            tar.add(f)
Create a tar file based on the list of files passed
def model_fields(model, allow_pk=False, only=None, exclude=None,
                 field_args=None, converter=None):
    converter = converter or ModelConverter()
    field_args = field_args or {}
    model_fields = list(model._meta.sorted_fields)
    if not allow_pk:
        model_fields.pop(0)
    if only:
        model_fields = [x for x in model_fields if x.name in only]
    elif exclude:
        model_fields = [x for x in model_fields if x.name not in exclude]
    field_dict = {}
    for model_field in model_fields:
        name, field = converter.convert(
            model, model_field, field_args.get(model_field.name))
        field_dict[name] = field
    return field_dict
Generate a dictionary of fields for a given Peewee model. See `model_form` docstring for description of parameters.
def receive(self, content, **kwargs):
    self.connection_context = DjangoChannelConnectionContext(self.message)
    self.subscription_server = DjangoChannelSubscriptionServer(graphene_settings.SCHEMA)
    self.subscription_server.on_open(self.connection_context)
    self.subscription_server.handle(content, self.connection_context)
Called when a message is received with either text or bytes filled out.
def add_specification(self, specification):
    name = specification.name()
    if name in self.__specs:
        raise ValueError(
            'WStrictURIQuery object already has specification for parameter "%s"' % name)
    self.__specs[name] = specification
Add a new query parameter specification. If this object already has a specification for the specified parameter - exception is raised. No checks for the specified or any parameter are made regarding specification appending :param specification: new specification that will be added :return: None
def __xinclude_libxml2(target, source, env):
    doc = libxml2.readFile(str(source[0]), None, libxml2.XML_PARSE_NOENT)
    doc.xincludeProcessFlags(libxml2.XML_PARSE_NOENT)
    doc.saveFile(str(target[0]))
    doc.freeDoc()
    return None
Resolving XIncludes, using the libxml2 module.
def open(cls, grammar_filename, rel_to=None, **options):
    if rel_to:
        basepath = os.path.dirname(rel_to)
        grammar_filename = os.path.join(basepath, grammar_filename)
    with open(grammar_filename, encoding='utf8') as f:
        return cls(f, **options)
Create an instance of Lark with the grammar given by its filename If rel_to is provided, the function will find the grammar filename in relation to it. Example: >>> Lark.open("grammar_file.lark", rel_to=__file__, parser="lalr") Lark(...)
def fetch(self, x, y, w, h):
    if not at_least_libvips(8, 8):
        raise Error('libvips too old')
    psize = ffi.new('size_t *')
    pointer = vips_lib.vips_region_fetch(self.pointer, x, y, w, h, psize)
    if pointer == ffi.NULL:
        raise Error('unable to fetch from region')
    pointer = ffi.gc(pointer, glib_lib.g_free)
    return ffi.buffer(pointer, psize[0])
Fill a region with pixel data. Pixels are filled with data! Returns: Pixel data. Raises: :class:`.Error`
def transform(self, attrs):
    self.collect(attrs)
    self.add_missing_implementations()
    self.fill_attrs(attrs)
Perform all actions on a given attribute dict.
def __validate_path_parameters(self, field, path_parameters):
    for param in path_parameters:
        segment_list = param.split('.')
        if segment_list[0] != field.name:
            raise TypeError('Subfield %r can\'t come from field %r.'
                            % (param, field.name))
        self.__validate_simple_subfield(field.name, field, segment_list[1:])
Verifies that all path parameters correspond to an existing subfield. Args: field: An instance of a subclass of messages.Field. Should be the root level property name in each path parameter in path_parameters. For example, if the field is called 'foo', then each path parameter should begin with 'foo.'. path_parameters: A list of Strings representing URI parameter variables. Raises: TypeError: If one of the path parameters does not start with field.name.
def group_shelf_fqfn(self):
    if self._group_shelf_fqfn is None:
        self._group_shelf_fqfn = os.path.join(
            self.tcex.args.tc_temp_path, 'groups-{}'.format(str(uuid.uuid4()))
        )
        if self.saved_groups:
            self._group_shelf_fqfn = os.path.join(
                self.tcex.args.tc_temp_path, 'groups-saved')
    return self._group_shelf_fqfn
Return groups shelf fully qualified filename. For testing/debugging a previous shelf file can be copied into the tc_temp_path directory instead of creating a new shelf file.
def __call_api(self, path, params=None, api_url=FORECAST_URL):
    if not params:
        params = dict()
    payload = {'key': self.api_key}
    payload.update(params)
    url = "%s/%s" % (api_url, path)
    sess = self.__retry_session()
    req = sess.get(url, params=payload, timeout=1)
    try:
        data = req.json()
    except ValueError:
        raise APIException("DataPoint has not returned any data, "
                           "this could be due to an incorrect API key")
    self.call_response = data
    if req.status_code != 200:
        msg = [data[m] for m in ("message", "error_message", "status")
               if m in data][0]
        raise Exception(msg)
    return data
Call the datapoint api using the requests module
def _dehex(s):
    import re
    import binascii
    s = re.sub(br'[^a-fA-F\d]', b'', s)
    return binascii.unhexlify(s)
Liberally convert from hex string to binary string.
def hosting_devices_assigned_to_cfg_agent(self, context, ids, host):
    self._host_notification(context,
                            'hosting_devices_assigned_to_cfg_agent',
                            {'hosting_device_ids': ids}, host)
Notify cfg agent to now handle some hosting devices. This notification relieves the cfg agent in <host> of responsibility to monitor and configure hosting devices with id specified in <ids>.
def show_tables():
    _State.connection()
    _State.reflect_metadata()
    metadata = _State.metadata
    response = select('name, sql from sqlite_master where type="table"')
    return {row['name']: row['sql'] for row in response}
Return a mapping of the names of the tables currently in the database to their SQL definitions.
def binary_dumps(obj, alt_format=False):
    return b''.join(_binary_dump_gen(obj, alt_format=alt_format))
Serialize ``obj`` to a binary VDF formatted ``bytes``.
def _compute_quads(self, element, data, mapping):
    quad_mapping = {'left': 'x0', 'right': 'x1', 'bottom': 'y0', 'top': 'y1'}
    quad_data = dict(data['scatter_1'])
    quad_data.update({'x0': [], 'x1': [], 'y0': [], 'y1': []})
    for node in element._sankey['nodes']:
        quad_data['x0'].append(node['x0'])
        quad_data['y0'].append(node['y0'])
        quad_data['x1'].append(node['x1'])
        quad_data['y1'].append(node['y1'])
    data['scatter_1'].update(quad_data)
    data['quad_1'] = data['scatter_1']
    mapping['quad_1'] = quad_mapping
Computes the node quad glyph data.
def architecture(self):
    arch = {'class': self.__class__,
            'n_in': self.n_in,
            'n_units': self.n_units,
            'activation_function': self.activation_function
            if hasattr(self, 'activation_function') else None}
    return arch
Returns a dictionary describing the architecture of the layer.
def handle(self, record):
    record = self.prepare(record)
    for handler in self.handlers:
        handler(record)
Handle an item. This just loops through the handlers offering them the record to handle.
def createComment(self, *args, **kwargs):
    return self._makeApiCall(self.funcinfo["createComment"], *args, **kwargs)
Post a comment on a given GitHub Issue or Pull Request For a given Issue or Pull Request of a repository, this will write a new message. This method takes input: ``v1/create-comment.json#`` This method is ``experimental``
def Cinv(self):
    try:
        return np.linalg.inv(self.c)
    except np.linalg.linalg.LinAlgError:
        print('Warning: non-invertible noise covariance matrix c.')
        return np.eye(self.c.shape[0])
Inverse of the noise covariance.
def resize_psf(psf, input_pixel_scale, output_pixel_scale, order=3):
    from scipy.ndimage import zoom
    ratio = input_pixel_scale / output_pixel_scale
    return zoom(psf, ratio, order=order) / ratio**2
Resize a PSF using spline interpolation of the requested order. Parameters ---------- psf : 2D `~numpy.ndarray` The 2D data array of the PSF. input_pixel_scale : float The pixel scale of the input ``psf``. The units must match ``output_pixel_scale``. output_pixel_scale : float The pixel scale of the output ``psf``. The units must match ``input_pixel_scale``. order : float, optional The order of the spline interpolation (0-5). The default is 3. Returns ------- result : 2D `~numpy.ndarray` The resampled/interpolated 2D data array.
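A short usage sketch (array values are illustrative only; the division by `ratio**2` in the code above keeps the total flux of the PSF roughly constant when resampling):

    import numpy as np

    psf = np.ones((25, 25)) / 625.0  # toy PSF normalized so it sums to 1
    resampled = resize_psf(psf, input_pixel_scale=0.05, output_pixel_scale=0.025)
    # ratio = 0.05 / 0.025 = 2, so the output is ~50x50 and still sums to ~1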
def psychrometric_vapor_pressure_wet(dry_bulb_temperature, wet_bulb_temperature,
                                     pressure,
                                     psychrometer_coefficient=6.21e-4 / units.kelvin):
    return (saturation_vapor_pressure(wet_bulb_temperature) -
            psychrometer_coefficient * pressure *
            (dry_bulb_temperature - wet_bulb_temperature).to('kelvin'))
r"""Calculate the vapor pressure with wet bulb and dry bulb temperatures. This uses a psychrometric relationship as outlined in [WMO8-2014]_, with coefficients from [Fan1987]_. Parameters ---------- dry_bulb_temperature: `pint.Quantity` Dry bulb temperature wet_bulb_temperature: `pint.Quantity` Wet bulb temperature pressure: `pint.Quantity` Total atmospheric pressure psychrometer_coefficient: `pint.Quantity`, optional Psychrometer coefficient. Defaults to 6.21e-4 K^-1. Returns ------- `pint.Quantity` Vapor pressure Notes ----- .. math:: e' = e'_w(T_w) - A p (T - T_w) * :math:`e'` is vapor pressure * :math:`e'_w(T_w)` is the saturation vapor pressure with respect to water at temperature :math:`T_w` * :math:`p` is the pressure of the wet bulb * :math:`T` is the temperature of the dry bulb * :math:`T_w` is the temperature of the wet bulb * :math:`A` is the psychrometer coefficient Psychrometer coefficient depends on the specific instrument being used and the ventilation of the instrument. See Also -------- saturation_vapor_pressure
def flags(self, index):
    if not index.isValid():
        return Qt.ItemIsEnabled
    return Qt.ItemFlags(QAbstractTableModel.flags(self, index) |
                        Qt.ItemIsEditable)
Overriding method flags
def rm(self, path):
    resp = self._sendRequest("DELETE", path)
    if not (resp.status_code in (200, 204)):
        raise YaDiskException(resp.status_code, resp.content)
Delete file or directory.
def put(self, rownr, value, matchingfields=True):
    self._put(rownr, value, matchingfields)
Put the values into the given row. The value should be a dict (as returned by method :func:`get`. The names of the fields in the dict should match the names of the columns used in the `tablerow` object. `matchingfields=True` means that the value may contain more fields and only fields matching a column name will be used.
def parse_cuda_device(cuda_device: Union[str, int, List[int]]) -> Union[int, List[int]]:
    def from_list(strings):
        if len(strings) > 1:
            return [int(d) for d in strings]
        elif len(strings) == 1:
            return int(strings[0])
        else:
            return -1

    if isinstance(cuda_device, str):
        return from_list(re.split(r',\s*', cuda_device))
    elif isinstance(cuda_device, int):
        return cuda_device
    elif isinstance(cuda_device, list):
        return from_list(cuda_device)
    else:
        return int(cuda_device)
Disambiguates single GPU and multiple GPU settings for cuda_device param.
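A minimal usage sketch, with return values read directly off the branches of the function above:

    parse_cuda_device("0,1,2")  # -> [0, 1, 2]
    parse_cuda_device("3")      # -> 3
    parse_cuda_device([0, 1])   # -> [0, 1]
    parse_cuda_device(2)        # -> 2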
def _compile_set(self, schema):
    type_ = type(schema)
    type_name = type_.__name__

    def validate_set(path, data):
        if not isinstance(data, type_):
            raise er.Invalid('expected a %s' % type_name, path)
        _compiled = [self._compile(s) for s in schema]
        errors = []
        for value in data:
            for validate in _compiled:
                try:
                    validate(path, value)
                    break
                except er.Invalid:
                    pass
            else:
                invalid = er.Invalid('invalid value in %s' % type_name, path)
                errors.append(invalid)
        if errors:
            raise er.MultipleInvalid(errors)
        return data

    return validate_set
Validate a set. A set is an unordered collection of unique elements. >>> validator = Schema({int}) >>> validator(set([42])) == set([42]) True >>> with raises(er.Invalid, 'expected a set'): ... validator(42) >>> with raises(er.MultipleInvalid, 'invalid value in set'): ... validator(set(['a']))
def _QueryHash(self, nsrl_socket, digest):
    try:
        query = 'QUERY {0:s}\n'.format(digest).encode('ascii')
    except UnicodeDecodeError:
        logger.error('Unable to encode digest: {0!s} to ASCII.'.format(digest))
        return False
    response = None
    try:
        nsrl_socket.sendall(query)
        response = nsrl_socket.recv(self._RECEIVE_BUFFER_SIZE)
    except socket.error as exception:
        logger.error('Unable to query nsrlsvr with error: {0!s}.'.format(
            exception))
    if not response:
        return False
    response = response.strip()
    return response == b'OK 1'
Queries nsrlsvr for a specific hash. Args: nsrl_socket (socket._socketobject): socket of connection to nsrlsvr. digest (str): hash to look up. Returns: bool: True if the hash was found, False if not or None on error.
def rounding(price, currency):
    currency = validate_currency(currency)
    price = validate_price(price)
    if decimals(currency) == 0:
        return round(int(price), decimals(currency))
    return round(price, decimals(currency))
Round a currency value based on its maximum number of decimal digits.
def attach(self, api_object, on_cloud=False):
    if self.on_cloud:
        return True
    if api_object and getattr(api_object, 'attachments', None):
        if on_cloud:
            if not api_object.object_id:
                raise RuntimeError(
                    'A valid object id is needed in order to attach a file')
            url = api_object.build_url(self._endpoints.get('attach').format(
                id=api_object.object_id))
            response = api_object.con.post(url, data=self.to_api_data())
            return bool(response)
        else:
            if self.attachment_type == 'file':
                api_object.attachments.add([{
                    'attachment_id': self.attachment_id,
                    'path': str(self.attachment) if self.attachment else None,
                    'name': self.name,
                    'content': self.content,
                    'on_disk': self.on_disk
                }])
            else:
                raise RuntimeError('Only file attachments can be attached')
Attach this attachment to an existing api_object. This BaseAttachment object must be an orphan BaseAttachment created for the sole purpose of attach it to something and therefore run this method. :param api_object: object to attach to :param on_cloud: if the attachment is on cloud or not :return: Success / Failure :rtype: bool
def commands(self):
    (self._commands, value) = self.get_cached_attr_set(self._commands, 'commands')
    return value
Returns a list of commands that are supported by the motor controller. Possible values are `run-forever`, `run-to-abs-pos`, `run-to-rel-pos`, `run-timed`, `run-direct`, `stop` and `reset`. Not all commands may be supported. - `run-forever` will cause the motor to run until another command is sent. - `run-to-abs-pos` will run to an absolute position specified by `position_sp` and then stop using the action specified in `stop_action`. - `run-to-rel-pos` will run to a position relative to the current `position` value. The new position will be current `position` + `position_sp`. When the new position is reached, the motor will stop using the action specified by `stop_action`. - `run-timed` will run the motor for the amount of time specified in `time_sp` and then stop the motor using the action specified by `stop_action`. - `run-direct` will run the motor at the duty cycle specified by `duty_cycle_sp`. Unlike other run commands, changing `duty_cycle_sp` while running *will* take effect immediately. - `stop` will stop any of the run commands before they are complete using the action specified by `stop_action`. - `reset` will reset all of the motor parameter attributes to their default value. This will also have the effect of stopping the motor.
def snapshot(domain, name=None, suffix=None, **kwargs):
    if name and name.lower() == domain.lower():
        raise CommandExecutionError('Virtual Machine {name} is already defined. '
                                    'Please choose another name for the snapshot'
                                    .format(name=name))
    if not name:
        name = "{domain}-{tsnap}".format(
            domain=domain,
            tsnap=time.strftime('%Y%m%d-%H%M%S', time.localtime()))
    if suffix:
        name = "{name}-{suffix}".format(name=name, suffix=suffix)
    doc = ElementTree.Element('domainsnapshot')
    n_name = ElementTree.SubElement(doc, 'name')
    n_name.text = name
    conn = __get_conn(**kwargs)
    _get_domain(conn, domain).snapshotCreateXML(
        salt.utils.stringutils.to_str(ElementTree.tostring(doc))
    )
    conn.close()
    return {'name': name}
Create a snapshot of a VM. :param domain: domain name :param name: Name of the snapshot. If the name is omitted, then will be used original domain name with ISO 8601 time as a suffix. :param suffix: Add suffix for the new name. Useful in states, where such snapshots can be distinguished from manually created. :param connection: libvirt connection URI, overriding defaults .. versionadded:: 2019.2.0 :param username: username to connect with, overriding defaults .. versionadded:: 2019.2.0 :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' virt.snapshot <domain>