Columns: code (string, lengths 59 to 3.37k), docstring (string, lengths 8 to 15.5k)
def flow_pipeline(diameters, lengths, k_minors, target_headloss,
                  nu=con.WATER_NU, pipe_rough=mats.PVC_PIPE_ROUGH):
    n = diameters.size
    flow = pc.flow_pipe(diameters[0], target_headloss, lengths[0], nu,
                        pipe_rough, k_minors[0])
    err = 1.0
    while abs(err) > 0.01:
        headloss = sum([pc.headloss(flow, diameters[i], lengths[i], nu,
                                    pipe_rough, k_minors[i]).to(u.m).magnitude
                        for i in range(n)])
        err = (target_headloss - headloss) / (target_headloss + headloss)
        flow = flow + err * flow
    return flow
This function takes a single pipeline with multiple sections, each potentially with different diameters, lengths and minor loss coefficients, and determines the flow rate for a given headloss.

:param diameters: list of diameters, where the i_th diameter corresponds to the i_th pipe section
:type diameters: numpy.ndarray
:param lengths: list of lengths, where the i_th length corresponds to the i_th pipe section
:type lengths: numpy.ndarray
:param k_minors: list of minor loss coefficients, where the i_th coefficient corresponds to the i_th pipe section
:type k_minors: numpy.ndarray
:param target_headloss: a single headloss describing the total headloss through the system
:type target_headloss: float
:param nu: The dynamic viscosity of the fluid. Defaults to water at room temperature (1 * 10**-6 * m**2/s)
:type nu: float
:param pipe_rough: The pipe roughness. Defaults to PVC roughness.
:type pipe_rough: float
:return: the total flow through the system
:rtype: float
def add2python(self, module=None, up=0, down=None, front=False, must_exist=True): if module: try: return import_module(module) except ImportError: pass dir = self.dir().ancestor(up) if down: dir = dir.join(*down) if dir.isdir(): if dir not in sys.path: if front: sys.path.insert(0, dir) else: sys.path.append(dir) elif must_exist: raise ImportError('Directory {0} not available'.format(dir)) else: return None if module: try: return import_module(module) except ImportError: if must_exist: raise
Add a directory to the python path.

:parameter module: Optional module name to try to import once we have found the directory.
:parameter up: number of levels to go up the directory tree from :attr:`local_path`.
:parameter down: Optional tuple of directory names to travel down once we have gone *up* levels.
:parameter front: Boolean indicating if we want to insert the new path at the front of ``sys.path`` using ``sys.path.insert(0, path)``.
:parameter must_exist: Boolean indicating if the module must exist.
def get_attr_value(self, attr_key, el_idx=0):
    return self.get_element_by_attr_key(attr_key, el_idx).attrib[attr_key]
Return the value of the selected attribute in the selected element. Args: attr_key : str Name of attribute for which to search el_idx : int Index of element to use in the event that there are multiple sibling elements with the same name. Returns: str : Value of the selected attribute in the selected element.
def _build_resource_from_properties(obj, filter_fields): partial = {} for filter_field in filter_fields: api_field = obj._PROPERTY_TO_API_FIELD.get(filter_field) if api_field is None and filter_field not in obj._properties: raise ValueError("No property %s" % filter_field) elif api_field is not None: partial[api_field] = obj._properties.get(api_field) else: partial[filter_field] = obj._properties[filter_field] return partial
Build a resource based on a ``_properties`` dictionary, filtered by ``filter_fields``, whose names follow the corresponding properties of the Python object.
def replace_env_vars(conf):
    d = deepcopy(conf)
    for key, value in d.items():
        if type(value) == dict:
            d[key] = replace_env_vars(value)
        elif type(value) == str:
            if value[0] == '$':
                var_name = value[1:]
                d[key] = os.environ[var_name]
    return d
Fill `conf` with environment variables, where appropriate. Any value of the form $VAR will be replaced with the environment variable VAR. If there are sub-dictionaries, this function will recurse. The original dictionary is preserved and a copy is returned.
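A minimal usage sketch, assuming the replace_env_vars function above is in scope (its implementation relies on os and copy.deepcopy); the DB_PASSWORD variable and config layout are made up for illustration:

import os

os.environ["DB_PASSWORD"] = "s3cret"          # hypothetical variable, set only for the demo

conf = {"db": {"user": "app", "password": "$DB_PASSWORD"}, "debug": "true"}
resolved = replace_env_vars(conf)

print(resolved["db"]["password"])             # -> s3cret
print(conf["db"]["password"])                 # original dict untouched -> $DB_PASSWORD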
def unroot(self):
    if len(self.children) == 2:
        if not self.children[0].is_leaf():
            self.children[0].delete()
        elif not self.children[1].is_leaf():
            self.children[1].delete()
        else:
            raise TreeError("Cannot unroot a tree with only two leaves")
Unroots current node. This function is expected to be used on the absolute tree root node, but it can be also be applied to any other internal node. It will convert a split into a multifurcation.
def _isValidQuery(self, query, mode="phonefy"): try: validator = self.modes[mode].get("query_validator") if validator: try: compiledRegexp = re.compile( "^{expr}$".format( expr=validator ) ) return compiledRegexp.match(query) except AttributeError as e: return True except AttributeError as e: compiledRegexp = re.compile("^{r}$".format(r=self.validQuery[mode])) return compiledRegexp.match(query)
Method to verify if a given query is processable by the platform. The system looks for the forbidden characters in self.Forbidden list. Args: ----- query: The query to be launched. mode: To be chosen amongst mailfy, phonefy, usufy, searchfy. Return: ------- True | False
def get_name(key):
    definition_dict = definition(key)
    if definition_dict:
        return definition_dict.get('name', key)
    return key
Given a keyword, try to get its name.

.. versionadded:: 4.2

Definition dicts are defined in keywords.py. We try to return the name if present, otherwise we return the key itself.

keyword = 'layer_purpose'
kio = safe.utilities.keyword_io.Keyword_IO()
name = kio.get_name(keyword)
print name

:param key: A keyword key.
:type key: str

:returns: The name of the keyword.
:rtype: str
def pick_env_and_run_and_report(self, env: env_tools.PreparedEnv, env_py2: Optional[env_tools.PreparedEnv], verbose: bool, previous_failures: Set['Check'] ) -> CheckResult: env.report_status_to_github('pending', 'Running...', self.context()) chosen_env = cast(env_tools.PreparedEnv, env_py2 if self.needs_python2_env() else env) os.chdir(cast(str, chosen_env.destination_directory)) result = self.run(chosen_env, verbose, previous_failures) if result.unexpected_error is not None: env.report_status_to_github('error', 'Unexpected error.', self.context()) else: env.report_status_to_github( 'success' if result.success else 'failure', result.message, self.context()) return result
Evaluates this check in python 3 or 2.7, and reports to github. If the prepared environments are not linked to a github repository, with a known access token, reporting to github is skipped. Args: env: A prepared python 3 environment. env_py2: A prepared python 2.7 environment. verbose: When set, more progress output is produced. previous_failures: Checks that have already run and failed. Returns: A CheckResult instance.
def _handle_watch_message(self, message): if self.log_protocol_level is not None: logger.log(self.log_protocol_level, "<- %s", hexlify(message).decode()) message = self.pending_bytes + message while len(message) >= 4: try: packet, length = PebblePacket.parse_message(message) except IncompleteMessage: self.pending_bytes = message break except: expected_length, = struct.unpack('!H', message[:2]) if expected_length == 0: self.pending_bytes = b'' else: self.pending_bytes = message[expected_length + 4:] raise self.event_handler.broadcast_event("raw_inbound", message[:length]) if self.log_packet_level is not None: logger.log(self.log_packet_level, "<- %s", packet) message = message[length:] self.event_handler.broadcast_event((_EventType.Watch, type(packet)), packet) if length == 0: break self.pending_bytes = message
Processes a binary message received from the watch and broadcasts the relevant events. :param message: A raw message from the watch, without any transport framing. :type message: bytes
def mount(self, url, app):
    "Mount a sub-app at the url of current app."
    app.url = url
    self.mounts.append(app)
Mount a sub-app at the url of current app.
def add_final_retrain_ops(class_count, final_tensor_name, bottleneck_tensor, quantize_layer, is_training): batch_size, bottleneck_tensor_size = bottleneck_tensor.get_shape().as_list() assert batch_size is None, 'We want to work with arbitrary batch size.' with tf.name_scope('input'): bottleneck_input = tf.placeholder_with_default( bottleneck_tensor, shape=[batch_size, bottleneck_tensor_size], name='BottleneckInputPlaceholder') ground_truth_input = tf.placeholder( tf.int64, [batch_size], name='GroundTruthInput') layer_name = 'final_retrain_ops' with tf.name_scope(layer_name): with tf.name_scope('weights'): initial_value = tf.truncated_normal( [bottleneck_tensor_size, class_count], stddev=0.001) layer_weights = tf.Variable(initial_value, name='final_weights') variable_summaries(layer_weights) with tf.name_scope('biases'): layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases') variable_summaries(layer_biases) with tf.name_scope('Wx_plus_b'): logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases tf.summary.histogram('pre_activations', logits) final_tensor = tf.nn.softmax(logits, name=final_tensor_name) if quantize_layer: if is_training: tf.contrib.quantize.create_training_graph() else: tf.contrib.quantize.create_eval_graph() tf.summary.histogram('activations', final_tensor) if not is_training: return None, None, bottleneck_input, ground_truth_input, final_tensor with tf.name_scope('cross_entropy'): cross_entropy_mean = tf.losses.sparse_softmax_cross_entropy( labels=ground_truth_input, logits=logits) tf.summary.scalar('cross_entropy', cross_entropy_mean) with tf.name_scope('train'): optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate) train_step = optimizer.minimize(cross_entropy_mean) return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input, final_tensor)
Adds a new softmax and fully-connected layer for training and eval. We need to retrain the top layer to identify our new classes, so this function adds the right operations to the graph, along with some variables to hold the weights, and then sets up all the gradients for the backward pass. The set up for the softmax and fully-connected layers is based on: https://www.tensorflow.org/tutorials/mnist/beginners/index.html Args: class_count: Integer of how many categories of things we're trying to recognize. final_tensor_name: Name string for the new final node that produces results. bottleneck_tensor: The output of the main CNN graph. quantize_layer: Boolean, specifying whether the newly added layer should be instrumented for quantization with TF-Lite. is_training: Boolean, specifying whether the newly add layer is for training or eval. Returns: The tensors for the training and cross entropy results, and tensors for the bottleneck input and ground truth input.
def main(): try: if get_global_option('show_config'): print json.dumps(config.get_configuration(), indent=2) elif get_global_option('daemon'): daemon = DynamicDynamoDBDaemon( '{0}/dynamic-dynamodb.{1}.pid'.format( get_global_option('pid_file_dir'), get_global_option('instance'))) if get_global_option('daemon') == 'start': logger.debug('Starting daemon') try: daemon.start() logger.info('Daemon started') except IOError as error: logger.error('Could not create pid file: {0}'.format(error)) logger.error('Daemon not started') elif get_global_option('daemon') == 'stop': logger.debug('Stopping daemon') daemon.stop() logger.info('Daemon stopped') sys.exit(0) elif get_global_option('daemon') == 'restart': logger.debug('Restarting daemon') daemon.restart() logger.info('Daemon restarted') elif get_global_option('daemon') in ['foreground', 'fg']: logger.debug('Starting daemon in foreground') daemon.run() logger.info('Daemon started in foreground') else: print( 'Valid options for --daemon are start, ' 'stop, restart, and foreground') sys.exit(1) else: if get_global_option('run_once'): execute() else: while True: execute() except Exception as error: logger.exception(error)
Main function called from dynamic-dynamodb
def get_policy(self, name): address = _create_policy_address(name) policy_list_bytes = None try: policy_list_bytes = self._state_view.get(address=address) except KeyError: return None if policy_list_bytes is not None: policy_list = _create_from_bytes(policy_list_bytes, identity_pb2.PolicyList) for policy in policy_list.policies: if policy.name == name: return policy return None
Get a single Policy by name. Args: name (str): The name of the Policy. Returns: (:obj:`Policy`) The Policy that matches the name.
def remove_edge_from_heap(self, segment_ids): self._initialize_heap() key = normalize_edge(segment_ids) if key in self.edge_map: self.edge_map[key][0] = None self.num_valid_edges -= 1
Remove an edge from the heap.
def pbkdf2_bin(
    data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS, keylen=None, hashfunc=None
):
    if not hashfunc:
        hashfunc = "sha256"
    data = to_bytes(data)
    salt = to_bytes(salt)
    if callable(hashfunc):
        _test_hash = hashfunc()
        hash_name = getattr(_test_hash, "name", None)
    else:
        hash_name = hashfunc
    return hashlib.pbkdf2_hmac(hash_name, data, salt, iterations, keylen)
Returns a binary digest for the PBKDF2 hash algorithm of `data` with the given `salt`. It iterates `iterations` times and produces a key of `keylen` bytes. By default, SHA-256 is used as hash function; a different hashlib `hashfunc` can be provided. .. versionadded:: 0.9 :param data: the data to derive. :param salt: the salt for the derivation. :param iterations: the number of iterations. :param keylen: the length of the resulting key. If not provided the digest size will be used. :param hashfunc: the hash function to use. This can either be the string name of a known hash function or a function from the hashlib module. Defaults to sha256.
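A usage sketch, assuming the pbkdf2_bin above is in scope and that its to_bytes helper UTF-8-encodes str input (as in Werkzeug-style code); since the function delegates to hashlib.pbkdf2_hmac, the result can be cross-checked against the standard library:

from binascii import hexlify
import hashlib

key = pbkdf2_bin("secret-password", "random-salt", iterations=100000, keylen=32)
print(hexlify(key).decode())

# cross-check against the standard-library call it wraps (default hash is sha256)
expected = hashlib.pbkdf2_hmac("sha256", b"secret-password", b"random-salt", 100000, 32)
assert key == expected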
def getThroughput(self, instId: int) -> float:
    if instId not in self.instances.ids:
        return None
    perf_time = time.perf_counter()
    throughput = self.throughputs[instId].get_throughput(perf_time)
    return throughput
Return the throughput of the specified instance. :param instId: the id of the protocol instance
def poll_for_server_running(job_id): sys.stdout.write('Waiting for server in {0} to initialize ...'.format(job_id)) sys.stdout.flush() desc = dxpy.describe(job_id) while(SERVER_READY_TAG not in desc['tags'] and desc['state'] != 'failed'): time.sleep(SLEEP_PERIOD) sys.stdout.write('.') sys.stdout.flush() desc = dxpy.describe(job_id) if desc['state'] == 'failed': msg = RED('Error:') + ' Server failed to run.\n' msg += 'You may want to check the job logs by running:' msg += BOLD('dx watch {0}'.format(job_id)) err_exit(msg)
Poll for the job to start running and post the SERVER_READY_TAG.
def update(self):
    con = self.subpars.pars.control
    self(con.eqb*con.tind)
Update |KB| based on |EQB| and |TInd|. >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> eqb(10.0) >>> tind.value = 10.0 >>> derived.kb.update() >>> derived.kb kb(100.0)
def update_widget_attrs(self, bound_field, attrs): if bound_field.field.has_subwidgets() is False: widget_classes = getattr(self, 'widget_css_classes', None) if widget_classes: if 'class' in attrs: attrs['class'] += ' ' + widget_classes else: attrs.update({'class': widget_classes}) return attrs
Update the widget attributes which shall be added to the widget when rendering this field.
def statuses_show(self, id, trim_user=None, include_my_retweet=None, include_entities=None): params = {'id': id} set_bool_param(params, 'trim_user', trim_user) set_bool_param(params, 'include_my_retweet', include_my_retweet) set_bool_param(params, 'include_entities', include_entities) return self._get_api('statuses/show.json', params)
Returns a single Tweet, specified by the id parameter. https://dev.twitter.com/docs/api/1.1/get/statuses/show/%3Aid :param str id: (*required*) The numerical ID of the desired tweet. :param bool trim_user: When set to ``True``, the tweet's user object includes only the status author's numerical ID. :param bool include_my_retweet: When set to ``True``, any Tweet returned that has been retweeted by the authenticating user will include an additional ``current_user_retweet`` node, containing the ID of the source status for the retweet. :param bool include_entities: When set to ``False``, the ``entities`` node will not be included. :returns: A tweet dict.
def list_inappproducts(self): result = self.service.inappproducts().list( packageName=self.package_name).execute() if result is not None: return result.get('inappproduct', list()) return list()
Temporary function to list in-app products.
def get_taskfile(self, refobj):
    tfid = cmds.getAttr("%s.taskfile_id" % refobj)
    try:
        return djadapter.taskfiles.get(pk=tfid)
    except djadapter.models.TaskFile.DoesNotExist:
        raise djadapter.models.TaskFile.DoesNotExist(
            "Could not find the taskfile that was set on the node %s. Id was %s" % (refobj, tfid))
Return the taskfile that is loaded and represented by the refobj :param refobj: the reftrack node to query :type refobj: str :returns: The taskfile that is loaded in the scene :rtype: :class:`jukeboxcore.djadapter.TaskFile` :raises: None
def clear_session_value(self, name):
    self.redis().hdel(self._session_key, name)
    self._update_session_expiration()
Removes a session value
def pool_process(func, iterable, process_name='Pool processing', cpus=cpu_count()):
    with Timer('\t{0} ({1}) completed in'.format(process_name, str(func))):
        pool = Pool(cpus)
        vals = pool.map(func, iterable)
        pool.close()
        return vals
Apply a function to each element in an iterable and return a result list. :param func: A function that returns a value :param iterable: A list or set of elements to be passed to the func as the singular parameter :param process_name: Name of the process, for printing purposes only :param cpus: Number of CPUs :return: Result list
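A usage sketch, assuming the pool_process above is importable (Timer, Pool and cpu_count come from its own module); the worker function must be picklable, i.e. defined at module level, because multiprocessing.Pool is used, and the hypothetical slow_square name is only for illustration:

def slow_square(x):
    return x * x

if __name__ == "__main__":
    results = pool_process(slow_square, range(10), process_name="squares", cpus=2)
    print(results)   # -> [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]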
def _visit_or_none(node, attr, visitor, parent, visit="visit", **kws):
    value = getattr(node, attr, None)
    if value:
        return getattr(visitor, visit)(value, parent, **kws)
    return None
If the given node has an attribute, visits the attribute, and otherwise returns None.
def _train_and_eval_batches(dataset, data_dir, input_name, num_devices): (train_data, eval_data, features_info, keys) = train_and_eval_dataset( dataset, data_dir) input_names, target_names = keys[0], keys[1] train_batches = shuffle_and_batch_data( train_data, target_names, features_info, training=True, num_devices=num_devices) train_eval_batches = shuffle_and_batch_data( train_data, target_names, features_info, training=False, num_devices=num_devices) eval_batches = shuffle_and_batch_data( eval_data, target_names, features_info, training=False, num_devices=num_devices) input_name = input_name or input_names[0] input_shape = features_info[input_name].shape return (train_batches, train_eval_batches, eval_batches, input_name, list(input_shape))
Return train and eval batches with input name and shape.
def cli_schemata_list(self, *args): self.log('Registered schemata languages:', ",".join(sorted(l10n_schemastore.keys()))) self.log('Registered Schemata:', ",".join(sorted(schemastore.keys()))) if '-c' in args or '-config' in args: self.log('Registered Configuration Schemata:', ",".join(sorted(configschemastore.keys())), pretty=True)
Display a list of registered schemata
def set_attr(self, key: str, value):
    self.attr_setter(self, key, value)
Sets node attribute. Can be customized by attr_setter property
def get_adjustments(self, assets, field, dt, perspective_dt): if isinstance(assets, Asset): assets = [assets] adjustment_ratios_per_asset = [] def split_adj_factor(x): return x if field != 'volume' else 1.0 / x for asset in assets: adjustments_for_asset = [] split_adjustments = self._get_adjustment_list( asset, self._splits_dict, "SPLITS" ) for adj_dt, adj in split_adjustments: if dt < adj_dt <= perspective_dt: adjustments_for_asset.append(split_adj_factor(adj)) elif adj_dt > perspective_dt: break if field != 'volume': merger_adjustments = self._get_adjustment_list( asset, self._mergers_dict, "MERGERS" ) for adj_dt, adj in merger_adjustments: if dt < adj_dt <= perspective_dt: adjustments_for_asset.append(adj) elif adj_dt > perspective_dt: break dividend_adjustments = self._get_adjustment_list( asset, self._dividends_dict, "DIVIDENDS", ) for adj_dt, adj in dividend_adjustments: if dt < adj_dt <= perspective_dt: adjustments_for_asset.append(adj) elif adj_dt > perspective_dt: break ratio = reduce(mul, adjustments_for_asset, 1.0) adjustment_ratios_per_asset.append(ratio) return adjustment_ratios_per_asset
Returns a list of adjustments between the dt and perspective_dt for the given field and list of assets Parameters ---------- assets : list of type Asset, or Asset The asset, or assets whose adjustments are desired. field : {'open', 'high', 'low', 'close', 'volume', \ 'price', 'last_traded'} The desired field of the asset. dt : pd.Timestamp The timestamp for the desired value. perspective_dt : pd.Timestamp The timestamp from which the data is being viewed back from. Returns ------- adjustments : list[Adjustment] The adjustments to that field.
def _clean_up_name(self, name):
    for n in self.naughty:
        name = name.replace(n, '_')
    return name
Cleans up the name by replacing every character listed in self.naughty (a list of disallowed characters) with an underscore.
def get_interfaces_ip(self): def extract_ip_info(parsed_intf_dict): intf = parsed_intf_dict['name'] _ip_info = {intf: {}} v4_ip = parsed_intf_dict.get('ip') secondary_v4_ip = parsed_intf_dict.get('addr') v6_ip = parsed_intf_dict.get('addr6') if v4_ip != 'N/A': address, pref = v4_ip.split('/') _ip_info[intf].setdefault('ipv4', {})[address] = {'prefix_length': int(pref)} if secondary_v4_ip is not None: members = secondary_v4_ip['member'] if not isinstance(members, list): members = [members] for address in members: address, pref = address.split('/') _ip_info[intf].setdefault('ipv4', {})[address] = {'prefix_length': int(pref)} if v6_ip is not None: members = v6_ip['member'] if not isinstance(members, list): members = [members] for address in members: address, pref = address.split('/') _ip_info[intf].setdefault('ipv6', {})[address] = {'prefix_length': int(pref)} if _ip_info == {intf: {}}: _ip_info = {} return _ip_info ip_interfaces = {} cmd = "<show><interface>all</interface></show>" self.device.op(cmd=cmd) interface_info_xml = xmltodict.parse(self.device.xml_root()) interface_info_json = json.dumps( interface_info_xml['response']['result']['ifnet']['entry'] ) interface_info = json.loads(interface_info_json) if isinstance(interface_info, dict): interface_info = [interface_info] for interface_dict in interface_info: ip_info = extract_ip_info(interface_dict) if ip_info: ip_interfaces.update(ip_info) return ip_interfaces
Return IP interface data.
def class_subobjects( class_: Type) -> Generator[Tuple[str, Type, bool], None, None]: argspec = inspect.getfullargspec(class_.__init__) defaults = argspec.defaults if argspec.defaults else [] num_optional = len(defaults) first_optional = len(argspec.args) - num_optional for i, attr_name in enumerate(argspec.args): if attr_name == 'self': continue if attr_name == 'yatiml_extra': continue attr_type = argspec.annotations.get(attr_name, Any) yield attr_name, attr_type, i < first_optional
Find the aggregated subobjects of an object. These are the public attributes. Args: class_: The class whose subobjects to return. Yields: Tuples (name, type, required) describing subobjects.
def service_name(doc): for service_id, service in doc.get('services', {}).items(): service['id'] = service_id service['organisation_id'] = doc['_id'] name = service.get('name', None) if name: yield name, service
View for getting service by name
def read_xso(src, xsomap): xso_parser = xso.XSOParser() for class_, cb in xsomap.items(): xso_parser.add_class(class_, cb) driver = xso.SAXDriver(xso_parser) parser = xml.sax.make_parser() parser.setFeature( xml.sax.handler.feature_namespaces, True) parser.setFeature( xml.sax.handler.feature_external_ges, False) parser.setContentHandler(driver) parser.parse(src)
Read a single XSO from a binary file-like input `src` containing an XML document. `xsomap` must be a mapping which maps :class:`~.XSO` subclasses to callables. These will be registered at a newly created :class:`.xso.XSOParser` instance which will be used to parse the document in `src`. The `xsomap` is thus used to determine the class parsing the root element of the XML document. This can be used to support multiple versions.
def cycle_find(key, width=4):
    key_len = len(key)
    buf = ''
    it = deBruijn(width, 26)
    for i in range(key_len):
        buf += chr(ord('A') + next(it))
    if buf == key:
        return 0
    for i, c in enumerate(it):
        buf = buf[1:] + chr(ord('A') + c)
        if buf == key:
            return i + 1
    return -1
Given an element of a de Bruijn sequence, find its index in that sequence. Args: key(str): The piece of the de Bruijn sequence to find. width(int): The width of each element in the sequence. Returns: int: The index of ``key`` in the de Bruijn sequence.
def datasets(ctx, client): from renku.models._jsonld import asjsonld from renku.models.datasets import Dataset from renku.models.refs import LinkReference from ._checks.location_datasets import _dataset_metadata_pre_0_3_4 for old_path in _dataset_metadata_pre_0_3_4(client): with old_path.open('r') as fp: dataset = Dataset.from_jsonld(yaml.safe_load(fp)) name = str(old_path.parent.relative_to(client.path / 'data')) new_path = ( client.renku_datasets_path / dataset.identifier.hex / client.METADATA ) new_path.parent.mkdir(parents=True, exist_ok=True) dataset = dataset.rename_files( lambda key: os.path. relpath(str(old_path.parent / key), start=str(new_path.parent)) ) with new_path.open('w') as fp: yaml.dump(asjsonld(dataset), fp, default_flow_style=False) old_path.unlink() LinkReference.create(client=client, name='datasets/' + name).set_reference(new_path)
Migrate dataset metadata.
def variants(self, case_id, skip=0, count=1000, filters=None): filters = filters or {} logger.debug("Fetching case with case_id: {0}".format(case_id)) case_obj = self.case(case_id) plugin, case_id = self.select_plugin(case_obj) self.filters = plugin.filters gene_lists = (self.gene_list(list_id) for list_id in filters.get('gene_lists', [])) nested_geneids = (gene_list.gene_ids for gene_list in gene_lists) gene_ids = set(itertools.chain.from_iterable(nested_geneids)) if filters.get('gene_ids'): filters['gene_ids'].extend(gene_ids) else: filters['gene_ids'] = gene_ids variants = plugin.variants(case_id, skip, count, filters) return variants
Fetch variants for a case.
def slew(self, value):
    if float(value) != self.filepos:
        pos = float(value) * self.filesize
        self.mlog.f.seek(int(pos))
        self.find_message()
move to a given position in the file
def get_reports(self): return sorted(self._reports, key=lambda x: x['stats']['totalTimeMillis'], reverse=True)
Returns a minimized version of the aggregation
def extract_bzip2 (archive, compression, cmd, verbosity, interactive, outdir): targetname = util.get_single_outfile(outdir, archive) try: with bz2.BZ2File(archive) as bz2file: with open(targetname, 'wb') as targetfile: data = bz2file.read(READ_SIZE_BYTES) while data: targetfile.write(data) data = bz2file.read(READ_SIZE_BYTES) except Exception as err: msg = "error extracting %s to %s: %s" % (archive, targetname, err) raise util.PatoolError(msg) return None
Extract a BZIP2 archive with the bz2 Python module.
def _machinectl(cmd, output_loglevel='debug', ignore_retcode=False, use_vt=False): prefix = 'machinectl --no-legend --no-pager' return __salt__['cmd.run_all']('{0} {1}'.format(prefix, cmd), output_loglevel=output_loglevel, ignore_retcode=ignore_retcode, use_vt=use_vt)
Helper function to run machinectl
def rpy2(): if LazyImport.rpy2_module is None: try: rpy2 = __import__('rpy2.robjects') except ImportError: raise ImportError('The rpy2 module is required') LazyImport.rpy2_module = rpy2 try: rpy2.forecast = rpy2.robjects.packages.importr('forecast') except: raise ImportError('R and the "forecast" package are required') rpy2.ts = rpy2.robjects.r['ts'] __import__('rpy2.robjects.numpy2ri') rpy2.robjects.numpy2ri.activate() return LazyImport.rpy2_module
Lazily import the rpy2 module
def simDeath(self): how_many_die = int(round(self.AgentCount*(1.0-self.LivPrb[0]))) base_bool = np.zeros(self.AgentCount,dtype=bool) base_bool[0:how_many_die] = True who_dies = self.RNG.permutation(base_bool) if self.T_age is not None: who_dies[self.t_age >= self.T_age] = True who_lives = np.logical_not(who_dies) wealth_living = np.sum(self.aLvlNow[who_lives]) wealth_dead = np.sum(self.aLvlNow[who_dies]) Ractuarial = 1.0 + wealth_dead/wealth_living self.aNrmNow[who_lives] = self.aNrmNow[who_lives]*Ractuarial self.aLvlNow[who_lives] = self.aLvlNow[who_lives]*Ractuarial return who_dies
Randomly determine which consumers die, and distribute their wealth among the survivors. This method only works if there is only one period in the cycle. Parameters ---------- None Returns ------- who_dies : np.array(bool) Boolean array of size AgentCount indicating which agents die.
def update_boxes(self, box_pos, box_size): assert box_pos.shape == (self.n_boxes, 2) assert len(box_size) == 2 self.box_bounds = _get_boxes(box_pos, size=box_size, keep_aspect_ratio=self.keep_aspect_ratio, )
Set the box bounds from specified box positions and sizes.
def equivalent_reflections(self, hkl): hkl = np.array(hkl, dtype='int', ndmin=2) rot = self.get_rotations() n, nrot = len(hkl), len(rot) R = rot.transpose(0, 2, 1).reshape((3*nrot, 3)).T refl = np.dot(hkl, R).reshape((n*nrot, 3)) ind = np.lexsort(refl.T) refl = refl[ind] diff = np.diff(refl, axis=0) mask = np.any(diff, axis=1) return np.vstack((refl[mask], refl[-1,:]))
Return all equivalent reflections to the list of Miller indices in hkl. Example: >>> from ase.lattice.spacegroup import Spacegroup >>> sg = Spacegroup(225) # fcc >>> sg.equivalent_reflections([[0, 0, 2]]) array([[ 0, 0, -2], [ 0, -2, 0], [-2, 0, 0], [ 2, 0, 0], [ 0, 2, 0], [ 0, 0, 2]])
def concatenate_node_predicates(node_predicates: NodePredicates) -> NodePredicate: if not isinstance(node_predicates, Iterable): return node_predicates node_predicates = tuple(node_predicates) if 1 == len(node_predicates): return node_predicates[0] def concatenated_node_predicate(graph: BELGraph, node: BaseEntity) -> bool: return all( node_predicate(graph, node) for node_predicate in node_predicates ) return concatenated_node_predicate
Concatenate multiple node predicates to a new predicate that requires all predicates to be met. Example usage: >>> from pybel.dsl import protein, gene >>> from pybel.struct.filters.node_predicates import not_pathology, node_exclusion_predicate_builder >>> app_protein = protein(name='APP', namespace='HGNC') >>> app_gene = gene(name='APP', namespace='HGNC') >>> app_predicate = node_exclusion_predicate_builder([app_protein, app_gene]) >>> my_predicate = concatenate_node_predicates([not_pathology, app_predicate])
def update_headers(self, response): if 'expires' in response.headers and 'cache-control' in response.headers: self.msg = self.server_cache_headers return response.headers else: self.msg = self.default_cache_vars date = parsedate(response.headers['date']) expires = datetime(*date[:6]) + timedelta(0, self.expire_after) response.headers.update({'expires': formatdate(calendar.timegm(expires.timetuple())), 'cache-control': 'public'}) return response.headers
Returns the updated caching headers. Args: response (HttpResponse): The response from the remote service Returns: response:(HttpResponse.Headers): Http caching headers
def _matrix_grad(q, h, h_dx, t, t_prime): N = len(q) W = np.zeros([N, N]) Wprime = np.zeros([N, N]) for i in range(N): W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)]) Wprime[i, i] = \ 0.5*(h_dx[min(i+1, N-1)] - h_dx[max(i-1, 0)]) tgrad = np.array([t_prime[i]*h_dx[i] for i in np.arange(N)]) grad = 2.0*(q - t).T.dot(W).dot(-1.0*tgrad) \ + (q - t).T.dot(Wprime).dot(q - t) return grad
Returns the gradient with respect to a single variable
def get_group_admin(self, group): data = { 'name': group, } response = _fix_group(self.post('getGroupAdmin', data)) return response
Get the group's admins :type group: str :param group: group name :rtype: list :return: a list containing group admins
def _get_area_def_uniform_sampling(self, lon0, channel): logger.debug('Computing area definition') if lon0 is not None: proj_dict = {'a': EQUATOR_RADIUS, 'b': POLE_RADIUS, 'lon_0': lon0, 'h': ALTITUDE, 'proj': 'geos', 'units': 'm'} xmax, ymax = get_geostationary_angle_extent( namedtuple('area', ['proj_dict'])(proj_dict)) llx, lly, urx, ury = ALTITUDE * np.array([-xmax, -ymax, xmax, ymax]) area_extent = [llx, lly, urx, ury] if self._is_vis(channel): sampling = SAMPLING_NS_VIS else: sampling = SAMPLING_NS_IR pix_size = ALTITUDE * sampling area_def = pyresample.geometry.AreaDefinition( 'goes_geos_uniform', '{} geostationary projection (uniform sampling)'.format(self.platform_name), 'goes_geos_uniform', proj_dict, np.rint((urx - llx) / pix_size).astype(int), np.rint((ury - lly) / pix_size).astype(int), area_extent) return area_def else: return None
Get area definition with uniform sampling
def from_master_secret(cls, seed, network="bitcoin_testnet"): network = Wallet.get_network(network) seed = ensure_bytes(seed) I = hmac.new(b"Bitcoin seed", msg=seed, digestmod=sha512).digest() I_L, I_R = I[:32], I[32:] return cls(private_exponent=long_or_int(hexlify(I_L), 16), chain_code=long_or_int(hexlify(I_R), 16), network=network)
Generate a new PrivateKey from a secret key. :param seed: The key to use to generate this wallet. It may be a long string. Do not use a phrase from a book or song, as that will be guessed and is not secure. My advice is to not supply this argument and let me generate a new random key for you. See https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#Serialization_format # nopep8
def convert_dropout(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) probability = float(attrs.get("p", 0.5)) dropout_node = onnx.helper.make_node( "Dropout", input_nodes, [name], ratio=probability, name=name ) return [dropout_node]
Map MXNet's Dropout operator attributes to onnx's Dropout operator and return the created node.
def squash(self, a, b):
    return ((''.join(x) if isinstance(x, tuple) else x)
            for x in itertools.product(a, b))
Returns a generator that squashes two iterables into one. ``` ['this', 'that'], [[' and', ' or']] => ['this and', 'this or', 'that and', 'that or'] ```
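A standalone sketch of the same squashing logic (the method above does not use self, so its behaviour can be reproduced as a plain function); shown here with flat input lists rather than the library's chained form:

import itertools

def squash(a, b):
    # join tuple pairs produced by the cartesian product into single strings
    return ((''.join(x) if isinstance(x, tuple) else x)
            for x in itertools.product(a, b))

print(list(squash(['this', 'that'], [' and', ' or'])))
# -> ['this and', 'this or', 'that and', 'that or']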
def unpack_glist(g, type_, transfer_full=True): values = [] item = g while item: ptr = item.contents.data value = cast(ptr, type_).value values.append(value) if transfer_full: free(ptr) item = item.next() if transfer_full: g.free() return values
Takes a glist, copies the values casted to type_ in to a list and frees all items and the list.
def _write_user_prefs(self, user_prefs): with open(self.userPrefs, "w") as f: for key, value in user_prefs.items(): f.write('user_pref("%s", %s);\n' % (key, json.dumps(value)))
writes the current user prefs dictionary to disk
def get_credentials(username=None, password=None, netrc=None, use_keyring=False): if netrc: path = None if netrc is True else netrc return authenticate_through_netrc(path) if not username: raise CredentialsError( 'Please provide a username with the -u option, ' 'or a .netrc file with the -n option.') if not password and use_keyring: password = keyring.get_password(KEYRING_SERVICE_NAME, username) if not password: password = getpass.getpass('Coursera password for {0}: '.format(username)) if use_keyring: keyring.set_password(KEYRING_SERVICE_NAME, username, password) return username, password
Return valid username, password tuple. Raises CredentialsError if username or password is missing.
def simplify(self) -> None:
    self.raw = cast(T, z3.simplify(self.raw))
Simplify this expression.
def _init_fields(self): fields = {} forms = {} sets = {} for name in dir(self): if name.startswith('_'): continue field = getattr(self, name) is_field = isinstance(field, Field) is_form = isinstance(field, Form) or ( inspect.isclass(field) and issubclass(field, Form)) is_set = isinstance(field, FormSet) if is_field: field = copy(field) field.name = self._prefix + name field.form = self if field.prepare is None: field.prepare = getattr(self, 'prepare_' + name, None) if field.clean is None: field.clean = getattr(self, 'clean_' + name, None) fields[name] = field setattr(self, name, field) elif is_form: forms[name] = field elif is_set: field._name = self._prefix + name sets[name] = field self._fields = fields self._forms = forms self._sets = sets
Creates the `_fields`, `_forms` and `_sets` dicts. Any properties which begin with an underscore or are not `Field`, `Form` or `FormSet` **instances** are ignored by this method.
def hash(hash_type, input_text):
    hash_funcs = {'MD5': hashlib.md5,
                  'SHA1': hashlib.sha1,
                  'SHA224': hashlib.sha224,
                  'SHA256': hashlib.sha256,
                  'SHA384': hashlib.sha384,
                  'SHA512': hashlib.sha512}
    if hash_type == 'All':
        hash_type = ['MD5', 'SHA1', 'SHA224', 'SHA256', 'SHA384', 'SHA512']
    else:
        hash_type = [hash_type]
    return [{'Algorithm': h, 'Hash': hash_funcs[h](input_text).hexdigest()}
            for h in hash_type]
Hash input_text with the algorithm choice
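A usage sketch, assuming the hash() helper above is in scope (note that it shadows the built-in hash); hashlib digests operate on bytes, so the input text is passed as bytes:

for entry in hash('All', 'hello world'.encode('utf-8')):
    print(entry['Algorithm'], entry['Hash'])

md5_only = hash('MD5', b'hello world')
print(md5_only)   # -> [{'Algorithm': 'MD5', 'Hash': '5eb63bbbe01eeed093cb22bb8f5acdc3'}]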
def request_check(client, exception, *msg_parms, **kwargs): timeout = kwargs.get('timeout', None) req_msg = Message.request(*msg_parms) if timeout is not None: reply, informs = client.blocking_request(req_msg, timeout=timeout) else: reply, informs = client.blocking_request(req_msg) if not reply.reply_ok(): raise exception('Unexpected failure reply "{2}"\n' ' with device at {0}, request \n"{1}"' .format(client.bind_address_string, req_msg, reply)) return reply, informs
Make blocking request to client and raise exception if reply is not ok. Parameters ---------- client : DeviceClient instance exception: Exception class to raise *msg_parms : Message parameters sent to the Message.request() call **kwargs : Keyword arguments Forwards kwargs['timeout'] to client.blocking_request(). Forwards kwargs['mid'] to Message.request(). Returns ------- reply, informs : as returned by client.blocking_request Raises ------ *exception* passed as parameter is raised if reply.reply_ok() is False Notes ----- A typical use-case for this function is to use functools.partial() to bind a particular client and exception. The resulting function can then be used instead of direct client.blocking_request() calls to automate error handling.
def get(self, pk=None, **filters): LOG.debug(u'Querying (GET) %s by pk=%s and filters=%s', self.model_class.__name__, repr(pk), filters) query = self.model_class.objects.filter(**filters) if pk is None: obj = query.get() else: if (isinstance(pk, basestring) and pk.isdigit()) or isinstance(pk, numbers.Number): obj = query.get(pk=pk) elif 'slug' in self.model_class._meta.get_all_field_names(): obj = query.get(slug=pk) else: raise self.model_class.DoesNotExist() perm = build_permission_name(self.model_class, 'view') if not self.user.has_perm(perm, obj=obj): raise PermissionDenied(u'User %s has no permission %s for object %s' % (self.user, perm, obj)) return obj
Retrieve an object instance. If a single argument is supplied, object is queried by primary key, else filter queries will be applyed. If more than one object was found raise MultipleObjectsReturned. If no object found, raise DoesNotExist. Raise PermissionDenied if user has no permission 'view' on object. See https://docs.djangoproject.com/en/dev/ref/models/querysets/#get for more details
def notify_duration_exceeded(self, participants, reference_time): unsubmitted = [] for participant in participants: summary = ParticipationTime(participant, reference_time, self.config) status = self._mturk_status_for(participant) if status == "Approved": participant.status = "approved" session.commit() elif status == "Rejected": participant.status = "rejected" session.commit() elif status == "Submitted": self._resend_submitted_rest_notification_for(participant) self._message_researcher(self._resubmitted_msg(summary)) logger.warning( "Error - submitted notification for participant {} missed. " "A replacement notification was created and sent, " "but proceed with caution.".format(participant.id) ) else: self._send_notification_missing_rest_notification_for(participant) unsubmitted.append(summary) if unsubmitted: self._disable_autorecruit() self.close_recruitment() pick_one = unsubmitted[0] self._message_researcher(self._cancelled_msg(pick_one)) try: self.mturkservice.expire_hit(pick_one.participant.hit_id) except MTurkServiceException as ex: logger.exception(ex)
The participant has exceed the maximum time for the activity, defined in the "duration" config value. We need find out the assignment status on MTurk and act based on this.
def extend_request_args(self, args, item_cls, item_type, key, parameters, orig=False): try: item = self.get_item(item_cls, item_type, key) except KeyError: pass else: for parameter in parameters: if orig: try: args[parameter] = item[parameter] except KeyError: pass else: try: args[parameter] = item[verified_claim_name(parameter)] except KeyError: try: args[parameter] = item[parameter] except KeyError: pass return args
Add a set of parameters and their values to a set of request arguments.

:param args: A dictionary
:param item_cls: The :py:class:`oidcmsg.message.Message` subclass that describes the item
:param item_type: The type of item, this is one of the parameter names in the :py:class:`oidcservice.state_interface.State` class.
:param key: The key to the information in the database
:param parameters: A list of parameters whose values this method will return.
:param orig: Where the value of a claim is a signed JWT, return that.
:return: A dictionary with keys from the list of parameters and values being the values of those parameters in the item. If a parameter does not appear in the item it will not appear in the returned dictionary.
def cert_from_key_info(key_info, ignore_age=False): res = [] for x509_data in key_info.x509_data: x509_certificate = x509_data.x509_certificate cert = x509_certificate.text.strip() cert = '\n'.join(split_len(''.join([s.strip() for s in cert.split()]), 64)) if ignore_age or active_cert(cert): res.append(cert) else: logger.info('Inactive cert') return res
Get all X509 certs from a KeyInfo instance. Care is taken to make sure that the certs are contiguous sequences of bytes.

All certificates appearing in an X509Data element MUST relate to the validation key by either containing it or being part of a certification chain that terminates in a certificate containing the validation key.

:param key_info: The KeyInfo instance
:return: A possibly empty list of certs
def _get_list_of_completed_locales(product, channel):
    return utils.load_json_url(_ALL_LOCALES_URL.format(product=product, channel=channel))
Get all the translated locales supported by Google Play, so that locales unsupported by Google Play won't be downloaded. The same applies to locales that are not translated.
def get(self): email = {} if self.name is not None: email["name"] = self.name if self.email is not None: email["email"] = self.email return email
Get a JSON-ready representation of this Email. :returns: This Email, ready for use in a request body. :rtype: dict
def cdx_load(sources, query, process=True): cdx_iter = create_merged_cdx_gen(sources, query) if query.page_count: return cdx_iter cdx_iter = make_obj_iter(cdx_iter, query) if process and not query.secondary_index_only: cdx_iter = process_cdx(cdx_iter, query) custom_ops = query.custom_ops for op in custom_ops: cdx_iter = op(cdx_iter, query) if query.output == 'text': cdx_iter = cdx_to_text(cdx_iter, query.fields) elif query.output == 'json': cdx_iter = cdx_to_json(cdx_iter, query.fields) return cdx_iter
merge text CDX lines from sources, return an iterator for filtered and access-checked sequence of CDX objects. :param sources: iterable for text CDX sources. :param process: bool, perform processing sorting/filtering/grouping ops
def rename_with_prefix(self, prefix="", new_path=None, in_place=True, remove_desc=True): if new_path is None: prefixed = self.__class__(new_temp_path()) else: prefixed = self.__class__(new_path) def prefixed_iterator(): for i,read in enumerate(self): read.id = prefix + read.id if remove_desc: read.description = "" yield read prefixed.write(prefixed_iterator()) prefixed.close() if in_place: os.remove(self.path) shutil.move(prefixed, self.path) return prefixed
Rename every sequence based on a prefix.
def _send_event_task(args): endpoint = args['endpoint'] json_message = args['json_message'] _consumer_impl.send(endpoint, json_message)
Actually sends the MixPanel event. Runs in a uwsgi worker process.
def set_ipcsem_params(self, ftok=None, persistent=None):
    self._set('ftok', ftok)
    self._set('persistent-ipcsem', persistent, cast=bool)
    return self._section
Sets ipcsem lock engine params. :param str|unicode ftok: Set the ipcsem key via ftok() for avoiding duplicates. :param bool persistent: Do not remove ipcsem's on shutdown.
def add_outcome(self, name, outcome_id=None): if outcome_id is None: outcome_id = generate_outcome_id(list(self.outcomes.keys())) if name in self._outcomes: logger.error("Two outcomes cannot have the same names") return if outcome_id in self.outcomes: logger.error("Two outcomes cannot have the same outcome_ids") return outcome = Outcome(outcome_id, name, self) self._outcomes[outcome_id] = outcome return outcome_id
Add a new outcome to the state :param str name: the name of the outcome to add :param int outcome_id: the optional outcome_id of the new outcome :return: outcome_id: the outcome if of the generated state :rtype: int
def get(self, addresses): with self._lock: results = [] for add in addresses: self.validate_read(add) results.append(self._get(add)) return results
Returns the value in this context, or None, for each address in addresses. Useful for gets on the context manager. Args: addresses (list of str): The addresses to return values for, if within this context. Returns: results (list of bytes): The values in state for these addresses.
def author_list(self):
    author_list = [self.submitter] + \
        [author for author in self.authors.all().exclude(pk=self.submitter.pk)]
    return ",\n".join([author.get_full_name() for author in author_list])
The list of authors as text, for the admin submission list overview.
def remove_handlers_bound_to_instance(self, obj): for handler in self.handlers: if handler.im_self == obj: self -= handler
Remove all handlers bound to given object instance. This is useful to remove all handler methods that are part of an instance. :param object obj: Remove handlers that are methods of this instance
def rotation_coefs(self):
    return [np.cos(self.bearing_rads),
            np.sin(self.bearing_rads),
            -1.0*np.sin(self.bearing_rads),
            np.cos(self.bearing_rads)]
Get the rotation coefficients in radians.

Returns
-------
rotation_coefs : list
    the rotation coefficients implied by Vario2d.bearing
def deactivate_components_ui(self): selected_components = self.get_selected_components() self.__engine.start_processing("Deactivating Components ...", len(selected_components)) deactivation_failed_components = [] for component in selected_components: if component.interface.activated: if component.interface.deactivatable: success = self.deactivate_component(component.name) or False if not success: deactivation_failed_components.append(component) else: self.__engine.notifications_manager.warnify( "{0} | '{1}' Component cannot be deactivated!".format(self.__class__.__name__, component.name)) else: self.__engine.notifications_manager.warnify( "{0} | '{1}' Component is already deactivated!".format(self.__class__.__name__, component.name)) self.__engine.step_processing() self.__engine.stop_processing() self.__store_deactivated_components() if not deactivation_failed_components: return True else: raise manager.exceptions.ComponentDeactivationError( "{0} | Exception(s) raised while deactivating '{1}' Component(s)!".format(self.__class__.__name__, ", ".join(( deactivation_failed_component.name for deactivation_failed_component in deactivation_failed_components))))
Deactivates user selected Components. :return: Method success. :rtype: bool :note: May require user interaction.
def all_query(expression): def _all(index, expression=expression): ev = expression() if callable(expression) else expression try: iter(ev) except TypeError: raise AttributeError('$all argument must be an iterable!') hashed_ev = [index.get_hash_for(v) for v in ev] store_keys = set() if len(hashed_ev) == 0: return [] store_keys = set(index.get_keys_for(hashed_ev[0])) for value in hashed_ev[1:]: store_keys &= set(index.get_keys_for(value)) return list(store_keys) return _all
Match arrays that contain all elements in the query.
def gaussian_points(loc=(0, 0), scale=(10, 10), n=100):
    arr = np.random.normal(loc, scale, (n, 2))
    return gpd.GeoSeries([shapely.geometry.Point(x, y) for (x, y) in arr])
Generates and returns `n` normally distributed points centered at `loc` with `scale` x and y directionality.
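A usage sketch, assuming the gaussian_points above is importable and that numpy, geopandas and shapely are installed:

pts = gaussian_points(loc=(0, 0), scale=(5, 5), n=250)
print(len(pts))      # -> 250
print(pts.iloc[0])   # a shapely Point drawn from a normal distribution centered at (0, 0)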
def logout(): logout_url = REMOTE_APP['logout_url'] apps = current_app.config.get('OAUTHCLIENT_REMOTE_APPS') if apps: cern_app = apps.get('cern', REMOTE_APP) logout_url = cern_app['logout_url'] return redirect(logout_url, code=302)
CERN logout view.
def patch_func(replacement, target_mod, func_name): original = getattr(target_mod, func_name) vars(replacement).setdefault('unpatched', original) setattr(target_mod, func_name, replacement)
Patch func_name in target_mod with replacement Important - original must be resolved by name to avoid patching an already patched function.
def accepted(self): try: self._saveModel() except Exception as err: self._statusBar.showMessage(str(err)) raise else: self._resetWidgets() self.exported.emit(True) self.accept()
Successfully close the widget and emit an export signal. This method is also a `SLOT`. The dialog will be closed, when the `Export Data` button is pressed. If errors occur during the export, the status bar will show the error message and the dialog will not be closed.
def rename_regions(self, regions): if type(regions) is list: regions = {old: new for old, new in zip(self.get_regions(), regions)} for df in self.get_DataFrame(data=True): df.rename(index=regions, columns=regions, inplace=True) try: for ext in self.get_extensions(data=True): for df in ext.get_DataFrame(data=True): df.rename(index=regions, columns=regions, inplace=True) except: pass self.meta._add_modify("Changed country names") return self
Sets new names for the regions.

Parameters
----------
regions : list or dict
    In case of dict: {'old_name' : 'new_name'} with an entry for each old_name which should be renamed.
    In case of list: list of new names, in order and complete, without repetition.
def get_timing_signal(length, min_timescale=1, max_timescale=1e4, num_timescales=16): positions = to_float(tf.range(length)) log_timescale_increment = ( math.log(max_timescale / min_timescale) / (num_timescales - 1)) inv_timescales = min_timescale * tf.exp( to_float(tf.range(num_timescales)) * -log_timescale_increment) scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0) return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
Create Tensor of sinusoids of different frequencies. Args: length: Length of the Tensor to create, i.e. Number of steps. min_timescale: a float max_timescale: a float num_timescales: an int Returns: Tensor of shape (length, 2*num_timescales)
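A NumPy re-statement of the same construction for illustration (an assumption-laden sketch, not the TensorFlow API above): sinusoids at geometrically spaced timescales, concatenated along the channel axis.

import math
import numpy as np

def timing_signal_np(length, min_timescale=1.0, max_timescale=1e4, num_timescales=16):
    positions = np.arange(length, dtype=np.float32)
    log_increment = math.log(max_timescale / min_timescale) / (num_timescales - 1)
    inv_timescales = min_timescale * np.exp(
        np.arange(num_timescales, dtype=np.float32) * -log_increment)
    # outer product of positions and inverse timescales -> (length, num_timescales)
    scaled_time = positions[:, None] * inv_timescales[None, :]
    return np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)

print(timing_signal_np(8).shape)   # -> (8, 32), i.e. (length, 2 * num_timescales)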
def std_dev(xs):
    m = mean(xs)
    return sqrt(sum((x - m) ** 2 for x in xs) / float(len(xs)))
Returns the standard deviation of the given iterable of numbers. From http://rosettacode.org/wiki/Standard_deviation#Python An empty list, or a list with non-numeric elements will raise a TypeError. >>> std_dev([2,4,4,4,5,5,7,9]) 2.0 >>> std_dev([9,10,11,7,13]) 2.0 >>> std_dev([1,1,10,19,19]) 8.049844718999243 >>> std_dev({1,1,10,19,19}) == std_dev({19,10,1}) True >>> std_dev([10,10,10,10,10]) 0.0 >>> std_dev([1,"b"]) Traceback (most recent call last): ... ValueError: Input can't have non-numeric elements >>> std_dev([]) Traceback (most recent call last): ... ValueError: Input can't be empty
def linked_attribute(self):
    if isinstance(self.to_cls, str):
        return 'id'
    else:
        return self.via or self.to_cls.meta_.id_field.attribute_name
Choose the Linkage attribute between `via` and designated `id_field` of the target class This method is initially called from `__set_name__()` -> `get_attribute_name()` at which point, the `to_cls` has not been initialized properly. We simply default the linked attribute to 'id' in that case. Eventually, when setting value the first time, the `to_cls` entity is initialized and the attribute name is reset correctly.
def shared_options(rq): "Default class options to pass to the CLI commands." return { 'url': rq.redis_url, 'config': None, 'worker_class': rq.worker_class, 'job_class': rq.job_class, 'queue_class': rq.queue_class, 'connection_class': rq.connection_class, }
Default class options to pass to the CLI commands.
def coordinates_to_index(self, x, y, x_shift=0, y_shift=0):
    target_x = x + x_shift
    target_y = y + y_shift
    self.validate_coordinates(target_x, target_y)
    index = (target_y * self.length) + target_x
    return index
Return a linear index from a set of 2D coordinates. Optional horizontal and vertical shifts may be applied.
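A standalone sketch of the same row-major mapping, assuming self.length above is the row width of the grid: index = y * length + x.

def coords_to_index(x, y, length, x_shift=0, y_shift=0):
    target_x, target_y = x + x_shift, y + y_shift
    return target_y * length + target_x

# On a grid 4 cells wide, (x=2, y=1) lands at linear index 6.
assert coords_to_index(2, 1, length=4) == 6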
def VFSOpen(pathspec, progress_callback = None ): if not VFS_HANDLERS: Init() fd = None vroot = _VFS_VIRTUALROOTS.get(pathspec.pathtype) if (not vroot or pathspec.is_virtualroot or pathspec.CollapsePath().startswith(vroot.CollapsePath())): working_pathspec = pathspec.Copy() else: working_pathspec = vroot.Copy() working_pathspec.last.nested_path = pathspec.Copy() while working_pathspec: component = working_pathspec.Pop() try: handler = VFS_HANDLERS[component.pathtype] except KeyError: raise UnsupportedHandlerError(component.pathtype) fd = handler.Open( fd=fd, component=component, handlers=dict(VFS_HANDLERS), pathspec=working_pathspec, progress_callback=progress_callback) if fd is None: raise ValueError("VFSOpen cannot be called with empty PathSpec.") return fd
Expands pathspec to return an expanded Path. A pathspec is a specification of how to access the file by recursively opening each part of the path by different drivers. For example the following pathspec: pathtype: OS path: "/dev/sda1" nested_path { pathtype: TSK path: "/home/image2.img" nested_path { pathtype: TSK path: "/home/a.txt" } } Instructs the system to: 1) open /dev/sda1 using the OS driver. 2) Pass the obtained filelike object to the TSK driver to open "/home/image2.img". 3) The obtained filelike object should be passed to the TSK driver to open "/home/a.txt". The problem remains how to get to this expanded path specification. Since the server is not aware of all the files on the client, the server may request this: pathtype: OS path: "/dev/sda1" nested_path { pathtype: TSK path: "/home/image2.img/home/a.txt" } Or even this: pathtype: OS path: "/dev/sda1/home/image2.img/home/a.txt" This function converts the pathspec requested by the server into an expanded pathspec required to actually open the file. This is done by expanding each component of the pathspec in turn. Expanding the component is done by opening each leading directory in turn and checking if it is a directory of a file. If its a file, we examine the file headers to determine the next appropriate driver to use, and create a nested pathspec. Note that for some clients there might be a virtual root specified. This is a directory that gets prepended to all pathspecs of a given pathtype. For example if there is a virtual root defined as ["os:/virtualroot"], a path specification like pathtype: OS path: "/home/user/*" will get translated into pathtype: OS path: "/virtualroot" is_virtualroot: True nested_path { pathtype: OS path: "/dev/sda1" } Args: pathspec: A Path() protobuf to normalize. progress_callback: A callback to indicate that the open call is still working but needs more time. Returns: The open filelike object. This will contain the expanded Path() protobuf as the member fd.pathspec. Raises: IOError: if one of the path components can not be opened.
def _categorize_data(self, data, cols, dims): if self.invert_axes: cols = cols[::-1] dims = dims[:2][::-1] ranges = [self.handles['%s_range' % ax] for ax in 'xy'] for i, col in enumerate(cols): column = data[col] if (isinstance(ranges[i], FactorRange) and (isinstance(column, list) or column.dtype.kind not in 'SU')): data[col] = [dims[i].pprint_value(v) for v in column]
Transforms non-string or integer types in datasource if the axis to be plotted on is categorical. Accepts the column data source data, the columns corresponding to the axes and the dimensions for each axis, changing the data inplace.
def write_to_fitsfile(self, fitsfile, clobber=True): from fermipy.skymap import Map hpx_header = self._hpx.make_header() index_map = Map(self.ipixs, self.wcs) mult_map = Map(self.mult_val, self.wcs) prim_hdu = index_map.create_primary_hdu() mult_hdu = index_map.create_image_hdu() for key in ['COORDSYS', 'ORDERING', 'PIXTYPE', 'ORDERING', 'ORDER', 'NSIDE', 'FIRSTPIX', 'LASTPIX']: prim_hdu.header[key] = hpx_header[key] mult_hdu.header[key] = hpx_header[key] hdulist = fits.HDUList([prim_hdu, mult_hdu]) hdulist.writeto(fitsfile, overwrite=clobber)
Write this mapping to a FITS file, to avoid having to recompute it
def compute_group_colors(self): seen = set() self.group_label_color = [ x for x in self.node_colors if not (x in seen or seen.add(x)) ]
Computes the group colors according to node colors
def _pngmeta(self): reserved = ('interlace', 'gamma', 'dpi', 'transparency', 'aspect') try: tags = self.tags except AttributeError: tags = {} from PIL import PngImagePlugin meta = PngImagePlugin.PngInfo() for k__, v__ in tags.items(): if k__ not in reserved: meta.add_text(k__, v__, 0) return meta
It will return GeoImage.tags as a PNG metadata object. Inspired by: public domain, Nick Galbreath http://blog.modp.com/2007/08/python-pil-and-png-metadata-take-2.html
def url(self):
    return URL.format(http=self.web_proto, host=self.host, port=self.port)
Represent device base url.
def get_avg_price_fifo(self) -> Decimal:
    balance = self.get_quantity()
    if not balance:
        return Decimal(0)
    paid = Decimal(0)
    accounts = self.get_holding_accounts()
    for account in accounts:
        splits = self.get_available_splits_for_account(account)
        for split in splits:
            paid += split.value
    avg_price = paid / balance
    return avg_price
Calculates the average price paid for the security. security = Commodity Returns Decimal value.
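A worked sketch of the same arithmetic with made-up numbers: the total amount paid across all splits divided by the quantity held, as in the method above.

from decimal import Decimal

paid = Decimal("150.00") + Decimal("330.00")   # two hypothetical purchase splits
balance = Decimal("40")                        # total units held
avg_price = paid / balance
print(avg_price)                               # -> 12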
def open(self): self.hwman = HardwareManager(port=self._port) self.opened = True if self._connection_string is not None: try: self.hwman.connect_direct(self._connection_string) except HardwareError: self.hwman.close() raise elif self._connect_id is not None: try: self.hwman.connect(self._connect_id) except HardwareError: self.hwman.close() raise
Open and potentially connect to a device.
def unsubscribe_url(self):
    server_relative = ('%s?s=%s' % (reverse('tidings.unsubscribe', args=[self.pk]),
                                    self.secret))
    return 'https://%s%s' % (Site.objects.get_current().domain, server_relative)
Return the absolute URL to visit to delete me.
def _smartos_computenode_data(): grains = {} vms = {} for vm in __salt__['cmd.run']('vmadm list -p -o uuid,alias,state,type').split("\n"): vm = dict(list(zip(['uuid', 'alias', 'state', 'type'], vm.split(':')))) vms[vm['uuid']] = vm del vms[vm['uuid']]['uuid'] grains['computenode_vms_total'] = len(vms) grains['computenode_vms_running'] = 0 grains['computenode_vms_stopped'] = 0 grains['computenode_vms_type'] = {'KVM': 0, 'LX': 0, 'OS': 0} for vm in vms: if vms[vm]['state'].lower() == 'running': grains['computenode_vms_running'] += 1 elif vms[vm]['state'].lower() == 'stopped': grains['computenode_vms_stopped'] += 1 if vms[vm]['type'] not in grains['computenode_vms_type']: grains['computenode_vms_type'][vms[vm]['type']] = 0 grains['computenode_vms_type'][vms[vm]['type']] += 1 sysinfo = salt.utils.json.loads(__salt__['cmd.run']('sysinfo')) grains['computenode_sdc_version'] = sysinfo['SDC Version'] grains['computenode_vm_capable'] = sysinfo['VM Capable'] if sysinfo['VM Capable']: grains['computenode_vm_hw_virt'] = sysinfo['CPU Virtualization'] grains['manufacturer'] = sysinfo['Manufacturer'] grains['productname'] = sysinfo['Product'] grains['uuid'] = sysinfo['UUID'] return grains
Return useful information from a SmartOS compute node
def nl_cb_set(cb, type_, kind, func, arg): if type_ < 0 or type_ > NL_CB_TYPE_MAX or kind < 0 or kind > NL_CB_KIND_MAX: return -NLE_RANGE if kind == NL_CB_CUSTOM: cb.cb_set[type_] = func cb.cb_args[type_] = arg else: cb.cb_set[type_] = cb_def[type_][kind] cb.cb_args[type_] = arg return 0
Set up a callback. Updates `cb` in place. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/handlers.c#L293 Positional arguments: cb -- nl_cb class instance. type_ -- callback to modify (integer). kind -- kind of implementation (integer). func -- callback function (NL_CB_CUSTOM). arg -- argument passed to callback. Returns: 0 on success or a negative error code.
def update_index(index): logger.info("Updating search index: '%s'", index) client = get_client() responses = [] for model in get_index_models(index): logger.info("Updating search index model: '%s'", model.search_doc_type) objects = model.objects.get_search_queryset(index).iterator() actions = bulk_actions(objects, index=index, action="index") response = helpers.bulk(client, actions, chunk_size=get_setting("chunk_size")) responses.append(response) return responses
Re-index every document in a named index.