code | docstring
---|---|
def update_work_as_completed(self, worker_id, work_id, other_values=None,
error=None):
client = self._datastore_client
try:
with client.transaction() as transaction:
work_key = client.key(KIND_WORK_TYPE, self._work_type_entity_id,
KIND_WORK, work_id)
work_entity = client.get(work_key, transaction=transaction)
if work_entity['claimed_worker_id'] != worker_id:
return False
work_entity['is_completed'] = True
if other_values:
work_entity.update(other_values)
if error:
work_entity['error'] = text_type(error)
transaction.put(work_entity)
except Exception:
return False
return True
|
Updates work piece in datastore as completed.
Args:
worker_id: ID of the worker which did the work
work_id: ID of the work which was done
other_values: dictionary with additional values which should be saved
with the work piece
error: if not None, an error occurred during computation of the work
piece. In such a case the work will be marked as completed with an error.
Returns:
whether work was successfully updated
|
def set_shortcut(self, name, shortcut):
name = self.__normalize_name(name)
action = self.get_action(name)
if not action:
return
action.setShortcut(QKeySequence(shortcut))
return True
|
Sets given action shortcut.
:param name: Name of the action to set the shortcut for.
:type name: unicode
:param shortcut: Shortcut to set.
:type shortcut: unicode
:return: Method success.
:rtype: bool
|
def fetchJobStoreFiles(jobStore, options):
for jobStoreFile in options.fetch:
jobStoreHits = recursiveGlob(directoryname=options.jobStore,
glob_pattern=jobStoreFile)
for jobStoreFileID in jobStoreHits:
logger.debug("Copying job store file: %s to %s",
jobStoreFileID,
options.localFilePath[0])
jobStore.readFile(jobStoreFileID,
os.path.join(options.localFilePath[0],
os.path.basename(jobStoreFileID)),
symlink=options.useSymlinks)
|
Takes a list of file names as glob patterns, searches for them within a
given directory, and copies all matching files found into
options.localFilePath.
:param jobStore: A fileJobStore object.
:param options.fetch: List of file glob patterns to search
for in the jobStore and copy into options.localFilePath.
:param options.localFilePath: Local directory to copy files into.
:param options.jobStore: The path to the jobStore directory.
|
def get_service_name(*args):
raw_services = _get_services()
services = dict()
for raw_service in raw_services:
if args:
if raw_service['DisplayName'] in args or \
raw_service['ServiceName'] in args or \
raw_service['ServiceName'].lower() in args:
services[raw_service['DisplayName']] = raw_service['ServiceName']
else:
services[raw_service['DisplayName']] = raw_service['ServiceName']
return services
|
The Display Name is what is displayed in Windows when services.msc is
executed. Each Display Name has an associated Service Name which is the
actual name of the service. This function allows you to discover the
Service Name by returning a dictionary of Display Names and Service Names,
or filter by adding arguments of Display Names.
If no args are passed, return a dict of all services where the keys are the
service Display Names and the values are the Service Names.
If arguments are passed, return a dict of only the matching Display Names and Service Names.
Returns:
dict: A dictionary of display names and service names
CLI Examples:
.. code-block:: bash
salt '*' service.get_service_name
salt '*' service.get_service_name 'Google Update Service (gupdate)' 'DHCP Client'
|
def _identify_all(header, footer, ext=None):
matches = list()
for magic_row in magic_header_array:
start = magic_row.offset
end = magic_row.offset + len(magic_row.byte_match)
if end > len(header):
continue
if header[start:end] == magic_row.byte_match:
matches.append(magic_row)
for magic_row in magic_footer_array:
start = magic_row.offset
if footer[start:] == magic_row.byte_match:
matches.append(magic_row)
if not matches:
raise PureError("Could not identify file")
return _confidence(matches, ext)
|
Attempt to identify a file by the magic numbers in its header and footer.
|
def do_list(self, line):
repo_names = self.network.repo_names
print('Known repos:')
print(' ' + '\n '.join(repo_names))
|
List known repos
|
def create_version_model(self, task, releasetype, descriptor):
rootdata = treemodel.ListItemData(['Version', 'Releasetype', 'Path'])
rootitem = treemodel.TreeItem(rootdata)
for tf in task.taskfile_set.filter(releasetype=releasetype, descriptor=descriptor).order_by('-version'):
tfdata = djitemdata.TaskFileItemData(tf)
tfitem = treemodel.TreeItem(tfdata, rootitem)
for note in tf.notes.all():
notedata = djitemdata.NoteItemData(note)
treemodel.TreeItem(notedata, tfitem)
versionmodel = treemodel.TreeModel(rootitem)
return versionmodel
|
Create and return a new model that represents taskfiles for the given task, releasetype and descriptor.
:param task: the task of the taskfiles
:type task: :class:`djadapter.models.Task`
:param releasetype: the releasetype
:type releasetype: str
:param descriptor: the descriptor
:type descriptor: str|None
:returns: the created tree model
:rtype: :class:`jukeboxcore.gui.treemodel.TreeModel`
:raises: None
|
def _dict_to_report_line(cls, report_dict):
return '\t'.join([str(report_dict[x]) for x in report.columns])
|
Takes a report_dict as input and returns a report line
|
def append_rally_point(self, p):
if (self.rally_count() > 9):
print("Can't have more than 10 rally points, not adding.")
return
self.rally_points.append(p)
self.reindex()
|
add rallypoint to end of list
|
def issuetypes(accountable, project_key):
projects = accountable.issue_types(project_key)
headers = sorted(['id', 'name', 'description'])
rows = []
for key, issue_types in sorted(projects.items()):
for issue_type in issue_types:
rows.append(
[key] + [v for k, v in sorted(issue_type.items())
if k in headers]
)
rows.insert(0, ['project_key'] + headers)
print_table(SingleTable(rows))
|
List all issue types. Optional parameter to list issue types by a given
project.
|
def _scalar2array(d):
da = {}
for k, v in d.items():
if '_' not in k:
da[k] = v
else:
name = ''.join(k.split('_')[:-1])
ind = k.split('_')[-1]
dim = len(ind)
if name not in da:
shape = tuple(3 for i in range(dim))
da[name] = np.empty(shape, dtype=complex)
da[name][:] = np.nan
da[name][tuple(int(i) - 1 for i in ind)] = v
return da
|
Convert a dictionary with scalar elements and string indices '_1234'
to a dictionary of arrays. Unspecified entries are np.nan.
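A hypothetical usage sketch, assuming numpy is imported as np and _scalar2array is defined as above; the key names are illustrative only:
d = {'f': 0.5, 'M_11': 1 + 0j, 'M_12': 2 + 0j}
da = _scalar2array(d)
print(da['f'])                   # 0.5
print(da['M'][0, 0])             # (1+0j)
print(da['M'][0, 1])             # (2+0j)
print(np.isnan(da['M'][2, 2]))   # True -- unspecified entries stay nan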
|
def ensure_file(path):
try:
exists = isfile(path)
if not exists:
with open(path, 'w+') as fname:
fname.write('initialized')
return (True, path)
return (True, 'exists')
except OSError as e:
return (False, e)
|
Checks whether the file exists; if it does not, tries to create it.
|
def _get_keycache(self, parentity, branch, turn, tick, *, forward):
lru_append(self.keycache, self._kc_lru, (parentity+(branch,), turn, tick), KEYCACHE_MAXSIZE)
return self._get_keycachelike(
self.keycache, self.keys, self._get_adds_dels,
parentity, branch, turn, tick, forward=forward
)
|
Get a frozenset of keys that exist in the entity at the moment.
With ``forward=True``, enable an optimization that copies old key sets
forward and updates them.
|
def write_pruned_iocs(self, directory=None, pruned_source=None):
if pruned_source is None:
pruned_source = self.pruned_11_iocs
if len(pruned_source) < 1:
log.error('no iocs available to write out')
return False
if not directory:
directory = os.getcwd()
if os.path.isfile(directory):
log.error('cannot write iocs: the given path is an existing file, not a directory')
return False
utils.safe_makedirs(directory)
output_dir = os.path.abspath(directory)
for iocid in pruned_source:
ioc_obj = self.iocs_10[iocid]
ioc_obj.write_ioc_to_file(output_dir=output_dir, force=True)
return True
|
Writes IOCs to a directory that have been pruned of some or all IOCs.
:param directory: Directory to write IOCs to. If not provided, the current working directory is used.
:param pruned_source: Iterable containing a set of iocids. Defaults to self.pruned_11_iocs.
:return: True if the IOCs were written out, False otherwise.
|
def update(self):
for node in self.get_all_nodes():
try:
node.update_ips()
if node.ips and \
not (node.preferred_ip and \
node.preferred_ip in node.ips):
node.connect()
except InstanceError as ex:
log.warning("Ignoring error updating information on node %s: %s",
node, ex)
self.repository.save_or_update(self)
|
Update connection information of all nodes in this cluster.
It happens, for example, that public IPs are not available
immediately; calling this method again later might therefore help.
|
def run(config, clear_opt=False):
flickr = flickrapi.FlickrAPI(config.get('walls', 'api_key'),
config.get('walls', 'api_secret'))
width = config.getint('walls', 'width')
height = config.getint('walls', 'height')
if clear_opt:
clear_dir(os.path.expanduser(config.get('walls', 'image_dir')))
tags = config.get('walls', 'tags')
for photo in flickr.walk(tags=tags, format='etree'):
try:
photo_url = smallest_url(flickr, photo.get('id'), width, height)
if photo_url:
break
except (KeyError, ValueError, TypeError):
stderr_and_exit('Unexpected data from Flickr.\n')
else:
stderr_and_exit('No matching photos found.\n')
dest = os.path.expanduser(config.get('walls', 'image_dir'))
try:
download(photo_url, dest)
except IOError:
stderr_and_exit('Error downloading image.\n')
|
Find an image and download it.
|
def get_all_snapshots(self, snapshot_ids=None,
owner=None, restorable_by=None,
filters=None):
params = {}
if snapshot_ids:
self.build_list_params(params, snapshot_ids, 'SnapshotId')
if owner:
params['Owner'] = owner
if restorable_by:
params['RestorableBy'] = restorable_by
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeSnapshots', params,
[('item', Snapshot)], verb='POST')
|
Get all EBS Snapshots associated with the current credentials.
:type snapshot_ids: list
:param snapshot_ids: Optional list of snapshot ids. If this list is
present, only the Snapshots associated with
these snapshot ids will be returned.
:type owner: str
:param owner: If present, only the snapshots owned by the specified user
will be returned. Valid values are:
* self
* amazon
* AWS Account ID
:type restorable_by: str
:param restorable_by: If present, only the snapshots that are restorable
by the specified account id will be returned.
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list of :class:`boto.ec2.snapshot.Snapshot`
:return: The requested Snapshot objects
|
def _ostaunicode(src):
if have_py_3:
bytename = src
else:
bytename = src.decode('utf-8')
try:
enc = bytename.encode('latin-1')
encbyte = b'\x08'
except (UnicodeEncodeError, UnicodeDecodeError):
enc = bytename.encode('utf-16_be')
encbyte = b'\x10'
return encbyte + enc
|
Internal function to create an OSTA byte string from a source string.
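A hypothetical usage sketch, assuming Python 3 (so have_py_3 is true and src is already a str):
print(_ostaunicode('hello'))    # b'\x08hello' -- fits in latin-1
print(_ostaunicode('中文'))      # b'\x10' + UTF-16-BE bytes, since latin-1 encoding fails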
|
def virtual_temperature(temperature, mixing, molecular_weight_ratio=mpconsts.epsilon):
return temperature * ((mixing + molecular_weight_ratio)
/ (molecular_weight_ratio * (1 + mixing)))
|
r"""Calculate virtual temperature.
This calculation must be given an air parcel's temperature and mixing ratio.
The implementation uses the formula outlined in [Hobbs2006]_ pg.80.
Parameters
----------
temperature: `pint.Quantity`
The temperature
mixing : `pint.Quantity`
dimensionless mass mixing ratio
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The corresponding virtual temperature of the parcel
Notes
-----
.. math:: T_v = T \frac{\text{w} + \epsilon}{\epsilon\,(1 + \text{w})}
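A standalone numeric sketch of the same formula with plain floats (the real function expects pint Quantities); the sample values are illustrative:
epsilon = 0.622
T = 283.0    # air temperature, K
w = 0.01     # mass mixing ratio, kg/kg
Tv = T * (w + epsilon) / (epsilon * (1 + w))
print(round(Tv, 2))   # ~284.7 K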
|
def check_validation(self, cert):
if self.certificate_registry.is_ca(cert) and cert.signature not in self._validate_map:
self._validate_map[cert.signature] = ValidationPath(cert)
return self._validate_map.get(cert.signature)
|
Checks to see if a certificate has been validated, and if so, returns
the ValidationPath used to validate it.
:param cert:
An asn1crypto.x509.Certificate object
:return:
None if not validated, or a certvalidator.path.ValidationPath
object of the validation path
|
def gene_list(self, list_id):
return self.query(GeneList).filter_by(list_id=list_id).first()
|
Get a gene list from the database.
|
def build_news(ctx, draft=False, yes=False):
report.info(ctx, "docs.build-news", "building changelog from news fragments")
build_command = f"towncrier --version {ctx.metadata['version']}"
if draft:
report.warn(
ctx,
"docs.build-news",
"building changelog as draft (results are written to stdout)",
)
build_command += " --draft"
elif yes:
report.warn(
ctx, "docs.build-news", "removing news files without user confirmation (-y)"
)
build_command += " --yes"
ctx.run(build_command, hide=None)
|
Build towncrier newsfragments.
|
def reset(self):
for shard_id in self._shards:
if self._shards[shard_id].get('isReplicaSet'):
singleton = ReplicaSets()
elif self._shards[shard_id].get('isServer'):
singleton = Servers()
singleton.command(self._shards[shard_id]['_id'], 'reset')
for config_id in self._configsvrs:
self.configdb_singleton.command(config_id, 'reset')
for router_id in self._routers:
Servers().command(router_id, 'reset')
return self.info()
|
Ensure all shards, configs, and routers are running and available.
|
def connect(self, host, port, name=None):
client = self._clients.get(name)
client.connect_to(host, port)
|
Connects a client to given `host` and `port`. If client `name` is not
given then connects the latest client.
Examples:
| Connect | 127.0.0.1 | 8080 |
| Connect | 127.0.0.1 | 8080 | Client1 |
|
def count(self, eventRegistry):
self.setRequestedResult(RequestEventsInfo())
res = eventRegistry.execQuery(self)
if "error" in res:
print(res["error"])
count = res.get("events", {}).get("totalResults", 0)
return count
|
return the number of events that match the criteria
|
def new(self, dev_t_high, dev_t_low):
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('PN record already initialized!')
self.dev_t_high = dev_t_high
self.dev_t_low = dev_t_low
self._initialized = True
|
Create a new Rock Ridge POSIX device number record.
Parameters:
dev_t_high - The high-order 32-bits of the device number.
dev_t_low - The low-order 32-bits of the device number.
Returns:
Nothing.
|
def extract_image_size(self):
width, _ = self._extract_alternative_fields(
['Image ImageWidth', 'EXIF ExifImageWidth'], -1, int)
height, _ = self._extract_alternative_fields(
['Image ImageLength', 'EXIF ExifImageLength'], -1, int)
return width, height
|
Extract image height and width
|
def export_file(file_path):
if not os.path.isfile(file_path):
return error("Referenced file does not exist: '{}'.".format(file_path))
return "export {}".format(file_path)
|
Prepend the given parameter with ``export``
|
def make_archive(self, path):
zf = zipfile.ZipFile(path, 'w', zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(self.path):
relative_path = dirpath[len(self.path) + 1:]
if relative_path and not self._ignore(relative_path):
zf.write(dirpath, relative_path)
for name in filenames:
archive_name = os.path.join(relative_path, name)
if not self._ignore(archive_name):
real_path = os.path.join(dirpath, name)
self._check_type(real_path)
if os.path.islink(real_path):
self._check_link(real_path)
self._write_symlink(
zf, os.readlink(real_path), archive_name)
else:
zf.write(real_path, archive_name)
zf.close()
return path
|
Create archive of directory and write to ``path``.
:param path: Path to archive
Ignored::
* build/* - This is used for packing the charm itself and any
similar tasks.
* */.* - Hidden files are all ignored for now. This will most
likely be changed into a specific ignore list
(.bzr, etc)
|
def timescales_(self):
u, lv, rv = self._get_eigensystem()
with np.errstate(invalid='ignore', divide='ignore'):
timescales = - self.lag_time / np.log(u[1:])
return timescales
|
Implied relaxation timescales of the model.
The relaxation of any initial distribution towards equilibrium is
given, according to this model, by a sum of terms -- each corresponding
to the relaxation along a specific direction (eigenvector) in state
space -- which decay exponentially in time. See equation 19 from [1].
Returns
-------
timescales : array-like, shape = (n_timescales,)
The longest implied relaxation timescales of the model, expressed
in units of time-step between indices in the source data supplied
to ``fit()``.
References
----------
.. [1] Prinz, Jan-Hendrik, et al. "Markov models of molecular kinetics:
Generation and validation." J. Chem. Phys. 134.17 (2011): 174105.
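A standalone numeric sketch of the implied-timescale formula, timescale_i = -lag_time / ln(eigenvalue_i), dropping the stationary eigenvalue of 1; the spectrum below is made up for illustration:
import numpy as np
lag_time = 1
eigenvalues = np.array([1.0, 0.9, 0.5])
timescales = -lag_time / np.log(eigenvalues[1:])
print(np.round(timescales, 2))   # [9.49 1.44]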
|
def get_remote_info(url_id):
try:
data = _send_request(url_id)
except Exception as e:
sys.stderr.write("Seeder GET error: ")
sys.stderr.write(str(e.message))
return None
return _convert_to_wakat_format(data)
|
Download data and convert them to dict used in frontend.
Args:
url_id (str): ID used as identification in Seeder.
Returns:
dict: Dict with data for frontend or None in case of error.
|
def update(self, item):
self.model.set(self._iter_for(item), 0, item)
|
Manually update an item's display in the list
:param item: The item to be updated.
|
def render_js_code(self, id_, *args, **kwargs):
if id_:
options = self.render_select2_options_code(
dict(self.get_options()), id_)
return mark_safe(self.html.format(id=id_, options=options))
return u''
|
Render html container for Select2 widget with options.
|
def wildcard_allowed_principals(self, pattern=None):
wildcard_allowed = []
for statement in self.statements:
if statement.wildcard_principals(pattern) and statement.effect == "Allow":
wildcard_allowed.append(statement)
return wildcard_allowed
|
Find statements which allow wildcard principals.
A pattern can be specified for the wildcard principal
|
def to_content_range_header(self, length):
range_for_length = self.range_for_length(length)
if range_for_length is not None:
return "%s %d-%d/%d" % (
self.units,
range_for_length[0],
range_for_length[1] - 1,
length,
)
return None
|
Converts the object into a `Content-Range` HTTP header value,
based on the given length.
|
def _concat(self, egdfs):
egdfs = list(egdfs)
edata = pd.concat(egdfs, axis=0, ignore_index=False, copy=False)
one2one = (
self.keep_index and
not any(edata.index.duplicated()) and
len(edata.index) == len(self.data.index))
if one2one:
edata = edata.sort_index()
else:
edata.reset_index(drop=True, inplace=True)
if self.keep_groups and self.groups:
edata = GroupedDataFrame(edata, groups=self.groups)
return edata
|
Concatenate evaluated group dataframes
Parameters
----------
egdfs : iterable
Evaluated dataframes
Returns
-------
edata : pandas.DataFrame
Evaluated data
|
async def get_state_json(
self,
rr_state_builder: Callable[['Verifier', str, int], Awaitable[Tuple[str, int]]],
fro: int,
to: int) -> (str, int):
LOGGER.debug(
'RevoCacheEntry.get_state_json >>> rr_state_builder: %s, fro: %s, to: %s',
rr_state_builder.__name__,
fro,
to)
rv = await self._get_update(rr_state_builder, fro, to, False)
LOGGER.debug('RevoCacheEntry.get_state_json <<< %s', rv)
return rv
|
Get rev reg state json, and its timestamp on the distributed ledger,
from cached rev reg state frames list or distributed ledger,
updating cache as necessary.
Raise BadRevStateTime if caller asks for a state in the future.
On return of any previously existing rev reg state frame, always update its query time beforehand.
:param rr_state_builder: callback to build rev reg state if need be (specify anchor instance's
_build_rr_state())
:param fro: least time (epoch seconds) of interest; lower-bounds 'to' on frame housing return data
:param to: greatest time (epoch seconds) of interest; upper-bounds returned revocation state timestamp
:return: rev reg state json and ledger timestamp (epoch seconds)
|
def append(self, p_todo, p_string):
if len(p_string) > 0:
new_text = p_todo.source() + ' ' + p_string
p_todo.set_source_text(new_text)
self._update_todo_ids()
self.dirty = True
|
Appends a text to the todo, specified by its number.
The todo will be parsed again, such that tags and projects in the
appended string are processed.
|
def from_gtp(gtpc):
gtpc = gtpc.upper()
if gtpc == 'PASS':
return None
col = _GTP_COLUMNS.index(gtpc[0])
row_from_bottom = int(gtpc[1:])
return go.N - row_from_bottom, col
|
Converts from a GTP coordinate to a Minigo coordinate.
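A hedged usage sketch: _GTP_COLUMNS and go.N are module globals of the real code, so the stand-ins below are assumptions (GTP columns conventionally skip the letter 'I', and go.N is the board size):
_GTP_COLUMNS = 'ABCDEFGHJKLMNOPQRSTUVWXYZ'
class go:            # stand-in for the real go module
    N = 19
print(from_gtp('A19'))    # (0, 0)   -- top-left corner
print(from_gtp('T1'))     # (18, 18) -- bottom-right corner
print(from_gtp('pass'))   # None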
|
def zoom_bbox(self, bbox):
try:
bbox.transform(self.map.srs)
except gdal.GDALException:
pass
else:
self.map.zoom_to_box(mapnik.Box2d(*bbox.extent))
|
Zoom map to geometry extent.
Arguments:
bbox -- OGRGeometry polygon to zoom map extent
|
def lookup(self, iterable, gather=False):
for result in self.root.lookup(iterable,
gather=gather,
edit_distance=0,
max_edit_distance=self.max_edit_distance,
match_threshold=self.match_threshold):
yield result
|
Call the lookup on the root node with the given parameters.
Args
iterable(index or key): Used to retrieve nodes from the tree
gather(bool): this is passed down to the root node lookup
Notes:
max_edit_distance and match_threshold come from the init
|
def flatten(inputs, scope=None):
if len(inputs.get_shape()) < 2:
raise ValueError('Inputs must have at least 2 dimensions')
dims = inputs.get_shape()[1:]
k = dims.num_elements()
with tf.name_scope(scope, 'Flatten', [inputs]):
return tf.reshape(inputs, [-1, k])
|
Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: a tensor of size [batch_size, ...].
scope: Optional scope for name_scope.
Returns:
a flattened tensor with shape [batch_size, k].
Raises:
ValueError: if inputs.shape is wrong.
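A hedged usage sketch: the helper uses the TF1-style three-argument tf.name_scope, so it is assumed to live in a module importing TF1 compatibility mode; the shapes below are illustrative:
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
images = tf.placeholder(tf.float32, [None, 28, 28, 3])
flat = flatten(images)                 # static shape becomes [None, 2352]
with tf.Session() as sess:
    out = sess.run(flat, {images: np.zeros((4, 28, 28, 3), np.float32)})
print(out.shape)                       # (4, 2352)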
|
def parse(s):
stuff = []
rest = s
while True:
front, token, rest = peel_off_esc_code(rest)
if front:
stuff.append(front)
if token:
try:
tok = token_type(token)
if tok:
stuff.extend(tok)
except ValueError:
raise ValueError("Can't parse escape sequence: %r %r %r %r" % (s, repr(front), token, repr(rest)))
if not rest:
break
return stuff
|
r"""
Returns a list of strings or format dictionaries to describe the strings.
May raise a ValueError if it can't be parsed.
>>> parse(">>> []")
['>>> []']
>>> #parse("\x1b[33m[\x1b[39m\x1b[33m]\x1b[39m\x1b[33m[\x1b[39m\x1b[33m]\x1b[39m\x1b[33m[\x1b[39m\x1b[33m]\x1b[39m\x1b[33m[\x1b[39m")
|
def save(self, *args, **kwargs):
letter = getattr(self, "block_letter", None)
if letter and len(letter) >= 1:
self.block_letter = letter[:1].upper() + letter[1:]
super(EighthBlock, self).save(*args, **kwargs)
|
Capitalize the first letter of the block name.
|
def compose(self, mapping):
items = [f.compose(mapping) for f in self._items]
return self.__class__(items, self.shape, self.ftype)
|
Apply the ``compose`` method to all functions.
Returns a new farray.
|
def render(self, context):
user = self._get_value(self.user_key, context)
feature = self._get_value(self.feature, context)
if feature is None:
return ''
allowed = show_feature(user, feature)
return self.nodelist.render(context) if allowed else ''
|
Handle the actual rendering.
|
def synchronizeLayout(primary, secondary, surface_size):
primary.configure_bound(surface_size)
secondary.configure_bound(surface_size)
if (primary.key_size < secondary.key_size):
logging.warning('Normalizing key size from secondary to primary')
secondary.key_size = primary.key_size
elif (primary.key_size > secondary.key_size):
logging.warning('Normalizing key size from primary to secondary')
primary.key_size = secondary.key_size
if (primary.size[1] > secondary.size[1]):
logging.warning('Normalizing layout size from secondary to primary')
secondary.set_size(primary.size, surface_size)
elif (primary.size[1] < secondary.size[1]):
logging.warning('Normalizing layout size from primary to secondary')
primary.set_size(secondary.size, surface_size)
|
Synchronizes the given layouts by normalizing their heights, using the
maximum height of the given layouts, to avoid dirty transition effects.
:param primary: Primary layout used.
:param secondary: Secondary layout used.
:param surface_size: Target surface size on which layout will be displayed.
|
def save_project(self, project, filename=''):
if filename == '':
filename = project.name
filename = self._parse_filename(filename=filename, ext='pnm')
d = {project.name: project}
with open(filename, 'wb') as f:
pickle.dump(d, f)
|
r"""
Saves given Project to a 'pnm' file
This will include all of the associated objects, including algorithms.
Parameters
----------
project : OpenPNM Project
The project to save.
filename : string, optional
If no filename is given, the given project name is used. See Notes
for more information.
See Also
--------
save_workspace
Notes
-----
The filename can be a string such as 'saved_file.pnm'. The string can
include an absolute path such as 'C:\networks\saved_file.pnm', or can
be a relative path such as '..\..\saved_file.pnm', which will look
2 directories above the current working directory. Can also be a
path object such as that produced by ``pathlib`` or ``os.path`` in
the Python standard library.
|
def real_space(self):
if not is_numeric_dtype(self.dtype):
raise ValueError(
'`real_space` not defined for non-numeric `dtype`')
return self.astype(self.real_dtype)
|
The space corresponding to this space's `real_dtype`.
Raises
------
ValueError
If `dtype` is not a numeric data type.
|
def __update_paths(self, settings):
if not isinstance(settings, dict):
return
if 'custom_base_path' in settings:
base_path = settings['custom_base_path']
base_path = join(dirname(__file__), base_path)
self.__load_paths(base_path)
|
Set custom paths if necessary
|
def ms_cutall(self, viewer, event, data_x, data_y):
if not self.cancut:
return True
x, y = self.get_win_xy(viewer)
if event.state == 'move':
self._cutboth_xy(viewer, x, y)
elif event.state == 'down':
self._start_x, self._start_y = x, y
image = viewer.get_image()
self._loval, self._hival = viewer.autocuts.calc_cut_levels(image)
else:
viewer.onscreen_message(None)
return True
|
An interactive way to set the low AND high cut levels.
|
def _parse_box_list(self, output):
boxes = []
name = provider = version = None
for timestamp, target, kind, data in self._parse_machine_readable_output(output):
if kind == 'box-name':
if name is not None:
boxes.append(Box(name=name, provider=provider, version=version))
name = data
provider = version = None
elif kind == 'box-provider':
provider = data
elif kind == 'box-version':
version = data
if name is not None:
boxes.append(Box(name=name, provider=provider, version=version))
return boxes
|
Parse the machine-readable output of ``vagrant box list`` into a list of Box objects.
|
def isempty(path):
if op.isdir(path):
return [] == os.listdir(path)
elif op.isfile(path):
return 0 == os.stat(path).st_size
return None
|
Returns True if the given file or directory path is empty.
**Examples**:
::
auxly.filesys.isempty("foo.txt") # Works on files...
auxly.filesys.isempty("bar") # ...or directories!
|
def start(self):
self.streams.append(sys.stdout)
sys.stdout = self.stream
|
Activate the TypingStream on stdout
|
def output_package(dist):
if dist_is_editable(dist):
return '%s (%s, %s)' % (
dist.project_name,
dist.version,
dist.location,
)
return '%s (%s)' % (dist.project_name, dist.version)
|
Return string displaying package information.
|
def _regressor_names(con_name, hrf_model, fir_delays=None):
if hrf_model in ['glover', 'spm', None]:
return [con_name]
elif hrf_model in ["glover + derivative", 'spm + derivative']:
return [con_name, con_name + "_derivative"]
elif hrf_model in ['spm + derivative + dispersion',
'glover + derivative + dispersion']:
return [con_name, con_name + "_derivative", con_name + "_dispersion"]
elif hrf_model == 'fir':
return [con_name + "_delay_%d" % i for i in fir_delays]
|
Returns a list of regressor names, computed from con-name and hrf type
Parameters
----------
con_name: string
identifier of the condition
hrf_model: string or None,
hrf model chosen
fir_delays: 1D array_like, optional,
Delays used in case of an FIR model
Returns
-------
names: list of strings,
regressor names
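A hypothetical usage sketch, assuming the function above is in scope; the condition name and delays are made up:
print(_regressor_names('faces', 'glover'))
# ['faces']
print(_regressor_names('faces', 'spm + derivative'))
# ['faces', 'faces_derivative']
print(_regressor_names('faces', 'fir', fir_delays=[0, 1, 2]))
# ['faces_delay_0', 'faces_delay_1', 'faces_delay_2']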
|
def page(self, end=values.unset, start=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
params = values.of({
'End': serialize.iso8601_datetime(end),
'Start': serialize.iso8601_datetime(start),
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return DataSessionPage(self._version, response, self._solution)
|
Retrieve a single page of DataSessionInstance records from the API.
Request is executed immediately
:param datetime end: The end
:param datetime start: The start
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of DataSessionInstance
:rtype: twilio.rest.wireless.v1.sim.data_session.DataSessionPage
|
def get_blocked(self):
url = self.reddit_session.config['blocked']
return self.reddit_session.request_json(url)
|
Return a UserList of Redditors whom the user has blocked.
|
def _id_to_subword(self, subword_id):
if subword_id < 0 or subword_id >= (self.vocab_size - 1):
raise ValueError("Received id %d which is invalid. Ids must be within "
"[0, %d)." % (subword_id + 1, self.vocab_size))
if 0 <= subword_id < len(self._subwords):
return self._subwords[subword_id]
else:
offset = len(self._subwords)
subword_id -= offset
bytestr = bytes(bytearray([subword_id]))
return bytestr
|
Converts a subword integer ID to a subword string.
|
def find_entry_name_of_alias(self, alias):
if alias in self.aliases:
name = self.aliases[alias]
if name in self.entries:
return name
else:
for name, entry in self.entries.items():
aliases = entry.get_aliases(includename=False)
if alias in aliases:
if (ENTRY.DISTINCT_FROM not in entry or
alias not in entry[ENTRY.DISTINCT_FROM]):
return name
return None
|
Return the first entry name with the given 'alias' included in its
list of aliases.
Returns
-------
name of matching entry (str) or 'None' if no matches
|
def __SetDefaultUploadStrategy(self, upload_config, http_request):
if upload_config.resumable_path is None:
self.strategy = SIMPLE_UPLOAD
if self.strategy is not None:
return
strategy = SIMPLE_UPLOAD
if (self.total_size is not None and
self.total_size > _RESUMABLE_UPLOAD_THRESHOLD):
strategy = RESUMABLE_UPLOAD
if http_request.body and not upload_config.simple_multipart:
strategy = RESUMABLE_UPLOAD
if not upload_config.simple_path:
strategy = RESUMABLE_UPLOAD
self.strategy = strategy
|
Determine and set the default upload strategy for this upload.
We generally prefer simple or multipart, unless we're forced to
use resumable. This happens when any of (1) the upload is too
large, (2) the simple endpoint doesn't support multipart requests
and we have metadata, or (3) there is no simple upload endpoint.
Args:
upload_config: Configuration for the upload endpoint.
http_request: The associated http request.
Returns:
None.
|
def _is_interactive(self):
return not (
self.realworld and (dt.date.today() > self.datetime.date()))
|
Prevent middlewares and orders from working outside live mode.
|
def cancelUpdate(self):
key = '/library/sections/%s/refresh' % self.key
self._server.query(key, method=self._server._session.delete)
|
Cancel update of this Library Section.
|
def mediatype_create(name, mediatype, **kwargs):
conn_args = _login(**kwargs)
ret = {}
try:
if conn_args:
method = 'mediatype.create'
params = {"description": name}
params['type'] = mediatype
params = _params_extend(params, _ignore_name=True, **kwargs)
ret = _query(method, params, conn_args['url'], conn_args['auth'])
return ret['result']['mediatypeid']
else:
raise KeyError
except KeyError:
return ret
|
Create new mediatype
.. note::
This function accepts all standard mediatype properties: keyword
argument names differ depending on your zabbix version, see here__.
.. __: https://www.zabbix.com/documentation/3.0/manual/api/reference/mediatype/object
:param mediatype: media type - 0: email, 1: script, 2: sms, 3: Jabber, 100: Ez Texting
:param exec_path: exec path - Required for script and Ez Texting types, see Zabbix API docs
:param gsm_modem: exec path - Required for sms type, see Zabbix API docs
:param smtp_email: email address from which notifications will be sent, required for email type
:param smtp_helo: SMTP HELO, required for email type
:param smtp_server: SMTP server, required for email type
:param status: whether the media type is enabled - 0: enabled, 1: disabled
:param username: authentication user, required for Jabber and Ez Texting types
:param passwd: authentication password, required for Jabber and Ez Texting types
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
:return: ID of the created mediatype.
CLI Example:
.. code-block:: bash
salt '*' zabbix.mediatype_create 'Email' 0 smtp_email='[email protected]'
smtp_server='mailserver.example.com' smtp_helo='zabbix.example.com'
|
def sojourn_time(p):
p = np.asarray(p)
pii = p.diagonal()
if not (1 - pii).all():
print("Sojourn times are infinite for absorbing states!")
return 1 / (1 - pii)
|
Calculate sojourn time based on a given transition probability matrix.
Parameters
----------
p : array
(k, k), a Markov transition probability matrix.
Returns
-------
: array
(k, ), sojourn times. Each element is the expected time a Markov
chain spends in each state before leaving that state.
Notes
-----
Refer to :cite:`Ibe2009` for more details on sojourn times for Markov
chains.
Examples
--------
>>> from giddy.markov import sojourn_time
>>> import numpy as np
>>> p = np.array([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
>>> sojourn_time(p)
array([2., 1., 2.])
|
def trace_in_process_link(self, link_bytes):
return tracers.InProcessLinkTracer(self._nsdk,
self._nsdk.trace_in_process_link(link_bytes))
|
Creates a tracer for tracing asynchronous related processing in the same process.
For more information see :meth:`create_in_process_link`.
:param bytes link_bytes: An in-process link created using :meth:`create_in_process_link`.
:rtype: tracers.InProcessLinkTracer
.. versionadded:: 1.1.0
|
def create_environment(self, name, default=False, zone=None):
from qubell.api.private.environment import Environment
return Environment.new(organization=self, name=name, zone_id=zone, default=default, router=self._router)
|
Creates environment and returns Environment object.
|
async def _do(self, ctx, times: int, *, command):
msg = copy.copy(ctx.message)
msg.content = command
for i in range(times):
await self.bot.process_commands(msg)
|
Repeats a command a specified number of times.
|
def flatten(self):
args = list(self.args)
i = 0
for arg in self.args:
if isinstance(arg, self.__class__):
args[i:i + 1] = arg.args
i += len(arg.args)
else:
i += 1
return self.__class__(*args)
|
Return a new expression where nested terms of this expression are
flattened as far as possible.
E.g. A & (B & C) becomes A & B & C.
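A minimal self-contained sketch of the same flattening idea, using a hypothetical And class (the real class is whatever provides self.args):
class And:
    def __init__(self, *args):
        self.args = args
    def flatten(self):
        args = list(self.args)
        i = 0
        for arg in self.args:
            if isinstance(arg, self.__class__):
                args[i:i + 1] = arg.args
                i += len(arg.args)
            else:
                i += 1
        return self.__class__(*args)

expr = And('A', And('B', 'C'))
print(expr.flatten().args)   # ('A', 'B', 'C')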
|
def parse(self, data_model, crit):
tables = pd.DataFrame(data_model)
data_model = {}
for table_name in tables.columns:
data_model[table_name] = pd.DataFrame(tables[table_name]['columns']).T
data_model[table_name] = data_model[table_name].where((pd.notnull(data_model[table_name])), None)
zipped = list(zip(crit.keys(), crit.values()))
crit_map = pd.DataFrame(zipped)
crit_map.index = crit_map[0]
crit_map.drop(0, axis='columns', inplace=True)
crit_map.rename({1: 'criteria_map'}, axis='columns', inplace=True)
crit_map.index.rename("", inplace=True)
for table_name in ['measurements', 'specimens', 'samples', 'sites', 'locations',
'contribution', 'criteria', 'images', 'ages']:
crit_map.loc[table_name] = np.nan
return data_model, crit_map
|
Take the relevant pieces of the data model json
and parse into data model and criteria map.
Parameters
----------
data_model : data model piece of json (nested dicts)
crit : criteria map piece of json (nested dicts)
Returns
----------
data_model : dictionary of DataFrames
crit_map : DataFrame
|
def signup_verify(request, uidb36=None, token=None):
user = authenticate(uidb36=uidb36, token=token, is_active=False)
if user is not None:
user.is_active = True
user.save()
auth_login(request, user)
info(request, _("Successfully signed up"))
return login_redirect(request)
else:
error(request, _("The link you clicked is no longer valid."))
return redirect("/")
|
View for the link in the verification email sent to a new user
when they create an account and ``ACCOUNTS_VERIFICATION_REQUIRED``
is set to ``True``. Activates the user and logs them in,
redirecting to the URL they tried to access when signing up.
|
def _prepare_value(val, maxlen=50, notype=False):
if val is None or val is True or val is False:
return str(val)
sval = repr(val)
sval = sval.replace("\n", " ").replace("\t", " ").replace("`", "'")
if len(sval) > maxlen:
sval = sval[:maxlen - 4] + "..." + sval[-1]
if notype:
return sval
else:
tval = checker_for_type(type(val)).name()
return "%s of type %s" % (sval, tval)
|
Stringify value `val`, ensuring that it is not too long.
|
def dimod_object_hook(obj):
if _is_sampleset_v2(obj):
return SampleSet.from_serializable(obj)
elif _is_bqm_v2(obj):
return BinaryQuadraticModel.from_serializable(obj)
return obj
|
JSON-decoding for dimod objects.
See Also:
:class:`json.JSONDecoder` for using custom decoders.
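A hedged usage sketch, assuming the dimod package is installed (constructor and serialization details vary across dimod versions, so treat the names below as illustrative):
import json
import dimod
bqm = dimod.BinaryQuadraticModel({'a': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
blob = json.dumps(bqm.to_serializable())
restored = json.loads(blob, object_hook=dimod_object_hook)
# restored is a BinaryQuadraticModel when the serialized dict matches the
# v2 checks above; otherwise the plain dict is passed through unchanged.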
|
def template_string(
task: Task, template: str, jinja_filters: FiltersDict = None, **kwargs: Any
) -> Result:
jinja_filters = jinja_filters or {} or task.nornir.config.jinja2.filters
text = jinja_helper.render_from_string(
template=template, host=task.host, jinja_filters=jinja_filters, **kwargs
)
return Result(host=task.host, result=text)
|
Renders a string with jinja2. All the host data is available in the template
Arguments:
template (string): template string
jinja_filters (dict): jinja filters to enable. Defaults to nornir.config.jinja2.filters
**kwargs: additional data to pass to the template
Returns:
Result object with the following attributes set:
* result (``string``): rendered string
|
def raster_reclassify(srcfile, v_dict, dstfile, gdaltype=GDT_Float32):
src_r = RasterUtilClass.read_raster(srcfile)
src_data = src_r.data
dst_data = numpy.copy(src_data)
if gdaltype == GDT_Float32 and src_r.dataType != GDT_Float32:
gdaltype = src_r.dataType
no_data = src_r.noDataValue
new_no_data = DEFAULT_NODATA
if gdaltype in [GDT_Unknown, GDT_Byte, GDT_UInt16, GDT_UInt32]:
new_no_data = 0
if not MathClass.floatequal(new_no_data, src_r.noDataValue):
if src_r.noDataValue not in v_dict:
v_dict[src_r.noDataValue] = new_no_data
no_data = new_no_data
for (k, v) in iteritems(v_dict):
dst_data[src_data == k] = v
RasterUtilClass.write_gtiff_file(dstfile, src_r.nRows, src_r.nCols, dst_data,
src_r.geotrans, src_r.srs, no_data, gdaltype)
|
Reclassify raster by given classifier dict.
Args:
srcfile: source raster file.
v_dict: classifier dict.
dstfile: destination file path.
gdaltype (:obj:`pygeoc.raster.GDALDataType`): GDT_Float32 as default.
|
def kernel_command_line(self, kernel_command_line):
log.info('QEMU VM "{name}" [{id}] has set the QEMU kernel command line to {kernel_command_line}'.format(name=self._name,
id=self._id,
kernel_command_line=kernel_command_line))
self._kernel_command_line = kernel_command_line
|
Sets the kernel command line for this QEMU VM.
:param kernel_command_line: QEMU kernel command line
|
def post_task(task_data, task_uri='/tasks'):
url = '{}/{}'.format(API_URL, task_uri.lstrip('/'))
if isinstance(task_data, str):
task_json = task_data
else:
task_json = json.dumps(task_data)
resp = requests.post(url, data=task_json, headers=HEADERS, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
resp_json = resp.json()
LOG.debug(resp_json)
assert resp.ok, 'Spinnaker communication error: {0}'.format(resp.text)
return resp_json['ref']
|
Create Spinnaker Task.
Args:
task_data (str or dict): Task definition, either a JSON string or a dict.
Returns:
str: Spinnaker Task ID.
Raises:
AssertionError: Error response from Spinnaker.
|
def _get_md_files(self):
all_f = _all_files_matching_ext(os.getcwd(), "md")
exclusions = [
"*.egg/*",
"*.eggs/*",
"*build/*"
] + self.exclusions
return sorted([f for f in all_f if not _is_excluded(f, exclusions)])
|
Get all markdown files.
|
def plot_posterior_contour(self, idx_param1=0, idx_param2=1, res1=100, res2=100, smoothing=0.01):
return plt.contour(*self.posterior_mesh(idx_param1, idx_param2, res1, res2, smoothing))
|
Plots a contour of the kernel density estimation
of a 2D projection of the current posterior distribution.
:param int idx_param1: Parameter to be treated as :math:`x` when
plotting.
:param int idx_param2: Parameter to be treated as :math:`y` when
plotting.
:param int res1: Resolution along the :math:`x` direction.
:param int res2: Resolution along the :math:`y` direction.
:param float smoothing: Standard deviation of the Gaussian kernel
used to smooth the particle approximation to the current posterior.
.. seealso::
:meth:`SMCUpdater.posterior_mesh`
|
def getItem(self, index, altItem=None):
if index.isValid():
item = index.internalPointer()
if item:
return item
return altItem
|
Returns the TreeItem for the given index. Returns the altItem if the index is invalid.
|
def run_qpoints(self,
q_points,
with_eigenvectors=False,
with_group_velocities=False,
with_dynamical_matrices=False,
nac_q_direction=None):
if self._dynamical_matrix is None:
msg = ("Dynamical matrix has not yet built.")
raise RuntimeError(msg)
if with_group_velocities:
if self._group_velocity is None:
self._set_group_velocity()
group_velocity = self._group_velocity
else:
group_velocity = None
self._qpoints = QpointsPhonon(
np.reshape(q_points, (-1, 3)),
self._dynamical_matrix,
nac_q_direction=nac_q_direction,
with_eigenvectors=with_eigenvectors,
group_velocity=group_velocity,
with_dynamical_matrices=with_dynamical_matrices,
factor=self._factor)
|
Phonon calculations on q-points.
Parameters
----------
q_points: array_like or float, optional
q-points in reduced coordinates.
dtype='double', shape=(q-points, 3)
with_eigenvectors: bool, optional
Eigenvectors are stored by setting True. Default False.
with_group_velocities : bool, optional
Group velocities are calculated by setting True. Default is False.
with_dynamical_matrices : bool, optional
Calculated dynamical matrices are stored by setting True.
Default is False.
nac_q_direction : array_like
q=(0,0,0) is replaced by q=epsilon * nac_q_direction where epsilon
is infinitesimal for non-analytical term correction. This is used,
e.g., to observe LO-TO splitting.
|
def get_image_upload_to(self, filename):
dummy, ext = os.path.splitext(filename)
return os.path.join(
machina_settings.FORUM_IMAGE_UPLOAD_TO,
'{id}{ext}'.format(id=str(uuid.uuid4()).replace('-', ''), ext=ext),
)
|
Returns the path to upload a new associated image to.
|
def update_credit_note(self, credit_note_id, credit_note_dict):
return self._create_put_request(resource=CREDIT_NOTES, billomat_id=credit_note_id, send_data=credit_note_dict)
|
Updates a credit note
:param credit_note_id: the credit note id
:param credit_note_dict: dict
:return: dict
|
def _parse_options(options: List[str]) -> Dict[str, str]:
try:
return dict(i.split('=', maxsplit=1) for i in options)
except ValueError:
raise ArgumentError(
f'Option must be in format <key>=<value>, got: {options}')
|
Parse repeatable CLI options
>>> opts = _parse_options(['cluster.name=foo', 'CRATE_JAVA_OPTS="-Dxy=foo"'])
>>> print(json.dumps(opts, sort_keys=True))
{"CRATE_JAVA_OPTS": "\\"-Dxy=foo\\"", "cluster.name": "foo"}
|
def set_toolBox_height(tool_box, height=32):
for button in tool_box.findChildren(QAbstractButton):
button.setMinimumHeight(height)
return True
|
Sets given height on given QToolBox widget buttons.
:param tool_box: ToolBox.
:type tool_box: QToolBox
:param height: Height.
:type height: int
:return: Definition success.
:rtype: bool
|
def post_message(self, msg):
super(mavlogfile, self).post_message(msg)
if self.planner_format:
self.f.read(1)
self.timestamp = msg._timestamp
self._last_message = msg
if msg.get_type() != "BAD_DATA":
self._last_timestamp = msg._timestamp
msg._link = self._link
|
add timestamp to message
|
def wrap_generator(func):
async def _wrapped(*a, **k):
r, ret = None, []
gen = func(*a, **k)
while True:
try:
item = gen.send(r)
except StopIteration:
break
if inspect.isawaitable(item):
r = await item
else:
r = item
ret.append(r)
if len(ret) == 1:
return ret.pop()
return ret
return _wrapped
|
Decorator to convert a generator function to an async function which collects
and returns generator results, returning a list if there are multiple results
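A minimal usage sketch, assuming wrap_generator is defined as above (note the decorator itself relies on the inspect module):
import asyncio, inspect

@wrap_generator
def steps():
    a = yield asyncio.sleep(0, result=1)   # awaitable -> awaited, result collected
    b = yield a + 1                        # plain value -> collected as-is
    yield b

print(asyncio.run(steps()))   # [1, 2, 2]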
|
def force_lazy_import(name):
obj = import_object(name)
module_items = list(getattr(obj, '__dict__', {}).items())
for key, value in module_items:
if getattr(value, '__module__', None):
import_object(name + '.' + key)
|
Import any modules off of "name" by iterating a new list rather than a generator so that this
library works with lazy imports.
|
def to_adb_message(self, data):
message = AdbMessage(AdbMessage.WIRE_TO_CMD.get(self.cmd),
self.arg0, self.arg1, data)
if (len(data) != self.data_length or
message.data_crc32 != self.data_checksum):
raise usb_exceptions.AdbDataIntegrityError(
'%s (%s) received invalid data: %s', message, self, repr(data))
return message
|
Turn the data into an ADB message.
|
def default_value(self, default_value):
if default_value not in self.default_values:
if len(self.default_labels) == len(self.default_values):
self.default_values[-1] = default_value
else:
self.default_values.append(default_value)
self._default_value = default_value
|
Setter for default_value.
:param default_value: The default value.
:type default_value: object
|
def channel(self, channel_id=None):
if channel_id in self.channels:
return self.channels[channel_id]
return Channel(self, channel_id)
|
Fetch a Channel object identified by the numeric channel_id, or
create that object if it doesn't already exist.
|
def put(request, obj_id=None):
res = Result()
data = request.PUT or json.loads(request.body)['body']
if obj_id:
tag = Tag.objects.get(pk=obj_id)
tag.name = data.get('name', tag.name)
tag.artist = data.get('artist', tag.artist)
tag.save()
else:
tags = [_ for _ in data.get('tags', '').split(',') if _]
guids = [_ for _ in data.get('guids', '').split(',') if _]
_manageTags(tags, guids)
return JsonResponse(res.asDict())
|
Adds tags from objects resolved from guids
:param tags: Tags to add
:type tags: list
:param guids: Guids to add tags from
:type guids: list
:returns: json
|
def _internal_function_call(self, call_conf):
def stub(*args, **kwargs):
message = 'Function {} is not available'.format(call_conf['fun'])
self.out.error(message)
log.debug(
'Attempt to run "%s" with %s arguments and %s parameters.',
call_conf['fun'], call_conf['arg'], call_conf['kwargs']
)
return message
return getattr(salt.cli.support.intfunc,
call_conf['fun'], stub)(self.collector,
*call_conf['arg'],
**call_conf['kwargs'])
|
Call internal function.
:param call_conf:
:return:
|
def validate_json_field(dist, attr, value):
try:
is_json_compat(value)
except ValueError as e:
raise DistutilsSetupError("%r %s" % (attr, e))
return True
|
Check for json validity.
|
def remove(self, parent, child):
self.remove_links(parent, (child,))
if parent not in self and parent in self._parent_to_not_ok:
del self._parent_to_not_ok[parent]
if child not in self and child in self._parent_to_not_ok:
del self._parent_to_not_ok[child]
|
Remove a dependency between parent and child.
Parameters
----------
parent : boolean instance of :class:`katcp.Sensor`
The sensor that used to depend on child.
child : boolean instance of :class:`katcp.Sensor` or None
The sensor parent used to depend on.
|
def queue_scan_command(self, server_info: ServerConnectivityInfo, scan_command: PluginScanCommand) -> None:
self._check_and_create_process(server_info.hostname)
self._queued_tasks_nb += 1
if scan_command.is_aggressive:
self._hostname_queues_dict[server_info.hostname].put((server_info, scan_command))
else:
self._task_queue.put((server_info, scan_command))
|
Queue a scan command targeting a specific server.
Args:
server_info: The server's connectivity information. The test_connectivity_to_server() method must have been
called first to ensure that the server is online and accessible.
scan_command: The scan command to run against this server.
|
def get_config_window_bounds(self):
bounds_x = int(self.config.get_optional('Driver', 'bounds_x') or 0)
bounds_y = int(self.config.get_optional('Driver', 'bounds_y') or 0)
monitor_index = int(self.config.get_optional('Driver', 'monitor') or -1)
if monitor_index > -1:
try:
monitor = screeninfo.get_monitors()[monitor_index]
bounds_x += monitor.x
bounds_y += monitor.y
except NotImplementedError:
self.logger.warn('Current environment doesn\'t support get_monitors')
return bounds_x, bounds_y
|
Reads bounds from config and, if a monitor is specified, modifies the values to match the specified monitor
:return: X and Y coordinates where the browser window will be placed.
|
def wnsumd(window):
assert isinstance(window, stypes.SpiceCell)
assert window.dtype == 1
meas = ctypes.c_double()
avg = ctypes.c_double()
stddev = ctypes.c_double()
shortest = ctypes.c_int()
longest = ctypes.c_int()
libspice.wnsumd_c(ctypes.byref(window), ctypes.byref(meas),
ctypes.byref(avg), ctypes.byref(stddev),
ctypes.byref(shortest), ctypes.byref(longest))
return meas.value, avg.value, stddev.value, shortest.value, longest.value
|
Summarize the contents of a double precision window.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wnsumd_c.html
:param window: Window to be summarized.
:type window: spiceypy.utils.support_types.SpiceCell
:return:
Total measure of intervals in window,
Average measure, Standard deviation,
Location of shortest interval,
Location of longest interval.
:rtype: tuple
|
def start(self):
origin = inspect.stack()[1][0]
self.reset()
self._start_tracer(origin)
|
Start collecting trace information.
|
def build_binary_op(self, op, other):
if isinstance(other, NumericalExpression):
self_expr, other_expr, new_inputs = self._merge_expressions(other)
elif isinstance(other, Term):
self_expr = self._expr
new_inputs, other_idx = _ensure_element(self.inputs, other)
other_expr = "x_%d" % other_idx
elif isinstance(other, Number):
self_expr = self._expr
other_expr = str(other)
new_inputs = self.inputs
else:
raise BadBinaryOperator(op, other)
return self_expr, other_expr, new_inputs
|
Compute new expression strings and a new inputs tuple for combining
self and other with a binary operator.
|