def predict_variant_effect_on_transcript_or_failure(variant, transcript):
"""
Try predicting the effect of a variant on a particular transcript but
suppress raised exceptions by converting them into `Failure` effect
values.
"""
try:
return predict_variant_effect_on_transcript(
variant=variant,
transcript=transcript)
except (AssertionError, ValueError) as error:
logger.warn(
"Encountered error annotating %s for %s: %s",
variant,
transcript,
error)
return Failure(variant, transcript)
def generate_safemode_windows():
"""Produce batch file to run QML in safe-mode
Usage:
$ python -c "import compat;compat.generate_safemode_windows()"
$ run.bat
"""
try:
import pyblish
import pyblish_qml
import PyQt5
except ImportError:
return sys.stderr.write(
"Run this in a terminal with access to "
"the Pyblish libraries and PyQt5.\n")
template = r"""@echo off
:: Clear all environment variables
@echo off
if exist ".\backup_env.bat" del ".\backup_env.bat"
for /f "tokens=1* delims==" %%a in ('set') do (
echo set %%a=%%b>> .\backup_env.bat
set %%a=
)
:: Set only the bare essentials
set PATH={PyQt5}
set PATH=%PATH%;{python}
set PYTHONPATH={pyblish}
set PYTHONPATH=%PYTHONPATH%;{pyblish_qml}
set PYTHONPATH=%PYTHONPATH%;{PyQt5}
set SystemRoot=C:\Windows
:: Run Pyblish
python -m pyblish_qml
:: Restore environment
backup_env.bat
"""
values = {}
for lib in (pyblish, pyblish_qml, PyQt5):
values[lib.__name__] = os.path.dirname(os.path.dirname(lib.__file__))
values["python"] = os.path.dirname(sys.executable)
with open("run.bat", "w") as f:
print("Writing %s" % template.format(**values))
f.write(template.format(**values))
def connect(self, interface=None):
"""Connect to the USB for the hottop.
Attempt to discover the USB port used for the Hottop and then form a
connection using the serial library.
:returns: bool
:raises SerialConnectionError:
"""
if self._simulate:
return True
if not interface:
match = self._autodiscover_usb()
self._log.debug("Auto-discovered USB port: %s" % match)
else:
self.USB_PORT = interface
try:
self._conn = serial.Serial(self.USB_PORT, baudrate=self.BAUDRATE,
bytesize=self.BYTE_SIZE,
parity=self.PARITY,
stopbits=self.STOPBITS,
timeout=self.TIMEOUT)
except serial.serialutil.SerialException as e:
raise SerialConnectionError(str(e))
self._log.debug("Serial connection set")
if not self._conn.isOpen():
self._conn.open()
self._log.debug("Serial connection opened")
return True
def hash_for_signing(self, msg):
"""
Return a hash of msg, according to odd bitcoin method: double SHA256 over a bitcoin
encoded stream of two strings: a fixed magic prefix and the actual message.
"""
magic = self.msg_magic_for_netcode()
fd = io.BytesIO()
stream_satoshi_string(fd, magic.encode('utf8'))
stream_satoshi_string(fd, msg.encode('utf8'))
# return as a number, since it's an input to signing algos like that anyway
return from_bytes_32(double_sha256(fd.getvalue()))
def add_args(parser, positional=False):
"""
Extends a commandline argument parser with arguments for specifying
read sources.
"""
group = parser.add_argument_group("read loading")
group.add_argument("reads" if positional else "--reads",
nargs="+", default=[],
help="Paths to bam files. Any number of paths may be specified.")
group.add_argument(
"--read-source-name",
nargs="+",
help="Names for each read source. The number of names specified "
"must match the number of bam files. If not specified, filenames are "
"used for names.")
# Add filters
group = parser.add_argument_group(
"read filtering",
"A number of read filters are available. See the pysam "
"documentation (http://pysam.readthedocs.org/en/latest/api.html) "
"for details on what these fields mean. When multiple filter "
"options are specified, reads must match *all* filters.")
for (name, (kind, message, function)) in READ_FILTERS.items():
extra = {}
if kind is bool:
extra["action"] = "store_true"
extra["default"] = None
elif kind is int:
extra["type"] = int
extra["metavar"] = "N"
elif kind is str:
extra["metavar"] = "STRING"
group.add_argument("--" + name.replace("_", "-"),
help=message,
**extra)
def pkey(self):
"""Returns the private key for quick authentication on the SSH server."""
if self._pkey is None:
self._pkey = self._get_pkey()
return self._pkey
def rename(self, target):
"""
Rename this path to the given path.
"""
if self._closed:
self._raise_closed()
self._accessor.rename(self, target)
def has_button(self, button):
"""Check if this device has a given button.
Args:
button (int): Button to check for, see ``input.h`` for button
definitions.
Returns:
bool: :obj:`True` if the device has this button, :obj:`False` if
it does not.
Raises:
AssertionError
"""
rc = self._libinput.libinput_device_pointer_has_button(
self._handle, button)
assert rc >= 0, 'This device is not a pointer device'
return bool(rc)
def feedkeys(self, keys, options='', escape_csi=True):
"""Push `keys` to Nvim user input buffer.
Options can be a string with the following character flags:
- 'm': Remap keys. This is default.
- 'n': Do not remap keys.
- 't': Handle keys as if typed; otherwise they are handled as if coming
from a mapping. This matters for undo, opening folds, etc.
"""
return self.request('nvim_feedkeys', keys, options, escape_csi)
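A minimal usage sketch with pynvim follows; `nvim` is assumed to be an attached Nvim instance and the key string is a placeholder.

keys = nvim.replace_termcodes('iHello<Esc>', True, False, True)
nvim.feedkeys(keys, 'n', False)  # keys already escaped, so escape_csi=False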
def convert_tensor_to_label(scope, operator, container):
'''
This converter tries to convert a dummy operator 'TensorToLabel' into a sequence of some ONNX operators. Those
operators are used to extract the label with the highest probability for doing a prediction. We assume that the
elements in the given probability tensor are aligned with the class labels specified in the CoreML model. That is,
if you have a class label vector ['a', 'b'] in our CoreML classifier, the first (and the only) input of this
operator should be [probability_of_class_a, probability_of_class_b].
Assume that we have C classes with batch size N (N must be 1. If not, the output class probabilities need to be
encoded as a sequence of dictionary, which is not allowed in ONNX). The ONNX computation graph of this operator may
look like
Probability tensor [1, C] (the variable defined at operator.inputs[0])
|
v
ArgMax LoadConstant (its attribute is extracted from
| | operator.raw_operator, which is a
| | CoreML classifier)
| |
v |
best index [1] |
| |
v v
ArrayFeatureExtractor <-------------------- a 1-D tensor of class labels [C]
|
v
predicted label [1]
'''
model_type = operator.raw_operator.WhichOneof('Type')
if model_type == 'neuralNetworkClassifier':
model = operator.raw_operator.neuralNetworkClassifier
if model.WhichOneof('ClassLabels') == 'stringClassLabels':
labels = list(s.encode('utf-8') for s in model.stringClassLabels.vector)
label_type = onnx_proto.TensorProto.STRING
elif model.WhichOneof('ClassLabels') == 'int64ClassLabels':
labels = list(int(i) for i in model.int64ClassLabels.vector)
label_type = onnx_proto.TensorProto.INT64
else:
raise ValueError('Unknown label type found')
elif model_type == 'pipelineClassifier':
model = operator.raw_operator.pipelineClassifier
if model.WhichOneof('ClassLabels') == 'stringClassLabels':
labels = list(s.encode('utf-8') for s in model.pipelineClassifier.stringClassLabels.vector)
label_type = onnx_proto.TensorProto.STRING
elif model.WhichOneof('ClassLabels') == 'int64ClassLabels':
labels = list(int(i) for i in model.int64ClassLabels.vector)
label_type = onnx_proto.TensorProto.INT64
else:
raise ValueError('Unknown label type found')
else:
raise ValueError('Only neural network classifiers and pipeline classifiers are supported')
# Use a Constant operator to load and output all labels as a tensor
label_loader_name = scope.get_unique_operator_name('LabelLoader')
label_buffer_name = scope.get_unique_variable_name('ClassLabels')
label_loader_value = helper.make_tensor(label_buffer_name, label_type, [len(labels)], labels)
apply_constant(scope, [label_buffer_name], container,
operator_name=label_loader_name, value=label_loader_value)
# Extract most possible label index
label_id_extractor_name = scope.get_unique_operator_name('LabelIndexExtractor')
label_id_extractor_attrs = {'name': label_id_extractor_name}
label_id_extractor_attrs['axis'] = 1
label_id_extractor_attrs['keepdims'] = 1
extracted_id_name = scope.get_unique_variable_name('LabelId')
container.add_node('ArgMax', [operator.inputs[0].full_name], [extracted_id_name], **label_id_extractor_attrs)
# Pick up the label indicated by the selected ID
label_selector_name = scope.get_unique_operator_name('LabelSelector')
label_selector_attrs = {'name': label_selector_name}
container.add_node('ArrayFeatureExtractor', [label_buffer_name, extracted_id_name], [operator.outputs[0].full_name],
op_domain='ai.onnx.ml', **label_selector_attrs)
def set_break(
self, filename, lineno=None, temporary=False, cond=None,
funcname=None
):
"""Put a breakpoint for filename"""
log.info(
'Setting break fn:%s lno:%s tmp:%s cond:%s fun:%s' %
(filename, lineno, temporary, cond, funcname)
)
breakpoint = self.get_break(
filename, lineno, temporary, cond, funcname
)
self.breakpoints.add(breakpoint)
log.info('Breakpoint %r added' % breakpoint)
return breakpoint
def clean_translation(self):
"""
Do not allow translations longer than the max_length of the field to
be translated.
"""
translation = self.cleaned_data['translation']
if self.instance and self.instance.content_object:
# do not allow string longer than translatable field
obj = self.instance.content_object
field = obj._meta.get_field(self.instance.field)
max_length = field.max_length
if max_length and len(translation) > max_length:
raise forms.ValidationError(
_('The entered translation is too long. You entered '
'%(entered)s chars, max length is %(maxlength)s') % {
'entered': len(translation),
'maxlength': max_length,
}
)
else:
raise forms.ValidationError(
_('Can not store translation. First create all translation'
' for this object')
)
return translation
def update(self):
"""Update object properties."""
self._attrs = self._session.refresh_attributes(self.name)
self._attrs = assert_is_dict(self._attrs)
# force base_station to update properties
if self.base_station:
self.base_station.update()
def changes(self):
"""Returns a list of changes to represent the diff between
old and new value.
Returns:
list: [string] representation of the change (if any)
between old and new value
"""
output = []
if self.status() is self.UNMODIFIED:
output = [self.formatter % (' ', self.key, self.old_value)]
elif self.status() is self.ADDED:
output.append(self.formatter % ('+', self.key, self.new_value))
elif self.status() is self.REMOVED:
output.append(self.formatter % ('-', self.key, self.old_value))
elif self.status() is self.MODIFIED:
output.append(self.formatter % ('-', self.key, self.old_value))
output.append(self.formatter % ('+', self.key, self.new_value))
return output
def reload(self):
'Loads rows and/or columns. Override in subclass.'
self.rows = []
for r in self.iterload():
self.addRow(r)
def lookup_hist(self, mh):
"""Return histogram within binning of Histdd mh, with values looked up in this histogram.
This is not rebinning: no interpolation /renormalization is performed.
It's just a lookup.
"""
result = mh.similar_blank_histogram()
points = np.stack([mh.all_axis_bin_centers(i)
for i in range(mh.dimensions)]).reshape(mh.dimensions, -1)
values = self.lookup(*points)
result.histogram = values.reshape(result.histogram.shape)
return result
def username(self):
"""The username of the issuer."""
entry = self._proto.commandQueueEntry
if entry.HasField('username'):
return entry.username
return None
def calculate_file_distances(dicom_files, field_weights=None,
dist_method_cls=None, **kwargs):
"""
Calculates the DicomFileDistance between all files in dicom_files, using a
weighted Levenshtein measure between all field names in field_weights and
their corresponding weights.
Parameters
----------
dicom_files: iterable of str
Dicom file paths
field_weights: dict of str to float
A dict with header field names to float scalar values, that
indicate a distance measure ratio for the levenshtein distance
averaging of all the header field names in it. e.g., {'PatientID': 1}
dist_method_cls: DicomFileDistance class
Distance method object to compare the files.
If None, the default DicomFileDistance method using Levenshtein
distance between the field_weights will be used.
kwargs: DicomFileDistance instantiation named arguments
Apart from the field_weights argument.
Returns
-------
file_dists: np.ndarray or scipy.sparse.lil_matrix of shape NxN
Levenshtein distances between each of the N items in dicom_files.
"""
if dist_method_cls is None:
dist_method = LevenshteinDicomFileDistance(field_weights)
else:
try:
dist_method = dist_method_cls(field_weights=field_weights, **kwargs)
except:
log.exception('Could not instantiate {} object with field_weights '
'and {}'.format(dist_method_cls, kwargs))
dist_dtype = np.float16
n_files = len(dicom_files)
try:
file_dists = np.zeros((n_files, n_files), dtype=dist_dtype)
except MemoryError as mee:
import scipy.sparse
file_dists = scipy.sparse.lil_matrix((n_files, n_files),
dtype=dist_dtype)
for idxi in range(n_files):
dist_method.set_dicom_file1(dicom_files[idxi])
for idxj in range(idxi+1, n_files):
dist_method.set_dicom_file2(dicom_files[idxj])
if idxi != idxj:
file_dists[idxi, idxj] = dist_method.transform()
return file_dists
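A hedged usage sketch; the file paths and weight values are placeholders for illustration.

dicom_files = ['scan_001.dcm', 'scan_002.dcm', 'scan_003.dcm']
weights = {'PatientID': 1.0, 'StudyDate': 0.5}
dists = calculate_file_distances(dicom_files, field_weights=weights)
print(dists)  # 3x3 matrix with pairwise distances in the upper triangle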
def pcolormesh(x, y, z, ax, infer_intervals=None, **kwargs):
"""
Pseudocolor plot of 2d DataArray
Wraps :func:`matplotlib:matplotlib.pyplot.pcolormesh`
"""
# decide on a default for infer_intervals (GH781)
x = np.asarray(x)
if infer_intervals is None:
if hasattr(ax, 'projection'):
if len(x.shape) == 1:
infer_intervals = True
else:
infer_intervals = False
else:
infer_intervals = True
if (infer_intervals and
((np.shape(x)[0] == np.shape(z)[1]) or
((x.ndim > 1) and (np.shape(x)[1] == np.shape(z)[1])))):
if len(x.shape) == 1:
x = _infer_interval_breaks(x, check_monotonic=True)
else:
# we have to infer the intervals on both axes
x = _infer_interval_breaks(x, axis=1)
x = _infer_interval_breaks(x, axis=0)
if (infer_intervals and
(np.shape(y)[0] == np.shape(z)[0])):
if len(y.shape) == 1:
y = _infer_interval_breaks(y, check_monotonic=True)
else:
# we have to infer the intervals on both axes
y = _infer_interval_breaks(y, axis=1)
y = _infer_interval_breaks(y, axis=0)
primitive = ax.pcolormesh(x, y, z, **kwargs)
# by default, pcolormesh picks "round" values for bounds
# this results in ugly looking plots with lots of surrounding whitespace
if not hasattr(ax, 'projection') and x.ndim == 1 and y.ndim == 1:
# not a cartopy geoaxis
ax.set_xlim(x[0], x[-1])
ax.set_ylim(y[0], y[-1])
return primitive
def execute_script(self, script, g=None):
"""
Runs a script, returning the result.
Parameters
----------
script
String script to be evaluated (see below).
g=None
Optional dictionary of additional globals for the script evaluation.
These will automatically be inserted into self.extra_globals.
Usage
-----
Scripts are of the form:
"3.0 + x/y - d[0] where x=3.0*c('my_column')+h('setting'); y=d[1]"
By default, "d" refers to the databox object itself, giving access to
everything and enabling complete control over the universe. Meanwhile,
c() and h() give quick reference to d.c() and d.h() to get columns and
header lines. Additionally, these scripts can see all of the numpy
functions like sin, cos, sqrt, etc.
If you would like access to additional globals in a script,
there are a few options in addition to specifying the g parameter.
You can set self.extra_globals to the appropriate globals dictionary
or add globals using self.insert_global(). Setting g=globals() will
automatically insert all of your current globals into this databox
instance.
There are a few shorthand scripts available as well. You can simply type
a column name such as 'my_column' or a column number like 2. However, I
only added this functionality as a shortcut, and something like
"2.0*a where a=my_column" will not work unless 'my_column' is otherwise
defined. I figure since you're already writing a complicated script in
that case, you don't want to accidentally shortcut your way into using
a column instead of a constant! Use "2.0*a where a=c('my_column')"
instead.
"""
# add any extra user-supplied global variables for the eventual eval() call.
if not g==None: self.extra_globals.update(g)
# If the script is not a list of scripts, return the script value.
# This is the termination of a recursive call.
if not _s.fun.is_iterable(script):
# special case
if script is None: return None
# get the expression and variables dictionary
[expression, v] = self._parse_script(script)
# if there was a problem parsing the script
if v is None:
print("ERROR: Could not parse '"+script+"'")
return None
# get all the numpy stuff too
g = self._globals()
g.update(v)
# otherwise, evaluate the script using python's eval command
return eval(expression, g)
# Otherwise, this is a list of (lists of) scripts. Make the recursive call.
output = []
for s in script: output.append(self.execute_script(s))
return output
def from_euler312(self, roll, pitch, yaw):
'''fill the matrix from Euler angles in radians in 312 convention'''
c3 = cos(pitch)
s3 = sin(pitch)
s2 = sin(roll)
c2 = cos(roll)
s1 = sin(yaw)
c1 = cos(yaw)
self.a.x = c1 * c3 - s1 * s2 * s3
self.b.y = c1 * c2
self.c.z = c3 * c2
self.a.y = -c2*s1
self.a.z = s3*c1 + c3*s2*s1
self.b.x = c3*s1 + s3*s2*c1
self.b.z = s1*s3 - s2*c1*c3
self.c.x = -s3*c2
self.c.y = s2
def indexes_all(ol,value):
'''
from elist.elist import *
ol = [1,'a',3,'a',4,'a',5]
indexes_all(ol,'a')
'''
length = ol.__len__()
indexes =[]
for i in range(0,length):
if(value == ol[i]):
indexes.append(i)
else:
pass
return(indexes)
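The docstring's own example, spelled out as a runnable check:

ol = [1, 'a', 3, 'a', 4, 'a', 5]
assert indexes_all(ol, 'a') == [1, 3, 5]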
def put_file(self, in_path, out_path):
"""
Implement put_file() by streamily transferring the file via
FileService.
:param str in_path:
Local filesystem path to read.
:param str out_path:
Remote filesystem path to write.
"""
try:
st = os.stat(in_path)
except OSError as e:
self._throw_io_error(e, in_path)
raise
if not stat.S_ISREG(st.st_mode):
raise IOError('%r is not a regular file.' % (in_path,))
# If the file is sufficiently small, just ship it in the argument list
# rather than introducing an extra RTT for the child to request it from
# FileService.
if st.st_size <= self.SMALL_FILE_LIMIT:
try:
fp = open(in_path, 'rb')
try:
s = fp.read(self.SMALL_FILE_LIMIT + 1)
finally:
fp.close()
except OSError as e:
self._throw_io_error(e, in_path)
raise
# Ensure did not grow during read.
if len(s) == st.st_size:
return self.put_data(out_path, s, mode=st.st_mode,
utimes=(st.st_atime, st.st_mtime))
self._connect()
self.parent.call_service(
service_name='mitogen.service.FileService',
method_name='register',
path=mitogen.utils.cast(in_path)
)
# For now this must remain synchronous, as the action plug-in may have
# passed us a temporary file to transfer. A future FileService could
# maintain an LRU list of open file descriptors to keep the temporary
# file alive, but that requires more work.
self.get_chain().call(
ansible_mitogen.target.transfer_file,
context=self.parent,
in_path=in_path,
out_path=out_path
)
def append(self, observation, action, reward, terminal, training=True):
"""Append an observation to the memory
# Argument
observation (dict): Observation returned by environment
action (int): Action taken to obtain this observation
reward (float): Reward obtained by taking this action
terminal (boolean): Is the state terminal
"""
super(SequentialMemory, self).append(observation, action, reward, terminal, training=training)
# This needs to be understood as follows: in `observation`, take `action`, obtain `reward`
# and whether the next state is `terminal` or not.
if training:
self.observations.append(observation)
self.actions.append(action)
self.rewards.append(reward)
self.terminals.append(terminal)
def _get_fullname(obj):
# type: (Any) -> str
"""Get the full name of an object including the module.
Args:
obj: An object.
Returns:
The full class name of the object.
"""
if not hasattr(obj, "__name__"):
obj = obj.__class__
if obj.__module__ in ("builtins", "__builtin__"):
return obj.__name__
return "{}.{}".format(obj.__module__, obj.__name__)
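A quick illustration (a sketch; the expected values assume a standard CPython environment):

import collections

assert _get_fullname(int) == 'int'                                   # builtins are shortened
assert _get_fullname(collections.OrderedDict) == 'collections.OrderedDict'
assert _get_fullname(collections.OrderedDict()) == 'collections.OrderedDict'  # an instance falls back to its class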
def get_param(self, number):
"""
Reads an internal Partner object parameter.
"""
logger.debug("retrieving param number %s" % number)
type_ = snap7.snap7types.param_types[number]
value = type_()
code = self.library.Par_GetParam(self.pointer, ctypes.c_int(number),
ctypes.byref(value))
check_error(code)
return value.value
def save(self):
# type: () -> None
"""Save the current in-memory state.
"""
self._ensure_have_load_only()
for fname, parser in self._modified_parsers:
logger.info("Writing to %s", fname)
# Ensure directory exists.
ensure_dir(os.path.dirname(fname))
with open(fname, "w") as f:
parser.write(f)
def validate_token(refresh_url, exceptions=(), callback=None,
access_key='access_token', refresh_key='refresh_token'):
''' a decorator used to validate the access_token for oauth based
data sources.
This decorator should be used on every method in the data source that
fetches data from the oauth controlled resource, and that relies on a
valid access_token in order to operate properly.
If the token is valid, the normal flow continues without any change.
Otherwise, if any of `exceptions` tuple is raised, the normal flow
will be preceded by the following steps:
1. `refresh_url` will be called in order to refresh the token
2. the newly refreshed token will be saved in the source
3. the `callback` function will be called
If the refresh fails for any reason, the user would have to re-grant
permission for the application
Parameters
----------
refresh_url : str
The URL to be called in order to refresh the access token.
callback : str or callable
A callback function to be called whenever the access_token is
validated. The callback function would be called with the refreshed
token as an argument.
If the `callback` is not `callable`, but an `str` it will be called
on `self` (i.e. call a method on your Data Source)
Defaults to None
exceptions : tuple
A list of exceptions that should cause token revalidation
Defaults to Exception, meaning that all errors will cause token
refresh
access_key : str
The access token key as defined in the source and in the response from
the refresh URL.
Defaults to `access_token`
refresh_key : str
The refresh token key as defined in the source and in the request to
the refresh URL.
Defaults to `refresh_token`
'''
def _validate_token(f):
def wrapper(*args):
self = args[0]
try:
return f(*args)
except exceptions:
try:
self.log('Revalidating the access token...')
self.source[access_key] = None
# get a new token from refresh_url
token = self.source.get(refresh_key)
data = dict(self.options['refresh'],
**{refresh_key: token})
r = requests.post(refresh_url, data=data)
self.source[access_key] = r.json()[access_key]
# save the new token in the database
changes = {access_key: self.source[access_key]}
self.fire('source-change', changes)
# notify the callback that a new token was issued
if callback:
if callable(callback):
_callback = callback
else:
_callback = getattr(self, callback)
_callback(self.source.get(access_key))
return f(*args)
except Exception as e:
self.log('Error: Access token can\'t be revalidated. '
'The user would have to re-authenticate',
traceback.format_exc())
# raise a non-retryable exception
raise PanoplyException(
'access token could not be refreshed ({})'
.format(str(e)), retryable=False)
return wrapper
return _validate_token
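A hedged sketch of how the decorator might be applied to a data-source method; the class, URL, and helper names below are hypothetical.

class MySource(panoply.DataSource):  # hypothetical base class and subclass

    @validate_token('https://api.example.com/oauth/token',  # hypothetical refresh URL
                    exceptions=(requests.HTTPError,),
                    callback='on_token_refreshed')
    def read(self, n=None):
        # an HTTPError here triggers a token refresh and a single retry
        return self._fetch_page()  # hypothetical helper

    def on_token_refreshed(self, access_token):
        self.log('received a fresh access token')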
def filter_service_by_host_bp_rule_label(label):
"""Filter for service
Filter on label
:param label: label to filter
:type label: str
:return: Filter
:rtype: bool
"""
def inner_filter(items):
"""Inner filter for service. Accept if label in service.host.labels"""
service = items["service"]
host = items["hosts"][service.host]
if service is None or host is None:
return False
return label in host.labels
return inner_filter
def add_virtual_columns_aitoff(self, alpha, delta, x, y, radians=True):
"""Add aitoff (https://en.wikipedia.org/wiki/Aitoff_projection) projection
:param alpha: azimuth angle
:param delta: polar angle
:param x: output name for x coordinate
:param y: output name for y coordinate
:param radians: input and output in radians (True), or degrees (False)
:return:
"""
transform = "" if radians else "*pi/180."
aitoff_alpha = "__aitoff_alpha_%s_%s" % (alpha, delta)
# sanitize
aitoff_alpha = re.sub("[^a-zA-Z_]", "_", aitoff_alpha)
self.add_virtual_column(aitoff_alpha, "arccos(cos({delta}{transform})*cos({alpha}{transform}/2))".format(**locals()))
self.add_virtual_column(x, "2*cos({delta}{transform})*sin({alpha}{transform}/2)/sinc({aitoff_alpha}/pi)/pi".format(**locals()))
self.add_virtual_column(y, "sin({delta}{transform})/sinc({aitoff_alpha}/pi)/pi".format(**locals()))
def override_account_fields(self,
settled_cash=not_overridden,
accrued_interest=not_overridden,
buying_power=not_overridden,
equity_with_loan=not_overridden,
total_positions_value=not_overridden,
total_positions_exposure=not_overridden,
regt_equity=not_overridden,
regt_margin=not_overridden,
initial_margin_requirement=not_overridden,
maintenance_margin_requirement=not_overridden,
available_funds=not_overridden,
excess_liquidity=not_overridden,
cushion=not_overridden,
day_trades_remaining=not_overridden,
leverage=not_overridden,
net_leverage=not_overridden,
net_liquidation=not_overridden):
"""Override fields on ``self.account``.
"""
# mark that the portfolio is dirty to override the fields again
self._dirty_account = True
self._account_overrides = kwargs = {
k: v for k, v in locals().items() if v is not not_overridden
}
del kwargs['self']
def get_changes(self, changers, in_hierarchy=False, resources=None,
task_handle=taskhandle.NullTaskHandle()):
"""Get changes caused by this refactoring
`changers` is a list of `_ArgumentChanger`\s. If `in_hierarchy`
is `True` the changers are applied to all matching methods in
the class hierarchy.
`resources` can be a list of `rope.base.resource.File`\s that
should be searched for occurrences; if `None` all python files
in the project are searched.
"""
function_changer = _FunctionChangers(self.pyname.get_object(),
self._definfo(), changers)
return self._change_calls(function_changer, in_hierarchy,
resources, task_handle)
def ltime(etobs, obs, direct, targ):
"""
This routine computes the transmit (or receive) time
of a signal at a specified target, given the receive
(or transmit) time at a specified observer. The elapsed
time between transmit and receive is also returned.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ltime_c.html
:param etobs: Epoch of a signal at some observer
:type etobs: float
:param obs: NAIF ID of some observer
:type obs: int
:param direct: Direction the signal travels ( "->" or "<-" )
:type direct: str
:param targ: NAIF ID of the target object
:type targ: int
:return: epoch and time
:rtype: tuple
"""
etobs = ctypes.c_double(etobs)
obs = ctypes.c_int(obs)
direct = stypes.stringToCharP(direct)
targ = ctypes.c_int(targ)
ettarg = ctypes.c_double()
elapsd = ctypes.c_double()
libspice.ltime_c(etobs, obs, direct, targ, ctypes.byref(ettarg),
ctypes.byref(elapsd))
return ettarg.value, elapsd.value
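A hedged usage sketch; the kernel file names are placeholders, and a leapseconds kernel plus a planetary ephemeris must be loaded (e.g. via furnsh from the same wrapper library) before calling.

furnsh('naif0012.tls')   # leapseconds kernel (placeholder path)
furnsh('de430.bsp')      # planetary ephemeris (placeholder path)
et = str2et('2020-01-01T00:00:00')
ettarg, elapsed = ltime(et, 399, "->", 301)  # signal leaving Earth (399) toward the Moon (301)
print(ettarg, elapsed)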
def _feed_calendar_span(gtfs, stats):
"""
Computes the temporal coverage of each source feed
Parameters
----------
gtfs: gtfspy.GTFS object
stats: dict
where to append the stats
Returns
-------
stats: dict
"""
n_feeds = _n_gtfs_sources(gtfs)[0]
max_start = None
min_end = None
if n_feeds > 1:
for i in range(n_feeds):
feed_key = "feed_" + str(i) + "_"
start_key = feed_key + "calendar_start"
end_key = feed_key + "calendar_end"
calendar_span = gtfs.conn.cursor().execute(
'SELECT min(date), max(date) FROM trips, days '
'WHERE trips.trip_I = days.trip_I AND trip_id LIKE ?;', (feed_key + '%',)).fetchone()
stats[start_key] = calendar_span[0]
stats[end_key] = calendar_span[1]
if calendar_span[0] is not None and calendar_span[1] is not None:
if not max_start and not min_end:
max_start = calendar_span[0]
min_end = calendar_span[1]
else:
if gtfs.get_day_start_ut(calendar_span[0]) > gtfs.get_day_start_ut(max_start):
max_start = calendar_span[0]
if gtfs.get_day_start_ut(calendar_span[1]) < gtfs.get_day_start_ut(min_end):
min_end = calendar_span[1]
stats["latest_feed_start_date"] = max_start
stats["earliest_feed_end_date"] = min_end
else:
stats["latest_feed_start_date"] = stats["start_date"]
stats["earliest_feed_end_date"] = stats["end_date"]
return stats
def send(self, sender: PytgbotApiBot):
"""
Send the message via pytgbot.
:param sender: The bot instance to send with.
:type sender: pytgbot.bot.Bot
:rtype: PytgbotApiMessage
"""
return sender.send_voice(
# receiver, self.media, disable_notification=self.disable_notification, reply_to_message_id=reply_id
voice=self.voice, chat_id=self.receiver, reply_to_message_id=self.reply_id, caption=self.caption, parse_mode=self.parse_mode, duration=self.duration, disable_notification=self.disable_notification, reply_markup=self.reply_markup
)
def initializerepo(self):
""" Fill empty directory with products and make first commit """
try:
os.mkdir(self.repopath)
except OSError:
pass
cmd = self.repo.init(bare=self.bare, shared=self.shared)
if not self.bare:
self.write_testing_data([], [])
self.write_training_data([], [])
self.write_classifier(None)
cmd = self.repo.add('training.pkl')
cmd = self.repo.add('testing.pkl')
cmd = self.repo.add('classifier.pkl')
cmd = self.repo.commit(m='initial commit')
cmd = self.repo.tag('initial')
cmd = self.set_version('initial')
def binomial_coefficient(n, k):
""" Calculate the binomial coefficient indexed by n and k.
Args:
n (int): positive integer
k (int): positive integer
Returns:
The binomial coefficient indexed by n and k
Raises:
TypeError: If either n or k is not an integer
ValueError: If either n or k is negative, or if k is strictly greater than n
"""
if not isinstance(k, int) or not isinstance(n, int):
raise TypeError("Expecting positive integers")
if k > n:
raise ValueError("k must be lower or equal than n")
if k < 0 or n < 0:
raise ValueError("Expecting positive integers")
return factorial(n) // (factorial(k) * factorial(n - k))
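A couple of sanity checks, for example:

assert binomial_coefficient(5, 2) == 10
assert binomial_coefficient(4, 0) == 1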
def writeDetails(accept, readId, taxonomy, fp):
"""
Write read and taxonomy details.
@param accept: A C{bool} indicating whether the read was accepted,
according to its taxonomy.
@param readId: The C{str} id of the read.
@taxonomy: A C{list} of taxonomy C{str} levels.
@fp: An open file pointer to write to.
"""
fp.write('%s %s\n %s\n\n' % (
'MATCH:' if accept else 'MISS: ', readId,
' | '.join(taxonomy) if taxonomy else 'No taxonomy found.'))
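A small usage sketch, with sys.stdout standing in for an output file:

import sys

writeDetails(True, 'read-42', ['Viruses', 'Picornavirales'], sys.stdout)
# prints:
# MATCH: read-42
#  Viruses | Picornavirales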
def user_warning(self, message, caption='Warning!'):
"""
Shows a dialog that warns the user about some action
Parameters
----------
message : message to display to user
caption : title for dialog (default: "Warning!")
Returns
-------
continue_bool : True or False
"""
dlg = wx.MessageDialog(self, message, caption,
wx.OK | wx.CANCEL | wx.ICON_WARNING)
if self.show_dlg(dlg) == wx.ID_OK:
continue_bool = True
else:
continue_bool = False
dlg.Destroy()
return continue_bool
def get_partitions(self, persistence=None):
""" Returns the partitioned data based on a specified
persistence level.
@ In, persistence, a floating point value specifying the
size of the smallest feature we want to track.
Default = None means consider all features.
@ Out, a dictionary lists where each key is a integer
specifying the index of the maximum. Each entry will hold a
list of indices specifying points that are associated to
this maximum.
"""
if persistence is None:
persistence = self.persistence
partitions = {}
# TODO: Possibly cache at the critical persistence values,
# previously caching was done at every query level, but that
# does not make sense as the partitions will only change once
# the next value in self.persistences is attained. Honestly,
# this is probably not a necessary optimization that needs to
# be made. Consider instead, Yarden's way of storing the points
# such that merged arrays will be adjacent.
for key, items in self.base_partitions.items():
new_key = key
while (
self.merge_sequence[new_key][0] < persistence
and self.merge_sequence[new_key][1] != new_key
):
new_key = self.merge_sequence[new_key][1]
if new_key not in partitions:
partitions[new_key] = []
partitions[new_key].extend(items)
for key in partitions:
partitions[key] = sorted(list(set(partitions[key])))
return partitions
def _get_step_inout(step):
"""Retrieve set of inputs and outputs connecting steps.
"""
inputs = []
outputs = []
prescatter = collections.defaultdict(list)
remapped = {}
assert step.outputs_record_schema["type"] == "record"
output_names = set([])
for outp in step.outputs_record_schema["fields"]:
outputs.append({"id": outp["name"]})
output_names.add(outp["name"])
assert step.inputs_record_schema["type"] == "record"
for inp in step.inputs_record_schema["fields"]:
source = inp["source"].split("#")[-1].replace("/", ".")
# Check if we're unpacking from a record, and unpack from our object
if "valueFrom" in inp:
attr_access = "['%s']" % inp["name"]
if inp["valueFrom"].find(attr_access) > 0:
source += ".%s" % inp["name"]
if isinstance(inp["type"], dict) and isinstance(inp["type"].get("items"), dict):
if inp["type"]["items"].get("type") == "array" and "inputBinding" in inp["type"]:
source, prescatter = _unpack_object_array(inp, source, prescatter)
# Avoid clashing input and output names, WDL requires unique
if inp["name"] in output_names:
new_name = inp["name"] + "_input"
remapped[inp["name"]] = new_name
inp["name"] = new_name
inputs.append({"id": inp["name"], "value": source})
return inputs, outputs, remapped, dict(prescatter)
def set_name(address, name, anyway=False):
"""Set the name of an address.
Sets the name of an address in IDA.
If the name already exists, check the `anyway` parameter:
True - Add `_COUNTER` to the name (default IDA behaviour)
False - Raise an `exceptions.SarkErrorNameAlreadyExists` exception.
Args
address: The address to rename.
name: The desired name.
anyway: Set anyway or not. Default ``False``.
"""
success = idaapi.set_name(address, name, idaapi.SN_NOWARN | idaapi.SN_NOCHECK)
if success:
return
if anyway:
success = idaapi.do_name_anyway(address, name)
if success:
return
raise exceptions.SarkSetNameFailed("Failed renaming 0x{:08X} to {!r}.".format(address, name))
raise exceptions.SarkErrorNameAlreadyExists(
"Can't rename 0x{:08X}. Name {!r} already exists.".format(address, name))
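A hedged usage sketch inside IDA; the address and name are placeholders.

try:
    set_name(0x00401000, 'parse_header')
except exceptions.SarkErrorNameAlreadyExists:
    # fall back to IDA's default behaviour of appending a counter
    set_name(0x00401000, 'parse_header', anyway=True)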
def _convert_etree_element_to_rule(entry_element):
''' Converts entry element to rule object.
The format of xml for rule:
<entry xmlns='http://www.w3.org/2005/Atom'>
<content type='application/xml'>
<RuleDescription
xmlns:i="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">
<Filter i:type="SqlFilterExpression">
<SqlExpression>MyProperty='XYZ'</SqlExpression>
</Filter>
<Action i:type="SqlFilterAction">
<SqlExpression>set MyProperty2 = 'ABC'</SqlExpression>
</Action>
</RuleDescription>
</content>
</entry>
'''
rule = Rule()
rule_element = entry_element.find('./atom:content/sb:RuleDescription', _etree_sb_feed_namespaces)
if rule_element is not None:
filter_element = rule_element.find('./sb:Filter', _etree_sb_feed_namespaces)
if filter_element is not None:
rule.filter_type = filter_element.attrib.get(
_make_etree_ns_attr_name(_etree_sb_feed_namespaces['i'], 'type'), None)
sql_exp_element = filter_element.find('./sb:SqlExpression', _etree_sb_feed_namespaces)
if sql_exp_element is not None:
rule.filter_expression = sql_exp_element.text
action_element = rule_element.find('./sb:Action', _etree_sb_feed_namespaces)
if action_element is not None:
rule.action_type = action_element.attrib.get(
_make_etree_ns_attr_name(_etree_sb_feed_namespaces['i'], 'type'), None)
sql_exp_element = action_element.find('./sb:SqlExpression', _etree_sb_feed_namespaces)
if sql_exp_element is not None:
rule.action_expression = sql_exp_element.text
# extract id, updated and name value from feed entry and set them of rule.
for name, value in _ETreeXmlToObject.get_entry_properties_from_element(
entry_element, True, '/rules').items():
setattr(rule, name, value)
return rule
def write_reports(self, relative_path, suite_name, reports,
package_name=None):
"""write the collection of reports to the given path"""
dest_path = self.reserve_file(relative_path)
with open(dest_path, 'wb') as outf:
outf.write(toxml(reports, suite_name, package_name=package_name))
return dest_path
def get_esri_extent(esriobj):
"""
Get the extent of an ESRI resource
"""
extent = None
srs = None
if 'fullExtent' in esriobj._json_struct:
extent = esriobj._json_struct['fullExtent']
if 'extent' in esriobj._json_struct:
extent = esriobj._json_struct['extent']
try:
srs = extent['spatialReference']['wkid']
except KeyError as err:
LOGGER.error(err, exc_info=True)
return [extent, srs]
def query_string(context, **kwargs):
"""Add param to the given query string"""
params = context["request"].GET.copy()
for key, value in list(kwargs.items()):
params[key] = value
return "?" + params.urlencode()
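A minimal sketch of exercising the helper directly; building the context from Django's RequestFactory is an assumption for illustration.

from django.test import RequestFactory

request = RequestFactory().get('/articles/?page=1&sort=title')
context = {'request': request}
print(query_string(context, page=2))  # '?page=2&sort=title'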
def json_dumps(obj, # type: Any
**kwargs # type: Any
): # type: (...) -> str
""" Force use of unicode. """
if six.PY2:
kwargs['encoding'] = 'utf-8'
return json.dumps(convert_to_dict(obj), **kwargs)
def to_dict(self):
"""
Returns the fragment in a dictionary representation.
"""
return {
'content': self.content,
'resources': [r._asdict() for r in self.resources], # pylint: disable=W0212
'js_init_fn': self.js_init_fn,
'js_init_version': self.js_init_version,
'json_init_args': self.json_init_args
}
def frame_paths(frame_type, start_time, end_time, server=None, url_type='file'):
"""Return the paths to a span of frame files
Parameters
----------
frame_type : string
The string representation of the frame type (ex. 'H1_ER_C00_L1')
start_time : int
The start time that we need the frames to span.
end_time : int
The end time that we need the frames to span.
server : {None, SERVER:PORT string}, optional
Optional string to specify the datafind server to use. By default an
attempt is made to use a local datafind server.
url_type : string
Returns only frame URLs with a particular scheme or head such
as "file" or "gsiftp". Default is "file", which queries locally
stored frames. Option can be disabled if set to None.
Returns
-------
paths : list of paths
The list of paths to the frame files.
Examples
--------
>>> paths = frame_paths('H1_LDAS_C02_L2', 968995968, 968995968+2048)
"""
site = frame_type[0]
connection = datafind_connection(server)
connection.find_times(site, frame_type,
gpsstart=start_time, gpsend=end_time)
cache = connection.find_frame_urls(site, frame_type, start_time, end_time,urltype=url_type)
paths = [entry.path for entry in cache]
return paths
def get_valid_cwd():
"""Determine and check the current working directory for validity.
Typically, an invalid directory arises when you check out a different branch on git
that doesn't have this directory. When an invalid directory is found, a
warning is printed to the screen, but the directory is still returned
as-is, since this is what the shell considers to be the cwd."""
try:
cwd = _current_dir()
except:
warn("Your current directory is invalid. If you open a ticket at " +
"https://github.com/milkbikis/powerline-shell/issues/new " +
"we would love to help fix the issue.")
sys.stdout.write("> ")
sys.exit(1)
parts = cwd.split(os.sep)
up = cwd
while parts and not os.path.exists(up):
parts.pop()
up = os.sep.join(parts)
if cwd != up:
warn("Your current directory is invalid. Lowest valid directory: "
+ up)
return cwd
def remove_foreign_key(self, name):
"""
Removes the foreign key constraint with the given name.
:param name: The constraint name
:type name: str
"""
name = self._normalize_identifier(name)
if not self.has_foreign_key(name):
raise ForeignKeyDoesNotExist(name, self._name)
del self._fk_constraints[name]
def store_zonefiles( self, zonefile_names, zonefiles, zonefile_txids, zonefile_block_heights, peer_zonefile_hashes, peer_hostport, path, con=None ):
"""
Store a list of RPC-fetched zonefiles (but only ones in peer_zonefile_hashes) from the given peer_hostport
Return the list of zonefile hashes stored.
"""
ret = []
with AtlasDBOpen(con=con, path=path) as dbcon:
for fetched_zfhash, zonefile_txt in zonefiles.items():
if fetched_zfhash not in peer_zonefile_hashes or fetched_zfhash not in zonefile_block_heights:
# unsolicited
log.warn("%s: Unsolicited zonefile %s" % (self.hostport, fetched_zfhash))
continue
rc = self.store_zonefile_data( fetched_zfhash, zonefile_txt, min(zonefile_block_heights[fetched_zfhash]), peer_hostport, dbcon, path )
if rc:
# don't ask for it again
ret.append( fetched_zfhash )
return ret
def _adjust_rowcol(self, insertion_point, no_to_insert, axis, tab=None):
"""Adjusts row and column sizes on insertion/deletion"""
if axis == 2:
self._shift_rowcol(insertion_point, no_to_insert)
return
assert axis in (0, 1)
cell_sizes = self.col_widths if axis else self.row_heights
set_cell_size = self.set_col_width if axis else self.set_row_height
new_sizes = {}
del_sizes = []
for pos, table in cell_sizes:
if pos > insertion_point and (tab is None or tab == table):
if 0 <= pos + no_to_insert < self.shape[axis]:
new_sizes[(pos + no_to_insert, table)] = \
cell_sizes[(pos, table)]
del_sizes.append((pos, table))
for pos, table in new_sizes:
set_cell_size(pos, table, new_sizes[(pos, table)])
for pos, table in del_sizes:
if (pos, table) not in new_sizes:
set_cell_size(pos, table, None)
def stream_bloom_filters(dataset, # type: Iterable[Sequence[Text]]
keys, # type: Sequence[Sequence[bytes]]
schema # type: Schema
):
# type: (...) -> Iterable[Tuple[bitarray, Text, int]]
""" Compute composite Bloom filters (CLKs) for every record in an
iterable dataset.
:param dataset: An iterable of indexable records.
:param schema: An instantiated Schema instance
:param keys: A tuple of two lists of secret keys used in the HMAC.
:return: Generator yielding bloom filters as 3-tuples
"""
tokenizers = [tokenizer.get_tokenizer(field.hashing_properties)
for field in schema.fields]
return (crypto_bloom_filter(s, tokenizers, schema, keys)
for s in dataset)
def when(self, *bools):
"""
:type bools: bool
:param bools: Boolean arguments
All boolean arguments passed to this method must evaluate to `True` for
printing to be enabled.
So for example, the following code would print ``x: 1``
.. code-block:: python
for x in range(10):
Behold().when(x == 1).show('x')
"""
self.passes = self.passes and all(bools)
return self
def lp_to_simple_rdd(lp_rdd, categorical=False, nb_classes=None):
"""Convert a LabeledPoint RDD into an RDD of feature-label pairs
:param lp_rdd: LabeledPoint RDD of features and labels
:param categorical: boolean, if labels should be one-hot encode when returned
:param nb_classes: int, number of total classes
:return: Spark RDD with feature-label pairs
"""
if categorical:
if not nb_classes:
labels = np.asarray(lp_rdd.map(
lambda lp: lp.label).collect(), dtype='int32')
nb_classes = np.max(labels) + 1
rdd = lp_rdd.map(lambda lp: (from_vector(lp.features),
encode_label(lp.label, nb_classes)))
else:
rdd = lp_rdd.map(lambda lp: (from_vector(lp.features), lp.label))
return rdd | Convert a LabeledPoint RDD into an RDD of feature-label pairs
:param lp_rdd: LabeledPoint RDD of features and labels
:param categorical: boolean, if labels should be one-hot encoded when returned
:param nb_classes: int, number of total classes
:return: Spark RDD with feature-label pairs |
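A hedged usage sketch; it assumes a running SparkContext `sc` and that the module's own helpers (from_vector, encode_label) are importable alongside this function:
from pyspark.mllib.regression import LabeledPoint

lp_rdd = sc.parallelize([LabeledPoint(0.0, [1.0, 2.0]),
                         LabeledPoint(1.0, [3.0, 4.0])])
pair_rdd = lp_to_simple_rdd(lp_rdd, categorical=True)
print(pair_rdd.first())  # (feature array, one-hot encoded label)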
def tradeBreaksSSE(symbols=None, on_data=None, token='', version=''):
'''Trade report messages are sent when an order on the IEX Order Book is executed in whole or in part. DEEP sends a Trade report message for every individual fill.
https://iexcloud.io/docs/api/#deep-trades
Args:
        symbols (string): Tickers to request
        on_data (function): Callback on data
        token (string): Access token
        version (string): API version
'''
return _runSSE('trade-breaks', symbols, on_data, token, version) | Trade report messages are sent when an order on the IEX Order Book is executed in whole or in part. DEEP sends a Trade report message for every individual fill.
https://iexcloud.io/docs/api/#deep-trades
Args:
symbols (string): Tickers to request
on_data (function): Callback on data
token (string): Access token
version (string): API version
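Hypothetical invocation; the token value is a placeholder and the callback simply prints each SSE message:
def print_message(msg):
    print(msg)

tradeBreaksSSE(symbols='SPY', on_data=print_message, token='YOUR_IEX_TOKEN')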
def get_assessments_taken_by_search(self, assessment_taken_query, assessment_taken_search):
"""Pass through to provider AssessmentTakenSearchSession.get_assessments_taken_by_search"""
# Implemented from azosid template for -
# osid.resource.ResourceSearchSession.get_resources_by_search_template
if not self._can('search'):
raise PermissionDenied()
return self._provider_session.get_assessments_taken_by_search(assessment_taken_query, assessment_taken_search) | Pass through to provider AssessmentTakenSearchSession.get_assessments_taken_by_search |
def pre_save(self, model_instance, add):
"""Returns field's value just before saving."""
value = super().pre_save(model_instance, add)
if isinstance(value, LocalizedValue):
for file in value.__dict__.values():
if file and not file._committed:
file.save(file.name, file, save=False)
return value | Returns field's value just before saving. |
def patch_stackless():
'''
This function should be called to patch the stackless module so that new tasklets are properly tracked in the
debugger.
'''
global _application_set_schedule_callback
_application_set_schedule_callback = stackless.set_schedule_callback(_schedule_callback)
def set_schedule_callback(callable):
global _application_set_schedule_callback
old = _application_set_schedule_callback
_application_set_schedule_callback = callable
return old
def get_schedule_callback():
global _application_set_schedule_callback
return _application_set_schedule_callback
set_schedule_callback.__doc__ = stackless.set_schedule_callback.__doc__
if hasattr(stackless, "get_schedule_callback"):
get_schedule_callback.__doc__ = stackless.get_schedule_callback.__doc__
stackless.set_schedule_callback = set_schedule_callback
stackless.get_schedule_callback = get_schedule_callback
if not hasattr(stackless.tasklet, "trace_function"):
# Older versions of Stackless, released before 2014
__call__.__doc__ = stackless.tasklet.__call__.__doc__
stackless.tasklet.__call__ = __call__
setup.__doc__ = stackless.tasklet.setup.__doc__
stackless.tasklet.setup = setup
run.__doc__ = stackless.run.__doc__
stackless.run = run | This function should be called to patch the stackless module so that new tasklets are properly tracked in the
debugger. |
def to_api_repr(self):
"""Generate a resource for :meth:`_begin`."""
configuration = self._configuration.to_api_repr()
resource = {
"jobReference": self._properties["jobReference"],
"configuration": configuration,
}
configuration["query"]["query"] = self.query
return resource | Generate a resource for :meth:`_begin`. |
def file_compile(self, path):
"""Compiles a file specified by path on the device"""
log.info('Compile '+path)
cmd = 'node.compile("%s")' % path
res = self.__exchange(cmd)
log.info(res)
return res | Compiles a file specified by path on the device |
def dim_range_key(eldim):
"""
Returns the key to look up a dimension range.
"""
if isinstance(eldim, dim):
dim_name = repr(eldim)
if dim_name.startswith("'") and dim_name.endswith("'"):
dim_name = dim_name[1:-1]
else:
dim_name = eldim.name
return dim_name | Returns the key to look up a dimension range. |
def hysteresis_magic2(path_to_file='.', hyst_file="rmag_hysteresis.txt",
save=False, save_folder='.',
fmt="svg", plots=True):
"""
Calculates hysteresis parameters, saves them in rmag_hysteresis format file.
If selected, this function also plots hysteresis loops, delta M curves,
d (Delta M)/dB curves, and IRM backfield curves.
Parameters (defaults are used if not specified)
----------
path_to_file : path to directory that contains files (default is current directory, '.')
hyst_file : hysteresis file (default is 'rmag_hysteresis.txt')
save : boolean argument to save plots (default is False)
save_folder : relative directory where plots will be saved (default is current directory, '.')
    fmt : format of saved figures (default is 'svg')
    plots : whether or not to display the plots (default is True)
"""
user, meas_file, rmag_out, rmag_file = "", "agm_measurements.txt", "rmag_hysteresis.txt", ""
pltspec = ""
dir_path = save_folder
verbose = pmagplotlib.verbose
version_num = pmag.get_version()
rmag_out = save_folder + '/' + rmag_out
meas_file = path_to_file + '/' + hyst_file
rmag_rem = save_folder + "/rmag_remanence.txt"
#
#
meas_data, file_type = pmag.magic_read(meas_file)
if file_type != 'magic_measurements':
        print(hysteresis_magic2.__doc__)
print('bad file')
return
# initialize some variables
# define figure numbers for hyst,deltaM,DdeltaM curves
HystRecs, RemRecs = [], []
HDD = {}
HDD['hyst'], HDD['deltaM'], HDD['DdeltaM'] = 1, 2, 3
experiment_names, sids = [], []
for rec in meas_data:
meths = rec['magic_method_codes'].split(':')
methods = []
for meth in meths:
methods.append(meth.strip())
if 'LP-HYS' in methods:
if 'er_synthetic_name' in list(rec.keys()) and rec['er_synthetic_name'] != "":
rec['er_specimen_name'] = rec['er_synthetic_name']
if rec['magic_experiment_name'] not in experiment_names:
experiment_names.append(rec['magic_experiment_name'])
if rec['er_specimen_name'] not in sids:
sids.append(rec['er_specimen_name'])
#
fignum = 1
sample_num = 0
# initialize variables to record some bulk info in first loop
first_dcd_rec, first_rec, first_imag_rec = 1, 1, 1
while sample_num < len(sids):
sample = sids[sample_num]
print(sample, sample_num + 1, 'out of ', len(sids))
# B,M for hysteresis, Bdcd,Mdcd for irm-dcd data
B, M, Bdcd, Mdcd = [], [], [], []
Bimag, Mimag = [], [] # Bimag,Mimag for initial magnetization curves
for rec in meas_data:
methcodes = rec['magic_method_codes'].split(':')
meths = []
for meth in methcodes:
meths.append(meth.strip())
if rec['er_specimen_name'] == sample and "LP-HYS" in meths:
B.append(float(rec['measurement_lab_field_dc']))
M.append(float(rec['measurement_magn_moment']))
if first_rec == 1:
e = rec['magic_experiment_name']
HystRec = {}
first_rec = 0
if "er_location_name" in list(rec.keys()):
HystRec["er_location_name"] = rec["er_location_name"]
locname = rec['er_location_name'].replace('/', '-')
if "er_sample_name" in list(rec.keys()):
HystRec["er_sample_name"] = rec["er_sample_name"]
if "er_site_name" in list(rec.keys()):
HystRec["er_site_name"] = rec["er_site_name"]
if "er_synthetic_name" in list(rec.keys()) and rec['er_synthetic_name'] != "":
HystRec["er_synthetic_name"] = rec["er_synthetic_name"]
else:
HystRec["er_specimen_name"] = rec["er_specimen_name"]
if rec['er_specimen_name'] == sample and "LP-IRM-DCD" in meths:
Bdcd.append(float(rec['treatment_dc_field']))
Mdcd.append(float(rec['measurement_magn_moment']))
if first_dcd_rec == 1:
RemRec = {}
irm_exp = rec['magic_experiment_name']
first_dcd_rec = 0
if "er_location_name" in list(rec.keys()):
RemRec["er_location_name"] = rec["er_location_name"]
if "er_sample_name" in list(rec.keys()):
RemRec["er_sample_name"] = rec["er_sample_name"]
if "er_site_name" in list(rec.keys()):
RemRec["er_site_name"] = rec["er_site_name"]
if "er_synthetic_name" in list(rec.keys()) and rec['er_synthetic_name'] != "":
RemRec["er_synthetic_name"] = rec["er_synthetic_name"]
else:
RemRec["er_specimen_name"] = rec["er_specimen_name"]
if rec['er_specimen_name'] == sample and "LP-IMAG" in meths:
if first_imag_rec == 1:
imag_exp = rec['magic_experiment_name']
first_imag_rec = 0
Bimag.append(float(rec['measurement_lab_field_dc']))
Mimag.append(float(rec['measurement_magn_moment']))
if len(B) > 0:
hmeths = []
for meth in meths:
hmeths.append(meth)
# fignum = 1
fig = plt.figure(figsize=(8, 8))
hpars, deltaM, Bdm, B, Mnorm, MadjN = iplot_hys(1, B, M, sample)
ax1 = fig.add_subplot(2, 2, 1)
ax1.axhline(0, color='k')
ax1.axvline(0, color='k')
ax1.plot(B, Mnorm, 'r')
ax1.plot(B, MadjN, 'b')
ax1.set_xlabel('B (T)')
ax1.set_ylabel("M/Msat")
# ax1.set_title(sample)
ax1.set_xlim(-1, 1)
ax1.set_ylim(-1, 1)
bounds = ax1.axis()
n4 = 'Ms: ' + \
'%8.2e' % (float(hpars['hysteresis_ms_moment'])) + ' Am^2'
ax1.text(bounds[1] - .9 * bounds[1], -.9, n4, fontsize=9)
n1 = 'Mr: ' + \
'%8.2e' % (float(hpars['hysteresis_mr_moment'])) + ' Am^2'
ax1.text(bounds[1] - .9 * bounds[1], -.7, n1, fontsize=9)
n2 = 'Bc: ' + '%8.2e' % (float(hpars['hysteresis_bc'])) + ' T'
ax1.text(bounds[1] - .9 * bounds[1], -.5, n2, fontsize=9)
if 'hysteresis_xhf' in list(hpars.keys()):
n3 = r'Xhf: ' + \
'%8.2e' % (float(hpars['hysteresis_xhf'])) + ' m^3'
ax1.text(bounds[1] - .9 * bounds[1], -.3, n3, fontsize=9)
# plt.subplot(1,2,2)
# plt.subplot(1,3,3)
DdeltaM = []
Mhalf = ""
for k in range(2, len(Bdm)):
                # differential
DdeltaM.append(
old_div(abs(deltaM[k] - deltaM[k - 2]), (Bdm[k] - Bdm[k - 2])))
for k in range(len(deltaM)):
if old_div(deltaM[k], deltaM[0]) < 0.5:
Mhalf = k
break
try:
Bhf = Bdm[Mhalf - 1:Mhalf + 1]
Mhf = deltaM[Mhalf - 1:Mhalf + 1]
# best fit line through two bounding points
poly = polyfit(Bhf, Mhf, 1)
Bcr = old_div((.5 * deltaM[0] - poly[1]), poly[0])
hpars['hysteresis_bcr'] = '%8.3e' % (Bcr)
hpars['magic_method_codes'] = "LP-BCR-HDM"
if HDD['deltaM'] != 0:
ax2 = fig.add_subplot(2, 2, 2)
ax2.plot(Bdm, deltaM, 'b')
ax2.set_xlabel('B (T)')
ax2.set_ylabel('Delta M')
linex = [0, Bcr, Bcr]
liney = [old_div(deltaM[0], 2.), old_div(deltaM[0], 2.), 0]
ax2.plot(linex, liney, 'r')
# ax2.set_title(sample)
ax3 = fig.add_subplot(2, 2, 3)
ax3.plot(Bdm[(len(Bdm) - len(DdeltaM)):], DdeltaM, 'b')
ax3.set_xlabel('B (T)')
ax3.set_ylabel('d (Delta M)/dB')
# ax3.set_title(sample)
ax4 = fig.add_subplot(2, 2, 4)
ax4.plot(Bdcd, Mdcd)
ax4.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
ax4.axhline(0, color='k')
ax4.axvline(0, color='k')
ax4.set_xlabel('B (T)')
ax4.set_ylabel('M/Mr')
            except Exception:
print("not doing it")
hpars['hysteresis_bcr'] = '0'
hpars['magic_method_codes'] = ""
plt.gcf()
plt.gca()
plt.tight_layout()
if save:
plt.savefig(save_folder + '/' + sample + '_hysteresis.' + fmt)
plt.show()
sample_num += 1 | Calculates hysteresis parameters, saves them in rmag_hysteresis format file.
If selected, this function also plots hysteresis loops, delta M curves,
d (Delta M)/dB curves, and IRM backfield curves.
Parameters (defaults are used if not specified)
----------
path_to_file : path to directory that contains files (default is current directory, '.')
hyst_file : hysteresis file (default is 'rmag_hysteresis.txt')
save : boolean argument to save plots (default is False)
save_folder : relative directory where plots will be saved (default is current directory, '.')
fmt : format of saved figures (default is 'svg')
plots : whether or not to display the plots (default is True)
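Example call; the directory names are hypothetical and the measurement file name matches the function's default:
hysteresis_magic2(path_to_file='my_project', hyst_file='rmag_hysteresis.txt',
                  save=True, save_folder='my_project/plots', fmt='svg', plots=True)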
def is_secret_registered(
self,
secrethash: SecretHash,
block_identifier: BlockSpecification,
) -> bool:
"""True if the secret for `secrethash` is registered at `block_identifier`.
Throws NoStateForBlockIdentifier if the given block_identifier
is older than the pruning limit
"""
if not self.client.can_query_state_for_block(block_identifier):
raise NoStateForBlockIdentifier()
block = self.get_secret_registration_block_by_secrethash(
secrethash=secrethash,
block_identifier=block_identifier,
)
return block is not None | True if the secret for `secrethash` is registered at `block_identifier`.
Throws NoStateForBlockIdentifier if the given block_identifier
is older than the pruning limit |
def import_surf_mesh(file_name):
""" Generates a NURBS surface object from a mesh file.
:param file_name: input mesh file
:type file_name: str
:return: a NURBS surface
:rtype: NURBS.Surface
"""
raw_content = read_file(file_name)
raw_content = raw_content.split("\n")
content = []
for rc in raw_content:
temp = rc.strip().split()
content.append(temp)
# 1st line defines the dimension and it must be 3
if int(content[0][0]) != 3:
raise TypeError("Input mesh '" + str(file_name) + "' must be 3-dimensional")
# Create a NURBS surface instance and fill with the data read from mesh file
surf = shortcuts.generate_surface(rational=True)
# 2nd line is the degrees
surf.degree_u = int(content[1][0])
surf.degree_v = int(content[1][1])
# 3rd line is the number of weighted control points in u and v directions
dim_u = int(content[2][0])
dim_v = int(content[2][1])
# Starting from 6th line, we have the weighted control points
ctrlpts_end = 5 + (dim_u * dim_v)
ctrlpts_mesh = content[5:ctrlpts_end]
# mesh files have the control points in u-row order format
ctrlpts = compatibility.flip_ctrlpts_u(ctrlpts_mesh, dim_u, dim_v)
# mesh files store control points in format (x, y, z, w)
ctrlptsw = compatibility.generate_ctrlptsw(ctrlpts)
# Set control points
surf.set_ctrlpts(ctrlptsw, dim_u, dim_v)
# 4th and 5th lines are knot vectors
surf.knotvector_u = [float(u) for u in content[3]]
surf.knotvector_v = [float(v) for v in content[4]]
# Return the surface instance
return surf | Generates a NURBS surface object from a mesh file.
:param file_name: input mesh file
:type file_name: str
:return: a NURBS surface
:rtype: NURBS.Surface |
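Usage sketch; the mesh file name is hypothetical, and the evaluation call assumes a recent geomdl release exposing evaluate_single:
surf = import_surf_mesh('surface.msh')
print(surf.degree_u, surf.degree_v, surf.ctrlpts_size_u, surf.ctrlpts_size_v)
pt = surf.evaluate_single((0.5, 0.5))  # a 3-D point on the surface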
def clean_whitespace(string, compact=False):
"""Return string with compressed whitespace."""
for a, b in (('\r\n', '\n'), ('\r', '\n'), ('\n\n', '\n'),
('\t', ' '), (' ', ' ')):
string = string.replace(a, b)
if compact:
for a, b in (('\n', ' '), ('[ ', '['),
(' ', ' '), (' ', ' '), (' ', ' ')):
string = string.replace(a, b)
return string.strip() | Return string with compressed whitespace. |
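Two quick examples showing the default and compact modes:
clean_whitespace('a\r\n\tb')                # -> 'a\n b'
clean_whitespace('a\r\n\tb', compact=True)  # -> 'a b'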
def _value_and_batch_jacobian(f, x):
"""Enables uniform interface to value and batch jacobian calculation.
Works in both eager and graph modes.
Arguments:
f: The scalar function to evaluate.
x: The value at which to compute the value and the batch jacobian.
Returns:
A tuple (f(x), J(x)), where J(x) is the batch jacobian.
"""
if tf.executing_eagerly():
with tf.GradientTape() as tape:
tape.watch(x)
value = f(x)
batch_jacobian = tape.batch_jacobian(value, x)
else:
value = f(x)
batch_jacobian = gradients.batch_jacobian(value, x)
return value, batch_jacobian | Enables uniform interface to value and batch jacobian calculation.
Works in both eager and graph modes.
Arguments:
f: The scalar function to evaluate.
x: The value at which to compute the value and the batch jacobian.
Returns:
A tuple (f(x), J(x)), where J(x) is the batch jacobian. |
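A small sketch of the eager-mode path (assumes TensorFlow with eager execution enabled); the element-wise square keeps the output shaped [batch, n] so the batch Jacobian is well defined:
import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
value, jac = _value_and_batch_jacobian(lambda t: t ** 2, x)
print(value.shape, jac.shape)  # (2, 2) and (2, 2, 2); each 2x2 Jacobian is diag(2 * x_i)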
def create_table(table, data):
"""
Create table with defined name and fields
:return: None
"""
fields = data['fields']
query = '('
indexed_fields = ''
for key, value in fields.items():
non_case_field = value[0][0:value[0].find('(')]
if non_case_field == 'int':
sign = value[0][value[0].find(',') + 1:-1:].strip()
if sign == 'signed':
field_type = 'Int'
else:
field_type = 'UInt'
            bits = re.findall(r'\d+', value[0])[0]
field = key + ' ' + field_type + bits
query += field + ','
elif non_case_field == 'strin':
field_type = 'String'
field = key + ' ' + field_type
query += field + ','
elif non_case_field == 'float':
field_type = 'Float'
            bits = re.findall(r'\d+', value[0])[0]
field = key + ' ' + field_type + bits
query += field + ','
if value[1] == 'yes':
indexed_fields += key + ','
query = query[:-1:] + f",date Date) ENGINE = MergeTree(date, ({indexed_fields} date), 8192)"
client.execute(f"CREATE TABLE {table} {query}") | Create table with defined name and fields
:return: None |
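A hypothetical field specification matching the parser above; the `(type, indexed)` tuple format and the module-level `client` connection are assumptions inferred from the code, not part of the original source:
data = {
    'fields': {
        'user_id': ('int(64, unsigned)', 'yes'),  # becomes UInt64 and is indexed
        'name': ('string', 'no'),                 # becomes String
        'score': ('float(32)', 'no'),             # becomes Float32
    }
}
create_table('events', data)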
def authenticate_request(self, method, bucket='', key='', headers=None):
'''Authenticate a HTTP request by filling in Authorization field header.
:param method: HTTP method (e.g. GET, PUT, POST)
:param bucket: name of the bucket.
:param key: name of key within bucket.
:param headers: dictionary of additional HTTP headers.
:return: boto.connection.HTTPRequest object with Authorization header
filled (NB: will also have a Date field if none before and a User-Agent
field will be set to Boto).
'''
# following is extracted from S3Connection.make_request and the method
# it calls: AWSAuthConnection.make_request
path = self.conn.calling_format.build_path_base(bucket, key)
auth_path = self.conn.calling_format.build_auth_path(bucket, key)
http_request = boto.connection.AWSAuthConnection.build_base_http_request(
self.conn,
method,
path,
auth_path,
{},
headers
)
http_request.authorize(connection=self.conn)
return http_request | Authenticate a HTTP request by filling in Authorization field header.
:param method: HTTP method (e.g. GET, PUT, POST)
:param bucket: name of the bucket.
:param key: name of key within bucket.
:param headers: dictionary of additional HTTP headers.
:return: boto.connection.HTTPRequest object with Authorization header
filled (NB: will also have a Date field if none before and a User-Agent
field will be set to Boto). |
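Hedged usage sketch; `signer` stands in for an instance of the surrounding class constructed around an authenticated boto S3Connection:
request = signer.authenticate_request('GET', bucket='my-bucket', key='data/file.txt')
print(request.headers['Authorization'])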
def RegisterSourceType(cls, source_type_class):
"""Registers a source type.
Source types are identified based on their type indicator.
Args:
source_type_class (type): source type.
Raises:
      KeyError: if a source type is already set for the corresponding
type indicator.
"""
if source_type_class.TYPE_INDICATOR in cls._source_type_classes:
raise KeyError(
'Source type already set for type: {0:s}.'.format(
source_type_class.TYPE_INDICATOR))
cls._source_type_classes[source_type_class.TYPE_INDICATOR] = (
source_type_class) | Registers a source type.
Source types are identified based on their type indicator.
Args:
source_type_class (type): source type.
Raises:
KeyError: if a source type is already set for the corresponding
type indicator. |
def _find_usages_vpn_gateways(self):
"""find usage of vpn gateways"""
# do not include deleting and deleted in the results
vpngws = self.conn.describe_vpn_gateways(Filters=[
{
'Name': 'state',
'Values': [
'available',
'pending'
]
}
])['VpnGateways']
self.limits['Virtual private gateways']._add_current_usage(
len(vpngws),
aws_type='AWS::EC2::VPNGateway'
) | find usage of vpn gateways |
def post(self, *args, **kwargs):
"""Add / create relationship(s)"""
json_data = request.get_json() or {}
relationship_field, model_relationship_field, related_type_, related_id_field = self._get_relationship_data()
if 'data' not in json_data:
raise BadRequest('You must provide data with a "data" route node', source={'pointer': '/data'})
if isinstance(json_data['data'], dict):
if 'type' not in json_data['data']:
raise BadRequest('Missing type in "data" node', source={'pointer': '/data/type'})
if 'id' not in json_data['data']:
raise BadRequest('Missing id in "data" node', source={'pointer': '/data/id'})
if json_data['data']['type'] != related_type_:
raise InvalidType('The type field does not match the resource type', source={'pointer': '/data/type'})
if isinstance(json_data['data'], list):
for obj in json_data['data']:
if 'type' not in obj:
raise BadRequest('Missing type in "data" node', source={'pointer': '/data/type'})
if 'id' not in obj:
raise BadRequest('Missing id in "data" node', source={'pointer': '/data/id'})
if obj['type'] != related_type_:
raise InvalidType('The type provided does not match the resource type',
source={'pointer': '/data/type'})
self.before_post(args, kwargs, json_data=json_data)
obj_, updated = self._data_layer.create_relationship(json_data,
model_relationship_field,
related_id_field,
kwargs)
status_code = 200
result = {'meta': {'message': 'Relationship successfully created'}}
if updated is False:
result = ''
status_code = 204
final_result = self.after_post(result, status_code)
return final_result | Add / create relationship(s) |
def AIMAFile(components, mode='r'):
"Open a file based at the AIMA root directory."
import utils
dir = os.path.dirname(utils.__file__)
    return open(os.path.join(dir, *components), mode) | Open a file based at the AIMA root directory.
def _in_header(self, col):
"""Validate column identifier(s)."""
# pylint: disable=R1704
if not self._has_header:
# Conditionally register exceptions so that they do not appear
# in situations where has_header is always True. In this way
# they are not auto-documented by default
icol_ex = pexdoc.exh.addex(RuntimeError, "Invalid column specification")
hnf_ex = pexdoc.exh.addex(ValueError, "Column *[column_identifier]* not found")
col_list = [col] if isinstance(col, (str, int)) else col
for col in col_list:
edata = {"field": "column_identifier", "value": col}
if not self._has_header:
# Condition not subsumed in raise_exception_if
# so that calls that always have has_header=True
# do not pick up this exception
icol_ex(not isinstance(col, int))
hnf_ex((col < 0) or (col > len(self._header) - 1), edata)
else:
hnf_ex(
(isinstance(col, int) and ((col < 0) or (col > self._data_cols)))
or (
isinstance(col, str) and (col.upper() not in self._header_upper)
),
edata,
)
return col_list | Validate column identifier(s). |
def create_issue(self, data, params=None):
"""Creates an issue or a sub-task from a JSON representation.
You can provide two parameters in request's body: update or fields. The fields, that can be set on an issue
create operation, can be determined using the /rest/api/2/issue/createmeta resource. If a particular field is
not configured to appear on the issue's Create screen, then it will not be returned in the createmeta response.
A field validation error will occur if such field is submitted in request.
Creating a sub-task is similar to creating an issue with the following differences:
issueType field must be set to a sub-task issue type (use /issue/createmeta to find sub-task issue types), and
You must provide a parent field with the ID or key of the parent issue.
Args:
data:
params:
Returns:
"""
return self._post(self.API_URL + 'issue', data=data, params=params) | Creates an issue or a sub-task from a JSON representation.
You can provide two parameters in request's body: update or fields. The fields, that can be set on an issue
create operation, can be determined using the /rest/api/2/issue/createmeta resource. If a particular field is
not configured to appear on the issue's Create screen, then it will not be returned in the createmeta response.
A field validation error will occur if such field is submitted in request.
Creating a sub-task is similar to creating an issue with the following differences:
issueType field must be set to a sub-task issue type (use /issue/createmeta to find sub-task issue types), and
You must provide a parent field with the ID or key of the parent issue.
Args:
data:
params:
Returns: |
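A hedged example payload; the project key and issue type are placeholders, but the nested fields structure follows the JIRA REST documentation referenced above:
data = {
    'fields': {
        'project': {'key': 'TEST'},
        'summary': 'Something is broken',
        'description': 'Steps to reproduce ...',
        'issuetype': {'name': 'Bug'},
    }
}
# client.create_issue(data)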
def send (self, command, *args, **kwargs):
"""Creates, validates, and sends the given command as a UDP
packet to the destination (host, port) specified when this
CmdAPI was created.
Returns True if the command was created, valid, and sent,
False otherwise.
"""
status = False
cmdobj = self._cmddict.create(command, *args, **kwargs)
messages = []
if not cmdobj.validate(messages):
for msg in messages:
log.error(msg)
else:
encoded = cmdobj.encode()
if self._verbose:
size = len(cmdobj.name)
pad = (size - len(cmdobj.name) + 1) * ' '
gds.hexdump(encoded, preamble=cmdobj.name + ':' + pad)
try:
values = (self._host, self._port, str(cmdobj))
log.command('Sending to %s:%d: %s' % values)
self._socket.sendto(encoded, (self._host, self._port))
status = True
with pcap.open(self.CMD_HIST_FILE, 'a') as output:
output.write(str(cmdobj))
except socket.error as e:
log.error(e.message)
except IOError as e:
log.error(e.message)
return status | Creates, validates, and sends the given command as a UDP
packet to the destination (host, port) specified when this
CmdAPI was created.
Returns True if the command was created, valid, and sent,
False otherwise. |
def _all_arcs(self):
"""Get the set of all arcs in this code object and its children.
See `_arcs` for details.
"""
arcs = set()
for bp in self.child_parsers():
arcs.update(bp._arcs())
return arcs | Get the set of all arcs in this code object and its children.
See `_arcs` for details. |
def add_observee_with_credentials(self, user_id, access_token=None, observee_password=None, observee_unique_id=None):
"""
Add an observee with credentials.
Register the given user to observe another user, given the observee's credentials.
*Note:* all users are allowed to add their own observees, given the observee's
credentials or access token are provided. Administrators can add observees given credentials, access token or
the {api:UserObserveesController#update observee's id}.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
# OPTIONAL - observee[unique_id]
"""The login id for the user to observe. Required if access_token is omitted."""
if observee_unique_id is not None:
data["observee[unique_id]"] = observee_unique_id
# OPTIONAL - observee[password]
"""The password for the user to observe. Required if access_token is omitted."""
if observee_password is not None:
data["observee[password]"] = observee_password
# OPTIONAL - access_token
"""The access token for the user to observe. Required if <tt>observee[unique_id]</tt> or <tt>observee[password]</tt> are omitted."""
if access_token is not None:
data["access_token"] = access_token
self.logger.debug("POST /api/v1/users/{user_id}/observees with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/users/{user_id}/observees".format(**path), data=data, params=params, single_item=True) | Add an observee with credentials.
Register the given user to observe another user, given the observee's credentials.
*Note:* all users are allowed to add their own observees, given the observee's
credentials or access token are provided. Administrators can add observees given credentials, access token or
the {api:UserObserveesController#update observee's id}. |
def _check_input_files(nspc, parser):
"""check filename args. otherwise if one of the 3 filenames is bad
it's hard to tell which one"""
if not len(nspc.filenames) == 3:
parser.print_help()
msg = """
3 Expected files; Expected content: study population association",
{} Actual files: {}""".format(len(nspc.filenames), ' '.join(nspc.filenames))
raise Exception(msg)
for fin in nspc.filenames:
if not os.path.exists(fin):
return "*{}* does not exist".format(fin)
return False | check filename args. otherwise if one of the 3 filenames is bad
it's hard to tell which one |
def actnorm_center(name, x, reverse=False, init=False):
"""Add a bias to x.
Initialize such that the output of the first minibatch is zero centered
per channel.
Args:
name: scope
x: 2-D or 4-D Tensor.
reverse: Forward or backward operation.
init: data-dependent initialization.
Returns:
x_center: (x + b), if reverse is True and (x - b) otherwise.
"""
shape = common_layers.shape_list(x)
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
assert len(shape) == 2 or len(shape) == 4
if len(shape) == 2:
x_mean = tf.reduce_mean(x, [0], keepdims=True)
b = get_variable_ddi("b", (1, shape[1]), initial_value=-x_mean,
init=init)
elif len(shape) == 4:
x_mean = tf.reduce_mean(x, [0, 1, 2], keepdims=True)
b = get_variable_ddi(
"b", (1, 1, 1, shape[3]), initial_value=-x_mean, init=init)
if not reverse:
x += b
else:
x -= b
return x | Add a bias to x.
Initialize such that the output of the first minibatch is zero centered
per channel.
Args:
name: scope
x: 2-D or 4-D Tensor.
reverse: Forward or backward operation.
init: data-dependent initialization.
Returns:
x_center: (x + b), if reverse is True and (x - b) otherwise. |
def reset(db_name):
"""Reset database."""
conn = psycopg2.connect(database='postgres')
db = Database(db_name)
conn.autocommit = True
with conn.cursor() as cursor:
cursor.execute(db.drop_statement())
cursor.execute(db.create_statement())
conn.close() | Reset database. |
def mavlink_packet(self, m):
'''handle mavlink packets'''
if m.get_type() == 'REMOTE_LOG_DATA_BLOCK':
if not self.packet_is_for_me(m):
self.dropped += 1
return
if self.sender is None and m.seqno == 0:
if self.log_settings.verbose:
print("DFLogger: Received data packet - starting new log")
self.start_new_log()
self.sender = (m.get_srcSystem(), m.get_srcComponent())
if self.sender is None:
# No connection right now, and this packet did not start one
return
if self.stopped:
# send a stop packet @1Hz until the other end gets the idea:
self.tell_sender_to_stop(m)
return
if self.sender is not None:
size = len(m.data)
data = ''.join(str(chr(x)) for x in m.data[:size])
ofs = size*(m.seqno)
self.logfile.seek(ofs)
self.logfile.write(data)
if m.seqno in self.missing_blocks:
if self.log_settings.verbose:
print("DFLogger: Got missing block: %d" % (m.seqno,))
del self.missing_blocks[m.seqno]
self.missing_found += 1
self.blocks_to_ack_and_nack.append(
[self.master, m.seqno, 1, time.time(), None]
)
self.acking_blocks[m.seqno] = 1
# print("DFLogger: missing: %s" %
# (str(self.missing_blocks),))
else:
self.do_ack_block(m.seqno)
if self.last_seqno < m.seqno:
self.last_seqno = m.seqno
self.download += size | handle mavlink packets |
def _dmi_cast(key, val, clean=True):
'''
Simple caster thingy for trying to fish out at least ints & lists from strings
'''
if clean and not _dmi_isclean(key, val):
return
elif not re.match(r'serial|part|asset|product', key, flags=re.IGNORECASE):
if ',' in val:
val = [el.strip() for el in val.split(',')]
else:
try:
val = int(val)
except Exception:
pass
return val | Simple caster thingy for trying to fish out at least ints & lists from strings |
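Illustrative calls with clean=False so the results do not depend on the unshown _dmi_isclean helper:
_dmi_cast('speed', '3200', clean=False)      # -> 3200
_dmi_cast('flags', 'a, b, c', clean=False)   # -> ['a', 'b', 'c']
_dmi_cast('serial', '0042', clean=False)     # -> '0042' (serial/part/asset/product keys stay strings)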
def _write_plan(self, stream):
"""Write the plan line to the stream.
If we have a plan and have not yet written it out, write it to
the given stream.
"""
if self.plan is not None:
if not self._plan_written:
print("1..{0}".format(self.plan), file=stream)
self._plan_written = True | Write the plan line to the stream.
If we have a plan and have not yet written it out, write it to
the given stream. |
def _theorem5p4(adj, ub):
"""By Theorem 5.4, if any two vertices have ub + 1 common neighbors
then we can add an edge between them.
"""
new_edges = set()
for u, v in itertools.combinations(adj, 2):
if u in adj[v]:
# already an edge
continue
if len(adj[u].intersection(adj[v])) > ub:
new_edges.add((u, v))
while new_edges:
for u, v in new_edges:
adj[u].add(v)
adj[v].add(u)
new_edges = set()
for u, v in itertools.combinations(adj, 2):
if u in adj[v]:
continue
if len(adj[u].intersection(adj[v])) > ub:
new_edges.add((u, v)) | By Theorem 5.4, if any two vertices have ub + 1 common neighbors
then we can add an edge between them. |
def ancestors(self, cl=None, noduplicates=True):
""" returns all ancestors in the taxonomy """
if not cl:
cl = self
if cl.parents():
bag = []
for x in cl.parents():
if x.uri != cl.uri: # avoid circular relationships
bag += [x] + self.ancestors(x, noduplicates)
else:
bag += [x]
# finally:
if noduplicates:
return remove_duplicates(bag)
else:
return bag
else:
return [] | returns all ancestors in the taxonomy |
def screenshot(path=None):
"""Capture the screen and save it as a png file.
If path is None then the image will be placed in the current
folder with the names:
``screenshot001.png, screenshot002.png, ...``
Args:
path (Optional[Text]): The file path to save the screenshot.
"""
if not _rootinitialized:
raise TDLError('Initialize first with tdl.init')
if isinstance(path, str):
_lib.TCOD_sys_save_screenshot(_encodeString(path))
elif path is None: # save to screenshot001.png, screenshot002.png, ...
filelist = _os.listdir('.')
n = 1
filename = 'screenshot%.3i.png' % n
while filename in filelist:
n += 1
filename = 'screenshot%.3i.png' % n
_lib.TCOD_sys_save_screenshot(_encodeString(filename))
else: # assume file like obj
#save to temp file and copy to file-like obj
tmpname = _os.tempnam()
_lib.TCOD_sys_save_screenshot(_encodeString(tmpname))
        with open(tmpname, 'rb') as tmpfile:
path.write(tmpfile.read())
_os.remove(tmpname) | Capture the screen and save it as a png file.
If path is None then the image will be placed in the current
folder with the names:
``screenshot001.png, screenshot002.png, ...``
Args:
path (Optional[Text]): The file path to save the screenshot. |
def _clear_pattern(self):
""" Clears this event recurrence """
# pattern group
self.__interval = None
self.__days_of_week = set()
self.__first_day_of_week = None
self.__day_of_month = None
self.__month = None
self.__index = 'first'
# range group
self.__start_date = None
self.__end_date = None
self.__occurrences = None | Clears this event recurrence |
def _is_valid_ins(self, ins_ir):
"""Check for instruction validity as a gadgets.
"""
invalid_instrs = [
ReilMnemonic.JCC,
ReilMnemonic.UNDEF,
ReilMnemonic.UNKN,
]
        return not any([i.mnemonic in invalid_instrs for i in ins_ir]) | Check for instruction validity as a gadget.
def register(self, classes=[]):
"""
Registers new plugins.
The registration only creates a new entry for a plugin inside the _classes dictionary.
It does not activate or even initialise the plugin.
A plugin must be a class, which inherits directly or indirectly from GwBasePattern.
:param classes: List of plugin classes
:type classes: list
"""
if not isinstance(classes, list):
raise AttributeError("plugins must be a list, not %s." % type(classes))
plugin_registered = []
for plugin_class in classes:
plugin_name = plugin_class.__name__
self.register_class(plugin_class, plugin_name)
self._log.debug("Plugin %s registered" % plugin_name)
plugin_registered.append(plugin_name)
self._log.info("Plugins registered: %s" % ", ".join(plugin_registered)) | Registers new plugins.
The registration only creates a new entry for a plugin inside the _classes dictionary.
It does not activate or even initialise the plugin.
A plugin must be a class, which inherits directly or indirectly from GwBasePattern.
:param classes: List of plugin classes
:type classes: list |
def delete(self):
"""Deletes the photoset.
"""
method = 'flickr.photosets.delete'
_dopost(method, auth=True, photoset_id=self.id)
return True | Deletes the photoset. |
def publish_avatar_set(self, avatar_set):
"""
Make `avatar_set` the current avatar of the jid associated with this
connection.
If :attr:`synchronize_vcard` is true and PEP is available the
vCard is only synchronized if the PEP update is successful.
This means publishing the ``image/png`` avatar data and the
avatar metadata set in pubsub. The `avatar_set` must be an
instance of :class:`AvatarSet`. If :attr:`synchronize_vcard` is
true the avatar is additionally published in the user vCard.
"""
id_ = avatar_set.png_id
done = False
with (yield from self._publish_lock):
if (yield from self._pep.available()):
yield from self._pep.publish(
namespaces.xep0084_data,
avatar_xso.Data(avatar_set.image_bytes),
id_=id_
)
yield from self._pep.publish(
namespaces.xep0084_metadata,
avatar_set.metadata,
id_=id_
)
done = True
if self._synchronize_vcard:
my_vcard = yield from self._vcard.get_vcard()
my_vcard.set_photo_data("image/png",
avatar_set.image_bytes)
self._vcard_id = avatar_set.png_id
yield from self._vcard.set_vcard(my_vcard)
self._presence_server.resend_presence()
done = True
if not done:
raise RuntimeError(
"failed to publish avatar: no protocol available"
) | Make `avatar_set` the current avatar of the jid associated with this
connection.
If :attr:`synchronize_vcard` is true and PEP is available the
vCard is only synchronized if the PEP update is successful.
This means publishing the ``image/png`` avatar data and the
avatar metadata set in pubsub. The `avatar_set` must be an
instance of :class:`AvatarSet`. If :attr:`synchronize_vcard` is
true the avatar is additionally published in the user vCard. |
def flush(self, stats, cs_status=None):
"""Clear and update the screen.
stats: Stats database to display
cs_status:
"None": standalone or server mode
"Connected": Client is connected to the server
"Disconnected": Client is disconnected from the server
"""
self.erase()
self.display(stats, cs_status=cs_status) | Clear and update the screen.
stats: Stats database to display
cs_status:
"None": standalone or server mode
"Connected": Client is connected to the server
"Disconnected": Client is disconnected from the server |
def write(self, fptr):
"""Write a channel definition box to file.
"""
self._validate(writing=True)
num_components = len(self.association)
fptr.write(struct.pack('>I4s', 8 + 2 + num_components * 6, b'cdef'))
fptr.write(struct.pack('>H', num_components))
for j in range(num_components):
fptr.write(struct.pack('>' + 'H' * 3,
self.index[j],
self.channel_type[j],
self.association[j])) | Write a channel definition box to file. |
def find_argname(self, argname, rec=False):
"""Get the index and :class:`AssignName` node for given name.
:param argname: The name of the argument to search for.
:type argname: str
:param rec: Whether or not to include arguments in unpacked tuples
in the search.
:type rec: bool
:returns: The index and node for the argument.
:rtype: tuple(str or None, AssignName or None)
"""
if self.args: # self.args may be None in some cases (builtin function)
return _find_arg(argname, self.args, rec)
return None, None | Get the index and :class:`AssignName` node for given name.
:param argname: The name of the argument to search for.
:type argname: str
:param rec: Whether or not to include arguments in unpacked tuples
in the search.
:type rec: bool
:returns: The index and node for the argument.
:rtype: tuple(str or None, AssignName or None) |
async def delTrigger(self, iden):
'''
Deletes a trigger from the cortex
'''
trig = self.cell.triggers.get(iden)
self._trig_auth_check(trig.get('useriden'))
self.cell.triggers.delete(iden) | Deletes a trigger from the cortex |
def updateDynamics(self):
'''
Calculates a new "aggregate dynamic rule" using the history of variables
named in track_vars, and distributes this rule to AgentTypes in agents.
Parameters
----------
none
Returns
-------
dynamics : instance
The new "aggregate dynamic rule" that agents believe in and act on.
Should have attributes named in dyn_vars.
'''
# Make a dictionary of inputs for the dynamics calculator
history_vars_string = ''
arg_names = list(getArgNames(self.calcDynamics))
if 'self' in arg_names:
arg_names.remove('self')
for name in arg_names:
history_vars_string += ' \'' + name + '\' : self.' + name + '_hist,'
update_dict = eval('{' + history_vars_string + '}')
# Calculate a new dynamic rule and distribute it to the agents in agent_list
dynamics = self.calcDynamics(**update_dict) # User-defined dynamics calculator
for var_name in self.dyn_vars:
this_obj = getattr(dynamics,var_name)
for this_type in self.agents:
setattr(this_type,var_name,this_obj)
return dynamics | Calculates a new "aggregate dynamic rule" using the history of variables
named in track_vars, and distributes this rule to AgentTypes in agents.
Parameters
----------
none
Returns
-------
dynamics : instance
The new "aggregate dynamic rule" that agents believe in and act on.
Should have attributes named in dyn_vars. |
def from_pycbc(cls, fs, copy=True):
"""Convert a `pycbc.types.frequencyseries.FrequencySeries` into
a `FrequencySeries`
Parameters
----------
fs : `pycbc.types.frequencyseries.FrequencySeries`
the input PyCBC `~pycbc.types.frequencyseries.FrequencySeries`
array
copy : `bool`, optional, default: `True`
if `True`, copy these data to a new array
Returns
-------
spectrum : `FrequencySeries`
a GWpy version of the input frequency series
"""
return cls(fs.data, f0=0, df=fs.delta_f, epoch=fs.epoch, copy=copy) | Convert a `pycbc.types.frequencyseries.FrequencySeries` into
a `FrequencySeries`
Parameters
----------
fs : `pycbc.types.frequencyseries.FrequencySeries`
the input PyCBC `~pycbc.types.frequencyseries.FrequencySeries`
array
copy : `bool`, optional, default: `True`
if `True`, copy these data to a new array
Returns
-------
spectrum : `FrequencySeries`
a GWpy version of the input frequency series |
def _create_datadict(cls, internal_name):
"""Creates an object depending on `internal_name`
Args:
internal_name (str): IDD name
Raises:
ValueError: if `internal_name` cannot be matched to a data dictionary object
"""
if internal_name == "LOCATION":
return Location()
if internal_name == "DESIGN CONDITIONS":
return DesignConditions()
if internal_name == "TYPICAL/EXTREME PERIODS":
return TypicalOrExtremePeriods()
if internal_name == "GROUND TEMPERATURES":
return GroundTemperatures()
if internal_name == "HOLIDAYS/DAYLIGHT SAVINGS":
return HolidaysOrDaylightSavings()
if internal_name == "COMMENTS 1":
return Comments1()
if internal_name == "COMMENTS 2":
return Comments2()
if internal_name == "DATA PERIODS":
return DataPeriods()
raise ValueError(
"No DataDictionary known for {}".format(internal_name)) | Creates an object depending on `internal_name`
Args:
internal_name (str): IDD name
Raises:
ValueError: if `internal_name` cannot be matched to a data dictionary object |