<SYSTEM_TASK:>
Parse qual attribute of the old HEPData format
<END_TASK>
<USER_TASK:>
Description:
def _parse_qual(self, data):
"""Parse qual attribute of the old HEPData format
example qual:
*qual: RE : P P --> Z0 Z0 X
:param data: data to be parsed
:type data: str
""" |
qualifiers = []  # renamed from 'list' to avoid shadowing the builtin
headers = data.split(':')
name = headers[0].strip()
name = re.split(' IN ', name, flags=re.I)  # ignore case
units = None
if len(name) > 1:
    units = name[1].strip()
name = name[0].strip()
if len(headers) < 2:
    raise BadFormat("*qual line must contain a name and values: %s" % data)
for header in headers[1:]:
    xheader = {'name': name}
    if units:
        xheader['units'] = units
    xheader['value'] = header.strip()
    qualifiers.append(xheader)
    # extract energy if SQRT(S) is one of the qualifiers
    if name.startswith('SQRT(S)') and units is not None and units.lower() == 'gev':
        energies = re.split(' TO ', xheader['value'], flags=re.I)
        for energy in energies:
            try:
                energy = float(energy)
                self.set_of_energies.add(energy)
            except ValueError:
                pass
self.current_table.qualifiers.append(qualifiers) |
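A minimal standalone sketch of just the splitting step above, run on the docstring's example line (with the '*qual:' prefix already removed by the caller); the energy extraction and table bookkeeping are omitted:

import re

data = 'RE : P P --> Z0 Z0 X'
headers = data.split(':')
name = re.split(' IN ', headers[0].strip(), flags=re.I)[0].strip()
print([{'name': name, 'value': h.strip()} for h in headers[1:]])
# [{'name': 'RE', 'value': 'P P --> Z0 Z0 X'}]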
<SYSTEM_TASK:>
Processes line stripping any comments from it
<END_TASK>
<USER_TASK:>
Description:
def _strip_comments(line):
"""Processes line stripping any comments from it
:param line: line to be processed
:type line: str
:return: line with removed comments
:rtype: str
""" |
if line == '':
    return line
r = re.search('(?P<line>[^#]*)(#(?P<comment>.*))?', line)
if r:
    line = r.group('line')
    if not line.endswith('\n'):
        line += '\n'
    return line
return '\n' |
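For reference, a self-contained sketch of the same comment-stripping regex (mirroring the logic above rather than importing it):

import re

def strip_comments(line):
    # Keep everything before the first '#' and preserve a trailing newline.
    if line == '':
        return line
    match = re.search('(?P<line>[^#]*)(#(?P<comment>.*))?', line)
    if match:
        kept = match.group('line')
        return kept if kept.endswith('\n') else kept + '\n'
    return '\n'

print(repr(strip_comments('*dataset:  # trailing comment\n')))   # '*dataset:  \n'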
<SYSTEM_TASK:>
Returns parsing function which will parse data as text, and add it to the table metadata dictionary
<END_TASK>
<USER_TASK:>
Description:
def _bind_set_table_metadata(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table metatadata dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to table.metadata
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
""" |
def set_table_metadata(self, data):
    if multiline:
        data = self._read_multiline(data)
    if key == 'location' and data:
        data = 'Data from ' + data
    self.current_table.metadata[key] = data.strip()

# method must be bound, so we use __get__
return set_table_metadata.__get__(self) |
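The __get__ call is what turns the closure into a bound method of the parser instance. A generic, hypothetical sketch of the same trick (names are illustrative, not from the library):

class Demo:
    def bind_setter(self, key):
        def setter(self, value):
            setattr(self, key, value)
        # __get__ binds the closure to this instance, like a normal method
        return setter.__get__(self)

d = Demo()
set_title = d.bind_setter('title')
set_title('Z0 Z0 cross section')
print(d.title)   # Z0 Z0 cross section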
<SYSTEM_TASK:>
Returns parsing function which will parse data as text, and add it to the table additional data dictionary
<END_TASK>
<USER_TASK:>
Description:
def _bind_parse_additional_data(self, key, multiline=False):
"""Returns parsing function which will parse data as text, and add it to the table additional data dictionary
with the provided key
:param key: dictionary key under which parsed data will be added to the table's additional_data dictionary
:type key: str
:param multiline: if True this attribute will be treated as multiline
:type multiline: bool
:return: function with bound key and multiline attributes
:rtype: Function
""" |
def _set_additional_data_bound(self, data):
    """Concrete method for setting additional data
    :param self:
    :type self: OldHEPData
    """
    # if it's multiline, parse it
    if multiline:
        data = self._read_multiline(data)
    if key not in self.additional_data:
        self.additional_data[key] = []
    self.additional_data[key].append(data)

# method must be bound, so we use __get__
return _set_additional_data_bound.__get__(self) |
<SYSTEM_TASK:>
If an error is a percentage, we convert to a float, then
<END_TASK>
<USER_TASK:>
Description:
def error_value_processor(value, error):
"""
If an error is a percentage, we convert to a float, then
calculate the percentage of the supplied value.
:param value: base value, e.g. 10
:param error: e.g. 20.0%
:return: the absolute error, e.g. 2 for the above case.
""" |
if isinstance(error, (str, unicode)):  # 'unicode' assumes Python 2; on Python 3 use plain str
    try:
        if "%" in error:
            error_float = float(error.replace("%", ""))
            error_abs = (value / 100) * error_float
            return error_abs
        elif error == "":
            error = 0.0
        else:
            error = float(error)
    except ValueError:
        pass
return error |
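A quick standalone check of the percentage branch above, using the docstring's numbers and float division: 20.0% of a base value of 10 gives an absolute error of 2.0.

value, error = 10.0, "20.0%"
error_float = float(error.replace("%", ""))
print((value / 100) * error_float)   # 2.0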
<SYSTEM_TASK:>
Send a message to a channel or group via Slack RTM socket, returning
<END_TASK>
<USER_TASK:>
Description:
def send_msg(self, text, channel, confirm=True):
"""
Send a message to a channel or group via Slack RTM socket, returning
the resulting message object
params:
- text(str): Message text to send
- channel(Channel): Target channel
- confirm(bool): If True, wait for a reply-to confirmation before returning.
""" |
self._send_id += 1
msg = SlackMsg(self._send_id, channel.id, text)
self.ws.send(msg.json)
self._stats['messages_sent'] += 1

if confirm:
    # Wait for confirmation our message was received
    for e in self.events():
        if e.get('reply_to') == self._send_id:
            msg.sent = True
            msg.ts = e.ts
            return msg
else:
    return msg |
<SYSTEM_TASK:>
Extend event object with User and Channel objects
<END_TASK>
<USER_TASK:>
Description:
def _process_event(self, event):
""" Extend event object with User and Channel objects """ |
if event.get('user'):
    event.user = self.lookup_user(event.get('user'))
if event.get('channel'):
    event.channel = self.lookup_channel(event.get('channel'))
if self.user.id in event.mentions:
    event.mentions_me = True
event.mentions = [self.lookup_user(uid) for uid in event.mentions]
return event |
<SYSTEM_TASK:>
Returns an Evolution Stone object containing the details about the
<END_TASK>
<USER_TASK:>
Description:
def get_evolution_stone(self, slug):
"""
Returns an Evolution Stone object containing the details about the
evolution stone.
""" |
endpoint = '/evolution-stone/' + slug
return self.make_request(self.BASE_URL + endpoint) |
<SYSTEM_TASK:>
Returns a Pokemon League object containing the details about the
<END_TASK>
<USER_TASK:>
Description:
def get_league(self, slug):
"""
Returns a Pokemon League object containing the details about the
league.
""" |
endpoint = '/league/' + slug
return self.make_request(self.BASE_URL + endpoint) |
<SYSTEM_TASK:>
Returns an array of Pokemon objects containing all the forms of the
<END_TASK>
<USER_TASK:>
Description:
def get_pokemon_by_name(self, name):
"""
Returns an array of Pokemon objects containing all the forms of the
Pokemon specified by the given name.
""" |
endpoint = '/pokemon/' + str(name)
return self.make_request(self.BASE_URL + endpoint) |
<SYSTEM_TASK:>
Returns an array of Pokemon objects containing all the forms of the
<END_TASK>
<USER_TASK:>
Description:
def get_pokemon_by_number(self, number):
"""
Returns an array of Pokemon objects containing all the forms of the
Pokemon specified by the given Pokedex number.
""" |
endpoint = '/pokemon/' + str(number)
return self.make_request(self.BASE_URL + endpoint) |
<SYSTEM_TASK:>
This method provides easier access to all writers inheriting Writer class
<END_TASK>
<USER_TASK:>
Description:
def get_concrete_class(cls, class_name):
"""This method provides easier access to all writers inheriting Writer class
:param class_name: name of the writer (name of the writer class which should be used)
:type class_name: str
:return: Writer subclass specified by class_name
:rtype: Writer subclass
:raise ValueError:
""" |
def recurrent_class_lookup(cls):
    for subclass in cls.__subclasses__():
        if subclass.__name__.lower() == class_name.lower():
            return subclass
        elif len(subclass.__subclasses__()) > 0:
            r = recurrent_class_lookup(subclass)
            if r is not None:
                return r
    return None

cls = recurrent_class_lookup(cls)
if cls:
    return cls
else:
    raise ValueError("class_name '%s' is invalid" % class_name) |
<SYSTEM_TASK:>
Return an iterator of tuples for slicing, in 'length' chunks.
<END_TASK>
<USER_TASK:>
Description:
def _groups_of(length, total_length):
"""
Return an iterator of tuples for slicing, in 'length' chunks.
Parameters
----------
length : int
Length of each chunk.
total_length : int
Length of the object we are slicing
Returns
-------
iterable of tuples
Values defining a slice range resulting in length 'length'.
""" |
indices = tuple(range(0, total_length, length)) + (None, )
return _pairwise(indices) |
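A minimal sketch of the chunk-boundary idea, assuming _pairwise is the usual itertools recipe that yields consecutive pairs:

import itertools

def _pairwise(iterable):
    # s -> (s0, s1), (s1, s2), (s2, s3), ...
    a, b = itertools.tee(iterable)
    next(b, None)
    return zip(a, b)

indices = tuple(range(0, 10, 4)) + (None,)   # (0, 4, 8, None)
print(list(_pairwise(indices)))              # [(0, 4), (4, 8), (8, None)]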
<SYSTEM_TASK:>
Save the numeric results of each source into its corresponding target.
<END_TASK>
<USER_TASK:>
Description:
def save(sources, targets, masked=False):
"""
Save the numeric results of each source into its corresponding target.
Parameters
----------
sources: list
The list of source arrays for saving from; limited to length 1.
targets: list
The list of target arrays for saving to; limited to length 1.
masked: boolean
Uses a masked array from sources if True.
""" |
# TODO: Remove restriction
assert len(sources) == 1 and len(targets) == 1
array = sources[0]
target = targets[0]
# Request bitesize pieces of the source and assign them to the
# target.
# NB. This algorithm does not use the minimal number of chunks.
# e.g. If the second dimension could be sliced as 0:99, 99:100
# then clearly the first dimension would have to be single
# slices for the 0:99 case, but could be bigger slices for the
# 99:100 case.
# It's not yet clear if this really matters.
all_slices = _all_slices(array)
for index in np.ndindex(*[len(slices) for slices in all_slices]):
keys = tuple(slices[i] for slices, i in zip(all_slices, index))
if masked:
target[keys] = array[keys].masked_array()
else:
target[keys] = array[keys].ndarray() |
<SYSTEM_TASK:>
Count the non-masked elements of the array along the given axis.
<END_TASK>
<USER_TASK:>
Description:
def count(a, axis=None):
"""
Count the non-masked elements of the array along the given axis.
.. note:: Currently limited to operating on a single axis.
:param axis: Axis or axes along which the operation is performed.
The default (axis=None) is to perform the operation
over all the dimensions of the input array.
The axis may be negative, in which case it counts from
the last to the first axis.
If axis is a tuple of ints, the operation is performed
over multiple axes.
:type axis: None, or int, or iterable of ints.
:return: The Array representing the requested count.
:rtype: Array
""" |
axes = _normalise_axis(axis, a)
if axes is None or len(axes) != 1:
msg = "This operation is currently limited to a single axis"
raise AxisSupportError(msg)
return _Aggregation(a, axes[0],
_CountStreamsHandler, _CountMaskedStreamsHandler,
np.dtype('i'), {}) |
<SYSTEM_TASK:>
Request the minimum of an Array over any number of axes.
<END_TASK>
<USER_TASK:>
Description:
def min(a, axis=None):
"""
Request the minimum of an Array over any number of axes.
.. note:: Currently limited to operating on a single axis.
Parameters
----------
a : Array object
The object whose minimum is to be found.
axis : None, or int, or iterable of ints
Axis or axes along which the operation is performed. The default
(axis=None) is to perform the operation over all the dimensions of the
input array. The axis may be negative, in which case it counts from
the last to the first axis. If axis is a tuple of ints, the operation
is performed over multiple axes.
Returns
-------
out : Array
The Array representing the requested minimum.
""" |
axes = _normalise_axis(axis, a)
assert axes is not None and len(axes) == 1
return _Aggregation(a, axes[0],
_MinStreamsHandler, _MinMaskedStreamsHandler,
a.dtype, {}) |
<SYSTEM_TASK:>
Request the maximum of an Array over any number of axes.
<END_TASK>
<USER_TASK:>
Description:
def max(a, axis=None):
"""
Request the maximum of an Array over any number of axes.
.. note:: Currently limited to operating on a single axis.
Parameters
----------
a : Array object
The object whose maximum is to be found.
axis : None, or int, or iterable of ints
Axis or axes along which the operation is performed. The default
(axis=None) is to perform the operation over all the dimensions of the
input array. The axis may be negative, in which case it counts from
the last to the first axis. If axis is a tuple of ints, the operation
is performed over multiple axes.
Returns
-------
out : Array
The Array representing the requested max.
""" |
axes = _normalise_axis(axis, a)
assert axes is not None and len(axes) == 1
return _Aggregation(a, axes[0],
_MaxStreamsHandler, _MaxMaskedStreamsHandler,
a.dtype, {}) |
<SYSTEM_TASK:>
Request the sum of an Array over any number of axes.
<END_TASK>
<USER_TASK:>
Description:
def sum(a, axis=None):
"""
Request the sum of an Array over any number of axes.
.. note:: Currently limited to operating on a single axis.
Parameters
----------
a : Array object
The object whose summation is to be found.
axis : None, or int, or iterable of ints
Axis or axes along which the operation is performed. The default
(axis=None) is to perform the operation over all the dimensions of the
input array. The axis may be negative, in which case it counts from
the last to the first axis. If axis is a tuple of ints, the operation
is performed over multiple axes.
Returns
-------
out : Array
The Array representing the requested sum.
""" |
axes = _normalise_axis(axis, a)
assert axes is not None and len(axes) == 1
return _Aggregation(a, axes[0],
_SumStreamsHandler, _SumMaskedStreamsHandler,
a.dtype, {}) |
<SYSTEM_TASK:>
Request the mean of an Array over any number of axes.
<END_TASK>
<USER_TASK:>
Description:
def mean(a, axis=None, mdtol=1):
"""
Request the mean of an Array over any number of axes.
.. note:: Currently limited to operating on a single axis.
:param axis: Axis or axes along which the operation is performed.
The default (axis=None) is to perform the operation
over all the dimensions of the input array.
The axis may be negative, in which case it counts from
the last to the first axis.
If axis is a tuple of ints, the operation is performed
over multiple axes.
:type axis: None, or int, or iterable of ints.
:param float mdtol: Tolerance of missing data. The value in each
element of the resulting array will be masked if the
fraction of masked data contributing to that element
exceeds mdtol. mdtol=0 means no missing data is
tolerated while mdtol=1 will mean the resulting
element will be masked if and only if all the
contributing elements of the source array are masked.
Defaults to 1.
:return: The Array representing the requested mean.
:rtype: Array
""" |
axes = _normalise_axis(axis, a)
if axes is None or len(axes) != 1:
msg = "This operation is currently limited to a single axis"
raise AxisSupportError(msg)
dtype = (np.array([0], dtype=a.dtype) / 1.).dtype
kwargs = dict(mdtol=mdtol)
return _Aggregation(a, axes[0],
_MeanStreamsHandler, _MeanMaskedStreamsHandler,
dtype, kwargs) |
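A plain-numpy illustration of the mdtol rule described above (this mimics the masking semantics only, not the streaming implementation):

import numpy as np

data = np.ma.masked_invalid([[1.0, np.nan, np.nan],
                             [3.0, 4.0,    np.nan]])
masked_frac = np.ma.getmaskarray(data).sum(axis=0) / data.shape[0]
mdtol = 0.5
# Mask a result element when the fraction of masked inputs exceeds mdtol.
result = np.ma.masked_where(masked_frac > mdtol, data.mean(axis=0))
print(result)   # [2.0 4.0 --]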
<SYSTEM_TASK:>
Request the standard deviation of an Array over any number of axes.
<END_TASK>
<USER_TASK:>
Description:
def std(a, axis=None, ddof=0):
"""
Request the standard deviation of an Array over any number of axes.
.. note:: Currently limited to operating on a single axis.
:param axis: Axis or axes along which the operation is performed.
The default (axis=None) is to perform the operation
over all the dimensions of the input array.
The axis may be negative, in which case it counts from
the last to the first axis.
If axis is a tuple of ints, the operation is performed
over multiple axes.
:type axis: None, or int, or iterable of ints.
:param int ddof: Delta Degrees of Freedom. The divisor used in
calculations is N - ddof, where N represents the
number of elements. By default ddof is zero.
:return: The Array representing the requested standard deviation.
:rtype: Array
""" |
axes = _normalise_axis(axis, a)
if axes is None or len(axes) != 1:
msg = "This operation is currently limited to a single axis"
raise AxisSupportError(msg)
dtype = (np.array([0], dtype=a.dtype) / 1.).dtype
return _Aggregation(a, axes[0],
_StdStreamsHandler, _StdMaskedStreamsHandler,
dtype, dict(ddof=ddof)) |
<SYSTEM_TASK:>
Request the variance of an Array over any number of axes.
<END_TASK>
<USER_TASK:>
Description:
def var(a, axis=None, ddof=0):
"""
Request the variance of an Array over any number of axes.
.. note:: Currently limited to operating on a single axis.
:param axis: Axis or axes along which the operation is performed.
The default (axis=None) is to perform the operation
over all the dimensions of the input array.
The axis may be negative, in which case it counts from
the last to the first axis.
If axis is a tuple of ints, the operation is performed
over multiple axes.
:type axis: None, or int, or iterable of ints.
:param int ddof: Delta Degrees of Freedom. The divisor used in
calculations is N - ddof, where N represents the
number of elements. By default ddof is zero.
:return: The Array representing the requested variance.
:rtype: Array
""" |
axes = _normalise_axis(axis, a)
if axes is None or len(axes) != 1:
msg = "This operation is currently limited to a single axis"
raise AxisSupportError(msg)
dtype = (np.array([0], dtype=a.dtype) / 1.).dtype
return _Aggregation(a, axes[0],
_VarStreamsHandler, _VarMaskedStreamsHandler,
dtype, dict(ddof=ddof)) |
<SYSTEM_TASK:>
A function to generate the top level biggus ufunc wrappers.
<END_TASK>
<USER_TASK:>
Description:
def _ufunc_wrapper(ufunc, name=None):
"""
A function to generate the top level biggus ufunc wrappers.
""" |
if not isinstance(ufunc, np.ufunc):
raise TypeError('{} is not a ufunc'.format(ufunc))
ufunc_name = ufunc.__name__
# Get hold of the masked array equivalent, if it exists.
ma_ufunc = getattr(np.ma, ufunc_name, None)
if ufunc.nin == 2 and ufunc.nout == 1:
func = _dual_input_fn_wrapper('np.{}'.format(ufunc_name), ufunc,
ma_ufunc, name)
elif ufunc.nin == 1 and ufunc.nout == 1:
func = _unary_fn_wrapper('np.{}'.format(ufunc_name), ufunc, ma_ufunc,
name)
else:
raise ValueError('Unsupported ufunc {!r} with {} input arrays & {} '
'output arrays.'.format(ufunc_name, ufunc.nin,
ufunc.nout))
return func |
<SYSTEM_TASK:>
Returns the shape that results from slicing an array of the given
<END_TASK>
<USER_TASK:>
Description:
def _sliced_shape(shape, keys):
"""
Returns the shape that results from slicing an array of the given
shape by the given keys.
>>> _sliced_shape(shape=(52350, 70, 90, 180),
... keys=(np.newaxis, slice(None, 10), 3,
... slice(None), slice(2, 3)))
(1, 10, 90, 1)
""" |
keys = _full_keys(keys, len(shape))
sliced_shape = []
shape_dim = -1
for key in keys:
shape_dim += 1
if _is_scalar(key):
continue
elif isinstance(key, slice):
size = len(range(*key.indices(shape[shape_dim])))
sliced_shape.append(size)
elif isinstance(key, np.ndarray) and key.dtype == np.dtype('bool'):
# Numpy boolean indexing.
sliced_shape.append(builtins.sum(key))
elif isinstance(key, (tuple, np.ndarray)):
sliced_shape.append(len(key))
elif key is np.newaxis:
shape_dim -= 1
sliced_shape.append(1)
else:
raise ValueError('Invalid indexing object "{}"'.format(key))
sliced_shape = tuple(sliced_shape)
return sliced_shape |
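The same key semantics can be cross-checked with plain numpy on a small array (illustrative only):

import numpy as np

arr = np.empty((20, 70, 9, 18))
print(arr[np.newaxis, :10, 3, :, 2:3].shape)   # (1, 10, 9, 1)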
<SYSTEM_TASK:>
Return a human-readable description of the number of bytes required
<END_TASK>
<USER_TASK:>
Description:
def size(array):
"""
Return a human-readable description of the number of bytes required
to store the data of the given array.
For example::
>>> array.nbytes
14000000
>>> biggus.size(array)
'13.35 MiB'
Parameters
----------
array : array-like object
The array object must provide an `nbytes` property.
Returns
-------
out : str
A human-readable string describing the size, e.g. '13.35 MiB'.
""" |
nbytes = array.nbytes
if nbytes < (1 << 10):
    size = '{} B'.format(nbytes)
elif nbytes < (1 << 20):
    size = '{:.02f} KiB'.format(nbytes / (1 << 10))
elif nbytes < (1 << 30):
    size = '{:.02f} MiB'.format(nbytes / (1 << 20))
elif nbytes < (1 << 40):
    size = '{:.02f} GiB'.format(nbytes / (1 << 30))
else:
    size = '{:.02f} TiB'.format(nbytes / (1 << 40))
return size |
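A standalone check of the formatting rule for the docstring's example value of 14000000 bytes (Python 3 division):

nbytes = 14000000
print('{:.02f} MiB'.format(nbytes / (1 << 20)))   # 13.35 MiB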
<SYSTEM_TASK:>
Dispatch the given Chunk onto all the registered output queues.
<END_TASK>
<USER_TASK:>
Description:
def output(self, chunk):
"""
Dispatch the given Chunk onto all the registered output queues.
If the chunk is None, it is silently ignored.
""" |
if chunk is not None:
    for queue in self.output_queues:
        queue.put(chunk) |
<SYSTEM_TASK:>
Emit the Chunk instances which cover the underlying Array.
<END_TASK>
<USER_TASK:>
Description:
def run(self):
"""
Emit the Chunk instances which cover the underlying Array.
The Array is divided into chunks with a size limit of
MAX_CHUNK_SIZE which are emitted into all registered output
queues.
""" |
try:
    chunk_index = self.chunk_index_gen(self.array.shape,
                                       self.iteration_order)
    for key in chunk_index:
        # Now we have the slices that describe the next chunk.
        # For example, key might be equivalent to
        # `[11:12, 0:3, :, :]`.
        # Simply "realise" the data for that region and emit it
        # as a Chunk to all registered output queues.
        if self.masked:
            data = self.array[key].masked_array()
        else:
            data = self.array[key].ndarray()
        output_chunk = Chunk(key, data)
        self.output(output_chunk)
except:
    self.abort()
    raise
else:
    for queue in self.output_queues:
        queue.put(QUEUE_FINISHED) |
<SYSTEM_TASK:>
Set the given nodes as inputs for this node.
<END_TASK>
<USER_TASK:>
Description:
def add_input_nodes(self, input_nodes):
"""
Set the given nodes as inputs for this node.
Creates a limited-size queue.Queue for each input node and
registers each queue as an output of its corresponding node.
""" |
self.input_queues = [queue.Queue(maxsize=3) for _ in input_nodes]
for input_node, input_queue in zip(input_nodes, self.input_queues):
    input_node.add_output_queue(input_queue) |
<SYSTEM_TASK:>
Process the input queues in lock-step, and push any results to
<END_TASK>
<USER_TASK:>
Description:
def run(self):
"""
Process the input queues in lock-step, and push any results to
the registered output queues.
""" |
try:
    while True:
        input_chunks = [input.get() for input in self.input_queues]
        for input in self.input_queues:
            input.task_done()
        if any(chunk is QUEUE_ABORT for chunk in input_chunks):
            self.abort()
            return
        if any(chunk is QUEUE_FINISHED for chunk in input_chunks):
            break
        self.output(self.process_chunks(input_chunks))
    # Finalise the final chunk (process_chunks does this for all
    # but the last chunk).
    self.output(self.finalise())
except:
    self.abort()
    raise
else:
    for queue in self.output_queues:
        queue.put(QUEUE_FINISHED) |
<SYSTEM_TASK:>
Store the incoming chunk at the corresponding position in the
<END_TASK>
<USER_TASK:>
Description:
def process_chunks(self, chunks):
"""
Store the incoming chunk at the corresponding position in the
result array.
""" |
chunk, = chunks
if chunk.keys:
    self.result[chunk.keys] = chunk.data
else:
    self.result[...] = chunk.data |
<SYSTEM_TASK:>
Return a key of type int, slice, or tuple that is guaranteed
<END_TASK>
<USER_TASK:>
Description:
def _cleanup_new_key(self, key, size, axis):
"""
Return a key of type int, slice, or tuple that is guaranteed
to be valid for the given dimension size.
Raises IndexError/TypeError for invalid keys.
""" |
if _is_scalar(key):
    if key >= size or key < -size:
        msg = 'index {0} is out of bounds for axis {1} with' \
              ' size {2}'.format(key, axis, size)
        raise IndexError(msg)
elif isinstance(key, slice):
    pass
elif isinstance(key, np.ndarray) and key.dtype == np.dtype('bool'):
    if key.size > size:
        msg = 'too many boolean indices. Boolean index array ' \
              'of size {0} is greater than axis {1} with ' \
              'size {2}'.format(key.size, axis, size)
        raise IndexError(msg)
elif isinstance(key, collections.Iterable) and \
        not isinstance(key, six.string_types):
    # Make sure we capture the values in case we've
    # been given a one-shot iterable, like a generator.
    key = tuple(key)
    for sub_key in key:
        if sub_key >= size or sub_key < -size:
            msg = 'index {0} is out of bounds for axis {1}' \
                  ' with size {2}'.format(sub_key, axis, size)
            raise IndexError(msg)
else:
    raise TypeError('invalid key {!r}'.format(key))
return key |
<SYSTEM_TASK:>
Return a key of type int, slice, or tuple that represents the
<END_TASK>
<USER_TASK:>
Description:
def _remap_new_key(self, indices, new_key, axis):
"""
Return a key of type int, slice, or tuple that represents the
combination of new_key with the given indices.
Raises IndexError/TypeError for invalid keys.
""" |
size = len(indices)
if _is_scalar(new_key):
if new_key >= size or new_key < -size:
msg = 'index {0} is out of bounds for axis {1}' \
' with size {2}'.format(new_key, axis, size)
raise IndexError(msg)
result_key = indices[new_key]
elif isinstance(new_key, slice):
result_key = indices.__getitem__(new_key)
elif isinstance(new_key, np.ndarray) and \
new_key.dtype == np.dtype('bool'):
# Numpy boolean indexing.
if new_key.size > size:
msg = 'too many boolean indices. Boolean index array ' \
'of size {0} is greater than axis {1} with ' \
'size {2}'.format(new_key.size, axis, size)
raise IndexError(msg)
result_key = tuple(np.array(indices)[new_key])
elif isinstance(new_key, collections.Iterable) and \
not isinstance(new_key, six.string_types):
# Make sure we capture the values in case we've
# been given a one-shot iterable, like a generator.
new_key = tuple(new_key)
for sub_key in new_key:
if sub_key >= size or sub_key < -size:
msg = 'index {0} is out of bounds for axis {1}' \
' with size {2}'.format(sub_key, axis, size)
raise IndexError(msg)
result_key = tuple(indices[key] for key in new_key)
else:
raise TypeError('invalid key {!r}'.format(new_key))
return result_key |
<SYSTEM_TASK:>
Apply the transposition to the target iterable.
<END_TASK>
<USER_TASK:>
Description:
def _apply_axes_mapping(self, target, inverse=False):
"""
Apply the transposition to the target iterable.
Parameters
----------
target - iterable
The iterable to transpose. This would be suitable for things
such as a shape as well as a list of ``__getitem__`` keys.
inverse - bool
Whether to map old dimension to new dimension (forward), or
new dimension to old dimension (inverse). Default is False
(forward).
Returns
-------
A tuple derived from target which has been ordered based on the new
axes.
""" |
if len(target) != self.ndim:
raise ValueError('The target iterable is of length {}, but '
'should be of length {}.'.format(len(target),
self.ndim))
if inverse:
axis_map = self._inverse_axes_map
else:
axis_map = self._forward_axes_map
result = [None] * self.ndim
for axis, item in enumerate(target):
result[axis_map[axis]] = item
return tuple(result) |
<SYSTEM_TASK:>
Given input chunk keys, compute what keys will be needed to put
<END_TASK>
<USER_TASK:>
Description:
def output_keys(self, source_keys):
"""
Given input chunk keys, compute what keys will be needed to put
the result into the result array.
As an example of where this gets used - when we aggregate on a
particular axis, the source keys may be ``(0:2, None:None)``, but for
an aggregation on axis 0, they would result in target values on
dimension 2 only and so be ``(None: None, )``.
""" |
keys = list(source_keys)
# Remove the aggregated axis from the keys.
del keys[self.axis]
return tuple(keys) |
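For the aggregation example in the docstring (axis 0 of a 2-D chunk), the key removal looks like this:

source_keys = (slice(0, 2), slice(None))
axis = 0
keys = list(source_keys)
del keys[axis]
print(tuple(keys))   # (slice(None, None, None),)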
<SYSTEM_TASK:>
Saves the lens currently loaded in the server to a Zemax file
<END_TASK>
<USER_TASK:>
Description:
def zSaveFile(self, fileName):
"""Saves the lens currently loaded in the server to a Zemax file """ |
cmd = "SaveFile,{}".format(fileName)
reply = self._sendDDEcommand(cmd)
return int(float(reply.rstrip())) |
<SYSTEM_TASK:>
Saves the current system to the specified file.
<END_TASK>
<USER_TASK:>
Description:
def SaveAs(self, filename):
"""Saves the current system to the specified file.
@param filename: absolute path (string)
@return: None
@raise: ValueError if path (excluding the zemax file name) is not valid
All future calls to `Save()` will use the same file.
""" |
directory, zfile = _os.path.split(filename)
if zfile.startswith('pyzos_ui_sync_file'):
    self._iopticalsystem.SaveAs(filename)
else:  # regular file
    if not _os.path.exists(directory):
        raise ValueError('{} is not valid.'.format(directory))
    else:
        self._file_to_save_on_Save = filename  # store to use in Save()
        self._iopticalsystem.SaveAs(filename) |
<SYSTEM_TASK:>
Saves the current system
<END_TASK>
<USER_TASK:>
Description:
def Save(self):
"""Saves the current system""" |
# This method is intercepted to allow ui_sync
if self._file_to_save_on_Save:
    self._iopticalsystem.SaveAs(self._file_to_save_on_Save)
else:
    self._iopticalsystem.Save() |
<SYSTEM_TASK:>
Sets the default merit function for Sequential Merit Function Editor
<END_TASK>
<USER_TASK:>
Description:
def zSetDefaultMeritFunctionSEQ(self, ofType=0, ofData=0, ofRef=0, pupilInteg=0, rings=0,
arms=0, obscuration=0, grid=0, delVignetted=False, useGlass=False,
glassMin=0, glassMax=1000, glassEdge=0, useAir=False, airMin=0,
airMax=1000, airEdge=0, axialSymm=True, ignoreLatCol=False,
addFavOper=False, startAt=1, relativeXWgt=1.0, overallWgt=1.0,
configNum=0):
"""Sets the default merit function for Sequential Merit Function Editor
Parameters
----------
ofType : integer
optimization function type (0=RMS, ...)
ofData : integer
optimization function data (0=Wavefront, 1=Spot Radius, ...)
ofRef : integer
optimization function reference (0=Centroid, ...)
pupilInteg : integer
pupil integration method (0=Gaussian Quadrature, 1=Rectangular Array)
rings : integer
rings (0=1, 1=2, 2=3, 3=4, ...)
arms : integer
arms (0=6, 1=8, 2=10, 3=12)
obscuration : real
obscuration
delVignetted : boolean
delete vignetted ?
useGlass : boolean
whether to use Glass settings for thickness boundary
glassMin : real
glass mininum thickness
glassMax : real
glass maximum thickness
glassEdge : real
glass edge thickness
useAir : boolean
whether to use Air settings for thickness boundary
airMin : real
air minimum thickness
airMax : real
air maximum thickness
airEdge : real
air edge thickness
axialSymm : boolean
assume axial symmetry
ignoreLatCol : boolean
ignore lateral color
addFavOper : boolean
add favorite operands
configNum : integer
configuration number (0=All)
startAt : integer
start at
relativeXWgt : real
relative X weight
overallWgt : real
overall weight
""" |
mfe = self.pMFE
wizard = mfe.pSEQOptimizationWizard
wizard.pType = ofType
wizard.pData = ofData
wizard.pReference = ofRef
wizard.pPupilIntegrationMethod = pupilInteg
wizard.pRing = rings
wizard.pArm = arms
wizard.pObscuration = obscuration
wizard.pGrid = grid
wizard.pIsDeleteVignetteUsed = delVignetted
wizard.pIsGlassUsed = useGlass
wizard.pGlassMin = glassMin
wizard.pGlassMax = glassMax
wizard.pGlassEdge = glassEdge
wizard.pIsAirUsed = useAir
wizard.pAirMin = airMin
wizard.pAirMax = airMax
wizard.pAirEdge = airEdge
wizard.pIsAssumeAxialSymmetryUsed = axialSymm
wizard.pIsIgnoreLateralColorUsed = ignoreLatCol
wizard.pConfiguration = configNum
wizard.pIsAddFavoriteOperandsUsed = addFavOper
wizard.pStartAt = startAt
wizard.pRelativeXWeight = relativeXWgt
wizard.pOverallWeight = overallWgt
wizard.CommonSettings.OK() |
<SYSTEM_TASK:>
Process the error labels of a dependent variable 'value' to ensure uniqueness.
<END_TASK>
<USER_TASK:>
Description:
def process_error_labels(value):
""" Process the error labels of a dependent variable 'value' to ensure uniqueness. """ |
observed_error_labels = {}
for error in value.get('errors', []):
    label = error.get('label', 'error')

    if label not in observed_error_labels:
        observed_error_labels[label] = 0
    observed_error_labels[label] += 1

    if observed_error_labels[label] > 1:
        error['label'] = label + '_' + str(observed_error_labels[label])

        # append "_1" to first error label that has a duplicate
        if observed_error_labels[label] == 2:
            for error1 in value.get('errors', []):
                error1_label = error1.get('label', 'error')
                if error1_label == label:
                    error1['label'] = label + "_1"
                    break |
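A hypothetical usage sketch (the values are made up): two errors sharing the label 'sys' are renamed to 'sys_1' and 'sys_2', while unique labels are left alone.

value = {'value': 1.0,
         'errors': [{'label': 'sys', 'symerror': 0.1},
                    {'label': 'sys', 'symerror': 0.2},
                    {'label': 'stat', 'symerror': 0.05}]}
process_error_labels(value)
print([e['label'] for e in value['errors']])   # ['sys_1', 'sys_2', 'stat']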
<SYSTEM_TASK:>
Returns a raw string representation of text
<END_TASK>
<USER_TASK:>
Description:
def raw(text):
"""Returns a raw string representation of text""" |
new_string = ''
for char in text:
    try:
        new_string += escape_dict[char]
    except KeyError:
        new_string += char
return new_string |
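escape_dict is defined elsewhere in the module; assuming it maps control characters to their escaped spellings, a call behaves like this (the dictionary contents here are illustrative):

escape_dict = {'\n': '\\n', '\t': '\\t'}
print(raw('a\tb\n'))   # prints: a\tb\n  (backslash sequences, not a real tab/newline)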
<SYSTEM_TASK:>
Run the main windows message loop.
<END_TASK>
<USER_TASK:>
Description:
def WinMSGLoop():
"""Run the main windows message loop.""" |
LPMSG = POINTER(MSG)
LRESULT = c_ulong
GetMessage = get_winfunc("user32", "GetMessageW", BOOL, (LPMSG, HWND, UINT, UINT))
TranslateMessage = get_winfunc("user32", "TranslateMessage", BOOL, (LPMSG,))
# restype = LRESULT
DispatchMessage = get_winfunc("user32", "DispatchMessageW", LRESULT, (LPMSG,))
msg = MSG()
lpmsg = byref(msg)
while GetMessage(lpmsg, HWND(), 0, 0) > 0:
TranslateMessage(lpmsg)
DispatchMessage(lpmsg) |
<SYSTEM_TASK:>
Request DDE client
<END_TASK>
<USER_TASK:>
Description:
def Request(self, item, timeout=None):
"""Request DDE client
timeout in seconds
Note ... handle the exception within this function.
""" |
if not timeout:
    timeout = self.ddetimeout
try:
    reply = self.ddec.request(item, int(timeout * 1000))  # convert timeout into milliseconds
except DDEError:
    err_str = str(sys.exc_info()[1])
    error = err_str[err_str.find('err=') + 4:err_str.find('err=') + 10]
    if error == hex(DMLERR_DATAACKTIMEOUT):
        print("TIMEOUT REACHED. Please use a higher timeout.\n")
    if sys.version_info > (3, 0):  # this is only evaluated in case of an error
        reply = b'-998'  # Timeout error value
    else:
        reply = '-998'  # Timeout error value
return reply |
<SYSTEM_TASK:>
Execute a DDE command.
<END_TASK>
<USER_TASK:>
Description:
def execute(self, command):
"""Execute a DDE command.""" |
pData = c_char_p(command)
cbData = DWORD(len(command) + 1)
hDdeData = DDE.ClientTransaction(pData, cbData, self._hConv, HSZ(), CF_TEXT, XTYP_EXECUTE, TIMEOUT_ASYNC, LPDWORD())
if not hDdeData:
raise DDEError("Unable to send command", self._idInst)
DDE.FreeDataHandle(hDdeData) |
<SYSTEM_TASK:>
Get a limited set of comments for a given object.
<END_TASK>
<USER_TASK:>
Description:
def get_molo_comments(parser, token):
"""
Get a limited set of comments for a given object.
Defaults to a limit of 5. Setting the limit to -1 disables limiting.
Set the number of comments with the optional 'limit' and 'child_limit' arguments.
usage:
{% get_molo_comments for object as variable_name %}
{% get_molo_comments for object as variable_name limit amount %}
{% get_molo_comments for object as variable_name limit amount child_limit amount %} # noqa
""" |
keywords = token.contents.split()
if len(keywords) != 5 and len(keywords) != 7 and len(keywords) != 9:
    raise template.TemplateSyntaxError(
        "'%s' tag takes exactly 2, 4 or 6 arguments" % (keywords[0],))
if keywords[1] != 'for':
    raise template.TemplateSyntaxError(
        "first argument to '%s' tag must be 'for'" % (keywords[0],))
if keywords[3] != 'as':
    raise template.TemplateSyntaxError(
        "third argument to '%s' tag must be 'as'" % (keywords[0],))
if len(keywords) > 5 and keywords[5] != 'limit':
    raise template.TemplateSyntaxError(
        "fifth argument to '%s' tag must be 'limit'" % (keywords[0],))
if len(keywords) == 7:
    return GetMoloCommentsNode(keywords[2], keywords[4], keywords[6])
if len(keywords) > 7 and keywords[7] != 'child_limit':
    raise template.TemplateSyntaxError(
        "seventh argument to '%s' tag must be 'child_limit'"
        % (keywords[0],))
if len(keywords) > 7:
    return GetMoloCommentsNode(keywords[2], keywords[4],
                               keywords[6], keywords[8])
return GetMoloCommentsNode(keywords[2], keywords[4]) |
<SYSTEM_TASK:>
Get a limited set of comments for a given object.
<END_TASK>
<USER_TASK:>
Description:
def get_comments_content_object(parser, token):
"""
Get a limited set of comments for a given object.
Defaults to a limit of 5. Setting the limit to -1 disables limiting.
usage:
{% get_comments_content_object for form_object as variable_name %}
""" |
keywords = token.contents.split()
if len(keywords) != 5:
    raise template.TemplateSyntaxError(
        "'%s' tag takes exactly 2 arguments" % (keywords[0],))
if keywords[1] != 'for':
    raise template.TemplateSyntaxError(
        "first argument to '%s' tag must be 'for'" % (keywords[0],))
if keywords[3] != 'as':
    raise template.TemplateSyntaxError(
        "third argument to '%s' tag must be 'as'" % (keywords[0],))
return GetCommentsContentObject(keywords[2], keywords[4]) |
<SYSTEM_TASK:>
Flags a comment on GET.
<END_TASK>
<USER_TASK:>
Description:
def report(request, comment_id):
"""
Flags a comment on GET.
Redirects to whatever is provided in request.REQUEST['next'].
""" |
comment = get_object_or_404(
    django_comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
if comment.parent is not None:
    messages.info(request, _('Reporting comment replies is not allowed.'))
else:
    perform_flag(request, comment)
    messages.info(request, _('The comment has been reported.'))
next = request.GET.get('next') or comment.get_absolute_url()
return HttpResponseRedirect(next) |
<SYSTEM_TASK:>
Allows for posting of a Molo Comment, this allows comments to
<END_TASK>
<USER_TASK:>
Description:
def post_molo_comment(request, next=None, using=None):
"""
Allows for posting of a Molo Comment, this allows comments to
be set with the "user_name" as "Anonymous"
""" |
data = request.POST.copy()
if 'submit_anonymously' in data:
    data['name'] = 'Anonymous'
# ensure we always set an email
data['email'] = request.user.email or '[email protected]'
# replace with our changed POST data
request.POST = data
return post_comment(request, next=next, using=using) |
<SYSTEM_TASK:>
Convert a 2D feature to a 3D feature by sampling a raster
<END_TASK>
<USER_TASK:>
Description:
def drape(raster, feature):
"""Convert a 2D feature to a 3D feature by sampling a raster
Parameters:
raster (rasterio): raster to provide the z coordinate
feature (dict): fiona feature record to convert
Returns:
result (Point or Linestring): shapely Point or LineString of xyz coordinate triples
""" |
coords = feature['geometry']['coordinates']
geom_type = feature['geometry']['type']
if geom_type == 'Point':
    xyz = sample(raster, [coords])
    result = Point(xyz[0])
elif geom_type == 'LineString':
    xyz = sample(raster, coords)
    points = [Point(x, y, z) for x, y, z in xyz]
    result = LineString(points)
else:
    logging.error('drape not implemented for {}'.format(geom_type))
return result |
<SYSTEM_TASK:>
Sample a raster at given coordinates
<END_TASK>
<USER_TASK:>
Description:
def sample(raster, coords):
"""Sample a raster at given coordinates
Given a list of coordinates, return a list of x,y,z triples with z coordinates sampled from an input raster
Parameters:
raster (rasterio): raster dataset to sample
coords: array of tuples containing coordinate pairs (x,y) or triples (x,y,z)
Returns:
result: array of tuples containing coordinate triples (x,y,z)
""" |
if len(coords[0]) == 3:
    logging.info('Input is a 3D geometry, z coordinate will be updated.')
    z = raster.sample([(x, y) for x, y, z in coords], indexes=raster.indexes)
else:
    z = raster.sample(coords, indexes=raster.indexes)
result = [(vert[0], vert[1], vert_z) for vert, vert_z in zip(coords, z)]
return result |
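A minimal sketch of the coordinate/z merge with the raster lookup stubbed out (single-band case; the values are made up):

coords = [(0.0, 0.0), (1.0, 1.0)]
sampled_z = [[10.0], [12.5]]   # stand-in for what raster.sample(...) would yield per point
result = [(x, y, z) for (x, y), (z,) in zip(coords, sampled_z)]
print(result)   # [(0.0, 0.0, 10.0), (1.0, 1.0, 12.5)]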
<SYSTEM_TASK:>
Converts 2D geometries to 3D using GEOS sample through fiona.
<END_TASK>
<USER_TASK:>
Description:
def cli(source_f, raster_f, output, verbose):
"""
Converts 2D geometries to 3D using GEOS sample through fiona.
\b
Example:
drape point.shp elevation.tif -o point_z.shp
""" |
with fiona.open(source_f, 'r') as source:
    source_driver = source.driver
    source_crs = source.crs
    sink_schema = source.schema.copy()
    source_geom = source.schema['geometry']

    if source_geom == 'Point':
        sink_schema['geometry'] = '3D Point'
    elif source_geom == 'LineString':
        sink_schema['geometry'] = '3D LineString'
    elif source_geom == '3D Point' or source_geom == '3D LineString':
        pass
    else:
        raise click.BadParameter("Source geometry type {} not implemented".format(source_geom))

    with rasterio.open(raster_f) as raster:
        if source_crs != raster.crs:
            raise click.BadParameter("Features and raster have different CRS.")
        if raster.count > 1:
            warnings.warn("Found {0} bands in {1}, expected a single band raster".format(raster.count, raster_f))
        supported = ['int16', 'int32', 'float32', 'float64']
        if raster.dtypes[0] not in supported:
            warnings.warn("Found {0} type in {1}, expected one of {2}".format(raster.dtypes[0], raster_f, supported))

        with fiona.open(
                output, 'w',
                driver=source_driver,
                crs=source_crs,
                schema=sink_schema) as sink:
            for feature in source:
                try:
                    feature_z = drapery.drape(raster, feature)
                    sink.write({
                        'geometry': mapping(feature_z),
                        'properties': feature['properties'],
                    })
                except Exception:
                    logging.exception("Error processing feature %s:", feature['id']) |
<SYSTEM_TASK:>
Launch a Python exception from an error that took place in the browser.
<END_TASK>
<USER_TASK:>
Description:
def launch_exception(message):
"""
Launch a Python exception from an error that took place in the browser.
message format:
- name: str
- description: str
""" |
error_name = message['name']
error_descr = message['description']
mapping = {
    'ReferenceError': NameError,
}
if message['name'] in mapping:
    raise mapping[error_name](error_descr)
else:
    raise Exception('{}: {}'.format(error_name, error_descr)) |
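A hypothetical call, showing how a browser-side ReferenceError surfaces as a Python NameError:

try:
    launch_exception({'name': 'ReferenceError', 'description': 'foo is not defined'})
except NameError as exc:
    print(exc)   # foo is not defined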
<SYSTEM_TASK:>
Undoes the work of flatten_dict
<END_TASK>
<USER_TASK:>
Description:
def unflatten_dct(obj):
"""
Undoes the work of flatten_dict
@param {Object} obj 1-D object in the form returned by flattenObj
@returns {Object} The original
:param obj:
:return:
""" |
def reduce_func(accum, key_string_and_value):
key_string = key_string_and_value[0]
value = key_string_and_value[1]
item_key_path = key_string_to_lens_path(key_string)
# All but the last segment gives us the item container len
container_key_path = init(item_key_path)
container = unless(
# If the path has any length (not []) and the value is set, don't do anything
both(always(length(container_key_path)), fake_lens_path_view(container_key_path)),
# Else we are at the top level, so use the existing accum or create a [] or {}
# depending on if our item key is a number or not
lambda x: default_to(
if_else(
lambda segment: segment.isnumeric(),
always([]),
always({})
)(head(item_key_path))
)(x)
)(accum)
# Finally set the container at the itemLensPath
return fake_lens_path_set(
item_key_path,
value,
container
)
return compose(
reduce(
reduce_func,
# null initial value
None
),
to_pairs
)(obj) |
<SYSTEM_TASK:>
Override change view to add extra context enabling moderate tool.
<END_TASK>
<USER_TASK:>
Description:
def change_view(self, request, object_id, form_url='', extra_context=None):
"""
Override change view to add extra context enabling moderate tool.
""" |
context = {
    'has_moderate_tool': True
}
if extra_context:
    context.update(extra_context)
return super(AdminModeratorMixin, self).change_view(
    request=request,
    object_id=object_id,
    form_url=form_url,
    extra_context=context
) |
<SYSTEM_TASK:>
Add additional moderate url.
<END_TASK>
<USER_TASK:>
Description:
def get_urls(self):
"""
Add additional moderate url.
""" |
from django.conf.urls import url
urls = super(AdminModeratorMixin, self).get_urls()
info = self.model._meta.app_label, self.model._meta.model_name
return [
    url(r'^(.+)/moderate/$',
        self.admin_site.admin_view(self.moderate_view),
        name='%s_%s_moderate' % info),
] + urls |
<SYSTEM_TASK:>
Return a string identifying the operating system the application
<END_TASK>
<USER_TASK:>
Description:
def operating_system():
"""Return a string identifying the operating system the application
is running on.
:rtype: str
""" |
if platform.system() == 'Darwin':
    return 'OS X Version %s' % platform.mac_ver()[0]
distribution = ' '.join(platform.linux_distribution()).strip()
os_platform = platform.platform(True, True)
if distribution:
    os_platform += ' (%s)' % distribution
return os_platform |
<SYSTEM_TASK:>
Daemonize if the process is not already running.
<END_TASK>
<USER_TASK:>
Description:
def start(self):
"""Daemonize if the process is not already running.""" |
if self._is_already_running():
LOGGER.error('Is already running')
sys.exit(1)
try:
self._daemonize()
self.controller.start()
except Exception as error:
sys.stderr.write('\nERROR: Startup of %s Failed\n.' %
sys.argv[0].split('/')[-1])
exception_log = self._get_exception_log_path()
if exception_log:
with open(exception_log, 'a') as handle:
timestamp = datetime.datetime.now().isoformat()
handle.write('{:->80}\n'.format(' [START]'))
handle.write('%s Exception [%s]\n' % (sys.argv[0],
timestamp))
handle.write('{:->80}\n'.format(' [INFO]'))
handle.write('Interpreter: %s\n' % sys.executable)
handle.write('CLI arguments: %s\n' % ' '.join(sys.argv))
handle.write('Exception: %s\n' % error)
handle.write('Traceback:\n')
output = traceback.format_exception(*sys.exc_info())
_dev_null = [(handle.write(line),
sys.stdout.write(line)) for line in output]
handle.write('{:->80}\n'.format(' [END]'))
handle.flush()
sys.stderr.write('\nException log: %s\n\n' % exception_log)
sys.exit(1) |
<SYSTEM_TASK:>
Return the group id that the daemon will run with
<END_TASK>
<USER_TASK:>
Description:
def gid(self):
"""Return the group id that the daemon will run with
:rtype: int
""" |
if not self._gid:
    if self.controller.config.daemon.group:
        self._gid = grp.getgrnam(self.config.daemon.group).gr_gid
    else:
        self._gid = os.getgid()
return self._gid |
<SYSTEM_TASK:>
Return the user id that the process will run as
<END_TASK>
<USER_TASK:>
Description:
def uid(self):
"""Return the user id that the process will run as
:rtype: int
""" |
if not self._uid:
    if self.config.daemon.user:
        self._uid = pwd.getpwnam(self.config.daemon.user).pw_uid
    else:
        self._uid = os.getuid()
return self._uid |
<SYSTEM_TASK:>
Return the normalized path for the exception log, raising an
<END_TASK>
<USER_TASK:>
Description:
def _get_exception_log_path():
"""Return the normalized path for the connection log, raising an
exception if it can not written to.
:return: str
""" |
app = sys.argv[0].split('/')[-1]
for exception_log in ['/var/log/%s.errors' % app,
                      '/var/tmp/%s.errors' % app,
                      '/tmp/%s.errors' % app]:
    if os.access(path.dirname(exception_log), os.W_OK):
        return exception_log
return None |
<SYSTEM_TASK:>
Return the normalized path for the pidfile, raising an
<END_TASK>
<USER_TASK:>
Description:
def _get_pidfile_path(self):
"""Return the normalized path for the pidfile, raising an
exception if it cannot be written to.
:return: str
:raises: ValueError
:raises: OSError
""" |
if self.config.daemon.pidfile:
    pidfile = path.abspath(self.config.daemon.pidfile)
    if not os.access(path.dirname(pidfile), os.W_OK):
        raise ValueError('Cannot write to specified pid file path'
                         ' %s' % pidfile)
    return pidfile
app = sys.argv[0].split('/')[-1]
for pidfile in ['%s/pids/%s.pid' % (os.getcwd(), app),
                '/var/run/%s.pid' % app,
                '/var/run/%s/%s.pid' % (app, app),
                '/var/tmp/%s.pid' % app,
                '/tmp/%s.pid' % app,
                '%s.pid' % app]:
    if os.access(path.dirname(pidfile), os.W_OK):
        return pidfile
raise OSError('Could not find an appropriate place for a pid file') |
<SYSTEM_TASK:>
Check to see if the process is running, first looking for a pidfile,
<END_TASK>
<USER_TASK:>
Description:
def _is_already_running(self):
"""Check to see if the process is running, first looking for a pidfile,
then shelling out in either case, removing a pidfile if it exists but
the process is not running.
""" |
# Look for the pidfile, if exists determine if the process is alive
pidfile = self._get_pidfile_path()
if os.path.exists(pidfile):
pid = open(pidfile).read().strip()
try:
os.kill(int(pid), 0)
sys.stderr.write('Process already running as pid # %s\n' % pid)
return True
except OSError as error:
LOGGER.debug('Found pidfile, no process # %s', error)
os.unlink(pidfile)
# Check the os for a process that is not this one that looks the same
pattern = ' '.join(sys.argv)
pattern = '[%s]%s' % (pattern[0], pattern[1:])
try:
output = subprocess.check_output('ps a | grep "%s"' % pattern,
shell=True)
except AttributeError:
# Python 2.6
stdin, stdout, stderr = os.popen3('ps a | grep "%s"' % pattern)
output = stdout.read()
except subprocess.CalledProcessError:
return False
pids = [int(pid) for pid in (re.findall(r'^([0-9]+)\s',
output.decode('latin-1')))]
if os.getpid() in pids:
pids.remove(os.getpid())
if not pids:
return False
if len(pids) == 1:
pids = pids[0]
sys.stderr.write('Process already running as pid # %s\n' % pids)
return True |
<SYSTEM_TASK:>
Remove the pid file from the filesystem
<END_TASK>
<USER_TASK:>
Description:
def _remove_pidfile(self):
"""Remove the pid file from the filesystem""" |
LOGGER.debug('Removing pidfile: %s', self.pidfile_path)
try:
    os.unlink(self.pidfile_path)
except OSError:
    pass |
<SYSTEM_TASK:>
Write the pid file out with the process number in the pid file
<END_TASK>
<USER_TASK:>
Description:
def _write_pidfile(self):
"""Write the pid file out with the process number in the pid file""" |
LOGGER.debug('Writing pidfile: %s', self.pidfile_path)
with open(self.pidfile_path, "w") as handle:
    handle.write(str(os.getpid())) |
<SYSTEM_TASK:>
Convert a string from snake case to camel case. For example, "some_var" would become "someVar".
<END_TASK>
<USER_TASK:>
Description:
def to_camel_case(snake_case_string):
"""
Convert a string from snake case to camel case. For example, "some_var" would become "someVar".
:param snake_case_string: Snake-cased string to convert to camel case.
:returns: Camel-cased version of snake_case_string.
""" |
parts = snake_case_string.lstrip('_').split('_')
return parts[0] + ''.join([i.title() for i in parts[1:]]) |
<SYSTEM_TASK:>
Convert a string from snake case to camel case with the first letter capitalized. For example, "some_var"
<END_TASK>
<USER_TASK:>
Description:
def to_capitalized_camel_case(snake_case_string):
"""
Convert a string from snake case to camel case with the first letter capitalized. For example, "some_var"
would become "SomeVar".
:param snake_case_string: Snake-cased string to convert to camel case.
:returns: Camel-cased version of snake_case_string.
""" |
parts = snake_case_string.split('_')
return ''.join([i.title() for i in parts]) |
<SYSTEM_TASK:>
Convert a string from camel case to snake case. From example, "someVar" would become "some_var".
<END_TASK>
<USER_TASK:>
Description:
def to_snake_case(camel_case_string):
"""
Convert a string from camel case to snake case. From example, "someVar" would become "some_var".
:param camel_case_string: Camel-cased string to convert to snake case.
:return: Snake-cased version of camel_case_string.
""" |
first_pass = _first_camel_case_regex.sub(r'\1_\2', camel_case_string)
return _second_camel_case_regex.sub(r'\1_\2', first_pass).lower() |
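A quick usage sketch; the module-level regexes are not shown above, so they are assumed here to be the usual two-pass camel-case pattern:

import re

_first_camel_case_regex = re.compile(r'(.)([A-Z][a-z]+)')
_second_camel_case_regex = re.compile(r'([a-z0-9])([A-Z])')

print(to_camel_case('some_var'))      # someVar
print(to_snake_case('someHTTPVar'))   # some_http_var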
<SYSTEM_TASK:>
Make a copy of a dictionary with all keys converted to snake case. This just calls to_snake_case on
<END_TASK>
<USER_TASK:>
Description:
def keys_to_snake_case(camel_case_dict):
"""
Make a copy of a dictionary with all keys converted to snake case. This just calls to_snake_case on
each of the keys in the dictionary and returns a new dictionary.
:param camel_case_dict: Dictionary with the keys to convert.
:type camel_case_dict: Dictionary.
:return: Dictionary with the keys converted to snake case.
""" |
return dict((to_snake_case(key), value) for (key, value) in camel_case_dict.items()) |
<SYSTEM_TASK:>
List the deployed lambda functions and print configuration.
<END_TASK>
<USER_TASK:>
Description:
def list_functions(awsclient):
"""List the deployed lambda functions and print configuration.
:return: exit_code
""" |
client_lambda = awsclient.get_client('lambda')
response = client_lambda.list_functions()
for function in response['Functions']:
    log.info(function['FunctionName'])
    log.info('\t' 'Memory: ' + str(function['MemorySize']))
    log.info('\t' 'Timeout: ' + str(function['Timeout']))
    log.info('\t' 'Role: ' + str(function['Role']))
    log.info('\t' 'Current Version: ' + str(function['Version']))
    log.info('\t' 'Last Modified: ' + str(function['LastModified']))
    log.info('\t' 'CodeSha256: ' + str(function['CodeSha256']))
    log.info('\n')
return 0 |
<SYSTEM_TASK:>
Create or update a lambda function.
<END_TASK>
<USER_TASK:>
Description:
def deploy_lambda(awsclient, function_name, role, handler_filename,
handler_function,
folders, description, timeout, memory, subnet_ids=None,
security_groups=None, artifact_bucket=None,
zipfile=None,
fail_deployment_on_unsuccessful_ping=False,
runtime='python2.7', settings=None, environment=None,
retention_in_days=None
):
"""Create or update a lambda function.
:param awsclient:
:param function_name:
:param role:
:param handler_filename:
:param handler_function:
:param folders:
:param description:
:param timeout:
:param memory:
:param subnet_ids:
:param security_groups:
:param artifact_bucket:
:param zipfile:
:param environment: environment variables
:param retention_in_days: retention time of the cloudwatch logs
:return: exit_code
""" |
# TODO: the signature of this function is too big, clean this up
# also consolidate create, update, config and add waiters!
if lambda_exists(awsclient, function_name):
function_version = _update_lambda(awsclient, function_name,
handler_filename,
handler_function, folders, role,
description, timeout, memory,
subnet_ids, security_groups,
artifact_bucket=artifact_bucket,
zipfile=zipfile,
environment=environment
)
else:
if not zipfile:
return 1
log.info('buffer size: %0.2f MB' % float(len(zipfile) / 1000000.0))
function_version = _create_lambda(awsclient, function_name, role,
handler_filename, handler_function,
folders, description, timeout,
memory, subnet_ids, security_groups,
artifact_bucket, zipfile,
runtime=runtime,
environment=environment)
# configure cloudwatch logs
if retention_in_days:
log_group_name = '/aws/lambda/%s' % function_name
put_retention_policy(awsclient, log_group_name, retention_in_days)
pong = ping(awsclient, function_name, version=function_version)
if 'alive' in str(pong):
log.info(colored.green('Great you\'re already accepting a ping ' +
'in your Lambda function'))
elif fail_deployment_on_unsuccessful_ping and not 'alive' in pong:
log.info(colored.red('Pinging your lambda function failed'))
# we do not deploy alias and fail command
return 1
else:
log.info(colored.red('Please consider adding a reaction to a ' +
'ping event to your lambda function'))
_deploy_alias(awsclient, function_name, function_version)
return 0 |
<SYSTEM_TASK:>
Print out cloudformation metrics for a lambda function.
<END_TASK>
<USER_TASK:>
Description:
def get_metrics(awsclient, name):
"""Print out cloudformation metrics for a lambda function.
:param awsclient
:param name: name of the lambda function
:return: exit_code
""" |
metrics = ['Duration', 'Errors', 'Invocations', 'Throttles']
client_cw = awsclient.get_client('cloudwatch')
for metric in metrics:
response = client_cw.get_metric_statistics(
Namespace='AWS/Lambda',
MetricName=metric,
Dimensions=[
{
'Name': 'FunctionName',
'Value': name
},
],
# StartTime=datetime.now() + timedelta(days=-1),
# EndTime=datetime.now(),
StartTime=maya.now().subtract(days=1).datetime(),
EndTime=maya.now().datetime(),
Period=3600,
Statistics=[
'Sum',
],
Unit=unit(metric)
)
log.info('\t%s %s' % (metric,
repr(aggregate_datapoints(response['Datapoints']))))
return 0 |
<SYSTEM_TASK:>
Helper to stop ec2 instances.
<END_TASK>
<USER_TASK:>
Description:
def _stop_ec2_instances(awsclient, ec2_instances, wait=True):
"""Helper to stop ec2 instances.
By default it waits for instances to stop.
:param awsclient:
:param ec2_instances:
:param wait: waits for instances to stop
:return:
""" |
if len(ec2_instances) == 0:
return
client_ec2 = awsclient.get_client('ec2')
# get running instances
running_instances = all_pages(
client_ec2.describe_instance_status,
{
'InstanceIds': ec2_instances,
'Filters': [{
'Name': 'instance-state-name',
'Values': ['pending', 'running']
}]
},
lambda r: [i['InstanceId'] for i in r.get('InstanceStatuses', [])],
)
if running_instances:
log.info('Stopping EC2 instances: %s', running_instances)
client_ec2.stop_instances(InstanceIds=running_instances)
if wait:
# wait for instances to stop
waiter_inst_stopped = client_ec2.get_waiter('instance_stopped')
waiter_inst_stopped.wait(InstanceIds=running_instances) |
<SYSTEM_TASK:>
Helper to start ec2 instances
<END_TASK>
<USER_TASK:>
Description:
def _start_ec2_instances(awsclient, ec2_instances, wait=True):
"""Helper to start ec2 instances
:param awsclient:
:param ec2_instances:
:param wait: waits for instances to start
:return:
""" |
if len(ec2_instances) == 0:
return
client_ec2 = awsclient.get_client('ec2')
# get stopped instances
stopped_instances = all_pages(
client_ec2.describe_instance_status,
{
'InstanceIds': ec2_instances,
'Filters': [{
'Name': 'instance-state-name',
'Values': ['stopping', 'stopped']
}],
'IncludeAllInstances': True
},
lambda r: [i['InstanceId'] for i in r.get('InstanceStatuses', [])],
)
if stopped_instances:
# start all stopped instances
log.info('Starting EC2 instances: %s', stopped_instances)
client_ec2.start_instances(InstanceIds=stopped_instances)
if wait:
# wait for instances to come up
waiter_inst_running = client_ec2.get_waiter('instance_running')
waiter_inst_running.wait(InstanceIds=stopped_instances)
# wait for status checks
waiter_status_ok = client_ec2.get_waiter('instance_status_ok')
waiter_status_ok.wait(InstanceIds=stopped_instances) |
<SYSTEM_TASK:>
helper to select dbinstances.
<END_TASK>
<USER_TASK:>
Description:
def _filter_db_instances_by_status(awsclient, db_instances, status_list):
"""helper to select dbinstances.
:param awsclient:
:param db_instances:
:param status_list:
:return: list of db_instances that match the filter
""" |
client_rds = awsclient.get_client('rds')
db_instances_with_status = []
for db in db_instances:
    response = client_rds.describe_db_instances(
        DBInstanceIdentifier=db
    )
    for entry in response.get('DBInstances', []):
        if entry['DBInstanceStatus'] in status_list:
            db_instances_with_status.append(db)
return db_instances_with_status |
<SYSTEM_TASK:>
Stop an existing stack on AWS cloud.
<END_TASK>
<USER_TASK:>
Description:
def stop_stack(awsclient, stack_name, use_suspend=False):
"""Stop an existing stack on AWS cloud.
:param awsclient:
:param stack_name:
:param use_suspend: use suspend and resume on the autoscaling group
:return: exit_code
""" |
exit_code = 0
# check for DisableStop
#disable_stop = conf.get('deployment', {}).get('DisableStop', False)
#if disable_stop:
# log.warn('\'DisableStop\' is set - nothing to do!')
#else:
if not stack_exists(awsclient, stack_name):
log.warn('Stack \'%s\' not deployed - nothing to do!', stack_name)
else:
client_cfn = awsclient.get_client('cloudformation')
client_autoscaling = awsclient.get_client('autoscaling')
client_rds = awsclient.get_client('rds')
client_ec2 = awsclient.get_client('ec2')
resources = all_pages(
client_cfn.list_stack_resources,
{ 'StackName': stack_name },
lambda r: r['StackResourceSummaries']
)
autoscaling_groups = [
r for r in resources
if r['ResourceType'] == 'AWS::AutoScaling::AutoScalingGroup'
]
# lookup all types of scaling processes
# [Launch, Terminate, HealthCheck, ReplaceUnhealthy, AZRebalance
# AlarmNotification, ScheduledActions, AddToLoadBalancer]
response = client_autoscaling.describe_scaling_process_types()
scaling_process_types = [t['ProcessName'] for t in response.get('Processes', [])]
for asg in autoscaling_groups:
# find instances in autoscaling group
ec2_instances = all_pages(
client_autoscaling.describe_auto_scaling_instances,
{},
lambda r: [i['InstanceId'] for i in r.get('AutoScalingInstances', [])
if i['AutoScalingGroupName'] == asg['PhysicalResourceId']],
)
if use_suspend:
# alternative implementation to speed up start
# only problem is that instances must survive stop & start
# suspend all autoscaling processes
log.info('Suspending all autoscaling processes for \'%s\'',
asg['LogicalResourceId'])
response = client_autoscaling.suspend_processes(
AutoScalingGroupName=asg['PhysicalResourceId'],
ScalingProcesses=scaling_process_types
)
_stop_ec2_instances(awsclient, ec2_instances)
else:
# resize autoscaling group (min, max = 0)
log.info('Resize autoscaling group \'%s\' to minSize=0, maxSize=0',
asg['LogicalResourceId'])
response = client_autoscaling.update_auto_scaling_group(
AutoScalingGroupName=asg['PhysicalResourceId'],
MinSize=0,
MaxSize=0
)
if ec2_instances:
running_instances = all_pages(
client_ec2.describe_instance_status,
{
'InstanceIds': ec2_instances,
'Filters': [{
'Name': 'instance-state-name',
'Values': ['pending', 'running']
}]
},
lambda r: [i['InstanceId'] for i in r.get('InstanceStatuses', [])],
)
if running_instances:
# wait for instances to terminate
waiter_inst_terminated = client_ec2.get_waiter('instance_terminated')
waiter_inst_terminated.wait(InstanceIds=running_instances)
# setting ECS desiredCount to zero
services = [
r for r in resources
if r['ResourceType'] == 'AWS::ECS::Service'
]
if services:
template, parameters = _get_template_parameters(awsclient, stack_name)
_stop_ecs_services(awsclient, services, template, parameters)
# stopping ec2 instances
instances = [
r['PhysicalResourceId'] for r in resources
if r['ResourceType'] == 'AWS::EC2::Instance'
]
_stop_ec2_instances(awsclient, instances)
# stopping db instances
db_instances = [
r['PhysicalResourceId'] for r in resources
if r['ResourceType'] == 'AWS::RDS::DBInstance'
]
running_db_instances = _filter_db_instances_by_status(
awsclient, db_instances, ['available']
)
for db in running_db_instances:
log.info('Stopping RDS instance \'%s\'', db)
client_rds.stop_db_instance(DBInstanceIdentifier=db)
return exit_code |
<SYSTEM_TASK:>
Helper to extract the configured MinSize, MaxSize attributes from the
<END_TASK>
<USER_TASK:>
Description:
def _get_autoscaling_min_max(template, parameters, asg_name):
"""Helper to extract the configured MinSize, MaxSize attributes from the
template.
:param template: cloudformation template (json)
:param parameters: list of {'ParameterKey': 'x1', 'ParameterValue': 'y1'}
:param asg_name: logical resource name of the autoscaling group
:return: MinSize, MaxSize
""" |
params = {e['ParameterKey']: e['ParameterValue'] for e in parameters}
asg = template.get('Resources', {}).get(asg_name, None)
if asg:
assert asg['Type'] == 'AWS::AutoScaling::AutoScalingGroup'
min = asg.get('Properties', {}).get('MinSize', None)
max = asg.get('Properties', {}).get('MaxSize', None)
if 'Ref' in min:
min = params.get(min['Ref'], None)
if 'Ref' in max:
max = params.get(max['Ref'], None)
if min and max:
return int(min), int(max) |
<SYSTEM_TASK:>
Helper to extract the configured desiredCount attribute from the
<END_TASK>
<USER_TASK:>
Description:
def _get_service_cluster_desired_count(template, parameters, service_name):
"""Helper to extract the configured desiredCount attribute from the
template.
:param template: cloudformation template (json)
:param parameters: list of {'ParameterKey': 'x1', 'ParameterValue': 'y1'}
:param service_name: logical resource name of the ECS service
:return: cluster, desiredCount
""" |
params = {e['ParameterKey']: e['ParameterValue'] for e in parameters}
service = template.get('Resources', {}).get(service_name, None)
if service:
assert service['Type'] == 'AWS::ECS::Service'
cluster = service.get('Properties', {}).get('Cluster', None)
desired_count = service.get('Properties', {}).get('DesiredCount', None)
if 'Ref' in cluster:
cluster = params.get(cluster['Ref'], None)
if not isinstance(desired_count, int) and 'Ref' in desired_count:
desired_count = params.get(desired_count['Ref'], None)
return cluster, int(desired_count) |
<SYSTEM_TASK:>
Start an existing stack on AWS cloud.
<END_TASK>
<USER_TASK:>
Description:
def start_stack(awsclient, stack_name, use_suspend=False):
"""Start an existing stack on AWS cloud.
:param awsclient:
:param stack_name:
:param use_suspend: use suspend and resume on the autoscaling group
:return: exit_code
""" |
exit_code = 0
# check for DisableStop
#disable_stop = conf.get('deployment', {}).get('DisableStop', False)
#if disable_stop:
# log.warn('\'DisableStop\' is set - nothing to do!')
#else:
if not stack_exists(awsclient, stack_name):
log.warn('Stack \'%s\' not deployed - nothing to do!', stack_name)
else:
client_cfn = awsclient.get_client('cloudformation')
client_autoscaling = awsclient.get_client('autoscaling')
client_rds = awsclient.get_client('rds')
resources = all_pages(
client_cfn.list_stack_resources,
{ 'StackName': stack_name },
lambda r: r['StackResourceSummaries']
)
autoscaling_groups = [
r for r in resources
if r['ResourceType'] == 'AWS::AutoScaling::AutoScalingGroup'
]
# lookup all types of scaling processes
# [Launch, Terminate, HealthCheck, ReplaceUnhealthy, AZRebalance
# AlarmNotification, ScheduledActions, AddToLoadBalancer]
response = client_autoscaling.describe_scaling_process_types()
scaling_process_types = [t['ProcessName'] for t in response.get('Processes', [])]
# starting db instances
db_instances = [
r['PhysicalResourceId'] for r in resources
if r['ResourceType'] == 'AWS::RDS::DBInstance'
]
stopped_db_instances = _filter_db_instances_by_status(
awsclient, db_instances, ['stopped']
)
for db in stopped_db_instances:
log.info('Starting RDS instance \'%s\'', db)
client_rds.start_db_instance(DBInstanceIdentifier=db)
# wait for db instances to become available
for db in stopped_db_instances:
waiter_db_available = client_rds.get_waiter('db_instance_available')
waiter_db_available.wait(DBInstanceIdentifier=db)
# starting ec2 instances
instances = [
r['PhysicalResourceId'] for r in resources
if r['ResourceType'] == 'AWS::EC2::Instance'
]
_start_ec2_instances(awsclient, instances)
services = [
r for r in resources
if r['ResourceType'] == 'AWS::ECS::Service'
]
if (autoscaling_groups and not use_suspend) or services:
template, parameters = _get_template_parameters(awsclient, stack_name)
# setting ECS desiredCount back
if services:
_start_ecs_services(awsclient, services, template, parameters)
for asg in autoscaling_groups:
if use_suspend:
# alternative implementation to speed up start
# only problem is that instances must survive stop & start
# find instances in autoscaling group
instances = all_pages(
client_autoscaling.describe_auto_scaling_instances,
{},
lambda r: [i['InstanceId'] for i in r.get('AutoScalingInstances', [])
if i['AutoScalingGroupName'] == asg['PhysicalResourceId']],
)
_start_ec2_instances(awsclient, instances)
# resume all autoscaling processes
log.info('Resuming all autoscaling processes for \'%s\'',
asg['LogicalResourceId'])
response = client_autoscaling.resume_processes(
AutoScalingGroupName=asg['PhysicalResourceId'],
ScalingProcesses=scaling_process_types
)
else:
# resize autoscaling group back to its original values
log.info('Resize autoscaling group \'%s\' back to original values',
asg['LogicalResourceId'])
min, max = _get_autoscaling_min_max(
template, parameters, asg['LogicalResourceId'])
response = client_autoscaling.update_auto_scaling_group(
AutoScalingGroupName=asg['PhysicalResourceId'],
MinSize=min,
MaxSize=max
)
return exit_code |
<SYSTEM_TASK:>
Property method that returns a bool specifying if the process is
<END_TASK>
<USER_TASK:>
Description:
def is_running(self):
"""Property method that returns a bool specifying if the process is
currently running. This will return true if the state is active, idle
or initializing.
:rtype: bool
""" |
return self._state in [self.STATE_ACTIVE,
self.STATE_IDLE,
self.STATE_INITIALIZING] |
<SYSTEM_TASK:>
Invoked whenever a signal is added to the stack.
<END_TASK>
<USER_TASK:>
Description:
def process_signal(self, signum):
"""Invoked whenever a signal is added to the stack.
:param int signum: The signal that was added
""" |
if signum == signal.SIGTERM:
LOGGER.info('Received SIGTERM, initiating shutdown')
self.stop()
elif signum == signal.SIGHUP:
LOGGER.info('Received SIGHUP')
if self.config.reload():
LOGGER.info('Configuration reloaded')
logging.config.dictConfig(self.config.logging)
self.on_configuration_reloaded()
elif signum == signal.SIGUSR1:
self.on_sigusr1()
elif signum == signal.SIGUSR2:
self.on_sigusr2() |
<SYSTEM_TASK:>
The core method for starting the application. Will setup logging,
<END_TASK>
<USER_TASK:>
Description:
def run(self):
"""The core method for starting the application. Will setup logging,
toggle the runtime state flag, block on loop, then call shutdown.
Redefine this method if you intend to use an IO Loop or some other
long running process.
""" |
LOGGER.info('%s v%s started', self.APPNAME, self.VERSION)
self.setup()
while not any([self.is_stopping, self.is_stopped]):
self.set_state(self.STATE_SLEEPING)
try:
signum = self.pending_signals.get(True, self.wake_interval)
except queue.Empty:
pass
else:
self.process_signal(signum)
if any([self.is_stopping, self.is_stopped]):
break
self.set_state(self.STATE_ACTIVE)
self.process() |
<SYSTEM_TASK:>
Override to implement shutdown steps.
<END_TASK>
<USER_TASK:>
Description:
def stop(self):
"""Override to implement shutdown steps.""" |
LOGGER.info('Attempting to stop the process')
self.set_state(self.STATE_STOP_REQUESTED)
# Call shutdown for classes to add shutdown steps
self.shutdown()
# Wait for the current run to finish
while self.is_running and self.is_waiting_to_stop:
LOGGER.info('Waiting for the process to finish')
time.sleep(self.SLEEP_UNIT)
# Change the state to shutting down
if not self.is_stopping:
self.set_state(self.STATE_STOPPING)
# Call a method that may be overwritten to cleanly shutdown
self.on_shutdown()
# Change our state
self.set_state(self.STATE_STOPPED) |
<SYSTEM_TASK:>
Convert a single Physical Information Object, or a list of such objects, into a JSON-encoded text file.
<END_TASK>
<USER_TASK:>
Description:
def dump(pif, fp, **kwargs):
"""
Convert a single Physical Information Object, or a list of such objects, into a JSON-encoded text file.
:param pif: Object or list of objects to serialize.
:param fp: File-like object supporting .write() method to write the serialized object(s) to.
:param kwargs: Any options available to json.dump().
""" |
return json.dump(pif, fp, cls=PifEncoder, **kwargs) |
<SYSTEM_TASK:>
Convert content in a JSON-encoded text file to a Physical Information Object or a list of such objects.
<END_TASK>
<USER_TASK:>
Description:
def load(fp, class_=None, **kwargs):
"""
Convert content in a JSON-encoded text file to a Physical Information Object or a list of such objects.
:param fp: File-like object supporting .read() method to deserialize from.
:param class_: Subclass of :class:`.Pio` to produce, if not unambiguous
:param kwargs: Any options available to json.load().
:return: Single object derived from :class:`.Pio` or a list of such object.
""" |
return loado(json.load(fp, **kwargs), class_=class_) |
<SYSTEM_TASK:>
Convert content in a JSON-encoded string to a Physical Information Object or a list of such objects.
<END_TASK>
<USER_TASK:>
Description:
def loads(s, class_=None, **kwargs):
"""
Convert content in a JSON-encoded string to a Physical Information Object or a list of such objects.
:param s: String to deserialize.
:param class_: Subclass of :class:`.Pio` to produce, if not unambiguous
:param kwargs: Any options available to json.loads().
:return: Single object derived from :class:`.Pio` or a list of such object.
""" |
return loado(json.loads(s, **kwargs), class_=class_) |
<SYSTEM_TASK:>
Convert a dictionary or a list of dictionaries into a single Physical Information Object or a list of such objects.
<END_TASK>
<USER_TASK:>
Description:
def loado(obj, class_=None):
"""
Convert a dictionary or a list of dictionaries into a single Physical Information Object or a list of such objects.
:param obj: Dictionary or list to convert to Physical Information Objects.
:param class_: Subclass of :class:`.Pio` to produce, if not unambiguous
:return: Single object derived from :class:`.Pio` or a list of such object.
""" |
if isinstance(obj, list):
return [_dict_to_pio(i, class_=class_) for i in obj]
elif isinstance(obj, dict):
return _dict_to_pio(obj, class_=class_)
else:
raise ValueError('expecting list or dictionary as outermost structure') |
<SYSTEM_TASK:>
Convert a single dictionary object to a Physical Information Object.
<END_TASK>
<USER_TASK:>
Description:
def _dict_to_pio(d, class_=None):
"""
Convert a single dictionary object to a Physical Information Object.
:param d: Dictionary to convert.
:param class_: Subclass of :class:`.Pio` to produce, if not unambiguous
:return: Single object derived from :class:`.Pio`.
""" |
d = keys_to_snake_case(d)
if class_:
return class_(**d)
if 'category' not in d:
raise ValueError('Dictionary does not contains a category field: ' + ', '.join(d.keys()))
elif d['category'] == 'system':
return System(**d)
elif d['category'] == 'system.chemical':
return ChemicalSystem(**d)
elif d['category'] == 'system.chemical.alloy': # Legacy support
return Alloy(**d)
elif d['category'] == 'system.chemical.alloy.phase': # Legacy support
return ChemicalSystem(**d)
raise ValueError('Dictionary does not contain a valid top-level category: ' + str(d['category'])) |
<SYSTEM_TASK:>
Utility function to extract command from docopt arguments.
<END_TASK>
<USER_TASK:>
Description:
def get_command(arguments):
"""Utility function to extract command from docopt arguments.
:param arguments:
:return: command
""" |
cmds = list(filter(lambda k: not (k.startswith('-') or
k.startswith('<')) and arguments[k],
arguments.keys()))
if len(cmds) != 1:
raise Exception('invalid command line!')
return cmds[0] |
<SYSTEM_TASK:>
Dispatch arguments parsed by docopt to the cmd with matching spec.
<END_TASK>
<USER_TASK:>
Description:
def dispatch(cls, arguments, **kwargs):
"""Dispatch arguments parsed by docopt to the cmd with matching spec.
:param arguments:
:param kwargs:
:return: exit_code
""" |
# first match wins
# spec: all '-' elements must match, all others are False;
# '<sth>' elements are converted to call args on order of
# appearance
#
# kwargs are provided to dispatch call and used in func call
for spec, func in cls._specs:
# if command and arguments.get(command) and match(args):
args = [] # specified args in order of appearance
options = list(filter(lambda k: k.startswith('-') and
(arguments[k] or k in spec),
arguments.keys()))
cmds = list(filter(lambda k: not (k.startswith('-') or
k.startswith('<')) and arguments[k],
arguments.keys()))
args_spec = list(filter(lambda k: k.startswith('<'), spec))
cmd_spec = list(filter(lambda k: not (k.startswith('-') or
k.startswith('<')), spec))
for element in spec:
if element.startswith('-'):
# element is an option
if element in options:
args.append(arguments.get(element, False))
options.remove(element)
elif element.startswith('<') and \
not arguments.get(element) is False:
# element is an argument
args.append(arguments.get(element))
if element in args_spec:
args_spec.remove(element)
else:
# element is a command
if element in cmds and element in cmd_spec:
cmds.remove(element)
cmd_spec.remove(element)
if options:
continue # not all options have been matched
if cmds:
continue # not all cmds from command line have been matched
if args_spec:
continue # not all args from spec have been provided
if cmd_spec:
continue # not all cmds from spec have been provided
# all options and cmds matched : call the cmd
# TODO leave out all args to deal with "empty" signature
exit_code = func(*args, **kwargs)
return exit_code
# no matching spec found
raise Exception('No implementation for spec: %s' % arguments) |
<SYSTEM_TASK:>
Return the proper representation for the given integer
<END_TASK>
<USER_TASK:>
Description:
def convert_representation(self, i):
"""
Return the proper representation for the given integer
""" |
if self.number_representation == 'unsigned':
return i
elif self.number_representation == 'signed':
if i & (1 << self.interpreter._bit_width - 1):
return -((~i + 1) & (2**self.interpreter._bit_width - 1))
else:
return i
elif self.number_representation == 'hex':
return hex(i) |
<SYSTEM_TASK:>
Set the generate random flag, unset registers and memory will return a random value.
<END_TASK>
<USER_TASK:>
Description:
def magic_generate_random(self, line):
"""
Set the generate random flag, unset registers and memory will return a random value.
Usage:
Call the magic by itself or with `true` to have registers and memory return a random value
if they are unset and read from, much like how real hardware would work.
Defaults to False, or to not generate random values
`%generate_random`
or
`%generate_random true`
or
`%generate_random false`
""" |
line = line.strip().lower()
if not line or line == 'true':
self.interpreter.generate_random = True
elif line == 'false':
self.interpreter.generate_random = False
else:
stream_content = {'name': 'stderr', 'text': "unknwon value '{}'".format(line)}
self.send_response(self.iopub_socket, 'stream', stream_content)
return {'status': 'error',
'execution_count': self.execution_count,
'ename': ValueError.__name__,
'evalue': "unknwon value '{}'".format(line),
'traceback': '???'} |
<SYSTEM_TASK:>
Postpone execution of instructions until explicitly run
<END_TASK>
<USER_TASK:>
Description:
def magic_postpone_execution(self, line):
"""
Postpone execution of instructions until explicitly run
Usage:
Call this magic with `true` or nothing to postpone execution,
or call with `false` to execute each instruction when evaluated.
This defaults to True.
Note that each cell is executed only executed after all lines in
the cell have been evaluated properly.
`%postpone_execution`
or
`%postpone_execution true`
or
`%postpone_execution false`
""" |
line = line.strip().lower()
if not line or line == 'true':
self.interpreter.postpone_execution = True
elif line == 'false':
self.interpreter.postpone_execution = False
else:
stream_content = {'name': 'stderr', 'text': "unknwon value '{}'".format(line)}
self.send_response(self.iopub_socket, 'stream', stream_content)
return {'status': 'error',
'execution_count': self.execution_count,
'ename': ValueError.__name__,
'evalue': "unknwon value '{}'".format(line),
'traceback': '???'} |
<SYSTEM_TASK:>
Print out the current value of a register
<END_TASK>
<USER_TASK:>
Description:
def magic_register(self, line):
"""
Print out the current value of a register
Usage:
Pass in the register, or a list of registers separated by spaces
A list of registeres can be entered by separating them by a hyphen
`%reg R1`
or
`%reg R0 R5 R6`
or
`%reg R8-R12`
""" |
message = ""
for reg in [i.strip() for i in line.replace(',', '').split()]:
if '-' in reg:
# We have a range (Rn-Rk)
r1, r2 = reg.split('-')
# TODO do we want to allow just numbers?
n1 = re.search(self.interpreter.REGISTER_REGEX, r1).groups()[0]
n2 = re.search(self.interpreter.REGISTER_REGEX, r2).groups()[0]
n1 = self.interpreter.convert_to_integer(n1)
n2 = self.interpreter.convert_to_integer(n2)
for i in range(n1, n2+1):
val = self.interpreter.register[r1[0] + str(i)]
val = self.convert_representation(val)
message += "{}: {}\n".format(r1[0] + str(i), val)
else:
val = self.interpreter.register[reg]
val = self.convert_representation(val)
message += "{}: {}\n".format(reg, val)
stream_content = {'name': 'stdout', 'text': message}
self.send_response(self.iopub_socket, 'stream', stream_content) |
<SYSTEM_TASK:>
Print out the current value of memory
<END_TASK>
<USER_TASK:>
Description:
def magic_memory(self, line):
"""
Print out the current value of memory
Usage:
Pass in the byte of memory to read, separated by spaced
A list of memory contents can be entered by separating them by a hyphen
`%mem 4 5`
or
`%mem 8-12`
""" |
# TODO add support for directives
message = ""
for address in [i.strip() for i in line.replace(',', '').split()]:
if '-' in address:
# We have a range (n-k)
m1, m2 = address.split('-')
n1 = re.search(self.interpreter.IMMEDIATE_NUMBER, m1).groups()[0]
n2 = re.search(self.interpreter.IMMEDIATE_NUMBER, m2).groups()[0]
n1 = self.interpreter.convert_to_integer(n1)
n2 = self.interpreter.convert_to_integer(n2)
for i in range(n1, n2 + 1):
val = self.interpreter.memory[i]
val = self.convert_representation(val)
message += "{}: {}\n".format(str(i), val)
else:
# TODO fix what is the key for memory (currently it's an int, but registers are strings, should it be the same?)
val = self.interpreter.memory[self.interpreter.convert_to_integer(address)]
val = self.convert_representation(val)
message += "{}: {}\n".format(address, val)
stream_content = {'name': 'stdout', 'text': message}
self.send_response(self.iopub_socket, 'stream', stream_content) |
<SYSTEM_TASK:>
Run the current program
<END_TASK>
<USER_TASK:>
Description:
def magic_run(self, line):
"""
Run the current program
Usage:
Call with a numbe rto run that many steps,
or call with no arguments to run to the end of the current program
`%run`
or
`%run 1`
""" |
i = float('inf')
if line.strip():
i = int(line)
try:
with warnings.catch_warnings(record=True) as w:
self.interpreter.run(i)
for warning_message in w:
# TODO should this be stdout or stderr
stream_content = {'name': 'stdout', 'text': 'Warning: ' + str(warning_message.message) + '\n'}
self.send_response(self.iopub_socket, 'stream', stream_content)
except iarm.exceptions.EndOfProgram as e:
f_name = self.interpreter.program[self.interpreter.register['PC'] - 1].__name__
f_name = f_name[:f_name.find('_')]
message = "Error in {}: ".format(f_name)
stream_content = {'name': 'stdout', 'text': message + str(e) + '\n'}
self.send_response(self.iopub_socket, 'stream', stream_content)
except Exception as e:
for err in e.args:
stream_content = {'name': 'stderr', 'text': str(err)}
self.send_response(self.iopub_socket, 'stream', stream_content)
return {'status': 'error',
'execution_count': self.execution_count,
'ename': type(e).__name__,
'evalue': str(e),
'traceback': '???'} |
<SYSTEM_TASK:>
Print out the help for magics
<END_TASK>
<USER_TASK:>
Description:
def magic_help(self, line):
"""
Print out the help for magics
Usage:
Call help with no arguments to list all magics,
or call it with a magic to print out it's help info.
`%help`
or
`%help run
""" |
line = line.strip()
if not line:
for magic in self.magics:
stream_content = {'name': 'stdout', 'text': "%{}\n".format(magic)}
self.send_response(self.iopub_socket, 'stream', stream_content)
elif line in self.magics:
# its a magic
stream_content = {'name': 'stdout', 'text': "{}\n{}".format(line, self.magics[line].__doc__)}
self.send_response(self.iopub_socket, 'stream', stream_content)
elif line in self.interpreter.ops:
# it's an instruction
stream_content = {'name': 'stdout', 'text': "{}\n{}".format(line, self.interpreter.ops[line].__doc__)}
self.send_response(self.iopub_socket, 'stream', stream_content)
else:
stream_content = {'name': 'stderr', 'text': "'{}' not a known magic or instruction".format(line)}
self.send_response(self.iopub_socket, 'stream', stream_content) |
<SYSTEM_TASK:>
Deploy API Gateway to AWS cloud.
<END_TASK>
<USER_TASK:>
Description:
def deploy_api(awsclient, api_name, api_description, stage_name, api_key,
lambdas, cache_cluster_enabled, cache_cluster_size, method_settings=None):
"""Deploy API Gateway to AWS cloud.
:param awsclient:
:param api_name:
:param api_description:
:param stage_name:
:param api_key:
:param lambdas:
:param cache_cluster_enabled:
:param cache_cluster_size:
:param method_settings:
""" |
if not _api_exists(awsclient, api_name):
if os.path.isfile(SWAGGER_FILE):
# this does an import from swagger file
# the next step does not make sense since there is a check in
# _import_from_swagger for if api is existent!
# _create_api(api_name=api_name, api_description=api_description)
_import_from_swagger(awsclient, api_name, api_description,
stage_name, lambdas)
else:
print('No swagger file (%s) found' % SWAGGER_FILE)
api = _api_by_name(awsclient, api_name)
if api is not None:
_ensure_lambdas_permissions(awsclient, lambdas, api)
_create_deployment(awsclient, api_name, stage_name, cache_cluster_enabled, cache_cluster_size)
_update_stage(awsclient, api['id'], stage_name, method_settings)
_wire_api_key(awsclient, api_name, api_key, stage_name)
else:
print('API name unknown')
else:
if os.path.isfile(SWAGGER_FILE):
_update_from_swagger(awsclient, api_name, api_description,
stage_name, lambdas)
else:
_update_api()
api = _api_by_name(awsclient, api_name)
if api is not None:
_ensure_lambdas_permissions(awsclient, lambdas, api)
_create_deployment(awsclient, api_name, stage_name, cache_cluster_enabled, cache_cluster_size)
_update_stage(awsclient, api['id'], stage_name, method_settings)
else:
print('API name unknown') |
<SYSTEM_TASK:>
Create a new API key as reference for api.conf.
<END_TASK>
<USER_TASK:>
Description:
def create_api_key(awsclient, api_name, api_key_name):
"""Create a new API key as reference for api.conf.
:param api_name:
:param api_key_name:
:return: api_key
""" |
_sleep()
client_api = awsclient.get_client('apigateway')
print('create api key: %s' % api_key_name)
response = client_api.create_api_key(
name=api_key_name,
description='Created for ' + api_name,
enabled=True
)
#print(json2table(response))
print('Add this api key \'%s\' to your api.conf' % response['id'])
return response['id'] |
<SYSTEM_TASK:>
Print the defined API keys.
<END_TASK>
<USER_TASK:>
Description:
def list_api_keys(awsclient):
"""Print the defined API keys.
""" |
_sleep()
client_api = awsclient.get_client('apigateway')
print('listing api keys')
response = client_api.get_api_keys()['items']
for item in response:
print(json2table(item)) |
<SYSTEM_TASK:>
Add custom domain to your API.
<END_TASK>
<USER_TASK:>
Description:
def deploy_custom_domain(awsclient, api_name, api_target_stage,
api_base_path, domain_name, route_53_record,
cert_name, cert_arn, hosted_zone_id, ensure_cname):
"""Add custom domain to your API.
:param api_name:
:param api_target_stage:
:param api_base_path:
:param domain_name:
:param route_53_record:
:param ssl_cert:
:param cert_name:
:param cert_arn:
:param hosted_zone_id:
:return: exit_code
""" |
api_base_path = _basepath_to_string_if_null(api_base_path)
api = _api_by_name(awsclient, api_name)
if not api:
print("Api %s does not exist, aborting..." % api_name)
# exit(1)
return 1
domain = _custom_domain_name_exists(awsclient, domain_name)
if not domain:
response = _create_custom_domain(awsclient, domain_name, cert_name, cert_arn)
cloudfront_distribution = response['distributionDomainName']
else:
response = _update_custom_domain(awsclient, domain_name, cert_name, cert_arn)
cloudfront_distribution = response['distributionDomainName']
if _base_path_mapping_exists(awsclient, domain_name, api_base_path):
_ensure_correct_base_path_mapping(awsclient, domain_name,
api_base_path, api['id'],
api_target_stage)
else:
_create_base_path_mapping(awsclient, domain_name, api_base_path,
api_target_stage, api['id'])
if ensure_cname:
record_exists, record_correct = \
_record_exists_and_correct(awsclient, hosted_zone_id,
route_53_record,
cloudfront_distribution)
if record_correct:
print('Route53 record correctly set: %s --> %s' % (route_53_record,
cloudfront_distribution))
else:
_ensure_correct_route_53_record(awsclient, hosted_zone_id,
record_name=route_53_record,
record_value=cloudfront_distribution)
print('Route53 record set: %s --> %s' % (route_53_record,
cloudfront_distribution))
else:
print('Skipping creating and checking DNS record')
return 0 |
<SYSTEM_TASK:>
Get the list of lambda functions.
<END_TASK>
<USER_TASK:>
Description:
def get_lambdas(awsclient, config, add_arn=False):
"""Get the list of lambda functions.
:param config:
:param add_arn:
:return: list containing lambda entries
""" |
if 'lambda' in config:
client_lambda = awsclient.get_client('lambda')
lambda_entries = config['lambda'].get('entries', [])
lmbdas = []
for lambda_entry in lambda_entries:
lmbda = {
'name': lambda_entry.get('name', None),
'alias': lambda_entry.get('alias', None),
'swagger_ref': lambda_entry.get('swaggerRef', None)
}
if add_arn:
_sleep()
response_lambda = client_lambda.get_function(
FunctionName=lmbda['name'])
lmbda['arn'] = response_lambda['Configuration']['FunctionArn']
lmbdas.append(lmbda)
return lmbdas
else:
return [] |
<SYSTEM_TASK:>
Helper to apply method_settings to stage
<END_TASK>
<USER_TASK:>
Description:
def _update_stage(awsclient, api_id, stage_name, method_settings):
"""Helper to apply method_settings to stage
:param awsclient:
:param api_id:
:param stage_name:
:param method_settings:
:return:
""" |
# settings docs in response: https://botocore.readthedocs.io/en/latest/reference/services/apigateway.html#APIGateway.Client.update_stage
client_api = awsclient.get_client('apigateway')
operations = _convert_method_settings_into_operations(method_settings)
if operations:
print('update method settings for stage')
_sleep()
response = client_api.update_stage(
restApiId=api_id,
stageName=stage_name,
patchOperations=operations) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.