<SYSTEM_TASK:>
Prints a table used for Fisher's exact test. Adds row, column, and grand totals.
<END_TASK>
<USER_TASK:>
Description:
def print_2x2_table(table, row_labels, col_labels, fmt="%d"):
"""
Prints a table used for Fisher's exact test. Adds row, column, and grand
totals.
:param table: The four cells of a 2x2 table: [r1c1, r1c2, r2c1, r2c2]
:param row_labels: A length-2 list of row names
:param col_labels: A length-2 list of column names
""" |
grand = sum(table)
# Separate table into components and get row/col sums
t11, t12, t21, t22 = table
# Row sums, col sums, and grand total
r1 = t11 + t12
r2 = t21 + t22
c1 = t11 + t21
c2 = t12 + t22
# Re-cast everything as the appropriate format
t11, t12, t21, t22, c1, c2, r1, r2, grand = [
fmt % i for i in [t11, t12, t21, t22, c1, c2, r1, r2, grand]]
# Construct rows and columns the long way...
rows = [
[""] + col_labels + ['total'],
[row_labels[0], t11, t12, r1],
[row_labels[1], t21, t22, r2],
['total', c1, c2, grand],
]
cols = [
[row[0] for row in rows],
[col_labels[0], t11, t21, c1],
[col_labels[1], t12, t22, c2],
['total', r1, r2, grand],
]
# Get max column width for each column; need this for nice justification
widths = []
for col in cols:
widths.append(max(len(i) for i in col))
# ReST-formatted header
sep = ['=' * i for i in widths]
# Construct the table one row at a time with nice justification
s = []
s.append(' '.join(sep))
s.append(' '.join(i.ljust(j) for i, j in zip(rows[0], widths)))
s.append(' '.join(sep))
for row in rows[1:]:
s.append(' '.join(i.ljust(j) for i, j in zip(row, widths)))
s.append(' '.join(sep) + '\n')
return "\n".join(s) |
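A short usage sketch with hypothetical counts; note that, despite its name, the function returns the formatted string rather than printing it:

# Hypothetical 2x2 counts: bound vs. upregulated genes
table = [12, 5, 29, 113]
print(print_2x2_table(
    table,
    row_labels=['bound', 'unbound'],
    col_labels=['upregulated', 'not upregulated']))
# The same arguments work for print_row_perc_table / print_col_perc_table
# (defined below) to show row- or column-wise proportions instead.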
<SYSTEM_TASK:>
Given a 2x2 table, print each cell as a fraction of its row total rather than the raw count.
<END_TASK>
<USER_TASK:>
Description:
def print_row_perc_table(table, row_labels, col_labels):
"""
Given a 2x2 table, print each cell as a fraction of its row total rather than the raw count.
""" |
r1c1, r1c2, r2c1, r2c2 = map(float, table)
row1 = r1c1 + r1c2
row2 = r2c1 + r2c2
blocks = [
(r1c1, row1),
(r1c2, row1),
(r2c1, row2),
(r2c2, row2)]
new_table = []
for cell, row in blocks:
try:
x = cell / row
except ZeroDivisionError:
x = 0
new_table.append(x)
s = print_2x2_table(new_table, row_labels, col_labels, fmt="%.2f")
s = s.splitlines(True)
del s[5]
return ''.join(s) |
<SYSTEM_TASK:>
Given a 2x2 table, print each cell as a fraction of its column total rather than the raw count.
<END_TASK>
<USER_TASK:>
Description:
def print_col_perc_table(table, row_labels, col_labels):
"""
Given a 2x2 table, print each cell as a fraction of its column total rather than the raw count.
""" |
r1c1, r1c2, r2c1, r2c2 = map(float, table)
col1 = r1c1 + r2c1
col2 = r1c2 + r2c2
blocks = [
(r1c1, col1),
(r1c2, col2),
(r2c1, col1),
(r2c2, col2)]
new_table = []
for cell, col in blocks:
try:
x = cell / col
except ZeroDivisionError:
x = 0
new_table.append(x)
s = print_2x2_table(new_table, row_labels, col_labels, fmt="%.2f")
s = s.splitlines(False)
last_space = s[0].rindex(" ")
new_s = [i[:last_space] for i in s]
return '\n'.join(new_s) |
<SYSTEM_TASK:>
Draw ``tree`` to the terminal
<END_TASK>
<USER_TASK:>
Description:
def draw(self, tree, bar_desc=None, save_cursor=True, flush=True):
"""Draw ``tree`` to the terminal
:type tree: dict
:param tree: ``tree`` should be a tree representing a hierarchy; each
key should be a string describing that hierarchy level and value
should also be ``dict`` except for leaves which should be
``BarDescriptors``. See ``BarDescriptor`` for a tree example.
:type bar_desc: BarDescriptor|NoneType
:param bar_desc: Descriptor used for the non-leaf bars that will be
drawn from ``tree``; certain attributes such as ``value``
and ``kwargs["max_value"]`` will of course be overridden
if provided.
:type flush: bool
:param flush: If this is set, output written will be flushed
:type save_cursor: bool
:param save_cursor: If this is set, cursor location will be saved before
drawing; this will OVERWRITE a previous save, so be sure to set
this accordingly (to your needs).
""" |
if save_cursor:
self.cursor.save()
tree = deepcopy(tree)
# TODO: Automatically collapse the hierarchy so that something
# will always be displayable (well, unless even the top level
# contains too many bars to display)
lines_required = self.lines_required(tree)
ensure(lines_required <= self.cursor.term.height,
LengthOverflowError,
"Terminal is not long ({} rows) enough to fit all bars "
"({} rows).".format(self.cursor.term.height, lines_required))
bar_desc = BarDescriptor(type=Bar) if not bar_desc else bar_desc
self._calculate_values(tree, bar_desc)
self._draw(tree)
if flush:
self.cursor.flush() |
<SYSTEM_TASK:>
Clear lines in terminal below current cursor position as required
<END_TASK>
<USER_TASK:>
Description:
def make_room(self, tree):
"""Clear lines in terminal below current cursor position as required
This is important to do before drawing to ensure sufficient
room at the bottom of your terminal.
:type tree: dict
:param tree: tree as described in ``BarDescriptor``
""" |
lines_req = self.lines_required(tree)
self.cursor.clear_lines(lines_req) |
<SYSTEM_TASK:>
Calculate number of lines required to draw ``tree``
<END_TASK>
<USER_TASK:>
Description:
def lines_required(self, tree, count=0):
"""Calculate number of lines required to draw ``tree``""" |
if all([
isinstance(tree, dict),
type(tree) != BarDescriptor
]):
return sum(self.lines_required(v, count=count)
for v in tree.values()) + 2
elif isinstance(tree, BarDescriptor):
if tree.get("kwargs", {}).get("title_pos") in ["left", "right"]:
return 1
else:
return 2 |
<SYSTEM_TASK:>
Calculate values for drawing bars of non-leafs in ``tree``
<END_TASK>
<USER_TASK:>
Description:
def _calculate_values(self, tree, bar_d):
"""Calculate values for drawing bars of non-leafs in ``tree``
Recurses through ``tree``, replaces ``dict``s with
``(BarDescriptor, dict)`` so ``ProgressTree._draw`` can use
the ``BarDescriptor``s to draw the tree
""" |
if all([
isinstance(tree, dict),
type(tree) != BarDescriptor
]):
# Calculate value and max_value
max_val = 0
value = 0
for k in tree:
# Get descriptor by recursing
bar_desc = self._calculate_values(tree[k], bar_d)
# Reassign to tuple of (new descriptor, tree below)
tree[k] = (bar_desc, tree[k])
value += bar_desc["value"].value
max_val += bar_desc.get("kwargs", {}).get("max_value", 100)
# Merge in values from ``bar_d`` before returning descriptor
kwargs = merge_dicts(
[bar_d.get("kwargs", {}),
dict(max_value=max_val)],
deepcopy=True
)
ret_d = merge_dicts(
[bar_d,
dict(value=Value(floor(value)), kwargs=kwargs)],
deepcopy=True
)
return BarDescriptor(ret_d)
elif isinstance(tree, BarDescriptor):
return tree
else:
raise TypeError("Unexpected type {}".format(type(tree))) |
<SYSTEM_TASK:>
Recurse through ``tree`` and draw all nodes
<END_TASK>
<USER_TASK:>
Description:
def _draw(self, tree, indent=0):
"""Recurse through ``tree`` and draw all nodes""" |
if all([
isinstance(tree, dict),
type(tree) != BarDescriptor
]):
for k, v in sorted(tree.items()):
bar_desc, subdict = v[0], v[1]
args = [self.cursor.term] + bar_desc.get("args", [])
kwargs = dict(title_pos="above", indent=indent, title=k)
kwargs.update(bar_desc.get("kwargs", {}))
b = Bar(*args, **kwargs)
b.draw(value=bar_desc["value"].value, flush=False)
self._draw(subdict, indent=indent + self.indent) |
<SYSTEM_TASK:>
Returns the features and NumPy arrays that were saved with save_features_and_arrays.
<END_TASK>
<USER_TASK:>
Description:
def load_features_and_arrays(prefix, mmap_mode='r'):
"""
Returns the features and NumPy arrays that were saved with
save_features_and_arrays.
Parameters
----------
prefix : str
Path to where data are saved
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}
Mode in which to memory-map the file. See np.load for details.
""" |
features = pybedtools.BedTool(prefix + '.features')
arrays = np.load(prefix + '.npz', mmap_mode=mmap_mode)
return features, arrays |
<SYSTEM_TASK:>
Saves NumPy arrays of processed data, along with the features that correspond to each row.
<END_TASK>
<USER_TASK:>
Description:
def save_features_and_arrays(features, arrays, prefix, compressed=False,
link_features=False, overwrite=False):
"""
Saves NumPy arrays of processed data, along with the features that
correspond to each row, to files for later use.
Two files will be saved, both starting with `prefix`:
prefix.features : a file of features. If GFF features were provided,
this will be in GFF format, if BED features were provided it will be in
BED format, and so on.
prefix.npz : A NumPy .npz file.
Parameters
----------
arrays : dict of NumPy arrays
Rows in each array should correspond to `features`. This dictionary is
passed to np.savez
features : iterable of Feature-like objects
This is usually the same features that were used to create the array in
the first place.
link_features : bool
If True, then assume that `features` is either a pybedtools.BedTool
pointing to a file, or a filename. In this case, instead of making
a copy, a symlink will be created to the original features. This helps
save disk space.
prefix : str
Path to where data will be saved.
compressed : bool
If True, saves arrays using np.savez_compressed rather than np.savez.
This will save disk space, but will be slower when accessing the data
later.
""" |
if link_features:
if isinstance(features, pybedtools.BedTool):
assert isinstance(features.fn, basestring)
features_filename = features.fn
else:
assert isinstance(features, basestring)
features_filename = features
if overwrite:
force_flag = '-f'
else:
force_flag = ''
cmds = [
'ln', '-s', force_flag, os.path.abspath(features_filename), prefix + '.features']
os.system(' '.join(cmds))
else:
pybedtools.BedTool(features).saveas(prefix + '.features')
if compressed:
np.savez_compressed(
prefix,
**arrays)
else:
np.savez(prefix, **arrays) |
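A round-trip sketch tying this to load_features_and_arrays above; the filename and array shapes are hypothetical stand-ins:

import numpy as np
import pybedtools

features = pybedtools.BedTool('features.bed')    # assumed to exist on disk
arrays = {
    'ip': np.random.rand(1000, 100),             # one row per feature (1000 is a stand-in)
    'control': np.random.rand(1000, 100),
}
save_features_and_arrays(features, arrays, prefix='mydata',
                         link_features=True, overwrite=True)

# Later: memory-map the arrays instead of reading them fully into RAM
features2, arrays2 = load_features_and_arrays('mydata', mmap_mode='r')
ip = arrays2['ip']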
<SYSTEM_TASK:>
Command that prints all device information.
<END_TASK>
<USER_TASK:>
Description:
def list_all(fritz, args):
"""Command that prints all device information.""" |
devices = fritz.get_devices()
for device in devices:
print('#' * 30)
print('name=%s' % device.name)
print(' ain=%s' % device.ain)
print(' id=%s' % device.identifier)
print(' productname=%s' % device.productname)
print(' manufacturer=%s' % device.manufacturer)
print(" present=%s" % device.present)
print(" lock=%s" % device.lock)
print(" devicelock=%s" % device.device_lock)
if device.present is False:
continue
if device.has_switch:
print(" Switch:")
print(" switch_state=%s" % device.switch_state)
if device.has_powermeter:  # power readings have their own capability flag
print(" Powermeter:")
print(" power=%s" % device.power)
print(" energy=%s" % device.energy)
print(" voltage=%s" % device.voltage)
if device.has_temperature_sensor:
print(" Temperature:")
print(" temperature=%s" % device.temperature)
print(" offset=%s" % device.offset)
if device.has_thermostat:
print(" Thermostat:")
print(" battery_low=%s" % device.battery_low)
print(" battery_level=%s" % device.battery_level)
print(" actual=%s" % device.actual_temperature)
print(" target=%s" % device.target_temperature)
print(" comfort=%s" % device.comfort_temperature)
print(" eco=%s" % device.eco_temperature)
print(" window=%s" % device.window_open)
print(" summer=%s" % device.summer_active)
print(" holiday=%s" % device.holiday_active)
if device.has_alarm:
print(" Alert:")
print(" alert=%s" % device.alert_state) |
<SYSTEM_TASK:>
Command that prints the device statistics.
<END_TASK>
<USER_TASK:>
Description:
def device_statistics(fritz, args):
"""Command that prints the device statistics.""" |
stats = fritz.get_device_statistics(args.ain)
print(stats) |
<SYSTEM_TASK:>
Split an interval into `n` roughly equal portions
<END_TASK>
<USER_TASK:>
Description:
def split_feature(f, n):
"""
Split an interval into `n` roughly equal portions
""" |
if not isinstance(n, int):
raise ValueError('n must be an integer')
orig_feature = copy(f)
step = (f.stop - f.start) // n  # floor division so range() gets an int under Python 3 as well
for i in range(f.start, f.stop, step):
f = copy(orig_feature)
start = i
stop = min(i + step, orig_feature.stop)
f.start = start
f.stop = stop
yield f
if stop == orig_feature.stop:
break |
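A quick sketch of splitting one interval into windows (coordinates are made up):

import pybedtools

feature = pybedtools.create_interval_from_list(['chr1', '100', '1100'])
for part in split_feature(feature, 4):
    print(part.start, part.stop)
# yields windows (100, 350), (350, 600), (600, 850), (850, 1100)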
<SYSTEM_TASK:>
If string, then convert to an interval; otherwise just return the input
<END_TASK>
<USER_TASK:>
Description:
def tointerval(s):
"""
If string, then convert to an interval; otherwise just return the input
""" |
if isinstance(s, basestring):
m = coord_re.search(s)
if m.group('strand'):
return pybedtools.create_interval_from_list([
m.group('chrom'),
m.group('start'),
m.group('stop'),
'.',
'0',
m.group('strand')])
else:
return pybedtools.create_interval_from_list([
m.group('chrom'),
m.group('start'),
m.group('stop'),
])
return s |
<SYSTEM_TASK:>
Get maximum width of progress bar
<END_TASK>
<USER_TASK:>
Description:
def max_width(self):
"""Get maximum width of progress bar
:rtype: int
:returns: Maximum column width of progress bar
""" |
value, unit = float(self._width_str[:-1]), self._width_str[-1]
ensure(unit in ["c", "%"], ValueError,
"Width unit must be either 'c' or '%'")
if unit == "c":
ensure(value <= self.columns, ValueError,
"Terminal only has {} columns, cannot draw "
"bar of size {}.".format(self.columns, value))
retval = value
else: # unit == "%"
ensure(0 < value <= 100, ValueError,
"value=={} does not satisfy 0 < value <= 100".format(value))
dec = value / 100
retval = dec * self.columns
return floor(retval) |
<SYSTEM_TASK:>
Check if ``term`` supports ``colors``
<END_TASK>
<USER_TASK:>
Description:
def _supports_colors(term, raise_err, colors):
"""Check if ``term`` supports ``colors``
:raises ColorUnsupportedError: This is raised if ``raise_err``
is ``True`` and a color in ``colors`` is unsupported by ``term``
:type raise_err: bool
:param raise_err: Set to ``False`` to return a ``bool`` indicating
color support rather than raising ColorUnsupportedError
:type colors: [str, ...]
""" |
    for color in colors:
        try:
            if isinstance(color, str):
                req_colors = 16 if "bright" in color else 8
                ensure(term.number_of_colors >= req_colors,
                       ColorUnsupportedError,
                       "{} is unsupported by your terminal.".format(color))
            elif isinstance(color, int):
                ensure(term.number_of_colors >= color,
                       ColorUnsupportedError,
                       "{} is unsupported by your terminal.".format(color))
        except ColorUnsupportedError as e:
            if raise_err:
                raise e
            return False
    # Report support only after *every* requested color has been checked
    return True |
<SYSTEM_TASK:>
Get string-coloring callable
<END_TASK>
<USER_TASK:>
Description:
def _get_format_callable(term, color, back_color):
"""Get string-coloring callable
Get callable for string output using ``color`` on ``back_color``
on ``term``
:param term: blessings.Terminal instance
:param color: Color that callable will color the string it's passed
:param back_color: Back color for the string
:returns: callable(s: str) -> str
""" |
if isinstance(color, str):
ensure(
any(isinstance(back_color, t) for t in [str, type(None)]),
TypeError,
"back_color must be a str or NoneType"
)
if back_color:
return getattr(term, "_".join(
[color, "on", back_color]
))
elif back_color is None:
return getattr(term, color)
elif isinstance(color, int):
return term.on_color(color)
else:
raise TypeError("Invalid type {} for color".format(
type(color)
)) |
<SYSTEM_TASK:>
Draw the progress bar
<END_TASK>
<USER_TASK:>
Description:
def draw(self, value, newline=True, flush=True):
"""Draw the progress bar
:type value: int
:param value: Progress value relative to ``self.max_value``
:type newline: bool
:param newline: If this is set, a newline will be written after drawing
""" |
# This is essentially SIGWINCH handling without actually handling the
# signal; cleanly redrawing on a resize is difficult and out of the
# intended scope of this class. We *can*, however, make the next draw
# correct by re-measuring the terminal, since most of the layout comes
# from dynamically calculated properties.
self._measure_terminal()
# To avoid zero division, set amount_complete to 100% if max_value has been stupidly set to 0
amount_complete = 1.0 if self.max_value == 0 else value / self.max_value
fill_amount = int(floor(amount_complete * self.max_width))
empty_amount = self.max_width - fill_amount
# e.g., '10/20' if 'fraction' or '50%' if 'percentage'
amount_complete_str = (
u"{}/{}".format(value, self.max_value)
if self._num_rep == "fraction" else
u"{}%".format(int(floor(amount_complete * 100)))
)
# Write title if supposed to be above
if self._title_pos == "above":
title_str = u"{}{}\n".format(
" " * self._indent,
self.title,
)
self._write(title_str, ignore_overflow=True)
# Construct just the progress bar
bar_str = u''.join([
u(self.filled(self._filled_char * fill_amount)),
u(self.empty(self._empty_char * empty_amount)),
])
# Wrap with start and end character
bar_str = u"{}{}{}".format(self.start_char, bar_str, self.end_char)
# Add on title if supposed to be on left or right
if self._title_pos == "left":
bar_str = u"{} {}".format(self.title, bar_str)
elif self._title_pos == "right":
bar_str = u"{} {}".format(bar_str, self.title)
# Add indent
bar_str = u''.join([" " * self._indent, bar_str])
# Add complete percentage or fraction
bar_str = u"{} {}".format(bar_str, amount_complete_str)
# Set back to normal after printing
bar_str = u"{}{}".format(bar_str, self.term.normal)
# Finally, write the completed bar_str
self._write(bar_str, s_length=self.full_line_width)
# Write title if supposed to be below
if self._title_pos == "below":
title_str = u"\n{}{}".format(
" " * self._indent,
self.title,
)
self._write(title_str, ignore_overflow=True)
# Newline to wrap up
if newline:
self.cursor.newline()
if flush:
self.cursor.flush() |
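A minimal, hedged sketch of driving ``draw`` directly. It assumes the progressive-style constructor visible in ``ProgressTree._draw`` above (a blessings ``Terminal`` as the first argument plus ``max_value``/``title``/``title_pos`` kwargs); defaults may differ in your version:

from blessings import Terminal

term = Terminal()
bar = Bar(term, max_value=100, title='copying', title_pos='left')
bar.draw(value=42)   # renders the bar at 42/100; exact text depends on num_rep/width settings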
<SYSTEM_TASK:>
Get the value from a text node.
<END_TASK>
<USER_TASK:>
Description:
def get_text(nodelist):
"""Get the value from a text node.""" |
value = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
value.append(node.data)
return ''.join(value) |
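For example, pulling the text out of a single element with xml.dom.minidom (mirroring how ``_login_request`` below uses it):

import xml.dom.minidom

dom = xml.dom.minidom.parseString(
    '<SessionInfo><SID>0000000000000000</SID></SessionInfo>')
sid = get_text(dom.getElementsByTagName('SID')[0].childNodes)
# sid == '0000000000000000'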
<SYSTEM_TASK:>
Send a request with parameters.
<END_TASK>
<USER_TASK:>
Description:
def _request(self, url, params=None, timeout=10):
"""Send a request with parameters.""" |
rsp = self._session.get(url, params=params, timeout=timeout)
rsp.raise_for_status()
return rsp.text.strip() |
<SYSTEM_TASK:>
Send a login request with parameters.
<END_TASK>
<USER_TASK:>
Description:
def _login_request(self, username=None, secret=None):
"""Send a login request with paramerters.""" |
url = 'http://' + self._host + '/login_sid.lua'
params = {}
if username:
params['username'] = username
if secret:
params['response'] = secret
plain = self._request(url, params)
dom = xml.dom.minidom.parseString(plain)
sid = get_text(dom.getElementsByTagName('SID')[0].childNodes)
challenge = get_text(
dom.getElementsByTagName('Challenge')[0].childNodes)
return (sid, challenge) |
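The ``secret`` argument is the standard AVM challenge-response value. A sketch of how a caller might derive it from the challenge returned by a first, unauthenticated call (this follows AVM's documented MD5-over-UTF-16LE scheme; ``build_response`` is a hypothetical helper, not part of this module):

import hashlib

def build_response(challenge, password):
    # AVM scheme: md5 of "challenge-password" encoded as UTF-16LE,
    # sent back as "challenge-<hexdigest>"
    digest = hashlib.md5(
        '{}-{}'.format(challenge, password).encode('utf-16-le')).hexdigest()
    return '{}-{}'.format(challenge, digest)

# sid, challenge = self._login_request()
# sid, _ = self._login_request(username, build_response(challenge, password))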
<SYSTEM_TASK:>
Get the DOM elements for the device list.
<END_TASK>
<USER_TASK:>
Description:
def get_device_elements(self):
"""Get the DOM elements for the device list.""" |
plain = self._aha_request('getdevicelistinfos')
dom = xml.dom.minidom.parseString(plain)
_LOGGER.debug(dom)
return dom.getElementsByTagName("device") |
<SYSTEM_TASK:>
Get the DOM element for the specified device.
<END_TASK>
<USER_TASK:>
Description:
def get_device_element(self, ain):
"""Get the DOM element for the specified device.""" |
elements = self.get_device_elements()
for element in elements:
if element.getAttribute('identifier') == ain:
return element
return None |
<SYSTEM_TASK:>
Get the list of all known devices.
<END_TASK>
<USER_TASK:>
Description:
def get_devices(self):
"""Get the list of all known devices.""" |
devices = []
for element in self.get_device_elements():
device = FritzhomeDevice(self, node=element)
devices.append(device)
return devices |
<SYSTEM_TASK:>
Returns a device specified by the AIN.
<END_TASK>
<USER_TASK:>
Description:
def get_device_by_ain(self, ain):
"""Returns a device specified by the AIN.""" |
devices = self.get_devices()
for device in devices:
if device.ain == ain:
return device |
<SYSTEM_TASK:>
Get the thermostat state.
<END_TASK>
<USER_TASK:>
Description:
def get_hkr_state(self):
"""Get the thermostate state.""" |
self.update()
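# 126.5 / 127.0 are AVM's sentinel target temperatures for 'off' / 'on'
# (raw hkr values 253 and 254, which the API reports divided by two).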
try:
return {
126.5: 'off',
127.0: 'on',
self.eco_temperature: 'eco',
self.comfort_temperature: 'comfort'
}[self.target_temperature]
except KeyError:
return 'manual' |
<SYSTEM_TASK:>
Set the state of the thermostat.
<END_TASK>
<USER_TASK:>
Description:
def set_hkr_state(self, state):
"""Set the state of the thermostat.
Possible values for state are: 'on', 'off', 'comfort', 'eco'.
""" |
try:
value = {
'off': 0,
'on': 100,
'eco': self.eco_temperature,
'comfort': self.comfort_temperature
}[state]
except KeyError:
return
self.set_target_temperature(value) |
<SYSTEM_TASK:>
Writes ``s`` to the terminal output stream
<END_TASK>
<USER_TASK:>
Description:
def write(self, s):
"""Writes ``s`` to the terminal output stream
Writes can be disabled by setting the environment variable
`PROGRESSIVE_NOWRITE` to `'True'`
""" |
should_write_s = os.getenv('PROGRESSIVE_NOWRITE') != "True"
if should_write_s:
self._stream.write(s) |
<SYSTEM_TASK:>
Saves current cursor position, so that it can be restored later
<END_TASK>
<USER_TASK:>
Description:
def save(self):
"""Saves current cursor position, so that it can be restored later""" |
self.write(self.term.save)
self._saved = True |
<SYSTEM_TASK:>
Effects a newline by moving the cursor down and clearing
<END_TASK>
<USER_TASK:>
Description:
def newline(self):
"""Effects a newline by moving the cursor down and clearing""" |
self.write(self.term.move_down)
self.write(self.term.clear_bol) |
<SYSTEM_TASK:>
Attach a gffutils.FeatureDB for access to features.
<END_TASK>
<USER_TASK:>
Description:
def attach_db(self, db):
"""
Attach a gffutils.FeatureDB for access to features.
Useful if you want to attach a db after this instance has already been
created.
Parameters
----------
db : gffutils.FeatureDB
""" |
if db is not None:
if isinstance(db, basestring):
db = gffutils.FeatureDB(db)
if not isinstance(db, gffutils.FeatureDB):
raise ValueError(
"`db` must be a filename or a gffutils.FeatureDB")
self._kwargs['db'] = db
self.db = db |
<SYSTEM_TASK:>
Generator of features.
<END_TASK>
<USER_TASK:>
Description:
def features(self, ignore_unknown=False):
"""
Generator of features.
If a gffutils.FeatureDB is attached, returns a pybedtools.Interval for
every feature in the dataframe's index.
Parameters
----------
ignore_unknown : bool
If True, silently ignores features that are not found in the db.
""" |
if not self.db:
raise ValueError("Please attach a gffutils.FeatureDB")
for i in self.data.index:
try:
yield gffutils.helpers.asinterval(self.db[i])
except gffutils.FeatureNotFoundError:
if ignore_unknown:
continue
else:
raise gffutils.FeatureNotFoundError('%s not found' % i) |
<SYSTEM_TASK:>
Returns a copy that only has rows corresponding to feature names in x.
<END_TASK>
<USER_TASK:>
Description:
def reindex_to(self, x, attribute="Name"):
"""
Returns a copy that only has rows corresponding to feature names in x.
Parameters
----------
x : str or pybedtools.BedTool
BED, GFF, GTF, or VCF whose features carry the names to keep, either
in the "Name" field (i.e., the value returned by feature['Name']) or
in the attribute named by `attribute`.
attribute : str
Attribute containing the name of the feature to use as the index.
""" |
names = [i[attribute] for i in x]
new = self.copy()
new.data = new.data.reindex(names)
return new |
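For instance (hypothetical filename; ``results`` stands in for an instance of this class):

import pybedtools

peaks = pybedtools.BedTool('genes_near_peaks.gff')
subset = results.reindex_to(peaks, attribute='Name')
subset.data.head()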
<SYSTEM_TASK:>
Radviz plot.
<END_TASK>
<USER_TASK:>
Description:
def radviz(self, column_names, transforms=dict(), **kwargs):
"""
Radviz plot.
Useful for exploratory visualization, a radviz plot can show
multivariate data in 2D. Conceptually, the variables (here, specified
in `column_names`) are distributed evenly around the unit circle. Then
each point (here, each row in the dataframe) is attached to each
variable by a spring, where the stiffness of the spring is proportional
to the value of corresponding variable. The final position of a point
represents the equilibrium position with all springs pulling on it.
In practice, each variable is normalized to 0-1 (by subtracting the
mean and dividing by the range).
This is a very exploratory plot. The order of `column_names` will
affect the results, so it's best to try a couple different orderings.
For other caveats, see [1].
Additional kwargs are passed to self.scatter, so subsetting, callbacks,
and other configuration can be performed using options for that method
(e.g., `genes_to_highlight` is particularly useful).
Parameters
----------
column_names : list
Which columns of the dataframe to consider. The columns provided
should only include numeric data, and they should not contain any
NaN, inf, or -inf values.
transforms : dict
Dictionary mapping column names to transformations that will be
applied just for the radviz plot. For example, np.log1p is
a useful function. If a column name is not in this dictionary, it
will be used as-is.
ax : matplotlib.Axes
If not None, then plot the radviz on this axes. If None, then
a new figure will be created.
kwargs : dict
Additional arguments are passed to self.scatter. Note that not all
possible kwargs for self.scatter are necessarily useful for
a radviz plot (for example, marginal histograms would not be
meaningful).
Notes
-----
This method adds two new variables to self.data: "radviz_x" and
"radviz_y". It then calls the self.scatter method, using these new
variables.
The data transformation was adapted from the
pandas.tools.plotting.radviz function.
References
----------
[1] Hoffman,P.E. et al. (1997) DNA visual and analytic data mining. In
the Proceedings of the IEEE Visualization. Phoenix, AZ, pp.
437-441.
[2] http://www.agocg.ac.uk/reports/visual/casestud/brunsdon/radviz.htm
[3] http://pandas.pydata.org/pandas-docs/stable/visualization.html\
#radviz
""" |
# make a copy of data
x = self.data[column_names].copy()
for k, v in transforms.items():
x[k] = v(x[k])
def normalize(series):
mn = min(series)
mx = max(series)
return (series - mn) / (mx - mn)
df = x.apply(normalize)
to_plot = []
n = len(column_names)
s = np.array([(np.cos(t), np.sin(t))
for t in [2.0 * np.pi * (i / float(n))
for i in range(n)]])
for i in range(len(x)):
row = df.iloc[i].values  # .irow() was removed from pandas; .iloc is the equivalent
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
to_plot.append((s * row_).sum(axis=0) / row.sum())
x_, y_ = zip(*to_plot)
self.data['radviz_x'] = x_
self.data['radviz_y'] = y_
ax = self.scatter('radviz_x', 'radviz_y', **kwargs)
ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))
for xy, name in zip(s, column_names):
ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))
if xy[0] < 0.0 and xy[1] < 0.0:
ax.text(xy[0] - 0.025, xy[1] - 0.025, name,
ha='right', va='top', size='small')
elif xy[0] < 0.0 and xy[1] >= 0.0:
ax.text(xy[0] - 0.025, xy[1] + 0.025, name,
ha='right', va='bottom', size='small')
elif xy[0] >= 0.0 and xy[1] < 0.0:
ax.text(xy[0] + 0.025, xy[1] - 0.025, name,
ha='left', va='top', size='small')
elif xy[0] >= 0.0 and xy[1] >= 0.0:
ax.text(xy[0] + 0.025, xy[1] + 0.025, name,
ha='left', va='bottom', size='small')
ax.axis('equal')
return ax |
<SYSTEM_TASK:>
Remove features not found in the `gffutils.FeatureDB`.
<END_TASK>
<USER_TASK:>
Description:
def strip_unknown_features(self):
"""
Remove features not found in the `gffutils.FeatureDB`. This will
typically include 'ambiguous', 'no_feature', etc, but can also be
useful if the database was created from a different one than was used
to create the table.
""" |
if not self.db:
return self
ind = []
for i, gene_id in enumerate(self.data.index):
try:
self.db[gene_id]
ind.append(i)
except gffutils.FeatureNotFoundError:
pass
ind = np.array(ind)
# ind holds integer positions, so use .iloc (.ix was removed from pandas)
return self.__class__(self.data.iloc[ind], **self._kwargs) |
<SYSTEM_TASK:>
Returns a boolean index of genes that have a peak nearby.
<END_TASK>
<USER_TASK:>
Description:
def genes_with_peak(self, peaks, transform_func=None, split=False,
intersect_kwargs=None, id_attribute='ID', *args,
**kwargs):
"""
Returns a boolean index of genes that have a peak nearby.
Parameters
----------
peaks : string or pybedtools.BedTool
If string, then assume it's a filename to a BED/GFF/GTF file of
intervals; otherwise use the pybedtools.BedTool object directly.
transform_func : callable
This function will be applied to each gene object returned by
self.features(). Additional args and kwargs are passed to
`transform_func`. For example, if you're looking for peaks within
1kb upstream of TSSs, then pybedtools.featurefuncs.TSS would be
a useful `transform_func`, and you could supply additional kwargs
of `upstream=1000` and `downstream=0`.
This function can return iterables of features, too. For example,
you might want to look for peaks falling within the exons of
a gene. In this case, `transform_func` should return an iterable
of pybedtools.Interval objects. The only requirement is that the
`name` field of any feature matches the index of the dataframe.
intersect_kwargs : dict
kwargs passed to pybedtools.BedTool.intersect.
id_attribute : str
The attribute in the GTF or GFF file that contains the id of the
gene. For meaningful results to be returned, a gene's ID must also be
found in the index of the dataframe.
For GFF files, typically you'd use `id_attribute="ID"`. For GTF
files, you'd typically use `id_attribute="gene_id"`.
""" |
def _transform_func(x):
"""
In order to support transform funcs that return a single feature or
an iterable of features, we need to wrap it
"""
result = transform_func(x)
if isinstance(result, pybedtools.Interval):
result = [result]
for i in result:
if i:
yield i
intersect_kwargs = intersect_kwargs or {}
if not self._cached_features:
self._cached_features = pybedtools\
.BedTool(self.features())\
.saveas()
if transform_func:
if split:
features = self._cached_features\
.split(_transform_func, *args, **kwargs)
else:
features = self._cached_features\
.each(transform_func, *args, **kwargs)
else:
features = self._cached_features
hits = list(set([i[id_attribute] for i in features.intersect(
peaks, **intersect_kwargs)]))
return self.data.index.isin(hits) |
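A hedged sketch based on the docstring above: flag genes with a peak within 1 kb upstream of their TSS. ``d`` stands in for an instance of this class with a GTF-backed database attached, and ``peaks.bed`` is a hypothetical file:

from pybedtools.featurefuncs import TSS

has_peak = d.genes_with_peak(
    'peaks.bed', transform_func=TSS,
    upstream=1000, downstream=0, id_attribute='gene_id')
bound_subset = d.data[has_peak]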
<SYSTEM_TASK:>
Create a BED file with padj encoded as color
<END_TASK>
<USER_TASK:>
Description:
def colormapped_bedfile(self, genome, cmap=None):
"""
Create a BED file with padj encoded as color
Features will be colored according to adjusted pval (phred
transformed). Downregulated features have the sign flipped.
Parameters
----------
cmap : matplotlib colormap
Default is matplotlib.cm.RdBu_r
Notes
-----
Requires a FeatureDB to be attached.
""" |
if self.db is None:
raise ValueError("FeatureDB required")
db = self.db  # attach_db() already stores a gffutils.FeatureDB instance
def scored_feature_generator(d):
for i in range(len(d)):
try:
feature = db[d.ix[i]]
except gffutils.FeatureNotFoundError:
raise gffutils.FeatureNotFoundError(d.ix[i])
score = -10 * np.log10(d.padj[i])
lfc = d.log2FoldChange[i]
if np.isnan(lfc):
score = 0
if lfc < 0:
score *= -1
feature.score = str(score)
feature = extend_fields(
gff2bed(gffutils.helpers.asinterval(feature)), 9)
fields = feature.fields[:]
fields[6] = fields[1]
fields[7] = fields[2]
fields.append(str(d.padj[i]))
fields.append(str(d.pval[i]))
fields.append('%.3f' % d.log2FoldChange[i])
fields.append('%.3f' % d.baseMeanB[i])
fields.append('%.3f' % d.baseMeanB[i])
yield pybedtools.create_interval_from_list(fields)
x = pybedtools.BedTool(scored_feature_generator(self)).saveas()
norm = x.colormap_normalize()
if cmap is None:
cmap = cm.RdBu_r
cmap = colormap_adjust.cmap_center_point_adjust(
cmap, [norm.vmin, norm.vmax], 0)
def score_zeroer(f):
f.score = '0'
return f
return x.each(add_color, cmap=cmap, norm=norm)\
.sort()\
.each(score_zeroer)\
.truncate_to_chrom(genome)\
.saveas() |
<SYSTEM_TASK:>
Returns an array of genes in `genelist`, using `bins` bins.
<END_TASK>
<USER_TASK:>
Description:
def _array_parallel(fn, cls, genelist, chunksize=250, processes=1, **kwargs):
"""
Returns an array of genes in `genelist`, using `bins` bins.
`genelist` is a list of pybedtools.Interval objects
Splits `genelist` into pieces of size `chunksize`, creating an array
for each chunk and merging the results.
A chunksize of 25-100 seems to work well on 8 cores.
""" |
pool = multiprocessing.Pool(processes)
chunks = list(chunker(genelist, chunksize))
# pool.map can only pass a single argument to the mapped function, so you
# need this trick for passing multiple arguments; idea from
# http://stackoverflow.com/questions/5442910/
# python-multiprocessing-pool-map-for-multiple-arguments
#
results = pool.map(
func=_array_star,
iterable=itertools.izip(
itertools.repeat(fn),
itertools.repeat(cls),
chunks,
itertools.repeat(kwargs)))
pool.close()
pool.join()
return results |
<SYSTEM_TASK:>
Unpacks the tuple `args` and calls _array. Needed to pass multiple args to a pool.map-ed function.
<END_TASK>
<USER_TASK:>
Description:
def _array_star(args):
"""
Unpacks the tuple `args` and calls _array. Needed to pass multiple args to
a pool.map-ed function
""" |
fn, cls, genelist, kwargs = args
return _array(fn, cls, genelist, **kwargs) |
<SYSTEM_TASK:>
Bin data for bigwig and save to disk.
<END_TASK>
<USER_TASK:>
Description:
def to_npz(self, bigwig, metric='mean0', outdir=None):
"""
Bin data for bigwig and save to disk.
The .npz file will have the pattern
{outdir}/{bigwig}.{chrom}.{windowsize}.{metric}.npz and will have two
arrays, x (genomic coordinates of midpoints of each window) and
y (metric for each window). It can be loaded like this::
d = np.load(filename, mmap_mode='r')
Parameters
----------
bigwig : str or BigWigSignal object
BigWig data that will be used to create the array
metric : 'covered', 'sum', 'mean0', 'mean'
Metric to store in array, as reported by bigWigAverageOverBed:
* "covered": the number of bases covered by the bigWig.
* "sum": sum of values over all bases covered
* "mean0": average over bases with non-covered bases counted as
zeros
* mean: average over just the covered bases
outdir : str or None
Where to store output filenames. If None, store the file in the
same directory as the bigwig file.
""" |
if isinstance(bigwig, _genomic_signal.BigWigSignal):
bigwig = bigwig.fn
if outdir is None:
outdir = os.path.dirname(bigwig)
basename = os.path.basename(bigwig)
windowsize = self.windowsize
outfiles = []
for chrom in self.chroms:
tmp_output = pybedtools.BedTool._tmp()
windows = self.make_windows(chrom)
outfile = os.path.join(
outdir,
'{basename}.{chrom}.{windowsize}.{metric}'.format(**locals())
+ '.npz')
cmds = [
'bigWigAverageOverBed',
bigwig,
windows,
tmp_output]
os.system(' '.join(cmds))
names = ['name', 'size', 'covered', 'sum', 'mean0', 'mean']
df = pandas.read_table(tmp_output, names=names)
x = df['size'].cumsum() - df['size'] / 2  # use ['size']: `df.size` is the DataFrame attribute, not this column
y = df[metric]
np.savez(outfile, x=x, y=y)
outfiles.append(outfile)
del x, y, df
return outfiles |
<SYSTEM_TASK:>
Compares two genomic signal objects and outputs results as a bedGraph file.
<END_TASK>
<USER_TASK:>
Description:
def compare(signal1, signal2, features, outfn, comparefunc=np.subtract,
batchsize=5000, array_kwargs=None, verbose=False):
"""
Compares two genomic signal objects and outputs results as a bedGraph file.
Can be used for entire genome-wide comparisons due to its parallel nature.
Typical usage would be to create genome-wide windows of equal size to
provide as `features`::
windowsize = 10000
features = pybedtools.BedTool().window_maker(
genome='hg19', w=windowsize)
You will usually want to choose bins for the array based on the final
resolution you would like. Say you would like 10-bp bins in the final
bedGraph; using the example above you would use array_kwargs={'bins':
windowsize/10}. Or, for single-bp resolution (beware: file will be large),
use {'bins': windowsize}.
Here's how it works. This function:
* Takes `batchsize` features at a time from `features`
* Constructs normalized (RPMMR) arrays in parallel for each input
genomic signal object for those `batchsize` features
* Applies `comparefunc` (np.subtract by default) to the arrays to get
a "compared" (e.g., difference matrix by default) for the `batchsize`
features.
* For each row in this matrix, it outputs each nonzero column as
a bedGraph format line in `outfn`
`comparefunc` is a function with the signature::
def f(x, y):
return z
where `x` and `y` will be arrays for `signal1` and `signal2` (normalized to
RPMMR) and `z` is a new array. By default this is np.subtract, but another
common `comparefunc` might be a log2-fold-change function::
def lfc(x, y):
return np.log2(x / y)
:param signal1: A genomic_signal object
:param signal2: Another genomic_signal object
:param features: An iterable of pybedtools.Interval objects. A list will be
created for every `batchsize` features, so you need enough memory for
this.
:param comparefunc: Function to use to compare arrays (default is
np.subtract)
:param outfn: String filename to write bedGraph file
:param batchsize: Number of features (each with length `windowsize` bp) to
process at a time
:param array_kwargs: Kwargs passed directly to genomic_signal.array. Needs
`processes` and `chunksize` if you want parallel processing
:param verbose: Be noisy
""" |
    array_kwargs = array_kwargs or {}
    fout = open(outfn, 'w')
    fout.write('track type=bedGraph\n')

    def process_batch(batch):
        # Build normalized (RPMMR) arrays for this batch, compare them, and
        # write every nonzero bin as a bedGraph line.
        if verbose:
            print 'working on batch of %s features' % len(batch)
            sys.stdout.flush()
        arr1 = signal1.array(batch, **array_kwargs).astype(float)
        arr2 = signal2.array(batch, **array_kwargs).astype(float)
        arr1 /= signal1.million_mapped_reads()
        arr2 /= signal2.million_mapped_reads()
        compared = comparefunc(arr1, arr2)
        for feature, row in itertools.izip(batch, compared):
            start = feature.start
            binsize = len(feature) / len(row)
            # Quickly move on if nothing here; the speedup is most useful
            # for sparse data
            if sum(row) == 0:
                continue
            for score in row:
                stop = start + binsize
                if score != 0:
                    fout.write('\t'.join([
                        feature.chrom,
                        str(start),
                        str(stop),
                        str(score)]) + '\n')
                start = stop

    this_batch = []
    for feature in features:
        this_batch.append(feature)
        if len(this_batch) < batchsize:
            continue
        process_batch(this_batch)
        this_batch = []
    # Don't silently drop the final, possibly partial, batch
    if this_batch:
        process_batch(this_batch)
    fout.close() |
<SYSTEM_TASK:>
Create h5py File object with cache specification
<END_TASK>
<USER_TASK:>
Description:
def File(name, mode='a', chunk_cache_mem_size=1024**2, w0=0.75, n_cache_chunks=None, **kwds):
"""Create h5py File object with cache specification
This function is basically just a wrapper around the usual h5py.File constructor,
but accepts two additional keywords:
Parameters
----------
name : str
mode : str
**kwds : dict (as keywords)
Standard h5py.File arguments, passed to its constructor
chunk_cache_mem_size : int
Number of bytes to use for the chunk cache. Defaults to 1024**2 (1MB), which
is also the default for h5py.File -- though it cannot be changed through the
standard interface.
w0 : float between 0.0 and 1.0
Eviction parameter. Defaults to 0.75. "If the application will access the
same data more than once, w0 should be set closer to 0, and if the application
does not, w0 should be set closer to 1."
--- <https://www.hdfgroup.org/HDF5/doc/Advanced/Chunking/>
n_cache_chunks : int
Number of chunks to be kept in cache at a time. Defaults to the (smallest
integer greater than) the square root of the number of elements that can fit
into memory. This is just used for the number of slots (nslots) maintained
in the cache metadata, so it can be set larger than needed with little cost.
""" |
import sys
import numpy as np
import h5py
name = name.encode(sys.getfilesystemencoding())
open(name, mode).close() # Just make sure the file exists
if mode in [m+b for m in ['w', 'w+', 'r+', 'a', 'a+'] for b in ['', 'b']]:
mode = h5py.h5f.ACC_RDWR
else:
mode = h5py.h5f.ACC_RDONLY
if 'dtype' in kwds:
bytes_per_object = np.dtype(kwds['dtype']).itemsize
else:
bytes_per_object = np.dtype(np.float).itemsize # assume float as most likely
if not n_cache_chunks:
n_cache_chunks = int(np.ceil(np.sqrt(chunk_cache_mem_size / bytes_per_object)))
nslots = _find_next_prime(100 * n_cache_chunks)
propfaid = h5py.h5p.create(h5py.h5p.FILE_ACCESS)
settings = list(propfaid.get_cache())
settings[1:] = (nslots, chunk_cache_mem_size, w0)
propfaid.set_cache(*settings)
return h5py.File(h5py.h5f.open(name, flags=mode, fapl=propfaid), **kwds) |
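For example, to open a file with a larger chunk cache tuned for repeated reads (filename and dataset name are hypothetical):

# 64 MB chunk cache, with w0 lowered because chunks will be re-read often
f = File('signal.h5', mode='r', chunk_cache_mem_size=64 * 1024**2, w0=0.25)
dset = f['windows']
f.close()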
<SYSTEM_TASK:>
Save data from a Chipseq object.
<END_TASK>
<USER_TASK:>
Description:
def save(c, prefix, relative_paths=True):
"""
Save data from a Chipseq object.
Parameters
----------
c : Chipseq object
Chipseq object, most likely after calling the `diffed_array` method
prefix : str
Prefix, including any leading directory paths, to save the data.
relative_paths : bool
If True (default), then the path names in the `prefix.info` file will
be relative to `prefix`. Otherwise, they will be absolute.
The following files will be created:
:prefix.intervals:
A BED file (or GFF, GTF, or VCF as appropriate) of the features used for the array
:prefix.info:
A YAML-format file indicating the IP and control BAM files, any array
kwargs, the database filename, and any minibrowser local coverage args.
These are all needed to reconstruct a new Chipseq object. Path names
will be relative to `prefix`.
:prefix.npz:
A NumPy .npz file with keys 'diffed_array', 'ip_array', and 'control_array'
""" |
dirname = os.path.dirname(prefix)
pybedtools.BedTool(c.features).saveas(prefix + '.intervals')
def usepath(f):
if relative_paths:
return os.path.relpath(f, start=dirname)
else:
return os.path.abspath(f)
with open(prefix + '.info', 'w') as fout:
info = {
'ip_bam': usepath(c.ip.fn),
'control_bam': usepath(c.control.fn),
'array_kwargs': c.array_kwargs,
'dbfn': usepath(c.dbfn),
'browser_local_coverage_kwargs': c.browser_local_coverage_kwargs,
'relative_paths': relative_paths,
}
fout.write(yaml.dump(info, default_flow_style=False))
np.savez(
prefix,
diffed_array=c.diffed_array,
ip_array=c.ip_array,
control_array=c.control_array
) |
<SYSTEM_TASK:>
Streamlined version of matplotlib's `xcorr`, without the plots.
<END_TASK>
<USER_TASK:>
Description:
def xcorr(x, y, maxlags):
"""
Streamlined version of matplotlib's `xcorr`, without the plots.
:param x, y: NumPy arrays to cross-correlate
:param maxlags: Max number of lags; result will be `2*maxlags+1` in length
""" |
xlen = len(x)
ylen = len(y)
assert xlen == ylen
c = np.correlate(x, y, mode=2)
# normalize
c /= np.sqrt(np.dot(x, x) * np.dot(y, y))
lags = np.arange(-maxlags, maxlags + 1)
c = c[xlen - 1 - maxlags:xlen + maxlags]
return c |
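A small sketch of recovering a known shift between two synthetic signals:

import numpy as np

x = np.random.randn(1000)
y = np.roll(x, 5) + 0.05 * np.random.randn(1000)   # y is x shifted by 5 samples
maxlags = 20
c = xcorr(x, y, maxlags)
lags = np.arange(-maxlags, maxlags + 1)
best_lag = lags[np.argmax(c)]   # +/-5, depending on the sign convention you expect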
<SYSTEM_TASK:>
Plot the scaled ChIP-seq data.
<END_TASK>
<USER_TASK:>
Description:
def plot(self, x, row_order=None, imshow_kwargs=None, strip=True):
"""
Plot the scaled ChIP-seq data.
:param x: X-axis to use (e.g, for TSS +/- 1kb with 100 bins, this would
be `np.linspace(-1000, 1000, 100)`)
:param row_order: Array-like object containing row order -- typically
the result of an `np.argsort` call.
:param imshow_kwargs: Additional kwargs passed to matplotlib's ``imshow``;
these override the defaults used here (e.g., cmap, interpolation).
:param strip: Include axes along the left side with points that can be
clicked to spawn a minibrowser for that feature.
""" |
nrows = self.diffed_array.shape[0]
if row_order is None:
row_order = np.arange(nrows)
extent = (min(x), max(x), 0, nrows)
axes_info = metaseq.plotutils.matrix_and_line_shell(strip=strip)
fig, matrix_ax, line_ax, strip_ax, cbar_ax = axes_info
_imshow_kwargs = dict(
aspect='auto', extent=extent, interpolation='nearest')
if imshow_kwargs:
_imshow_kwargs.update(imshow_kwargs)
if 'cmap' not in _imshow_kwargs:
_imshow_kwargs['cmap'] = metaseq.colormap_adjust.smart_colormap(
self.diffed_array.min(),
self.diffed_array.max()
)
mappable = matrix_ax.imshow(
self.diffed_array[row_order],
**_imshow_kwargs)
plt.colorbar(mappable, cbar_ax)
line_ax.plot(x, self.diffed_array.mean(axis=0))
if strip_ax:
line, = strip_ax.plot(np.zeros((nrows,)), np.arange(nrows) + 0.5,
**self._strip_kwargs)
line.features = self.features
line.ind = row_order
matrix_ax.axis('tight')
if strip_ax:
strip_ax.xaxis.set_visible(False)
matrix_ax.yaxis.set_visible(False)
matrix_ax.xaxis.set_visible(False)
if self.db:
self.minibrowser = GeneChipseqMiniBrowser(
[self.ip, self.control],
db=self.db,
plotting_kwargs=self.browser_plotting_kwargs,
local_coverage_kwargs=self.browser_local_coverage_kwargs)
else:
self.minibrowser = SignalChipseqMiniBrowser(
[self.ip, self.control],
plotting_kwargs=self.browser_plotting_kwargs,
local_coverage_kwargs=self.browser_local_coverage_kwargs)
fig.canvas.mpl_connect('pick_event', self.callback)
self.fig = fig
self.axes = {
'matrix_ax': matrix_ax,
'strip_ax': strip_ax,
'line_ax': line_ax,
'cbar_ax': cbar_ax
} |
<SYSTEM_TASK:>
Callback function to spawn a mini-browser when a feature is clicked.
<END_TASK>
<USER_TASK:>
Description:
def callback(self, event):
"""
Callback function to spawn a mini-browser when a feature is clicked.
""" |
artist = event.artist
ind = artist.ind
limit = 5
browser = True
if len(event.ind) > limit:
print "more than %s genes selected; not spawning browsers" % limit
browser = False
for i in event.ind:
feature = artist.features[ind[i]]
print feature,
if browser:
self.minibrowser.plot(feature) |
<SYSTEM_TASK:>
Remove all the registered observers for the given event name.
<END_TASK>
<USER_TASK:>
Description:
def remove(self, event=None):
"""
Remove all the registered observers for the given event name.
Arguments:
event (str): event name to remove.
""" |
observers = self._pool.get(event)
if observers:
self._pool[event] = [] |
<SYSTEM_TASK:>
Triggers event observers for the given event name, passing custom variadic arguments.
<END_TASK>
<USER_TASK:>
Description:
def trigger(self, event, *args, **kw):
"""
Triggers event observers for the given event name,
passing custom variadic arguments.
""" |
observers = self._pool.get(event)
# If no observers registered for the event, do no-op
if not observers or len(observers) == 0:
return None
# Trigger observers coroutines in FIFO sequentially
for fn in observers:
# Review: perhaps this should not wait
yield from fn(*args, **kw) |
<SYSTEM_TASK:>
Repeatedly call `coro` coroutine function until `coro_test` returns `True`.
<END_TASK>
<USER_TASK:>
Description:
def until(coro, coro_test, assert_coro=None, *args, **kw):
"""
Repeatedly call `coro` coroutine function until `coro_test` returns `True`.
This function is the inverse of `paco.whilst()`.
This function is a coroutine.
Arguments:
coro (coroutinefunction): coroutine function to execute.
coro_test (coroutinefunction): coroutine function to test.
assert_coro (coroutinefunction): optional assertion coroutine used
to determine if the test passed or not.
*args (mixed): optional variadic arguments to pass to `coro` function.
Raises:
TypeError: if input arguments are invalid.
Returns:
list: result values returned by `coro`.
Usage::
calls = 0
async def task():
nonlocal calls
calls += 1
return calls
async def calls_gt_4():
return calls > 4
await paco.until(task, calls_gt_4)
# => [1, 2, 3, 4, 5]
""" |
    # Default assertion negates the test result (the inverse of `paco.whilst`);
    # honor a user-supplied `assert_coro` if one was given.
    @asyncio.coroutine
    def _assert_negate(value):
        return not value

    return (yield from whilst(coro, coro_test,
                              assert_coro=assert_coro or _assert_negate,
                              *args, **kw)) |
<SYSTEM_TASK:>
Creates a function that accepts one or more arguments of a function and either invokes it once its arity is satisfied, or returns a function that accepts the remaining arguments.
<END_TASK>
<USER_TASK:>
Description:
def curry(arity_or_fn=None, ignore_kwargs=False, evaluator=None, *args, **kw):
"""
Creates a function that accepts one or more arguments of a function and
either invokes func returning its result if at least arity number of
arguments have been provided, or returns a function that accepts the
remaining function arguments until the function arity is satisfied.
This function is overloaded: you can pass a function or coroutine function
as first argument or an `int` indicating the explicit function arity.
Function arity can be inferred via function signature or explicitly
passed via `arity_or_fn` param.
You can optionally ignore keyword-based arguments as well by passing the
`ignore_kwargs` param with `True` value.
This function can be used as decorator.
Arguments:
arity_or_fn (int|function|coroutinefunction): function arity to curry
or function to curry.
ignore_kwargs (bool): ignore keyword arguments as arity to satisfy
during curry.
evaluator (function): use a custom arity evaluator function.
*args (mixed): mixed variadic arguments for partial function
application.
**kw (mixed): keyword variadic arguments for partial function
application.
Raises:
TypeError: if function is not a function or a coroutine function.
Returns:
function or coroutinefunction: function will be returned until all the
function arity is satisfied, where a coroutine function will be
returned instead.
Usage::
# Function signature inferred function arity
@paco.curry
async def task(x, y, z=0):
return x * y + z
await task(4)(4)(z=8)
# => 24
# User defined function arity
@paco.curry(4)
async def task(x, y, *args, **kw):
return x * y + args[0] * args[1]
await task(4)(4)(8)(8)
# => 80
# Ignore keyword arguments from arity
@paco.curry(ignore_kwargs=True)
async def task(x, y, z=0):
return x * y
await task(4)(4)
# => 16
""" |
def isvalidarg(x):
return all([
x.kind != x.VAR_KEYWORD,
x.kind != x.VAR_POSITIONAL,
any([
not ignore_kwargs,
ignore_kwargs and x.default == x.empty
])
])
def params(fn):
return inspect.signature(fn).parameters.values()
def infer_arity(fn):
return len([x for x in params(fn) if isvalidarg(x)])
def merge_args(acc, args, kw):
_args, _kw = acc
_args = _args + args
_kw = _kw or {}
_kw.update(kw)
return _args, _kw
def currier(arity, acc, fn, *args, **kw):
"""
Function either continues curring of the arguments
or executes function if desired arguments have being collected.
If function curried is variadic then execution without arguments
will finish curring and trigger the function
"""
# Merge call arguments with accumulated ones
_args, _kw = merge_args(acc, args, kw)
# Get current function call accumulated arity
current_arity = len(args)
# Count keyword params as arity to satisfy, if required
if not ignore_kwargs:
current_arity += len(kw)
# Decrease function arity to satisfy
arity -= current_arity
# Use user-defined custom arity evaluator strategy, if present
currify = evaluator and evaluator(acc, fn)
# If arity is not satisfied, return recursive partial function
if currify is not False and arity > 0:
return functools.partial(currier, arity, (_args, _kw), fn)
# If arity is satisfied, instantiate the coroutine and return it
return fn(*_args, **_kw)
def wrapper(fn, *args, **kw):
if not iscallable(fn):
raise TypeError('paco: first argument must be a coroutine function, '
'a function or a method.')
# Infer function arity, if required
arity = (arity_or_fn if isinstance(arity_or_fn, int)
else infer_arity(fn))
# Wraps function as coroutine function, if needed.
fn = wraps(fn) if isfunc(fn) else fn
# Otherwise return recursive currier function
return currier(arity, (args, kw), fn, *args, **kw) if arity > 0 else fn
# Return currier function or decorator wrapper
return (wrapper(arity_or_fn, *args, **kw)
if iscallable(arity_or_fn)
else wrapper) |
<SYSTEM_TASK:>
Creates a coroutine function based on the composition of the passed coroutine functions.
<END_TASK>
<USER_TASK:>
Description:
def compose(*coros):
"""
Creates a coroutine function based on the composition of the passed
coroutine functions.
Each function consumes the yielded result of the coroutine that follows.
Composing coroutine functions f(), g(), and h() would produce
the result of f(g(h())).
Arguments:
*coros (coroutinefunction): variadic coroutine functions to compose.
Raises:
RuntimeError: if cannot execute a coroutine function.
Returns:
coroutinefunction
Usage::
async def sum_1(num):
return num + 1
async def mul_2(num):
return num * 2
coro = paco.compose(sum_1, mul_2, sum_1)
await coro(2)
# => 7
""" |
# Make list to inherit built-in type methods
coros = list(coros)
@asyncio.coroutine
def reducer(acc, coro):
return (yield from coro(acc))
@asyncio.coroutine
def wrapper(acc):
return (yield from reduce(reducer, coros,
initializer=acc, right=True))
return wrapper |
<SYSTEM_TASK:>
Prompts the user for API keys and adds them to an .ini file stored in the same location as the script.
<END_TASK>
<USER_TASK:>
Description:
def add_config():
"""
Prompts the user for API keys and adds them to an .ini file stored in
the same location as the script
""" |
genius_key = input('Enter Genius key : ')
bing_key = input('Enter Bing key : ')
CONFIG['keys']['bing_key'] = bing_key
CONFIG['keys']['genius_key'] = genius_key
with open(config_path, 'w') as configfile:
CONFIG.write(configfile) |
<SYSTEM_TASK:>
Return a binary string containing the raw HID report descriptor.
<END_TASK>
<USER_TASK:>
Description:
def getRawReportDescriptor(self):
"""
Return a binary string containing the raw HID report descriptor.
""" |
descriptor = _hidraw_report_descriptor()
size = ctypes.c_uint()
self._ioctl(_HIDIOCGRDESCSIZE, size, True)
descriptor.size = size
self._ioctl(_HIDIOCGRDESC, descriptor, True)
return ''.join(chr(x) for x in descriptor.value[:size.value]) |
<SYSTEM_TASK:>
Returns the device name as a unicode object.
<END_TASK>
<USER_TASK:>
Description:
def getName(self, length=512):
"""
Returns the device name as a unicode object.
""" |
name = ctypes.create_string_buffer(length)
self._ioctl(_HIDIOCGRAWNAME(length), name, True)
return name.value.decode('UTF-8') |
<SYSTEM_TASK:>
Returns device physical address as a string.
<END_TASK>
<USER_TASK:>
Description:
def getPhysicalAddress(self, length=512):
"""
Returns device physical address as a string.
See hidraw documentation for value signification, as it depends on
device's bus type.
""" |
name = ctypes.create_string_buffer(length)
self._ioctl(_HIDIOCGRAWPHYS(length), name, True)
return name.value |
<SYSTEM_TASK:>
Returns `True` if every element in a given iterable satisfies the coroutine asynchronous test.
<END_TASK>
<USER_TASK:>
Description:
def every(coro, iterable, limit=1, loop=None):
"""
Returns `True` if every element in a given iterable satisfies the coroutine
asynchronous test.
If any iteratee coroutine call returns `False`, the process is immediately
stopped, and `False` will be returned.
You can increase the concurrency limit for a fast race condition scenario.
This function is a coroutine.
This function can be composed in a pipeline chain with ``|`` operator.
Arguments:
coro (coroutine function): coroutine function to call with each value
to test.
iterable (iterable): an iterable collection of values to pass, one at
a time, to the test coroutine.
limit (int): max concurrency execution limit. Use ``0`` for no limit.
loop (asyncio.BaseEventLoop): optional event loop to use.
Raises:
TypeError: if input arguments are not valid.
Returns:
bool: `True` if all the values passes the test, otherwise `False`.
Usage::
async def gt_10(num):
return num > 10
await paco.every(gt_10, [1, 2, 3, 11])
# => False
await paco.every(gt_10, [11, 12, 13])
# => True
""" |
assert_corofunction(coro=coro)
assert_iter(iterable=iterable)
# Reduced accumulator value
passes = True
# Handle empty iterables
if len(iterable) == 0:
return passes
# Create concurrent executor
pool = ConcurrentExecutor(limit=limit, loop=loop)
# Tester coroutine; cancels the remaining work as soon as one element fails.
@asyncio.coroutine
def tester(element):
nonlocal passes
if not passes:
return None
if not (yield from coro(element)):
# Flag as not test passed
passes = False
# Force ignoring pending coroutines
pool.cancel()
# Iterate and attach coroutine for defer scheduling
for element in iterable:
pool.add(partial(tester, element))
# Wait until all coroutines finish
yield from pool.run()
return passes |
<SYSTEM_TASK:>
Return a future aggregating results from the given coroutine objects
<END_TASK>
<USER_TASK:>
Description:
def gather(*coros_or_futures, limit=0, loop=None, timeout=None,
preserve_order=False, return_exceptions=False):
"""
Return a future aggregating results from the given coroutine objects
with a concurrency execution limit.
If all the tasks are done successfully, the returned future’s result is
the list of results (in the order of the original sequence,
not necessarily the order of results arrival).
If return_exceptions is `True`, exceptions in the tasks are treated the
same as successful results, and gathered in the result list; otherwise,
the first raised exception will be immediately propagated to the
returned future.
All futures must share the same event loop.
This functions is mostly compatible with Python standard
``asyncio.gather``, but providing ordered results and concurrency control
flow.
This function is a coroutine.
Arguments:
*coros_or_futures (coroutines|list): an iterable collection yielding
coroutine functions or futures.
limit (int): max concurrency limit. Use ``0`` for no limit.
timeout (int|float): timeout can be used to control the maximum number
of seconds to wait before returning. timeout can be an int or
float. If timeout is not specified or None, there is no limit to
the wait time.
preserve_order (bool): preserves results order.
return_exceptions (bool): returns exceptions as valid results.
loop (asyncio.BaseEventLoop): optional event loop to use.
Returns:
list: coroutines returned results.
Usage::
async def sum(x, y):
return x + y
await paco.gather(
sum(1, 2),
sum(None, 'str'),
return_exceptions=True)
# => [3, TypeError("unsupported operand type(s) for +: 'NoneType' and 'str'")] # noqa
""" |
# If no coroutines to schedule, return empty list (as Python stdlib)
if len(coros_or_futures) == 0:
return []
# Support iterable as first argument for better interoperability
if len(coros_or_futures) == 1 and isiter(coros_or_futures[0]):
coros_or_futures = coros_or_futures[0]
# Pre-initialize results
results = [None] * len(coros_or_futures) if preserve_order else []
# Create concurrent executor
pool = ConcurrentExecutor(limit=limit, loop=loop)
# Iterate and attach coroutine for defer scheduling
for index, coro in enumerate(coros_or_futures):
# Validate coroutine object
if asyncio.iscoroutinefunction(coro):
coro = coro()
if not asyncio.iscoroutine(coro):
raise TypeError(
'paco: only coroutines or coroutine functions allowed')
# Add coroutine to the executor pool
pool.add(collect(coro, index, results,
preserve_order=preserve_order,
return_exceptions=return_exceptions))
# Wait until all the tasks finishes
yield from pool.run(timeout=timeout, return_exceptions=return_exceptions)
# Returns aggregated results
return results |
<SYSTEM_TASK:>
Wraps a given coroutine function so that, when executed, it is canceled
<END_TASK>
<USER_TASK:>
Description:
def timeout(coro, timeout=None, loop=None):
"""
Wraps a given coroutine function so that, when executed, it is canceled
and raises an `asyncio.TimeoutError` if it takes more than the given
timeout in seconds to complete.
This function is equivalent to Python standard
`asyncio.wait_for()` function.
This function can be used as decorator.
Arguments:
coro (coroutinefunction|coroutine): coroutine to wrap.
timeout (int|float): max wait timeout in seconds.
loop (asyncio.BaseEventLoop): optional event loop to use.
Raises:
TypeError: if coro argument is not a coroutine function.
Returns:
coroutinefunction: wrapper coroutine function.
Usage::
await paco.timeout(coro, timeout=10)
""" |
@asyncio.coroutine
def _timeout(coro):
return (yield from asyncio.wait_for(coro, timeout, loop=loop))
@asyncio.coroutine
def wrapper(*args, **kw):
return (yield from _timeout(coro(*args, **kw)))
return _timeout(coro) if asyncio.iscoroutine(coro) else wrapper |
<SYSTEM_TASK:>
Runs coroutines from a given iterable concurrently without waiting until
<END_TASK>
<USER_TASK:>
Description:
def race(iterable, loop=None, timeout=None, *args, **kw):
"""
Runs coroutines from a given iterable concurrently without waiting until
the previous one has completed.
Once any of the tasks completes, the main coroutine
is immediately resolved, yielding the first resolved value.
All coroutines will be executed in the same loop.
This function is a coroutine.
Arguments:
iterable (iterable): an iterable collection yielding
coroutines functions or coroutine objects.
*args (mixed): mixed variadic arguments to pass to coroutines.
loop (asyncio.BaseEventLoop): optional event loop to use.
timeout (int|float): timeout can be used to control the maximum number
of seconds to wait before returning. timeout can be an int or
float. If timeout is not specified or None, there is no limit to
the wait time.
Raises:
TypeError: if ``iterable`` argument is not iterable.
asyncio.TimeoutError: if wait timeout is exceeded.
Returns:
mixed: first resolved value.
Usage::
async def coro1():
await asyncio.sleep(2)
return 1
async def coro2():
return 2
async def coro3():
await asyncio.sleep(1)
return 3
await paco.race([coro1, coro2, coro3])
# => 2
""" |
assert_iter(iterable=iterable)
# Store coros and internal state
coros = []
resolved = False
result = None
# Resolve first yielded data from coroutine and stop pending ones
@asyncio.coroutine
def resolver(index, coro):
nonlocal result
nonlocal resolved
value = yield from coro
if not resolved:
resolved = True
# Store the first resolved value
result = value
# Force canceling pending coroutines
for _index, future in enumerate(coros):
if _index != index:
future.cancel()
# Iterate and attach coroutine for defer scheduling
for index, coro in enumerate(iterable):
# Validate yielded object
isfunction = asyncio.iscoroutinefunction(coro)
if not isfunction and not asyncio.iscoroutine(coro):
raise TypeError(
'paco: coro must be a coroutine or coroutine function')
# Init coroutine function, if required
if isfunction:
coro = coro(*args, **kw)
# Store future tasks
coros.append(ensure_future(resolver(index, coro)))
# Run coroutines concurrently
yield from asyncio.wait(coros, timeout=timeout, loop=loop)
return result |
<SYSTEM_TASK:>
Overload a given callable object to be used with ``|`` operator
<END_TASK>
<USER_TASK:>
Description:
def overload(fn):
"""
Overload a given callable object to be used with ``|`` operator
overloading.
This is especially used for composing a pipeline of
transformation over a single data set.
Arguments:
fn (function): target function to decorate.
Raises:
TypeError: if fn is not a callable object.
ValueError: if the decorated function does not accept an ``iterable``
second argument.
Returns:
function: decorated function
""" |
if not isfunction(fn):
raise TypeError('paco: fn must be a callable object')
spec = getargspec(fn)
args = spec.args
if not spec.varargs and (len(args) < 2 or args[1] != 'iterable'):
raise ValueError('paco: invalid function signature or arity')
@functools.wraps(fn)
def decorator(*args, **kw):
# Check function arity
if len(args) < 2:
return PipeOverloader(fn, args, kw)
# Otherwise, behave like a normal wrapper
return fn(*args, **kw)
return decorator |
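A minimal usage sketch of the overload decorator; the helper below is illustrative, not part of the library, and the pipeline-style call assumes PipeOverloader implements the ``|`` operator:
@overload
def double_all(factor, iterable):
    # The second positional argument must be literally named `iterable`,
    # as enforced by the signature check above.
    return [x * factor for x in iterable]

double_all(2, [1, 2, 3])
# => [2, 4, 6]
# Pipeline style (resolved later by PipeOverloader):
#   [1, 2, 3] | double_all(2)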
<SYSTEM_TASK:>
Helper function to consume a synchronous or asynchronous generator.
<END_TASK>
<USER_TASK:>
Description:
def consume(generator): # pragma: no cover
"""
Helper function to consume a synchronous or asynchronous generator.
Arguments:
generator (generator|asyncgenerator): generator to consume.
Returns:
list
""" |
# If synchronous generator, just consume and return as list
if hasattr(generator, '__next__'):
return list(generator)
if not PY_35:
raise RuntimeError(
'paco: asynchronous iterator protocol not supported')
# If asynchronous generator, consume it generator protocol manually
buf = []
while True:
try:
buf.append((yield from generator.__anext__()))
except StopAsyncIteration: # noqa
break
return buf |
<SYSTEM_TASK:>
Returns `True` if the given value is a function or method object.
<END_TASK>
<USER_TASK:>
Description:
def isfunc(x):
"""
Returns `True` if the given value is a function or method object.
Arguments:
x (mixed): value to check.
Returns:
bool
""" |
return any([
inspect.isfunction(x) and not asyncio.iscoroutinefunction(x),
inspect.ismethod(x) and not asyncio.iscoroutinefunction(x)
]) |
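A quick sketch of what isfunc accepts and rejects:
def plain():
    pass

async def coro():
    pass

isfunc(plain)         # => True
isfunc(coro)          # => False, coroutine functions are excluded
isfunc(lambda: None)  # => True
isfunc(42)            # => False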
<SYSTEM_TASK:>
Asserts that the given values are coroutine functions.
<END_TASK>
<USER_TASK:>
Description:
def assert_corofunction(**kw):
"""
Asserts that the given values are coroutine functions.
Arguments:
**kw (mixed): values to check if they are coroutine functions.
Raises:
TypeError: if assertion fails.
""" |
for name, value in kw.items():
if not asyncio.iscoroutinefunction(value):
raise TypeError(
'paco: {} must be a coroutine function'.format(name)) |
<SYSTEM_TASK:>
Asserts that the given values implement a valid iterable interface.
<END_TASK>
<USER_TASK:>
Description:
def assert_iter(**kw):
"""
Asserts that the given values implement a valid iterable interface.
Arguments:
**kw (mixed): values to check if they are iterable objects.
Raises:
TypeError: if assertion fails.
""" |
for name, value in kw.items():
if not isiter(value):
raise TypeError(
'paco: {} must be an iterable object'.format(name)) |
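A small sketch of how both assertion helpers behave; the error message embeds the offending keyword name:
async def task():
    return 1

assert_corofunction(coro=task)     # passes silently
assert_iter(iterable=[1, 2, 3])    # passes silently

assert_iter(iterable=42)
# => TypeError: paco: iterable must be an iterable object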
<SYSTEM_TASK:>
Schedules the execution of a coroutine function every `x` amount of
<END_TASK>
<USER_TASK:>
Description:
def interval(coro, interval=1, times=None, loop=None):
"""
Schedules the execution of a coroutine function every `x` amount of
seconds.
The function returns an `asyncio.Task`, which implements also an
`asyncio.Future` interface, allowing the user to cancel the execution
cycle.
This function can be used as decorator.
Arguments:
coro (coroutinefunction): coroutine function to defer.
interval (int/float): number of seconds to repeat the coroutine
execution.
times (int): optional maximum time of executions. Infinite by default.
loop (asyncio.BaseEventLoop, optional): loop to run.
Defaults to asyncio.get_event_loop().
Raises:
TypeError: if coro argument is not a coroutine function.
Returns:
future (asyncio.Task): coroutine wrapped as task future.
Useful for cancellation and state checking.
Usage::
# Usage as function
future = paco.interval(coro, 1)
# Cancel it after a while...
await asyncio.sleep(5)
future.cancel()
# Usage as decorator
@paco.interval(10)
async def metrics():
await send_metrics()
future = await metrics()
""" |
assert_corofunction(coro=coro)
# Store maximum allowed number of calls
times = int(times or 0) or float('inf')
@asyncio.coroutine
def schedule(times, *args, **kw):
while times > 0:
# Decrement times counter
times -= 1
# Schedule coroutine
yield from coro(*args, **kw)
yield from asyncio.sleep(interval)
def wrapper(*args, **kw):
return ensure_future(schedule(times, *args, **kw), loop=loop)
return wrapper |
<SYSTEM_TASK:>
Add a grammar rules to _self.rules_, _self.rule2func_,
<END_TASK>
<USER_TASK:>
Description:
def addRule(self, doc, func, _preprocess=True):
"""Add a grammar rules to _self.rules_, _self.rule2func_,
and _self.rule2name_
Comments, lines starting with # and blank lines are stripped from
doc. We also allow limited form of * and + when there it is of
the RHS has a single item, e.g.
stmts ::= stmt+
""" |
fn = func
# remove blanks lines and comment lines, e.g. lines starting with "#"
doc = os.linesep.join([s for s in doc.splitlines() if s and not re.match(r"^\s*#", s)])
rules = doc.split()
index = []
for i in range(len(rules)):
if rules[i] == '::=':
index.append(i-1)
index.append(len(rules))
for i in range(len(index)-1):
lhs = rules[index[i]]
rhs = rules[index[i]+2:index[i+1]]
rule = (lhs, tuple(rhs))
if _preprocess:
rule, fn = self.preprocess(rule, func)
# Handle a stripped-down form of *, +, and ?:
# allow only one nonterminal on the right-hand side
if len(rule[1]) == 1:
if rule[1][0] == rule[0]:
raise TypeError("Complete recursive rule %s" % rule2str(rule))
repeat = rule[1][-1][-1]
if repeat in ('*', '+', '?'):
nt = rule[1][-1][:-1]
if repeat == '?':
new_rule_pair = [rule[0], list((nt,))]
self.optional_nt.add(rule[0])
else:
self.list_like_nt.add(rule[0])
new_rule_pair = [rule[0], [rule[0]] + list((nt,))]
new_rule = rule2str(new_rule_pair)
self.addRule(new_rule, func, _preprocess)
if repeat == '+':
second_rule_pair = (lhs, (nt,))
else:
second_rule_pair = (lhs, tuple())
new_rule = rule2str(second_rule_pair)
self.addRule(new_rule, func, _preprocess)
continue
if lhs in self.rules:
if rule in self.rules[lhs]:
if 'dups' in self.debug and self.debug['dups']:
self.duplicate_rule(rule)
continue
self.rules[lhs].append(rule)
else:
self.rules[lhs] = [ rule ]
self.rule2func[rule] = fn
self.rule2name[rule] = func.__name__[2:]
self.ruleschanged = True
# Note: In empty rules, i.e. len(rule[1]) == 0, we don't
# call reductions explicitly. Instead they are computed
# implicitly.
if self.profile_info is not None and len(rule[1]) > 0:
rule_str = self.reduce_string(rule)
if rule_str not in self.profile_info:
self.profile_info[rule_str] = 0
pass
return |
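An illustrative grammar-rule method for a GenericParser subclass, showing the docstring format addRule() consumes; the '+' shorthand below is expanded into two plain rules:
def p_stmts(self, args):
    """
    stmts ::= stmt+
    stmt  ::= expr NEWLINE
    """
    # addRule() rewrites 'stmts ::= stmt+' into:
    #   stmts ::= stmts stmt
    #   stmts ::= stmt
    return args[0]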
<SYSTEM_TASK:>
Remove a grammar rules from _self.rules_, _self.rule2func_,
<END_TASK>
<USER_TASK:>
Description:
def remove_rules(self, doc):
"""Remove a grammar rules from _self.rules_, _self.rule2func_,
and _self.rule2name_
""" |
# remove blanks lines and comment lines, e.g. lines starting with "#"
doc = os.linesep.join([s for s in doc.splitlines() if s and not re.match(r"^\s*#", s)])
rules = doc.split()
index = []
for i in range(len(rules)):
if rules[i] == '::=':
index.append(i-1)
index.append(len(rules))
for i in range(len(index)-1):
lhs = rules[index[i]]
rhs = rules[index[i]+2:index[i+1]]
rule = (lhs, tuple(rhs))
if lhs not in self.rules:
return
if rule in self.rules[lhs]:
self.rules[lhs].remove(rule)
del self.rule2func[rule]
del self.rule2name[rule]
self.ruleschanged = True
# If we are profiling, remove this rule from that as well
if self.profile_info is not None and len(rule[1]) > 0:
rule_str = self.reduce_string(rule)
if rule_str and rule_str in self.profile_info:
del self.profile_info[rule_str]
pass
pass
pass
return |
<SYSTEM_TASK:>
Show the stacks of completed symbols.
<END_TASK>
<USER_TASK:>
Description:
def errorstack(self, tokens, i, full=False):
"""Show the stacks of completed symbols.
We get this by inspecting the current transitions
possible and from that extracting the set of states
we are in, and from there we look at the set of
symbols before the "dot". If full is True, we
show the entire rule with the dot placement.
Otherwise just the rule up to the dot.
""" |
print("\n-- Stacks of completed symbols:")
states = [s for s in self.edges.values() if s]
# States now has the set of states we are in
state_stack = set()
for state in states:
# Find rules which can follow, but keep only
# the part before the dot
for rule, dot in self.states[state].items:
lhs, rhs = rule
if dot > 0:
if full:
state_stack.add(
"%s ::= %s . %s" %
(lhs,
' '.join(rhs[:dot]),
' '.join(rhs[dot:])))
else:
state_stack.add(
"%s ::= %s" %
(lhs,
' '.join(rhs[:dot])))
pass
pass
pass
for stack in sorted(state_stack):
print(stack) |
<SYSTEM_TASK:>
This is the main entry point from outside.
<END_TASK>
<USER_TASK:>
Description:
def parse(self, tokens, debug=None):
"""This is the main entry point from outside.
Passing in a debug dictionary changes the default debug
setting.
""" |
self.tokens = tokens
if debug:
self.debug = debug
sets = [ [(1, 0), (2, 0)] ]
self.links = {}
if self.ruleschanged:
self.computeNull()
self.newrules = {}
self.new2old = {}
self.makeNewRules()
self.ruleschanged = False
self.edges, self.cores = {}, {}
self.states = { 0: self.makeState0() }
self.makeState(0, self._BOF)
for i in range(len(tokens)):
sets.append([])
if sets[i] == []:
break
self.makeSet(tokens, sets, i)
else:
sets.append([])
self.makeSet(None, sets, len(tokens))
finalitem = (self.finalState(tokens), 0)
if finalitem not in sets[-2]:
if len(tokens) > 0:
if self.debug.get('errorstack', False):
self.errorstack(tokens, i-1, str(self.debug['errorstack']) == 'full')
self.error(tokens, i-1)
else:
self.error(None, None)
if self.profile_info is not None:
self.dump_profile_info()
return self.buildTree(self._START, finalitem,
tokens, len(sets)-2) |
<SYSTEM_TASK:>
Bump count of the number of times _rule_ was used
<END_TASK>
<USER_TASK:>
Description:
def profile_rule(self, rule):
"""Bump count of the number of times _rule_ was used""" |
rule_str = self.reduce_string(rule)
if rule_str not in self.profile_info:
self.profile_info[rule_str] = 1
else:
self.profile_info[rule_str] += 1 |
<SYSTEM_TASK:>
Show the accumulated results of how many times each rule was used
<END_TASK>
<USER_TASK:>
Description:
def get_profile_info(self):
"""Show the accumulated results of how many times each rule was used""" |
return sorted(self.profile_info.items(),
key=lambda kv: kv[1],
reverse=False)
<SYSTEM_TASK:>
Partial function implementation designed
<END_TASK>
<USER_TASK:>
Description:
def partial(coro, *args, **kw):
"""
Partial function implementation designed
for coroutines, allowing variadic input arguments.
This function can be used as decorator.
Arguments:
coro (coroutinefunction): coroutine function to wrap.
*args (mixed): mixed variadic arguments for partial application.
Raises:
TypeError: if ``coro`` is not a coroutine function.
Returns:
coroutinefunction
Usage::
async def pow(x, y):
return x ** y
pow_2 = paco.partial(pow, 2)
await pow_2(4)
# => 16
""" |
assert_corofunction(coro=coro)
@asyncio.coroutine
def wrapper(*_args, **_kw):
call_args = args + _args
kw.update(_kw)
return (yield from coro(*call_args, **kw))
return wrapper |
<SYSTEM_TASK:>
evaluate simple expression
<END_TASK>
<USER_TASK:>
Description:
def eval_expr(expr_str, show_tokens=False, showast=False,
showgrammar=False, compile_mode='exec'):
"""
evaluate simple expression
""" |
parser_debug = {'rules': False, 'transition': False,
'reduce': showgrammar,
'errorstack': True, 'context': True }
parsed = parse_expr(expr_str, show_tokens=show_tokens,
parser_debug=parser_debug)
if showast:
print(parsed)
assert parsed == 'expr', 'Should have parsed grammar start'
evaluator = ExprEvaluator()
# What we've been waiting for: Generate source from AST!
return evaluator.traverse(parsed) |
<SYSTEM_TASK:>
Concurrently maps values yielded from an iterable, passing them
<END_TASK>
<USER_TASK:>
Description:
def map(coro, iterable, limit=0, loop=None, timeout=None,
return_exceptions=False, *args, **kw):
"""
Concurrently maps values yielded from an iterable, passing them
into an asynchronous coroutine function.
Mapped values will be returned as list.
Items order will be preserved based on origin iterable order.
Concurrency level can be configurable via ``limit`` param.
This function is the asynchronous equivalent of the Python built-in
`map()` function.
This function is a coroutine.
This function can be composed in a pipeline chain with ``|`` operator.
Arguments:
coro (coroutinefunction): map coroutine function to use.
iterable (iterable|asynchronousiterable): an iterable collection
yielding coroutines functions.
limit (int): max concurrency limit. Use ``0`` for no limit.
loop (asyncio.BaseEventLoop): optional event loop to use.
timeout (int|float): timeout can be used to control the maximum number
of seconds to wait before returning. timeout can be an int or
float. If timeout is not specified or None, there is no limit to
the wait time.
return_exceptions (bool): returns exceptions as valid results.
*args (mixed): optional variadic arguments to be passed to the
coroutine map function.
Returns:
list: ordered list of values yielded by coroutines
Usage::
async def mul_2(num):
return num * 2
await paco.map(mul_2, [1, 2, 3, 4, 5])
# => [2, 4, 6, 8, 10]
""" |
# Call each iterable but collecting yielded values
return (yield from each(coro, iterable,
limit=limit, loop=loop,
timeout=timeout, collect=True,
return_exceptions=return_exceptions)) |
<SYSTEM_TASK:>
Generic decorator for coroutines helper functions allowing
<END_TASK>
<USER_TASK:>
Description:
def decorate(fn):
"""
Generic decorator for coroutines helper functions allowing
multiple variadic initialization arguments.
This function is intended to be used internally.
Arguments:
fn (function): target function to decorate.
Raises:
TypeError: if function or coroutine function is not provided.
Returns:
function: decorated function.
""" |
if not isfunction(fn):
raise TypeError('paco: fn must be a callable object')
@functools.wraps(fn)
def decorator(*args, **kw):
# If coroutine object is passed
for arg in args:
if iscoro_or_corofunc(arg):
return fn(*args, **kw)
# Explicit argument must be at least a coroutine
if len(args) and args[0] is None:
raise TypeError('paco: first argument cannot be empty')
def wrapper(coro, *_args, **_kw):
# coro must be a valid type
if not iscoro_or_corofunc(coro):
raise TypeError('paco: first argument must be a '
'coroutine or coroutine function')
# Merge call arguments
_args = ((coro,) + (args + _args))
kw.update(_kw)
# Trigger original decorated function
return fn(*_args, **kw)
return wrapper
return decorator |
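A sketch of the two call styles `decorate` enables; `run_twice` and `mul_2` are illustrative helpers, not part of the library:
import asyncio

@decorate
def run_twice(coro, value=None):
    # Schedule the wrapped coroutine twice and gather both results
    return asyncio.gather(coro(value), coro(value))

async def mul_2(x):
    return x * 2

# Direct style, coroutine function passed first:
#   await run_twice(mul_2, value=3)    # => [6, 6]
# Deferred style, arguments first, coroutine later:
#   await run_twice(value=3)(mul_2)    # => [6, 6]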
<SYSTEM_TASK:>
Creates a throttled coroutine function that only invokes
<END_TASK>
<USER_TASK:>
Description:
def throttle(coro, limit=1, timeframe=1,
return_value=None, raise_exception=False):
"""
Creates a throttled coroutine function that only invokes
``coro`` at most once per every time frame of seconds or milliseconds.
Provide options to indicate whether func should be invoked on the
leading and/or trailing edge of the wait timeout.
Subsequent calls to the throttled coroutine
return the result of the last coroutine invocation.
This function can be used as decorator.
Arguments:
coro (coroutinefunction):
coroutine function to wrap with throttle strategy.
limit (int):
number of coroutine allowed execution in the given time frame.
timeframe (int|float):
throttle limit time frame in seconds.
return_value (mixed):
optional return if the throttle limit is reached.
Returns the latest returned value by default.
raise_exception (bool):
raise exception if throttle limit is reached.
Raises:
RuntimeError: if cannot throttle limit reached (optional).
Returns:
coroutinefunction
Usage::
async def mul_2(num):
return num * 2
# Use as simple wrapper
throttled = paco.throttle(mul_2, limit=1, timeframe=2)
await throttled(2)
# => 4
await throttled(3) # ignored!
# => 4
await asyncio.sleep(2)
await throttled(3) # executed!
# => 6
# Use as decorator
@paco.throttle(limit=1, timeframe=2)
async def mul_2(num):
return num * 2
await mul_2(2)
# => 4
await mul_2(3) # ignored!
# => 4
await asyncio.sleep(2)
await mul_2(3) # executed!
# => 6
""" |
assert_corofunction(coro=coro)
# Store execution limits
limit = max(int(limit), 1)
remaining = limit
# Turn seconds into milliseconds
timeframe = timeframe * 1000
# Keep call state
last_call = now()
# Cache latest retuned result
result = None
def stop():
if raise_exception:
raise RuntimeError('paco: coroutine throttle limit exceeded')
if return_value:
return return_value
return result
def elapsed():
return now() - last_call
@asyncio.coroutine
def wrapper(*args, **kw):
nonlocal result
nonlocal remaining
nonlocal last_call
if elapsed() > timeframe:
# Reset remaining calls counter
remaining = limit
# Update last call time
last_call = now()
elif elapsed() < timeframe and remaining <= 0:
return stop()
# Decrease remaining limit
remaining -= 1
# Schedule coroutine passing arguments and cache result
result = yield from coro(*args, **kw)
return result
return wrapper |
<SYSTEM_TASK:>
Repeatedly call `coro` coroutine function while `coro_test` returns `True`.
<END_TASK>
<USER_TASK:>
Description:
def whilst(coro, coro_test, assert_coro=None, *args, **kw):
"""
Repeatedly call `coro` coroutine function while `coro_test` returns `True`.
This function is the inverse of `paco.until()`.
This function is a coroutine.
Arguments:
coro (coroutinefunction): coroutine function to execute.
coro_test (coroutinefunction): coroutine function to test.
assert_coro (coroutinefunction): optional assertion coroutine used
to determine if the test passed or not.
*args (mixed): optional variadic arguments to pass to `coro` function.
Raises:
TypeError: if input arguments are invalid.
Returns:
list: result values returned by `coro`.
Usage::
calls = 0
async def task():
nonlocal calls
calls += 1
return calls
async def calls_lt_4():
    return calls < 4
await paco.whilst(task, calls_lt_4)
# => [1, 2, 3, 4]
""" |
assert_corofunction(coro=coro, coro_test=coro_test)
# Store yielded values by coroutine
results = []
# Set assertion coroutine
assert_coro = assert_coro or assert_true
# Execute coroutine while the test coroutine passes
while (yield from assert_coro((yield from coro_test()))):
results.append((yield from coro(*args, **kw)))
return results |
<SYSTEM_TASK:>
Load a CSV from a file path.
<END_TASK>
<USER_TASK:>
Description:
def load_csv(ctx, model, path, header=None, header_exclude=None, **fmtparams):
"""Load a CSV from a file path.
:param ctx: Anthem context
:param model: Odoo model name or model klass from env
:param path: absolute or relative path to CSV file.
If a relative path is given you must provide a value for
`ODOO_DATA_PATH` in your environment
or set `--odoo-data-path` option.
:param header: whitelist of CSV columns to load
:param header_exclude: blacklist of CSV columns to not load
:param fmtparams: keyword params for `csv_unireader`
Usage example::
from pkg_resources import Requirement, resource_string
req = Requirement.parse('my-project')
load_csv(ctx, ctx.env['res.users'],
resource_string(req, 'data/users.csv'),
delimiter=',')
""" |
if not os.path.isabs(path):
if ctx.options.odoo_data_path:
path = os.path.join(ctx.options.odoo_data_path, path)
else:
raise AnthemError(
'Got a relative path. '
'Please, provide a value for `ODOO_DATA_PATH` '
'in your environment or set `--odoo-data-path` option.'
)
with open(path, 'rb') as data:
load_csv_stream(ctx, model, data,
header=header, header_exclude=header_exclude,
**fmtparams) |
<SYSTEM_TASK:>
Load a CSV from a stream.
<END_TASK>
<USER_TASK:>
Description:
def load_csv_stream(ctx, model, data,
header=None, header_exclude=None, **fmtparams):
"""Load a CSV from a stream.
:param ctx: current anthem context
:param model: model name as string or model klass
:param data: csv data to load
:param header: csv fieldnames whitelist
:param header_exclude: csv fieldnames blacklist
Usage example::
from pkg_resources import Requirement, resource_stream
req = Requirement.parse('my-project')
load_csv_stream(ctx, ctx.env['res.users'],
resource_stream(req, 'data/users.csv'),
delimiter=',')
""" |
_header, _rows = read_csv(data, **fmtparams)
header = header if header else _header
if _rows:
# check if passed header contains all the fields
if header != _header and not header_exclude:
# if not, we exclude the rest of the fields
header_exclude = [x for x in _header if x not in header]
if header_exclude:
# exclude fields from header as well as respective values
header = [x for x in header if x not in header_exclude]
# we must loop through all the rows too to pop values
# since odoo import works only w/ reader and not w/ dictreader
pop_idxs = [_header.index(x) for x in header_exclude]
rows = []
for i, row in enumerate(_rows):
rows.append(
[x for j, x in enumerate(row) if j not in pop_idxs]
)
else:
rows = list(_rows)
if rows:
load_rows(ctx, model, header, rows) |
<SYSTEM_TASK:>
formats python2 statements
<END_TASK>
<USER_TASK:>
Description:
def format_python2_stmts(python_stmts, show_tokens=False, showast=False,
showgrammar=False, compile_mode='exec'):
"""
formats python2 statements
""" |
parser_debug = {'rules': False, 'transition': False,
'reduce': showgrammar,
'errorstack': True, 'context': True, 'dups': True }
parsed = parse_python2(python_stmts, show_tokens=show_tokens,
parser_debug=parser_debug)
assert parsed == 'file_input', 'Should have parsed grammar start'
formatter = Python2Formatter()
if showast:
print(parsed)
# What we've been waiting for: Generate source from AST!
python2_formatted_str = formatter.traverse(parsed)
return python2_formatted_str |
<SYSTEM_TASK:>
Add extra information about request handler and its params
<END_TASK>
<USER_TASK:>
Description:
def document(info=None, input=None, output=None):
"""
Add extra information about request handler and its params
""" |
def wrapper(func):
if info is not None:
setattr(func, "_swg_info", info)
if input is not None:
setattr(func, "_swg_input", input)
if output is not None:
setattr(func, "_swg_output", output)
return func
return wrapper |
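A hypothetical handler annotated with document(); the decorator only attaches _swg_* attributes for later introspection:
@document(info='Create a user',
          input={'name': 'string'},
          output={'id': 'integer'})
async def create_user(request):
    return {'id': 1}

create_user._swg_info    # => 'Create a user'
create_user._swg_output  # => {'id': 'integer'}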
<SYSTEM_TASK:>
A thunk is a subroutine that is created, often automatically, to assist
<END_TASK>
<USER_TASK:>
Description:
def thunk(coro):
"""
A thunk is a subroutine that is created, often automatically, to assist
a call to another subroutine.
Creates a thunk coroutine: a coroutine function that accepts no
arguments and, when invoked, schedules the wrapped coroutine and
returns its final result.
See Wikipedia page for more information about Thunk subroutines:
https://en.wikipedia.org/wiki/Thunk
Arguments:
coro (coroutinefunction): wrapped coroutine function to invoke.
Returns:
coroutinefunction
Usage::
async def task():
return 'foo'
coro = paco.thunk(task)
await coro()
# => 'foo'
await coro()
# => 'foo'
""" |
assert_corofunction(coro=coro)
@asyncio.coroutine
def wrapper():
return (yield from coro())
return wrapper |
<SYSTEM_TASK:>
Apply function of two arguments cumulatively to the items of sequence,
<END_TASK>
<USER_TASK:>
Description:
def reduce(coro, iterable, initializer=None, limit=1, right=False, loop=None):
"""
Apply function of two arguments cumulatively to the items of sequence,
from left to right, so as to reduce the sequence to a single value.
Reduction will be executed sequentially without concurrency,
so passed values would be in order.
This function is the asynchronous coroutine equivalent to Python standard
`functools.reduce()` function.
This function is a coroutine.
This function can be composed in a pipeline chain with ``|`` operator.
Arguments:
coro (coroutine function): reducer coroutine binary function.
iterable (iterable|asynchronousiterable): an iterable collection
yielding coroutines functions.
initializer (mixed): initial accumulator value used in
the first reduction call.
limit (int): max iteration concurrency limit. Use ``0`` for no limit.
right (bool): reduce iterable from right to left.
loop (asyncio.BaseEventLoop): optional event loop to use.
Raises:
TypeError: if input arguments are not valid.
Returns:
mixed: accumulated final reduced value.
Usage::
async def reducer(acc, num):
return acc + num
await paco.reduce(reducer, [1, 2, 3, 4, 5], initializer=0)
# => 15
""" |
assert_corofunction(coro=coro)
assert_iter(iterable=iterable)
# Reduced accumulator value
acc = initializer
# If iterable is empty, just return the initializer value
if len(iterable) == 0:
return initializer
# Create concurrent executor
pool = ConcurrentExecutor(limit=limit, loop=loop)
# Reducer partial function for deferred coroutine execution
def reducer(element):
@asyncio.coroutine
def wrapper():
nonlocal acc
acc = yield from coro(acc, element)
return wrapper
# Support right reduction
if right:
iterable.reverse()
# Iterate and attach coroutine for defer scheduling
for element in iterable:
pool.add(reducer(element))
# Wait until all coroutines finish
yield from pool.run(ignore_empty=True)
# Returns final reduced value
return acc |
<SYSTEM_TASK:>
Wraps a given coroutine function to be executed only a certain amount
<END_TASK>
<USER_TASK:>
Description:
def times(coro, limit=1, raise_exception=False, return_value=None):
"""
Wraps a given coroutine function to be executed only a certain amount
of times.
If the execution limit is exceeded, the last execution return value will
be returned as result.
You can optionally define a custom return value on exceeded via
`return_value` param.
This function can be used as decorator.
Arguments:
coro (coroutinefunction): coroutine function to wrap.
limit (int): max limit of coroutine executions.
raise_exception (bool): raise exception if execution times exceeded.
return_value (mixed): value to return when execution times exceeded.
Raises:
TypeError: if coro argument is not a coroutine function.
RuntimeError: if max executions exceeded (optional).
Returns:
coroutinefunction
Usage::
async def mul_2(num):
return num * 2
timed = paco.times(mul_2, 3)
await timed(2)
# => 4
await timed(3)
# => 6
await timed(4)
# => 8
await timed(5) # ignored!
# => 8
""" |
assert_corofunction(coro=coro)
# Store call times
limit = max(limit, 1)
times = limit
# Store result from last execution
result = None
@asyncio.coroutine
def wrapper(*args, **kw):
nonlocal limit
nonlocal result
# Check execution limit
if limit == 0:
if raise_exception:
raise RuntimeError(ExceptionMessage.format(times))
if return_value:
return return_value
return result
# Decreases counter
limit -= 1
# If return_value is present, do not memoize result
if return_value:
return (yield from coro(*args, **kw))
# Schedule coroutine and memoize result
result = yield from coro(*args, **kw)
return result
return wrapper |
<SYSTEM_TASK:>
Decorator to show a description of the running function
<END_TASK>
<USER_TASK:>
Description:
def log(func=None, name=None, timing=True, timestamp=False):
""" Decorator to show a description of the running function
By default, it outputs the first line of the docstring.
If the docstring is empty, it displays the name of the function.
Alternatively, if a ``name`` is specified, it will display that only.
It can be called as ``@log`` or as
``@log(name='abc', timing=True, timestamp=True)``.
""" |
# support to be called as @log or as @log(name='')
if func is None:
return functools.partial(log, name=name, timing=timing,
timestamp=timestamp)
@functools.wraps(func)
def decorated(*args, **kwargs):
assert len(args) > 0 and hasattr(args[0], 'log'), \
"The first argument of the decorated function must be a Context"
ctx = args[0]
message = name
if message is None:
if func.__doc__:
message = func.__doc__.splitlines()[0].strip()
if message is None:
message = func.__name__
with ctx.log(message, timing=timing, timestamp=timestamp):
return func(*args, **kwargs)
return decorated |
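A sketch of typical usage in an anthem songs module; the model references and values are illustrative only:
@log
def setup_company(ctx):
    """ Rename the main company """
    ctx.env.ref('base.main_company').write({'name': 'ACME Corp'})

@log(name='Configure admin signature', timestamp=True)
def setup_admin(ctx):
    ctx.env.ref('base.user_root').write({'signature': 'ACME'})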
<SYSTEM_TASK:>
Returns a coroutine function that when called, always returns
<END_TASK>
<USER_TASK:>
Description:
def constant(value, delay=None):
"""
Returns a coroutine function that when called, always returns
the provided value.
This function has an alias: `paco.identity`.
Arguments:
value (mixed): value to constantly return when coroutine is called.
delay (int/float): optional return value delay in seconds.
Returns:
coroutinefunction
Usage::
coro = paco.constant('foo')
await coro()
# => 'foo'
await coro()
# => 'foo'
""" |
@asyncio.coroutine
def coro():
if delay:
yield from asyncio.sleep(delay)
return value
return coro |
<SYSTEM_TASK:>
Make an iterator that drops elements from the iterable as long as the
<END_TASK>
<USER_TASK:>
Description:
def dropwhile(coro, iterable, loop=None):
"""
Make an iterator that drops elements from the iterable as long as the
predicate is true; afterwards, returns every element.
Note, the iterator does not produce any output until the predicate first
becomes false, so it may have a lengthy start-up time.
This function is pretty much equivalent to Python standard
`itertools.dropwhile()`, but designed to be used with async coroutines.
This function is a coroutine.
This function can be composed in a pipeline chain with ``|`` operator.
Arguments:
coro (coroutine function): coroutine function to call with values
to reduce.
iterable (iterable|asynchronousiterable): an iterable collection
yielding coroutines functions.
loop (asyncio.BaseEventLoop): optional event loop to use.
Raises:
TypeError: if coro argument is not a coroutine function.
Returns:
filtered values (list): ordered list of resultant values.
Usage::
async def filter(num):
return num < 4
await paco.dropwhile(filter, [1, 2, 3, 4, 5, 1])
# => [4, 5, 1]
""" |
drop = False
@asyncio.coroutine
def assert_fn(element):
nonlocal drop
if element and not drop:
return False
if not element and not drop:
drop = True
return True if drop else element
@asyncio.coroutine
def filter_fn(element):
return (yield from coro(element))
return (yield from filter(filter_fn, iterable,
assert_fn=assert_fn, limit=1, loop=loop)) |
<SYSTEM_TASK:>
Create or update a record matching xmlid with values
<END_TASK>
<USER_TASK:>
Description:
def create_or_update(ctx, model, xmlid, values):
""" Create or update a record matching xmlid with values """ |
if isinstance(model, basestring):
model = ctx.env[model]
record = ctx.env.ref(xmlid, raise_if_not_found=False)
if record:
record.update(values)
else:
record = model.create(values)
add_xmlid(ctx, record, xmlid)
return record |
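An illustrative anthem song built on create_or_update; the xmlid and values are examples only:
def create_main_partner(ctx):
    """ Ensure the ACME partner exists and is up to date """
    create_or_update(
        ctx, 'res.partner', '__setup__.partner_acme',
        {'name': 'ACME', 'is_company': True})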
<SYSTEM_TASK:>
Make sure we get a record instance even if we pass an xmlid.
<END_TASK>
<USER_TASK:>
Description:
def safe_record(ctx, item):
"""Make sure we get a record instance even if we pass an xmlid.""" |
if isinstance(item, basestring):
return ctx.env.ref(item)
return item |
<SYSTEM_TASK:>
Context manager to switch current company.
<END_TASK>
<USER_TASK:>
Description:
def switch_company(ctx, company):
"""Context manager to switch current company.
Accepts both company record and xmlid.
""" |
current_company = ctx.env.user.company_id
ctx.env.user.company_id = safe_record(ctx, company)
yield ctx
ctx.env.user.company_id = current_company |
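A sketch of loading data under another company; the company xmlid is illustrative and the helper is assumed to be registered as a context manager in the library source:
with switch_company(ctx, '__setup__.company_ch') as ctx:
    create_or_update(
        ctx, 'res.partner', '__setup__.partner_ch',
        {'name': 'Helvetia SA',
         'company_id': ctx.env.user.company_id.id})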
<SYSTEM_TASK:>
Creates a continuation coroutine function with some arguments
<END_TASK>
<USER_TASK:>
Description:
def apply(coro, *args, **kw):
"""
Creates a continuation coroutine function with some arguments
already applied.
Useful as a shorthand when combined with other control flow functions.
Any arguments passed to the returned function are added to the arguments
originally passed to apply.
This is similar to `paco.partial()`.
This function can be used as decorator.
Arguments:
coro (coroutinefunction): coroutine function to wrap.
*args (mixed): mixed variadic arguments for partial application.
**kw (mixed): mixed variadic keyword arguments for partial
application.
Raises:
TypeError: if coro argument is not a coroutine function.
Returns:
coroutinefunction: wrapped coroutine function.
Usage::
async def hello(name, mark='!'):
print('Hello, {name}{mark}'.format(name=name, mark=mark))
hello_mike = paco.apply(hello, 'Mike')
await hello_mike()
# => Hello, Mike!
hello_mike = paco.apply(hello, 'Mike', mark='?')
await hello_mike()
# => Hello, Mike?
""" |
assert_corofunction(coro=coro)
@asyncio.coroutine
def wrapper(*_args, **_kw):
# Explicitly ignore wrapper arguments
return (yield from coro(*args, **kw))
return wrapper |
<SYSTEM_TASK:>
Convenient shortcut alias to ``loop.run_until_complete``.
<END_TASK>
<USER_TASK:>
Description:
def run(coro, loop=None):
"""
Convenient shortcut alias to ``loop.run_until_complete``.
Arguments:
coro (coroutine): coroutine object to schedule.
loop (asyncio.BaseEventLoop): optional event loop to use.
Defaults to: ``asyncio.get_event_loop()``.
Returns:
mixed: returned value by coroutine.
Usage::
async def mul_2(num):
return num * 2
paco.run(mul_2(4))
# => 8
""" |
loop = loop or asyncio.get_event_loop()
return loop.run_until_complete(coro) |
<SYSTEM_TASK:>
Wait for the Futures and coroutine objects given by the sequence
<END_TASK>
<USER_TASK:>
Description:
def wait(*coros_or_futures, limit=0, timeout=None, loop=None,
return_exceptions=False, return_when='ALL_COMPLETED'):
"""
Wait for the Futures and coroutine objects given by the sequence
futures to complete, with optional concurrency limit.
Coroutines will be wrapped in Tasks.
``timeout`` can be used to control the maximum number of seconds to
wait before returning. timeout can be an int or float.
If timeout is not specified or None, there is no limit to the wait time.
If ``return_exceptions`` is True, exceptions in the tasks are treated the
same as successful results, and gathered in the result list; otherwise,
the first raised exception will be immediately propagated to the
returned future.
``return_when`` indicates when this function should return.
It must be one of the ``concurrent.futures`` constants:
``FIRST_COMPLETED``, ``FIRST_EXCEPTION`` or ``ALL_COMPLETED``.
All futures must share the same event loop.
This function is mostly compatible with the Python standard
``asyncio.wait()``.
Arguments:
*coros_or_futures (iter|list):
an iterable collection yielding coroutines functions.
limit (int):
optional concurrency execution limit. Use ``0`` for no limit.
timeout (int/float):
maximum number of seconds to wait before returning.
return_exceptions (bool):
exceptions in the tasks are treated the same as successful results,
instead of raising them.
return_when (str):
indicates when this function should return.
loop (asyncio.BaseEventLoop):
optional event loop to use.
*args (mixed):
optional variadic argument to pass to the coroutines function.
Returns:
tuple: Returns two sets of Future: (done, pending).
Raises:
TypeError: in case of invalid coroutine object.
ValueError: in case of empty set of coroutines or futures.
TimeoutError: if execution takes more than expected.
Usage::
async def sum(x, y):
return x + y
done, pending = await paco.wait(
sum(1, 2),
sum(3, 4))
[task.result() for task in done]
# => [3, 7]
""" |
# Support iterable as first argument for better interoperability
if len(coros_or_futures) == 1 and isiter(coros_or_futures[0]):
coros_or_futures = coros_or_futures[0]
# Raise ValueError if the set of coroutines/futures is empty,
# mimicking asyncio.wait() behaviour.
if len(coros_or_futures) == 0:
raise ValueError('paco: set of coroutines/futures is empty')
# Create concurrent executor
pool = ConcurrentExecutor(limit=limit, loop=loop,
coros=coros_or_futures)
# Wait until all the tasks finishes
return (yield from pool.run(timeout=timeout,
return_when=return_when,
return_exceptions=return_exceptions)) |
<SYSTEM_TASK:>
Run the given coroutine functions in series, each one
<END_TASK>
<USER_TASK:>
Description:
def series(*coros_or_futures, timeout=None,
loop=None, return_exceptions=False):
"""
Run the given coroutine functions in series, each one
running once the previous execution has completed.
If any coroutines raises an exception, no more
coroutines are executed. Otherwise, the coroutines returned values
will be returned as `list`.
``timeout`` can be used to control the maximum number of seconds to
wait before returning. timeout can be an int or float.
If timeout is not specified or None, there is no limit to the wait time.
If ``return_exceptions`` is True, exceptions in the tasks are treated the
same as successful results, and gathered in the result list; otherwise,
the first raised exception will be immediately propagated to the
returned future.
All futures must share the same event loop.
This functions is basically the sequential execution version of
``asyncio.gather()``. Interface compatible with ``asyncio.gather()``.
This function is a coroutine.
Arguments:
*coros_or_futures (iter|list):
an iterable collection yielding coroutines functions.
timeout (int/float):
maximum number of seconds to wait before returning.
return_exceptions (bool):
exceptions in the tasks are treated the same as successful results,
instead of raising them.
loop (asyncio.BaseEventLoop):
optional event loop to use.
*args (mixed):
optional variadic argument to pass to the coroutines function.
Returns:
list: coroutines returned results.
Raises:
TypeError: in case of invalid coroutine object.
ValueError: in case of empty set of coroutines or futures.
TimeoutError: if execution takes more than expected.
Usage::
async def sum(x, y):
return x + y
await paco.series(
sum(1, 2),
sum(2, 3),
sum(3, 4))
# => [3, 5, 7]
""" |
return (yield from gather(*coros_or_futures,
loop=loop, limit=1, timeout=timeout,
return_exceptions=return_exceptions)) |
<SYSTEM_TASK:>
Executes the coroutine function ``x`` number of times,
<END_TASK>
<USER_TASK:>
Description:
def repeat(coro, times=1, step=1, limit=1, loop=None):
"""
Executes the coroutine function ``x`` number of times,
and accumulates results in order as you would use with ``map``.
Execution concurrency is configurable using ``limit`` param.
This function is a coroutine.
Arguments:
coro (coroutinefunction): coroutine function to schedule.
times (int): number of times to execute the coroutine.
step (int): increment iteration step, as with ``range()``.
limit (int): concurrency execution limit. Defaults to 1.
loop (asyncio.BaseEventLoop): optional event loop to use.
Raises:
TypeError: if coro is not a coroutine function.
Returns:
list: accumulated yielded values returned by coroutine.
Usage::
async def mul_2(num):
return num * 2
await paco.repeat(mul_2, times=5)
# => [2, 4, 6, 8, 10]
""" |
assert_corofunction(coro=coro)
# Iterate and attach coroutine for defer scheduling
times = max(int(times), 1)
iterable = range(1, times + 1, step)
# Run iterable times
return (yield from map(coro, iterable, limit=limit, loop=loop)) |
<SYSTEM_TASK:>
Wrap a given coroutine function that is restricted to one execution.
<END_TASK>
<USER_TASK:>
Description:
def once(coro, raise_exception=False, return_value=None):
"""
Wrap a given coroutine function that is restricted to one execution.
Repeated calls to the coroutine function will return the value of the first
invocation.
This function can be used as decorator.
Arguments:
coro (coroutinefunction): coroutine function to wrap.
raise_exception (bool): raise exception if execution times exceeded.
return_value (mixed): value to return when execution times exceeded,
instead of the memoized one from last invocation.
Raises:
TypeError: if coro argument is not a coroutine function.
Returns:
coroutinefunction
Usage::
async def mul_2(num):
return num * 2
once = paco.once(mul_2)
await once(2)
# => 4
await once(3)
# => 4
once = paco.once(mul_2, return_value='exceeded')
await once(2)
# => 4
await once(3)
# => 'exceeded'
""" |
return times(coro,
limit=1,
return_value=return_value,
raise_exception=raise_exception) |
<SYSTEM_TASK:>
Returns a coroutine function wrapper that will defer the given coroutine
<END_TASK>
<USER_TASK:>
Description:
def defer(coro, delay=1):
"""
Returns a coroutine function wrapper that will defer the given coroutine
execution for a certain amount of seconds in a non-blocking way.
This function can be used as decorator.
Arguments:
coro (coroutinefunction): coroutine function to defer.
delay (int/float): number of seconds to defer execution.
Raises:
TypeError: if coro argument is not a coroutine function.
Returns:
coroutinefunction: wrapper coroutine function.
Usage::
# Usage as function
await paco.defer(coro, delay=1)
await paco.defer(coro, delay=0.5)
# Usage as decorator
@paco.defer(delay=1)
async def mul_2(num):
return num * 2
await mul_2(2)
# => 4
""" |
assert_corofunction(coro=coro)
@asyncio.coroutine
def wrapper(*args, **kw):
# Sleep the configured delay before executing the coroutine
yield from asyncio.sleep(delay)
return (yield from coro(*args, **kw))
return wrapper |
<SYSTEM_TASK:>
Executes a given coroutine and optionally catches exceptions, returning
<END_TASK>
<USER_TASK:>
Description:
def safe_run(coro, return_exceptions=False):
"""
Executes a given coroutine and optionally catches exceptions, returning
them as value. This function is intended to be used internally.
""" |
try:
result = yield from coro
except Exception as err:
if return_exceptions:
result = err
else:
raise err
return result |
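A minimal sketch of the two error modes, assuming safe_run is exposed as an awaitable coroutine in the library source:
async def boom():
    raise ValueError('boom')

err = await safe_run(boom(), return_exceptions=True)
# => ValueError('boom') returned as a value

await safe_run(boom())
# => raises ValueError('boom')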
<SYSTEM_TASK:>
Collect is used internally to execute coroutines and collect the returned
<END_TASK>
<USER_TASK:>
Description:
def collect(coro, index, results,
preserve_order=False,
return_exceptions=False):
"""
Collect is used internally to execute coroutines and collect the returned
value. This function is intended to be used internally.
""" |
result = yield from safe_run(coro, return_exceptions=return_exceptions)
if preserve_order:
results[index] = result
else:
results.append(result) |
<SYSTEM_TASK:>
Resets the executer scheduler internal state.
<END_TASK>
<USER_TASK:>
Description:
def reset(self):
"""
Resets the executer scheduler internal state.
Raises:
RuntimeError: is the executor is still running.
""" |
if self.running:
raise RuntimeError('paco: executor is still running')
self.pool.clear()
self.observer.clear()
self.semaphore = asyncio.Semaphore(self.limit, loop=self.loop) |