code | docstring
---|---
def nmap_scan():
"""
Scans the given hosts with nmap.
"""
# Create the search and config objects
hs = HostSearch()
config = Config()
# Static options to be able to figure out what options to use depending on the input the user gives.
nmap_types = ['top10', 'top100', 'custom', 'top1000', 'all']
options = {'top10':'--top-ports 10', 'top100':'--top-ports 100', 'custom': config.get('nmap', 'options'), 'top1000': '--top-ports 1000', 'all': '-p-'}
# Create an argument parser
hs_parser = hs.argparser
argparser = argparse.ArgumentParser(parents=[hs_parser], conflict_handler='resolve', \
description="Scans hosts from the database using nmap, any arguments that are not in the help are passed to nmap")
argparser.add_argument('type', metavar='type', \
help='The number of ports to scan: top10, top100, custom, top1000 (default) or all', \
type=str, choices=nmap_types, default='top1000', const='top1000', nargs='?')
arguments, extra_nmap_args = argparser.parse_known_args()
# Fix the tags for the search
tags = nmap_types[nmap_types.index(arguments.type):]
tags = ["!nmap_" + tag for tag in tags]
hosts = hs.get_hosts(tags=tags)
hosts = [host for host in hosts]
# Create the nmap arguments
nmap_args = []
nmap_args.extend(extra_nmap_args)
nmap_args.extend(options[arguments.type].split(' '))
# Run nmap
print_notification("Running nmap with args: {} on {} hosts(s)".format(nmap_args, len(hosts)))
if len(hosts):
result = nmap(nmap_args, [str(h.address) for h in hosts])
# Import the nmap result
for host in hosts:
host.add_tag("nmap_{}".format(arguments.type))
host.save()
print_notification("Nmap done, importing results")
stats = import_nmap(result, "nmap_{}".format(arguments.type), check_function=all_hosts, import_services=True)
stats['scanned_hosts'] = len(hosts)
stats['type'] = arguments.type
Logger().log('nmap_scan', "Performed nmap {} scan on {} hosts".format(arguments.type, len(hosts)), stats)
else:
print_notification("No hosts found") | Scans the given hosts with nmap. |
def ensure_mapping_format(variables):
""" ensure variables are in mapping format.
Args:
variables (list/dict): original variables
Returns:
dict: ensured variables in dict format
Examples:
>>> variables = [
{"a": 1},
{"b": 2}
]
>>> print(ensure_mapping_format(variables))
{
"a": 1,
"b": 2
}
"""
if isinstance(variables, list):
variables_dict = {}
for map_dict in variables:
variables_dict.update(map_dict)
return variables_dict
elif isinstance(variables, dict):
return variables
else:
raise exceptions.ParamsError("variables format error!") | ensure variables are in mapping format.
Args:
variables (list/dict): original variables
Returns:
dict: ensured variables in dict format
Examples:
>>> variables = [
{"a": 1},
{"b": 2}
]
>>> print(ensure_mapping_format(variables))
{
"a": 1,
"b": 2
} |
def paint(self, painter, option, index):
"""Paint checkbox and text
_
|_| My label
"""
body_rect = QtCore.QRectF(option.rect)
check_rect = QtCore.QRectF(body_rect)
check_rect.setWidth(check_rect.height())
check_rect.adjust(6, 6, -6, -6)
check_color = colors["idle"]
if index.data(model.IsProcessing) is True:
check_color = colors["active"]
elif index.data(model.HasFailed) is True:
check_color = colors["warning"]
elif index.data(model.HasSucceeded) is True:
check_color = colors["ok"]
elif index.data(model.HasProcessed) is True:
check_color = colors["ok"]
metrics = painter.fontMetrics()
label_rect = QtCore.QRectF(option.rect.adjusted(
check_rect.width() + 12, 2, 0, -2))
assert label_rect.width() > 0
label = index.data(model.Label)
label = metrics.elidedText(label,
QtCore.Qt.ElideRight,
label_rect.width() - 20)
font_color = colors["idle"]
if not index.data(model.IsChecked):
font_color = colors["inactive"]
# Maintain reference to state, so we can restore it once we're done
painter.save()
# Draw label
painter.setFont(fonts["h4"])
painter.setPen(QtGui.QPen(font_color))
painter.drawText(label_rect, label)
# Draw action icon
if index.data(model.ActionIconVisible):
painter.save()
if index.data(model.ActionIdle):
color = colors["idle"]
elif index.data(model.IsProcessing):
color = colors["active"]
elif index.data(model.ActionFailed):
color = colors["warning"]
else:
color = colors["ok"]
painter.setFont(fonts["smallAwesome"])
painter.setPen(QtGui.QPen(color))
icon_rect = QtCore.QRectF(option.rect.adjusted(
label_rect.width() + 1, label_rect.height() / 3, 0, 0))
painter.drawText(icon_rect, icons["action"])
painter.restore()
# Draw checkbox
pen = QtGui.QPen(check_color, 1)
painter.setPen(pen)
if index.data(model.IsOptional):
painter.drawRect(check_rect)
if index.data(model.IsChecked):
painter.fillRect(check_rect, check_color)
elif not index.data(model.IsIdle) and index.data(model.IsChecked):
painter.fillRect(check_rect, check_color)
if option.state & QtWidgets.QStyle.State_MouseOver:
painter.fillRect(body_rect, colors["hover"])
if option.state & QtWidgets.QStyle.State_Selected:
painter.fillRect(body_rect, colors["selected"])
# Ok, we're done, tidy up.
painter.restore() | Paint checkbox and text
_
|_| My label |
def parse(self, url):
"""
Return a configuration dict from a URL
"""
parsed_url = urlparse.urlparse(url)
try:
default_config = self.CONFIG[parsed_url.scheme]
except KeyError:
raise ValueError(
'unrecognised URL scheme for {}: {}'.format(
self.__class__.__name__, url))
handler = self.get_handler_for_scheme(parsed_url.scheme)
config = copy.deepcopy(default_config)
return handler(parsed_url, config) | Return a configuration dict from a URL |
def call_multiple_modules(module_gen):
"""Call each module
module_gen should be an iterator
"""
for args_seq in module_gen:
module_name_or_path = args_seq[0]
with replace_sys_args(args_seq):
if re.match(VALID_PACKAGE_RE, module_name_or_path):
runpy.run_module(module_name_or_path,
run_name='__main__')
else:
runpy.run_path(module_name_or_path,
run_name='__main__') | Call each module
module_gen should be an iterator |
def _setEncoderParams(self):
"""
Set the radius, resolution and range. These values are updated when minval
and/or maxval change.
"""
self.rangeInternal = float(self.maxval - self.minval)
self.resolution = float(self.rangeInternal) / (self.n - self.w)
self.radius = self.w * self.resolution
self.range = self.rangeInternal + self.resolution
# nInternal represents the output area excluding the possible padding on each side
self.nInternal = self.n - 2 * self.padding
# Invalidate the bucket values cache so that they get recomputed
self._bucketValues = None | Set the radius, resolution and range. These values are updated when minval
and/or maxval change. |
def convert_money(amount, currency_from, currency_to):
"""
Convert 'amount' from 'currency_from' to 'currency_to' and return a Money
instance of the converted amount.
"""
new_amount = base_convert_money(amount, currency_from, currency_to)
return moneyed.Money(new_amount, currency_to) | Convert 'amount' from 'currency_from' to 'currency_to' and return a Money
instance of the converted amount. |
def _find_usage_cloudtrail(self):
"""Calculate current usage for CloudTrail related metrics"""
trail_list = self.conn.describe_trails()['trailList']
trail_count = len(trail_list) if trail_list else 0
for trail in trail_list:
data_resource_count = 0
if self.conn._client_config.region_name == trail['HomeRegion']:
response = self.conn.get_event_selectors(
TrailName=trail['Name']
)
event_selectors = response['EventSelectors']
for event_selector in event_selectors:
data_resource_count += len(
event_selector.get('DataResources', [])
)
self.limits['Event Selectors Per Trail']._add_current_usage(
len(event_selectors),
aws_type='AWS::CloudTrail::EventSelector',
resource_id=trail['Name']
)
self.limits['Data Resources Per Trail']._add_current_usage(
data_resource_count,
aws_type='AWS::CloudTrail::DataResource',
resource_id=trail['Name']
)
else:
logger.debug(
'Ignoring event selectors and data resources for '
'CloudTrail %s in non-home region' % trail['Name']
)
self.limits['Trails Per Region']._add_current_usage(
trail_count,
aws_type=self.aws_type
) | Calculate current usage for CloudTrail related metrics |
def has_access(user, required_roles, match_all=True):
"""Check if the user meets the role requirements. If mode is set to AND, all the provided roles must apply
Args:
user (:obj:`User`): User object
required_roles (`list` of `str`): List of roles that the user must have applied
match_all (`bool`): If true, all the required_roles must be applied to the user, else any one match will
return `True`
Returns:
`bool`
"""
# Admins have access to everything
if ROLE_ADMIN in user.roles:
return True
if isinstance(required_roles, str):
if required_roles in user.roles:
return True
return False
# If we received a list of roles to match against
if match_all:
for role in required_roles:
if role not in user.roles:
return False
return True
else:
for role in required_roles:
if role in user.roles:
return True
return False | Check if the user meets the role requirements. If mode is set to AND, all the provided roles must apply
Args:
user (:obj:`User`): User object
required_roles (`list` of `str`): List of roles that the user must have applied
match_all (`bool`): If true, all the required_roles must be applied to the user, else any one match will
return `True`
Returns:
`bool` |
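As a quick illustration of the match_all / any-match logic above, here is a small self-contained sketch; the StubUser class and the ROLE_ADMIN value are stand-ins for the real objects, which are not shown in this snippet.
ROLE_ADMIN = "Admin"  # assumption: stand-in for the real constant

class StubUser:
    def __init__(self, roles):
        self.roles = roles

def has_access_sketch(user, required_roles, match_all=True):
    # Admins short-circuit every check, mirroring the function above
    if ROLE_ADMIN in user.roles:
        return True
    if isinstance(required_roles, str):
        return required_roles in user.roles
    if match_all:
        return all(role in user.roles for role in required_roles)
    return any(role in user.roles for role in required_roles)

print(has_access_sketch(StubUser(["Auditor"]), ["Auditor", "Operator"]))         # False: AND mode
print(has_access_sketch(StubUser(["Auditor"]), ["Auditor", "Operator"], False))  # True: OR mode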
def _load_item(self, key):
'''Load the specified item from the [flask] section. Type is
determined by the type of the equivalent value in app.default_config
or string if unknown.'''
key_u = key.upper()
default = current_app.default_config.get(key_u)
# One of the default config vars is a timedelta - interpret it
# as an int and construct using it
if isinstance(default, datetime.timedelta):
current_app.config[key_u] = datetime.timedelta(self.getint('flask', key))
elif isinstance(default, bool):
current_app.config[key_u] = self.getboolean('flask', key)
elif isinstance(default, float):
current_app.config[key_u] = self.getfloat('flask', key)
elif isinstance(default, int):
current_app.config[key_u] = self.getint('flask', key)
else:
# All the string keys need to be coerced into str()
# because Flask expects some of them not to be unicode
current_app.config[key_u] = str(self.get('flask', key)) | Load the specified item from the [flask] section. Type is
determined by the type of the equivalent value in app.default_config
or string if unknown. |
def plot_burstness(corpus, B, **kwargs):
"""
Generate a figure depicting burstness profiles for ``feature``.
Parameters
----------
B
Returns
-------
fig : :class:`matplotlib.figure.Figure`
Examples
--------
.. code-block:: python
>>> from tethne.analyze.corpus import burstness
>>> fig = plot_burstness(corpus, 'citations', topn=2, perslice=True)
>>> fig.savefig('~/burstness.png')
Years prior to the first occurrence of each feature are grayed out. Periods
in which the feature was bursty are depicted by colored blocks, the opacity
of which indicates burstness intensity.
.. figure:: _static/images/burstness.png
:width: 600
:align: center
"""
try:
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
except ImportError:
raise RuntimeError('This method requires the package matplotlib.')
color = kwargs.get('color', 'red')
# Get width based on slices.
years = sorted(corpus.indices['date'].keys())
width = years[1] - years[0]
height = 1.0
fig = plt.figure(figsize=(10,len(B)/4.))
f = 1
axes = {}
for key, value in B.iteritems():
x,y = value
ax = fig.add_subplot(len(B),1,f)
f+=1
ax.set_yticks([])
ax.set_xbound(min(years), max(years) + 1)
if not f == len(B)+1: # Only show xticks on the bottom subplot.
ax.set_xticklabels([])
# Block out years until first occurrence of feature.
rect = mpatches.Rectangle((min(years), 0), sorted(x)[0]-min(years),
height, fill=True, linewidth=0.0)
rect.set_facecolor('black')
rect.set_alpha(0.3)
ax.add_patch(rect)
# Add a rectangle for each year, shaded according to burstness state.
for d in xrange(min(x), max(x)):
try:
i = x.index(d)
except ValueError:
continue
xy = (d, 0.)
state = y[i]
rect = mpatches.Rectangle(xy, width, height,
fill=True, linewidth=0.0)
rect.set_facecolor(color)
rect.set_alpha(state)
ax.add_patch(rect)
ax.set_ylabel(key, rotation=0,
horizontalalignment='right',
verticalalignment='center')
plt.subplots_adjust(left=0.5)
fig.tight_layout(h_pad=0.25)
plt.show() | Generate a figure depicting burstness profiles for ``feature``.
Parameters
----------
B
Returns
-------
fig : :class:`matplotlib.figure.Figure`
Examples
--------
.. code-block:: python
>>> from tethne.analyze.corpus import burstness
>>> fig = plot_burstness(corpus, 'citations', topn=2, perslice=True)
>>> fig.savefig('~/burstness.png')
Years prior to the first occurrence of each feature are grayed out. Periods
in which the feature was bursty are depicted by colored blocks, the opacity
of which indicates burstness intensity.
.. figure:: _static/images/burstness.png
:width: 600
:align: center |
def get_provisioned_table_write_units(table_name):
""" Returns the number of provisioned write units for the table
:type table_name: str
:param table_name: Name of the DynamoDB table
:returns: int -- Number of write units
"""
try:
desc = DYNAMODB_CONNECTION.describe_table(table_name)
except JSONResponseError:
raise
write_units = int(
desc[u'Table'][u'ProvisionedThroughput'][u'WriteCapacityUnits'])
logger.debug('{0} - Currently provisioned write units: {1:d}'.format(
table_name, write_units))
return write_units | Returns the number of provisioned write units for the table
:type table_name: str
:param table_name: Name of the DynamoDB table
:returns: int -- Number of write units |
def replace(self, *args, **kwargs):
"""
replace(lower=None, upper=None, lower_inc=None, upper_inc=None)
Returns a new instance of self with the given arguments replaced. It
takes the exact same arguments as the constructor.
>>> intrange(1, 5).replace(upper=10)
intrange([1,10))
>>> intrange(1, 10).replace(lower_inc=False)
intrange([2,10))
>>> intrange(1, 10).replace(5)
intrange([5,10))
Note that range objects are immutable and are never modified in place.
"""
replacements = {
"lower" : self.lower,
"upper" : self.upper,
"lower_inc" : self.lower_inc,
"upper_inc" : self.upper_inc
}
replacements.update(
dict(zip(("lower", "upper", "lower_inc", "upper_inc"), args)))
replacements.update(kwargs)
return self.__class__(**replacements) | replace(lower=None, upper=None, lower_inc=None, upper_inc=None)
Returns a new instance of self with the given arguments replaced. It
takes the exact same arguments as the constructor.
>>> intrange(1, 5).replace(upper=10)
intrange([1,10))
>>> intrange(1, 10).replace(lower_inc=False)
intrange([2,10))
>>> intrange(1, 10).replace(5)
intrange([5,10))
Note that range objects are immutable and are never modified in place. |
def __check_table_rules(configuration):
""" Do some basic checks on the configuration """
for table_name in configuration['tables']:
table = configuration['tables'][table_name]
# Check that increase/decrease units is OK
valid_units = ['percent', 'units']
if table['increase_reads_unit'] not in valid_units:
print('increase-reads-unit must be set to either percent or units')
sys.exit(1)
if table['decrease_reads_unit'] not in valid_units:
print('decrease-reads-unit must be set to either percent or units')
sys.exit(1)
if table['increase_writes_unit'] not in valid_units:
print(
'increase-writes-unit must be set to either percent or units')
sys.exit(1)
if table['decrease_writes_unit'] not in valid_units:
print(
'decrease-writes-unit must be set to either percent or units')
sys.exit(1)
if ('increase_consumed_reads_unit' in table
and table['increase_consumed_reads_unit']
and table['increase_consumed_reads_unit'] not in valid_units):
print(
'increase-consumed-reads-unit must be set to '
'either percent or units, or left unset')
sys.exit(1)
if ('increase_consumed_writes_unit' in table
and table['increase_consumed_writes_unit']
and table['increase_consumed_writes_unit'] not in valid_units):
print(
'increase-consumed-writes-unit must be set to '
'either percent or units, or left unset')
sys.exit(1)
if ('increase_throttled_by_consumed_reads_unit' in table
and table['increase_throttled_by_consumed_reads_unit']
and table['increase_throttled_by_consumed_reads_unit']
not in valid_units):
print(
'increase-throttled-by-consumed-reads-unit must be set to '
'either percent or units, or left unset')
sys.exit(1)
if ('increase_throttled_by_consumed_writes_unit' in table
and table['increase_throttled_by_consumed_writes_unit']
and table['increase_throttled_by_consumed_writes_unit']
not in valid_units):
print(
'increase-throttled-by-consumed-writes-unit must be set to '
'either percent or units, or left unset')
sys.exit(1)
if ('increase_throttled_by_provisioned_reads_unit' in table
and table['increase_throttled_by_provisioned_reads_unit']
and table['increase_throttled_by_provisioned_reads_unit']
not in valid_units):
print(
'increase-throttled-by-provisioned-reads-unit must be set to '
'either percent or units, or left unset')
sys.exit(1)
if ('increase_throttled_by_provisioned_writes_unit' in table
and table['increase_throttled_by_provisioned_writes_unit']
and table['increase_throttled_by_provisioned_writes_unit']
not in valid_units):
print(
'increase-throttled-by-provisioned-writes-unit must be set to '
'either percent or units, or left unset')
sys.exit(1)
# Check lookback-window start
if table['lookback_window_start'] < 1:
print(
'lookback-window-start must be a value higher than 1, '
'as DynamoDB sends CloudWatch data every minute')
sys.exit(1)
# Check sns-message-types
valid_sns_message_types = [
'scale-up',
'scale-down',
'high-throughput-alarm',
'low-throughput-alarm']
if table['sns_message_types']:
for sns_type in table['sns_message_types']:
if sns_type not in valid_sns_message_types:
print('Warning: Invalid sns-message-type: {0}'.format(
sns_type))
table['sns_message_types'].remove(sns_type)
# Ensure values > 0 for some important configuration options
options = [
'reads_lower_threshold',
'reads_upper_threshold',
'increase_reads_with',
'decrease_reads_with',
'writes_lower_threshold',
'writes_upper_threshold',
'increase_writes_with',
'decrease_writes_with',
'min_provisioned_reads',
'max_provisioned_reads',
'min_provisioned_writes',
'max_provisioned_writes',
'num_read_checks_before_scale_down',
'num_write_checks_before_scale_down',
'increase_consumed_reads_with',
'increase_consumed_writes_with'
]
# Config options without a mandatory default
# should be allowed a None value
non_default = [
'increase_consumed_reads_with',
'increase_consumed_writes_with'
]
for option in options:
if (option in non_default
and option in table
and table[option] and table[option] < 1):
print('{0} may not be lower than 1 for table {1}'.format(
option, table_name))
sys.exit(1)
if (option in table
and option not in non_default
and table[option] < 1):
print('{0} may not be lower than 1 for table {1}'.format(
option, table_name))
sys.exit(1)
if (int(table['min_provisioned_reads']) >
int(table['max_provisioned_reads'])):
print(
'min_provisioned_reads ({0}) may not be higher than '
'max_provisioned_reads ({1}) for table {2}'.format(
table['min_provisioned_reads'],
table['max_provisioned_reads'],
table_name))
sys.exit(1)
elif (int(table['min_provisioned_writes']) >
int(table['max_provisioned_writes'])):
print(
'min_provisioned_writes ({0}) may not be higher than '
'max_provisioned_writes ({1}) for table {2}'.format(
table['min_provisioned_writes'],
table['max_provisioned_writes'],
table_name))
sys.exit(1) | Do some basic checks on the configuration |
def markdown(text, html=False, valid_tags=GFM_TAGS):
"""
Return Markdown rendered text using GitHub Flavoured Markdown,
with HTML escaped and syntax-highlighting enabled.
"""
if text is None:
return None
if html:
return Markup(sanitize_html(markdown_convert_html(gfm(text)), valid_tags=valid_tags))
else:
return Markup(markdown_convert_text(gfm(text))) | Return Markdown rendered text using GitHub Flavoured Markdown,
with HTML escaped and syntax-highlighting enabled. |
def distance_matrix(a, b, periodic):
'''Calculate a distance matrix between coordinate sets a and b
'''
a = a
b = b[:, np.newaxis]
return periodic_distance(a, b, periodic) | Calculate a distance matrix between coordinate sets a and b |
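A minimal numpy sketch of the broadcasting trick used above: inserting a new axis into b makes the pairwise differences line up. periodic_distance is not shown in this section, so the plain Euclidean case stands in for it here.
import numpy as np

a = np.array([[0.0, 0.0], [1.0, 0.0]])               # 2 points
b = np.array([[0.0, 1.0], [2.0, 2.0], [3.0, 0.0]])   # 3 points

diff = a - b[:, np.newaxis]                 # shape (3, 2, 2) via broadcasting
dist = np.sqrt((diff ** 2).sum(axis=-1))    # Euclidean stand-in for periodic_distance
print(dist.shape)                           # (3, 2): one row per point in b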
def add_dataset(self, name=None, label=None,
x_column_label=None, y_column_label=None, index=None, control=False):
"""Add a dataset to a specific plot.
This method adds a dataset to a plot. Its functional use is imperative
to the plot generation. It handles adding new files as well
as indexing to files that are added to other plots.
All Args default to None. However, these are not the defaults
in the code. See DataImportContainer attributes for defaults in code.
Args:
name (str, optional): Name (path) for file.
Required if reading from a file (at least one).
Required if file_name is not in "general". Must be ".txt" or ".hdf5".
Can include path from working directory.
label (str, optional): Column label in the dataset corresponding to desired SNR value.
Required if reading from a file (at least one).
x_column_label/y_column_label (str, optional): Column label from input file identifying
x/y values. This can override setting in "general". Default
is `x`/`y`.
index (int, optional): Index of plot with preloaded data.
Required if not loading a file.
control (bool, optional): If True, this dataset is set to the control.
This is needed for Ratio plots. It sets
the baseline. Default is False.
Raises:
ValueError: If no options are passed. This means no file indication
nor index.
"""
if name is None and label is None and index is None:
raise ValueError("Attempting to add a dataset without"
+ "supplying index or file information.")
if index is None:
trans_dict = DataImportContainer()
if name is not None:
trans_dict.file_name = name
if label is not None:
trans_dict.label = label
if x_column_label is not None:
trans_dict.x_column_label = x_column_label
if y_column_label is not None:
trans_dict.y_column_label = y_column_label
if control:
self.control = trans_dict
else:
# need to append file to file list.
if 'file' not in self.__dict__:
self.file = []
self.file.append(trans_dict)
else:
if control:
self.control = DataImportContainer()
self.control.index = index
else:
# need to append index to index list.
if 'indices' not in self.__dict__:
self.indices = []
self.indices.append(index)
return | Add a dataset to a specific plot.
This method adds a dataset to a plot. Its functional use is imperative
to the plot generation. It handles adding new files as well
as indexing to files that are added to other plots.
All Args default to None. However, these are not the defaults
in the code. See DataImportContainer attributes for defaults in code.
Args:
name (str, optional): Name (path) for file.
Required if reading from a file (at least one).
Required if file_name is not in "general". Must be ".txt" or ".hdf5".
Can include path from working directory.
label (str, optional): Column label in the dataset corresponding to desired SNR value.
Required if reading from a file (at least one).
x_column_label/y_column_label (str, optional): Column label from input file identifying
x/y values. This can override setting in "general". Default
is `x`/`y`.
index (int, optional): Index of plot with preloaded data.
Required if not loading a file.
control (bool, optional): If True, this dataset is set to the control.
This is needed for Ratio plots. It sets
the baseline. Default is False.
Raises:
ValueError: If no options are passed. This means no file indication
nor index. |
def check(text):
"""Suggest the preferred forms."""
err = "misc.waxed"
msg = u"The modifier following 'waxed' must be an adj.: '{}' is correct"
waxes = ["wax", "waxes", "waxed", "waxing"]
modifiers = [("ebullient", "ebulliently"),
("ecstatic", "ecstatically"),
("eloquent", "eloquently"),
("enthusiastic", "enthusiastically"),
("euphoric", "euphorically"),
("indignant", "indignantly"),
("lyrical", "lyrically"),
("melancholic", "melancholically"),
("metaphorical", "metaphorically"),
("nostalgic", "nostalgically"),
("patriotic", "patriotically"),
("philosophical", "philosophically"),
("poetic", "poetically"),
("rhapsodic", "rhapsodically"),
("romantic", "romantically"),
("sentimental", "sentimentally")
]
def pairs(word):
return [[word + ' ' + pair[0], [word + ' ' + pair[1]]]
for pair in modifiers]
preferred = []
for word in waxes:
preferred += pairs(word)
return preferred_forms_check(text, preferred, err, msg) | Suggest the preferred forms. |
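To make the pairing concrete, here is a tiny stand-alone version of the pairs() helper with just two modifiers; each item pairs the preferred phrase with a list of phrases to flag, which is the shape preferred_forms_check appears to expect.
modifiers = [("lyrical", "lyrically"), ("poetic", "poetically")]

def pairs(word):
    # [preferred phrase, [phrases to flag]]
    return [[word + ' ' + adj, [word + ' ' + adv]] for adj, adv in modifiers]

print(pairs("waxed"))
# [['waxed lyrical', ['waxed lyrically']], ['waxed poetic', ['waxed poetically']]]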
def flush(self):
"""
Flush the write buffers of the stream if applicable.
"""
if self._writable:
with self._seek_lock:
self._flush_raw_or_buffered()
# Clear the buffer
self._write_buffer = bytearray(self._buffer_size)
self._buffer_seek = 0 | Flush the write buffers of the stream if applicable. |
def convert_weights_to_numpy(weights_dict):
"""Convert weights to numpy"""
return dict([(k.replace("arg:", "").replace("aux:", ""), v.asnumpy())
for k, v in weights_dict.items()]) | Convert weights to numpy |
def get_component_product(self, other):
"""Returns the component product of this vector and the given
other vector."""
return Point(self.x * other.x, self.y * other.y) | Returns the component product of this vector and the given
other vector. |
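A short self-contained sketch of the component (Hadamard) product; the namedtuple is only a stand-in for the real Point class, which is assumed to expose x and y.
from collections import namedtuple

Point = namedtuple("Point", ["x", "y"])  # stand-in for the real Point type

def component_product(p, q):
    # multiply matching components rather than taking a dot product
    return Point(p.x * q.x, p.y * q.y)

print(component_product(Point(2, 3), Point(4, 5)))  # Point(x=8, y=15)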
def _handle_lrr(self, data):
"""
Handle Long Range Radio messages.
:param data: LRR message to parse
:type data: string
:returns: :py:class:`~alarmdecoder.messages.LRRMessage`
"""
msg = LRRMessage(data)
if not self._ignore_lrr_states:
self._lrr_system.update(msg)
self.on_lrr_message(message=msg)
return msg | Handle Long Range Radio messages.
:param data: LRR message to parse
:type data: string
:returns: :py:class:`~alarmdecoder.messages.LRRMessage` |
def set(self, key, val, bucket):
""" Set a cached item by key
WARN: Regardless of whether the item is already in the cache,
it will be updated with the new value.
"""
if bucket not in self._cache:
self._cache[bucket] = {}
self._cache[bucket][key] = val | Set a cached item by key
WARN: Regardless of whether the item is already in the cache,
it will be updated with the new value. |
def _send_request(self, xml_request):
""" Send the prepared XML request block to the CPS using the corect protocol.
Args:
xml_request -- A fully formed xml request string for the CPS.
Returns:
The raw xml response string.
Raises:
ConnectionError -- Can't establish a connection with the server.
"""
if self._scheme == 'http':
return self._send_http_request(xml_request)
else:
return self._send_socket_request(xml_request) | Send the prepared XML request block to the CPS using the correct protocol.
Args:
xml_request -- A fully formed xml request string for the CPS.
Returns:
The raw xml response string.
Raises:
ConnectionError -- Can't establish a connection with the server. |
def child_task(self, q, l, gq, gl):
'''child process - this holds GUI elements'''
mp_util.child_close_fds()
from ..lib import wx_processguard
from ..lib.wx_loader import wx
from MAVProxy.modules.mavproxy_misseditor import missionEditorFrame
self.app = wx.App(False)
self.app.frame = missionEditorFrame.MissionEditorFrame(parent=None,id=wx.ID_ANY)
self.app.frame.set_event_queue(q)
self.app.frame.set_event_queue_lock(l)
self.app.frame.set_gui_event_queue(gq)
self.app.frame.set_gui_event_queue_lock(gl)
self.app.frame.Show()
self.app.MainLoop() | child process - this holds GUI elements |
def return_markers(self):
"""Reads the notes of the Ktlx recordings.
"""
ent_file = self._filename.with_suffix('.ent')
if not ent_file.exists():
ent_file = self._filename.with_suffix('.ent.old')
try:
ent_notes = _read_ent(ent_file)
except (FileNotFoundError, PermissionError):
markers = []
else:
allnote = []
for n in ent_notes:
try:
n['value'].keys()
allnote.append(n['value'])
except AttributeError:
lg.debug('Note of length {} was not '
'converted to dict'.format(n['length']))
s_freq = self._hdr['erd']['sample_freq']
pcname = '0CFEBE72-DA20-4b3a-A8AC-CDD41BFE2F0D'
note_time = []
note_name = []
note_note = []
for n in allnote:
if n['Text'] == 'Analyzed Data Note':
continue
if not n['Text']:
continue
if 'User' not in n['Data'].keys():
continue
user1 = n['Data']['User'] == 'Persyst'
user2 = False # n['Data']['User'] == 'eeg'
user3 = n['Data']['User'] == pcname
user4 = n['Data']['User'] == 'XLSpike - Intracranial'
user5 = n['Data']['User'] == 'XLEvent - Intracranial'
if user1 or user2 or user3 or user4 or user5:
continue
if len(n['Data']['User']) == 0:
note_name.append('-unknown-')
else:
note_name.append(n['Data']['User'].split()[0])
note_time.append(n['Stamp'] / s_freq)
note_note.append(n['Text'])
markers = []
for time, name, note in zip(note_time, note_name, note_note):
m = {'name': note + ' (' + name + ')',
'start': time,
'end': time,
'chan': None,
}
markers.append(m)
return markers | Reads the notes of the Ktlx recordings. |
def get_student_email(cmd_args, endpoint=''):
"""Attempts to get the student's email. Returns the email, or None."""
log.info("Attempting to get student email")
if cmd_args.local:
return None
access_token = authenticate(cmd_args, endpoint=endpoint, force=False)
if not access_token:
return None
try:
return get_info(cmd_args, access_token)['email']
except IOError as e:
return None | Attempts to get the student's email. Returns the email, or None. |
def to_unicode(string):
"""
Ensure a passed string is unicode
"""
if isinstance(string, six.binary_type):
return string.decode('utf8')
if isinstance(string, six.text_type):
return string
if six.PY2:
return unicode(string)
return str(string) | Ensure a passed string is unicode |
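On Python 3 the same coercion can be written without the six dependency; this is an illustrative sketch rather than a drop-in replacement, since the original also covers Python 2's unicode().
def to_unicode_py3(value):
    # bytes are decoded, str passes through, everything else is stringified
    if isinstance(value, bytes):
        return value.decode("utf8")
    if isinstance(value, str):
        return value
    return str(value)

print(to_unicode_py3(b"caf\xc3\xa9"))  # 'café'
print(to_unicode_py3(42))              # '42'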
def compute_avg_adj_deg(G):
r"""
Compute the average adjacency degree for each node.
The average adjacency degree is the average of the degrees of a node and
its neighbors.
Parameters
----------
G: Graph
Graph on which the statistic is extracted
"""
return np.sum(np.dot(G.A, G.A), axis=1) / (np.sum(G.A, axis=1) + 1.) | Compute the average adjacency degree for each node.
The average adjacency degree is the average of the degrees of a node and
its neighbors.
Parameters
----------
G: Graph
Graph on which the statistic is extracted |
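A tiny numeric check of the formula on a three-node path graph; the dense array A stands in for G.A.
import numpy as np

A = np.array([[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]], dtype=float)  # path graph 0-1-2

avg_adj_deg = np.sum(np.dot(A, A), axis=1) / (np.sum(A, axis=1) + 1.0)
print(avg_adj_deg)  # approximately [1. 0.667 1.]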
def get_asset_lookup_session_for_repository(self, repository_id=None, proxy=None):
"""Gets the ``OsidSession`` associated with the asset lookup service for the
given repository.
arg: repository_id (osid.id.Id): the ``Id`` of the repository
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetLookupSession) - an
``AssetLookupSession``
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` or ``proxy`` is
``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_asset_lookup()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_asset_lookup()`` and
``supports_visible_federation()`` are ``true``.*
"""
return AssetLookupSession(
self._provider_manager.get_asset_lookup_session_for_repository(repository_id,
proxy),
self._config_map) | Gets the ``OsidSession`` associated with the asset lookup service for the
given repository.
arg: repository_id (osid.id.Id): the ``Id`` of the repository
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetLookupSession) - an
``AssetLookupSession``
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` or ``proxy`` is
``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_asset_lookup()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_asset_lookup()`` and
``supports_visible_federation()`` are ``true``.* |
def batch_annotate_files(
self,
requests,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Service that performs image detection and annotation for a batch of files.
Now only "application/pdf", "image/tiff" and "image/gif" are supported.
This service will extract at most the first 10 frames (gif) or pages
(pdf or tiff) from each file provided and perform detection and annotation
for each image extracted.
Example:
>>> from google.cloud import vision_v1p4beta1
>>>
>>> client = vision_v1p4beta1.ImageAnnotatorClient()
>>>
>>> # TODO: Initialize `requests`:
>>> requests = []
>>>
>>> response = client.batch_annotate_files(requests)
Args:
requests (list[Union[dict, ~google.cloud.vision_v1p4beta1.types.AnnotateFileRequest]]): The list of file annotation requests. Right now we support only one
AnnotateFileRequest in BatchAnnotateFilesRequest.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.vision_v1p4beta1.types.AnnotateFileRequest`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.vision_v1p4beta1.types.BatchAnnotateFilesResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "batch_annotate_files" not in self._inner_api_calls:
self._inner_api_calls[
"batch_annotate_files"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.batch_annotate_files,
default_retry=self._method_configs["BatchAnnotateFiles"].retry,
default_timeout=self._method_configs["BatchAnnotateFiles"].timeout,
client_info=self._client_info,
)
request = image_annotator_pb2.BatchAnnotateFilesRequest(requests=requests)
return self._inner_api_calls["batch_annotate_files"](
request, retry=retry, timeout=timeout, metadata=metadata
) | Service that performs image detection and annotation for a batch of files.
Now only "application/pdf", "image/tiff" and "image/gif" are supported.
This service will extract at most the first 10 frames (gif) or pages
(pdf or tiff) from each file provided and perform detection and annotation
for each image extracted.
Example:
>>> from google.cloud import vision_v1p4beta1
>>>
>>> client = vision_v1p4beta1.ImageAnnotatorClient()
>>>
>>> # TODO: Initialize `requests`:
>>> requests = []
>>>
>>> response = client.batch_annotate_files(requests)
Args:
requests (list[Union[dict, ~google.cloud.vision_v1p4beta1.types.AnnotateFileRequest]]): The list of file annotation requests. Right now we support only one
AnnotateFileRequest in BatchAnnotateFilesRequest.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.vision_v1p4beta1.types.AnnotateFileRequest`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.vision_v1p4beta1.types.BatchAnnotateFilesResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. |
def load(source, **kwargs) -> JsonObj:
""" Deserialize a JSON source.
:param source: a URI, File name or a .read()-supporting file-like object containing a JSON document
:param kwargs: arguments. see: json.load for details
:return: JsonObj representing fp
"""
if isinstance(source, str):
if '://' in source:
req = Request(source)
req.add_header("Accept", "application/json, text/json;q=0.9")
with urlopen(req) as response:
jsons = response.read()
else:
with open(source) as f:
jsons = f.read()
elif hasattr(source, "read"):
jsons = source.read()
else:
raise TypeError("Unexpected type {} for source {}".format(type(source), source))
return loads(jsons, **kwargs) | Deserialize a JSON source.
:param source: a URI, File name or a .read()-supporting file-like object containing a JSON document
:param kwargs: arguments. see: json.load for details
:return: JsonObj representing fp |
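A minimal sketch of the same source dispatch using only the standard library; JsonObj is replaced by a plain dict and the URL branch is omitted for brevity.
import io
import json

def load_sketch(source):
    # file path -> read file; file-like -> read(); anything else is an error
    if isinstance(source, str):
        with open(source) as f:
            text = f.read()
    elif hasattr(source, "read"):
        text = source.read()
    else:
        raise TypeError("Unexpected type {} for source {}".format(type(source), source))
    return json.loads(text)

print(load_sketch(io.StringIO('{"a": 1}')))  # {'a': 1}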
def earthquake_contour_preprocessor(impact_function):
"""Preprocessor to create contour from an earthquake
:param impact_function: Impact function to run.
:type impact_function: ImpactFunction
:return: The contour layer.
:rtype: QgsMapLayer
"""
contour_path = create_smooth_contour(impact_function.hazard)
if os.path.exists(contour_path):
from safe.gis.tools import load_layer
return load_layer(contour_path, tr('Contour'), 'ogr')[0] | Preprocessor to create contour from an earthquake
:param impact_function: Impact function to run.
:type impact_function: ImpactFunction
:return: The contour layer.
:rtype: QgsMapLayer |
def get_range_info(array, component):
"""Get the data range of the array's component"""
r = array.GetRange(component)
comp_range = {}
comp_range['min'] = r[0]
comp_range['max'] = r[1]
comp_range['component'] = array.GetComponentName(component)
return comp_range | Get the data range of the array's component |
def extern_store_bytes(self, context_handle, bytes_ptr, bytes_len):
"""Given a context and raw bytes, return a new Handle to represent the content."""
c = self._ffi.from_handle(context_handle)
return c.to_value(binary_type(self._ffi.buffer(bytes_ptr, bytes_len))) | Given a context and raw bytes, return a new Handle to represent the content. |
def ComputeRoot(hashes):
"""
Compute the root hash.
Args:
hashes (list): the list of hashes to build the root from.
Returns:
bytes: the root hash.
"""
if not len(hashes):
raise Exception('Hashes must have length')
if len(hashes) == 1:
return hashes[0]
tree = MerkleTree(hashes)
return tree.Root.Hash | Compute the root hash.
Args:
hashes (list): the list of hashes to build the root from.
Returns:
bytes: the root hash. |
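For context, a generic Merkle-root computation looks roughly like the sketch below; this is an assumption about the pairing scheme, and the real MerkleTree class may hash nodes differently (for example with double SHA-256).
import hashlib

def merkle_root(hashes):
    if not hashes:
        raise ValueError('Hashes must have length')
    level = list(hashes)
    while len(level) > 1:
        if len(level) % 2:                  # odd level: duplicate the last node
            level.append(level[-1])
        level = [hashlib.sha256(a + b).digest()
                 for a, b in zip(level[::2], level[1::2])]
    return level[0]

leaves = [hashlib.sha256(b'a').digest(), hashlib.sha256(b'b').digest()]
print(merkle_root(leaves).hex())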
def get_agent(msg):
""" Handy hack to handle legacy messages where 'agent' was a list. """
agent = msg['msg']['agent']
if isinstance(agent, list):
agent = agent[0]
return agent | Handy hack to handle legacy messages where 'agent' was a list. |
def choice(self, board: Union[chess.Board, int], *, minimum_weight: int = 1, exclude_moves: Container[chess.Move] = (), random=random) -> Entry:
"""
Uniformly selects a random entry for the given position.
:raises: :exc:`IndexError` if no entries are found.
"""
chosen_entry = None
for i, entry in enumerate(self.find_all(board, minimum_weight=minimum_weight, exclude_moves=exclude_moves)):
if chosen_entry is None or random.randint(0, i) == i:
chosen_entry = entry
if chosen_entry is None:
raise IndexError()
return chosen_entry | Uniformly selects a random entry for the given position.
:raises: :exc:`IndexError` if no entries are found. |
def mkdir(path, mode=0o755, delete=False):
"""Make a directory.
Create a leaf directory and all intermediate ones.
Works like ``mkdir``, except that any intermediate path segment (not just
the rightmost) will be created if it does not exist. This is recursive.
Args:
path (str): Directory to create
mode (int): Directory mode
delete (bool): Delete directory/file if exists
Returns:
bool: True if succeeded else False
"""
logger.info("mkdir: %s" % path)
if os.path.isdir(path):
if not delete:
return True
if not remove(path):
return False
try:
os.makedirs(path, mode)
return True
except Exception:
logger.exception("Failed to mkdir: %s" % path)
return False | Make a directory.
Create a leaf directory and all intermediate ones.
Works like ``mkdir``, except that any intermediate path segment (not just
the rightmost) will be created if it does not exist. This is recursive.
Args:
path (str): Directory to create
mode (int): Directory mode
delete (bool): Delete directory/file if exists
Returns:
bool: True if succeeded else False |
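A stand-alone approximation of the same behaviour using only the standard library; shutil.rmtree stands in for the project-specific remove() helper, and logging is omitted.
import os
import shutil

def mkdir_sketch(path, mode=0o755, delete=False):
    # existing directory: keep it unless delete was requested
    if os.path.isdir(path):
        if not delete:
            return True
        shutil.rmtree(path, ignore_errors=True)
    try:
        os.makedirs(path, mode)
        return True
    except OSError:
        return False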
def _path_pair(self, s):
"""Parse two paths separated by a space."""
# TODO: handle a space in the first path
if s.startswith(b'"'):
parts = s[1:].split(b'" ', 1)
else:
parts = s.split(b' ', 1)
if len(parts) != 2:
self.abort(errors.BadFormat, '?', '?', s)
elif parts[1].startswith(b'"') and parts[1].endswith(b'"'):
parts[1] = parts[1][1:-1]
elif parts[1].startswith(b'"') or parts[1].endswith(b'"'):
self.abort(errors.BadFormat, '?', '?', s)
return [_unquote_c_string(s) for s in parts] | Parse two paths separated by a space. |
def releases(self):
r"""
A dictionary that maps release identifiers to :class:`Release` objects.
Here's an example based on a mirror of the git project's repository
which shows the last ten releases based on tags, where each release
identifier captures a tag without its 'v' prefix:
>>> from pprint import pprint
>>> from vcs_repo_mgr.backends.git import GitRepo
>>> repository = GitRepo(remote='https://github.com/git/git.git',
... release_scheme='tags',
... release_filter=r'^v(\d+(?:\.\d+)*)$')
>>> pprint(repository.ordered_releases[-10:])
[Release(revision=Revision(..., tag='v2.2.2', ...), identifier='2.2.2'),
Release(revision=Revision(..., tag='v2.3.0', ...), identifier='2.3.0'),
Release(revision=Revision(..., tag='v2.3.1', ...), identifier='2.3.1'),
Release(revision=Revision(..., tag='v2.3.2', ...), identifier='2.3.2'),
Release(revision=Revision(..., tag='v2.3.3', ...), identifier='2.3.3'),
Release(revision=Revision(..., tag='v2.3.4', ...), identifier='2.3.4'),
Release(revision=Revision(..., tag='v2.3.5', ...), identifier='2.3.5'),
Release(revision=Revision(..., tag='v2.3.6', ...), identifier='2.3.6'),
Release(revision=Revision(..., tag='v2.3.7', ...), identifier='2.3.7'),
Release(revision=Revision(..., tag='v2.4.0', ...), identifier='2.4.0')]
"""
available_releases = {}
available_revisions = getattr(self, self.release_scheme)
for identifier, revision in available_revisions.items():
match = self.compiled_filter.match(identifier)
if match:
# If the regular expression contains a capturing group we
# set the release identifier to the captured substring
# instead of the complete tag/branch identifier.
captures = match.groups()
if captures:
identifier = captures[0]
available_releases[identifier] = Release(
revision=revision,
identifier=identifier,
)
return available_releases | r"""
A dictionary that maps release identifiers to :class:`Release` objects.
Here's an example based on a mirror of the git project's repository
which shows the last ten releases based on tags, where each release
identifier captures a tag without its 'v' prefix:
>>> from pprint import pprint
>>> from vcs_repo_mgr.backends.git import GitRepo
>>> repository = GitRepo(remote='https://github.com/git/git.git',
... release_scheme='tags',
... release_filter=r'^v(\d+(?:\.\d+)*)$')
>>> pprint(repository.ordered_releases[-10:])
[Release(revision=Revision(..., tag='v2.2.2', ...), identifier='2.2.2'),
Release(revision=Revision(..., tag='v2.3.0', ...), identifier='2.3.0'),
Release(revision=Revision(..., tag='v2.3.1', ...), identifier='2.3.1'),
Release(revision=Revision(..., tag='v2.3.2', ...), identifier='2.3.2'),
Release(revision=Revision(..., tag='v2.3.3', ...), identifier='2.3.3'),
Release(revision=Revision(..., tag='v2.3.4', ...), identifier='2.3.4'),
Release(revision=Revision(..., tag='v2.3.5', ...), identifier='2.3.5'),
Release(revision=Revision(..., tag='v2.3.6', ...), identifier='2.3.6'),
Release(revision=Revision(..., tag='v2.3.7', ...), identifier='2.3.7'),
Release(revision=Revision(..., tag='v2.4.0', ...), identifier='2.4.0')] |
def ec(ns=None, cn=None, di=None, lo=None, iq=None, ico=None):
# pylint: disable=redefined-outer-name
"""
This function is a wrapper for
:meth:`~pywbem.WBEMConnection.EnumerateClasses`.
Enumerate the subclasses of a class, or the top-level classes in a
namespace.
Parameters:
ns (:term:`string`):
Name of the CIM namespace to be used (case independent).
If `None`, defaults to the namespace of the `cn` parameter if
specified as a `CIMClassName`, or to the default namespace of the
connection.
cn (:term:`string` or :class:`~pywbem.CIMClassName`):
Name of the class whose subclasses are to be enumerated (case
independent).
`None` will enumerate the top-level classes.
If specified as a `CIMClassName` object, its `host` attribute will be
ignored.
di (:class:`py:bool`):
DeepInheritance flag: Include also indirect subclasses.
`None` will cause the server default of `False` to be used.
lo (:class:`py:bool`):
LocalOnly flag: Exclude inherited properties.
`None` will cause the server default of `True` to be used.
iq (:class:`py:bool`):
IncludeQualifiers flag: Include qualifiers.
`None` will cause the server default of `True` to be used.
ico (:class:`py:bool`):
IncludeClassOrigin flag: Include class origin information for
properties and methods in the retrieved class.
`None` will cause the server default of `False` to be used.
Returns:
list of :class:`~pywbem.CIMClass`:
The enumerated classes.
"""
return CONN.EnumerateClasses(ns,
ClassName=cn,
DeepInheritance=di,
LocalOnly=lo,
IncludeQualifiers=iq,
IncludeClassOrigin=ico) | This function is a wrapper for
:meth:`~pywbem.WBEMConnection.EnumerateClasses`.
Enumerate the subclasses of a class, or the top-level classes in a
namespace.
Parameters:
ns (:term:`string`):
Name of the CIM namespace to be used (case independent).
If `None`, defaults to the namespace of the `cn` parameter if
specified as a `CIMClassName`, or to the default namespace of the
connection.
cn (:term:`string` or :class:`~pywbem.CIMClassName`):
Name of the class whose subclasses are to be enumerated (case
independent).
`None` will enumerate the top-level classes.
If specified as a `CIMClassName` object, its `host` attribute will be
ignored.
di (:class:`py:bool`):
DeepInheritance flag: Include also indirect subclasses.
`None` will cause the server default of `False` to be used.
lo (:class:`py:bool`):
LocalOnly flag: Exclude inherited properties.
`None` will cause the server default of `True` to be used.
iq (:class:`py:bool`):
IncludeQualifiers flag: Include qualifiers.
`None` will cause the server default of `True` to be used.
ico (:class:`py:bool`):
IncludeClassOrigin flag: Include class origin information for
properties and methods in the retrieved class.
`None` will cause the server default of `False` to be used.
Returns:
list of :class:`~pywbem.CIMClass`:
The enumerated classes. |
def matches(self, stream):
"""Check if this selector matches the given stream
Args:
stream (DataStream): The stream to check
Returns:
bool: True if this selector matches the stream
"""
if self.match_type != stream.stream_type:
return False
if self.match_id is not None:
return self.match_id == stream.stream_id
if self.match_spec == DataStreamSelector.MatchUserOnly:
return not stream.system
elif self.match_spec == DataStreamSelector.MatchSystemOnly:
return stream.system
elif self.match_spec == DataStreamSelector.MatchUserAndBreaks:
return (not stream.system) or (stream.system and (stream.stream_id in DataStream.KnownBreakStreams))
# The other case is that match_spec is MatchCombined, which matches everything
# regardless of system of user flag
return True | Check if this selector matches the given stream
Args:
stream (DataStream): The stream to check
Returns:
bool: True if this selector matches the stream |
def put_abs (self, r, c, ch):
'''Screen array starts at 1 index.'''
r = constrain (r, 1, self.rows)
c = constrain (c, 1, self.cols)
if isinstance(ch, bytes):
ch = self._decode(ch)[0]
else:
ch = ch[0]
self.w[r-1][c-1] = ch | Screen array starts at 1 index. |
def _do_close(self):
"""Tear down this object, after we've agreed to close
with the server."""
AMQP_LOGGER.debug('Closed channel #%d', self.channel_id)
self.is_open = False
channel_id, self.channel_id = self.channel_id, None
connection, self.connection = self.connection, None
if connection:
connection.channels.pop(channel_id, None)
connection._avail_channel_ids.append(channel_id)
self.callbacks.clear()
self.cancel_callbacks.clear()
self.events.clear()
self.no_ack_consumers.clear() | Tear down this object, after we've agreed to close
with the server. |
def bucket_to_dataframe(name, buckets, append_name=None):
'''A function that turns elasticsearch aggregation buckets into dataframes
:param name: The name of the bucket (will be a column in the dataframe)
:type name: str
:param bucket: a bucket from elasticsearch results
:type bucket: list[dict]
:returns: pandas.DataFrame
'''
expanded_buckets = []
for item in buckets:
if type(item) is dict:
single_dict = item
else:
single_dict = item.to_dict()
single_dict[name] = single_dict.pop('doc_count')
if append_name:
persistance_dict = single_dict.copy()
for key in persistance_dict.keys():
single_dict[append_name + '.' + key] = single_dict.pop(key)
expanded_buckets.append(single_dict)
return pd.DataFrame(expanded_buckets) | A function that turns elasticsearch aggregation buckets into dataframes
:param name: The name of the bucket (will be a column in the dataframe)
:type name: str
:param bucket: a bucket from elasticsearch results
:type bucket: list[dict]
:returns: pandas.DataFrame |
def credit_card_number(self, card_type=None):
""" Returns a valid credit card number. """
card = self._credit_card_type(card_type)
prefix = self.random_element(card.prefixes)
number = self._generate_number(self.numerify(prefix), card.length)
return number | Returns a valid credit card number. |
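The interesting part is hidden inside _generate_number; typically a "valid" number means the last digit is a Luhn check digit, so a hedged sketch of that idea (an assumption about the internals, not the library's actual code) looks like this:
import random

def luhn_checksum(number):
    total = 0
    for i, ch in enumerate(reversed(number)):
        d = int(ch)
        if i % 2 == 1:          # double every second digit from the right
            d *= 2
            if d > 9:
                d -= 9
        total += d
    return total % 10

def generate_number(prefix, length):
    body = prefix + ''.join(str(random.randint(0, 9)) for _ in range(length - len(prefix) - 1))
    check_digit = (10 - luhn_checksum(body + '0')) % 10
    return body + str(check_digit)

number = generate_number('4', 16)
print(number, luhn_checksum(number) == 0)  # 16-digit Visa-style number that passes Luhn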
def _search_ldap(self, ldap, con, username):
"""
Searches LDAP for user, assumes ldap_search is set.
:param ldap: The ldap module reference
:param con: The ldap connection
:param username: username to match with auth_ldap_uid_field
:return: ldap object array
"""
if self.auth_ldap_append_domain:
username = username + "@" + self.auth_ldap_append_domain
if self.auth_ldap_search_filter:
filter_str = "(&%s(%s=%s))" % (
self.auth_ldap_search_filter,
self.auth_ldap_uid_field,
username,
)
else:
filter_str = "(%s=%s)" % (self.auth_ldap_uid_field, username)
user = con.search_s(
self.auth_ldap_search,
ldap.SCOPE_SUBTREE,
filter_str,
[
self.auth_ldap_firstname_field,
self.auth_ldap_lastname_field,
self.auth_ldap_email_field,
],
)
if user:
if not user[0][0]:
return None
return user | Searches LDAP for user, assumes ldap_search is set.
:param ldap: The ldap module reference
:param con: The ldap connection
:param username: username to match with auth_ldap_uid_field
:return: ldap object array |
def multi_muscle_align(data, samples, ipyclient):
"""
Sends the cluster bits to nprocessors for muscle alignment. They return
with indel.h5 handles to be concatenated into a joint h5.
"""
LOGGER.info("starting alignments")
## get client
lbview = ipyclient.load_balanced_view()
start = time.time()
printstr = " aligning clusters | {} | s6 |"
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(20, 0, printstr.format(elapsed), spacer=data._spacer)
## submit clustbits as jobs to engines. The chunkfiles are removed when they
## are finished so this job can even be restarted if it was half finished,
## though that is probably rare.
path = os.path.join(data.tmpdir, data.name + ".chunk_*")
clustbits = glob.glob(path)
jobs = {}
for idx in xrange(len(clustbits)):
args = [data, samples, clustbits[idx]]
jobs[idx] = lbview.apply(persistent_popen_align3, *args)
allwait = len(jobs)
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(20, 0, printstr.format(elapsed), spacer=data._spacer)
## print progress while bits are aligning
while 1:
finished = [i.ready() for i in jobs.values()]
fwait = sum(finished)
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(allwait, fwait, printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
if all(finished):
break
## check for errors in muscle_align_across
keys = jobs.keys()
for idx in keys:
if not jobs[idx].successful():
LOGGER.error("error in persistent_popen_align %s", jobs[idx].exception())
raise IPyradWarningExit("error in step 6 {}".format(jobs[idx].exception()))
del jobs[idx]
print("") | Sends the cluster bits to nprocessors for muscle alignment. They return
with indel.h5 handles to be concatenated into a joint h5. |
def approveproposal(self, proposal_ids, account=None, approver=None, **kwargs):
""" Approve Proposal
:param list proposal_ids: Ids of the proposals
:param str approver: The account or key to use for approval
(defaults to ``account``)
:param str account: (optional) the account to allow access
to (defaults to ``default_account``)
"""
from .proposal import Proposal
if not account:
if "default_account" in self.config:
account = self.config["default_account"]
if not account:
raise ValueError("You need to provide an account")
account = Account(account, blockchain_instance=self)
is_key = approver and approver[:3] == self.prefix
if not approver and not is_key:
approver = account
elif approver and not is_key:
approver = Account(approver, blockchain_instance=self)
else:
approver = PublicKey(approver)
if not isinstance(proposal_ids, (list, set, tuple)):
proposal_ids = {proposal_ids}
op = []
for proposal_id in proposal_ids:
proposal = Proposal(proposal_id, blockchain_instance=self)
update_dict = {
"fee": {"amount": 0, "asset_id": "1.3.0"},
"fee_paying_account": account["id"],
"proposal": proposal["id"],
"prefix": self.prefix,
}
if is_key:
update_dict.update({"key_approvals_to_add": [str(approver)]})
else:
update_dict.update({"active_approvals_to_add": [approver["id"]]})
op.append(operations.Proposal_update(**update_dict))
if is_key:
self.txbuffer.appendSigner(approver, "active")
return self.finalizeOp(op, account["name"], "active", **kwargs)
return self.finalizeOp(op, approver, "active", **kwargs) | Approve Proposal
:param list proposal_ids: Ids of the proposals
:param str approver: The account or key to use for approval
(defaults to ``account``)
:param str account: (optional) the account to allow access
to (defaults to ``default_account``) |
def get_combo(self, symbol):
""" get group by child symbol """
for parent, legs in self.instrument_combos.items():
if symbol == parent or symbol in legs.keys():
return {
"parent": self.get_instrument(parent),
"legs": legs,
}
return {
"parent": None,
"legs": {},
} | get group by child symbol |
def _JModule(spec, javaname):
""" (internal) Front end for creating a java module dynamically """
cls = _JImportFactory(spec, javaname)
out = cls(spec.name)
return out | (internal) Front end for creating a java module dynamically |
def can_overlap(self, contig, strand=None):
"""
Is this locus on the same contig and (optionally) on the same strand?
"""
return (self.on_contig(contig) and
(strand is None or self.on_strand(strand))) | Is this locus on the same contig and (optionally) on the same strand? |
def populateViewTree(self, view):
'''
Populates the View tree.
'''
vuid = view.getUniqueId()
text = view.__smallStr__()
if view.getParent() is None:
self.viewTree.insert('', Tkinter.END, vuid, text=text)
else:
self.viewTree.insert(view.getParent().getUniqueId(), Tkinter.END, vuid, text=text, tags=('ttk'))
self.viewTree.set(vuid, 'T', '*' if view.isTarget() else ' ')
self.viewTree.tag_bind('ttk', '<1>', self.viewTreeItemClicked) | Populates the View tree. |
def query_one(cls, *args, **kwargs):
""" Same as collection.find_one, but return Document then dict """
doc = cls._coll.find_one(*args, **kwargs)
if doc:
return cls.from_storage(doc) | Same as collection.find_one, but returns a Document rather than a dict |
def urljoin(base, path=None):
"""Join a base url with a relative path."""
# /foo/bar + baz makes /foo/bar/baz instead of /foo/baz
if path is None:
url = base
else:
if not base.endswith('/'):
base += '/'
url = urllib.parse.urljoin(base, str(path))
return url | Join a base url with a relative path. |
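A self-contained check of the trailing-slash behaviour described in the comment; without the added slash, urllib would drop the last path segment of the base.
import urllib.parse

def urljoin_sketch(base, path=None):
    if path is None:
        return base
    if not base.endswith('/'):
        base += '/'
    return urllib.parse.urljoin(base, str(path))

print(urljoin_sketch('/foo/bar', 'baz'))        # /foo/bar/baz
print(urllib.parse.urljoin('/foo/bar', 'baz'))  # /foo/baz (the behaviour being avoided)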
def read(self, path, ext=None, start=None, stop=None, recursive=False, npartitions=None):
"""
Sets up Spark RDD across S3 or GS objects specified by path.
Returns RDD of <string bucket keyname, string buffer> k/v pairs.
"""
from .utils import connection_with_anon, connection_with_gs
path = addextension(path, ext)
scheme, bucket_name, keylist = self.getfiles(
path, start=start, stop=stop, recursive=recursive)
if not keylist:
raise FileNotFoundError("No objects found for '%s'" % path)
credentials = self.credentials
self.nfiles = len(keylist)
if spark and isinstance(self.engine, spark):
def getsplit(kvIter):
if scheme == 's3' or scheme == 's3n':
conn = connection_with_anon(credentials)
bucket = conn.get_bucket(bucket_name)
elif scheme == 'gs':
conn = boto.storage_uri(bucket_name, 'gs')
bucket = conn.get_bucket()
else:
raise NotImplementedError("No file reader implementation for URL scheme " + scheme)
for kv in kvIter:
idx, keyname = kv
key = bucket.get_key(keyname)
buf = key.get_contents_as_string()
yield idx, buf, keyname
npartitions = min(npartitions, self.nfiles) if npartitions else self.nfiles
rdd = self.engine.parallelize(enumerate(keylist), npartitions)
return rdd.mapPartitions(getsplit)
else:
if scheme == 's3' or scheme == 's3n':
conn = connection_with_anon(credentials)
bucket = conn.get_bucket(bucket_name)
elif scheme == 'gs':
conn = connection_with_gs(bucket_name)
bucket = conn.get_bucket()
else:
raise NotImplementedError("No file reader implementation for URL scheme " + scheme)
def getsplit(kv):
idx, keyName = kv
key = bucket.get_key(keyName)
buf = key.get_contents_as_string()
return idx, buf, keyName
return [getsplit(kv) for kv in enumerate(keylist)] | Sets up Spark RDD across S3 or GS objects specified by `path`.
Returns RDD of <string bucket keyname, string buffer> k/v pairs. |
def open_netcdf_writer(self, flatten=False, isolate=False, timeaxis=1):
"""Prepare a new |NetCDFInterface| object for writing data."""
self._netcdf_writer = netcdftools.NetCDFInterface(
flatten=bool(flatten),
isolate=bool(isolate),
timeaxis=int(timeaxis)) | Prepare a new |NetCDFInterface| object for writing data. |
def _add_text_ngrams(self, witness, minimum, maximum):
"""Adds n-gram data from `witness` to the data store.
:param witness: witness to get n-grams from
:type witness: `WitnessText`
:param minimum: minimum n-gram size
:type minimum: `int`
:param maximum: maximum n-gram size
:type maximum: `int`
"""
text_id = self._get_text_id(witness)
self._logger.info('Adding n-grams ({} <= n <= {}) for {}'.format(
minimum, maximum, witness.get_filename()))
skip_sizes = []
for size in range(minimum, maximum + 1):
if self._has_ngrams(text_id, size):
self._logger.info(
'{}-grams are already in the database'.format(size))
skip_sizes.append(size)
for size, ngrams in witness.get_ngrams(minimum, maximum, skip_sizes):
self._add_text_size_ngrams(text_id, size, ngrams) | Adds n-gram data from `witness` to the data store.
:param witness: witness to get n-grams from
:type witness: `WitnessText`
:param minimum: minimum n-gram size
:type minimum: `int`
:param maximum: maximum n-gram size
:type maximum: `int` |
def _get_motor_parameters(json_file):
"""Returns a dictionary with joints as keys, and a description (dict) of each joint as value"""
with open(json_file) as motor_fd:
global_config = json.load(motor_fd)
motors = global_config["motors"]
# Returned dict
motor_config = {}
# Add motor to the config
for motor in motors:
motor_config[motor] = motors[motor]
return motor_config | Returns a dictionary with joints as keys, and a description (dict) of each joint as value |
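A small end-to-end sketch of the JSON shape _get_motor_parameters expects; the file name and motor fields are invented:
import json
config = {"motors": {"shoulder_pitch": {"id": 1, "offset": 0.0}}}
with open("robot.json", "w") as fh:   # hypothetical config file
    json.dump(config, fh)
print(_get_motor_parameters("robot.json"))
# {'shoulder_pitch': {'id': 1, 'offset': 0.0}}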
def has_nrows(
state,
incorrect_msg="Your query returned a table with {{n_stu}} row{{'s' if n_stu > 1 else ''}} while it should return a table with {{n_sol}} row{{'s' if n_sol > 1 else ''}}.",
):
"""Test whether the student and solution query results have equal numbers of rows.
Args:
incorrect_msg: If specified, this overrides the automatically generated feedback message
in case the number of rows in the student and solution query don't match.
"""
# check that query returned something
has_result(state)
# assumes that columns cannot be jagged in size
n_stu = len(next(iter(state.student_result.values())))
n_sol = len(next(iter(state.solution_result.values())))
if n_stu != n_sol:
_msg = state.build_message(
incorrect_msg, fmt_kwargs={"n_stu": n_stu, "n_sol": n_sol}
)
state.do_test(_msg)
return state | Test whether the student and solution query results have equal numbers of rows.
Args:
incorrect_msg: If specified, this overrides the automatically generated feedback message
in case the number of rows in the student and solution query don't match. |
def _hijacked_run_baton_query(
self, baton_binary: BatonBinary, program_arguments: List[str]=None, input_data: Any=None) -> List[Dict]:
"""
Hijacked `run_baton_query` method with hijacking to add the `--recursive` flag to calls to `baton-chmod` that
originate from code called from frames with the ids in `self._hijack_frame_ids`.
:param baton_binary: see `BatonRunner.run_baton_query`
:param program_arguments: see `BatonRunner.run_baton_query`
:param input_data: see `BatonRunner.run_baton_query`
:return: see `BatonRunner.run_baton_query`
"""
if baton_binary == BatonBinary.BATON_CHMOD:
current_frame = inspect.currentframe()
def frame_code_in_same_file(frame) -> bool:
return frame.f_code.co_filename == current_frame.f_code.co_filename
frame_back = current_frame.f_back
assert frame_code_in_same_file(frame_back)
while frame_back is not None and frame_code_in_same_file(frame_back):
if id(frame_back) in self._hijack_frame_ids:
return self._original_run_baton_query(baton_binary, [BATON_CHMOD_RECURSIVE_FLAG], input_data)
frame_back = frame_back.f_back
return self._original_run_baton_query(baton_binary, program_arguments, input_data) | Hijacked `run_baton_query` method with hijacking to add the `--recursive` flag to calls to `baton-chmod` that
originate from code called from frames with the ids in `self._hijack_frame_ids`.
:param baton_binary: see `BatonRunner.run_baton_query`
:param program_arguments: see `BatonRunner.run_baton_query`
:param input_data: see `BatonRunner.run_baton_query`
:return: see `BatonRunner.run_baton_query` |
def _axis(self, axis):
"""
Return the corresponding labels taking into account the axis.
The axis could be horizontal (0) or vertical (1).
"""
return self.df.columns if axis == 0 else self.df.index | Return the corresponding labels taking into account the axis.
The axis could be horizontal (0) or vertical (1). |
def dump(obj, attributes = True, _refset = None):
"Show full value of a data object"
if _refset is None:
_refset = set()
if obj is None:
return None
elif isinstance(obj, DataObject):
if id(obj) in _refset:
attributes = False
else:
_refset.add(id(obj))
cls = type(obj)
clsname = getattr(cls, '__module__', '<unknown>') + '.' + getattr(cls, '__name__', '<unknown>')
baseresult = {'_type': clsname, '_key': obj.getkey()}
if not attributes:
return baseresult
else:
baseresult.update((k,dump(v, attributes, _refset)) for k,v in vars(obj).items() if k[:1] != '_')
_refset.remove(id(obj))
return baseresult
elif isinstance(obj, ReferenceObject):
if obj._ref is not None:
return dump(obj._ref, attributes, _refset)
else:
return {'_ref':obj.getkey()}
elif isinstance(obj, WeakReferenceObject):
return {'_weakref':obj.getkey()}
elif isinstance(obj, DataObjectSet):
return dump(list(obj.dataset()))
elif isinstance(obj, dict):
return dict((k, dump(v, attributes, _refset)) for k,v in obj.items())
elif isinstance(obj, list) or isinstance(obj, tuple) or isinstance(obj, set):
return [dump(v, attributes, _refset) for v in obj]
else:
return obj | Show full value of a data object |
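For plain builtin values, dump simply recurses into containers and passes scalars through; a minimal sketch (it assumes the data-object classes referenced above are importable so the isinstance checks resolve):
print(dump({"a": [1, (2, 3)], "b": {4}}))
# {'a': [1, [2, 3]], 'b': [4]}   -- tuples and sets come back as plain lists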
def expr_str(expr, sc_expr_str_fn=standard_sc_expr_str):
"""
Returns the string representation of the expression 'expr', as in a Kconfig
file.
Passing subexpressions of expressions to this function works as expected.
sc_expr_str_fn (default: standard_sc_expr_str):
This function is called for every symbol/choice (hence "sc") appearing in
the expression, with the symbol/choice as the argument. It is expected to
return a string to be used for the symbol/choice.
This can be used e.g. to turn symbols/choices into links when generating
documentation, or for printing the value of each symbol/choice after it.
Note that quoted values are represented as constant symbols
(Symbol.is_constant == True).
"""
if expr.__class__ is not tuple:
return sc_expr_str_fn(expr)
if expr[0] is AND:
return "{} && {}".format(_parenthesize(expr[1], OR, sc_expr_str_fn),
_parenthesize(expr[2], OR, sc_expr_str_fn))
if expr[0] is OR:
# This turns A && B || C && D into "(A && B) || (C && D)", which is
# redundant, but more readable
return "{} || {}".format(_parenthesize(expr[1], AND, sc_expr_str_fn),
_parenthesize(expr[2], AND, sc_expr_str_fn))
if expr[0] is NOT:
if expr[1].__class__ is tuple:
return "!({})".format(expr_str(expr[1], sc_expr_str_fn))
return "!" + sc_expr_str_fn(expr[1]) # Symbol
# Relation
#
# Relation operands are always symbols (quoted strings are constant
# symbols)
return "{} {} {}".format(sc_expr_str_fn(expr[1]), _REL_TO_STR[expr[0]],
sc_expr_str_fn(expr[2])) | Returns the string representation of the expression 'expr', as in a Kconfig
file.
Passing subexpressions of expressions to this function works as expected.
sc_expr_str_fn (default: standard_sc_expr_str):
This function is called for every symbol/choice (hence "sc") appearing in
the expression, with the symbol/choice as the argument. It is expected to
return a string to be used for the symbol/choice.
This can be used e.g. to turn symbols/choices into links when generating
documentation, or for printing the value of each symbol/choice after it.
Note that quoted values are represented as constant symbols
(Symbol.is_constant == True). |
def pid_exists(pid):
""" Determines if a system process identifer exists in process table.
"""
try:
os.kill(pid, 0)
except OSError as exc:
return exc.errno == errno.EPERM
else:
return True | Determines if a system process identifier exists in the process table. |
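A minimal check of pid_exists above; note that EPERM (the process exists but belongs to another user) still counts as existing:
import os
print(pid_exists(os.getpid()))   # True: the current process certainly exists
print(pid_exists(1))             # usually True: PID 1 exists, typically reached via the EPERM branch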
def sequence_to_graph(G, seq, color='black'):
"""
Automatically construct graph given a sequence of characters.
"""
for x in seq:
if x.endswith("_1"): # Mutation
G.node(x, color=color, width="0.1", shape="circle", label="")
else:
G.node(x, color=color)
for a, b in pairwise(seq):
G.edge(a, b, color=color) | Automatically construct graph given a sequence of characters. |
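A usage sketch for sequence_to_graph, assuming the graphviz package and a pairwise helper (the standard itertools recipe) are available as the function expects; the node names are invented:
from graphviz import Digraph
G = Digraph()
sequence_to_graph(G, ["A", "B_1", "C"], color="blue")
print(G.source)   # DOT text with edges A -> B_1 -> C, where B_1 is drawn as a small unlabeled circle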
def is_left(point0, point1, point2):
""" Tests if a point is Left|On|Right of an infinite line.
Ported from the C++ version at http://geomalgorithms.com/a03-_inclusion.html
.. note:: This implementation only works in 2-dimensional space.
:param point0: Point P0
:param point1: Point P1
:param point2: Point P2
:return:
>0 for P2 left of the line through P0 and P1
=0 for P2 on the line
<0 for P2 right of the line
"""
return ((point1[0] - point0[0]) * (point2[1] - point0[1])) - ((point2[0] - point0[0]) * (point1[1] - point0[1])) | Tests if a point is Left|On|Right of an infinite line.
Ported from the C++ version at http://geomalgorithms.com/a03-_inclusion.html
.. note:: This implementation only works in 2-dimensional space.
:param point0: Point P0
:param point1: Point P1
:param point2: Point P2
:return:
>0 for P2 left of the line through P0 and P1
=0 for P2 on the line
<0 for P2 right of the line |
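A worked check of the sign convention, using made-up points around the horizontal line from (0, 0) to (1, 0):
print(is_left((0, 0), (1, 0), (0.5, 1)))    #  1 -> P2 is left of the line
print(is_left((0, 0), (1, 0), (0.5, 0)))    #  0 -> P2 is on the line
print(is_left((0, 0), (1, 0), (0.5, -1)))   # -1 -> P2 is right of the line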
def arg_tup_to_dict(argument_tuples):
"""Given a set of argument tuples, set their value in a data dictionary if not blank"""
data = dict()
for arg_name, arg_val in argument_tuples:
if arg_val is not None:
if arg_val is True:
arg_val = 'true'
elif arg_val is False:
arg_val = 'false'
data[arg_name] = arg_val
return data | Given a set of argument tuples, set their value in a data dictionary if not blank |
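A small sketch with invented argument names, showing that booleans are stringified and None entries are dropped:
pairs = [("limit", 10), ("verbose", True), ("cursor", None), ("dry_run", False)]
print(arg_tup_to_dict(pairs))
# {'limit': 10, 'verbose': 'true', 'dry_run': 'false'}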
def remove(src, rel, dst):
"""
Returns an SQL statement that removes edges from
the SQL backing store. Either `src` or `dst` may
be specified, even both.
:param src: The source node.
:param rel: The relation.
:param dst: The destination node.
"""
smt = 'DELETE FROM %s' % rel
queries = []
params = []
if src is not None:
queries.append('src = ?')
params.append(src)
if dst is not None:
queries.append('dst = ?')
params.append(dst)
if not queries:
return smt, params
smt = '%s WHERE %s' % (smt, ' AND '.join(queries))
return smt, params | Returns an SQL statement that removes edges from
the SQL backing store. Either `src` or `dst` may
be specified, even both.
:param src: The source node.
:param rel: The relation.
:param dst: The destination node. |
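The helper only builds the statement and parameter list; a sketch of what it produces for an invented edge table:
print(remove("alice", "knows", "bob"))
# ('DELETE FROM knows WHERE src = ? AND dst = ?', ['alice', 'bob'])
print(remove(None, "knows", None))
# ('DELETE FROM knows', [])
Note that the relation name is interpolated directly into the SQL, so it must come from trusted input.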
def ipoib_interfaces():
"""Return a list of IPOIB capable ethernet interfaces"""
interfaces = []
for interface in network_interfaces():
try:
driver = re.search('^driver: (.+)$', subprocess.check_output([
'ethtool', '-i',
interface]).decode(), re.M).group(1)
if driver in IPOIB_DRIVERS:
interfaces.append(interface)
except Exception:
log("Skipping interface %s" % interface, level=INFO)
continue
return interfaces | Return a list of IPOIB capable ethernet interfaces |
def extract_cookies(self, response, request, referrer_host=None):
'''Wrapped ``extract_cookies``.
Args:
response: An instance of :class:`.http.request.Response`.
request: An instance of :class:`.http.request.Request`.
referrer_host (str): A hostname or IP address of the referrer
URL.
'''
new_response = HTTPResponseInfoWrapper(response)
new_request = convert_http_request(request, referrer_host)
self._cookie_jar.extract_cookies(new_response, new_request) | Wrapped ``extract_cookies``.
Args:
response: An instance of :class:`.http.request.Response`.
request: An instance of :class:`.http.request.Request`.
referrer_host (str): A hostname or IP address of the referrer
URL. |
def print_tree(
expr, attr='operands', padding='', exclude_type=None, depth=None,
unicode=True, srepr_leaves=False, _last=False, _root=True, _level=0,
_print=True):
"""Print a tree representation of the structure of `expr`
Args:
expr (Expression): expression to render
attr (str): The attribute from which to get the children of `expr`
padding (str): Whitespace by which the entire tree is indented
exclude_type (type): Type (or list of types) which should never be
expanded recursively
depth (int or None): Maximum depth of the tree to be printed
unicode (bool): If True, use unicode line-drawing symbols for the tree,
and print expressions in a unicode representation.
If False, use an ASCII approximation.
srepr_leaves (bool): Whether or not to render leaves with `srepr`,
instead of `ascii`/`unicode`
See also:
:func:`tree` returns the result as a string instead of printing it
"""
from qnet.printing import srepr
lines = []
if unicode:
draw = {'leaf': '└─ ', 'branch': '├─ ', 'line': '│'}
sub_render = _shorten_render_unicode()
else:
draw = {'leaf': '+- ', 'branch': '+- ', 'line': '|'}
sub_render = _shorten_render_ascii()
to_str = lambda expr: render_head_repr(
expr, sub_render=sub_render, key_sub_render=sub_render)
if _root:
lines.append(". " + to_str(expr))
else:
if _last:
lines.append(padding[:-1] + draw['leaf'] + to_str(expr))
else:
lines.append(padding[:-1] + draw['branch'] + to_str(expr))
padding = padding + ' '
try:
children = getattr(expr, attr)
except AttributeError:
children = []
if exclude_type is not None:
if isinstance(expr, exclude_type):
children = []
if depth is not None:
if depth <= _level:
children = []
for count, child in enumerate(children):
if hasattr(child, attr):
if count == len(children)-1:
lines += print_tree(
child, attr, padding + ' ',
exclude_type=exclude_type, depth=depth, unicode=unicode,
srepr_leaves=srepr_leaves, _last=True, _root=False,
_level=_level+1)
else:
lines += print_tree(
child, attr, padding + draw['line'],
exclude_type=exclude_type, depth=depth, unicode=unicode,
srepr_leaves=srepr_leaves, _last=False, _root=False,
_level=_level+1)
else:
if count == len(children)-1:
if srepr_leaves:
lines.append(padding + draw['leaf'] + srepr(child))
else:
lines.append(padding + draw['leaf'] + to_str(child))
else:
if srepr_leaves:
lines.append(padding + draw['branch'] + srepr(child))
else:
lines.append(padding + draw['branch'] + to_str(child))
if _root:
if _print:
print("\n".join(lines))
else:
return lines
else:
return lines | Print a tree representation of the structure of `expr`
Args:
expr (Expression): expression to render
attr (str): The attribute from which to get the children of `expr`
padding (str): Whitespace by which the entire tree is indented
exclude_type (type): Type (or list of types) which should never be
expanded recursively
depth (int or None): Maximum depth of the tree to be printed
unicode (bool): If True, use unicode line-drawing symbols for the tree,
and print expressions in a unicode representation.
If False, use an ASCII approximation.
srepr_leaves (bool): Whether or not to render leaves with `srepr`,
instead of `ascii`/`unicode`
See also:
:func:`tree` returns the result as a string instead of printing it |
def deploy_directory(directory, auth=None):
"""Deploy all files in a given directory.
:param str directory: the path to a directory
:param tuple[str] auth: A pair of (str username, str password) to give to the auth keyword of the constructor of
:class:`artifactory.ArtifactoryPath`. Defaults to the result of :func:`get_arty_auth`.
"""
for file in os.listdir(directory):
full_path = os.path.join(directory, file)
if file.endswith(BELANNO_EXTENSION):
name = file[:-len(BELANNO_EXTENSION)]
log.info('deploying annotation %s', full_path)
deploy_annotation(full_path, name, auth=auth)
elif file.endswith(BELNS_EXTENSION):
name = file[:-len(BELNS_EXTENSION)]
log.info('deploying namespace %s', full_path)
deploy_namespace(full_path, name, auth=auth)
elif file.endswith(BEL_EXTENSION):
name = file[:-len(BEL_EXTENSION)]
log.info('deploying knowledge %s', full_path)
deploy_knowledge(full_path, name, auth=auth)
else:
log.debug('not deploying %s', full_path) | Deploy all files in a given directory.
:param str directory: the path to a directory
:param tuple[str] auth: A pair of (str username, str password) to give to the auth keyword of the constructor of
:class:`artifactory.ArtifactoryPath`. Defaults to the result of :func:`get_arty_auth`. |
def get_current_cmus():
"""
Get the current song from cmus.
"""
result = subprocess.run('cmus-remote -Q'.split(' '), check=True,
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
info = {}
for line in result.stdout.decode().split('\n'):
line = line.split(' ')
if line[0] != 'tag':
continue
key = line[1]
if key in ['album', 'title', 'artist', 'albumartist'] and\
key not in info:
info[key] = ' '.join(line[2:])
if 'albumartist' in info:
info['artist'] = info['albumartist']
del info['albumartist']
return Song(**info) | Get the current song from cmus. |
def get_frame_locals(stepback=0):
"""Returns locals dictionary from a given frame.
:param int stepback:
:rtype: dict
"""
with Frame(stepback=stepback) as frame:
locals_dict = frame.f_locals
return locals_dict | Returns locals dictionary from a given frame.
:param int stepback:
:rtype: dict |
def getEdgeDirected(self, networkId, edgeId, verbose=None):
"""
Returns true if the edge specified by the `edgeId` and `networkId` parameters is directed.
:param networkId: SUID of the network containing the edge
:param edgeId: SUID of the edge
:param verbose: print more
:returns: 200: successful operation
"""
response=api(url=self.___url+'networks/'+str(networkId)+'/edges/'+str(edgeId)+'/isDirected', method="GET", verbose=verbose, parse_params=False)
return response | Returns true if the edge specified by the `edgeId` and `networkId` parameters is directed.
:param networkId: SUID of the network containing the edge
:param edgeId: SUID of the edge
:param verbose: print more
:returns: 200: successful operation |
def volumes_from(self, value):
"""
:param value:
:return:
"""
volumes_from = []
if isinstance(value, list):
for volume_from in value:
if not isinstance(volume_from, six.string_types):
raise TypeError("each bind must be a str. {0} was passed".format(volume_from))
volumes_from.append(self._convert_volume_from(volume_from))
elif isinstance(value, six.string_types):
volumes_from.append(self._convert_volume_from(value))
elif value is None:
pass
else:
raise ValueError(
"""When passing binds they must be in one of the
following formats: container_path, host_path:container_path,
or host_path:container_path:permissions"""
)
self._volumes_from = volumes_from | :param value:
:return: |
def overlay_gateway_map_vlan_vni_mapping_vid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
map = ET.SubElement(overlay_gateway, "map")
vlan_vni_mapping = ET.SubElement(map, "vlan-vni-mapping")
vid = ET.SubElement(vlan_vni_mapping, "vid")
vid.text = kwargs.pop('vid')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def render_fields(dictionary,
*fields,
**opts):
'''
This function works similarly to
:mod:`render_field <salt.modules.napalm_formula.render_field>` but for a
list of fields from the same dictionary, rendering, indenting and
distributing them on separate lines.
dictionary
The dictionary to traverse.
fields
A list of field names or paths in the dictionary.
indent: ``0``
The indentation to use, prepended to the rendered field.
separator: ``\\n``
The separator to use between fields.
CLI Example:
.. code-block:: bash
salt '*' napalm_formula.render_fields "{'mtu': 68, 'description': 'Interface description'}" mtu description
Jinja usage example:
.. code-block:: jinja
{%- set config={'mtu': 68, 'description': 'Interface description'} %}
{{ salt.napalm_formula.render_fields(config, 'mtu', 'description', quotes=True) }}
The Jinja example above would generate the following configuration:
.. code-block:: text
mtu "68"
description "Interface description"
'''
results = []
for field in fields:
res = render_field(dictionary, field, **opts)
if res:
results.append(res)
if 'indent' not in opts:
opts['indent'] = 0
if 'separator' not in opts:
opts['separator'] = '\n{ind}'.format(ind=' '*opts['indent'])
return opts['separator'].join(results) | This function works similarly to
:mod:`render_field <salt.modules.napalm_formula.render_field>` but for a
list of fields from the same dictionary, rendering, indenting and
distributing them on separate lines.
dictionary
The dictionary to traverse.
fields
A list of field names or paths in the dictionary.
indent: ``0``
The indentation to use, prepended to the rendered field.
separator: ``\\n``
The separator to use between fields.
CLI Example:
.. code-block:: bash
salt '*' napalm_formula.render_fields "{'mtu': 68, 'description': 'Interface description'}" mtu description
Jinja usage example:
.. code-block:: jinja
{%- set config={'mtu': 68, 'description': 'Interface description'} %}
{{ salt.napalm_formula.render_fields(config, 'mtu', 'description', quotes=True) }}
The Jinja example above would generate the following configuration:
.. code-block:: text
mtu "68"
description "Interface description" |
def GetCustomJsonFieldMapping(message_type, python_name=None, json_name=None):
"""Return the appropriate remapping for the given field, or None."""
return _FetchRemapping(message_type, 'field',
python_name=python_name, json_name=json_name,
mappings=_JSON_FIELD_MAPPINGS) | Return the appropriate remapping for the given field, or None. |
def invenio_query_factory(parser=None, walkers=None):
"""Create a parser returning Elastic Search DSL query instance."""
parser = parser or Main
walkers = walkers or [PypegConverter()]
walkers.append(ElasticSearchDSL())
def invenio_query(pattern):
query = pypeg2.parse(pattern, parser, whitespace="")
for walker in walkers:
query = query.accept(walker)
return query
return invenio_query | Create a parser returning Elastic Search DSL query instance. |
def default_values_of(func):
"""Return the defaults of the function `func`."""
signature = inspect.signature(func)
return [k
for k, v in signature.parameters.items()
if v.default is not inspect.Parameter.empty or
v.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD] | Return the defaults of the function `func`. |
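Despite its name, default_values_of returns parameter names, not values: anything with a default, plus *args, keyword-only and **kwargs parameters. A quick sketch with an invented function:
def f(a, b=1, *args, c=2, **kwargs):
    pass
print(default_values_of(f))   # ['b', 'args', 'c', 'kwargs'] -- only plain 'a' is excluded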
def window(self, vec):
"""Apply a window to the coefficients defined by *vec*. *vec* must
have length *nmax* + 1. This is good way to filter the pattern by
windowing in the coefficient domain.
Example::
>>> vec = numpy.linspace(0, 1, c.nmax + 1)
>>> c.window(vec)
Args:
vec (numpy.array): Vector of values to apply in the n direction of
the data. Has length *nmax* + 1.
Returns:
Nothing, applies the window to the data in place.
"""
slce = slice(None, None, None)
self.__setitem__((slce, 0), self.__getitem__((slce, 0)) * vec)
for m in xrange(1, self.mmax + 1):
self.__setitem__((slce, -m), self.__getitem__((slce, -m)) * vec[m:])
self.__setitem__((slce, m), self.__getitem__((slce, m)) * vec[m:]) | Apply a window to the coefficients defined by *vec*. *vec* must
have length *nmax* + 1. This is a good way to filter the pattern by
windowing in the coefficient domain.
Example::
>>> vec = numpy.linspace(0, 1, c.nmax + 1)
>>> c.window(vec)
Args:
vec (numpy.array): Vector of values to apply in the n direction of
the data. Has length *nmax* + 1.
Returns:
Nothing, applies the window to the data in place. |
def capture_vm_image(self, service_name, deployment_name, role_name, options):
'''
Creates a copy of the operating system virtual hard disk (VHD) and all
of the data VHDs that are associated with the Virtual Machine, saves
the VHD copies in the same storage location as the original VHDs, and
registers the copies as a VM Image in the image repository that is
associated with the specified subscription.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_name:
The name of the role.
options:
An instance of CaptureRoleAsVMImage class.
options.os_state:
Required. Specifies the state of the operating system in the image.
Possible values are: Generalized, Specialized
A Virtual Machine that is fully configured and running contains a
Specialized operating system. A Virtual Machine on which the
Sysprep command has been run with the generalize option contains a
Generalized operating system. If you capture an image from a
generalized Virtual Machine, the machine is deleted after the image
is captured. It is recommended that all Virtual Machines are shut
down before capturing an image.
options.vm_image_name:
Required. Specifies the name of the VM Image.
options.vm_image_label:
Required. Specifies the label of the VM Image.
options.description:
Optional. Specifies the description of the VM Image.
options.language:
Optional. Specifies the language of the VM Image.
options.image_family:
Optional. Specifies a value that can be used to group VM Images.
options.recommended_vm_size:
Optional. Specifies the size to use for the Virtual Machine that
is created from the VM Image.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_name', role_name)
_validate_not_none('options', options)
_validate_not_none('options.os_state', options.os_state)
_validate_not_none('options.vm_image_name', options.vm_image_name)
_validate_not_none('options.vm_image_label', options.vm_image_label)
return self._perform_post(
self._get_capture_vm_image_path(service_name, deployment_name, role_name),
_XmlSerializer.capture_vm_image_to_xml(options),
as_async=True) | Creates a copy of the operating system virtual hard disk (VHD) and all
of the data VHDs that are associated with the Virtual Machine, saves
the VHD copies in the same storage location as the original VHDs, and
registers the copies as a VM Image in the image repository that is
associated with the specified subscription.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_name:
The name of the role.
options:
An instance of CaptureRoleAsVMImage class.
options.os_state:
Required. Specifies the state of the operating system in the image.
Possible values are: Generalized, Specialized
A Virtual Machine that is fully configured and running contains a
Specialized operating system. A Virtual Machine on which the
Sysprep command has been run with the generalize option contains a
Generalized operating system. If you capture an image from a
generalized Virtual Machine, the machine is deleted after the image
is captured. It is recommended that all Virtual Machines are shut
down before capturing an image.
options.vm_image_name:
Required. Specifies the name of the VM Image.
options.vm_image_label:
Required. Specifies the label of the VM Image.
options.description:
Optional. Specifies the description of the VM Image.
options.language:
Optional. Specifies the language of the VM Image.
options.image_family:
Optional. Specifies a value that can be used to group VM Images.
options.recommended_vm_size:
Optional. Specifies the size to use for the Virtual Machine that
is created from the VM Image. |
def cancel(**kwargs):
"""Cancels work items based on their criteria.
Args:
**kwargs: Same parameters as the query() method.
Returns:
The number of tasks that were canceled.
"""
task_list = _query(**kwargs)
for task in task_list:
task.status = WorkQueue.CANCELED
task.finished = datetime.datetime.utcnow()
db.session.add(task)
return len(task_list) | Cancels work items based on their criteria.
Args:
**kwargs: Same parameters as the query() method.
Returns:
The number of tasks that were canceled. |
def build_mutation_pruner_plugin() -> LaserPlugin:
""" Creates an instance of the mutation pruner plugin"""
from mythril.laser.ethereum.plugins.implementations.mutation_pruner import (
MutationPruner,
)
return MutationPruner() | Creates an instance of the mutation pruner plugin |
def read_random_state(self, group=None):
"""Reads the state of the random number generator from the file.
Parameters
----------
group : str
Name of group to read random state from.
Returns
-------
tuple
A tuple with 5 elements that can be passed to numpy.random.set_state.
"""
group = self.sampler_group if group is None else group
dataset_name = "/".join([group, "random_state"])
arr = self[dataset_name][:]
s = self[dataset_name].attrs["s"]
pos = self[dataset_name].attrs["pos"]
has_gauss = self[dataset_name].attrs["has_gauss"]
cached_gauss = self[dataset_name].attrs["cached_gauss"]
return s, arr, pos, has_gauss, cached_gauss | Reads the state of the random number generator from the file.
Parameters
----------
group : str
Name of group to read random state from.
Returns
-------
tuple
A tuple with 5 elements that can be passed to numpy.random.set_state. |
def compare_outputs(expected, output, **kwargs):
"""
Compares expected values and output.
Returns None if no error, an exception message otherwise.
"""
SkipDim1 = kwargs.pop("SkipDim1", False)
NoProb = kwargs.pop("NoProb", False)
Dec4 = kwargs.pop("Dec4", False)
Dec3 = kwargs.pop("Dec3", False)
Dec2 = kwargs.pop("Dec2", False)
Disc = kwargs.pop("Disc", False)
Mism = kwargs.pop("Mism", False)
Opp = kwargs.pop("Opp", False)
if Opp and not NoProb:
raise ValueError("Opp is only available if NoProb is True")
if Dec4:
kwargs["decimal"] = min(kwargs["decimal"], 4)
if Dec3:
kwargs["decimal"] = min(kwargs["decimal"], 3)
if Dec2:
kwargs["decimal"] = min(kwargs["decimal"], 2)
if isinstance(expected, numpy.ndarray) and isinstance(output, numpy.ndarray):
if SkipDim1:
# Arrays like (2, 1, 2, 3) becomes (2, 2, 3) as one dimension is useless.
expected = expected.reshape(tuple([d for d in expected.shape if d > 1]))
output = output.reshape(tuple([d for d in expected.shape if d > 1]))
if NoProb:
# One vector is (N,) with scores, negative for class 0
# positive for class 1
# The other vector is (N, 2) score in two columns.
if len(output.shape) == 2 and output.shape[1] == 2 and len(expected.shape) == 1:
output = output[:, 1]
elif len(output.shape) == 1 and len(expected.shape) == 1:
pass
elif len(expected.shape) == 1 and len(output.shape) == 2 and \
expected.shape[0] == output.shape[0] and output.shape[1] == 1:
output = output[:, 0]
elif expected.shape != output.shape:
raise NotImplementedError("No good shape: {0} != {1}".format(expected.shape, output.shape))
if Opp:
output = -output
if len(expected.shape) == 1 and len(output.shape) == 2 and output.shape[1] == 1:
output = output.ravel()
if len(expected.shape) == 2 and len(output.shape) == 1 and expected.shape[1] == 1:
expected = expected.ravel()
if not numpy.issubdtype(expected.dtype, numpy.number):
try:
assert_array_equal(expected, output)
except Exception as e:
if Disc:
# Bug to be fixed later.
return ExpectedAssertionError(str(e))
else:
return OnnxRuntimeAssertionError(str(e))
else:
try:
assert_array_almost_equal(expected, output, **kwargs)
except Exception as e:
expected_ = expected.ravel()
output_ = output.ravel()
if len(expected_) == len(output_):
diff = numpy.abs(expected_ - output_).max()
elif Mism:
return ExpectedAssertionError("dimension mismatch={0}, {1}\n{2}".format(expected.shape, output.shape, e))
else:
return OnnxRuntimeAssertionError("dimension mismatch={0}, {1}\n{2}".format(expected.shape, output.shape, e))
if Disc:
# Bug to be fixed later.
return ExpectedAssertionError("max diff(expected, output)={0}\n{1}".format(diff, e))
else:
return OnnxRuntimeAssertionError("max diff(expected, output)={0}\n{1}".format(diff, e))
else:
return OnnxRuntimeAssertionError("Unexpected types {0} != {1}".format(type(expected), type(output)))
return None | Compares expected values and output.
Returns None if no error, an exception message otherwise. |
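A quick sketch of the return convention (None on success, an assertion-error object otherwise); it assumes the module-level numpy helpers and error classes used above are importable:
import numpy
print(compare_outputs(numpy.array([1.0, 2.0]), numpy.array([1.0, 2.0]), decimal=5))
# None -- the arrays agree to 5 decimals
err = compare_outputs(numpy.array([1.0, 2.0]), numpy.array([1.0, 2.5]), decimal=5)
print(type(err).__name__)   # OnnxRuntimeAssertionError, carrying the maximum difference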
def load_file(self, file_path, share_name, directory_name, file_name, **kwargs):
"""
Upload a file to Azure File Share.
:param file_path: Path to the file to load.
:type file_path: str
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
:param file_name: Name of the file.
:type file_name: str
:param kwargs: Optional keyword arguments that
`FileService.create_file_from_path()` takes.
:type kwargs: object
"""
self.connection.create_file_from_path(share_name, directory_name,
file_name, file_path, **kwargs) | Upload a file to Azure File Share.
:param file_path: Path to the file to load.
:type file_path: str
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
:param file_name: Name of the file.
:type file_name: str
:param kwargs: Optional keyword arguments that
`FileService.create_file_from_path()` takes.
:type kwargs: object |
def _dispatch(name, *args, **kwargs):
"""
Dispatch to apply.
"""
def outer(self, *args, **kwargs):
def f(x):
x = self._shallow_copy(x, groupby=self._groupby)
return getattr(x, name)(*args, **kwargs)
return self._groupby.apply(f)
outer.__name__ = name
return outer | Dispatch to apply. |
def addChild(self, child_id):
"""Add a child to current workitem
:param child_id: the child workitem id/number
(integer or equivalent string)
"""
self.log.debug("Try to add a child <Workitem %s> to current "
"<Workitem %s>",
child_id,
self)
self._addChildren([child_id])
self.log.info("Successfully add a child <Workitem %s> to current "
"<Workitem %s>",
child_id,
self) | Add a child to current workitem
:param child_id: the child workitem id/number
(integer or equivalent string) |
def institute(context, institute_id, sanger_recipient, coverage_cutoff, frequency_cutoff,
display_name, remove_sanger):
"""
Update an institute
"""
adapter = context.obj['adapter']
LOG.info("Running scout update institute")
try:
adapter.update_institute(
internal_id=institute_id,
sanger_recipient=sanger_recipient,
coverage_cutoff=coverage_cutoff,
frequency_cutoff=frequency_cutoff,
display_name=display_name,
remove_sanger=remove_sanger,
)
except Exception as err:
LOG.warning(err)
context.abort() | Update an institute |
def removeDuplicates(inFileName, outFileName) :
"""removes duplicated lines from a 'inFileName' CSV file, the results are witten in 'outFileName'"""
f = open(inFileName)
legend = f.readline()
data = ''
h = {}
h[legend] = 0
lines = f.readlines()
for l in lines :
if l not in h:
h[l] = 0
data += l
f.flush()
f.close()
f = open(outFileName, 'w')
f.write(legend+data)
f.flush()
f.close() | removes duplicated lines from the 'inFileName' CSV file; the results are written to 'outFileName' |
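A round-trip sketch for removeDuplicates with an invented two-column file; the header line is always kept and the original row order is preserved:
with open('input.csv', 'w') as fh:   # hypothetical file names
    fh.write('id,name\n1,a\n1,a\n2,b\n')
removeDuplicates('input.csv', 'deduped.csv')
print(open('deduped.csv').read())
# id,name
# 1,a
# 2,b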
def to_comm(self, light_request=False):
'''
Convert `self` to :class:`.Archive`.
Returns:
obj: :class:`.Archive` instance.
'''
data = None
if not light_request:
tmp_fn = path_to_zip(self.dir_pointer)
data = read_as_base64(tmp_fn)
os.unlink(tmp_fn)
return Archive(
isbn=self.isbn,
uuid=self.uuid,
aleph_id=self.aleph_id,
b64_data=data,
dir_pointer=self.dir_pointer,
) | Convert `self` to :class:`.Archive`.
Returns:
obj: :class:`.Archive` instance. |
def get_roles(self, principal, object=None, no_group_roles=False):
"""Get all the roles attached to given `principal`, on a given
`object`.
:param principal: a :class:`User` or :class:`Group`
:param object: an :class:`Entity`
:param no_group_roles: If `True`, return only direct roles, not roles
acquired through group membership.
"""
assert principal
if hasattr(principal, "is_anonymous") and principal.is_anonymous:
return [AnonymousRole]
query = db.session.query(RoleAssignment.role)
if isinstance(principal, Group):
filter_principal = RoleAssignment.group == principal
else:
filter_principal = RoleAssignment.user == principal
if not no_group_roles:
groups = [g.id for g in principal.groups]
if groups:
filter_principal |= RoleAssignment.group_id.in_(groups)
query = query.filter(filter_principal)
if object is not None:
assert isinstance(object, Entity)
query = query.filter(RoleAssignment.object == object)
roles = {i[0] for i in query.all()}
if object is not None:
for attr, role in (("creator", Creator), ("owner", Owner)):
if getattr(object, attr) == principal:
roles.add(role)
return list(roles) | Get all the roles attached to given `principal`, on a given
`object`.
:param principal: a :class:`User` or :class:`Group`
:param object: an :class:`Entity`
:param no_group_roles: If `True`, return only direct roles, not roles
acquired through group membership. |
def default(self, obj):
"""Overriding the default JSONEncoder.default for NDB support."""
obj_type = type(obj)
# NDB Models return a repr to calls from type().
if obj_type not in self._ndb_type_encoding:
if hasattr(obj, '__metaclass__'):
obj_type = obj.__metaclass__
else:
# Try to encode subclasses of types
for ndb_type in NDB_TYPES:
if isinstance(obj, ndb_type):
obj_type = ndb_type
break
fn = self._ndb_type_encoding.get(obj_type)
if fn:
return fn(obj)
return json.JSONEncoder.default(self, obj) | Overriding the default JSONEncoder.default for NDB support. |
def convert_date(value, parameter):
'''
Converts to datetime.date:
'', '-', None convert to parameter default
The first matching format in settings.DATE_INPUT_FORMATS converts to datetime
'''
value = _check_default(value, parameter, ( '', '-', None ))
if value is None or isinstance(value, datetime.date):
return value
for fmt in settings.DATE_INPUT_FORMATS:
try:
return datetime.datetime.strptime(value, fmt).date()
except (ValueError, TypeError):
continue
raise ValueError("`{}` does not match a format in settings.DATE_INPUT_FORMATS".format(value)) | Converts to datetime.date:
'', '-', None convert to parameter default
The first matching format in settings.DATE_INPUT_FORMATS converts to datetime |
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the Digest object and decode it into its
constituent parts.
Args:
istream (Stream): A data stream containing encoded object data,
supporting a read method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
"""
super(Digest, self).read(istream, kmip_version=kmip_version)
tstream = BytearrayStream(istream.read(self.length))
self.hashing_algorithm.read(tstream, kmip_version=kmip_version)
self.digest_value.read(tstream, kmip_version=kmip_version)
self.key_format_type.read(tstream, kmip_version=kmip_version)
self.is_oversized(tstream)
self.validate() | Read the data encoding the Digest object and decode it into its
constituent parts.
Args:
istream (Stream): A data stream containing encoded object data,
supporting a read method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0. |
def save_modules():
"""
Context in which imported modules are saved.
Translates exceptions internal to the context into the equivalent exception
outside the context.
"""
saved = sys.modules.copy()
with ExceptionSaver() as saved_exc:
yield saved
sys.modules.update(saved)
# remove any modules imported since
del_modules = (
mod_name for mod_name in sys.modules
if mod_name not in saved
# exclude any encodings modules. See #285
and not mod_name.startswith('encodings.')
)
_clear_modules(del_modules)
saved_exc.resume() | Context in which imported modules are saved.
Translates exceptions internal to the context into the equivalent exception
outside the context. |