repo (string, lengths 7-54) | path (string, lengths 4-192) | url (string, lengths 87-284) | code (string, lengths 78-104k) | code_tokens (sequence) | docstring (string, lengths 1-46.9k) | docstring_tokens (sequence) | language (1 class) | partition (3 classes)
---|---|---|---|---|---|---|---|---|
tcalmant/ipopo | pelix/rsa/__init__.py | https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/rsa/__init__.py#L1563-L1579 | def copy_non_reserved(props, target):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
"""
Copies all properties with non-reserved names from ``props`` to ``target``
:param props: A dictionary of properties
:param target: Another dictionary
:return: The target dictionary
"""
target.update(
{
key: value
for key, value in props.items()
if not is_reserved_property(key)
}
)
return target | [
"def",
"copy_non_reserved",
"(",
"props",
",",
"target",
")",
":",
"# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]",
"target",
".",
"update",
"(",
"{",
"key",
":",
"value",
"for",
"key",
",",
"value",
"in",
"props",
".",
"items",
"(",
")",
"if",
"not",
"is_reserved_property",
"(",
"key",
")",
"}",
")",
"return",
"target"
] | Copies all properties with non-reserved names from ``props`` to ``target``
:param props: A dictionary of properties
:param target: Another dictionary
:return: The target dictionary | [
"Copies",
"all",
"properties",
"with",
"non",
"-",
"reserved",
"names",
"from",
"props",
"to",
"target"
] | python | train |
Jajcus/pyxmpp2 | pyxmpp2/resolver.py | https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/resolver.py#L60-L69 | def is_ipv4_available():
"""Check if IPv4 is available.
:Return: `True` when an IPv4 socket can be created.
"""
try:
socket.socket(socket.AF_INET).close()
except socket.error:
return False
return True | [
"def",
"is_ipv4_available",
"(",
")",
":",
"try",
":",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
")",
".",
"close",
"(",
")",
"except",
"socket",
".",
"error",
":",
"return",
"False",
"return",
"True"
] | Check if IPv4 is available.
:Return: `True` when an IPv4 socket can be created. | [
"Check",
"if",
"IPv4",
"is",
"available",
"."
] | python | valid |
eflglobal/filters | filters/macros.py | https://github.com/eflglobal/filters/blob/36c2a2b1cffa3a37279053cf181709045fd6683a/filters/macros.py#L39-L85 | def filter_macro(func, *args, **kwargs):
"""
Promotes a function that returns a filter into its own filter type.
Example::
@filter_macro
def String():
return Unicode | Strip | NotEmpty
# You can now use `String` anywhere you would use a regular Filter:
(String | Split(':')).apply('...')
You can also use ``filter_macro`` to create partials, allowing you to
preset one or more initialization arguments::
Minor = filter_macro(Max, max_value=18, inclusive=False)
Minor(inclusive=True).apply(18)
"""
filter_partial = partial(func, *args, **kwargs)
class FilterMacroMeta(FilterMeta):
@staticmethod
def __new__(mcs, name, bases, attrs):
# This is as close as we can get to running
# ``update_wrapper`` on a type.
for attr in WRAPPER_ASSIGNMENTS:
if hasattr(func, attr):
attrs[attr] = getattr(func, attr)
# Note that we ignore the ``name`` argument, passing in
# ``func.__name__`` instead.
return super(FilterMacroMeta, mcs)\
.__new__(mcs, func.__name__, bases, attrs)
def __call__(cls, *runtime_args, **runtime_kwargs):
return filter_partial(*runtime_args, **runtime_kwargs)
class FilterMacro(with_metaclass(FilterMacroMeta, FilterMacroType)):
# This method will probably never get called due to overloaded
# ``__call__`` in the metaclass, but just in case, we'll include
# it because it is an abstract method in `BaseFilter`.
def _apply(self, value):
# noinspection PyProtectedMember
return self.__class__()._apply(value)
return FilterMacro | [
"def",
"filter_macro",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"filter_partial",
"=",
"partial",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"class",
"FilterMacroMeta",
"(",
"FilterMeta",
")",
":",
"@",
"staticmethod",
"def",
"__new__",
"(",
"mcs",
",",
"name",
",",
"bases",
",",
"attrs",
")",
":",
"# This is as close as we can get to running",
"# ``update_wrapper`` on a type.",
"for",
"attr",
"in",
"WRAPPER_ASSIGNMENTS",
":",
"if",
"hasattr",
"(",
"func",
",",
"attr",
")",
":",
"attrs",
"[",
"attr",
"]",
"=",
"getattr",
"(",
"func",
",",
"attr",
")",
"# Note that we ignore the ``name`` argument, passing in",
"# ``func.__name__`` instead.",
"return",
"super",
"(",
"FilterMacroMeta",
",",
"mcs",
")",
".",
"__new__",
"(",
"mcs",
",",
"func",
".",
"__name__",
",",
"bases",
",",
"attrs",
")",
"def",
"__call__",
"(",
"cls",
",",
"*",
"runtime_args",
",",
"*",
"*",
"runtime_kwargs",
")",
":",
"return",
"filter_partial",
"(",
"*",
"runtime_args",
",",
"*",
"*",
"runtime_kwargs",
")",
"class",
"FilterMacro",
"(",
"with_metaclass",
"(",
"FilterMacroMeta",
",",
"FilterMacroType",
")",
")",
":",
"# This method will probably never get called due to overloaded",
"# ``__call__`` in the metaclass, but just in case, we'll include",
"# it because it is an abstract method in `BaseFilter`.",
"def",
"_apply",
"(",
"self",
",",
"value",
")",
":",
"# noinspection PyProtectedMember",
"return",
"self",
".",
"__class__",
"(",
")",
".",
"_apply",
"(",
"value",
")",
"return",
"FilterMacro"
] | Promotes a function that returns a filter into its own filter type.
Example::
@filter_macro
def String():
return Unicode | Strip | NotEmpty
# You can now use `String` anywhere you would use a regular Filter:
(String | Split(':')).apply('...')
You can also use ``filter_macro`` to create partials, allowing you to
preset one or more initialization arguments::
Minor = filter_macro(Max, max_value=18, inclusive=False)
Minor(inclusive=True).apply(18) | [
"Promotes",
"a",
"function",
"that",
"returns",
"a",
"filter",
"into",
"its",
"own",
"filter",
"type",
"."
] | python | train |
iotile/coretools | transport_plugins/awsiot/iotile_transport_awsiot/gateway_agent.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/awsiot/iotile_transport_awsiot/gateway_agent.py#L673-L714 | def _publish_scan_response(self, client):
"""Publish a scan response message
The message contains all of the devices that are currently known
to this agent. Connection strings for direct connections are
translated to what is appropriate for this agent.
Args:
client (string): A unique id for the client that made this request
"""
devices = self._manager.scanned_devices
converted_devs = []
for uuid, info in devices.items():
slug = self._build_device_slug(uuid)
message = {}
message['uuid'] = uuid
if uuid in self._connections:
message['user_connected'] = True
elif 'user_connected' in info:
message['user_connected'] = info['user_connected']
else:
message['user_connected'] = False
message['connection_string'] = slug
message['signal_strength'] = info['signal_strength']
converted_devs.append({x: y for x, y in message.items()})
message['type'] = 'notification'
message['operation'] = 'advertisement'
self.client.publish(self.topics.gateway_topic(slug, 'data/advertisement'), message)
probe_message = {}
probe_message['type'] = 'response'
probe_message['client'] = client
probe_message['success'] = True
probe_message['devices'] = converted_devs
self.client.publish(self.topics.status, probe_message) | [
"def",
"_publish_scan_response",
"(",
"self",
",",
"client",
")",
":",
"devices",
"=",
"self",
".",
"_manager",
".",
"scanned_devices",
"converted_devs",
"=",
"[",
"]",
"for",
"uuid",
",",
"info",
"in",
"devices",
".",
"items",
"(",
")",
":",
"slug",
"=",
"self",
".",
"_build_device_slug",
"(",
"uuid",
")",
"message",
"=",
"{",
"}",
"message",
"[",
"'uuid'",
"]",
"=",
"uuid",
"if",
"uuid",
"in",
"self",
".",
"_connections",
":",
"message",
"[",
"'user_connected'",
"]",
"=",
"True",
"elif",
"'user_connected'",
"in",
"info",
":",
"message",
"[",
"'user_connected'",
"]",
"=",
"info",
"[",
"'user_connected'",
"]",
"else",
":",
"message",
"[",
"'user_connected'",
"]",
"=",
"False",
"message",
"[",
"'connection_string'",
"]",
"=",
"slug",
"message",
"[",
"'signal_strength'",
"]",
"=",
"info",
"[",
"'signal_strength'",
"]",
"converted_devs",
".",
"append",
"(",
"{",
"x",
":",
"y",
"for",
"x",
",",
"y",
"in",
"message",
".",
"items",
"(",
")",
"}",
")",
"message",
"[",
"'type'",
"]",
"=",
"'notification'",
"message",
"[",
"'operation'",
"]",
"=",
"'advertisement'",
"self",
".",
"client",
".",
"publish",
"(",
"self",
".",
"topics",
".",
"gateway_topic",
"(",
"slug",
",",
"'data/advertisement'",
")",
",",
"message",
")",
"probe_message",
"=",
"{",
"}",
"probe_message",
"[",
"'type'",
"]",
"=",
"'response'",
"probe_message",
"[",
"'client'",
"]",
"=",
"client",
"probe_message",
"[",
"'success'",
"]",
"=",
"True",
"probe_message",
"[",
"'devices'",
"]",
"=",
"converted_devs",
"self",
".",
"client",
".",
"publish",
"(",
"self",
".",
"topics",
".",
"status",
",",
"probe_message",
")"
] | Publish a scan response message
The message contains all of the devices that are currently known
to this agent. Connection strings for direct connections are
translated to what is appropriate for this agent.
Args:
client (string): A unique id for the client that made this request | [
"Publish",
"a",
"scan",
"response",
"message"
] | python | train |
sebdah/dynamic-dynamodb | dynamic_dynamodb/core/gsi.py | https://github.com/sebdah/dynamic-dynamodb/blob/bfd0ca806b1c3301e724696de90ef0f973410493/dynamic_dynamodb/core/gsi.py#L585-L1024 | def __ensure_provisioning_writes(
table_name, table_key, gsi_name, gsi_key, num_consec_write_checks):
""" Ensure that provisioning of writes is correct
:type table_name: str
:param table_name: Name of the DynamoDB table
:type table_key: str
:param table_key: Table configuration option key name
:type gsi_name: str
:param gsi_name: Name of the GSI
:type gsi_key: str
:param gsi_key: Configuration option key name
:type num_consec_write_checks: int
:param num_consec_write_checks: How many consecutive checks have we had
:returns: (bool, int, int)
update_needed, updated_write_units, num_consec_write_checks
"""
if not get_gsi_option(table_key, gsi_key, 'enable_writes_autoscaling'):
logger.info(
'{0} - GSI: {1} - '
'Autoscaling of writes has been disabled'.format(
table_name, gsi_name))
return False, dynamodb.get_provisioned_gsi_write_units(
table_name, gsi_name), 0
update_needed = False
try:
lookback_window_start = get_gsi_option(
table_key, gsi_key, 'lookback_window_start')
lookback_period = get_gsi_option(
table_key, gsi_key, 'lookback_period')
current_write_units = dynamodb.get_provisioned_gsi_write_units(
table_name, gsi_name)
consumed_write_units_percent = \
gsi_stats.get_consumed_write_units_percent(
table_name, gsi_name, lookback_window_start, lookback_period)
throttled_write_count = \
gsi_stats.get_throttled_write_event_count(
table_name, gsi_name, lookback_window_start, lookback_period)
throttled_by_provisioned_write_percent = \
gsi_stats.get_throttled_by_provisioned_write_event_percent(
table_name, gsi_name, lookback_window_start, lookback_period)
throttled_by_consumed_write_percent = \
gsi_stats.get_throttled_by_consumed_write_percent(
table_name, gsi_name, lookback_window_start, lookback_period)
writes_upper_threshold = \
get_gsi_option(table_key, gsi_key, 'writes_upper_threshold')
writes_lower_threshold = \
get_gsi_option(table_key, gsi_key, 'writes_lower_threshold')
throttled_writes_upper_threshold = \
get_gsi_option(
table_key, gsi_key, 'throttled_writes_upper_threshold')
increase_writes_unit = \
get_gsi_option(table_key, gsi_key, 'increase_writes_unit')
increase_writes_with = \
get_gsi_option(table_key, gsi_key, 'increase_writes_with')
decrease_writes_unit = \
get_gsi_option(table_key, gsi_key, 'decrease_writes_unit')
decrease_writes_with = \
get_gsi_option(table_key, gsi_key, 'decrease_writes_with')
min_provisioned_writes = \
get_gsi_option(table_key, gsi_key, 'min_provisioned_writes')
max_provisioned_writes = \
get_gsi_option(table_key, gsi_key, 'max_provisioned_writes')
num_write_checks_before_scale_down = \
get_gsi_option(
table_key, gsi_key, 'num_write_checks_before_scale_down')
num_write_checks_reset_percent = \
get_gsi_option(
table_key, gsi_key, 'num_write_checks_reset_percent')
increase_throttled_by_provisioned_writes_unit = \
get_gsi_option(
table_key,
gsi_key,
'increase_throttled_by_provisioned_writes_unit')
increase_throttled_by_provisioned_writes_scale = \
get_gsi_option(
table_key,
gsi_key,
'increase_throttled_by_provisioned_writes_scale')
increase_throttled_by_consumed_writes_unit = \
get_gsi_option(
table_key,
gsi_key,
'increase_throttled_by_consumed_writes_unit')
increase_throttled_by_consumed_writes_scale = \
get_gsi_option(
table_key,
gsi_key,
'increase_throttled_by_consumed_writes_scale')
increase_consumed_writes_unit = \
get_gsi_option(table_key, gsi_key, 'increase_consumed_writes_unit')
increase_consumed_writes_with = \
get_gsi_option(table_key, gsi_key, 'increase_consumed_writes_with')
increase_consumed_writes_scale = \
get_gsi_option(
table_key, gsi_key, 'increase_consumed_writes_scale')
decrease_consumed_writes_unit = \
get_gsi_option(table_key, gsi_key, 'decrease_consumed_writes_unit')
decrease_consumed_writes_with = \
get_gsi_option(table_key, gsi_key, 'decrease_consumed_writes_with')
decrease_consumed_writes_scale = \
get_gsi_option(
table_key, gsi_key, 'decrease_consumed_writes_scale')
except JSONResponseError:
raise
except BotoServerError:
raise
# Set the updated units to the current write unit value
updated_write_units = current_write_units
# Reset consecutive write count if num_write_checks_reset_percent
# is reached
if num_write_checks_reset_percent:
if consumed_write_units_percent >= num_write_checks_reset_percent:
logger.info(
'{0} - GSI: {1} - Resetting the number of consecutive '
'write checks. Reason: Consumed percent {2} is '
'greater than reset percent: {3}'.format(
table_name,
gsi_name,
consumed_write_units_percent,
num_write_checks_reset_percent))
num_consec_write_checks = 0
# Exit if up scaling has been disabled
if not get_gsi_option(table_key, gsi_key, 'enable_writes_up_scaling'):
logger.debug(
'{0} - GSI: {1} - Up scaling event detected. No action taken as '
'scaling up writes has been disabled in the configuration'.format(
table_name, gsi_name))
else:
# If local/granular values not specified use global values
increase_consumed_writes_unit = \
increase_consumed_writes_unit or increase_writes_unit
increase_throttled_by_provisioned_writes_unit = (
increase_throttled_by_provisioned_writes_unit
or increase_writes_unit)
increase_throttled_by_consumed_writes_unit = \
increase_throttled_by_consumed_writes_unit or increase_writes_unit
increase_consumed_writes_with = \
increase_consumed_writes_with or increase_writes_with
# Initialise variables to store calculated provisioning
throttled_by_provisioned_calculated_provisioning = scale_reader(
increase_throttled_by_provisioned_writes_scale,
throttled_by_provisioned_write_percent)
throttled_by_consumed_calculated_provisioning = scale_reader(
increase_throttled_by_consumed_writes_scale,
throttled_by_consumed_write_percent)
consumed_calculated_provisioning = scale_reader(
increase_consumed_writes_scale,
consumed_write_units_percent)
throttled_count_calculated_provisioning = 0
calculated_provisioning = 0
# Increase needed due to high throttled to provisioned ratio
if throttled_by_provisioned_calculated_provisioning:
if increase_throttled_by_provisioned_writes_unit == 'percent':
throttled_by_provisioned_calculated_provisioning = \
calculators.increase_writes_in_percent(
current_write_units,
throttled_by_provisioned_calculated_provisioning,
get_gsi_option(
table_key,
gsi_key,
'max_provisioned_writes'),
consumed_write_units_percent,
'{0} - GSI: {1}'.format(table_name, gsi_name))
else:
throttled_by_provisioned_calculated_provisioning = \
calculators.increase_writes_in_units(
current_write_units,
throttled_by_provisioned_calculated_provisioning,
get_gsi_option(
table_key,
gsi_key,
'max_provisioned_writes'),
consumed_write_units_percent,
'{0} - GSI: {1}'.format(table_name, gsi_name))
# Increase needed due to high throttled to consumed ratio
if throttled_by_consumed_calculated_provisioning:
if increase_throttled_by_consumed_writes_unit == 'percent':
throttled_by_consumed_calculated_provisioning = \
calculators.increase_writes_in_percent(
current_write_units,
throttled_by_consumed_calculated_provisioning,
get_gsi_option(
table_key,
gsi_key,
'max_provisioned_writes'),
consumed_write_units_percent,
'{0} - GSI: {1}'.format(table_name, gsi_name))
else:
throttled_by_consumed_calculated_provisioning = \
calculators.increase_writes_in_units(
current_write_units,
throttled_by_consumed_calculated_provisioning,
get_gsi_option(
table_key,
gsi_key,
'max_provisioned_writes'),
consumed_write_units_percent,
'{0} - GSI: {1}'.format(table_name, gsi_name))
# Increase needed due to high CU consumption
if consumed_calculated_provisioning:
if increase_consumed_writes_unit == 'percent':
consumed_calculated_provisioning = \
calculators.increase_writes_in_percent(
current_write_units,
consumed_calculated_provisioning,
get_gsi_option(
table_key,
gsi_key,
'max_provisioned_writes'),
consumed_write_units_percent,
'{0} - GSI: {1}'.format(table_name, gsi_name))
else:
consumed_calculated_provisioning = \
calculators.increase_writes_in_units(
current_write_units,
consumed_calculated_provisioning,
get_gsi_option(
table_key,
gsi_key,
'max_provisioned_writes'),
consumed_write_units_percent,
'{0} - GSI: {1}'.format(table_name, gsi_name))
elif (writes_upper_threshold
and consumed_write_units_percent > writes_upper_threshold
and not increase_consumed_writes_scale):
if increase_consumed_writes_unit == 'percent':
consumed_calculated_provisioning = \
calculators.increase_writes_in_percent(
current_write_units,
increase_consumed_writes_with,
get_gsi_option(
table_key,
gsi_key,
'max_provisioned_writes'),
consumed_write_units_percent,
'{0} - GSI: {1}'.format(table_name, gsi_name))
else:
consumed_calculated_provisioning = \
calculators.increase_writes_in_units(
current_write_units,
increase_consumed_writes_with,
get_gsi_option(
table_key, gsi_key, 'max_provisioned_writes'),
consumed_write_units_percent,
'{0} - GSI: {1}'.format(table_name, gsi_name))
# Increase needed due to high throttling
if (throttled_writes_upper_threshold
and throttled_write_count > throttled_writes_upper_threshold):
if increase_writes_unit == 'percent':
throttled_count_calculated_provisioning = \
calculators.increase_writes_in_percent(
updated_write_units,
increase_writes_with,
get_gsi_option(
table_key, gsi_key, 'max_provisioned_writes'),
consumed_write_units_percent,
'{0} - GSI: {1}'.format(table_name, gsi_name))
else:
throttled_count_calculated_provisioning = \
calculators.increase_writes_in_units(
updated_write_units,
increase_writes_with,
get_gsi_option(
table_key, gsi_key, 'max_provisioned_writes'),
consumed_write_units_percent,
'{0} - GSI: {1}'.format(table_name, gsi_name))
# Determine which metric requires the most scaling
if (throttled_by_provisioned_calculated_provisioning
> calculated_provisioning):
calculated_provisioning = \
throttled_by_provisioned_calculated_provisioning
scale_reason = (
"due to throttled events by provisioned "
"units threshold being exceeded")
if (throttled_by_consumed_calculated_provisioning
> calculated_provisioning):
calculated_provisioning = \
throttled_by_consumed_calculated_provisioning
scale_reason = (
"due to throttled events by consumed "
"units threshold being exceeded")
if consumed_calculated_provisioning > calculated_provisioning:
calculated_provisioning = consumed_calculated_provisioning
scale_reason = "due to consumed threshold being exceeded"
if throttled_count_calculated_provisioning > calculated_provisioning:
calculated_provisioning = throttled_count_calculated_provisioning
scale_reason = "due to throttled events threshold being exceeded"
if calculated_provisioning > current_write_units:
logger.info(
'{0} - GSI: {1} - Resetting the number of consecutive '
'write checks. Reason: scale up {2}'.format(
table_name, gsi_name, scale_reason))
num_consec_write_checks = 0
update_needed = True
updated_write_units = calculated_provisioning
# Decrease needed due to low CU consumption
if not update_needed:
# If local/granular values not specified use global values
decrease_consumed_writes_unit = \
decrease_consumed_writes_unit or decrease_writes_unit
decrease_consumed_writes_with = \
decrease_consumed_writes_with or decrease_writes_with
# Initialise variables to store calculated provisioning
consumed_calculated_provisioning = scale_reader_decrease(
decrease_consumed_writes_scale,
consumed_write_units_percent)
calculated_provisioning = None
# Exit if down scaling has been disabled
if not get_gsi_option(
table_key, gsi_key, 'enable_writes_down_scaling'):
logger.debug(
'{0} - GSI: {1} - Down scaling event detected. '
'No action taken as scaling '
'down writes has been disabled in the configuration'.format(
table_name, gsi_name))
# Exit if writes == 0% and downscaling has been disabled at 0%
elif (consumed_write_units_percent == 0 and not get_gsi_option(
table_key, gsi_key, 'allow_scaling_down_writes_on_0_percent')):
logger.info(
'{0} - GSI: {1} - Down scaling event detected. '
'No action taken as scaling down writes is not done when'
' usage is at 0%'.format(table_name, gsi_name))
else:
if consumed_calculated_provisioning:
if decrease_consumed_writes_unit == 'percent':
calculated_provisioning = \
calculators.decrease_writes_in_percent(
updated_write_units,
consumed_calculated_provisioning,
get_gsi_option(
table_key, gsi_key, 'min_provisioned_writes'),
'{0} - GSI: {1}'.format(table_name, gsi_name))
else:
calculated_provisioning = \
calculators.decrease_writes_in_units(
updated_write_units,
consumed_calculated_provisioning,
get_gsi_option(
table_key, gsi_key, 'min_provisioned_writes'),
'{0} - GSI: {1}'.format(table_name, gsi_name))
elif (writes_lower_threshold
and consumed_write_units_percent < writes_lower_threshold
and not decrease_consumed_writes_scale):
if decrease_consumed_writes_unit == 'percent':
calculated_provisioning = \
calculators.decrease_writes_in_percent(
updated_write_units,
decrease_consumed_writes_with,
get_gsi_option(
table_key, gsi_key, 'min_provisioned_writes'),
'{0} - GSI: {1}'.format(table_name, gsi_name))
else:
calculated_provisioning = \
calculators.decrease_writes_in_units(
updated_write_units,
decrease_consumed_writes_with,
get_gsi_option(
table_key, gsi_key, 'min_provisioned_writes'),
'{0} - GSI: {1}'.format(table_name, gsi_name))
if (calculated_provisioning
and current_write_units != calculated_provisioning):
num_consec_write_checks += 1
if num_consec_write_checks >= \
num_write_checks_before_scale_down:
update_needed = True
updated_write_units = calculated_provisioning
# Never go over the configured max provisioning
if max_provisioned_writes:
if int(updated_write_units) > int(max_provisioned_writes):
update_needed = True
updated_write_units = int(max_provisioned_writes)
logger.info(
'{0} - GSI: {1} - '
'Will not increase writes over gsi-max-provisioned-writes '
'limit ({2} writes)'.format(
table_name,
gsi_name,
updated_write_units))
# Ensure that we have met the min-provisioning
if min_provisioned_writes:
if int(min_provisioned_writes) > int(updated_write_units):
update_needed = True
updated_write_units = int(min_provisioned_writes)
logger.info(
'{0} - GSI: {1} - Increasing writes to '
'meet gsi-min-provisioned-writes '
'limit ({2} writes)'.format(
table_name,
gsi_name,
updated_write_units))
if calculators.is_consumed_over_proposed(
current_write_units,
updated_write_units,
consumed_write_units_percent):
update_needed = False
updated_write_units = current_write_units
logger.info(
'{0} - GSI: {1} - Consumed is over proposed write units. Will leave '
'table at current setting.'.format(table_name, gsi_name))
logger.info('{0} - GSI: {1} - Consecutive write checks {2}/{3}'.format(
table_name,
gsi_name,
num_consec_write_checks,
num_write_checks_before_scale_down))
return update_needed, updated_write_units, num_consec_write_checks | [
"def",
"__ensure_provisioning_writes",
"(",
"table_name",
",",
"table_key",
",",
"gsi_name",
",",
"gsi_key",
",",
"num_consec_write_checks",
")",
":",
"if",
"not",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'enable_writes_autoscaling'",
")",
":",
"logger",
".",
"info",
"(",
"'{0} - GSI: {1} - '",
"'Autoscaling of writes has been disabled'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
")",
")",
"return",
"False",
",",
"dynamodb",
".",
"get_provisioned_gsi_write_units",
"(",
"table_name",
",",
"gsi_name",
")",
",",
"0",
"update_needed",
"=",
"False",
"try",
":",
"lookback_window_start",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'lookback_window_start'",
")",
"lookback_period",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'lookback_period'",
")",
"current_write_units",
"=",
"dynamodb",
".",
"get_provisioned_gsi_write_units",
"(",
"table_name",
",",
"gsi_name",
")",
"consumed_write_units_percent",
"=",
"gsi_stats",
".",
"get_consumed_write_units_percent",
"(",
"table_name",
",",
"gsi_name",
",",
"lookback_window_start",
",",
"lookback_period",
")",
"throttled_write_count",
"=",
"gsi_stats",
".",
"get_throttled_write_event_count",
"(",
"table_name",
",",
"gsi_name",
",",
"lookback_window_start",
",",
"lookback_period",
")",
"throttled_by_provisioned_write_percent",
"=",
"gsi_stats",
".",
"get_throttled_by_provisioned_write_event_percent",
"(",
"table_name",
",",
"gsi_name",
",",
"lookback_window_start",
",",
"lookback_period",
")",
"throttled_by_consumed_write_percent",
"=",
"gsi_stats",
".",
"get_throttled_by_consumed_write_percent",
"(",
"table_name",
",",
"gsi_name",
",",
"lookback_window_start",
",",
"lookback_period",
")",
"writes_upper_threshold",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'writes_upper_threshold'",
")",
"writes_lower_threshold",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'writes_lower_threshold'",
")",
"throttled_writes_upper_threshold",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'throttled_writes_upper_threshold'",
")",
"increase_writes_unit",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'increase_writes_unit'",
")",
"increase_writes_with",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'increase_writes_with'",
")",
"decrease_writes_unit",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'decrease_writes_unit'",
")",
"decrease_writes_with",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'decrease_writes_with'",
")",
"min_provisioned_writes",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'min_provisioned_writes'",
")",
"max_provisioned_writes",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'max_provisioned_writes'",
")",
"num_write_checks_before_scale_down",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'num_write_checks_before_scale_down'",
")",
"num_write_checks_reset_percent",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'num_write_checks_reset_percent'",
")",
"increase_throttled_by_provisioned_writes_unit",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'increase_throttled_by_provisioned_writes_unit'",
")",
"increase_throttled_by_provisioned_writes_scale",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'increase_throttled_by_provisioned_writes_scale'",
")",
"increase_throttled_by_consumed_writes_unit",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'increase_throttled_by_consumed_writes_unit'",
")",
"increase_throttled_by_consumed_writes_scale",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'increase_throttled_by_consumed_writes_scale'",
")",
"increase_consumed_writes_unit",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'increase_consumed_writes_unit'",
")",
"increase_consumed_writes_with",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'increase_consumed_writes_with'",
")",
"increase_consumed_writes_scale",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'increase_consumed_writes_scale'",
")",
"decrease_consumed_writes_unit",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'decrease_consumed_writes_unit'",
")",
"decrease_consumed_writes_with",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'decrease_consumed_writes_with'",
")",
"decrease_consumed_writes_scale",
"=",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'decrease_consumed_writes_scale'",
")",
"except",
"JSONResponseError",
":",
"raise",
"except",
"BotoServerError",
":",
"raise",
"# Set the updated units to the current write unit value",
"updated_write_units",
"=",
"current_write_units",
"# Reset consecutive write count if num_write_checks_reset_percent",
"# is reached",
"if",
"num_write_checks_reset_percent",
":",
"if",
"consumed_write_units_percent",
">=",
"num_write_checks_reset_percent",
":",
"logger",
".",
"info",
"(",
"'{0} - GSI: {1} - Resetting the number of consecutive '",
"'write checks. Reason: Consumed percent {2} is '",
"'greater than reset percent: {3}'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
",",
"consumed_write_units_percent",
",",
"num_write_checks_reset_percent",
")",
")",
"num_consec_write_checks",
"=",
"0",
"# Exit if up scaling has been disabled",
"if",
"not",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'enable_writes_up_scaling'",
")",
":",
"logger",
".",
"debug",
"(",
"'{0} - GSI: {1} - Up scaling event detected. No action taken as '",
"'scaling up writes has been disabled in the configuration'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
")",
")",
"else",
":",
"# If local/granular values not specified use global values",
"increase_consumed_writes_unit",
"=",
"increase_consumed_writes_unit",
"or",
"increase_writes_unit",
"increase_throttled_by_provisioned_writes_unit",
"=",
"(",
"increase_throttled_by_provisioned_writes_unit",
"or",
"increase_writes_unit",
")",
"increase_throttled_by_consumed_writes_unit",
"=",
"increase_throttled_by_consumed_writes_unit",
"or",
"increase_writes_unit",
"increase_consumed_writes_with",
"=",
"increase_consumed_writes_with",
"or",
"increase_writes_with",
"# Initialise variables to store calculated provisioning",
"throttled_by_provisioned_calculated_provisioning",
"=",
"scale_reader",
"(",
"increase_throttled_by_provisioned_writes_scale",
",",
"throttled_by_provisioned_write_percent",
")",
"throttled_by_consumed_calculated_provisioning",
"=",
"scale_reader",
"(",
"increase_throttled_by_consumed_writes_scale",
",",
"throttled_by_consumed_write_percent",
")",
"consumed_calculated_provisioning",
"=",
"scale_reader",
"(",
"increase_consumed_writes_scale",
",",
"consumed_write_units_percent",
")",
"throttled_count_calculated_provisioning",
"=",
"0",
"calculated_provisioning",
"=",
"0",
"# Increase needed due to high throttled to provisioned ratio",
"if",
"throttled_by_provisioned_calculated_provisioning",
":",
"if",
"increase_throttled_by_provisioned_writes_unit",
"==",
"'percent'",
":",
"throttled_by_provisioned_calculated_provisioning",
"=",
"calculators",
".",
"increase_writes_in_percent",
"(",
"current_write_units",
",",
"throttled_by_provisioned_calculated_provisioning",
",",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'max_provisioned_writes'",
")",
",",
"consumed_write_units_percent",
",",
"'{0} - GSI: {1}'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
")",
")",
"else",
":",
"throttled_by_provisioned_calculated_provisioning",
"=",
"calculators",
".",
"increase_writes_in_units",
"(",
"current_write_units",
",",
"throttled_by_provisioned_calculated_provisioning",
",",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'max_provisioned_writes'",
")",
",",
"consumed_write_units_percent",
",",
"'{0} - GSI: {1}'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
")",
")",
"# Increase needed due to high throttled to consumed ratio",
"if",
"throttled_by_consumed_calculated_provisioning",
":",
"if",
"increase_throttled_by_consumed_writes_unit",
"==",
"'percent'",
":",
"throttled_by_consumed_calculated_provisioning",
"=",
"calculators",
".",
"increase_writes_in_percent",
"(",
"current_write_units",
",",
"throttled_by_consumed_calculated_provisioning",
",",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'max_provisioned_writes'",
")",
",",
"consumed_write_units_percent",
",",
"'{0} - GSI: {1}'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
")",
")",
"else",
":",
"throttled_by_consumed_calculated_provisioning",
"=",
"calculators",
".",
"increase_writes_in_units",
"(",
"current_write_units",
",",
"throttled_by_consumed_calculated_provisioning",
",",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'max_provisioned_writes'",
")",
",",
"consumed_write_units_percent",
",",
"'{0} - GSI: {1}'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
")",
")",
"# Increase needed due to high CU consumption",
"if",
"consumed_calculated_provisioning",
":",
"if",
"increase_consumed_writes_unit",
"==",
"'percent'",
":",
"consumed_calculated_provisioning",
"=",
"calculators",
".",
"increase_writes_in_percent",
"(",
"current_write_units",
",",
"consumed_calculated_provisioning",
",",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'max_provisioned_writes'",
")",
",",
"consumed_write_units_percent",
",",
"'{0} - GSI: {1}'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
")",
")",
"else",
":",
"consumed_calculated_provisioning",
"=",
"calculators",
".",
"increase_writes_in_units",
"(",
"current_write_units",
",",
"consumed_calculated_provisioning",
",",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'max_provisioned_writes'",
")",
",",
"consumed_write_units_percent",
",",
"'{0} - GSI: {1}'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
")",
")",
"elif",
"(",
"writes_upper_threshold",
"and",
"consumed_write_units_percent",
">",
"writes_upper_threshold",
"and",
"not",
"increase_consumed_writes_scale",
")",
":",
"if",
"increase_consumed_writes_unit",
"==",
"'percent'",
":",
"consumed_calculated_provisioning",
"=",
"calculators",
".",
"increase_writes_in_percent",
"(",
"current_write_units",
",",
"increase_consumed_writes_with",
",",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'max_provisioned_writes'",
")",
",",
"consumed_write_units_percent",
",",
"'{0} - GSI: {1}'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
")",
")",
"else",
":",
"consumed_calculated_provisioning",
"=",
"calculators",
".",
"increase_writes_in_units",
"(",
"current_write_units",
",",
"increase_consumed_writes_with",
",",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'max_provisioned_writes'",
")",
",",
"consumed_write_units_percent",
",",
"'{0} - GSI: {1}'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
")",
")",
"# Increase needed due to high throttling",
"if",
"(",
"throttled_writes_upper_threshold",
"and",
"throttled_write_count",
">",
"throttled_writes_upper_threshold",
")",
":",
"if",
"increase_writes_unit",
"==",
"'percent'",
":",
"throttled_count_calculated_provisioning",
"=",
"calculators",
".",
"increase_writes_in_percent",
"(",
"updated_write_units",
",",
"increase_writes_with",
",",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'max_provisioned_writes'",
")",
",",
"consumed_write_units_percent",
",",
"'{0} - GSI: {1}'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
")",
")",
"else",
":",
"throttled_count_calculated_provisioning",
"=",
"calculators",
".",
"increase_writes_in_units",
"(",
"updated_write_units",
",",
"increase_writes_with",
",",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'max_provisioned_writes'",
")",
",",
"consumed_write_units_percent",
",",
"'{0} - GSI: {1}'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
")",
")",
"# Determine which metric requires the most scaling",
"if",
"(",
"throttled_by_provisioned_calculated_provisioning",
">",
"calculated_provisioning",
")",
":",
"calculated_provisioning",
"=",
"throttled_by_provisioned_calculated_provisioning",
"scale_reason",
"=",
"(",
"\"due to throttled events by provisioned \"",
"\"units threshold being exceeded\"",
")",
"if",
"(",
"throttled_by_consumed_calculated_provisioning",
">",
"calculated_provisioning",
")",
":",
"calculated_provisioning",
"=",
"throttled_by_consumed_calculated_provisioning",
"scale_reason",
"=",
"(",
"\"due to throttled events by consumed \"",
"\"units threshold being exceeded\"",
")",
"if",
"consumed_calculated_provisioning",
">",
"calculated_provisioning",
":",
"calculated_provisioning",
"=",
"consumed_calculated_provisioning",
"scale_reason",
"=",
"\"due to consumed threshold being exceeded\"",
"if",
"throttled_count_calculated_provisioning",
">",
"calculated_provisioning",
":",
"calculated_provisioning",
"=",
"throttled_count_calculated_provisioning",
"scale_reason",
"=",
"\"due to throttled events threshold being exceeded\"",
"if",
"calculated_provisioning",
">",
"current_write_units",
":",
"logger",
".",
"info",
"(",
"'{0} - GSI: {1} - Resetting the number of consecutive '",
"'write checks. Reason: scale up {2}'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
",",
"scale_reason",
")",
")",
"num_consec_write_checks",
"=",
"0",
"update_needed",
"=",
"True",
"updated_write_units",
"=",
"calculated_provisioning",
"# Decrease needed due to low CU consumption",
"if",
"not",
"update_needed",
":",
"# If local/granular values not specified use global values",
"decrease_consumed_writes_unit",
"=",
"decrease_consumed_writes_unit",
"or",
"decrease_writes_unit",
"decrease_consumed_writes_with",
"=",
"decrease_consumed_writes_with",
"or",
"decrease_writes_with",
"# Initialise variables to store calculated provisioning",
"consumed_calculated_provisioning",
"=",
"scale_reader_decrease",
"(",
"decrease_consumed_writes_scale",
",",
"consumed_write_units_percent",
")",
"calculated_provisioning",
"=",
"None",
"# Exit if down scaling has been disabled",
"if",
"not",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'enable_writes_down_scaling'",
")",
":",
"logger",
".",
"debug",
"(",
"'{0} - GSI: {1} - Down scaling event detected. '",
"'No action taken as scaling '",
"'down writes has been disabled in the configuration'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
")",
")",
"# Exit if writes == 0% and downscaling has been disabled at 0%",
"elif",
"(",
"consumed_write_units_percent",
"==",
"0",
"and",
"not",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'allow_scaling_down_writes_on_0_percent'",
")",
")",
":",
"logger",
".",
"info",
"(",
"'{0} - GSI: {1} - Down scaling event detected. '",
"'No action taken as scaling down writes is not done when'",
"' usage is at 0%'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
")",
")",
"else",
":",
"if",
"consumed_calculated_provisioning",
":",
"if",
"decrease_consumed_writes_unit",
"==",
"'percent'",
":",
"calculated_provisioning",
"=",
"calculators",
".",
"decrease_writes_in_percent",
"(",
"updated_write_units",
",",
"consumed_calculated_provisioning",
",",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'min_provisioned_writes'",
")",
",",
"'{0} - GSI: {1}'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
")",
")",
"else",
":",
"calculated_provisioning",
"=",
"calculators",
".",
"decrease_writes_in_units",
"(",
"updated_write_units",
",",
"consumed_calculated_provisioning",
",",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'min_provisioned_writes'",
")",
",",
"'{0} - GSI: {1}'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
")",
")",
"elif",
"(",
"writes_lower_threshold",
"and",
"consumed_write_units_percent",
"<",
"writes_lower_threshold",
"and",
"not",
"decrease_consumed_writes_scale",
")",
":",
"if",
"decrease_consumed_writes_unit",
"==",
"'percent'",
":",
"calculated_provisioning",
"=",
"calculators",
".",
"decrease_writes_in_percent",
"(",
"updated_write_units",
",",
"decrease_consumed_writes_with",
",",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'min_provisioned_writes'",
")",
",",
"'{0} - GSI: {1}'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
")",
")",
"else",
":",
"calculated_provisioning",
"=",
"calculators",
".",
"decrease_writes_in_units",
"(",
"updated_write_units",
",",
"decrease_consumed_writes_with",
",",
"get_gsi_option",
"(",
"table_key",
",",
"gsi_key",
",",
"'min_provisioned_writes'",
")",
",",
"'{0} - GSI: {1}'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
")",
")",
"if",
"(",
"calculated_provisioning",
"and",
"current_write_units",
"!=",
"calculated_provisioning",
")",
":",
"num_consec_write_checks",
"+=",
"1",
"if",
"num_consec_write_checks",
">=",
"num_write_checks_before_scale_down",
":",
"update_needed",
"=",
"True",
"updated_write_units",
"=",
"calculated_provisioning",
"# Never go over the configured max provisioning",
"if",
"max_provisioned_writes",
":",
"if",
"int",
"(",
"updated_write_units",
")",
">",
"int",
"(",
"max_provisioned_writes",
")",
":",
"update_needed",
"=",
"True",
"updated_write_units",
"=",
"int",
"(",
"max_provisioned_writes",
")",
"logger",
".",
"info",
"(",
"'{0} - GSI: {1} - '",
"'Will not increase writes over gsi-max-provisioned-writes '",
"'limit ({2} writes)'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
",",
"updated_write_units",
")",
")",
"# Ensure that we have met the min-provisioning",
"if",
"min_provisioned_writes",
":",
"if",
"int",
"(",
"min_provisioned_writes",
")",
">",
"int",
"(",
"updated_write_units",
")",
":",
"update_needed",
"=",
"True",
"updated_write_units",
"=",
"int",
"(",
"min_provisioned_writes",
")",
"logger",
".",
"info",
"(",
"'{0} - GSI: {1} - Increasing writes to '",
"'meet gsi-min-provisioned-writes '",
"'limit ({2} writes)'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
",",
"updated_write_units",
")",
")",
"if",
"calculators",
".",
"is_consumed_over_proposed",
"(",
"current_write_units",
",",
"updated_write_units",
",",
"consumed_write_units_percent",
")",
":",
"update_needed",
"=",
"False",
"updated_write_units",
"=",
"current_write_units",
"logger",
".",
"info",
"(",
"'{0} - GSI: {1} - Consumed is over proposed write units. Will leave '",
"'table at current setting.'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
")",
")",
"logger",
".",
"info",
"(",
"'{0} - GSI: {1} - Consecutive write checks {2}/{3}'",
".",
"format",
"(",
"table_name",
",",
"gsi_name",
",",
"num_consec_write_checks",
",",
"num_write_checks_before_scale_down",
")",
")",
"return",
"update_needed",
",",
"updated_write_units",
",",
"num_consec_write_checks"
] | Ensure that provisioning of writes is correct
:type table_name: str
:param table_name: Name of the DynamoDB table
:type table_key: str
:param table_key: Table configuration option key name
:type gsi_name: str
:param gsi_name: Name of the GSI
:type gsi_key: str
:param gsi_key: Configuration option key name
:type num_consec_write_checks: int
:param num_consec_write_checks: How many consecutive checks have we had
:returns: (bool, int, int)
update_needed, updated_write_units, num_consec_write_checks | [
"Ensure",
"that",
"provisioning",
"of",
"writes",
"is",
"correct"
] | python | train |
jut-io/jut-python-tools | jut/commands/programs.py | https://github.com/jut-io/jut-python-tools/blob/65574d23f51a7bbced9bb25010d02da5ca5d906f/jut/commands/programs.py#L89-L199 | def pull(options):
"""
pull all remote programs to a local directory
"""
configuration = config.get_default()
app_url = configuration['app_url']
if options.deployment != None:
deployment_name = options.deployment
else:
deployment_name = configuration['deployment_name']
client_id = configuration['client_id']
client_secret = configuration['client_secret']
token_manager = auth.TokenManager(client_id=client_id,
client_secret=client_secret,
app_url=app_url)
if options.all == True:
account_id = None
else:
account_id = accounts.get_logged_in_account_id(token_manager=token_manager,
app_url=app_url)
programs_details = programs.get_programs(deployment_name,
token_manager=token_manager,
created_by=account_id,
app_url=app_url)
if not os.path.exists(options.directory):
os.mkdir(options.directory)
account_ids = set()
for program in programs_details:
account_ids.add(program['createdBy'])
accounts_details = accounts.get_accounts(account_ids,
token_manager=token_manager,
app_url=app_url)
account_lookup = {}
for account in accounts_details['accounts']:
account_lookup[account['id']] = account
decision = None
for program in programs_details:
program_name = program['name']
juttle_filename = '%s.juttle' % escape_filename(program_name)
if options.per_user_directory:
username = account_lookup[program['createdBy']]['username']
userdir = os.path.join(options.directory, username)
if not os.path.exists(userdir):
os.mkdir(userdir)
juttle_filepath = os.path.join(userdir, juttle_filename)
else:
juttle_filepath = os.path.join(options.directory, juttle_filename)
if os.path.exists(juttle_filepath) and decision != 'A':
program_code = None
with codecs.open(juttle_filepath, 'r', encoding='UTF-8') as program_file:
program_code = program_file.read()
local_last_edited = int(os.stat(juttle_filepath).st_mtime)
remote_last_edited = dates.iso8601_to_epoch(program['lastEdited'])
if local_last_edited != remote_last_edited:
info('Juttle changed since last pull for "%s"' % program_name)
decision = console.prompt('Would you like to '
'(O - Override,'
' S - Skip,'
' R - Review Changes,'
' A - override All)?')
if decision == 'R':
info('Following is what would change if we overrode using your copy:')
info('*'*80)
for line in difflib.ndiff(program['code'].split('\n'),
program_code.split('\n')):
info(line)
info('*'*80)
decision = console.prompt('Would you like to '
'(O - Override,'
' S - Skip)?')
if decision == 'S':
# jump to the next file
continue
elif decision == 'O':
pass
elif decision == 'A':
pass
else:
raise JutException('Unexpected option "%s"' % decision)
info('importing program "%s" to %s' % (program['name'], juttle_filepath))
with codecs.open(juttle_filepath, 'w', encoding='UTF-8') as program_file:
program_file.write(program['code'])
# update creation time to match the lastEdited field
epoch = dates.iso8601_to_epoch(program['lastEdited'])
os.utime(juttle_filepath, (epoch, epoch)) | [
"def",
"pull",
"(",
"options",
")",
":",
"configuration",
"=",
"config",
".",
"get_default",
"(",
")",
"app_url",
"=",
"configuration",
"[",
"'app_url'",
"]",
"if",
"options",
".",
"deployment",
"!=",
"None",
":",
"deployment_name",
"=",
"options",
".",
"deployment",
"else",
":",
"deployment_name",
"=",
"configuration",
"[",
"'deployment_name'",
"]",
"client_id",
"=",
"configuration",
"[",
"'client_id'",
"]",
"client_secret",
"=",
"configuration",
"[",
"'client_secret'",
"]",
"token_manager",
"=",
"auth",
".",
"TokenManager",
"(",
"client_id",
"=",
"client_id",
",",
"client_secret",
"=",
"client_secret",
",",
"app_url",
"=",
"app_url",
")",
"if",
"options",
".",
"all",
"==",
"True",
":",
"account_id",
"=",
"None",
"else",
":",
"account_id",
"=",
"accounts",
".",
"get_logged_in_account_id",
"(",
"token_manager",
"=",
"token_manager",
",",
"app_url",
"=",
"app_url",
")",
"programs_details",
"=",
"programs",
".",
"get_programs",
"(",
"deployment_name",
",",
"token_manager",
"=",
"token_manager",
",",
"created_by",
"=",
"account_id",
",",
"app_url",
"=",
"app_url",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"options",
".",
"directory",
")",
":",
"os",
".",
"mkdir",
"(",
"options",
".",
"directory",
")",
"account_ids",
"=",
"set",
"(",
")",
"for",
"program",
"in",
"programs_details",
":",
"account_ids",
".",
"add",
"(",
"program",
"[",
"'createdBy'",
"]",
")",
"accounts_details",
"=",
"accounts",
".",
"get_accounts",
"(",
"account_ids",
",",
"token_manager",
"=",
"token_manager",
",",
"app_url",
"=",
"app_url",
")",
"account_lookup",
"=",
"{",
"}",
"for",
"account",
"in",
"accounts_details",
"[",
"'accounts'",
"]",
":",
"account_lookup",
"[",
"account",
"[",
"'id'",
"]",
"]",
"=",
"account",
"decision",
"=",
"None",
"for",
"program",
"in",
"programs_details",
":",
"program_name",
"=",
"program",
"[",
"'name'",
"]",
"juttle_filename",
"=",
"'%s.juttle'",
"%",
"escape_filename",
"(",
"program_name",
")",
"if",
"options",
".",
"per_user_directory",
":",
"username",
"=",
"account_lookup",
"[",
"program",
"[",
"'createdBy'",
"]",
"]",
"[",
"'username'",
"]",
"userdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"options",
".",
"directory",
",",
"username",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"userdir",
")",
":",
"os",
".",
"mkdir",
"(",
"userdir",
")",
"juttle_filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"userdir",
",",
"juttle_filename",
")",
"else",
":",
"juttle_filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"options",
".",
"directory",
",",
"juttle_filename",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"juttle_filepath",
")",
"and",
"decision",
"!=",
"'A'",
":",
"program_code",
"=",
"None",
"with",
"codecs",
".",
"open",
"(",
"juttle_filepath",
",",
"'r'",
",",
"encoding",
"=",
"'UTF-8'",
")",
"as",
"program_file",
":",
"program_code",
"=",
"program_file",
".",
"read",
"(",
")",
"local_last_edited",
"=",
"int",
"(",
"os",
".",
"stat",
"(",
"juttle_filepath",
")",
".",
"st_mtime",
")",
"remote_last_edited",
"=",
"dates",
".",
"iso8601_to_epoch",
"(",
"program",
"[",
"'lastEdited'",
"]",
")",
"if",
"local_last_edited",
"!=",
"remote_last_edited",
":",
"info",
"(",
"'Juttle changed since last pull for \"%s\"'",
"%",
"program_name",
")",
"decision",
"=",
"console",
".",
"prompt",
"(",
"'Would you like to '",
"'(O - Override,'",
"' S - Skip,'",
"' R - Review Changes,'",
"' A - override All)?'",
")",
"if",
"decision",
"==",
"'R'",
":",
"info",
"(",
"'Following is what would change if we overrode using your copy:'",
")",
"info",
"(",
"'*'",
"*",
"80",
")",
"for",
"line",
"in",
"difflib",
".",
"ndiff",
"(",
"program",
"[",
"'code'",
"]",
".",
"split",
"(",
"'\\n'",
")",
",",
"program_code",
".",
"split",
"(",
"'\\n'",
")",
")",
":",
"info",
"(",
"line",
")",
"info",
"(",
"'*'",
"*",
"80",
")",
"decision",
"=",
"console",
".",
"prompt",
"(",
"'Would you like to '",
"'(O - Override,'",
"' S - Skip)?'",
")",
"if",
"decision",
"==",
"'S'",
":",
"# jump to the next file",
"continue",
"elif",
"decision",
"==",
"'O'",
":",
"pass",
"elif",
"decision",
"==",
"'A'",
":",
"pass",
"else",
":",
"raise",
"JutException",
"(",
"'Unexpected option \"%s\"'",
"%",
"decision",
")",
"info",
"(",
"'importing program \"%s\" to %s'",
"%",
"(",
"program",
"[",
"'name'",
"]",
",",
"juttle_filepath",
")",
")",
"with",
"codecs",
".",
"open",
"(",
"juttle_filepath",
",",
"'w'",
",",
"encoding",
"=",
"'UTF-8'",
")",
"as",
"program_file",
":",
"program_file",
".",
"write",
"(",
"program",
"[",
"'code'",
"]",
")",
"# update creation time to match the lastEdited field",
"epoch",
"=",
"dates",
".",
"iso8601_to_epoch",
"(",
"program",
"[",
"'lastEdited'",
"]",
")",
"os",
".",
"utime",
"(",
"juttle_filepath",
",",
"(",
"epoch",
",",
"epoch",
")",
")"
] | pull all remote programs to a local directory | [
"pull",
"all",
"remote",
"programs",
"to",
"a",
"local",
"directory"
] | python | train |
wummel/dosage | dosagelib/events.py | https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/events.py#L220-L227 | def newComic(self, comic):
"""Start new comic list in HTML."""
if self.lastUrl is not None:
self.html.write(u'</li>\n')
if self.lastComic is not None:
self.html.write(u'</ul>\n')
self.html.write(u'<li>%s</li>\n' % comic.name)
self.html.write(u'<ul>\n') | [
"def",
"newComic",
"(",
"self",
",",
"comic",
")",
":",
"if",
"self",
".",
"lastUrl",
"is",
"not",
"None",
":",
"self",
".",
"html",
".",
"write",
"(",
"u'</li>\\n'",
")",
"if",
"self",
".",
"lastComic",
"is",
"not",
"None",
":",
"self",
".",
"html",
".",
"write",
"(",
"u'</ul>\\n'",
")",
"self",
".",
"html",
".",
"write",
"(",
"u'<li>%s</li>\\n'",
"%",
"comic",
".",
"name",
")",
"self",
".",
"html",
".",
"write",
"(",
"u'<ul>\\n'",
")"
] | Start new comic list in HTML. | [
"Start",
"new",
"comic",
"list",
"in",
"HTML",
"."
] | python | train |
SuperCowPowers/workbench | workbench/workers/mem_procdump.py | https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/workers/mem_procdump.py#L32-L78 | def execute(self, input_data):
''' Execute method '''
# Spin up the rekall adapter
adapter = RekallAdapter()
adapter.set_plugin_name(self.plugin_name)
# Create a temporary directory and run this plugin from there
with self.goto_temp_directory():
# Run the procdump plugin
rekall_output = adapter.execute(input_data)
# Process the output data
for line in rekall_output:
if line['type'] == 'm': # Meta
self.output['meta'] = line['data']
elif line['type'] == 't': # New Table Headers (column names)
self.column_map = {item['cname']: item['name'] if 'name' in item else item['cname'] for item in line['data']}
elif line['type'] == 'r': # Row
# Add the row to our current table
row = RekallAdapter.process_row(line['data'], self.column_map)
self.output['tables'][self.current_table_name].append(row)
# Scrape any extracted files
print 'mem_procdump: Scraping dumped files...'
for output_file in glob.glob('*'):
# Store the output into workbench, put md5s in the 'dumped_files' field
output_name = os.path.basename(output_file)
output_name = output_name.replace('executable.', '')
with open(output_file, 'rb') as dumped_file:
raw_bytes = dumped_file.read()
md5 = self.c.store_sample(raw_bytes, output_name, 'exe')
# Remove some columns from meta data
meta = self.c.work_request('meta', md5)['meta']
del meta['customer']
del meta['encoding']
del meta['import_time']
del meta['mime_type']
self.output['tables'][self.current_table_name].append(meta)
# All done
return self.output | [
"def",
"execute",
"(",
"self",
",",
"input_data",
")",
":",
"# Spin up the rekall adapter",
"adapter",
"=",
"RekallAdapter",
"(",
")",
"adapter",
".",
"set_plugin_name",
"(",
"self",
".",
"plugin_name",
")",
"# Create a temporary directory and run this plugin from there",
"with",
"self",
".",
"goto_temp_directory",
"(",
")",
":",
"# Run the procdump plugin",
"rekall_output",
"=",
"adapter",
".",
"execute",
"(",
"input_data",
")",
"# Process the output data",
"for",
"line",
"in",
"rekall_output",
":",
"if",
"line",
"[",
"'type'",
"]",
"==",
"'m'",
":",
"# Meta",
"self",
".",
"output",
"[",
"'meta'",
"]",
"=",
"line",
"[",
"'data'",
"]",
"elif",
"line",
"[",
"'type'",
"]",
"==",
"'t'",
":",
"# New Table Headers (column names)",
"self",
".",
"column_map",
"=",
"{",
"item",
"[",
"'cname'",
"]",
":",
"item",
"[",
"'name'",
"]",
"if",
"'name'",
"in",
"item",
"else",
"item",
"[",
"'cname'",
"]",
"for",
"item",
"in",
"line",
"[",
"'data'",
"]",
"}",
"elif",
"line",
"[",
"'type'",
"]",
"==",
"'r'",
":",
"# Row",
"# Add the row to our current table",
"row",
"=",
"RekallAdapter",
".",
"process_row",
"(",
"line",
"[",
"'data'",
"]",
",",
"self",
".",
"column_map",
")",
"self",
".",
"output",
"[",
"'tables'",
"]",
"[",
"self",
".",
"current_table_name",
"]",
".",
"append",
"(",
"row",
")",
"# Scrape any extracted files",
"print",
"'mem_procdump: Scraping dumped files...'",
"for",
"output_file",
"in",
"glob",
".",
"glob",
"(",
"'*'",
")",
":",
"# Store the output into workbench, put md5s in the 'dumped_files' field",
"output_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"output_file",
")",
"output_name",
"=",
"output_name",
".",
"replace",
"(",
"'executable.'",
",",
"''",
")",
"with",
"open",
"(",
"output_file",
",",
"'rb'",
")",
"as",
"dumped_file",
":",
"raw_bytes",
"=",
"dumped_file",
".",
"read",
"(",
")",
"md5",
"=",
"self",
".",
"c",
".",
"store_sample",
"(",
"raw_bytes",
",",
"output_name",
",",
"'exe'",
")",
"# Remove some columns from meta data",
"meta",
"=",
"self",
".",
"c",
".",
"work_request",
"(",
"'meta'",
",",
"md5",
")",
"[",
"'meta'",
"]",
"del",
"meta",
"[",
"'customer'",
"]",
"del",
"meta",
"[",
"'encoding'",
"]",
"del",
"meta",
"[",
"'import_time'",
"]",
"del",
"meta",
"[",
"'mime_type'",
"]",
"self",
".",
"output",
"[",
"'tables'",
"]",
"[",
"self",
".",
"current_table_name",
"]",
".",
"append",
"(",
"meta",
")",
"# All done",
"return",
"self",
".",
"output"
] | Execute method | [
"Execute",
"method"
] | python | train |
tmr232/Sark | sark/code/instruction.py | https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/code/instruction.py#L270-L280 | def reg(self):
"""Name of the register used in the operand."""
if self.type.is_displ or self.type.is_phrase:
size = core.get_native_size()
return base.get_register_name(self.reg_id, size)
if self.type.is_reg:
return base.get_register_name(self.reg_id, self.size)
else:
raise exceptions.SarkOperandWithoutReg("Operand does not have a register.") | [
"def",
"reg",
"(",
"self",
")",
":",
"if",
"self",
".",
"type",
".",
"is_displ",
"or",
"self",
".",
"type",
".",
"is_phrase",
":",
"size",
"=",
"core",
".",
"get_native_size",
"(",
")",
"return",
"base",
".",
"get_register_name",
"(",
"self",
".",
"reg_id",
",",
"size",
")",
"if",
"self",
".",
"type",
".",
"is_reg",
":",
"return",
"base",
".",
"get_register_name",
"(",
"self",
".",
"reg_id",
",",
"self",
".",
"size",
")",
"else",
":",
"raise",
"exceptions",
".",
"SarkOperandWithoutReg",
"(",
"\"Operand does not have a register.\"",
")"
] | Name of the register used in the operand. | [
"Name",
"of",
"the",
"register",
"used",
"in",
"the",
"operand",
"."
] | python | train |
google/grr | grr/core/grr_response_core/lib/parsers/wmi_parser.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/parsers/wmi_parser.py#L30-L91 | def BinarySIDtoStringSID(sid):
"""Converts a binary SID to its string representation.
https://msdn.microsoft.com/en-us/library/windows/desktop/aa379597.aspx
The byte representation of an SID is as follows:
Offset Length Description
00 01 revision
01 01 sub-authority count
02 06 authority (big endian)
08 04 subauthority #1 (little endian)
0b 04 subauthority #2 (little endian)
...
Args:
sid: A `bytes` instance for a SID to convert.
Returns:
A `unicode` representation of given SID.
Raises:
ValueError: If the binary SID is malformed.
"""
precondition.AssertType(sid, bytes)
# TODO: This seemingly no-op is actually not a no-op. The reason
# is that `sid` might be either `bytes` from the future package or `str` (e.g.
# a bytes literal on Python 2). This call ensures that we get a `bytes` object
# with Python 3 semantics. Once GRR's codebase is ready to drop support for
# Python 2 this line can be removed as indeed then it would be a no-op.
sid = bytes(sid)
if not sid:
return u""
str_sid_components = [sid[0]]
# Now decode the 48-byte portion
if len(sid) >= 8:
subauthority_count = sid[1]
identifier_authority = struct.unpack(">H", sid[2:4])[0]
identifier_authority <<= 32
identifier_authority |= struct.unpack(">L", sid[4:8])[0]
str_sid_components.append(identifier_authority)
start = 8
for i in range(subauthority_count):
authority = sid[start:start + 4]
if not authority:
break
if len(authority) < 4:
message = ("In binary SID '%s', component %d has been truncated. "
"Expected 4 bytes, found %d: (%s)")
message %= (sid, i, len(authority), authority)
raise ValueError(message)
str_sid_components.append(struct.unpack("<L", authority)[0])
start += 4
return u"S-%s" % (u"-".join(map(str, str_sid_components))) | [
"def",
"BinarySIDtoStringSID",
"(",
"sid",
")",
":",
"precondition",
".",
"AssertType",
"(",
"sid",
",",
"bytes",
")",
"# TODO: This seemingly no-op is actually not a no-op. The reason",
"# is that `sid` might be either `bytes` from the future package or `str` (e.g.",
"# a bytes literal on Python 2). This call ensures that we get a `bytes` object",
"# with Python 3 semantics. Once GRR's codebase is ready to drop support for",
"# Python 2 this line can be removed as indeed then it would be a no-op.",
"sid",
"=",
"bytes",
"(",
"sid",
")",
"if",
"not",
"sid",
":",
"return",
"u\"\"",
"str_sid_components",
"=",
"[",
"sid",
"[",
"0",
"]",
"]",
"# Now decode the 48-byte portion",
"if",
"len",
"(",
"sid",
")",
">=",
"8",
":",
"subauthority_count",
"=",
"sid",
"[",
"1",
"]",
"identifier_authority",
"=",
"struct",
".",
"unpack",
"(",
"\">H\"",
",",
"sid",
"[",
"2",
":",
"4",
"]",
")",
"[",
"0",
"]",
"identifier_authority",
"<<=",
"32",
"identifier_authority",
"|=",
"struct",
".",
"unpack",
"(",
"\">L\"",
",",
"sid",
"[",
"4",
":",
"8",
"]",
")",
"[",
"0",
"]",
"str_sid_components",
".",
"append",
"(",
"identifier_authority",
")",
"start",
"=",
"8",
"for",
"i",
"in",
"range",
"(",
"subauthority_count",
")",
":",
"authority",
"=",
"sid",
"[",
"start",
":",
"start",
"+",
"4",
"]",
"if",
"not",
"authority",
":",
"break",
"if",
"len",
"(",
"authority",
")",
"<",
"4",
":",
"message",
"=",
"(",
"\"In binary SID '%s', component %d has been truncated. \"",
"\"Expected 4 bytes, found %d: (%s)\"",
")",
"message",
"%=",
"(",
"sid",
",",
"i",
",",
"len",
"(",
"authority",
")",
",",
"authority",
")",
"raise",
"ValueError",
"(",
"message",
")",
"str_sid_components",
".",
"append",
"(",
"struct",
".",
"unpack",
"(",
"\"<L\"",
",",
"authority",
")",
"[",
"0",
"]",
")",
"start",
"+=",
"4",
"return",
"u\"S-%s\"",
"%",
"(",
"u\"-\"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"str_sid_components",
")",
")",
")"
] | Converts a binary SID to its string representation.
https://msdn.microsoft.com/en-us/library/windows/desktop/aa379597.aspx
The byte representation of an SID is as follows:
Offset Length Description
00 01 revision
01 01 sub-authority count
02 06 authority (big endian)
08 04 subauthority #1 (little endian)
0b 04 subauthority #2 (little endian)
...
Args:
sid: A `bytes` instance for a SID to convert.
Returns:
A `unicode` representation of given SID.
Raises:
ValueError: If the binary SID is malformed. | [
"Converts",
"a",
"binary",
"SID",
"to",
"its",
"string",
"representation",
"."
] | python | train |
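Illustrative sketch (Python 3) of the byte layout documented in the record above, using the well-known LocalSystem SID (S-1-5-18); the byte string is a hand-built test value, not taken from the GRR sources.

    import struct

    # Revision 1, one sub-authority, 6-byte big-endian authority value 5,
    # then one little-endian 4-byte sub-authority (18)  ->  "S-1-5-18".
    local_system = bytes([1, 1, 0, 0, 0, 0, 0, 5]) + struct.pack("<L", 18)

    revision = local_system[0]
    authority = struct.unpack(">H", local_system[2:4])[0] << 32
    authority |= struct.unpack(">L", local_system[4:8])[0]
    subauthority = struct.unpack("<L", local_system[8:12])[0]
    print(u"S-%d-%d-%d" % (revision, authority, subauthority))  # S-1-5-18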
bjodah/pycompilation | pycompilation/compilation.py | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/compilation.py#L474-L596 | def pyx2obj(pyxpath, objpath=None, interm_c_dir=None, cwd=None,
logger=None, full_module_name=None, only_update=False,
metadir=None, include_numpy=False, include_dirs=None,
cy_kwargs=None, gdb=False, cplus=None, **kwargs):
"""
Convenience function
If cwd is specified, pyxpath and dst are taken to be relative
If only_update is set to `True` the modification time is checked
and compilation is only run if the source is newer than the
destination
Parameters
----------
pyxpath: path string
path to Cython source file
objpath: path string (optional)
path to object file to generate
interm_c_dir: path string (optional)
directory to put generated C file.
cwd: path string (optional)
working directory and root of relative paths
logger: logging.Logger (optional)
passed onto `simple_cythonize` and `src2obj`
full_module_name: string (optional)
passed onto `simple_cythonize`
only_update: bool (optional)
passed onto `simple_cythonize` and `src2obj`
metadir: path string (optional)
passed onto src2obj
include_numpy: bool (optional)
Add numpy include directory to include_dirs. default: False
include_dirs: iterable of path strings (optional)
Passed onto src2obj and via cy_kwargs['include_path']
to simple_cythonize.
cy_kwargs: dict (optional)
keyword arguments passed onto `simple_cythonize`
gdb: bool (optional)
convenience: cy_kwargs['gdb_debug'] is set True if gdb=True,
default: False
cplus: bool (optional)
Indicate whether C++ is used. default: auto-detect using `pyx_is_cplus`
**kwargs: dict
keyword arguments passed onto src2obj
Returns
-------
Absolute path of generated object file.
"""
assert pyxpath.endswith('.pyx')
cwd = cwd or '.'
objpath = objpath or '.'
interm_c_dir = interm_c_dir or os.path.dirname(objpath)
abs_objpath = get_abspath(objpath, cwd=cwd)
if os.path.isdir(abs_objpath):
pyx_fname = os.path.basename(pyxpath)
name, ext = os.path.splitext(pyx_fname)
objpath = os.path.join(objpath, name+objext)
cy_kwargs = cy_kwargs or {}
cy_kwargs['output_dir'] = cwd
if cplus is None:
cplus = pyx_is_cplus(pyxpath)
cy_kwargs['cplus'] = cplus
if gdb:
cy_kwargs['gdb_debug'] = True
if include_dirs:
cy_kwargs['include_path'] = include_dirs
interm_c_file = simple_cythonize(
pyxpath, destdir=interm_c_dir,
cwd=cwd, logger=logger,
full_module_name=full_module_name,
only_update=only_update, **cy_kwargs)
include_dirs = include_dirs or []
if include_numpy:
import numpy
numpy_inc_dir = numpy.get_include()
if numpy_inc_dir not in include_dirs:
include_dirs.append(numpy_inc_dir)
flags = kwargs.pop('flags', [])
needed_flags = ('-fwrapv', '-pthread')
if not cplus:
needed_flags += ('-Wstrict-prototypes',) # not really needed..
for flag in needed_flags:
if flag not in flags:
flags.append(flag)
options = kwargs.pop('options', [])
if kwargs.pop('strict_aliasing', False):
raise CompilationError("Cython req. strict aliasing to be disabled.")
if 'pic' not in options:
options.append('pic')
if 'warn' not in options:
options.append('warn')
# Let's be explicit about standard
if cplus:
std = kwargs.pop('std', 'c++98')
else:
std = kwargs.pop('std', 'c99')
return src2obj(
interm_c_file,
objpath=objpath,
cwd=cwd,
only_update=only_update,
metadir=metadir,
include_dirs=include_dirs,
flags=flags,
std=std,
options=options,
logger=logger,
inc_py=True,
strict_aliasing=False,
**kwargs) | [
"def",
"pyx2obj",
"(",
"pyxpath",
",",
"objpath",
"=",
"None",
",",
"interm_c_dir",
"=",
"None",
",",
"cwd",
"=",
"None",
",",
"logger",
"=",
"None",
",",
"full_module_name",
"=",
"None",
",",
"only_update",
"=",
"False",
",",
"metadir",
"=",
"None",
",",
"include_numpy",
"=",
"False",
",",
"include_dirs",
"=",
"None",
",",
"cy_kwargs",
"=",
"None",
",",
"gdb",
"=",
"False",
",",
"cplus",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"pyxpath",
".",
"endswith",
"(",
"'.pyx'",
")",
"cwd",
"=",
"cwd",
"or",
"'.'",
"objpath",
"=",
"objpath",
"or",
"'.'",
"interm_c_dir",
"=",
"interm_c_dir",
"or",
"os",
".",
"path",
".",
"dirname",
"(",
"objpath",
")",
"abs_objpath",
"=",
"get_abspath",
"(",
"objpath",
",",
"cwd",
"=",
"cwd",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"abs_objpath",
")",
":",
"pyx_fname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"pyxpath",
")",
"name",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"pyx_fname",
")",
"objpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"objpath",
",",
"name",
"+",
"objext",
")",
"cy_kwargs",
"=",
"cy_kwargs",
"or",
"{",
"}",
"cy_kwargs",
"[",
"'output_dir'",
"]",
"=",
"cwd",
"if",
"cplus",
"is",
"None",
":",
"cplus",
"=",
"pyx_is_cplus",
"(",
"pyxpath",
")",
"cy_kwargs",
"[",
"'cplus'",
"]",
"=",
"cplus",
"if",
"gdb",
":",
"cy_kwargs",
"[",
"'gdb_debug'",
"]",
"=",
"True",
"if",
"include_dirs",
":",
"cy_kwargs",
"[",
"'include_path'",
"]",
"=",
"include_dirs",
"interm_c_file",
"=",
"simple_cythonize",
"(",
"pyxpath",
",",
"destdir",
"=",
"interm_c_dir",
",",
"cwd",
"=",
"cwd",
",",
"logger",
"=",
"logger",
",",
"full_module_name",
"=",
"full_module_name",
",",
"only_update",
"=",
"only_update",
",",
"*",
"*",
"cy_kwargs",
")",
"include_dirs",
"=",
"include_dirs",
"or",
"[",
"]",
"if",
"include_numpy",
":",
"import",
"numpy",
"numpy_inc_dir",
"=",
"numpy",
".",
"get_include",
"(",
")",
"if",
"numpy_inc_dir",
"not",
"in",
"include_dirs",
":",
"include_dirs",
".",
"append",
"(",
"numpy_inc_dir",
")",
"flags",
"=",
"kwargs",
".",
"pop",
"(",
"'flags'",
",",
"[",
"]",
")",
"needed_flags",
"=",
"(",
"'-fwrapv'",
",",
"'-pthread'",
")",
"if",
"not",
"cplus",
":",
"needed_flags",
"+=",
"(",
"'-Wstrict-prototypes'",
",",
")",
"# not really needed..",
"for",
"flag",
"in",
"needed_flags",
":",
"if",
"flag",
"not",
"in",
"flags",
":",
"flags",
".",
"append",
"(",
"flag",
")",
"options",
"=",
"kwargs",
".",
"pop",
"(",
"'options'",
",",
"[",
"]",
")",
"if",
"kwargs",
".",
"pop",
"(",
"'strict_aliasing'",
",",
"False",
")",
":",
"raise",
"CompilationError",
"(",
"\"Cython req. strict aliasing to be disabled.\"",
")",
"if",
"'pic'",
"not",
"in",
"options",
":",
"options",
".",
"append",
"(",
"'pic'",
")",
"if",
"'warn'",
"not",
"in",
"options",
":",
"options",
".",
"append",
"(",
"'warn'",
")",
"# Let's be explicit about standard",
"if",
"cplus",
":",
"std",
"=",
"kwargs",
".",
"pop",
"(",
"'std'",
",",
"'c++98'",
")",
"else",
":",
"std",
"=",
"kwargs",
".",
"pop",
"(",
"'std'",
",",
"'c99'",
")",
"return",
"src2obj",
"(",
"interm_c_file",
",",
"objpath",
"=",
"objpath",
",",
"cwd",
"=",
"cwd",
",",
"only_update",
"=",
"only_update",
",",
"metadir",
"=",
"metadir",
",",
"include_dirs",
"=",
"include_dirs",
",",
"flags",
"=",
"flags",
",",
"std",
"=",
"std",
",",
"options",
"=",
"options",
",",
"logger",
"=",
"logger",
",",
"inc_py",
"=",
"True",
",",
"strict_aliasing",
"=",
"False",
",",
"*",
"*",
"kwargs",
")"
] | Convenience function
If cwd is specified, pyxpath and dst are taken to be relative
If only_update is set to `True` the modification time is checked
and compilation is only run if the source is newer than the
destination
Parameters
----------
pyxpath: path string
path to Cython source file
objpath: path string (optional)
path to object file to generate
interm_c_dir: path string (optional)
directory to put generated C file.
cwd: path string (optional)
working directory and root of relative paths
logger: logging.Logger (optional)
passed onto `simple_cythonize` and `src2obj`
full_module_name: string (optional)
passed onto `simple_cythonize`
only_update: bool (optional)
passed onto `simple_cythonize` and `src2obj`
metadir: path string (optional)
passed onto src2obj
include_numpy: bool (optional)
Add numpy include directory to include_dirs. default: False
include_dirs: iterable of path strings (optional)
Passed onto src2obj and via cy_kwargs['include_path']
to simple_cythonize.
cy_kwargs: dict (optional)
keyword arguments passed onto `simple_cythonize`
gdb: bool (optional)
convenience: cy_kwargs['gdb_debug'] is set True if gdb=True,
default: False
cplus: bool (optional)
Indicate whether C++ is used. default: auto-detect using `pyx_is_cplus`
**kwargs: dict
keyword arguments passed onto src2obj
Returns
-------
Absolute path of generated object file. | [
"Convenience",
"function"
] | python | train |
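A hedged usage sketch based only on the signature and docstring above; the module name and build directory are hypothetical.

    from pycompilation.compilation import pyx2obj

    # Compiles a Cython source to an object file; C vs C++ is auto-detected
    # from the .pyx contents and numpy headers are added to the include path.
    obj_path = pyx2obj(
        'mymodule.pyx',            # hypothetical Cython source file
        objpath='./build/',        # existing directory: object name derived from the .pyx
        include_numpy=True,
        only_update=True,          # recompile only if the source is newer
    )
    print(obj_path)                # absolute path of the generated object file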
watson-developer-cloud/python-sdk | ibm_watson/tone_analyzer_v3.py | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/tone_analyzer_v3.py#L1062-L1073 | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'utterance_id') and self.utterance_id is not None:
_dict['utterance_id'] = self.utterance_id
if hasattr(self, 'utterance_text') and self.utterance_text is not None:
_dict['utterance_text'] = self.utterance_text
if hasattr(self, 'tones') and self.tones is not None:
_dict['tones'] = [x._to_dict() for x in self.tones]
if hasattr(self, 'error') and self.error is not None:
_dict['error'] = self.error
return _dict | [
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'utterance_id'",
")",
"and",
"self",
".",
"utterance_id",
"is",
"not",
"None",
":",
"_dict",
"[",
"'utterance_id'",
"]",
"=",
"self",
".",
"utterance_id",
"if",
"hasattr",
"(",
"self",
",",
"'utterance_text'",
")",
"and",
"self",
".",
"utterance_text",
"is",
"not",
"None",
":",
"_dict",
"[",
"'utterance_text'",
"]",
"=",
"self",
".",
"utterance_text",
"if",
"hasattr",
"(",
"self",
",",
"'tones'",
")",
"and",
"self",
".",
"tones",
"is",
"not",
"None",
":",
"_dict",
"[",
"'tones'",
"]",
"=",
"[",
"x",
".",
"_to_dict",
"(",
")",
"for",
"x",
"in",
"self",
".",
"tones",
"]",
"if",
"hasattr",
"(",
"self",
",",
"'error'",
")",
"and",
"self",
".",
"error",
"is",
"not",
"None",
":",
"_dict",
"[",
"'error'",
"]",
"=",
"self",
".",
"error",
"return",
"_dict"
] | Return a json dictionary representing this model. | [
"Return",
"a",
"json",
"dictionary",
"representing",
"this",
"model",
"."
] | python | train |
fastai/fastai | fastai/vision/image.py | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/image.py#L170-L173 | def pixel(self, func:PixelFunc, *args, **kwargs)->'Image':
"Equivalent to `image.px = func(image.px)`."
self.px = func(self.px, *args, **kwargs)
return self | [
"def",
"pixel",
"(",
"self",
",",
"func",
":",
"PixelFunc",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"->",
"'Image'",
":",
"self",
".",
"px",
"=",
"func",
"(",
"self",
".",
"px",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"self"
] | Equivalent to `image.px = func(image.px)`. | [
"Equivalent",
"to",
"image",
".",
"px",
"=",
"func",
"(",
"image",
".",
"px",
")",
"."
] | python | train |
mardix/Juice | juice/decorators.py | https://github.com/mardix/Juice/blob/7afa8d4238868235dfcdae82272bd77958dd416a/juice/decorators.py#L514-L532 | def require_user_roles(*roles):
"""
A decorator to check if user has any of the roles specified
@require_user_roles('superadmin', 'admin')
def fn():
pass
"""
def wrapper(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
if ext.user_authenticated():
if not ext.flask_login.current_user.has_any_roles(*roles):
return abort(403)
else:
return abort(401)
return f(*args, **kwargs)
return wrapped
return wrapper | [
"def",
"require_user_roles",
"(",
"*",
"roles",
")",
":",
"def",
"wrapper",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"ext",
".",
"user_authenticated",
"(",
")",
":",
"if",
"not",
"ext",
".",
"flask_login",
".",
"current_user",
".",
"has_any_roles",
"(",
"*",
"roles",
")",
":",
"return",
"abort",
"(",
"403",
")",
"else",
":",
"return",
"abort",
"(",
"401",
")",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapped",
"return",
"wrapper"
] | A decorator to check if user has any of the roles specified
@require_user_roles('superadmin', 'admin')
def fn():
pass | [
"A",
"decorator",
"to",
"check",
"if",
"user",
"has",
"any",
"of",
"the",
"roles",
"specified"
] | python | train |
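A minimal sketch of applying the decorator documented above; the view function and role names are placeholders.

    @require_user_roles('superadmin', 'admin')
    def delete_account(account_id):
        # Reached only for authenticated users holding one of the roles;
        # otherwise the decorator aborts with 401 (anonymous) or 403 (no role).
        return 'deleted %s' % account_id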
prompt-toolkit/pyvim | pyvim/window_arrangement.py | https://github.com/prompt-toolkit/pyvim/blob/5928b53b9d700863c1a06d2181a034a955f94594/pyvim/window_arrangement.py#L374-L403 | def _add_editor_buffer(self, editor_buffer, show_in_current_window=False):
"""
Insert this new buffer in the list of buffers, right after the active
one.
"""
assert isinstance(editor_buffer, EditorBuffer) and editor_buffer not in self.editor_buffers
# Add to list of EditorBuffers
eb = self.active_editor_buffer
if eb is None:
self.editor_buffers.append(editor_buffer)
else:
# Append right after the currently active one.
try:
index = self.editor_buffers.index(self.active_editor_buffer)
except ValueError:
index = 0
self.editor_buffers.insert(index, editor_buffer)
# When there are no tabs/windows yet, create one for this buffer.
if self.tab_pages == []:
self.tab_pages.append(TabPage(Window(editor_buffer)))
self.active_tab_index = 0
# To be shown?
if show_in_current_window and self.active_tab:
self.active_tab.show_editor_buffer(editor_buffer)
# Start reporter.
editor_buffer.run_reporter() | [
"def",
"_add_editor_buffer",
"(",
"self",
",",
"editor_buffer",
",",
"show_in_current_window",
"=",
"False",
")",
":",
"assert",
"isinstance",
"(",
"editor_buffer",
",",
"EditorBuffer",
")",
"and",
"editor_buffer",
"not",
"in",
"self",
".",
"editor_buffers",
"# Add to list of EditorBuffers",
"eb",
"=",
"self",
".",
"active_editor_buffer",
"if",
"eb",
"is",
"None",
":",
"self",
".",
"editor_buffers",
".",
"append",
"(",
"editor_buffer",
")",
"else",
":",
"# Append right after the currently active one.",
"try",
":",
"index",
"=",
"self",
".",
"editor_buffers",
".",
"index",
"(",
"self",
".",
"active_editor_buffer",
")",
"except",
"ValueError",
":",
"index",
"=",
"0",
"self",
".",
"editor_buffers",
".",
"insert",
"(",
"index",
",",
"editor_buffer",
")",
"# When there are no tabs/windows yet, create one for this buffer.",
"if",
"self",
".",
"tab_pages",
"==",
"[",
"]",
":",
"self",
".",
"tab_pages",
".",
"append",
"(",
"TabPage",
"(",
"Window",
"(",
"editor_buffer",
")",
")",
")",
"self",
".",
"active_tab_index",
"=",
"0",
"# To be shown?",
"if",
"show_in_current_window",
"and",
"self",
".",
"active_tab",
":",
"self",
".",
"active_tab",
".",
"show_editor_buffer",
"(",
"editor_buffer",
")",
"# Start reporter.",
"editor_buffer",
".",
"run_reporter",
"(",
")"
] | Insert this new buffer in the list of buffers, right after the active
one. | [
"Insert",
"this",
"new",
"buffer",
"in",
"the",
"list",
"of",
"buffers",
"right",
"after",
"the",
"active",
"one",
"."
] | python | train |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/build.py | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/build.py#L155-L166 | def addSuffixToExtensions(toc):
"""
Returns a new TOC with proper library suffix for EXTENSION items.
"""
new_toc = TOC()
for inm, fnm, typ in toc:
if typ in ('EXTENSION', 'DEPENDENCY'):
binext = os.path.splitext(fnm)[1]
if not os.path.splitext(inm)[1] == binext:
inm = inm + binext
new_toc.append((inm, fnm, typ))
return new_toc | [
"def",
"addSuffixToExtensions",
"(",
"toc",
")",
":",
"new_toc",
"=",
"TOC",
"(",
")",
"for",
"inm",
",",
"fnm",
",",
"typ",
"in",
"toc",
":",
"if",
"typ",
"in",
"(",
"'EXTENSION'",
",",
"'DEPENDENCY'",
")",
":",
"binext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"fnm",
")",
"[",
"1",
"]",
"if",
"not",
"os",
".",
"path",
".",
"splitext",
"(",
"inm",
")",
"[",
"1",
"]",
"==",
"binext",
":",
"inm",
"=",
"inm",
"+",
"binext",
"new_toc",
".",
"append",
"(",
"(",
"inm",
",",
"fnm",
",",
"typ",
")",
")",
"return",
"new_toc"
] | Returns a new TOC with proper library suffix for EXTENSION items. | [
"Returns",
"a",
"new",
"TOC",
"with",
"proper",
"library",
"suffix",
"for",
"EXTENSION",
"items",
"."
] | python | train |
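The renaming performed above can be sketched on a single plain tuple (the names and paths are hypothetical):

    import os

    inm, fnm, typ = ('mypkg.mymod', '/build/mypkg/mymod.so', 'EXTENSION')
    binext = os.path.splitext(fnm)[1]        # '.so'
    if os.path.splitext(inm)[1] != binext:
        inm = inm + binext                   # 'mypkg.mymod.so'
    print((inm, fnm, typ))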
python-openxml/python-docx | docx/oxml/coreprops.py | https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/oxml/coreprops.py#L222-L238 | def _offset_dt(cls, dt, offset_str):
"""
Return a |datetime| instance that is offset from datetime *dt* by
the timezone offset specified in *offset_str*, a string like
``'-07:00'``.
"""
match = cls._offset_pattern.match(offset_str)
if match is None:
raise ValueError(
"'%s' is not a valid offset string" % offset_str
)
sign, hours_str, minutes_str = match.groups()
sign_factor = -1 if sign == '+' else 1
hours = int(hours_str) * sign_factor
minutes = int(minutes_str) * sign_factor
td = timedelta(hours=hours, minutes=minutes)
return dt + td | [
"def",
"_offset_dt",
"(",
"cls",
",",
"dt",
",",
"offset_str",
")",
":",
"match",
"=",
"cls",
".",
"_offset_pattern",
".",
"match",
"(",
"offset_str",
")",
"if",
"match",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"'%s' is not a valid offset string\"",
"%",
"offset_str",
")",
"sign",
",",
"hours_str",
",",
"minutes_str",
"=",
"match",
".",
"groups",
"(",
")",
"sign_factor",
"=",
"-",
"1",
"if",
"sign",
"==",
"'+'",
"else",
"1",
"hours",
"=",
"int",
"(",
"hours_str",
")",
"*",
"sign_factor",
"minutes",
"=",
"int",
"(",
"minutes_str",
")",
"*",
"sign_factor",
"td",
"=",
"timedelta",
"(",
"hours",
"=",
"hours",
",",
"minutes",
"=",
"minutes",
")",
"return",
"dt",
"+",
"td"
] | Return a |datetime| instance that is offset from datetime *dt* by
the timezone offset specified in *offset_str*, a string like
``'-07:00'``. | [
"Return",
"a",
"|datetime|",
"instance",
"that",
"is",
"offset",
"from",
"datetime",
"*",
"dt",
"*",
"by",
"the",
"timezone",
"offset",
"specified",
"in",
"*",
"offset_str",
"*",
"a",
"string",
"like",
"-",
"07",
":",
"00",
"."
] | python | train |
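A standalone sketch of the same offset arithmetic; the regular expression below is an assumption standing in for the class's _offset_pattern.

    import re
    from datetime import datetime, timedelta

    offset_pattern = re.compile(r'([+-])(\d\d):(\d\d)')   # assumed pattern

    def offset_dt(dt, offset_str):
        sign, hours_str, minutes_str = offset_pattern.match(offset_str).groups()
        sign_factor = -1 if sign == '+' else 1
        return dt + timedelta(hours=int(hours_str) * sign_factor,
                              minutes=int(minutes_str) * sign_factor)

    print(offset_dt(datetime(2024, 1, 1, 12, 0), '-07:00'))   # 2024-01-01 19:00:00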
koehlma/pygrooveshark | src/grooveshark/__init__.py | https://github.com/koehlma/pygrooveshark/blob/17673758ac12f54dc26ac879c30ea44f13b81057/src/grooveshark/__init__.py#L115-L122 | def _request_token(self, method, client):
'''
generates a request token
'''
if time.time() - self.session.time > grooveshark.const.TOKEN_TIMEOUT:
self._get_token()
random_value = self._random_hex()
return random_value + hashlib.sha1((method + ':' + self.session.token + ':' + grooveshark.const.CLIENTS[client]['token'] + ':' + random_value).encode('utf-8')).hexdigest() | [
"def",
"_request_token",
"(",
"self",
",",
"method",
",",
"client",
")",
":",
"if",
"time",
".",
"time",
"(",
")",
"-",
"self",
".",
"session",
".",
"time",
">",
"grooveshark",
".",
"const",
".",
"TOKEN_TIMEOUT",
":",
"self",
".",
"_get_token",
"(",
")",
"random_value",
"=",
"self",
".",
"_random_hex",
"(",
")",
"return",
"random_value",
"+",
"hashlib",
".",
"sha1",
"(",
"(",
"method",
"+",
"':'",
"+",
"self",
".",
"session",
".",
"token",
"+",
"':'",
"+",
"grooveshark",
".",
"const",
".",
"CLIENTS",
"[",
"client",
"]",
"[",
"'token'",
"]",
"+",
"':'",
"+",
"random_value",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")"
] | generates a request token | [
"generates",
"a",
"request",
"token"
] | python | train |
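The scheme above reduces to a random hex prefix plus a SHA-1 digest over 'method:session_token:client_token:random'. A standalone sketch with placeholder values:

    import hashlib

    def request_token(method, session_token, client_token, random_value):
        payload = '%s:%s:%s:%s' % (method, session_token, client_token, random_value)
        return random_value + hashlib.sha1(payload.encode('utf-8')).hexdigest()

    # All values below are placeholders, not real credentials.
    print(request_token('someMethod', 'aabbcc', 'secret', '0f1e2d'))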
choderalab/pymbar | pymbar/mbar.py | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/mbar.py#L793-L873 | def computeCovarianceOfSums(self, d_ij, K, a):
"""
We wish to calculate the variance of a weighted sum of free energy differences.
for example ``var(\sum a_i df_i)``.
We explicitly lay out the calculations for four variables (where each variable
is a logarithm of a partition function), then generalize.
The uncertainty in the sum of two weighted differences is
.. code-block:: none
var(a1(f_i1 - f_j1) + a2(f_i2 - f_j2)) =
a1^2 var(f_i1 - f_j1) +
a2^2 var(f_i2 - f_j2) +
2 a1 a2 cov(f_i1 - f_j1, f_i2 - f_j2)
cov(f_i1 - f_j1, f_i2 - f_j2) =
cov(f_i1,f_i2) -
cov(f_i1,f_j2) -
cov(f_j1,f_i2) +
cov(f_j1,f_j2)
call:
.. code-block:: none
f_i1 = a
f_j1 = b
f_i2 = c
f_j2 = d
a1^2 var(a-b) + a2^2 var(c-d) + 2a1a2 cov(a-b,c-d)
we want ``2cov(a-b,c-d) = 2cov(a,c)-2cov(a,d)-2cov(b,c)+2cov(b,d)``,
since ``var(x-y) = var(x) + var(y) - 2cov(x,y)``,
then, ``2cov(x,y) = -var(x-y) + var(x) + var(y)``. So, we get
.. code-block:: none
2cov(a,c) = -var(a-c) + var(a) + var(c)
-2cov(a,d) = +var(a-d) - var(a) - var(d)
-2cov(b,c) = +var(b-c) - var(b) - var(c)
2cov(b,d) = -var(b-d) + var(b) + var(d)
adding up, finally :
.. code-block:: none
2cov(a-b,c-d) = 2cov(a,c)-2cov(a,d)-2cov(b,c)+2cov(b,d) =
- var(a-c) + var(a-d) + var(b-c) - var(b-d)
a1^2 var(a-b)+a2^2 var(c-d)+2a1a2cov(a-b,c-d) =
a1^2 var(a-b)+a2^2 var(c-d)+a1a2 [-var(a-c)+var(a-d)+var(b-c)-var(b-d)]
var(a1(f_i1 - f_j1) + a2(f_i2 - f_j2)) =
a1^2 var(f_i1 - f_j1) + a2^2 var(f_i2 - f_j2) + 2a1 a2 cov(f_i1 - f_j1, f_i2 - f_j2)
= a1^2 var(f_i1 - f_j1) + a2^2 var(f_i2 - f_j2) + a1 a2 [-var(f_i1 - f_i2) + var(f_i1 - f_j2) + var(f_j1-f_i2) - var(f_j1 - f_j2)]
assume two arrays of free energy differences, and an array of constant vectors a.
we want the variance ``var(\sum_k a_k (f_i,k - f_j,k))`` Each set is separated from the other by an offset K
same process applies with the sum, with the single var terms and the pair terms
Parameters
----------
d_ij : a matrix of standard deviations of the quantities f_i - f_j
K : The number of states in each 'chunk', has to be constant
outputs : KxK variance matrix for the sums or differences ``\sum a_i df_i``
"""
# todo: vectorize this.
var_ij = np.square(d_ij)
d2 = np.zeros([K,K],float)
n = len(a)
for i in range(K):
for j in range(K):
for k in range(n):
d2[i,j] += a[k]**2 * var_ij[i+k*K,j+k*K]
for l in range(n):
d2[i,j] += a[k] * a[l] * (-var_ij[i+k*K,i+l*K] + var_ij[i+k*K,j+l*K] + var_ij[j+k*K,i+l*K] - var_ij[j+k*K,j+l*K])
return np.sqrt(d2) | [
"def",
"computeCovarianceOfSums",
"(",
"self",
",",
"d_ij",
",",
"K",
",",
"a",
")",
":",
"# todo: vectorize this.",
"var_ij",
"=",
"np",
".",
"square",
"(",
"d_ij",
")",
"d2",
"=",
"np",
".",
"zeros",
"(",
"[",
"K",
",",
"K",
"]",
",",
"float",
")",
"n",
"=",
"len",
"(",
"a",
")",
"for",
"i",
"in",
"range",
"(",
"K",
")",
":",
"for",
"j",
"in",
"range",
"(",
"K",
")",
":",
"for",
"k",
"in",
"range",
"(",
"n",
")",
":",
"d2",
"[",
"i",
",",
"j",
"]",
"+=",
"a",
"[",
"k",
"]",
"**",
"2",
"*",
"var_ij",
"[",
"i",
"+",
"k",
"*",
"K",
",",
"j",
"+",
"k",
"*",
"K",
"]",
"for",
"l",
"in",
"range",
"(",
"n",
")",
":",
"d2",
"[",
"i",
",",
"j",
"]",
"+=",
"a",
"[",
"k",
"]",
"*",
"a",
"[",
"l",
"]",
"*",
"(",
"-",
"var_ij",
"[",
"i",
"+",
"k",
"*",
"K",
",",
"i",
"+",
"l",
"*",
"K",
"]",
"+",
"var_ij",
"[",
"i",
"+",
"k",
"*",
"K",
",",
"j",
"+",
"l",
"*",
"K",
"]",
"+",
"var_ij",
"[",
"j",
"+",
"k",
"*",
"K",
",",
"i",
"+",
"l",
"*",
"K",
"]",
"-",
"var_ij",
"[",
"j",
"+",
"k",
"*",
"K",
",",
"j",
"+",
"l",
"*",
"K",
"]",
")",
"return",
"np",
".",
"sqrt",
"(",
"d2",
")"
] | We wish to calculate the variance of a weighted sum of free energy differences.
for example ``var(\sum a_i df_i)``.
We explicitly lay out the calculations for four variables (where each variable
is a logarithm of a partition function), then generalize.
The uncertainty in the sum of two weighted differences is
.. code-block:: none
var(a1(f_i1 - f_j1) + a2(f_i2 - f_j2)) =
a1^2 var(f_i1 - f_j1) +
a2^2 var(f_i2 - f_j2) +
2 a1 a2 cov(f_i1 - f_j1, f_i2 - f_j2)
cov(f_i1 - f_j1, f_i2 - f_j2) =
cov(f_i1,f_i2) -
cov(f_i1,f_j2) -
cov(f_j1,f_i2) +
cov(f_j1,f_j2)
call:
.. code-block:: none
f_i1 = a
f_j1 = b
f_i2 = c
f_j2 = d
a1^2 var(a-b) + a2^2 var(c-d) + 2a1a2 cov(a-b,c-d)
we want ``2cov(a-b,c-d) = 2cov(a,c)-2cov(a,d)-2cov(b,c)+2cov(b,d)``,
since ``var(x-y) = var(x) + var(y) - 2cov(x,y)``,
then, ``2cov(x,y) = -var(x-y) + var(x) + var(y)``. So, we get
.. code-block:: none
2cov(a,c) = -var(a-c) + var(a) + var(c)
-2cov(a,d) = +var(a-d) - var(a) - var(d)
-2cov(b,c) = +var(b-c) - var(b) - var(c)
2cov(b,d) = -var(b-d) + var(b) + var(d)
adding up, finally :
.. code-block:: none
2cov(a-b,c-d) = 2cov(a,c)-2cov(a,d)-2cov(b,c)+2cov(b,d) =
- var(a-c) + var(a-d) + var(b-c) - var(b-d)
a1^2 var(a-b)+a2^2 var(c-d)+2a1a2cov(a-b,c-d) =
a1^2 var(a-b)+a2^2 var(c-d)+a1a2 [-var(a-c)+var(a-d)+var(b-c)-var(b-d)]
var(a1(f_i1 - f_j1) + a2(f_i2 - f_j2)) =
a1^2 var(f_i1 - f_j1) + a2^2 var(f_i2 - f_j2) + 2a1 a2 cov(f_i1 - f_j1, f_i2 - f_j2)
= a1^2 var(f_i1 - f_j1) + a2^2 var(f_i2 - f_j2) + a1 a2 [-var(f_i1 - f_i2) + var(f_i1 - f_j2) + var(f_j1-f_i2) - var(f_j1 - f_j2)]
assume two arrays of free energy differences, and an array of constant vectors a.
we want the variance ``var(\sum_k a_k (f_i,k - f_j,k))`` Each set is separated from the other by an offset K
same process applies with the sum, with the single var terms and the pair terms
Parameters
----------
d_ij : a matrix of standard deviations of the quantities f_i - f_j
K : The number of states in each 'chunk', has to be constant
outputs : KxK variance matrix for the sums or differences ``\sum a_i df_i`` | [
"We",
"wish",
"to",
"calculate",
"the",
"variance",
"of",
"a",
"weighted",
"sum",
"of",
"free",
"energy",
"differences",
".",
"for",
"example",
"var",
"(",
"\\",
"sum",
"a_i",
"df_i",
")",
"."
] | python | train |
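The identities spelled out in the docstring above can be written compactly in LaTeX (notation as in the docstring; a, b and c, d are the endpoints of the first and second difference):

    2\,\mathrm{cov}(a-b,\; c-d) = -\mathrm{var}(a-c) + \mathrm{var}(a-d) + \mathrm{var}(b-c) - \mathrm{var}(b-d)

    \mathrm{var}\big(a_1(f_{i1}-f_{j1}) + a_2(f_{i2}-f_{j2})\big)
      = a_1^2\,\mathrm{var}(f_{i1}-f_{j1}) + a_2^2\,\mathrm{var}(f_{i2}-f_{j2})
      + a_1 a_2\big[-\mathrm{var}(f_{i1}-f_{i2}) + \mathrm{var}(f_{i1}-f_{j2}) + \mathrm{var}(f_{j1}-f_{i2}) - \mathrm{var}(f_{j1}-f_{j2})\big]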
Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_1/task_agent/task_agent_client.py | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/task_agent/task_agent_client.py#L595-L612 | def delete_deployment_target(self, project, deployment_group_id, target_id):
"""DeleteDeploymentTarget.
[Preview API] Delete a deployment target in a deployment group. This deletes the agent from associated deployment pool too.
:param str project: Project ID or project name
:param int deployment_group_id: ID of the deployment group in which deployment target is deleted.
:param int target_id: ID of the deployment target to delete.
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if deployment_group_id is not None:
route_values['deploymentGroupId'] = self._serialize.url('deployment_group_id', deployment_group_id, 'int')
if target_id is not None:
route_values['targetId'] = self._serialize.url('target_id', target_id, 'int')
self._send(http_method='DELETE',
location_id='2f0aa599-c121-4256-a5fd-ba370e0ae7b6',
version='5.1-preview.1',
route_values=route_values) | [
"def",
"delete_deployment_target",
"(",
"self",
",",
"project",
",",
"deployment_group_id",
",",
"target_id",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
"'project'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'project'",
",",
"project",
",",
"'str'",
")",
"if",
"deployment_group_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'deploymentGroupId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'deployment_group_id'",
",",
"deployment_group_id",
",",
"'int'",
")",
"if",
"target_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'targetId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'target_id'",
",",
"target_id",
",",
"'int'",
")",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'DELETE'",
",",
"location_id",
"=",
"'2f0aa599-c121-4256-a5fd-ba370e0ae7b6'",
",",
"version",
"=",
"'5.1-preview.1'",
",",
"route_values",
"=",
"route_values",
")"
] | DeleteDeploymentTarget.
[Preview API] Delete a deployment target in a deployment group. This deletes the agent from associated deployment pool too.
:param str project: Project ID or project name
:param int deployment_group_id: ID of the deployment group in which deployment target is deleted.
:param int target_id: ID of the deployment target to delete. | [
"DeleteDeploymentTarget",
".",
"[",
"Preview",
"API",
"]",
"Delete",
"a",
"deployment",
"target",
"in",
"a",
"deployment",
"group",
".",
"This",
"deletes",
"the",
"agent",
"from",
"associated",
"deployment",
"pool",
"too",
".",
":",
"param",
"str",
"project",
":",
"Project",
"ID",
"or",
"project",
"name",
":",
"param",
"int",
"deployment_group_id",
":",
"ID",
"of",
"the",
"deployment",
"group",
"in",
"which",
"deployment",
"target",
"is",
"deleted",
".",
":",
"param",
"int",
"target_id",
":",
"ID",
"of",
"the",
"deployment",
"target",
"to",
"delete",
"."
] | python | train |
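A hedged call sketch; task_agent_client is assumed to be an already-constructed TaskAgentClient, and the project name and IDs are placeholders.

    task_agent_client.delete_deployment_target(
        project='MyProject',
        deployment_group_id=12,
        target_id=34,
    )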
python-openxml/python-docx | docx/oxml/text/font.py | https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/oxml/text/font.py#L184-L195 | def subscript(self):
"""
|True| if `w:vertAlign/@w:val` is 'subscript'. |False| if
`w:vertAlign/@w:val` contains any other value. |None| if
`w:vertAlign` is not present.
"""
vertAlign = self.vertAlign
if vertAlign is None:
return None
if vertAlign.val == ST_VerticalAlignRun.SUBSCRIPT:
return True
return False | [
"def",
"subscript",
"(",
"self",
")",
":",
"vertAlign",
"=",
"self",
".",
"vertAlign",
"if",
"vertAlign",
"is",
"None",
":",
"return",
"None",
"if",
"vertAlign",
".",
"val",
"==",
"ST_VerticalAlignRun",
".",
"SUBSCRIPT",
":",
"return",
"True",
"return",
"False"
] | |True| if `w:vertAlign/@w:val` is 'subscript'. |False| if
`w:vertAlign/@w:val` contains any other value. |None| if
`w:vertAlign` is not present. | [
"|True|",
"if",
"w",
":",
"vertAlign",
"/"
] | python | train |
lingthio/Flask-User | flask_user/email_manager.py | https://github.com/lingthio/Flask-User/blob/a379fa0a281789618c484b459cb41236779b95b1/flask_user/email_manager.py#L37-L58 | def send_confirm_email_email(self, user, user_email):
"""Send the 'email confirmation' email."""
# Verify config settings
if not self.user_manager.USER_ENABLE_EMAIL: return
if not self.user_manager.USER_ENABLE_CONFIRM_EMAIL: return
# The confirm_email email is sent to a specific user_email.email or user.email
email = user_email.email if user_email else user.email
# Generate a confirm_email_link
object_id = user_email.id if user_email else user.id
token = self.user_manager.generate_token(object_id)
confirm_email_link = url_for('user.confirm_email', token=token, _external=True)
# Render email from templates and send it via the configured EmailAdapter
self._render_and_send_email(
email,
user,
self.user_manager.USER_CONFIRM_EMAIL_TEMPLATE,
confirm_email_link=confirm_email_link,
) | [
"def",
"send_confirm_email_email",
"(",
"self",
",",
"user",
",",
"user_email",
")",
":",
"# Verify config settings",
"if",
"not",
"self",
".",
"user_manager",
".",
"USER_ENABLE_EMAIL",
":",
"return",
"if",
"not",
"self",
".",
"user_manager",
".",
"USER_ENABLE_CONFIRM_EMAIL",
":",
"return",
"# The confirm_email email is sent to a specific user_email.email or user.email",
"email",
"=",
"user_email",
".",
"email",
"if",
"user_email",
"else",
"user",
".",
"email",
"# Generate a confirm_email_link",
"object_id",
"=",
"user_email",
".",
"id",
"if",
"user_email",
"else",
"user",
".",
"id",
"token",
"=",
"self",
".",
"user_manager",
".",
"generate_token",
"(",
"object_id",
")",
"confirm_email_link",
"=",
"url_for",
"(",
"'user.confirm_email'",
",",
"token",
"=",
"token",
",",
"_external",
"=",
"True",
")",
"# Render email from templates and send it via the configured EmailAdapter",
"self",
".",
"_render_and_send_email",
"(",
"email",
",",
"user",
",",
"self",
".",
"user_manager",
".",
"USER_CONFIRM_EMAIL_TEMPLATE",
",",
"confirm_email_link",
"=",
"confirm_email_link",
",",
")"
] | Send the 'email confirmation' email. | [
"Send",
"the",
"email",
"confirmation",
"email",
"."
] | python | train |
log2timeline/plaso | plaso/parsers/winfirewall.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/winfirewall.py#L158-L200 | def _ParseLogLine(self, parser_mediator, structure):
"""Parse a single log line and and produce an event object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
"""
try:
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=structure.date_time)
date_time.is_local_time = True
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(structure.date_time))
return
event_data = WinFirewallEventData()
event_data.action = self._GetStructureValue(structure, 'action')
event_data.dest_ip = self._GetStructureValue(structure, 'dest_ip')
event_data.dest_port = self._GetStructureValue(structure, 'dest_port')
event_data.flags = self._GetStructureValue(structure, 'flags')
event_data.icmp_code = self._GetStructureValue(structure, 'icmp_code')
event_data.icmp_type = self._GetStructureValue(structure, 'icmp_type')
event_data.info = self._GetStructureValue(structure, 'info')
event_data.path = self._GetStructureValue(structure, 'path')
event_data.protocol = self._GetStructureValue(structure, 'protocol')
event_data.size = self._GetStructureValue(structure, 'size')
event_data.source_ip = self._GetStructureValue(structure, 'source_ip')
event_data.source_port = self._GetStructureValue(structure, 'source_port')
event_data.tcp_ack = self._GetStructureValue(structure, 'tcp_ack')
event_data.tcp_seq = self._GetStructureValue(structure, 'tcp_seq')
event_data.tcp_win = self._GetStructureValue(structure, 'tcp_win')
if self._use_local_timezone:
time_zone = parser_mediator.timezone
else:
time_zone = pytz.UTC
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN, time_zone=time_zone)
parser_mediator.ProduceEventWithEventData(event, event_data) | [
"def",
"_ParseLogLine",
"(",
"self",
",",
"parser_mediator",
",",
"structure",
")",
":",
"try",
":",
"date_time",
"=",
"dfdatetime_time_elements",
".",
"TimeElements",
"(",
"time_elements_tuple",
"=",
"structure",
".",
"date_time",
")",
"date_time",
".",
"is_local_time",
"=",
"True",
"except",
"ValueError",
":",
"parser_mediator",
".",
"ProduceExtractionWarning",
"(",
"'invalid date time value: {0!s}'",
".",
"format",
"(",
"structure",
".",
"date_time",
")",
")",
"return",
"event_data",
"=",
"WinFirewallEventData",
"(",
")",
"event_data",
".",
"action",
"=",
"self",
".",
"_GetStructureValue",
"(",
"structure",
",",
"'action'",
")",
"event_data",
".",
"dest_ip",
"=",
"self",
".",
"_GetStructureValue",
"(",
"structure",
",",
"'dest_ip'",
")",
"event_data",
".",
"dest_port",
"=",
"self",
".",
"_GetStructureValue",
"(",
"structure",
",",
"'dest_port'",
")",
"event_data",
".",
"flags",
"=",
"self",
".",
"_GetStructureValue",
"(",
"structure",
",",
"'flags'",
")",
"event_data",
".",
"icmp_code",
"=",
"self",
".",
"_GetStructureValue",
"(",
"structure",
",",
"'icmp_code'",
")",
"event_data",
".",
"icmp_type",
"=",
"self",
".",
"_GetStructureValue",
"(",
"structure",
",",
"'icmp_type'",
")",
"event_data",
".",
"info",
"=",
"self",
".",
"_GetStructureValue",
"(",
"structure",
",",
"'info'",
")",
"event_data",
".",
"path",
"=",
"self",
".",
"_GetStructureValue",
"(",
"structure",
",",
"'path'",
")",
"event_data",
".",
"protocol",
"=",
"self",
".",
"_GetStructureValue",
"(",
"structure",
",",
"'protocol'",
")",
"event_data",
".",
"size",
"=",
"self",
".",
"_GetStructureValue",
"(",
"structure",
",",
"'size'",
")",
"event_data",
".",
"source_ip",
"=",
"self",
".",
"_GetStructureValue",
"(",
"structure",
",",
"'source_ip'",
")",
"event_data",
".",
"source_port",
"=",
"self",
".",
"_GetStructureValue",
"(",
"structure",
",",
"'source_port'",
")",
"event_data",
".",
"tcp_ack",
"=",
"self",
".",
"_GetStructureValue",
"(",
"structure",
",",
"'tcp_ack'",
")",
"event_data",
".",
"tcp_seq",
"=",
"self",
".",
"_GetStructureValue",
"(",
"structure",
",",
"'tcp_seq'",
")",
"event_data",
".",
"tcp_win",
"=",
"self",
".",
"_GetStructureValue",
"(",
"structure",
",",
"'tcp_win'",
")",
"if",
"self",
".",
"_use_local_timezone",
":",
"time_zone",
"=",
"parser_mediator",
".",
"timezone",
"else",
":",
"time_zone",
"=",
"pytz",
".",
"UTC",
"event",
"=",
"time_events",
".",
"DateTimeValuesEvent",
"(",
"date_time",
",",
"definitions",
".",
"TIME_DESCRIPTION_WRITTEN",
",",
"time_zone",
"=",
"time_zone",
")",
"parser_mediator",
".",
"ProduceEventWithEventData",
"(",
"event",
",",
"event_data",
")"
] | Parse a single log line and and produce an event object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file. | [
"Parse",
"a",
"single",
"log",
"line",
"and",
"and",
"produce",
"an",
"event",
"object",
"."
] | python | train |
StackStorm/pybind | pybind/slxos/v17s_1_02/openflow_state/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/openflow_state/__init__.py#L225-L248 | def _set_meter(self, v, load=False):
"""
Setter method for meter, mapped from YANG variable /openflow_state/meter (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_meter is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_meter() directly.
YANG Description: Meter
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=meter.meter, is_container='container', presence=False, yang_name="meter", rest_name="meter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-meter', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """meter must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=meter.meter, is_container='container', presence=False, yang_name="meter", rest_name="meter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-meter', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=False)""",
})
self.__meter = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_meter",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"meter",
".",
"meter",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"meter\"",
",",
"rest_name",
"=",
"\"meter\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'callpoint'",
":",
"u'openflow-meter'",
",",
"u'cli-suppress-show-path'",
":",
"None",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-openflow-operational'",
",",
"defining_module",
"=",
"'brocade-openflow-operational'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"False",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"meter must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=meter.meter, is_container='container', presence=False, yang_name=\"meter\", rest_name=\"meter\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-meter', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=False)\"\"\"",
",",
"}",
")",
"self",
".",
"__meter",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | Setter method for meter, mapped from YANG variable /openflow_state/meter (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_meter is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_meter() directly.
YANG Description: Meter | [
"Setter",
"method",
"for",
"meter",
"mapped",
"from",
"YANG",
"variable",
"/",
"openflow_state",
"/",
"meter",
"(",
"container",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_meter",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_meter",
"()",
"directly",
"."
] | python | train |
mediawiki-utilities/python-mwsessions | mwsessions/sessionizer.py | https://github.com/mediawiki-utilities/python-mwsessions/blob/bbbc7330075a2066514df21a64a9afd7a4e0de52/mwsessions/sessionizer.py#L101-L110 | def get_active_sessions(self):
"""
Retrieves the active, unexpired sessions.
:Returns:
A generator of :class:`~mwsessions.Session`
"""
for last_timestamp, i, events in self.recently_active:
yield Session(events[-1].user, unpack_events(events)) | [
"def",
"get_active_sessions",
"(",
"self",
")",
":",
"for",
"last_timestamp",
",",
"i",
",",
"events",
"in",
"self",
".",
"recently_active",
":",
"yield",
"Session",
"(",
"events",
"[",
"-",
"1",
"]",
".",
"user",
",",
"unpack_events",
"(",
"events",
")",
")"
] | Retrieves the active, unexpired sessions.
:Returns:
A generator of :class:`~mwsessions.Session` | [
"Retrieves",
"the",
"active",
"unexpired",
"sessions",
"."
] | python | train |
KxSystems/pyq | src/pyq/ptk.py | https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/ptk.py#L48-L53 | def get_prompt_tokens(_):
"""Return a list of tokens for the prompt"""
namespace = q(r'\d')
if namespace == '.':
namespace = ''
return [(Token.Generic.Prompt, 'q%s)' % namespace)] | [
"def",
"get_prompt_tokens",
"(",
"_",
")",
":",
"namespace",
"=",
"q",
"(",
"r'\\d'",
")",
"if",
"namespace",
"==",
"'.'",
":",
"namespace",
"=",
"''",
"return",
"[",
"(",
"Token",
".",
"Generic",
".",
"Prompt",
",",
"'q%s)'",
"%",
"namespace",
")",
"]"
] | Return a list of tokens for the prompt | [
"Return",
"a",
"list",
"of",
"tokens",
"for",
"the",
"prompt"
] | python | train |
boppreh/keyboard | keyboard/_winkeyboard.py | https://github.com/boppreh/keyboard/blob/dbb73dfff484f733d5fed8dbc53301af5b6c7f50/keyboard/_winkeyboard.py#L486-L558 | def prepare_intercept(callback):
"""
Registers a Windows low level keyboard hook. The provided callback will
be invoked for each high-level keyboard event, and is expected to return
True if the key event should be passed to the next program, or False if
the event is to be blocked.
No event is processed until the Windows messages are pumped (see
start_intercept).
"""
_setup_name_tables()
def process_key(event_type, vk, scan_code, is_extended):
global shift_is_pressed, altgr_is_pressed, ignore_next_right_alt
#print(event_type, vk, scan_code, is_extended)
# Pressing alt-gr also generates an extra "right alt" event
if vk == 0xA5 and ignore_next_right_alt:
ignore_next_right_alt = False
return True
modifiers = (
('shift',) * shift_is_pressed +
('alt gr',) * altgr_is_pressed +
('num lock',) * (user32.GetKeyState(0x90) & 1) +
('caps lock',) * (user32.GetKeyState(0x14) & 1) +
('scroll lock',) * (user32.GetKeyState(0x91) & 1)
)
entry = (scan_code, vk, is_extended, modifiers)
if entry not in to_name:
to_name[entry] = list(get_event_names(*entry))
names = to_name[entry]
name = names[0] if names else None
# TODO: inaccurate when holding multiple different shifts.
if vk in shift_vks:
shift_is_pressed = event_type == KEY_DOWN
if scan_code == 541 and vk == 162:
ignore_next_right_alt = True
altgr_is_pressed = event_type == KEY_DOWN
is_keypad = (scan_code, vk, is_extended) in keypad_keys
return callback(KeyboardEvent(event_type=event_type, scan_code=scan_code or -vk, name=name, is_keypad=is_keypad))
def low_level_keyboard_handler(nCode, wParam, lParam):
try:
vk = lParam.contents.vk_code
# Ignore the second `alt` DOWN observed in some cases.
fake_alt = (LLKHF_INJECTED | 0x20)
# Ignore events generated by SendInput with Unicode.
if vk != VK_PACKET and lParam.contents.flags & fake_alt != fake_alt:
event_type = keyboard_event_types[wParam]
is_extended = lParam.contents.flags & 1
scan_code = lParam.contents.scan_code
should_continue = process_key(event_type, vk, scan_code, is_extended)
if not should_continue:
return -1
except Exception as e:
print('Error in keyboard hook:')
traceback.print_exc()
return CallNextHookEx(None, nCode, wParam, lParam)
WH_KEYBOARD_LL = c_int(13)
keyboard_callback = LowLevelKeyboardProc(low_level_keyboard_handler)
handle = GetModuleHandleW(None)
thread_id = DWORD(0)
keyboard_hook = SetWindowsHookEx(WH_KEYBOARD_LL, keyboard_callback, handle, thread_id)
# Register to remove the hook when the interpreter exits. Unfortunately a
# try/finally block doesn't seem to work here.
atexit.register(UnhookWindowsHookEx, keyboard_callback) | [
"def",
"prepare_intercept",
"(",
"callback",
")",
":",
"_setup_name_tables",
"(",
")",
"def",
"process_key",
"(",
"event_type",
",",
"vk",
",",
"scan_code",
",",
"is_extended",
")",
":",
"global",
"shift_is_pressed",
",",
"altgr_is_pressed",
",",
"ignore_next_right_alt",
"#print(event_type, vk, scan_code, is_extended)",
"# Pressing alt-gr also generates an extra \"right alt\" event",
"if",
"vk",
"==",
"0xA5",
"and",
"ignore_next_right_alt",
":",
"ignore_next_right_alt",
"=",
"False",
"return",
"True",
"modifiers",
"=",
"(",
"(",
"'shift'",
",",
")",
"*",
"shift_is_pressed",
"+",
"(",
"'alt gr'",
",",
")",
"*",
"altgr_is_pressed",
"+",
"(",
"'num lock'",
",",
")",
"*",
"(",
"user32",
".",
"GetKeyState",
"(",
"0x90",
")",
"&",
"1",
")",
"+",
"(",
"'caps lock'",
",",
")",
"*",
"(",
"user32",
".",
"GetKeyState",
"(",
"0x14",
")",
"&",
"1",
")",
"+",
"(",
"'scroll lock'",
",",
")",
"*",
"(",
"user32",
".",
"GetKeyState",
"(",
"0x91",
")",
"&",
"1",
")",
")",
"entry",
"=",
"(",
"scan_code",
",",
"vk",
",",
"is_extended",
",",
"modifiers",
")",
"if",
"entry",
"not",
"in",
"to_name",
":",
"to_name",
"[",
"entry",
"]",
"=",
"list",
"(",
"get_event_names",
"(",
"*",
"entry",
")",
")",
"names",
"=",
"to_name",
"[",
"entry",
"]",
"name",
"=",
"names",
"[",
"0",
"]",
"if",
"names",
"else",
"None",
"# TODO: inaccurate when holding multiple different shifts.",
"if",
"vk",
"in",
"shift_vks",
":",
"shift_is_pressed",
"=",
"event_type",
"==",
"KEY_DOWN",
"if",
"scan_code",
"==",
"541",
"and",
"vk",
"==",
"162",
":",
"ignore_next_right_alt",
"=",
"True",
"altgr_is_pressed",
"=",
"event_type",
"==",
"KEY_DOWN",
"is_keypad",
"=",
"(",
"scan_code",
",",
"vk",
",",
"is_extended",
")",
"in",
"keypad_keys",
"return",
"callback",
"(",
"KeyboardEvent",
"(",
"event_type",
"=",
"event_type",
",",
"scan_code",
"=",
"scan_code",
"or",
"-",
"vk",
",",
"name",
"=",
"name",
",",
"is_keypad",
"=",
"is_keypad",
")",
")",
"def",
"low_level_keyboard_handler",
"(",
"nCode",
",",
"wParam",
",",
"lParam",
")",
":",
"try",
":",
"vk",
"=",
"lParam",
".",
"contents",
".",
"vk_code",
"# Ignore the second `alt` DOWN observed in some cases.",
"fake_alt",
"=",
"(",
"LLKHF_INJECTED",
"|",
"0x20",
")",
"# Ignore events generated by SendInput with Unicode.",
"if",
"vk",
"!=",
"VK_PACKET",
"and",
"lParam",
".",
"contents",
".",
"flags",
"&",
"fake_alt",
"!=",
"fake_alt",
":",
"event_type",
"=",
"keyboard_event_types",
"[",
"wParam",
"]",
"is_extended",
"=",
"lParam",
".",
"contents",
".",
"flags",
"&",
"1",
"scan_code",
"=",
"lParam",
".",
"contents",
".",
"scan_code",
"should_continue",
"=",
"process_key",
"(",
"event_type",
",",
"vk",
",",
"scan_code",
",",
"is_extended",
")",
"if",
"not",
"should_continue",
":",
"return",
"-",
"1",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'Error in keyboard hook:'",
")",
"traceback",
".",
"print_exc",
"(",
")",
"return",
"CallNextHookEx",
"(",
"None",
",",
"nCode",
",",
"wParam",
",",
"lParam",
")",
"WH_KEYBOARD_LL",
"=",
"c_int",
"(",
"13",
")",
"keyboard_callback",
"=",
"LowLevelKeyboardProc",
"(",
"low_level_keyboard_handler",
")",
"handle",
"=",
"GetModuleHandleW",
"(",
"None",
")",
"thread_id",
"=",
"DWORD",
"(",
"0",
")",
"keyboard_hook",
"=",
"SetWindowsHookEx",
"(",
"WH_KEYBOARD_LL",
",",
"keyboard_callback",
",",
"handle",
",",
"thread_id",
")",
"# Register to remove the hook when the interpreter exits. Unfortunately a",
"# try/finally block doesn't seem to work here.",
"atexit",
".",
"register",
"(",
"UnhookWindowsHookEx",
",",
"keyboard_callback",
")"
] | Registers a Windows low level keyboard hook. The provided callback will
be invoked for each high-level keyboard event, and is expected to return
True if the key event should be passed to the next program, or False if
the event is to be blocked.
No event is processed until the Windows messages are pumped (see
start_intercept). | [
"Registers",
"a",
"Windows",
"low",
"level",
"keyboard",
"hook",
".",
"The",
"provided",
"callback",
"will",
"be",
"invoked",
"for",
"each",
"high",
"-",
"level",
"keyboard",
"event",
"and",
"is",
"expected",
"to",
"return",
"True",
"if",
"the",
"key",
"event",
"should",
"be",
"passed",
"to",
"the",
"next",
"program",
"or",
"False",
"if",
"the",
"event",
"is",
"to",
"be",
"blocked",
"."
] | python | train |
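A hypothetical callback wired into the hook above: it blocks the 'a' key and passes every other event through (as the docstring notes, events only flow once the Windows message loop is pumped via start_intercept).

    def on_event(event):
        # Return False to swallow the event, True to let it reach other programs.
        return event.name != 'a'

    prepare_intercept(on_event)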
googlefonts/glyphsLib | Lib/glyphsLib/builder/builders.py | https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/builders.py#L640-L702 | def _fake_designspace(self, ufos):
"""Build a fake designspace with the given UFOs as sources, so that all
builder functions can rely on the presence of a designspace.
"""
designspace = designspaceLib.DesignSpaceDocument()
ufo_to_location = defaultdict(dict)
# Make weight and width axis if relevant
for info_key, axis_def in zip(
("openTypeOS2WeightClass", "openTypeOS2WidthClass"),
(WEIGHT_AXIS_DEF, WIDTH_AXIS_DEF),
):
axis = designspace.newAxisDescriptor()
axis.tag = axis_def.tag
axis.name = axis_def.name
mapping = []
for ufo in ufos:
user_loc = getattr(ufo.info, info_key)
if user_loc is not None:
design_loc = class_to_value(axis_def.tag, user_loc)
mapping.append((user_loc, design_loc))
ufo_to_location[ufo][axis_def.name] = design_loc
mapping = sorted(set(mapping))
if len(mapping) > 1:
axis.map = mapping
axis.minimum = min([user_loc for user_loc, _ in mapping])
axis.maximum = max([user_loc for user_loc, _ in mapping])
axis.default = min(
axis.maximum, max(axis.minimum, axis_def.default_user_loc)
)
designspace.addAxis(axis)
for ufo in ufos:
source = designspace.newSourceDescriptor()
source.font = ufo
source.familyName = ufo.info.familyName
source.styleName = ufo.info.styleName
# source.name = '%s %s' % (source.familyName, source.styleName)
source.path = ufo.path
source.location = ufo_to_location[ufo]
designspace.addSource(source)
# UFO-level skip list lib keys are usually ignored, except when we don't have a
# Designspace file to start from. If they exist in the UFOs, promote them to a
# Designspace-level lib key. However, to avoid accidents, expect the list to
# exist in none or be the same in all UFOs.
if any("public.skipExportGlyphs" in ufo.lib for ufo in ufos):
skip_export_glyphs = {
frozenset(ufo.lib.get("public.skipExportGlyphs", [])) for ufo in ufos
}
if len(skip_export_glyphs) == 1:
designspace.lib["public.skipExportGlyphs"] = sorted(
next(iter(skip_export_glyphs))
)
else:
raise ValueError(
"The `public.skipExportGlyphs` list of all UFOs must either not "
"exist or be the same in every UFO."
)
return designspace | [
"def",
"_fake_designspace",
"(",
"self",
",",
"ufos",
")",
":",
"designspace",
"=",
"designspaceLib",
".",
"DesignSpaceDocument",
"(",
")",
"ufo_to_location",
"=",
"defaultdict",
"(",
"dict",
")",
"# Make weight and width axis if relevant",
"for",
"info_key",
",",
"axis_def",
"in",
"zip",
"(",
"(",
"\"openTypeOS2WeightClass\"",
",",
"\"openTypeOS2WidthClass\"",
")",
",",
"(",
"WEIGHT_AXIS_DEF",
",",
"WIDTH_AXIS_DEF",
")",
",",
")",
":",
"axis",
"=",
"designspace",
".",
"newAxisDescriptor",
"(",
")",
"axis",
".",
"tag",
"=",
"axis_def",
".",
"tag",
"axis",
".",
"name",
"=",
"axis_def",
".",
"name",
"mapping",
"=",
"[",
"]",
"for",
"ufo",
"in",
"ufos",
":",
"user_loc",
"=",
"getattr",
"(",
"ufo",
".",
"info",
",",
"info_key",
")",
"if",
"user_loc",
"is",
"not",
"None",
":",
"design_loc",
"=",
"class_to_value",
"(",
"axis_def",
".",
"tag",
",",
"user_loc",
")",
"mapping",
".",
"append",
"(",
"(",
"user_loc",
",",
"design_loc",
")",
")",
"ufo_to_location",
"[",
"ufo",
"]",
"[",
"axis_def",
".",
"name",
"]",
"=",
"design_loc",
"mapping",
"=",
"sorted",
"(",
"set",
"(",
"mapping",
")",
")",
"if",
"len",
"(",
"mapping",
")",
">",
"1",
":",
"axis",
".",
"map",
"=",
"mapping",
"axis",
".",
"minimum",
"=",
"min",
"(",
"[",
"user_loc",
"for",
"user_loc",
",",
"_",
"in",
"mapping",
"]",
")",
"axis",
".",
"maximum",
"=",
"max",
"(",
"[",
"user_loc",
"for",
"user_loc",
",",
"_",
"in",
"mapping",
"]",
")",
"axis",
".",
"default",
"=",
"min",
"(",
"axis",
".",
"maximum",
",",
"max",
"(",
"axis",
".",
"minimum",
",",
"axis_def",
".",
"default_user_loc",
")",
")",
"designspace",
".",
"addAxis",
"(",
"axis",
")",
"for",
"ufo",
"in",
"ufos",
":",
"source",
"=",
"designspace",
".",
"newSourceDescriptor",
"(",
")",
"source",
".",
"font",
"=",
"ufo",
"source",
".",
"familyName",
"=",
"ufo",
".",
"info",
".",
"familyName",
"source",
".",
"styleName",
"=",
"ufo",
".",
"info",
".",
"styleName",
"# source.name = '%s %s' % (source.familyName, source.styleName)",
"source",
".",
"path",
"=",
"ufo",
".",
"path",
"source",
".",
"location",
"=",
"ufo_to_location",
"[",
"ufo",
"]",
"designspace",
".",
"addSource",
"(",
"source",
")",
"# UFO-level skip list lib keys are usually ignored, except when we don't have a",
"# Designspace file to start from. If they exist in the UFOs, promote them to a",
"# Designspace-level lib key. However, to avoid accidents, expect the list to",
"# exist in none or be the same in all UFOs.",
"if",
"any",
"(",
"\"public.skipExportGlyphs\"",
"in",
"ufo",
".",
"lib",
"for",
"ufo",
"in",
"ufos",
")",
":",
"skip_export_glyphs",
"=",
"{",
"frozenset",
"(",
"ufo",
".",
"lib",
".",
"get",
"(",
"\"public.skipExportGlyphs\"",
",",
"[",
"]",
")",
")",
"for",
"ufo",
"in",
"ufos",
"}",
"if",
"len",
"(",
"skip_export_glyphs",
")",
"==",
"1",
":",
"designspace",
".",
"lib",
"[",
"\"public.skipExportGlyphs\"",
"]",
"=",
"sorted",
"(",
"next",
"(",
"iter",
"(",
"skip_export_glyphs",
")",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"The `public.skipExportGlyphs` list of all UFOs must either not \"",
"\"exist or be the same in every UFO.\"",
")",
"return",
"designspace"
] | Build a fake designspace with the given UFOs as sources, so that all
builder functions can rely on the presence of a designspace. | [
"Build",
"a",
"fake",
"designspace",
"with",
"the",
"given",
"UFOs",
"as",
"sources",
"so",
"that",
"all",
"builder",
"functions",
"can",
"rely",
"on",
"the",
"presence",
"of",
"a",
"designspace",
"."
] | python | train |
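The axis-building loop above collects (user location, design location) pairs per UFO and then derives the axis extremes plus a clamped default. A minimal standalone sketch of that clamping step, with made-up weight-class values and without designspaceLib:

    mapping = sorted({(300, 300), (400, 400), (700, 700)})
    minimum = min(user_loc for user_loc, _ in mapping)          # 300
    maximum = max(user_loc for user_loc, _ in mapping)          # 700
    default_user_loc = 400                                      # assumed axis default (e.g. Regular)
    default = min(maximum, max(minimum, default_user_loc))      # clamped into [minimum, maximum]
    print(minimum, maximum, default)                            # 300 700 400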
Yubico/yubikey-manager | ykman/cli/otp.py | https://github.com/Yubico/yubikey-manager/blob/3ac27bc59ae76a59db9d09a530494add2edbbabf/ykman/cli/otp.py#L390-L432 | def chalresp(ctx, slot, key, totp, touch, force, generate):
"""
Program a challenge-response credential.
If KEY is not given, an interactive prompt will ask for it.
"""
controller = ctx.obj['controller']
if key:
if generate:
ctx.fail('Invalid options: --generate conflicts with KEY argument.')
elif totp:
key = parse_b32_key(key)
else:
key = parse_key(key)
else:
if force and not generate:
ctx.fail('No secret key given. Please remove the --force flag, '
'set the KEY argument or set the --generate flag.')
elif totp:
while True:
key = click.prompt('Enter a secret key (base32)', err=True)
try:
key = parse_b32_key(key)
break
except Exception as e:
click.echo(e)
else:
if generate:
key = os.urandom(20)
click.echo('Using a randomly generated key: {}'.format(
b2a_hex(key).decode('ascii')))
else:
key = click.prompt('Enter a secret key', err=True)
key = parse_key(key)
cred_type = 'TOTP' if totp else 'challenge-response'
force or click.confirm('Program a {} credential in slot {}?'
.format(cred_type, slot), abort=True, err=True)
try:
controller.program_chalresp(slot, key, touch)
except YkpersError as e:
_failed_to_write_msg(ctx, e) | [
"def",
"chalresp",
"(",
"ctx",
",",
"slot",
",",
"key",
",",
"totp",
",",
"touch",
",",
"force",
",",
"generate",
")",
":",
"controller",
"=",
"ctx",
".",
"obj",
"[",
"'controller'",
"]",
"if",
"key",
":",
"if",
"generate",
":",
"ctx",
".",
"fail",
"(",
"'Invalid options: --generate conflicts with KEY argument.'",
")",
"elif",
"totp",
":",
"key",
"=",
"parse_b32_key",
"(",
"key",
")",
"else",
":",
"key",
"=",
"parse_key",
"(",
"key",
")",
"else",
":",
"if",
"force",
"and",
"not",
"generate",
":",
"ctx",
".",
"fail",
"(",
"'No secret key given. Please remove the --force flag, '",
"'set the KEY argument or set the --generate flag.'",
")",
"elif",
"totp",
":",
"while",
"True",
":",
"key",
"=",
"click",
".",
"prompt",
"(",
"'Enter a secret key (base32)'",
",",
"err",
"=",
"True",
")",
"try",
":",
"key",
"=",
"parse_b32_key",
"(",
"key",
")",
"break",
"except",
"Exception",
"as",
"e",
":",
"click",
".",
"echo",
"(",
"e",
")",
"else",
":",
"if",
"generate",
":",
"key",
"=",
"os",
".",
"urandom",
"(",
"20",
")",
"click",
".",
"echo",
"(",
"'Using a randomly generated key: {}'",
".",
"format",
"(",
"b2a_hex",
"(",
"key",
")",
".",
"decode",
"(",
"'ascii'",
")",
")",
")",
"else",
":",
"key",
"=",
"click",
".",
"prompt",
"(",
"'Enter a secret key'",
",",
"err",
"=",
"True",
")",
"key",
"=",
"parse_key",
"(",
"key",
")",
"cred_type",
"=",
"'TOTP'",
"if",
"totp",
"else",
"'challenge-response'",
"force",
"or",
"click",
".",
"confirm",
"(",
"'Program a {} credential in slot {}?'",
".",
"format",
"(",
"cred_type",
",",
"slot",
")",
",",
"abort",
"=",
"True",
",",
"err",
"=",
"True",
")",
"try",
":",
"controller",
".",
"program_chalresp",
"(",
"slot",
",",
"key",
",",
"touch",
")",
"except",
"YkpersError",
"as",
"e",
":",
"_failed_to_write_msg",
"(",
"ctx",
",",
"e",
")"
] | Program a challenge-response credential.
If KEY is not given, an interactive prompt will ask for it. | [
"Program",
"a",
"challenge",
"-",
"response",
"credential",
"."
] | python | train |
fjwCode/cerium | cerium/androiddriver.py | https://github.com/fjwCode/cerium/blob/f6e06e0dcf83a0bc924828e9d6cb81383ed2364f/cerium/androiddriver.py#L168-L172 | def get_screen_density(self) -> str:
'''Show device screen density (PPI).'''
output, _ = self._execute(
'-s', self.device_sn, 'shell', 'wm', 'density')
return output.split()[2] | [
"def",
"get_screen_density",
"(",
"self",
")",
"->",
"str",
":",
"output",
",",
"_",
"=",
"self",
".",
"_execute",
"(",
"'-s'",
",",
"self",
".",
"device_sn",
",",
"'shell'",
",",
"'wm'",
",",
"'density'",
")",
"return",
"output",
".",
"split",
"(",
")",
"[",
"2",
"]"
] | Show device screen density (PPI). | [
"Show",
"device",
"screen",
"density",
"(",
"PPI",
")",
"."
] | python | train |
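The `output.split()[2]` above assumes `adb shell wm density` prints a line such as "Physical density: 420", so the third whitespace-separated field is the PPI value. A tiny sketch of that parsing; the sample string is an assumption, not captured device output:

    output = "Physical density: 420"     # assumed adb output
    density = output.split()[2]
    print(density)                       # '420'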
sorgerlab/indra | indra/sources/sofia/api.py | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sofia/api.py#L9-L32 | def process_table(fname):
"""Return processor by processing a given sheet of a spreadsheet file.
Parameters
----------
fname : str
The name of the Excel file (typically .xlsx extension) to process
Returns
-------
sp : indra.sources.sofia.processor.SofiaProcessor
A SofiaProcessor object which has a list of extracted INDRA
Statements as its statements attribute.
"""
book = openpyxl.load_workbook(fname, read_only=True)
try:
rel_sheet = book['Relations']
except Exception as e:
rel_sheet = book['Causal']
event_sheet = book['Events']
entities_sheet = book['Entities']
sp = SofiaExcelProcessor(rel_sheet.rows, event_sheet.rows,
entities_sheet.rows)
return sp | [
"def",
"process_table",
"(",
"fname",
")",
":",
"book",
"=",
"openpyxl",
".",
"load_workbook",
"(",
"fname",
",",
"read_only",
"=",
"True",
")",
"try",
":",
"rel_sheet",
"=",
"book",
"[",
"'Relations'",
"]",
"except",
"Exception",
"as",
"e",
":",
"rel_sheet",
"=",
"book",
"[",
"'Causal'",
"]",
"event_sheet",
"=",
"book",
"[",
"'Events'",
"]",
"entities_sheet",
"=",
"book",
"[",
"'Entities'",
"]",
"sp",
"=",
"SofiaExcelProcessor",
"(",
"rel_sheet",
".",
"rows",
",",
"event_sheet",
".",
"rows",
",",
"entities_sheet",
".",
"rows",
")",
"return",
"sp"
] | Return processor by processing a given sheet of a spreadsheet file.
Parameters
----------
fname : str
The name of the Excel file (typically .xlsx extension) to process
Returns
-------
sp : indra.sources.sofia.processor.SofiaProcessor
A SofiaProcessor object which has a list of extracted INDRA
Statements as its statements attribute. | [
"Return",
"processor",
"by",
"processing",
"a",
"given",
"sheet",
"of",
"a",
"spreadsheet",
"file",
"."
] | python | train |
mrstephenneal/pdfconduit | sandbox/pdfrw_upscale.py | https://github.com/mrstephenneal/pdfconduit/blob/993421cc087eefefe01ff09afabd893bcc2718ec/sandbox/pdfrw_upscale.py#L12-L36 | def upscale(file_name, scale=1.5, margin_x=0, margin_y=0, suffix='scaled', tempdir=None):
"""Upscale a PDF to a large size."""
def adjust(page):
info = PageMerge().add(page)
x1, y1, x2, y2 = info.xobj_box
viewrect = (margin_x, margin_y, x2 - x1 - 2 * margin_x, y2 - y1 - 2 * margin_y)
page = PageMerge().add(page, viewrect=viewrect)
page[0].scale(scale)
return page.render()
# Set output file name
if tempdir:
output = NamedTemporaryFile(suffix='.pdf', dir=tempdir, delete=False).name
elif suffix:
output = os.path.join(os.path.dirname(file_name), add_suffix(file_name, suffix))
else:
output = NamedTemporaryFile(suffix='.pdf').name
reader = PdfReader(file_name)
writer = PdfWriter(output)
for i in list(range(0, len(reader.pages))):
writer.addpage(adjust(reader.pages[i]))
writer.trailer.Info = IndirectPdfDict(reader.Info or {})
writer.write()
return output | [
"def",
"upscale",
"(",
"file_name",
",",
"scale",
"=",
"1.5",
",",
"margin_x",
"=",
"0",
",",
"margin_y",
"=",
"0",
",",
"suffix",
"=",
"'scaled'",
",",
"tempdir",
"=",
"None",
")",
":",
"def",
"adjust",
"(",
"page",
")",
":",
"info",
"=",
"PageMerge",
"(",
")",
".",
"add",
"(",
"page",
")",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
"=",
"info",
".",
"xobj_box",
"viewrect",
"=",
"(",
"margin_x",
",",
"margin_y",
",",
"x2",
"-",
"x1",
"-",
"2",
"*",
"margin_x",
",",
"y2",
"-",
"y1",
"-",
"2",
"*",
"margin_y",
")",
"page",
"=",
"PageMerge",
"(",
")",
".",
"add",
"(",
"page",
",",
"viewrect",
"=",
"viewrect",
")",
"page",
"[",
"0",
"]",
".",
"scale",
"(",
"scale",
")",
"return",
"page",
".",
"render",
"(",
")",
"# Set output file name",
"if",
"tempdir",
":",
"output",
"=",
"NamedTemporaryFile",
"(",
"suffix",
"=",
"'.pdf'",
",",
"dir",
"=",
"tempdir",
",",
"delete",
"=",
"False",
")",
".",
"name",
"elif",
"suffix",
":",
"output",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"file_name",
")",
",",
"add_suffix",
"(",
"file_name",
",",
"suffix",
")",
")",
"else",
":",
"output",
"=",
"NamedTemporaryFile",
"(",
"suffix",
"=",
"'.pdf'",
")",
".",
"name",
"reader",
"=",
"PdfReader",
"(",
"file_name",
")",
"writer",
"=",
"PdfWriter",
"(",
"output",
")",
"for",
"i",
"in",
"list",
"(",
"range",
"(",
"0",
",",
"len",
"(",
"reader",
".",
"pages",
")",
")",
")",
":",
"writer",
".",
"addpage",
"(",
"adjust",
"(",
"reader",
".",
"pages",
"[",
"i",
"]",
")",
")",
"writer",
".",
"trailer",
".",
"Info",
"=",
"IndirectPdfDict",
"(",
"reader",
".",
"Info",
"or",
"{",
"}",
")",
"writer",
".",
"write",
"(",
")",
"return",
"output"
] | Upscale a PDF to a large size. | [
"Upscale",
"a",
"PDF",
"to",
"a",
"large",
"size",
"."
] | python | train |
vertexproject/synapse | synapse/lib/slabseqn.py | https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/slabseqn.py#L130-L137 | def rows(self, offs):
'''
Iterate over raw indx, bytes tuples from a given offset.
'''
lkey = s_common.int64en(offs)
for lkey, byts in self.slab.scanByRange(lkey, db=self.db):
indx = s_common.int64un(lkey)
yield indx, byts | [
"def",
"rows",
"(",
"self",
",",
"offs",
")",
":",
"lkey",
"=",
"s_common",
".",
"int64en",
"(",
"offs",
")",
"for",
"lkey",
",",
"byts",
"in",
"self",
".",
"slab",
".",
"scanByRange",
"(",
"lkey",
",",
"db",
"=",
"self",
".",
"db",
")",
":",
"indx",
"=",
"s_common",
".",
"int64un",
"(",
"lkey",
")",
"yield",
"indx",
",",
"byts"
] | Iterate over raw indx, bytes tuples from a given offset. | [
"Iterate",
"over",
"raw",
"indx",
"bytes",
"tuples",
"from",
"a",
"given",
"offset",
"."
] | python | train |
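The offsets become fixed-width 64-bit big-endian keys (via the synapse-specific `int64en`/`int64un` helpers), so lexicographic key order in the LMDB range scan matches numeric order. A standalone sketch of that idea using only `struct`; the exact synapse encoding is an assumption:

    import struct

    def int64en(offs):
        return struct.pack('>Q', offs)       # big-endian, fixed width

    def int64un(byts):
        return struct.unpack('>Q', byts)[0]

    keys = sorted(int64en(n) for n in (5, 300, 7))
    print([int64un(k) for k in keys])        # [5, 7, 300] -- byte order matches numeric order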
cloud-custodian/cloud-custodian | c7n/logs_support.py | https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/logs_support.py#L47-L73 | def normalized_log_entries(raw_entries):
'''Mimic the format returned by LambdaManager.logs()'''
entry_start = r'([0-9:, \-]+) - .* - (\w+) - (.*)$'
entry = None
# process start/end here - avoid parsing log entries twice
for line in raw_entries:
m = re.match(entry_start, line)
if m:
# this is the start of a new entry
# spit out the one previously built up (if any)
if entry is not None:
yield entry
(log_time, log_level, log_text) = m.groups()
# convert time
log_timestamp = _timestamp_from_string(log_time)
# join level and first line of message
msg = '[{}] {}'.format(log_level, log_text)
entry = {
'timestamp': log_timestamp,
'message': msg,
}
else:
# additional line(s) for entry (i.e. stack trace)
entry['message'] = entry['message'] + line
if entry is not None:
# return the final entry
yield entry | [
"def",
"normalized_log_entries",
"(",
"raw_entries",
")",
":",
"entry_start",
"=",
"r'([0-9:, \\-]+) - .* - (\\w+) - (.*)$'",
"entry",
"=",
"None",
"# process start/end here - avoid parsing log entries twice",
"for",
"line",
"in",
"raw_entries",
":",
"m",
"=",
"re",
".",
"match",
"(",
"entry_start",
",",
"line",
")",
"if",
"m",
":",
"# this is the start of a new entry",
"# spit out the one previously built up (if any)",
"if",
"entry",
"is",
"not",
"None",
":",
"yield",
"entry",
"(",
"log_time",
",",
"log_level",
",",
"log_text",
")",
"=",
"m",
".",
"groups",
"(",
")",
"# convert time",
"log_timestamp",
"=",
"_timestamp_from_string",
"(",
"log_time",
")",
"# join level and first line of message",
"msg",
"=",
"'[{}] {}'",
".",
"format",
"(",
"log_level",
",",
"log_text",
")",
"entry",
"=",
"{",
"'timestamp'",
":",
"log_timestamp",
",",
"'message'",
":",
"msg",
",",
"}",
"else",
":",
"# additional line(s) for entry (i.e. stack trace)",
"entry",
"[",
"'message'",
"]",
"=",
"entry",
"[",
"'message'",
"]",
"+",
"line",
"if",
"entry",
"is",
"not",
"None",
":",
"# return the final entry",
"yield",
"entry"
] | Mimic the format returned by LambdaManager.logs() | [
"Mimic",
"the",
"format",
"returned",
"by",
"LambdaManager",
".",
"logs",
"()"
] | python | train |
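The generator above buffers lines until the next entry-start match, so multi-line messages (e.g. tracebacks) stay attached to the entry that produced them. A self-contained sketch of the same grouping pattern, with invented log lines and the timestamp left as a string instead of calling `_timestamp_from_string`:

    import re

    entry_start = re.compile(r'([0-9:, \-]+) - .* - (\w+) - (.*)$')
    raw = [
        '2023-01-01 10:00:00,000 - app - INFO - started\n',
        '2023-01-01 10:00:01,000 - app - ERROR - boom\n',
        'Traceback (most recent call last):\n',
        '  ...\n',
    ]

    entries, entry = [], None
    for line in raw:
        m = entry_start.match(line)
        if m:
            if entry is not None:
                entries.append(entry)
            log_time, log_level, log_text = m.groups()
            entry = {'timestamp': log_time,
                     'message': '[{}] {}'.format(log_level, log_text)}
        elif entry is not None:
            entry['message'] += line         # extra lines belong to the current entry
    if entry is not None:
        entries.append(entry)
    print(len(entries))                      # 2; the traceback lines belong to the second entry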
hobson/aima | aima/logic.py | https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/logic.py#L357-L401 | def pl_true(exp, model={}):
"""Return True if the propositional logic expression is true in the model,
and False if it is false. If the model does not specify the value for
every proposition, this may return None to indicate 'not obvious';
this may happen even when the expression is tautological."""
op, args = exp.op, exp.args
if exp == TRUE:
return True
elif exp == FALSE:
return False
elif is_prop_symbol(op):
return model.get(exp)
elif op == '~':
p = pl_true(args[0], model)
if p is None: return None
else: return not p
elif op == '|':
result = False
for arg in args:
p = pl_true(arg, model)
if p is True: return True
if p is None: result = None
return result
elif op == '&':
result = True
for arg in args:
p = pl_true(arg, model)
if p is False: return False
if p is None: result = None
return result
p, q = args
if op == '>>':
return pl_true(~p | q, model)
elif op == '<<':
return pl_true(p | ~q, model)
pt = pl_true(p, model)
if pt is None: return None
qt = pl_true(q, model)
if qt is None: return None
if op == '<=>':
return pt == qt
elif op == '^':
return pt != qt
else:
raise ValueError, "illegal operator in logic expression" + str(exp) | [
"def",
"pl_true",
"(",
"exp",
",",
"model",
"=",
"{",
"}",
")",
":",
"op",
",",
"args",
"=",
"exp",
".",
"op",
",",
"exp",
".",
"args",
"if",
"exp",
"==",
"TRUE",
":",
"return",
"True",
"elif",
"exp",
"==",
"FALSE",
":",
"return",
"False",
"elif",
"is_prop_symbol",
"(",
"op",
")",
":",
"return",
"model",
".",
"get",
"(",
"exp",
")",
"elif",
"op",
"==",
"'~'",
":",
"p",
"=",
"pl_true",
"(",
"args",
"[",
"0",
"]",
",",
"model",
")",
"if",
"p",
"is",
"None",
":",
"return",
"None",
"else",
":",
"return",
"not",
"p",
"elif",
"op",
"==",
"'|'",
":",
"result",
"=",
"False",
"for",
"arg",
"in",
"args",
":",
"p",
"=",
"pl_true",
"(",
"arg",
",",
"model",
")",
"if",
"p",
"is",
"True",
":",
"return",
"True",
"if",
"p",
"is",
"None",
":",
"result",
"=",
"None",
"return",
"result",
"elif",
"op",
"==",
"'&'",
":",
"result",
"=",
"True",
"for",
"arg",
"in",
"args",
":",
"p",
"=",
"pl_true",
"(",
"arg",
",",
"model",
")",
"if",
"p",
"is",
"False",
":",
"return",
"False",
"if",
"p",
"is",
"None",
":",
"result",
"=",
"None",
"return",
"result",
"p",
",",
"q",
"=",
"args",
"if",
"op",
"==",
"'>>'",
":",
"return",
"pl_true",
"(",
"~",
"p",
"|",
"q",
",",
"model",
")",
"elif",
"op",
"==",
"'<<'",
":",
"return",
"pl_true",
"(",
"p",
"|",
"~",
"q",
",",
"model",
")",
"pt",
"=",
"pl_true",
"(",
"p",
",",
"model",
")",
"if",
"pt",
"is",
"None",
":",
"return",
"None",
"qt",
"=",
"pl_true",
"(",
"q",
",",
"model",
")",
"if",
"qt",
"is",
"None",
":",
"return",
"None",
"if",
"op",
"==",
"'<=>'",
":",
"return",
"pt",
"==",
"qt",
"elif",
"op",
"==",
"'^'",
":",
"return",
"pt",
"!=",
"qt",
"else",
":",
"raise",
"ValueError",
",",
"\"illegal operator in logic expression\"",
"+",
"str",
"(",
"exp",
")"
] | Return True if the propositional logic expression is true in the model,
and False if it is false. If the model does not specify the value for
every proposition, this may return None to indicate 'not obvious';
this may happen even when the expression is tautological. | [
"Return",
"True",
"if",
"the",
"propositional",
"logic",
"expression",
"is",
"true",
"in",
"the",
"model",
"and",
"False",
"if",
"it",
"is",
"false",
".",
"If",
"the",
"model",
"does",
"not",
"specify",
"the",
"value",
"for",
"every",
"proposition",
"this",
"may",
"return",
"None",
"to",
"indicate",
"not",
"obvious",
";",
"this",
"may",
"happen",
"even",
"when",
"the",
"expression",
"is",
"tautological",
"."
] | python | valid |
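The evaluation is three-valued: True, False, or None when the model does not pin down the needed symbols. A hedged usage sketch, assuming aima's `expr` helper from the same module for building `Expr` trees:

    from logic import expr, pl_true          # assumed import path inside the aima code base

    P, Q = expr('P'), expr('Q')
    print(pl_true(P | Q, {P: True}))         # True, whatever Q is
    print(pl_true(P & Q, {P: True}))         # None: Q is unassigned, so the result is not obvious
    print(pl_true(P >> Q, {P: False}))       # True: a false antecedent satisfies the implication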
intuition-io/intuition | intuition/utils.py | https://github.com/intuition-io/intuition/blob/cd517e6b3b315a743eb4d0d0dc294e264ab913ce/intuition/utils.py#L26-L40 | def next_tick(date, interval=15):
'''
Only return when we reach given datetime
'''
# Intuition works with utc dates, conversion are made for I/O
now = dt.datetime.now(pytz.utc)
live = False
# Sleep until we reach the given date
while now < date:
time.sleep(interval)
# Update current time
now = dt.datetime.now(pytz.utc)
# Since we're here, we waited a future date, so this is live trading
live = True
return live | [
"def",
"next_tick",
"(",
"date",
",",
"interval",
"=",
"15",
")",
":",
"# Intuition works with utc dates, conversion are made for I/O",
"now",
"=",
"dt",
".",
"datetime",
".",
"now",
"(",
"pytz",
".",
"utc",
")",
"live",
"=",
"False",
"# Sleep until we reach the given date",
"while",
"now",
"<",
"date",
":",
"time",
".",
"sleep",
"(",
"interval",
")",
"# Update current time",
"now",
"=",
"dt",
".",
"datetime",
".",
"now",
"(",
"pytz",
".",
"utc",
")",
"# Since we're here, we waited a future date, so this is live trading",
"live",
"=",
"True",
"return",
"live"
] | Only return when we reach given datetime | [
"Only",
"return",
"when",
"we",
"reach",
"given",
"datetime"
] | python | train |
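`next_tick` polls the UTC clock in `interval`-second steps and reports whether it actually had to wait (True means the date was in the future, i.e. live trading). A standalone sketch of the same pattern with a short, invented delay:

    import datetime as dt
    import time
    import pytz

    def wait_until(date, interval=1):
        live = False
        while dt.datetime.now(pytz.utc) < date:
            time.sleep(interval)
            live = True                      # we had to wait, so this was a future date
        return live

    target = dt.datetime.now(pytz.utc) + dt.timedelta(seconds=2)
    print(wait_until(target))                # True after roughly two seconds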
molmod/molmod | molmod/io/atrj.py | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/atrj.py#L66-L70 | def get_next(self, label):
"""Get the next section with the given label"""
while self._get_current_label() != label:
self._skip_section()
return self._read_section() | [
"def",
"get_next",
"(",
"self",
",",
"label",
")",
":",
"while",
"self",
".",
"_get_current_label",
"(",
")",
"!=",
"label",
":",
"self",
".",
"_skip_section",
"(",
")",
"return",
"self",
".",
"_read_section",
"(",
")"
] | Get the next section with the given label | [
"Get",
"the",
"next",
"section",
"with",
"the",
"given",
"label"
] | python | train |
urinieto/msaf | msaf/utils.py | https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/utils.py#L120-L154 | def sonify_clicks(audio, clicks, out_file, fs, offset=0):
"""Sonifies the estimated times into the output file.
Parameters
----------
audio: np.array
Audio samples of the input track.
clicks: np.array
Click positions in seconds.
out_file: str
Path to the output file.
fs: int
Sample rate.
offset: float
Offset of the clicks with respect to the audio.
"""
# Generate clicks (this should be done by mir_eval, but its
# latest release is not compatible with latest numpy)
times = clicks + offset
# 1 kHz tone, 100ms
click = np.sin(2 * np.pi * np.arange(fs * .1) * 1000 / (1. * fs))
# Exponential decay
click *= np.exp(-np.arange(fs * .1) / (fs * .01))
length = int(times.max() * fs + click.shape[0] + 1)
audio_clicks = mir_eval.sonify.clicks(times, fs, length=length)
# Create array to store the audio plus the clicks
out_audio = np.zeros(max(len(audio), len(audio_clicks)))
# Assign the audio and the clicks
out_audio[:len(audio)] = audio
out_audio[:len(audio_clicks)] += audio_clicks
# Write to file
scipy.io.wavfile.write(out_file, fs, out_audio) | [
"def",
"sonify_clicks",
"(",
"audio",
",",
"clicks",
",",
"out_file",
",",
"fs",
",",
"offset",
"=",
"0",
")",
":",
"# Generate clicks (this should be done by mir_eval, but its",
"# latest release is not compatible with latest numpy)",
"times",
"=",
"clicks",
"+",
"offset",
"# 1 kHz tone, 100ms",
"click",
"=",
"np",
".",
"sin",
"(",
"2",
"*",
"np",
".",
"pi",
"*",
"np",
".",
"arange",
"(",
"fs",
"*",
".1",
")",
"*",
"1000",
"/",
"(",
"1.",
"*",
"fs",
")",
")",
"# Exponential decay",
"click",
"*=",
"np",
".",
"exp",
"(",
"-",
"np",
".",
"arange",
"(",
"fs",
"*",
".1",
")",
"/",
"(",
"fs",
"*",
".01",
")",
")",
"length",
"=",
"int",
"(",
"times",
".",
"max",
"(",
")",
"*",
"fs",
"+",
"click",
".",
"shape",
"[",
"0",
"]",
"+",
"1",
")",
"audio_clicks",
"=",
"mir_eval",
".",
"sonify",
".",
"clicks",
"(",
"times",
",",
"fs",
",",
"length",
"=",
"length",
")",
"# Create array to store the audio plus the clicks",
"out_audio",
"=",
"np",
".",
"zeros",
"(",
"max",
"(",
"len",
"(",
"audio",
")",
",",
"len",
"(",
"audio_clicks",
")",
")",
")",
"# Assign the audio and the clicks",
"out_audio",
"[",
":",
"len",
"(",
"audio",
")",
"]",
"=",
"audio",
"out_audio",
"[",
":",
"len",
"(",
"audio_clicks",
")",
"]",
"+=",
"audio_clicks",
"# Write to file",
"scipy",
".",
"io",
".",
"wavfile",
".",
"write",
"(",
"out_file",
",",
"fs",
",",
"out_audio",
")"
] | Sonifies the estimated times into the output file.
Parameters
----------
audio: np.array
Audio samples of the input track.
clicks: np.array
Click positions in seconds.
out_file: str
Path to the output file.
fs: int
Sample rate.
offset: float
Offset of the clicks with respect to the audio. | [
"Sonifies",
"the",
"estimated",
"times",
"into",
"the",
"output",
"file",
"."
] | python | test |
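The click itself is a 100 ms, 1 kHz sine shaped by an exponential decay; `mir_eval.sonify.clicks` then places copies at the requested times. A standalone sketch of just the click synthesis, with an assumed sample rate and no file I/O:

    import numpy as np

    fs = 22050                                   # assumed sample rate
    n = np.arange(int(fs * 0.1))                 # 100 ms of samples
    click = np.sin(2 * np.pi * n * 1000 / fs)    # 1 kHz tone
    click *= np.exp(-n / (fs * 0.01))            # ~10 ms exponential decay
    print(click.shape, float(np.abs(click).max()) <= 1.0)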
PyCQA/astroid | astroid/rebuilder.py | https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/rebuilder.py#L658-L666 | def visit_ifexp(self, node, parent):
"""visit a IfExp node by returning a fresh instance of it"""
newnode = nodes.IfExp(node.lineno, node.col_offset, parent)
newnode.postinit(
self.visit(node.test, newnode),
self.visit(node.body, newnode),
self.visit(node.orelse, newnode),
)
return newnode | [
"def",
"visit_ifexp",
"(",
"self",
",",
"node",
",",
"parent",
")",
":",
"newnode",
"=",
"nodes",
".",
"IfExp",
"(",
"node",
".",
"lineno",
",",
"node",
".",
"col_offset",
",",
"parent",
")",
"newnode",
".",
"postinit",
"(",
"self",
".",
"visit",
"(",
"node",
".",
"test",
",",
"newnode",
")",
",",
"self",
".",
"visit",
"(",
"node",
".",
"body",
",",
"newnode",
")",
",",
"self",
".",
"visit",
"(",
"node",
".",
"orelse",
",",
"newnode",
")",
",",
")",
"return",
"newnode"
] | visit a IfExp node by returning a fresh instance of it | [
"visit",
"a",
"IfExp",
"node",
"by",
"returning",
"a",
"fresh",
"instance",
"of",
"it"
] | python | train |
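Each `visit_*` method builds the new node first and only then attaches its children through `postinit`, so the children are created with the fresh node as their parent. A hedged sketch of observing the result through astroid's public `extract_node` helper, which is assumed to route through this rebuilder:

    import astroid

    node = astroid.extract_node("1 if x else 2")
    print(type(node).__name__)               # 'IfExp'
    print(node.test.as_string())             # 'x'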
AmesCornish/buttersink | buttersink/btrfs.py | https://github.com/AmesCornish/buttersink/blob/5cc37e30d9f8071fcf3497dca8b8a91b910321ea/buttersink/btrfs.py#L527-L541 | def _rescanSizes(self, force=True):
""" Zero and recalculate quota sizes to subvolume sizes will be correct. """
status = self.QUOTA_CTL(cmd=BTRFS_QUOTA_CTL_ENABLE).status
logger.debug("CTL Status: %s", hex(status))
status = self.QUOTA_RESCAN_STATUS()
logger.debug("RESCAN Status: %s", status)
if not status.flags:
if not force:
return
self.QUOTA_RESCAN()
logger.warn("Waiting for btrfs quota usage scan...")
self.QUOTA_RESCAN_WAIT() | [
"def",
"_rescanSizes",
"(",
"self",
",",
"force",
"=",
"True",
")",
":",
"status",
"=",
"self",
".",
"QUOTA_CTL",
"(",
"cmd",
"=",
"BTRFS_QUOTA_CTL_ENABLE",
")",
".",
"status",
"logger",
".",
"debug",
"(",
"\"CTL Status: %s\"",
",",
"hex",
"(",
"status",
")",
")",
"status",
"=",
"self",
".",
"QUOTA_RESCAN_STATUS",
"(",
")",
"logger",
".",
"debug",
"(",
"\"RESCAN Status: %s\"",
",",
"status",
")",
"if",
"not",
"status",
".",
"flags",
":",
"if",
"not",
"force",
":",
"return",
"self",
".",
"QUOTA_RESCAN",
"(",
")",
"logger",
".",
"warn",
"(",
"\"Waiting for btrfs quota usage scan...\"",
")",
"self",
".",
"QUOTA_RESCAN_WAIT",
"(",
")"
] | Zero and recalculate quota sizes so subvolume sizes will be correct. | [
"Zero",
"and",
"recalculate",
"quota",
"sizes",
"to",
"subvolume",
"sizes",
"will",
"be",
"correct",
"."
] | python | train |
estnltk/estnltk | estnltk/text.py | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L855-L860 | def named_entities(self):
"""The elements of ``named_entities`` layer."""
if not self.is_tagged(NAMED_ENTITIES):
self.tag_named_entities()
phrases = self.split_by(NAMED_ENTITIES)
return [' '.join(phrase.lemmas) for phrase in phrases] | [
"def",
"named_entities",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_tagged",
"(",
"NAMED_ENTITIES",
")",
":",
"self",
".",
"tag_named_entities",
"(",
")",
"phrases",
"=",
"self",
".",
"split_by",
"(",
"NAMED_ENTITIES",
")",
"return",
"[",
"' '",
".",
"join",
"(",
"phrase",
".",
"lemmas",
")",
"for",
"phrase",
"in",
"phrases",
"]"
] | The elements of ``named_entities`` layer. | [
"The",
"elements",
"of",
"named_entities",
"layer",
"."
] | python | train |
emory-libraries/eulfedora | eulfedora/syncutil.py | https://github.com/emory-libraries/eulfedora/blob/161826f3fdcdab4007f6fa7dfd9f1ecabc4bcbe4/eulfedora/syncutil.py#L266-L303 | def get_datastream_info(self, dsinfo):
'''Use regular expressions to pull datastream [version]
details (id, mimetype, size, and checksum) for binary content,
in order to sanity check the decoded data.
:param dsinfo: text content just before a binaryContent tag
:returns: dict with keys for id, mimetype, size, type and digest,
or None if no match is found
'''
# we only need to look at the end of this section of content
dsinfo = dsinfo[-750:]
# if not enough content is present, include the end of
# the last read chunk, if available
if len(dsinfo) < 750 and self.end_of_last_chunk is not None:
dsinfo = self.end_of_last_chunk + dsinfo
# force text needed for python 3 compatibility (in python 3
# dsinfo is bytes instead of a string)
try:
text = force_text(dsinfo)
except UnicodeDecodeError as err:
# it's possible to see a unicode character split across
# read blocks; if we get an "invalid start byte" unicode
# decode error, try converting the text without the first
# character; if that's the problem, it's not needed
# for datastream context
if 'invalid start byte' in force_text(err):
text = force_text(dsinfo[1:])
else:
raise err
# in case the text contains multiple datastream ids, find
# all matches and then use the last, since we want the last one
# in this section, just before the datastream content
matches = list(self.dsinfo_regex.finditer(text))
if matches:
infomatch = matches[-1]
return infomatch.groupdict() | [
"def",
"get_datastream_info",
"(",
"self",
",",
"dsinfo",
")",
":",
"# we only need to look at the end of this section of content",
"dsinfo",
"=",
"dsinfo",
"[",
"-",
"750",
":",
"]",
"# if not enough content is present, include the end of",
"# the last read chunk, if available",
"if",
"len",
"(",
"dsinfo",
")",
"<",
"750",
"and",
"self",
".",
"end_of_last_chunk",
"is",
"not",
"None",
":",
"dsinfo",
"=",
"self",
".",
"end_of_last_chunk",
"+",
"dsinfo",
"# force text needed for python 3 compatibility (in python 3",
"# dsinfo is bytes instead of a string)",
"try",
":",
"text",
"=",
"force_text",
"(",
"dsinfo",
")",
"except",
"UnicodeDecodeError",
"as",
"err",
":",
"# it's possible to see a unicode character split across",
"# read blocks; if we get an \"invalid start byte\" unicode",
"# decode error, try converting the text without the first",
"# character; if that's the problem, it's not needed",
"# for datastream context",
"if",
"'invalid start byte'",
"in",
"force_text",
"(",
"err",
")",
":",
"text",
"=",
"force_text",
"(",
"dsinfo",
"[",
"1",
":",
"]",
")",
"else",
":",
"raise",
"err",
"# in case the text contains multiple datastream ids, find",
"# all matches and then use the last, since we want the last one",
"# in this section, just before the datastream content",
"matches",
"=",
"list",
"(",
"self",
".",
"dsinfo_regex",
".",
"finditer",
"(",
"text",
")",
")",
"if",
"matches",
":",
"infomatch",
"=",
"matches",
"[",
"-",
"1",
"]",
"return",
"infomatch",
".",
"groupdict",
"(",
")"
] | Use regular expressions to pull datastream [version]
details (id, mimetype, size, and checksum) for binary content,
in order to sanity check the decoded data.
:param dsinfo: text content just before a binaryContent tag
:returns: dict with keys for id, mimetype, size, type and digest,
or None if no match is found | [
"Use",
"regular",
"expressions",
"to",
"pull",
"datastream",
"[",
"version",
"]",
"details",
"(",
"id",
"mimetype",
"size",
"and",
"checksum",
")",
"for",
"binary",
"content",
"in",
"order",
"to",
"sanity",
"check",
"the",
"decoded",
"data",
"."
] | python | train |
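Because the export is read in chunks, a datastream header can straddle a chunk boundary; keeping the tail of the previous chunk and taking the last regex match handles that. A standalone sketch of the boundary handling with a simplified, invented pattern and chunks:

    import re

    dsid_re = re.compile(r'ID="(?P<id>[^"]+)"')
    chunks = ['... <datastream ID="DS', '1" MIMETYPE="text/plain"> ...']

    end_of_last_chunk = None
    for chunk in chunks:
        window = chunk[-750:]
        if len(window) < 750 and end_of_last_chunk is not None:
            window = end_of_last_chunk + window          # stitch across the boundary
        matches = list(dsid_re.finditer(window))
        if matches:
            print(matches[-1].group('id'))               # DS1, found on the second pass
        end_of_last_chunk = chunk[-750:]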
maljovec/topopy | topopy/TopologicalObject.py | https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/TopologicalObject.py#L200-L234 | def build(self, X, Y, w=None, edges=None):
""" Assigns data to this object and builds the requested topological
structure
@ In, X, an m-by-n array of values specifying m
n-dimensional samples
@ In, Y, an m vector of values specifying the output
responses corresponding to the m samples specified by X
@ In, w, an optional m vector of values specifying the
weights associated to each of the m samples used. Default of
None means all points will be equally weighted
@ In, edges, an optional list of custom edges to use as a
starting point for pruning, or in place of a computed graph.
"""
self.reset()
if X is None or Y is None:
return
self.__set_data(X, Y, w)
if self.debug:
sys.stdout.write("Graph Preparation: ")
start = time.clock()
self.graph_rep = nglpy.Graph(
self.Xnorm,
self.graph,
self.max_neighbors,
self.beta,
connect=self.connect,
)
if self.debug:
end = time.clock()
sys.stdout.write("%f s\n" % (end - start)) | [
"def",
"build",
"(",
"self",
",",
"X",
",",
"Y",
",",
"w",
"=",
"None",
",",
"edges",
"=",
"None",
")",
":",
"self",
".",
"reset",
"(",
")",
"if",
"X",
"is",
"None",
"or",
"Y",
"is",
"None",
":",
"return",
"self",
".",
"__set_data",
"(",
"X",
",",
"Y",
",",
"w",
")",
"if",
"self",
".",
"debug",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"Graph Preparation: \"",
")",
"start",
"=",
"time",
".",
"clock",
"(",
")",
"self",
".",
"graph_rep",
"=",
"nglpy",
".",
"Graph",
"(",
"self",
".",
"Xnorm",
",",
"self",
".",
"graph",
",",
"self",
".",
"max_neighbors",
",",
"self",
".",
"beta",
",",
"connect",
"=",
"self",
".",
"connect",
",",
")",
"if",
"self",
".",
"debug",
":",
"end",
"=",
"time",
".",
"clock",
"(",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"%f s\\n\"",
"%",
"(",
"end",
"-",
"start",
")",
")"
] | Assigns data to this object and builds the requested topological
structure
@ In, X, an m-by-n array of values specifying m
n-dimensional samples
@ In, Y, an m vector of values specifying the output
responses corresponding to the m samples specified by X
@ In, w, an optional m vector of values specifying the
weights associated to each of the m samples used. Default of
None means all points will be equally weighted
@ In, edges, an optional list of custom edges to use as a
starting point for pruning, or in place of a computed graph. | [
"Assigns",
"data",
"to",
"this",
"object",
"and",
"builds",
"the",
"requested",
"topological",
"structure"
] | python | train |
shoebot/shoebot | shoebot/sbio/shell.py | https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/sbio/shell.py#L152-L159 | def do_escape_nl(self, arg):
"""
Escape newlines in any responses
"""
if arg.lower() == 'off':
self.escape_nl = False
else:
self.escape_nl = True | [
"def",
"do_escape_nl",
"(",
"self",
",",
"arg",
")",
":",
"if",
"arg",
".",
"lower",
"(",
")",
"==",
"'off'",
":",
"self",
".",
"escape_nl",
"=",
"False",
"else",
":",
"self",
".",
"escape_nl",
"=",
"True"
] | Escape newlines in any responses | [
"Escape",
"newlines",
"in",
"any",
"responses"
] | python | valid |
EUDAT-B2SAFE/B2HANDLE | b2handle/util/argsutils.py | https://github.com/EUDAT-B2SAFE/B2HANDLE/blob/a6d216d459644e01fbdfd5b318a535950bc5cdbb/b2handle/util/argsutils.py#L24-L47 | def check_presence_of_mandatory_args(args, mandatory_args):
'''
Checks whether all mandatory arguments are passed.
This function aims at methods with many arguments
which are passed as kwargs so that the order
in which they are passed does not matter.
:args: The dictionary passed as args.
:mandatory_args: A list of keys that have to be
present in the dictionary.
:raise: :exc:`~ValueError`
:returns: True, if all mandatory args are passed. If not,
an exception is raised.
'''
missing_args = []
for name in mandatory_args:
if name not in args.keys():
missing_args.append(name)
if len(missing_args) > 0:
raise ValueError('Missing mandatory arguments: '+', '.join(missing_args))
else:
return True | [
"def",
"check_presence_of_mandatory_args",
"(",
"args",
",",
"mandatory_args",
")",
":",
"missing_args",
"=",
"[",
"]",
"for",
"name",
"in",
"mandatory_args",
":",
"if",
"name",
"not",
"in",
"args",
".",
"keys",
"(",
")",
":",
"missing_args",
".",
"append",
"(",
"name",
")",
"if",
"len",
"(",
"missing_args",
")",
">",
"0",
":",
"raise",
"ValueError",
"(",
"'Missing mandatory arguments: '",
"+",
"', '",
".",
"join",
"(",
"missing_args",
")",
")",
"else",
":",
"return",
"True"
] | Checks whether all mandatory arguments are passed.
This function aims at methods with many arguments
which are passed as kwargs so that the order
in which they are passed does not matter.
:args: The dictionary passed as args.
:mandatory_args: A list of keys that have to be
present in the dictionary.
:raise: :exc:`~ValueError`
:returns: True, if all mandatory args are passed. If not,
an exception is raised. | [
"Checks",
"whether",
"all",
"mandatory",
"arguments",
"are",
"passed",
"."
] | python | train |
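A short usage sketch of the kwargs-validation pattern, assuming the helper above is in scope; the argument names are invented:

    def create_handle(**args):
        # Fails early with a single message listing every missing argument.
        check_presence_of_mandatory_args(args, ['handle', 'location'])
        return args['handle'], args['location']

    print(create_handle(handle='123/abc', location='http://example.org'))
    # create_handle(handle='123/abc') -> ValueError: Missing mandatory arguments: location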
bolt-project/bolt | bolt/spark/construct.py | https://github.com/bolt-project/bolt/blob/9cd7104aa085498da3097b72696184b9d3651c51/bolt/spark/construct.py#L208-L222 | def _wrap(func, shape, context=None, axis=(0,), dtype=None, npartitions=None):
"""
Wrap an existing numpy constructor in a parallelized construction
"""
if isinstance(shape, int):
shape = (shape,)
key_shape, value_shape = get_kv_shape(shape, ConstructSpark._format_axes(axis, shape))
split = len(key_shape)
# make the keys
rdd = context.parallelize(list(product(*[arange(x) for x in key_shape])), npartitions)
# use a map to make the arrays in parallel
rdd = rdd.map(lambda x: (x, func(value_shape, dtype, order='C')))
return BoltArraySpark(rdd, shape=shape, split=split, dtype=dtype) | [
"def",
"_wrap",
"(",
"func",
",",
"shape",
",",
"context",
"=",
"None",
",",
"axis",
"=",
"(",
"0",
",",
")",
",",
"dtype",
"=",
"None",
",",
"npartitions",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"shape",
",",
"int",
")",
":",
"shape",
"=",
"(",
"shape",
",",
")",
"key_shape",
",",
"value_shape",
"=",
"get_kv_shape",
"(",
"shape",
",",
"ConstructSpark",
".",
"_format_axes",
"(",
"axis",
",",
"shape",
")",
")",
"split",
"=",
"len",
"(",
"key_shape",
")",
"# make the keys",
"rdd",
"=",
"context",
".",
"parallelize",
"(",
"list",
"(",
"product",
"(",
"*",
"[",
"arange",
"(",
"x",
")",
"for",
"x",
"in",
"key_shape",
"]",
")",
")",
",",
"npartitions",
")",
"# use a map to make the arrays in parallel",
"rdd",
"=",
"rdd",
".",
"map",
"(",
"lambda",
"x",
":",
"(",
"x",
",",
"func",
"(",
"value_shape",
",",
"dtype",
",",
"order",
"=",
"'C'",
")",
")",
")",
"return",
"BoltArraySpark",
"(",
"rdd",
",",
"shape",
"=",
"shape",
",",
"split",
"=",
"split",
",",
"dtype",
"=",
"dtype",
")"
] | Wrap an existing numpy constructor in a parallelized construction | [
"Wrap",
"an",
"existing",
"numpy",
"constructor",
"in",
"a",
"parallelized",
"construction"
] | python | test |
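The key shape becomes a cartesian product of index tuples (one RDD record per key), and each key maps to a locally constructed value block. A Spark-free sketch of the same key/value split with an invented shape:

    from itertools import product
    import numpy as np

    shape, split = (2, 3, 4), 2                  # first two axes become keys
    key_shape, value_shape = shape[:split], shape[split:]

    blocks = {key: np.zeros(value_shape)
              for key in product(*[range(x) for x in key_shape])}
    print(len(blocks), blocks[(0, 0)].shape)     # 6 blocks, each of shape (4,)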
fogleman/pg | pg/util.py | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/util.py#L68-L78 | def normal_from_points(a, b, c):
'''Computes a normal vector given three points.
'''
x1, y1, z1 = a
x2, y2, z2 = b
x3, y3, z3 = c
ab = (x2 - x1, y2 - y1, z2 - z1)
ac = (x3 - x1, y3 - y1, z3 - z1)
x, y, z = cross(ab, ac)
d = (x * x + y * y + z * z) ** 0.5
return (x / d, y / d, z / d) | [
"def",
"normal_from_points",
"(",
"a",
",",
"b",
",",
"c",
")",
":",
"x1",
",",
"y1",
",",
"z1",
"=",
"a",
"x2",
",",
"y2",
",",
"z2",
"=",
"b",
"x3",
",",
"y3",
",",
"z3",
"=",
"c",
"ab",
"=",
"(",
"x2",
"-",
"x1",
",",
"y2",
"-",
"y1",
",",
"z2",
"-",
"z1",
")",
"ac",
"=",
"(",
"x3",
"-",
"x1",
",",
"y3",
"-",
"y1",
",",
"z3",
"-",
"z1",
")",
"x",
",",
"y",
",",
"z",
"=",
"cross",
"(",
"ab",
",",
"ac",
")",
"d",
"=",
"(",
"x",
"*",
"x",
"+",
"y",
"*",
"y",
"+",
"z",
"*",
"z",
")",
"**",
"0.5",
"return",
"(",
"x",
"/",
"d",
",",
"y",
"/",
"d",
",",
"z",
"/",
"d",
")"
] | Computes a normal vector given three points. | [
"Computes",
"a",
"normal",
"vector",
"given",
"three",
"points",
"."
] | python | train |
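A worked example of the computation above; since `normal_from_points` relies on the module's `cross` helper, the sketch inlines an equivalent one:

    def cross(a, b):
        ax, ay, az = a
        bx, by, bz = b
        return (ay * bz - az * by, az * bx - ax * bz, ax * by - ay * bx)

    # Triangle in the XY plane: the unit normal should point along +Z.
    a, b, c = (0, 0, 0), (1, 0, 0), (0, 1, 0)
    ab = tuple(q - p for p, q in zip(a, b))
    ac = tuple(q - p for p, q in zip(a, c))
    x, y, z = cross(ab, ac)
    d = (x * x + y * y + z * z) ** 0.5
    print((x / d, y / d, z / d))                 # (0.0, 0.0, 1.0)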
mcocdawc/chemcoord | src/chemcoord/cartesian_coordinates/_cartesian_class_get_zmat.py | https://github.com/mcocdawc/chemcoord/blob/95561ce387c142227c38fb14a1d182179aef8f5f/src/chemcoord/cartesian_coordinates/_cartesian_class_get_zmat.py#L189-L337 | def get_construction_table(self, fragment_list=None,
use_lookup=None,
perform_checks=True):
"""Create a construction table for a Zmatrix.
A construction table is basically a Zmatrix without the values
for the bond lengths, angles and dihedrals.
It contains all the information about which reference atoms
are used by each atom in the Zmatrix.
The absolute references in cartesian space are one of the following
magic strings::
['origin', 'e_x', 'e_y', 'e_z']
This method creates a so-called "chemical" construction table,
which makes use of the connectivity table in this molecule.
Args:
fragment_list (sequence): There are four possibilities to specify
the sequence of fragments:
1. A list of tuples is given. Each tuple contains the fragment
with its corresponding construction table in the form of::
[(frag1, c_table1), (frag2, c_table2)...]
If the construction table of a fragment is not complete,
the rest of each fragment's
construction table is calculated automatically.
2. It is possible to omit the construction tables for some
or all fragments as in the following example::
[(frag1, c_table1), frag2, (frag3, c_table3)...]
3. If ``self`` contains more atoms than the union over all
fragments, the rest of the molecule without the fragments
is automatically prepended using
:meth:`~Cartesian.get_without`::
self.get_without(fragments) + fragment_list
4. If fragment_list is ``None`` then fragmentation, etc.
is done automatically. The fragments are then sorted by
their number of atoms, in order to use the largest fragment
as reference for the other ones.
use_lookup (bool): Use a lookup variable for
:meth:`~chemcoord.Cartesian.get_bonds`. The default is
specified in ``settings['defaults']['use_lookup']``
perform_checks (bool): The checks for invalid references are
performed using :meth:`~chemcoord.Cartesian.correct_dihedral`
and :meth:`~chemcoord.Cartesian.correct_absolute_refs`.
Returns:
:class:`pandas.DataFrame`: Construction table
"""
if use_lookup is None:
use_lookup = settings['defaults']['use_lookup']
if fragment_list is None:
self.get_bonds(use_lookup=use_lookup)
self._give_val_sorted_bond_dict(use_lookup=use_lookup)
fragments = sorted(self.fragmentate(use_lookup=use_lookup),
key=len, reverse=True)
# During function execution the bonding situation does not change,
# so the lookup may be used now.
use_lookup = True
else:
fragments = fragment_list
def prepend_missing_parts_of_molecule(fragment_list):
for fragment in fragment_list:
if pd.api.types.is_list_like(fragment):
try:
full_index |= fragment[0].index
except NameError:
full_index = fragment[0].index
else:
try:
full_index |= fragment.index
except NameError:
full_index = fragment.index
if not self.index.difference(full_index).empty:
missing_part = self.get_without(self.loc[full_index],
use_lookup=use_lookup)
fragment_list = missing_part + fragment_list
return fragment_list
fragments = prepend_missing_parts_of_molecule(fragments)
if isinstance(fragments[0], tuple):
fragment, references = fragments[0]
full_table = fragment._get_frag_constr_table(
use_lookup=use_lookup, predefined_table=references)
else:
fragment = fragments[0]
full_table = fragment._get_frag_constr_table(use_lookup=use_lookup)
for fragment in fragments[1:]:
finished_part = self.loc[full_table.index]
if pd.api.types.is_list_like(fragment):
fragment, references = fragment
if len(references) < min(3, len(fragment)):
raise ValueError('If you specify references for a '
'fragment, it has to consist of at least'
'min(3, len(fragment)) rows.')
constr_table = fragment._get_frag_constr_table(
predefined_table=references, use_lookup=use_lookup)
else:
i, b = fragment.get_shortest_distance(finished_part)[:2]
constr_table = fragment._get_frag_constr_table(
start_atom=i, use_lookup=use_lookup)
if len(full_table) == 1:
a, d = 'e_z', 'e_x'
elif len(full_table) == 2:
if b == full_table.index[0]:
a = full_table.index[1]
else:
a = full_table.index[0]
d = 'e_x'
else:
if b in full_table.index[:2]:
if b == full_table.index[0]:
a = full_table.index[2]
d = full_table.index[1]
else:
a = full_table.loc[b, 'b']
d = full_table.index[2]
else:
a, d = full_table.loc[b, ['b', 'a']]
if len(constr_table) >= 1:
constr_table.iloc[0, :] = b, a, d
if len(constr_table) >= 2:
constr_table.iloc[1, [1, 2]] = b, a
if len(constr_table) >= 3:
constr_table.iloc[2, 2] = b
full_table = pd.concat([full_table, constr_table])
c_table = full_table
if perform_checks:
c_table = self.correct_dihedral(c_table)
c_table = self.correct_dihedral(c_table, use_lookup=use_lookup)
c_table = self.correct_absolute_refs(c_table)
return c_table | [
"def",
"get_construction_table",
"(",
"self",
",",
"fragment_list",
"=",
"None",
",",
"use_lookup",
"=",
"None",
",",
"perform_checks",
"=",
"True",
")",
":",
"if",
"use_lookup",
"is",
"None",
":",
"use_lookup",
"=",
"settings",
"[",
"'defaults'",
"]",
"[",
"'use_lookup'",
"]",
"if",
"fragment_list",
"is",
"None",
":",
"self",
".",
"get_bonds",
"(",
"use_lookup",
"=",
"use_lookup",
")",
"self",
".",
"_give_val_sorted_bond_dict",
"(",
"use_lookup",
"=",
"use_lookup",
")",
"fragments",
"=",
"sorted",
"(",
"self",
".",
"fragmentate",
"(",
"use_lookup",
"=",
"use_lookup",
")",
",",
"key",
"=",
"len",
",",
"reverse",
"=",
"True",
")",
"# During function execution the bonding situation does not change,",
"# so the lookup may be used now.",
"use_lookup",
"=",
"True",
"else",
":",
"fragments",
"=",
"fragment_list",
"def",
"prepend_missing_parts_of_molecule",
"(",
"fragment_list",
")",
":",
"for",
"fragment",
"in",
"fragment_list",
":",
"if",
"pd",
".",
"api",
".",
"types",
".",
"is_list_like",
"(",
"fragment",
")",
":",
"try",
":",
"full_index",
"|=",
"fragment",
"[",
"0",
"]",
".",
"index",
"except",
"NameError",
":",
"full_index",
"=",
"fragment",
"[",
"0",
"]",
".",
"index",
"else",
":",
"try",
":",
"full_index",
"|=",
"fragment",
".",
"index",
"except",
"NameError",
":",
"full_index",
"=",
"fragment",
".",
"index",
"if",
"not",
"self",
".",
"index",
".",
"difference",
"(",
"full_index",
")",
".",
"empty",
":",
"missing_part",
"=",
"self",
".",
"get_without",
"(",
"self",
".",
"loc",
"[",
"full_index",
"]",
",",
"use_lookup",
"=",
"use_lookup",
")",
"fragment_list",
"=",
"missing_part",
"+",
"fragment_list",
"return",
"fragment_list",
"fragments",
"=",
"prepend_missing_parts_of_molecule",
"(",
"fragments",
")",
"if",
"isinstance",
"(",
"fragments",
"[",
"0",
"]",
",",
"tuple",
")",
":",
"fragment",
",",
"references",
"=",
"fragments",
"[",
"0",
"]",
"full_table",
"=",
"fragment",
".",
"_get_frag_constr_table",
"(",
"use_lookup",
"=",
"use_lookup",
",",
"predefined_table",
"=",
"references",
")",
"else",
":",
"fragment",
"=",
"fragments",
"[",
"0",
"]",
"full_table",
"=",
"fragment",
".",
"_get_frag_constr_table",
"(",
"use_lookup",
"=",
"use_lookup",
")",
"for",
"fragment",
"in",
"fragments",
"[",
"1",
":",
"]",
":",
"finished_part",
"=",
"self",
".",
"loc",
"[",
"full_table",
".",
"index",
"]",
"if",
"pd",
".",
"api",
".",
"types",
".",
"is_list_like",
"(",
"fragment",
")",
":",
"fragment",
",",
"references",
"=",
"fragment",
"if",
"len",
"(",
"references",
")",
"<",
"min",
"(",
"3",
",",
"len",
"(",
"fragment",
")",
")",
":",
"raise",
"ValueError",
"(",
"'If you specify references for a '",
"'fragment, it has to consist of at least'",
"'min(3, len(fragment)) rows.'",
")",
"constr_table",
"=",
"fragment",
".",
"_get_frag_constr_table",
"(",
"predefined_table",
"=",
"references",
",",
"use_lookup",
"=",
"use_lookup",
")",
"else",
":",
"i",
",",
"b",
"=",
"fragment",
".",
"get_shortest_distance",
"(",
"finished_part",
")",
"[",
":",
"2",
"]",
"constr_table",
"=",
"fragment",
".",
"_get_frag_constr_table",
"(",
"start_atom",
"=",
"i",
",",
"use_lookup",
"=",
"use_lookup",
")",
"if",
"len",
"(",
"full_table",
")",
"==",
"1",
":",
"a",
",",
"d",
"=",
"'e_z'",
",",
"'e_x'",
"elif",
"len",
"(",
"full_table",
")",
"==",
"2",
":",
"if",
"b",
"==",
"full_table",
".",
"index",
"[",
"0",
"]",
":",
"a",
"=",
"full_table",
".",
"index",
"[",
"1",
"]",
"else",
":",
"a",
"=",
"full_table",
".",
"index",
"[",
"0",
"]",
"d",
"=",
"'e_x'",
"else",
":",
"if",
"b",
"in",
"full_table",
".",
"index",
"[",
":",
"2",
"]",
":",
"if",
"b",
"==",
"full_table",
".",
"index",
"[",
"0",
"]",
":",
"a",
"=",
"full_table",
".",
"index",
"[",
"2",
"]",
"d",
"=",
"full_table",
".",
"index",
"[",
"1",
"]",
"else",
":",
"a",
"=",
"full_table",
".",
"loc",
"[",
"b",
",",
"'b'",
"]",
"d",
"=",
"full_table",
".",
"index",
"[",
"2",
"]",
"else",
":",
"a",
",",
"d",
"=",
"full_table",
".",
"loc",
"[",
"b",
",",
"[",
"'b'",
",",
"'a'",
"]",
"]",
"if",
"len",
"(",
"constr_table",
")",
">=",
"1",
":",
"constr_table",
".",
"iloc",
"[",
"0",
",",
":",
"]",
"=",
"b",
",",
"a",
",",
"d",
"if",
"len",
"(",
"constr_table",
")",
">=",
"2",
":",
"constr_table",
".",
"iloc",
"[",
"1",
",",
"[",
"1",
",",
"2",
"]",
"]",
"=",
"b",
",",
"a",
"if",
"len",
"(",
"constr_table",
")",
">=",
"3",
":",
"constr_table",
".",
"iloc",
"[",
"2",
",",
"2",
"]",
"=",
"b",
"full_table",
"=",
"pd",
".",
"concat",
"(",
"[",
"full_table",
",",
"constr_table",
"]",
")",
"c_table",
"=",
"full_table",
"if",
"perform_checks",
":",
"c_table",
"=",
"self",
".",
"correct_dihedral",
"(",
"c_table",
")",
"c_table",
"=",
"self",
".",
"correct_dihedral",
"(",
"c_table",
",",
"use_lookup",
"=",
"use_lookup",
")",
"c_table",
"=",
"self",
".",
"correct_absolute_refs",
"(",
"c_table",
")",
"return",
"c_table"
] | Create a construction table for a Zmatrix.
A construction table is basically a Zmatrix without the values
for the bond lengths, angles and dihedrals.
It contains all the information about which reference atoms
are used by each atom in the Zmatrix.
The absolute references in cartesian space are one of the following
magic strings::
['origin', 'e_x', 'e_y', 'e_z']
This method creates a so-called "chemical" construction table,
which makes use of the connectivity table in this molecule.
Args:
fragment_list (sequence): There are four possibilities to specify
the sequence of fragments:
1. A list of tuples is given. Each tuple contains the fragment
with its corresponding construction table in the form of::
[(frag1, c_table1), (frag2, c_table2)...]
If the construction table of a fragment is not complete,
the rest of each fragment's
construction table is calculated automatically.
2. It is possible to omit the construction tables for some
or all fragments as in the following example::
[(frag1, c_table1), frag2, (frag3, c_table3)...]
3. If ``self`` contains more atoms than the union over all
fragments, the rest of the molecule without the fragments
is automatically prepended using
:meth:`~Cartesian.get_without`::
self.get_without(fragments) + fragment_list
4. If fragment_list is ``None`` then fragmentation, etc.
is done automatically. The fragments are then sorted by
their number of atoms, in order to use the largest fragment
as reference for the other ones.
use_lookup (bool): Use a lookup variable for
:meth:`~chemcoord.Cartesian.get_bonds`. The default is
specified in ``settings['defaults']['use_lookup']``
perform_checks (bool): The checks for invalid references are
performed using :meth:`~chemcoord.Cartesian.correct_dihedral`
and :meth:`~chemcoord.Cartesian.correct_absolute_refs`.
Returns:
:class:`pandas.DataFrame`: Construction table | [
"Create",
"a",
"construction",
"table",
"for",
"a",
"Zmatrix",
"."
] | python | train |
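A hedged sketch of how a construction table is typically consumed; it assumes chemcoord's `Cartesian.read_xyz` and `get_zmat` entry points and an invented file name:

    import chemcoord as cc

    molecule = cc.Cartesian.read_xyz('molecule.xyz')     # hypothetical input file
    c_table = molecule.get_construction_table()
    zmat = molecule.get_zmat(c_table)                    # Zmatrix built from the table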
assemblerflow/flowcraft | flowcraft/templates/fastqc_report.py | https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/fastqc_report.py#L324-L359 | def get_sample_trim(p1_data, p2_data):
"""Get the optimal read trim range from data files of paired FastQ reads.
Given the FastQC data report files for paired-end FastQ reads, this
function will assess the optimal trim range for the 3' and 5' ends of
the paired-end reads. This assessment will be based on the *'Per sequence
GC content'*.
Parameters
----------
p1_data: str
Path to FastQC data report file from pair 1
p2_data: str
Path to FastQC data report file from pair 2
Returns
-------
optimal_5trim: int
Optimal trim index for the 5' end of the reads
optima_3trim: int
Optimal trim index for the 3' end of the reads
See Also
--------
trim_range
"""
sample_ranges = [trim_range(x) for x in [p1_data, p2_data]]
# Get the optimal trim position for 5' end
optimal_5trim = max([x[0] for x in sample_ranges])
# Get optimal trim position for 3' end
optimal_3trim = min([x[1] for x in sample_ranges])
return optimal_5trim, optimal_3trim | [
"def",
"get_sample_trim",
"(",
"p1_data",
",",
"p2_data",
")",
":",
"sample_ranges",
"=",
"[",
"trim_range",
"(",
"x",
")",
"for",
"x",
"in",
"[",
"p1_data",
",",
"p2_data",
"]",
"]",
"# Get the optimal trim position for 5' end",
"optimal_5trim",
"=",
"max",
"(",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"sample_ranges",
"]",
")",
"# Get optimal trim position for 3' end",
"optimal_3trim",
"=",
"min",
"(",
"[",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"sample_ranges",
"]",
")",
"return",
"optimal_5trim",
",",
"optimal_3trim"
] | Get the optimal read trim range from data files of paired FastQ reads.
Given the FastQC data report files for paired-end FastQ reads, this
function will assess the optimal trim range for the 3' and 5' ends of
the paired-end reads. This assessment will be based on the *'Per sequence
GC content'*.
Parameters
----------
p1_data: str
Path to FastQC data report file from pair 1
p2_data: str
Path to FastQC data report file from pair 2
Returns
-------
optimal_5trim: int
Optimal trim index for the 5' end of the reads
optima_3trim: int
Optimal trim index for the 3' end of the reads
See Also
--------
trim_range | [
"Get",
"the",
"optimal",
"read",
"trim",
"range",
"from",
"data",
"files",
"of",
"paired",
"FastQ",
"reads",
"."
] | python | test |
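The paired trim is simply the intersection of the two per-mate ranges: the larger 5' start and the smaller 3' end. A worked sketch with invented `trim_range` results:

    sample_ranges = [(3, 145), (5, 140)]                 # assumed (5' index, 3' index) per mate
    optimal_5trim = max(x[0] for x in sample_ranges)     # 5
    optimal_3trim = min(x[1] for x in sample_ranges)     # 140
    print(optimal_5trim, optimal_3trim)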
obulpathi/cdn-fastly-python | fastly/__init__.py | https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L684-L687 | def get_service(self, service_id):
"""Get a specific service by id."""
content = self._fetch("/service/%s" % service_id)
return FastlyService(self, content) | [
"def",
"get_service",
"(",
"self",
",",
"service_id",
")",
":",
"content",
"=",
"self",
".",
"_fetch",
"(",
"\"/service/%s\"",
"%",
"service_id",
")",
"return",
"FastlyService",
"(",
"self",
",",
"content",
")"
] | Get a specific service by id. | [
"Get",
"a",
"specific",
"service",
"by",
"id",
"."
] | python | train |
dwavesystems/dimod | dimod/binary_quadratic_model.py | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L1218-L1298 | def relabel_variables(self, mapping, inplace=True):
"""Relabel variables of a binary quadratic model as specified by mapping.
Args:
mapping (dict):
Dict mapping current variable labels to new ones. If an incomplete mapping is
provided, unmapped variables retain their current labels.
inplace (bool, optional, default=True):
If True, the binary quadratic model is updated in-place; otherwise, a new binary
quadratic model is returned.
Returns:
:class:`.BinaryQuadraticModel`: A binary quadratic model
with the variables relabeled. If `inplace` is set to True, returns
itself.
Examples:
This example creates a binary quadratic model with two variables and relabels one.
>>> import dimod
...
>>> model = dimod.BinaryQuadraticModel({0: 0., 1: 1.}, {(0, 1): -1}, 0.0, vartype=dimod.SPIN)
>>> model.relabel_variables({0: 'a'}) # doctest: +SKIP
BinaryQuadraticModel({1: 1.0, 'a': 0.0}, {('a', 1): -1}, 0.0, Vartype.SPIN)
This example creates a binary quadratic model with two variables and returns a new
model with relabeled variables.
>>> import dimod
...
>>> model = dimod.BinaryQuadraticModel({0: 0., 1: 1.}, {(0, 1): -1}, 0.0, vartype=dimod.SPIN)
>>> new_model = model.relabel_variables({0: 'a', 1: 'b'}, inplace=False) # doctest: +SKIP
>>> new_model.quadratic # doctest: +SKIP
{('a', 'b'): -1}
"""
try:
old_labels = set(mapping)
new_labels = set(itervalues(mapping))
except TypeError:
raise ValueError("mapping targets must be hashable objects")
for v in new_labels:
if v in self.linear and v not in old_labels:
raise ValueError(('A variable cannot be relabeled "{}" without also relabeling '
"the existing variable of the same name").format(v))
if inplace:
shared = old_labels & new_labels
if shared:
old_to_intermediate, intermediate_to_new = resolve_label_conflict(mapping, old_labels, new_labels)
self.relabel_variables(old_to_intermediate, inplace=True)
self.relabel_variables(intermediate_to_new, inplace=True)
return self
linear = self.linear
quadratic = self.quadratic
adj = self.adj
# rebuild linear and adj with the new labels
for old in list(linear):
if old not in mapping:
continue
new = mapping[old]
# get the new interactions that need to be added
new_interactions = [(new, v, adj[old][v]) for v in adj[old]]
self.add_variable(new, linear[old])
self.add_interactions_from(new_interactions)
self.remove_variable(old)
return self
else:
return BinaryQuadraticModel({mapping.get(v, v): bias for v, bias in iteritems(self.linear)},
{(mapping.get(u, u), mapping.get(v, v)): bias
for (u, v), bias in iteritems(self.quadratic)},
self.offset, self.vartype) | [
"def",
"relabel_variables",
"(",
"self",
",",
"mapping",
",",
"inplace",
"=",
"True",
")",
":",
"try",
":",
"old_labels",
"=",
"set",
"(",
"mapping",
")",
"new_labels",
"=",
"set",
"(",
"itervalues",
"(",
"mapping",
")",
")",
"except",
"TypeError",
":",
"raise",
"ValueError",
"(",
"\"mapping targets must be hashable objects\"",
")",
"for",
"v",
"in",
"new_labels",
":",
"if",
"v",
"in",
"self",
".",
"linear",
"and",
"v",
"not",
"in",
"old_labels",
":",
"raise",
"ValueError",
"(",
"(",
"'A variable cannot be relabeled \"{}\" without also relabeling '",
"\"the existing variable of the same name\"",
")",
".",
"format",
"(",
"v",
")",
")",
"if",
"inplace",
":",
"shared",
"=",
"old_labels",
"&",
"new_labels",
"if",
"shared",
":",
"old_to_intermediate",
",",
"intermediate_to_new",
"=",
"resolve_label_conflict",
"(",
"mapping",
",",
"old_labels",
",",
"new_labels",
")",
"self",
".",
"relabel_variables",
"(",
"old_to_intermediate",
",",
"inplace",
"=",
"True",
")",
"self",
".",
"relabel_variables",
"(",
"intermediate_to_new",
",",
"inplace",
"=",
"True",
")",
"return",
"self",
"linear",
"=",
"self",
".",
"linear",
"quadratic",
"=",
"self",
".",
"quadratic",
"adj",
"=",
"self",
".",
"adj",
"# rebuild linear and adj with the new labels",
"for",
"old",
"in",
"list",
"(",
"linear",
")",
":",
"if",
"old",
"not",
"in",
"mapping",
":",
"continue",
"new",
"=",
"mapping",
"[",
"old",
"]",
"# get the new interactions that need to be added",
"new_interactions",
"=",
"[",
"(",
"new",
",",
"v",
",",
"adj",
"[",
"old",
"]",
"[",
"v",
"]",
")",
"for",
"v",
"in",
"adj",
"[",
"old",
"]",
"]",
"self",
".",
"add_variable",
"(",
"new",
",",
"linear",
"[",
"old",
"]",
")",
"self",
".",
"add_interactions_from",
"(",
"new_interactions",
")",
"self",
".",
"remove_variable",
"(",
"old",
")",
"return",
"self",
"else",
":",
"return",
"BinaryQuadraticModel",
"(",
"{",
"mapping",
".",
"get",
"(",
"v",
",",
"v",
")",
":",
"bias",
"for",
"v",
",",
"bias",
"in",
"iteritems",
"(",
"self",
".",
"linear",
")",
"}",
",",
"{",
"(",
"mapping",
".",
"get",
"(",
"u",
",",
"u",
")",
",",
"mapping",
".",
"get",
"(",
"v",
",",
"v",
")",
")",
":",
"bias",
"for",
"(",
"u",
",",
"v",
")",
",",
"bias",
"in",
"iteritems",
"(",
"self",
".",
"quadratic",
")",
"}",
",",
"self",
".",
"offset",
",",
"self",
".",
"vartype",
")"
] | Relabel variables of a binary quadratic model as specified by mapping.
Args:
mapping (dict):
Dict mapping current variable labels to new ones. If an incomplete mapping is
provided, unmapped variables retain their current labels.
inplace (bool, optional, default=True):
If True, the binary quadratic model is updated in-place; otherwise, a new binary
quadratic model is returned.
Returns:
:class:`.BinaryQuadraticModel`: A binary quadratic model
with the variables relabeled. If `inplace` is set to True, returns
itself.
Examples:
This example creates a binary quadratic model with two variables and relabels one.
>>> import dimod
...
>>> model = dimod.BinaryQuadraticModel({0: 0., 1: 1.}, {(0, 1): -1}, 0.0, vartype=dimod.SPIN)
>>> model.relabel_variables({0: 'a'}) # doctest: +SKIP
BinaryQuadraticModel({1: 1.0, 'a': 0.0}, {('a', 1): -1}, 0.0, Vartype.SPIN)
This example creates a binary quadratic model with two variables and returns a new
model with relabeled variables.
>>> import dimod
...
>>> model = dimod.BinaryQuadraticModel({0: 0., 1: 1.}, {(0, 1): -1}, 0.0, vartype=dimod.SPIN)
>>> new_model = model.relabel_variables({0: 'a', 1: 'b'}, inplace=False) # doctest: +SKIP
>>> new_model.quadratic # doctest: +SKIP
{('a', 'b'): -1} | [
"Relabel",
"variables",
"of",
"a",
"binary",
"quadratic",
"model",
"as",
"specified",
"by",
"mapping",
"."
] | python | train |
ThePlasmaRailgun/py-rolldice | rolldice/rolldice.py | https://github.com/ThePlasmaRailgun/py-rolldice/blob/dc46d1d3e765592e76c52fd812b4f3b7425db552/rolldice/rolldice.py#L199-L221 | def _eval_call(self, node):
"""
Evaluate a function call
:param node: Node to eval
:return: Result of node
"""
try:
func = self.functions[node.func.id]
except KeyError:
raise NameError(node.func.id)
value = func(
*(self._eval(a) for a in node.args),
**dict(self._eval(k) for k in node.keywords)
)
if value is True:
return 1
elif value is False:
return 0
else:
return value | [
"def",
"_eval_call",
"(",
"self",
",",
"node",
")",
":",
"try",
":",
"func",
"=",
"self",
".",
"functions",
"[",
"node",
".",
"func",
".",
"id",
"]",
"except",
"KeyError",
":",
"raise",
"NameError",
"(",
"node",
".",
"func",
".",
"id",
")",
"value",
"=",
"func",
"(",
"*",
"(",
"self",
".",
"_eval",
"(",
"a",
")",
"for",
"a",
"in",
"node",
".",
"args",
")",
",",
"*",
"*",
"dict",
"(",
"self",
".",
"_eval",
"(",
"k",
")",
"for",
"k",
"in",
"node",
".",
"keywords",
")",
")",
"if",
"value",
"is",
"True",
":",
"return",
"1",
"elif",
"value",
"is",
"False",
":",
"return",
"0",
"else",
":",
"return",
"value"
] | Evaluate a function call
:param node: Node to eval
:return: Result of node | [
"Evaluate",
"a",
"function",
"call"
] | python | train |
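As an illustrative sketch only (the whitelist below is hypothetical, not the rolldice function table): the same dispatch idea — look up an ast.Call node's function name in an allowed-functions dict, evaluate its arguments, and normalize booleans to integers — can be shown standalone:
import ast
SAFE_FUNCTIONS = {"abs": abs, "min": min, "max": max}  # hypothetical whitelist
def eval_call(node, eval_arg):
    # Unknown names raise NameError, mirroring the KeyError handling above.
    try:
        func = SAFE_FUNCTIONS[node.func.id]
    except KeyError:
        raise NameError(node.func.id)
    value = func(*(eval_arg(a) for a in node.args))
    # Booleans are normalized to 1/0, as in the original method.
    if value is True:
        return 1
    if value is False:
        return 0
    return value
tree = ast.parse("max(2, 5)", mode="eval")
print(eval_call(tree.body, ast.literal_eval))  # 5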
fastai/fastai | fastai/torch_core.py | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/torch_core.py#L365-L371 | def try_int(o:Any)->Any:
"Try to convert `o` to int, default to `o` if not possible."
# NB: single-item rank-1 array/tensor can be converted to int, but we don't want to do this
if isinstance(o, (np.ndarray,Tensor)): return o if o.ndim else int(o)
if isinstance(o, collections.Sized) or getattr(o,'__array_interface__',False): return o
try: return int(o)
except: return o | [
"def",
"try_int",
"(",
"o",
":",
"Any",
")",
"->",
"Any",
":",
"# NB: single-item rank-1 array/tensor can be converted to int, but we don't want to do this",
"if",
"isinstance",
"(",
"o",
",",
"(",
"np",
".",
"ndarray",
",",
"Tensor",
")",
")",
":",
"return",
"o",
"if",
"o",
".",
"ndim",
"else",
"int",
"(",
"o",
")",
"if",
"isinstance",
"(",
"o",
",",
"collections",
".",
"Sized",
")",
"or",
"getattr",
"(",
"o",
",",
"'__array_interface__'",
",",
"False",
")",
":",
"return",
"o",
"try",
":",
"return",
"int",
"(",
"o",
")",
"except",
":",
"return",
"o"
] | Try to convert `o` to int, default to `o` if not possible. | [
"Try",
"to",
"convert",
"o",
"to",
"int",
"default",
"to",
"o",
"if",
"not",
"possible",
"."
] | python | train |
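A few illustrative calls (not taken from the fastai sources) showing the behavior the docstring describes — sized objects pass through untouched, plain floats are cast:
>>> try_int(4.0)
4
>>> try_int(3)
3
>>> try_int('3')      # strings are Sized, so they are returned unchanged
'3'
>>> try_int([1, 2])   # containers too
[1, 2]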
edx/edx-enterprise | integrated_channels/xapi/management/commands/send_course_enrollments.py | https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/integrated_channels/xapi/management/commands/send_course_enrollments.py#L131-L147 | def get_course_enrollments(self, enterprise_customer, days):
"""
Get course enrollments for all the learners of given enterprise customer.
Arguments:
enterprise_customer (EnterpriseCustomer): Include Course enrollments for learners
of this enterprise customer.
days (int): Include course enrollment of this number of days.
Returns:
(list): A list of CourseEnrollment objects.
"""
return CourseEnrollment.objects.filter(
created__gt=datetime.datetime.now() - datetime.timedelta(days=days)
).filter(
user_id__in=enterprise_customer.enterprise_customer_users.values_list('user_id', flat=True)
) | [
"def",
"get_course_enrollments",
"(",
"self",
",",
"enterprise_customer",
",",
"days",
")",
":",
"return",
"CourseEnrollment",
".",
"objects",
".",
"filter",
"(",
"created__gt",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"-",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"days",
")",
")",
".",
"filter",
"(",
"user_id__in",
"=",
"enterprise_customer",
".",
"enterprise_customer_users",
".",
"values_list",
"(",
"'user_id'",
",",
"flat",
"=",
"True",
")",
")"
] | Get course enrollments for all the learners of given enterprise customer.
Arguments:
enterprise_customer (EnterpriseCustomer): Include Course enrollments for learners
of this enterprise customer.
days (int): Include course enrollment of this number of days.
Returns:
(list): A list of CourseEnrollment objects. | [
"Get",
"course",
"enrollments",
"for",
"all",
"the",
"learners",
"of",
"given",
"enterprise",
"customer",
"."
] | python | valid |
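A hedged usage sketch (CourseEnrollment and enterprise_customer are the objects named in the record above; the 7-day window is arbitrary): the queryset simply intersects a creation-time cutoff with the customer's learner ids.
import datetime
cutoff = datetime.datetime.now() - datetime.timedelta(days=7)
learner_ids = enterprise_customer.enterprise_customer_users.values_list('user_id', flat=True)
recent_enrollments = CourseEnrollment.objects.filter(created__gt=cutoff, user_id__in=learner_ids)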
faxir/faxir-python | faxir/api/numbers_api.py | https://github.com/faxir/faxir-python/blob/75ed2ea487a6be537342baea1077a02b0c8e70c1/faxir/api/numbers_api.py#L36-L56 | def get_number(self, number, **kwargs): # noqa: E501
"""Get number information # noqa: E501
Get info of a single number # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_number(number, async=True)
>>> result = thread.get()
:param async bool
:param str number: (required)
:return: Number
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_number_with_http_info(number, **kwargs) # noqa: E501
else:
(data) = self.get_number_with_http_info(number, **kwargs) # noqa: E501
return data | [
"def",
"get_number",
"(",
"self",
",",
"number",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"self",
".",
"get_number_with_http_info",
"(",
"number",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"get_number_with_http_info",
"(",
"number",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | Get number information # noqa: E501
Get info of a single number # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_number(number, async=True)
>>> result = thread.get()
:param async bool
:param str number: (required)
:return: Number
If the method is called asynchronously,
returns the request thread. | [
"Get",
"number",
"information",
"#",
"noqa",
":",
"E501"
] | python | train |
cenobites/flask-jsonrpc | flask_jsonrpc/proxy.py | https://github.com/cenobites/flask-jsonrpc/blob/c7f8e049adda8cf4c5a62aea345eb42697f10eff/flask_jsonrpc/proxy.py#L59-L70 | def send_payload(self, params):
"""Performs the actual sending action and returns the result
"""
data = json.dumps({
'jsonrpc': self.version,
'method': self.service_name,
'params': params,
'id': text_type(uuid.uuid4())
})
data_binary = data.encode('utf-8')
url_request = Request(self.service_url, data_binary, headers=self.headers)
return urlopen(url_request).read() | [
"def",
"send_payload",
"(",
"self",
",",
"params",
")",
":",
"data",
"=",
"json",
".",
"dumps",
"(",
"{",
"'jsonrpc'",
":",
"self",
".",
"version",
",",
"'method'",
":",
"self",
".",
"service_name",
",",
"'params'",
":",
"params",
",",
"'id'",
":",
"text_type",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"}",
")",
"data_binary",
"=",
"data",
".",
"encode",
"(",
"'utf-8'",
")",
"url_request",
"=",
"Request",
"(",
"self",
".",
"service_url",
",",
"data_binary",
",",
"headers",
"=",
"self",
".",
"headers",
")",
"return",
"urlopen",
"(",
"url_request",
")",
".",
"read",
"(",
")"
] | Performs the actual sending action and returns the result | [
"Performs",
"the",
"actual",
"sending",
"action",
"and",
"returns",
"the",
"result"
] | python | valid |
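For illustration (a standalone sketch, not the proxy class itself): the payload assembled above is a standard JSON-RPC envelope posted with urllib.
import json
import uuid
from urllib.request import Request, urlopen
def send_payload(service_url, service_name, params, version='2.0', headers=None):
    # Assemble the JSON-RPC envelope the same way the proxy method does.
    data = json.dumps({
        'jsonrpc': version,
        'method': service_name,
        'params': params,
        'id': str(uuid.uuid4()),
    }).encode('utf-8')
    return urlopen(Request(service_url, data, headers=headers or {})).read()
# e.g. send_payload('http://localhost:5000/api', 'App.index', [])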
harmsm/PyCmdMessenger | PyCmdMessenger/PyCmdMessenger.py | https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L175-L289 | def receive(self,arg_formats=None):
"""
Receive commands coming off the serial port.
arg_formats is an optional keyword that specifies the formats to use to
parse incoming arguments. If specified here, arg_formats supersedes
the formats specified on initialization.
"""
# Read serial input until a command separator or empty character is
# reached
msg = [[]]
raw_msg = []
escaped = False
command_sep_found = False
while True:
tmp = self.board.read()
raw_msg.append(tmp)
if escaped:
# Either drop the escape character or, if this wasn't really
# an escape, keep previous escape character and new character
if tmp in self._escaped_characters:
msg[-1].append(tmp)
escaped = False
else:
msg[-1].append(self._byte_escape_sep)
msg[-1].append(tmp)
escaped = False
else:
# look for escape character
if tmp == self._byte_escape_sep:
escaped = True
# or field separator
elif tmp == self._byte_field_sep:
msg.append([])
# or command separator
elif tmp == self._byte_command_sep:
command_sep_found = True
break
# or any empty character
elif tmp == b'':
break
# okay, must be something
else:
msg[-1].append(tmp)
# No message received given timeouts
if len(msg) == 1 and len(msg[0]) == 0:
return None
# Make sure the message terminated properly
if not command_sep_found:
# empty message (likely from line endings being included)
joined_raw = b''.join(raw_msg)
if joined_raw.strip() == b'':
return None
err = "Incomplete message ({})".format(joined_raw.decode())
raise EOFError(err)
# Turn message into fields
fields = [b''.join(m) for m in msg]
# Get the command name.
cmd = fields[0].strip().decode()
try:
cmd_name = self._int_to_cmd_name[int(cmd)]
except (ValueError,IndexError):
if self.give_warnings:
cmd_name = "unknown"
w = "Recieved unrecognized command ({}).".format(cmd)
warnings.warn(w,Warning)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd_name]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(fields[1:]))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,fields[1:])
if len(fields[1:]) > 0:
if len(arg_format_list) != len(fields[1:]):
err = "Number of argument formats must match the number of recieved arguments."
raise ValueError(err)
received = []
for i, f in enumerate(fields[1:]):
received.append(self._recv_methods[arg_format_list[i]](f))
# Record the time the message arrived
message_time = time.time()
return cmd_name, received, message_time | [
"def",
"receive",
"(",
"self",
",",
"arg_formats",
"=",
"None",
")",
":",
"# Read serial input until a command separator or empty character is",
"# reached ",
"msg",
"=",
"[",
"[",
"]",
"]",
"raw_msg",
"=",
"[",
"]",
"escaped",
"=",
"False",
"command_sep_found",
"=",
"False",
"while",
"True",
":",
"tmp",
"=",
"self",
".",
"board",
".",
"read",
"(",
")",
"raw_msg",
".",
"append",
"(",
"tmp",
")",
"if",
"escaped",
":",
"# Either drop the escape character or, if this wasn't really",
"# an escape, keep previous escape character and new character",
"if",
"tmp",
"in",
"self",
".",
"_escaped_characters",
":",
"msg",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"tmp",
")",
"escaped",
"=",
"False",
"else",
":",
"msg",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"self",
".",
"_byte_escape_sep",
")",
"msg",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"tmp",
")",
"escaped",
"=",
"False",
"else",
":",
"# look for escape character",
"if",
"tmp",
"==",
"self",
".",
"_byte_escape_sep",
":",
"escaped",
"=",
"True",
"# or field separator",
"elif",
"tmp",
"==",
"self",
".",
"_byte_field_sep",
":",
"msg",
".",
"append",
"(",
"[",
"]",
")",
"# or command separator",
"elif",
"tmp",
"==",
"self",
".",
"_byte_command_sep",
":",
"command_sep_found",
"=",
"True",
"break",
"# or any empty characater ",
"elif",
"tmp",
"==",
"b''",
":",
"break",
"# okay, must be something",
"else",
":",
"msg",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"tmp",
")",
"# No message received given timeouts",
"if",
"len",
"(",
"msg",
")",
"==",
"1",
"and",
"len",
"(",
"msg",
"[",
"0",
"]",
")",
"==",
"0",
":",
"return",
"None",
"# Make sure the message terminated properly",
"if",
"not",
"command_sep_found",
":",
"# empty message (likely from line endings being included) ",
"joined_raw",
"=",
"b''",
".",
"join",
"(",
"raw_msg",
")",
"if",
"joined_raw",
".",
"strip",
"(",
")",
"==",
"b''",
":",
"return",
"None",
"err",
"=",
"\"Incomplete message ({})\"",
".",
"format",
"(",
"joined_raw",
".",
"decode",
"(",
")",
")",
"raise",
"EOFError",
"(",
"err",
")",
"# Turn message into fields",
"fields",
"=",
"[",
"b''",
".",
"join",
"(",
"m",
")",
"for",
"m",
"in",
"msg",
"]",
"# Get the command name.",
"cmd",
"=",
"fields",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"decode",
"(",
")",
"try",
":",
"cmd_name",
"=",
"self",
".",
"_int_to_cmd_name",
"[",
"int",
"(",
"cmd",
")",
"]",
"except",
"(",
"ValueError",
",",
"IndexError",
")",
":",
"if",
"self",
".",
"give_warnings",
":",
"cmd_name",
"=",
"\"unknown\"",
"w",
"=",
"\"Recieved unrecognized command ({}).\"",
".",
"format",
"(",
"cmd",
")",
"warnings",
".",
"warn",
"(",
"w",
",",
"Warning",
")",
"# Figure out what formats to use for each argument. ",
"arg_format_list",
"=",
"[",
"]",
"if",
"arg_formats",
"!=",
"None",
":",
"# The user specified formats",
"arg_format_list",
"=",
"list",
"(",
"arg_formats",
")",
"else",
":",
"try",
":",
"# See if class was initialized with a format for arguments to this",
"# command",
"arg_format_list",
"=",
"self",
".",
"_cmd_name_to_format",
"[",
"cmd_name",
"]",
"except",
"KeyError",
":",
"# if not, guess for all arguments",
"arg_format_list",
"=",
"[",
"\"g\"",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"fields",
"[",
"1",
":",
"]",
")",
")",
"]",
"# Deal with \"*\" format ",
"arg_format_list",
"=",
"self",
".",
"_treat_star_format",
"(",
"arg_format_list",
",",
"fields",
"[",
"1",
":",
"]",
")",
"if",
"len",
"(",
"fields",
"[",
"1",
":",
"]",
")",
">",
"0",
":",
"if",
"len",
"(",
"arg_format_list",
")",
"!=",
"len",
"(",
"fields",
"[",
"1",
":",
"]",
")",
":",
"err",
"=",
"\"Number of argument formats must match the number of recieved arguments.\"",
"raise",
"ValueError",
"(",
"err",
")",
"received",
"=",
"[",
"]",
"for",
"i",
",",
"f",
"in",
"enumerate",
"(",
"fields",
"[",
"1",
":",
"]",
")",
":",
"received",
".",
"append",
"(",
"self",
".",
"_recv_methods",
"[",
"arg_format_list",
"[",
"i",
"]",
"]",
"(",
"f",
")",
")",
"# Record the time the message arrived",
"message_time",
"=",
"time",
".",
"time",
"(",
")",
"return",
"cmd_name",
",",
"received",
",",
"message_time"
] | Receive commands coming off the serial port.
arg_formats is an optional keyword that specifies the formats to use to
parse incoming arguments. If specified here, arg_formats supersedes
the formats specified on initialization. | [
"Recieve",
"commands",
"coming",
"off",
"the",
"serial",
"port",
"."
] | python | train |
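Standalone illustration (simplified; assumes the library's default ',' field, ';' command and '/' escape separators): the framing loop above splits a raw serial message into fields before the per-format parsing.
def split_message(raw, field_sep=b",", command_sep=b";", escape=b"/"):
    # Walk the raw bytes one at a time, honoring escapes, until the command separator.
    fields, current, escaped = [], b"", False
    for i in range(len(raw)):
        ch = raw[i:i + 1]
        if escaped:
            current += ch
            escaped = False
        elif ch == escape:
            escaped = True
        elif ch == field_sep:
            fields.append(current)
            current = b""
        elif ch == command_sep:
            fields.append(current)
            break
        else:
            current += ch
    return fields
print(split_message(b"1,3,21.5;"))  # [b'1', b'3', b'21.5']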
joferkington/mplstereonet | mplstereonet/stereonet_math.py | https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_math.py#L356-L380 | def mean_vector(lons, lats):
"""
Returns the resultant vector from a series of longitudes and latitudes
Parameters
----------
lons : array-like
A sequence of longitudes (in radians)
lats : array-like
A sequence of latitudes (in radians)
Returns
-------
mean_vec : tuple
(lon, lat) in radians
r_value : number
The magnitude of the resultant vector (between 0 and 1) This represents
the degree of clustering in the data.
"""
xyz = sph2cart(lons, lats)
xyz = np.vstack(xyz).T
mean_vec = xyz.mean(axis=0)
r_value = np.linalg.norm(mean_vec)
mean_vec = cart2sph(*mean_vec)
return mean_vec, r_value | [
"def",
"mean_vector",
"(",
"lons",
",",
"lats",
")",
":",
"xyz",
"=",
"sph2cart",
"(",
"lons",
",",
"lats",
")",
"xyz",
"=",
"np",
".",
"vstack",
"(",
"xyz",
")",
".",
"T",
"mean_vec",
"=",
"xyz",
".",
"mean",
"(",
"axis",
"=",
"0",
")",
"r_value",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"mean_vec",
")",
"mean_vec",
"=",
"cart2sph",
"(",
"*",
"mean_vec",
")",
"return",
"mean_vec",
",",
"r_value"
] | Returns the resultant vector from a series of longitudes and latitudes
Parameters
----------
lons : array-like
A sequence of longitudes (in radians)
lats : array-like
A sequence of latitudes (in radians)
Returns
-------
mean_vec : tuple
(lon, lat) in radians
r_value : number
The magnitude of the resultant vector (between 0 and 1) This represents
the degree of clustering in the data. | [
"Returns",
"the",
"resultant",
"vector",
"from",
"a",
"series",
"of",
"longitudes",
"and",
"latitudes"
] | python | train |
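An illustrative call (input values are made up): tightly clustered orientations give an r_value close to 1.
>>> import numpy as np
>>> lons = np.radians([40.0, 42.0, 38.0])
>>> lats = np.radians([10.0, 11.0, 9.0])
>>> (mean_lon, mean_lat), r_value = mean_vector(lons, lats)
>>> 0.99 < r_value <= 1.0   # nearly parallel vectors barely cancel
True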
cyrus-/cypy | cypy/__init__.py | https://github.com/cyrus-/cypy/blob/04bb59e91fa314e8cf987743189c77a9b6bc371d/cypy/__init__.py#L583-L600 | def is_int_like(value):
"""Returns whether the value can be used as a standard integer.
>>> is_int_like(4)
True
>>> is_int_like(4.0)
False
>>> is_int_like("4")
False
>>> is_int_like("abc")
False
"""
try:
if isinstance(value, int): return True
return int(value) == value and str(value).isdigit()
except:
return False | [
"def",
"is_int_like",
"(",
"value",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"value",
",",
"int",
")",
":",
"return",
"True",
"return",
"int",
"(",
"value",
")",
"==",
"value",
"and",
"str",
"(",
"value",
")",
".",
"isdigit",
"(",
")",
"except",
":",
"return",
"False"
] | Returns whether the value can be used as a standard integer.
>>> is_int_like(4)
True
>>> is_int_like(4.0)
False
>>> is_int_like("4")
False
>>> is_int_like("abc")
False | [
"Returns",
"whether",
"the",
"value",
"can",
"be",
"used",
"as",
"a",
"standard",
"integer",
"."
] | python | train |
cloudendpoints/endpoints-python | endpoints/api_config.py | https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/api_config.py#L1626-L1644 | def __parameter_default(self, final_subfield):
"""Returns default value of final subfield if it has one.
If this subfield comes from a field list returned from __field_to_subfields,
none of the fields in the subfield list can have a default except the final
one since they all must be message fields.
Args:
final_subfield: A simple field from the end of a subfield list.
Returns:
The default value of the subfield, if any exists, with the exception of an
enum field, which will have its value cast to a string.
"""
if final_subfield.default:
if isinstance(final_subfield, messages.EnumField):
return final_subfield.default.name
else:
return final_subfield.default | [
"def",
"__parameter_default",
"(",
"self",
",",
"final_subfield",
")",
":",
"if",
"final_subfield",
".",
"default",
":",
"if",
"isinstance",
"(",
"final_subfield",
",",
"messages",
".",
"EnumField",
")",
":",
"return",
"final_subfield",
".",
"default",
".",
"name",
"else",
":",
"return",
"final_subfield",
".",
"default"
] | Returns default value of final subfield if it has one.
If this subfield comes from a field list returned from __field_to_subfields,
none of the fields in the subfield list can have a default except the final
one since they all must be message fields.
Args:
final_subfield: A simple field from the end of a subfield list.
Returns:
The default value of the subfield, if any exists, with the exception of an
enum field, which will have its value cast to a string. | [
"Returns",
"default",
"value",
"of",
"final",
"subfield",
"if",
"it",
"has",
"one",
"."
] | python | train |
meyersj/geotweet | geotweet/mapreduce/poi_nearby_tweets.py | https://github.com/meyersj/geotweet/blob/1a6b55f98adf34d1b91f172d9187d599616412d9/geotweet/mapreduce/poi_nearby_tweets.py#L110-L142 | def reducer_metro(self, metro, values):
"""
Output tags of POI locations nearby tweet locations
Values will be sorted coming into reducer.
First element in each value tuple will be either 1 (osm POI) or 2 (geotweet).
Build a spatial index with POI records.
For each tweet lookup nearby POI, and emit tag values for predefined tags.
"""
lookup = CachedLookup(precision=POI_GEOHASH_PRECISION)
for i, value in enumerate(values):
type_tag, lonlat, data = value
if type_tag == 1:
# OSM POI node, construct geojson and add to Rtree index
lookup.insert(i, dict(
geometry=dict(type='Point', coordinates=project(lonlat)),
properties=dict(tags=data)
))
else:
# geotweet, lookup nearest POI from index
if not lookup.data_store:
return
poi_names = []
kwargs = dict(buffer_size=POI_DISTANCE, multiple=True)
# lookup nearby POI from Rtree index (caching results)
# for any tags we care about emit the tags value and 1
for poi in lookup.get(lonlat, **kwargs):
has_tag = [ tag in poi['tags'] for tag in POI_TAGS ]
if any(has_tag) and 'name' in poi['tags']:
poi_names.append(poi['tags']['name'])
for poi in set(poi_names):
yield (metro, poi), 1 | [
"def",
"reducer_metro",
"(",
"self",
",",
"metro",
",",
"values",
")",
":",
"lookup",
"=",
"CachedLookup",
"(",
"precision",
"=",
"POI_GEOHASH_PRECISION",
")",
"for",
"i",
",",
"value",
"in",
"enumerate",
"(",
"values",
")",
":",
"type_tag",
",",
"lonlat",
",",
"data",
"=",
"value",
"if",
"type_tag",
"==",
"1",
":",
"# OSM POI node, construct geojson and add to Rtree index",
"lookup",
".",
"insert",
"(",
"i",
",",
"dict",
"(",
"geometry",
"=",
"dict",
"(",
"type",
"=",
"'Point'",
",",
"coordinates",
"=",
"project",
"(",
"lonlat",
")",
")",
",",
"properties",
"=",
"dict",
"(",
"tags",
"=",
"data",
")",
")",
")",
"else",
":",
"# geotweet, lookup nearest POI from index",
"if",
"not",
"lookup",
".",
"data_store",
":",
"return",
"poi_names",
"=",
"[",
"]",
"kwargs",
"=",
"dict",
"(",
"buffer_size",
"=",
"POI_DISTANCE",
",",
"multiple",
"=",
"True",
")",
"# lookup nearby POI from Rtree index (caching results)",
"# for any tags we care about emit the tags value and 1",
"for",
"poi",
"in",
"lookup",
".",
"get",
"(",
"lonlat",
",",
"*",
"*",
"kwargs",
")",
":",
"has_tag",
"=",
"[",
"tag",
"in",
"poi",
"[",
"'tags'",
"]",
"for",
"tag",
"in",
"POI_TAGS",
"]",
"if",
"any",
"(",
"has_tag",
")",
"and",
"'name'",
"in",
"poi",
"[",
"'tags'",
"]",
":",
"poi_names",
".",
"append",
"(",
"poi",
"[",
"'tags'",
"]",
"[",
"'name'",
"]",
")",
"for",
"poi",
"in",
"set",
"(",
"poi_names",
")",
":",
"yield",
"(",
"metro",
",",
"poi",
")",
",",
"1"
] | Output tags of POI locations nearby tweet locations
Values will be sorted coming into reducer.
First element in each value tuple will be either 1 (osm POI) or 2 (geotweet).
Build a spatial index with POI records.
For each tweet lookup nearby POI, and emit tag values for predefined tags. | [
"Output",
"tags",
"of",
"POI",
"locations",
"nearby",
"tweet",
"locations"
] | python | train |
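Toy input/output sketch only (coordinates, tags and names are invented; POI_TAGS and POI_DISTANCE come from the module): OSM nodes arrive first and populate the spatial index, then each tweet looks up nearby named POIs and one count is emitted per distinct name.
values = [
    (1, (-122.681, 45.523), {"name": "Powell's Books", "shop": "books"}),  # OSM POI node
    (2, (-122.680, 45.523), "tweet payload"),                              # geotweet nearby
]
# for (metro, poi_name), count in job.reducer_metro("Portland", values):
#     print(metro, poi_name, count)   # -> Portland Powell's Books 1 (if "shop" is in POI_TAGS)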
MacHu-GWU/angora-project | angora/dtypes/dicttree.py | https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/dtypes/dicttree.py#L287-L302 | def k_depth(d, depth, _counter=1):
"""Iterate keys on specific depth.
depth has to be greater equal than 0.
Usage reference see :meth:`DictTree.kv_depth()<DictTree.kv_depth>`
"""
if depth == 0:
yield d[_meta]["_rootname"]
else:
if _counter == depth:
for key in DictTree.k(d):
yield key
else:
_counter += 1
for node in DictTree.v(d):
for key in DictTree.k_depth(node, depth, _counter):
yield key | [
"def",
"k_depth",
"(",
"d",
",",
"depth",
",",
"_counter",
"=",
"1",
")",
":",
"if",
"depth",
"==",
"0",
":",
"yield",
"d",
"[",
"_meta",
"]",
"[",
"\"_rootname\"",
"]",
"else",
":",
"if",
"_counter",
"==",
"depth",
":",
"for",
"key",
"in",
"DictTree",
".",
"k",
"(",
"d",
")",
":",
"yield",
"key",
"else",
":",
"_counter",
"+=",
"1",
"for",
"node",
"in",
"DictTree",
".",
"v",
"(",
"d",
")",
":",
"for",
"key",
"in",
"DictTree",
".",
"k_depth",
"(",
"node",
",",
"depth",
",",
"_counter",
")",
":",
"yield",
"key"
] | Iterate keys on specific depth.
depth has to be greater equal than 0.
Usage reference see :meth:`DictTree.kv_depth()<DictTree.kv_depth>` | [
"Iterate",
"keys",
"on",
"specific",
"depth",
".",
"depth",
"has",
"to",
"be",
"greater",
"equal",
"than",
"0",
".",
"Usage",
"reference",
"see",
":",
"meth",
":",
"DictTree",
".",
"kv_depth",
"()",
"<DictTree",
".",
"kv_depth",
">"
] | python | train |
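A self-contained analogue on a plain nested dict (the real DictTree keeps bookkeeping under a _meta key and treats depth 0 as the root name, so this only shows the shape of the recursion):
def keys_at_depth(d, depth):
    # Yield the keys found exactly `depth` levels below the given mapping.
    if depth == 0:
        yield from d.keys()
    else:
        for value in d.values():
            if isinstance(value, dict):
                yield from keys_at_depth(value, depth - 1)
tree = {"usa": {"ny": {"nyc": {}}, "ca": {"la": {}, "sf": {}}}}
print(list(keys_at_depth(tree, 2)))  # ['nyc', 'la', 'sf']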
johnnoone/json-spec | src/jsonspec/validators/factorize.py | https://github.com/johnnoone/json-spec/blob/f91981724cea0c366bd42a6670eb07bbe31c0e0c/src/jsonspec/validators/factorize.py#L104-L131 | def register(compiler=None, spec=None):
"""
Expose compiler to factory.
:param compiler: the callable to expose
:type compiler: callable
:param spec: name of the spec
:type spec: str
It can be used as a decorator::
@register(spec='my:first:spec')
def my_compiler(schema, pointer, context):
return Validator(schema)
or as a function::
def my_compiler(schema, pointer, context):
return Validator(schema)
register(my_compiler, 'my:second:spec')
"""
if not spec:
raise CompilationError('Spec is required')
if not compiler:
return partial(register, spec=spec)
return Factory.register(spec, compiler) | [
"def",
"register",
"(",
"compiler",
"=",
"None",
",",
"spec",
"=",
"None",
")",
":",
"if",
"not",
"spec",
":",
"raise",
"CompilationError",
"(",
"'Spec is required'",
")",
"if",
"not",
"compiler",
":",
"return",
"partial",
"(",
"register",
",",
"spec",
"=",
"spec",
")",
"return",
"Factory",
".",
"register",
"(",
"spec",
",",
"compiler",
")"
] | Expose compiler to factory.
:param compiler: the callable to expose
:type compiler: callable
:param spec: name of the spec
:type spec: str
It can be used as a decorator::
@register(spec='my:first:spec')
def my_compiler(schema, pointer, context):
return Validator(schema)
or as a function::
def my_compiler(schema, pointer, context):
return Validator(schema)
register(my_compiler, 'my:second:spec') | [
"Expose",
"compiler",
"to",
"factory",
"."
] | python | train |
apache/incubator-mxnet | python/mxnet/libinfo.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/libinfo.py#L79-L110 | def find_include_path():
"""Find MXNet included header files.
Returns
-------
incl_path : string
Path to the header files.
"""
incl_from_env = os.environ.get('MXNET_INCLUDE_PATH')
if incl_from_env:
if os.path.isdir(incl_from_env):
if not os.path.isabs(incl_from_env):
logging.warning("MXNET_INCLUDE_PATH should be an absolute path, instead of: %s",
incl_from_env)
else:
return incl_from_env
else:
logging.warning("MXNET_INCLUDE_PATH '%s' doesn't exist", incl_from_env)
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
# include path in pip package
pip_incl_path = os.path.join(curr_path, 'include/')
if os.path.isdir(pip_incl_path):
return pip_incl_path
else:
# include path if build from source
src_incl_path = os.path.join(curr_path, '../../include/')
if os.path.isdir(src_incl_path):
return src_incl_path
else:
raise RuntimeError('Cannot find the MXNet include path in either ' + pip_incl_path +
' or ' + src_incl_path + '\n') | [
"def",
"find_include_path",
"(",
")",
":",
"incl_from_env",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'MXNET_INCLUDE_PATH'",
")",
"if",
"incl_from_env",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"incl_from_env",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"incl_from_env",
")",
":",
"logging",
".",
"warning",
"(",
"\"MXNET_INCLUDE_PATH should be an absolute path, instead of: %s\"",
",",
"incl_from_env",
")",
"else",
":",
"return",
"incl_from_env",
"else",
":",
"logging",
".",
"warning",
"(",
"\"MXNET_INCLUDE_PATH '%s' doesn't exist\"",
",",
"incl_from_env",
")",
"curr_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"__file__",
")",
")",
")",
"# include path in pip package",
"pip_incl_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"curr_path",
",",
"'include/'",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"pip_incl_path",
")",
":",
"return",
"pip_incl_path",
"else",
":",
"# include path if build from source",
"src_incl_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"curr_path",
",",
"'../../include/'",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"src_incl_path",
")",
":",
"return",
"src_incl_path",
"else",
":",
"raise",
"RuntimeError",
"(",
"'Cannot find the MXNet include path in either '",
"+",
"pip_incl_path",
"+",
"' or '",
"+",
"src_incl_path",
"+",
"'\\n'",
")"
] | Find MXNet included header files.
Returns
-------
incl_path : string
Path to the header files. | [
"Find",
"MXNet",
"included",
"header",
"files",
"."
] | python | train |
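Typical use (the override path is a placeholder): point MXNET_INCLUDE_PATH at an absolute directory, or let the lookup fall back to the pip/source locations described above.
import os
os.environ['MXNET_INCLUDE_PATH'] = '/opt/mxnet/include/'   # optional override, must be absolute
from mxnet.libinfo import find_include_path
print(find_include_path())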
amzn/ion-python | amazon/ion/reader_text.py | https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/reader_text.py#L654-L708 | def _numeric_handler_factory(charset, transition, assertion, illegal_before_underscore, parse_func,
illegal_at_end=(None,), ion_type=None, append_first_if_not=None, first_char=None):
"""Generates a handler co-routine which tokenizes a numeric component (a token or sub-token).
Args:
charset (sequence): Set of ordinals of legal characters for this numeric component.
transition (callable): Called upon termination of this component (i.e. when a character not in ``charset`` is
found). Accepts the previous character ordinal, the current character ordinal, the current context, and the
previous transition. Returns a Transition if the component ends legally; otherwise, raises an error.
assertion (callable): Accepts the first character's ordinal and the current context. Returns True if this is
a legal start to the component.
illegal_before_underscore (sequence): Set of ordinals of illegal characters to precede an underscore for this
component.
parse_func (callable): Called upon ending the numeric value. Accepts the current token value and returns a
thunk that lazily parses the token.
illegal_at_end (Optional[sequence]): Set of ordinals of characters that may not legally end the value.
ion_type (Optional[IonType]): The type of the value if it were to end on this component.
append_first_if_not (Optional[int]): The ordinal of a character that should not be appended to the token if
it occurs first in this component (e.g. an underscore in many cases).
first_char (Optional[int]): The ordinal of the character that should be appended instead of the character that
occurs first in this component. This is useful for preparing the token for parsing in the case where a
particular character is peculiar to the Ion format (e.g. 'd' to denote the exponent of a decimal value
should be replaced with 'e' for compatibility with python's Decimal type).
"""
@coroutine
def numeric_handler(c, ctx):
assert assertion(c, ctx)
if ion_type is not None:
ctx.set_ion_type(ion_type)
val = ctx.value
if c != append_first_if_not:
first = c if first_char is None else first_char
val.append(first)
prev = c
c, self = yield
trans = ctx.immediate_transition(self)
while True:
if _ends_value(c):
if prev == _UNDERSCORE or prev in illegal_at_end:
_illegal_character(c, ctx, '%s at end of number.' % (_chr(prev),))
trans = ctx.event_transition(IonThunkEvent, IonEventType.SCALAR, ctx.ion_type, parse_func(ctx.value))
if c == _SLASH:
trans = ctx.immediate_transition(_number_slash_end_handler(c, ctx, trans))
else:
if c == _UNDERSCORE:
if prev == _UNDERSCORE or prev in illegal_before_underscore:
_illegal_character(c, ctx, 'Underscore after %s.' % (_chr(prev),))
else:
if c not in charset:
trans = transition(prev, c, ctx, trans)
else:
val.append(c)
prev = c
c, _ = yield trans
return numeric_handler | [
"def",
"_numeric_handler_factory",
"(",
"charset",
",",
"transition",
",",
"assertion",
",",
"illegal_before_underscore",
",",
"parse_func",
",",
"illegal_at_end",
"=",
"(",
"None",
",",
")",
",",
"ion_type",
"=",
"None",
",",
"append_first_if_not",
"=",
"None",
",",
"first_char",
"=",
"None",
")",
":",
"@",
"coroutine",
"def",
"numeric_handler",
"(",
"c",
",",
"ctx",
")",
":",
"assert",
"assertion",
"(",
"c",
",",
"ctx",
")",
"if",
"ion_type",
"is",
"not",
"None",
":",
"ctx",
".",
"set_ion_type",
"(",
"ion_type",
")",
"val",
"=",
"ctx",
".",
"value",
"if",
"c",
"!=",
"append_first_if_not",
":",
"first",
"=",
"c",
"if",
"first_char",
"is",
"None",
"else",
"first_char",
"val",
".",
"append",
"(",
"first",
")",
"prev",
"=",
"c",
"c",
",",
"self",
"=",
"yield",
"trans",
"=",
"ctx",
".",
"immediate_transition",
"(",
"self",
")",
"while",
"True",
":",
"if",
"_ends_value",
"(",
"c",
")",
":",
"if",
"prev",
"==",
"_UNDERSCORE",
"or",
"prev",
"in",
"illegal_at_end",
":",
"_illegal_character",
"(",
"c",
",",
"ctx",
",",
"'%s at end of number.'",
"%",
"(",
"_chr",
"(",
"prev",
")",
",",
")",
")",
"trans",
"=",
"ctx",
".",
"event_transition",
"(",
"IonThunkEvent",
",",
"IonEventType",
".",
"SCALAR",
",",
"ctx",
".",
"ion_type",
",",
"parse_func",
"(",
"ctx",
".",
"value",
")",
")",
"if",
"c",
"==",
"_SLASH",
":",
"trans",
"=",
"ctx",
".",
"immediate_transition",
"(",
"_number_slash_end_handler",
"(",
"c",
",",
"ctx",
",",
"trans",
")",
")",
"else",
":",
"if",
"c",
"==",
"_UNDERSCORE",
":",
"if",
"prev",
"==",
"_UNDERSCORE",
"or",
"prev",
"in",
"illegal_before_underscore",
":",
"_illegal_character",
"(",
"c",
",",
"ctx",
",",
"'Underscore after %s.'",
"%",
"(",
"_chr",
"(",
"prev",
")",
",",
")",
")",
"else",
":",
"if",
"c",
"not",
"in",
"charset",
":",
"trans",
"=",
"transition",
"(",
"prev",
",",
"c",
",",
"ctx",
",",
"trans",
")",
"else",
":",
"val",
".",
"append",
"(",
"c",
")",
"prev",
"=",
"c",
"c",
",",
"_",
"=",
"yield",
"trans",
"return",
"numeric_handler"
] | Generates a handler co-routine which tokenizes a numeric component (a token or sub-token).
Args:
charset (sequence): Set of ordinals of legal characters for this numeric component.
transition (callable): Called upon termination of this component (i.e. when a character not in ``charset`` is
found). Accepts the previous character ordinal, the current character ordinal, the current context, and the
previous transition. Returns a Transition if the component ends legally; otherwise, raises an error.
assertion (callable): Accepts the first character's ordinal and the current context. Returns True if this is
a legal start to the component.
illegal_before_underscore (sequence): Set of ordinals of illegal characters to precede an underscore for this
component.
parse_func (callable): Called upon ending the numeric value. Accepts the current token value and returns a
thunk that lazily parses the token.
illegal_at_end (Optional[sequence]): Set of ordinals of characters that may not legally end the value.
ion_type (Optional[IonType]): The type of the value if it were to end on this component.
append_first_if_not (Optional[int]): The ordinal of a character that should not be appended to the token if
it occurs first in this component (e.g. an underscore in many cases).
first_char (Optional[int]): The ordinal of the character that should be appended instead of the character that
occurs first in this component. This is useful for preparing the token for parsing in the case where a
particular character is peculiar to the Ion format (e.g. 'd' to denote the exponent of a decimal value
should be replaced with 'e' for compatibility with python's Decimal type). | [
"Generates",
"a",
"handler",
"co",
"-",
"routine",
"which",
"tokenizes",
"a",
"numeric",
"component",
"(",
"a",
"token",
"or",
"sub",
"-",
"token",
")",
"."
] | python | train |
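The surrounding tokenizer machinery (contexts, transitions) is internal to the module, but the primed-coroutine pattern the factory returns can be sketched standalone (simplified, not the actual implementation):
def coroutine(func):
    # Prime a generator-based coroutine so it is ready to accept .send().
    def start(*args, **kwargs):
        gen = func(*args, **kwargs)
        next(gen)
        return gen
    return start
@coroutine
def digit_accumulator():
    # Accept characters one at a time and keep the legal ones, the way the
    # real handler appends characters from its charset to ctx.value.
    collected = []
    state = ''
    while True:
        ch = yield state
        if ch.isdigit():
            collected.append(ch)
        state = ''.join(collected)
acc = digit_accumulator()
for ch in '1_23':
    state = acc.send(ch)
print(state)  # '123'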
spyder-ide/spyder | spyder/plugins/findinfiles/plugin.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/findinfiles/plugin.py#L98-L113 | def findinfiles_callback(self):
"""Find in files callback"""
widget = QApplication.focusWidget()
if not self.ismaximized:
self.dockwidget.setVisible(True)
self.dockwidget.raise_()
text = ''
try:
if widget.has_selected_text():
text = widget.get_selected_text()
except AttributeError:
# This is not a text widget deriving from TextEditBaseWidget
pass
self.findinfiles.set_search_text(text)
if text:
self.findinfiles.find() | [
"def",
"findinfiles_callback",
"(",
"self",
")",
":",
"widget",
"=",
"QApplication",
".",
"focusWidget",
"(",
")",
"if",
"not",
"self",
".",
"ismaximized",
":",
"self",
".",
"dockwidget",
".",
"setVisible",
"(",
"True",
")",
"self",
".",
"dockwidget",
".",
"raise_",
"(",
")",
"text",
"=",
"''",
"try",
":",
"if",
"widget",
".",
"has_selected_text",
"(",
")",
":",
"text",
"=",
"widget",
".",
"get_selected_text",
"(",
")",
"except",
"AttributeError",
":",
"# This is not a text widget deriving from TextEditBaseWidget\r",
"pass",
"self",
".",
"findinfiles",
".",
"set_search_text",
"(",
"text",
")",
"if",
"text",
":",
"self",
".",
"findinfiles",
".",
"find",
"(",
")"
] | Find in files callback | [
"Find",
"in",
"files",
"callback"
] | python | train |
joeyespo/path-and-address | path_and_address/parsing.py | https://github.com/joeyespo/path-and-address/blob/f8193a09f4b785574d920e8a2aeeb55ea6ff4e20/path_and_address/parsing.py#L4-L18 | def resolve(path_or_address=None, address=None, *ignored):
"""
Returns (path, address) based on consecutive optional arguments,
[path] [address].
"""
if path_or_address is None or address is not None:
return path_or_address, address
path = None
if split_address(path_or_address)[1] is not None:
address = path_or_address
else:
path = path_or_address
return path, address | [
"def",
"resolve",
"(",
"path_or_address",
"=",
"None",
",",
"address",
"=",
"None",
",",
"*",
"ignored",
")",
":",
"if",
"path_or_address",
"is",
"None",
"or",
"address",
"is",
"not",
"None",
":",
"return",
"path_or_address",
",",
"address",
"path",
"=",
"None",
"if",
"split_address",
"(",
"path_or_address",
")",
"[",
"1",
"]",
"is",
"not",
"None",
":",
"address",
"=",
"path_or_address",
"else",
":",
"path",
"=",
"path_or_address",
"return",
"path",
",",
"address"
] | Returns (path, address) based on consecutive optional arguments,
[path] [address]. | [
"Returns",
"(",
"path",
"address",
")",
"based",
"on",
"consecutive",
"optional",
"arguments",
"[",
"path",
"]",
"[",
"address",
"]",
"."
] | python | train |
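Illustrative calls (behavior inferred from the docstring and the split_address check above):
>>> resolve('index.html')             # looks like a path
('index.html', None)
>>> resolve('0.0.0.0:5000')           # looks like an address
(None, '0.0.0.0:5000')
>>> resolve('docs', 'localhost:80')   # both given explicitly
('docs', 'localhost:80')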
ajslater/picopt | picopt/cli.py | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/cli.py#L126-L170 | def process_arguments(arguments):
"""Recompute special cases for input arguments."""
Settings.update(arguments)
Settings.config_program_reqs(PROGRAMS)
Settings.verbose = arguments.verbose + 1
Settings.paths = set(arguments.paths)
if arguments.formats == DEFAULT_FORMATS:
Settings.formats = arguments.to_png_formats | \
jpeg.FORMATS | gif.FORMATS
else:
Settings.formats = set(
arguments.formats.upper().split(FORMAT_DELIMETER))
if arguments.comics:
Settings.formats = Settings.formats | comic.FORMATS
if arguments.optimize_after is not None:
try:
after_dt = dateutil.parser.parse(arguments.optimize_after)
arguments.optimize_after = time.mktime(after_dt.timetuple())
except Exception as ex:
print(ex)
print('Could not parse date to optimize after.')
exit(1)
if arguments.jobs < 1:
Settings.jobs = 1
# Make a rough guess about whether or not to invoke multithreading
# jpegrescan '-t' uses three threads
# one-off multithread switch because this is the only one right now
files_in_paths = 0
non_file_in_paths = False
for filename in arguments.paths:
if os.path.isfile(filename):
files_in_paths += 1
else:
non_file_in_paths = True
Settings.jpegrescan_multithread = not non_file_in_paths and \
Settings.jobs - (files_in_paths*3) > -1
return arguments | [
"def",
"process_arguments",
"(",
"arguments",
")",
":",
"Settings",
".",
"update",
"(",
"arguments",
")",
"Settings",
".",
"config_program_reqs",
"(",
"PROGRAMS",
")",
"Settings",
".",
"verbose",
"=",
"arguments",
".",
"verbose",
"+",
"1",
"Settings",
".",
"paths",
"=",
"set",
"(",
"arguments",
".",
"paths",
")",
"if",
"arguments",
".",
"formats",
"==",
"DEFAULT_FORMATS",
":",
"Settings",
".",
"formats",
"=",
"arguments",
".",
"to_png_formats",
"|",
"jpeg",
".",
"FORMATS",
"|",
"gif",
".",
"FORMATS",
"else",
":",
"Settings",
".",
"formats",
"=",
"set",
"(",
"arguments",
".",
"formats",
".",
"upper",
"(",
")",
".",
"split",
"(",
"FORMAT_DELIMETER",
")",
")",
"if",
"arguments",
".",
"comics",
":",
"Settings",
".",
"formats",
"=",
"Settings",
".",
"formats",
"|",
"comic",
".",
"FORMATS",
"if",
"arguments",
".",
"optimize_after",
"is",
"not",
"None",
":",
"try",
":",
"after_dt",
"=",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"arguments",
".",
"optimize_after",
")",
"arguments",
".",
"optimize_after",
"=",
"time",
".",
"mktime",
"(",
"after_dt",
".",
"timetuple",
"(",
")",
")",
"except",
"Exception",
"as",
"ex",
":",
"print",
"(",
"ex",
")",
"print",
"(",
"'Could not parse date to optimize after.'",
")",
"exit",
"(",
"1",
")",
"if",
"arguments",
".",
"jobs",
"<",
"1",
":",
"Settings",
".",
"jobs",
"=",
"1",
"# Make a rough guess about weather or not to invoke multithreding",
"# jpegrescan '-t' uses three threads",
"# one off multithread switch bcaseu this is the only one right now",
"files_in_paths",
"=",
"0",
"non_file_in_paths",
"=",
"False",
"for",
"filename",
"in",
"arguments",
".",
"paths",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"files_in_paths",
"+=",
"1",
"else",
":",
"non_file_in_paths",
"=",
"True",
"Settings",
".",
"jpegrescan_multithread",
"=",
"not",
"non_file_in_paths",
"and",
"Settings",
".",
"jobs",
"-",
"(",
"files_in_paths",
"*",
"3",
")",
">",
"-",
"1",
"return",
"arguments"
] | Recompute special cases for input arguments. | [
"Recompute",
"special",
"cases",
"for",
"input",
"arguments",
"."
] | python | train |
Tanganelli/CoAPthon3 | coapthon/messages/message.py | https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/messages/message.py#L114-L129 | def token(self, value):
"""
Set the Token of the message.
:type value: String
:param value: the Token
:raise AttributeError: if value is longer than 256
"""
if value is None:
self._token = value
return
if not isinstance(value, str):
value = str(value)
if len(value) > 256:
raise AttributeError
self._token = value | [
"def",
"token",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"self",
".",
"_token",
"=",
"value",
"return",
"if",
"not",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"value",
"=",
"str",
"(",
"value",
")",
"if",
"len",
"(",
"value",
")",
">",
"256",
":",
"raise",
"AttributeError",
"self",
".",
"_token",
"=",
"value"
] | Set the Token of the message.
:type value: String
:param value: the Token
:raise AttributeError: if value is longer than 256 | [
"Set",
"the",
"Token",
"of",
"the",
"message",
"."
] | python | train |
saltstack/salt | salt/modules/gem.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gem.py#L276-L310 | def list_(prefix='', ruby=None, runas=None, gem_bin=None):
'''
List locally installed gems.
:param prefix: string :
Only list gems when the name matches this prefix.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list
'''
cmd = ['list']
if prefix:
cmd.append(prefix)
stdout = _gem(cmd,
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(stdout, '\n'):
match = re.match(r'^([^ ]+) \((.+)\)', line)
if match:
gem = match.group(1)
versions = match.group(2).split(', ')
ret[gem] = versions
return ret | [
"def",
"list_",
"(",
"prefix",
"=",
"''",
",",
"ruby",
"=",
"None",
",",
"runas",
"=",
"None",
",",
"gem_bin",
"=",
"None",
")",
":",
"cmd",
"=",
"[",
"'list'",
"]",
"if",
"prefix",
":",
"cmd",
".",
"append",
"(",
"prefix",
")",
"stdout",
"=",
"_gem",
"(",
"cmd",
",",
"ruby",
",",
"gem_bin",
"=",
"gem_bin",
",",
"runas",
"=",
"runas",
")",
"ret",
"=",
"{",
"}",
"for",
"line",
"in",
"salt",
".",
"utils",
".",
"itertools",
".",
"split",
"(",
"stdout",
",",
"'\\n'",
")",
":",
"match",
"=",
"re",
".",
"match",
"(",
"r'^([^ ]+) \\((.+)\\)'",
",",
"line",
")",
"if",
"match",
":",
"gem",
"=",
"match",
".",
"group",
"(",
"1",
")",
"versions",
"=",
"match",
".",
"group",
"(",
"2",
")",
".",
"split",
"(",
"', '",
")",
"ret",
"[",
"gem",
"]",
"=",
"versions",
"return",
"ret"
] | List locally installed gems.
:param prefix: string :
Only list gems when the name matches this prefix.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list | [
"List",
"locally",
"installed",
"gems",
"."
] | python | train |
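Standalone sketch of the parsing step (the sample output lines are invented): each `gem list` line such as `rake (13.0.6, 12.3.3)` becomes a name -> versions entry.
import re
stdout = "rake (13.0.6, 12.3.3)\nbundler (2.4.10)"
ret = {}
for line in stdout.splitlines():
    match = re.match(r'^([^ ]+) \((.+)\)', line)
    if match:
        ret[match.group(1)] = match.group(2).split(', ')
print(ret)  # {'rake': ['13.0.6', '12.3.3'], 'bundler': ['2.4.10']}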
explosion/spaCy | spacy/language.py | https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L513-L524 | def preprocess_gold(self, docs_golds):
"""Can be called before training to pre-process gold data. By default,
it handles nonprojectivity and adds missing tags to the tag map.
docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects.
YIELDS (tuple): Tuples of preprocessed `Doc` and `GoldParse` objects.
"""
for name, proc in self.pipeline:
if hasattr(proc, "preprocess_gold"):
docs_golds = proc.preprocess_gold(docs_golds)
for doc, gold in docs_golds:
yield doc, gold | [
"def",
"preprocess_gold",
"(",
"self",
",",
"docs_golds",
")",
":",
"for",
"name",
",",
"proc",
"in",
"self",
".",
"pipeline",
":",
"if",
"hasattr",
"(",
"proc",
",",
"\"preprocess_gold\"",
")",
":",
"docs_golds",
"=",
"proc",
".",
"preprocess_gold",
"(",
"docs_golds",
")",
"for",
"doc",
",",
"gold",
"in",
"docs_golds",
":",
"yield",
"doc",
",",
"gold"
] | Can be called before training to pre-process gold data. By default,
it handles nonprojectivity and adds missing tags to the tag map.
docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects.
YIELDS (tuple): Tuples of preprocessed `Doc` and `GoldParse` objects. | [
"Can",
"be",
"called",
"before",
"training",
"to",
"pre",
"-",
"process",
"gold",
"data",
".",
"By",
"default",
"it",
"handles",
"nonprojectivity",
"and",
"adds",
"missing",
"tags",
"to",
"the",
"tag",
"map",
"."
] | python | train |
bwohlberg/sporco | sporco/util.py | https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/util.py#L836-L874 | def netgetdata(url, maxtry=3, timeout=10):
"""
Get content of a file via a URL.
Parameters
----------
url : string
URL of the file to be downloaded
maxtry : int, optional (default 3)
Maximum number of download retries
timeout : int, optional (default 10)
Timeout in seconds for blocking operations
Returns
-------
str : io.BytesIO
Buffered I/O stream
Raises
------
urlerror.URLError (urllib2.URLError in Python 2,
urllib.error.URLError in Python 3)
If the file cannot be downloaded
"""
err = ValueError('maxtry parameter should be greater than zero')
for ntry in range(maxtry):
try:
rspns = urlrequest.urlopen(url, timeout=timeout)
cntnt = rspns.read()
break
except urlerror.URLError as e:
err = e
if not isinstance(e.reason, socket.timeout):
raise
else:
raise err
return io.BytesIO(cntnt) | [
"def",
"netgetdata",
"(",
"url",
",",
"maxtry",
"=",
"3",
",",
"timeout",
"=",
"10",
")",
":",
"err",
"=",
"ValueError",
"(",
"'maxtry parameter should be greater than zero'",
")",
"for",
"ntry",
"in",
"range",
"(",
"maxtry",
")",
":",
"try",
":",
"rspns",
"=",
"urlrequest",
".",
"urlopen",
"(",
"url",
",",
"timeout",
"=",
"timeout",
")",
"cntnt",
"=",
"rspns",
".",
"read",
"(",
")",
"break",
"except",
"urlerror",
".",
"URLError",
"as",
"e",
":",
"err",
"=",
"e",
"if",
"not",
"isinstance",
"(",
"e",
".",
"reason",
",",
"socket",
".",
"timeout",
")",
":",
"raise",
"else",
":",
"raise",
"err",
"return",
"io",
".",
"BytesIO",
"(",
"cntnt",
")"
] | Get content of a file via a URL.
Parameters
----------
url : string
URL of the file to be downloaded
maxtry : int, optional (default 3)
Maximum number of download retries
timeout : int, optional (default 10)
Timeout in seconds for blocking operations
Returns
-------
str : io.BytesIO
Buffered I/O stream
Raises
------
urlerror.URLError (urllib2.URLError in Python 2,
urllib.error.URLError in Python 3)
If the file cannot be downloaded | [
"Get",
"content",
"of",
"a",
"file",
"via",
"a",
"URL",
"."
] | python | train |
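Illustrative use (the URL is a placeholder): retry up to three times on timeouts, then read the buffered bytes.
buf = netgetdata('https://example.com/data.bin', maxtry=3, timeout=10)
payload = buf.read()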
molmod/molmod | molmod/pairff.py | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/pairff.py#L67-L89 | def update_coordinates(self, coordinates=None):
"""Update the coordinates (and derived quantities)
Argument:
coordinates -- new Cartesian coordinates of the system
"""
if coordinates is not None:
self.coordinates = coordinates
self.numc = len(self.coordinates)
self.distances = np.zeros((self.numc, self.numc), float)
self.deltas = np.zeros((self.numc, self.numc, 3), float)
self.directions = np.zeros((self.numc, self.numc, 3), float)
self.dirouters = np.zeros((self.numc, self.numc, 3, 3), float)
for index1, coordinate1 in enumerate(self.coordinates):
for index2, coordinate2 in enumerate(self.coordinates):
delta = coordinate1 - coordinate2
self.deltas[index1, index2] = delta
distance = np.linalg.norm(delta)
self.distances[index1, index2] = distance
if index1 != index2:
tmp = delta/distance
self.directions[index1, index2] = tmp
self.dirouters[index1, index2] = np.outer(tmp, tmp) | [
"def",
"update_coordinates",
"(",
"self",
",",
"coordinates",
"=",
"None",
")",
":",
"if",
"coordinates",
"is",
"not",
"None",
":",
"self",
".",
"coordinates",
"=",
"coordinates",
"self",
".",
"numc",
"=",
"len",
"(",
"self",
".",
"coordinates",
")",
"self",
".",
"distances",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"numc",
",",
"self",
".",
"numc",
")",
",",
"float",
")",
"self",
".",
"deltas",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"numc",
",",
"self",
".",
"numc",
",",
"3",
")",
",",
"float",
")",
"self",
".",
"directions",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"numc",
",",
"self",
".",
"numc",
",",
"3",
")",
",",
"float",
")",
"self",
".",
"dirouters",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"numc",
",",
"self",
".",
"numc",
",",
"3",
",",
"3",
")",
",",
"float",
")",
"for",
"index1",
",",
"coordinate1",
"in",
"enumerate",
"(",
"self",
".",
"coordinates",
")",
":",
"for",
"index2",
",",
"coordinate2",
"in",
"enumerate",
"(",
"self",
".",
"coordinates",
")",
":",
"delta",
"=",
"coordinate1",
"-",
"coordinate2",
"self",
".",
"deltas",
"[",
"index1",
",",
"index2",
"]",
"=",
"delta",
"distance",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"delta",
")",
"self",
".",
"distances",
"[",
"index1",
",",
"index2",
"]",
"=",
"distance",
"if",
"index1",
"!=",
"index2",
":",
"tmp",
"=",
"delta",
"/",
"distance",
"self",
".",
"directions",
"[",
"index1",
",",
"index2",
"]",
"=",
"tmp",
"self",
".",
"dirouters",
"[",
"index1",
",",
"index2",
"]",
"=",
"np",
".",
"outer",
"(",
"tmp",
",",
"tmp",
")"
] | Update the coordinates (and derived quantities)
Argument:
coordinates -- new Cartesian coordinates of the system | [
"Update",
"the",
"coordinates",
"(",
"and",
"derived",
"quantities",
")"
] | python | train |
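A vectorized NumPy sketch of the same quantities (toy coordinates, not from the repo's tests): the method fills pairwise difference vectors and distances like these.
import numpy as np
coords = np.array([[0.0, 0.0, 0.0],
                   [1.0, 0.0, 0.0],
                   [2.0, 0.0, 0.0]])
deltas = coords[:, None, :] - coords[None, :, :]   # deltas[i, j] = r_i - r_j
distances = np.linalg.norm(deltas, axis=-1)        # pairwise distance matrix
print(distances[0, 2])                             # 2.0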
awslabs/sockeye | sockeye/image_captioning/data_io.py | https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/image_captioning/data_io.py#L168-L280 | def get_training_image_text_data_iters(source_root: str,
source: str, target: str,
validation_source_root: str,
validation_source: str, validation_target: str,
vocab_target: vocab.Vocab,
vocab_target_path: Optional[str],
batch_size: int,
batch_by_words: bool,
batch_num_devices: int,
source_image_size: tuple,
max_seq_len_target: int,
bucketing: bool,
bucket_width: int,
use_feature_loader: bool = False,
preload_features: bool = False) -> Tuple['ParallelSampleIter',
'ParallelSampleIter',
'DataConfig', 'DataInfo']:
"""
Returns data iterators for training and validation data.
:param source_root: Path to source images since the file in source contains relative paths.
:param source: Path to source training data.
:param target: Path to target training data.
:param validation_source_root: Path to validation source images since the file in validation_source contains relative paths.
:param validation_source: Path to source validation data.
:param validation_target: Path to target validation data.
:param vocab_target: Target vocabulary.
:param vocab_target_path: Path to target vocabulary.
:param batch_size: Batch size.
:param batch_by_words: Size batches by words rather than sentences.
:param batch_num_devices: Number of devices batches will be parallelized across.
:param source_image_size: size to resize the image to (for iterator)
:param max_seq_len_target: Maximum target sequence length.
:param bucketing: Whether to use bucketing.
:param bucket_width: Size of buckets.
:param use_feature_loader: If True, features are loaded instead of images.
:param preload_features: If use_feature_loader is True, this enables loading all the features into memory
:return: Tuple of (training data iterator, validation data iterator, data config).
"""
logger.info("===============================")
logger.info("Creating training data iterator")
logger.info("===============================")
# define buckets
buckets = define_empty_source_parallel_buckets(max_seq_len_target, bucket_width) if bucketing else [
(0, max_seq_len_target)]
source_images = [FileListReader(source, source_root)]
target_sentences = SequenceReader(target, vocab_target, add_bos=True)
# 2. pass: Get data statistics only on target (source not considered)
data_statistics = get_data_statistics(source_readers=None,
target_reader=target_sentences,
buckets=buckets,
length_ratio_mean=1.0,
length_ratio_std=1.0,
source_vocabs=None,
target_vocab=vocab_target)
bucket_batch_sizes = define_bucket_batch_sizes(buckets,
batch_size,
batch_by_words,
batch_num_devices,
data_statistics.average_len_target_per_bucket)
data_statistics.log(bucket_batch_sizes)
data_loader = RawListTextDatasetLoader(buckets=buckets,
eos_id=vocab_target[C.EOS_SYMBOL],
pad_id=C.PAD_ID)
training_data = data_loader.load(source_images[0], target_sentences,
data_statistics.num_sents_per_bucket).fill_up(bucket_batch_sizes)
data_info = DataInfo(sources=source_images,
target=target,
source_vocabs=None,
target_vocab=vocab_target_path,
shared_vocab=False,
num_shards=1)
config_data = DataConfig(data_statistics=data_statistics,
max_seq_len_source=0,
max_seq_len_target=max_seq_len_target,
num_source_factors=len(source_images))
# Add useful stuff to config_data
config_data.source_root = source_root
config_data.validation_source_root = validation_source_root
config_data.use_feature_loader = use_feature_loader
train_iter = ImageTextSampleIter(data=training_data,
buckets=buckets,
batch_size=batch_size,
bucket_batch_sizes=bucket_batch_sizes,
image_size=source_image_size,
use_feature_loader=use_feature_loader,
preload_features=preload_features)
validation_iter = get_validation_image_text_data_iter(data_loader=data_loader,
validation_source_root=validation_source_root,
validation_source=validation_source,
validation_target=validation_target,
buckets=buckets,
bucket_batch_sizes=bucket_batch_sizes,
source_image_size=source_image_size,
vocab_target=vocab_target,
max_seq_len_target=max_seq_len_target,
batch_size=batch_size,
use_feature_loader=use_feature_loader,
preload_features=preload_features)
return train_iter, validation_iter, config_data, data_info | [
"def",
"get_training_image_text_data_iters",
"(",
"source_root",
":",
"str",
",",
"source",
":",
"str",
",",
"target",
":",
"str",
",",
"validation_source_root",
":",
"str",
",",
"validation_source",
":",
"str",
",",
"validation_target",
":",
"str",
",",
"vocab_target",
":",
"vocab",
".",
"Vocab",
",",
"vocab_target_path",
":",
"Optional",
"[",
"str",
"]",
",",
"batch_size",
":",
"int",
",",
"batch_by_words",
":",
"bool",
",",
"batch_num_devices",
":",
"int",
",",
"source_image_size",
":",
"tuple",
",",
"max_seq_len_target",
":",
"int",
",",
"bucketing",
":",
"bool",
",",
"bucket_width",
":",
"int",
",",
"use_feature_loader",
":",
"bool",
"=",
"False",
",",
"preload_features",
":",
"bool",
"=",
"False",
")",
"->",
"Tuple",
"[",
"'ParallelSampleIter'",
",",
"'ParallelSampleIter'",
",",
"'DataConfig'",
",",
"'DataInfo'",
"]",
":",
"logger",
".",
"info",
"(",
"\"===============================\"",
")",
"logger",
".",
"info",
"(",
"\"Creating training data iterator\"",
")",
"logger",
".",
"info",
"(",
"\"===============================\"",
")",
"# define buckets",
"buckets",
"=",
"define_empty_source_parallel_buckets",
"(",
"max_seq_len_target",
",",
"bucket_width",
")",
"if",
"bucketing",
"else",
"[",
"(",
"0",
",",
"max_seq_len_target",
")",
"]",
"source_images",
"=",
"[",
"FileListReader",
"(",
"source",
",",
"source_root",
")",
"]",
"target_sentences",
"=",
"SequenceReader",
"(",
"target",
",",
"vocab_target",
",",
"add_bos",
"=",
"True",
")",
"# 2. pass: Get data statistics only on target (source not considered)",
"data_statistics",
"=",
"get_data_statistics",
"(",
"source_readers",
"=",
"None",
",",
"target_reader",
"=",
"target_sentences",
",",
"buckets",
"=",
"buckets",
",",
"length_ratio_mean",
"=",
"1.0",
",",
"length_ratio_std",
"=",
"1.0",
",",
"source_vocabs",
"=",
"None",
",",
"target_vocab",
"=",
"vocab_target",
")",
"bucket_batch_sizes",
"=",
"define_bucket_batch_sizes",
"(",
"buckets",
",",
"batch_size",
",",
"batch_by_words",
",",
"batch_num_devices",
",",
"data_statistics",
".",
"average_len_target_per_bucket",
")",
"data_statistics",
".",
"log",
"(",
"bucket_batch_sizes",
")",
"data_loader",
"=",
"RawListTextDatasetLoader",
"(",
"buckets",
"=",
"buckets",
",",
"eos_id",
"=",
"vocab_target",
"[",
"C",
".",
"EOS_SYMBOL",
"]",
",",
"pad_id",
"=",
"C",
".",
"PAD_ID",
")",
"training_data",
"=",
"data_loader",
".",
"load",
"(",
"source_images",
"[",
"0",
"]",
",",
"target_sentences",
",",
"data_statistics",
".",
"num_sents_per_bucket",
")",
".",
"fill_up",
"(",
"bucket_batch_sizes",
")",
"data_info",
"=",
"DataInfo",
"(",
"sources",
"=",
"source_images",
",",
"target",
"=",
"target",
",",
"source_vocabs",
"=",
"None",
",",
"target_vocab",
"=",
"vocab_target_path",
",",
"shared_vocab",
"=",
"False",
",",
"num_shards",
"=",
"1",
")",
"config_data",
"=",
"DataConfig",
"(",
"data_statistics",
"=",
"data_statistics",
",",
"max_seq_len_source",
"=",
"0",
",",
"max_seq_len_target",
"=",
"max_seq_len_target",
",",
"num_source_factors",
"=",
"len",
"(",
"source_images",
")",
")",
"# Add useful stuff to config_data",
"config_data",
".",
"source_root",
"=",
"source_root",
"config_data",
".",
"validation_source_root",
"=",
"validation_source_root",
"config_data",
".",
"use_feature_loader",
"=",
"use_feature_loader",
"train_iter",
"=",
"ImageTextSampleIter",
"(",
"data",
"=",
"training_data",
",",
"buckets",
"=",
"buckets",
",",
"batch_size",
"=",
"batch_size",
",",
"bucket_batch_sizes",
"=",
"bucket_batch_sizes",
",",
"image_size",
"=",
"source_image_size",
",",
"use_feature_loader",
"=",
"use_feature_loader",
",",
"preload_features",
"=",
"preload_features",
")",
"validation_iter",
"=",
"get_validation_image_text_data_iter",
"(",
"data_loader",
"=",
"data_loader",
",",
"validation_source_root",
"=",
"validation_source_root",
",",
"validation_source",
"=",
"validation_source",
",",
"validation_target",
"=",
"validation_target",
",",
"buckets",
"=",
"buckets",
",",
"bucket_batch_sizes",
"=",
"bucket_batch_sizes",
",",
"source_image_size",
"=",
"source_image_size",
",",
"vocab_target",
"=",
"vocab_target",
",",
"max_seq_len_target",
"=",
"max_seq_len_target",
",",
"batch_size",
"=",
"batch_size",
",",
"use_feature_loader",
"=",
"use_feature_loader",
",",
"preload_features",
"=",
"preload_features",
")",
"return",
"train_iter",
",",
"validation_iter",
",",
"config_data",
",",
"data_info"
] | Returns data iterators for training and validation data.
:param source_root: Path to source images since the file in source contains relative paths.
:param source: Path to source training data.
:param target: Path to target training data.
:param validation_source_root: Path to validation source images since the file in validation_source contains relative paths.
:param validation_source: Path to source validation data.
:param validation_target: Path to target validation data.
:param vocab_target: Target vocabulary.
:param vocab_target_path: Path to target vocabulary.
:param batch_size: Batch size.
:param batch_by_words: Size batches by words rather than sentences.
:param batch_num_devices: Number of devices batches will be parallelized across.
:param source_image_size: size to resize the image to (for iterator)
:param max_seq_len_target: Maximum target sequence length.
:param bucketing: Whether to use bucketing.
:param bucket_width: Size of buckets.
:param use_feature_loader: If True, features are loaded instead of images.
:param preload_features: If use_feature_loader is True, this enables loading all the features into memory
:return: Tuple of (training data iterator, validation data iterator, data config). | [
"Returns",
"data",
"iterators",
"for",
"training",
"and",
"validation",
"data",
"."
] | python | train |
codelv/enaml-web | examples/simple_site/main.py | https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/examples/simple_site/main.py#L101-L112 | def _default_handlers(self):
""" Generate the handlers for this site """
static_path = os.path.abspath(os.path.join(os.path.dirname(__file__),"static"))
urls = [
(r"/static/(.*)", cyclone.web.StaticFileHandler, {"path": static_path}),
]
for p in self.pages:
handler = p.handler
handler.site = self
handler.page = p
urls.append((p.link.url,handler))
return urls | [
"def",
"_default_handlers",
"(",
"self",
")",
":",
"static_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"\"static\"",
")",
")",
"urls",
"=",
"[",
"(",
"r\"/static/(.*)\"",
",",
"cyclone",
".",
"web",
".",
"StaticFileHandler",
",",
"{",
"\"path\"",
":",
"static_path",
"}",
")",
",",
"]",
"for",
"p",
"in",
"self",
".",
"pages",
":",
"handler",
"=",
"p",
".",
"handler",
"handler",
".",
"site",
"=",
"self",
"handler",
".",
"page",
"=",
"p",
"urls",
".",
"append",
"(",
"(",
"p",
".",
"link",
".",
"url",
",",
"handler",
")",
")",
"return",
"urls"
] | Generate the handlers for this site | [
"Generate",
"the",
"handlers",
"for",
"this",
"site"
] | python | test |
jopohl/urh | src/urh/signalprocessing/ProtocolSniffer.py | https://github.com/jopohl/urh/blob/2eb33b125c8407964cd1092843cde5010eb88aae/src/urh/signalprocessing/ProtocolSniffer.py#L155-L203 | def __demodulate_data(self, data):
"""
Demodulates received IQ data and adds demodulated bits to messages
:param data:
:return:
"""
if len(data) == 0:
return
power_spectrum = data.real ** 2 + data.imag ** 2
is_above_noise = np.sqrt(np.mean(power_spectrum)) > self.signal.noise_threshold
if self.adaptive_noise and not is_above_noise:
self.signal.noise_threshold = 0.9 * self.signal.noise_threshold + 0.1 * np.sqrt(np.max(power_spectrum))
if is_above_noise:
self.__add_to_buffer(data)
self.pause_length = 0
if not self.__buffer_is_full():
return
else:
self.pause_length += len(data)
if self.pause_length < 10 * self.signal.bit_len:
self.__add_to_buffer(data)
if not self.__buffer_is_full():
return
if self.__current_buffer_index == 0:
return
# clear cache and start a new message
self.signal._fulldata = self.__buffer[0:self.__current_buffer_index]
self.__clear_buffer()
self.signal._qad = None
bit_len = self.signal.bit_len
if self.automatic_center:
self.signal.qad_center = AutoInterpretation.detect_center(self.signal.qad, max_size=150*self.signal.bit_len)
ppseq = grab_pulse_lens(self.signal.qad, self.signal.qad_center,
self.signal.tolerance, self.signal.modulation_type, self.signal.bit_len)
bit_data, pauses, bit_sample_pos = self._ppseq_to_bits(ppseq, bit_len, write_bit_sample_pos=False)
for bits, pause in zip(bit_data, pauses):
message = Message(bits, pause, bit_len=bit_len, message_type=self.default_message_type,
decoder=self.decoder)
self.messages.append(message)
self.message_sniffed.emit(len(self.messages) - 1) | [
"def",
"__demodulate_data",
"(",
"self",
",",
"data",
")",
":",
"if",
"len",
"(",
"data",
")",
"==",
"0",
":",
"return",
"power_spectrum",
"=",
"data",
".",
"real",
"**",
"2",
"+",
"data",
".",
"imag",
"**",
"2",
"is_above_noise",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"mean",
"(",
"power_spectrum",
")",
")",
">",
"self",
".",
"signal",
".",
"noise_threshold",
"if",
"self",
".",
"adaptive_noise",
"and",
"not",
"is_above_noise",
":",
"self",
".",
"signal",
".",
"noise_threshold",
"=",
"0.9",
"*",
"self",
".",
"signal",
".",
"noise_threshold",
"+",
"0.1",
"*",
"np",
".",
"sqrt",
"(",
"np",
".",
"max",
"(",
"power_spectrum",
")",
")",
"if",
"is_above_noise",
":",
"self",
".",
"__add_to_buffer",
"(",
"data",
")",
"self",
".",
"pause_length",
"=",
"0",
"if",
"not",
"self",
".",
"__buffer_is_full",
"(",
")",
":",
"return",
"else",
":",
"self",
".",
"pause_length",
"+=",
"len",
"(",
"data",
")",
"if",
"self",
".",
"pause_length",
"<",
"10",
"*",
"self",
".",
"signal",
".",
"bit_len",
":",
"self",
".",
"__add_to_buffer",
"(",
"data",
")",
"if",
"not",
"self",
".",
"__buffer_is_full",
"(",
")",
":",
"return",
"if",
"self",
".",
"__current_buffer_index",
"==",
"0",
":",
"return",
"# clear cache and start a new message",
"self",
".",
"signal",
".",
"_fulldata",
"=",
"self",
".",
"__buffer",
"[",
"0",
":",
"self",
".",
"__current_buffer_index",
"]",
"self",
".",
"__clear_buffer",
"(",
")",
"self",
".",
"signal",
".",
"_qad",
"=",
"None",
"bit_len",
"=",
"self",
".",
"signal",
".",
"bit_len",
"if",
"self",
".",
"automatic_center",
":",
"self",
".",
"signal",
".",
"qad_center",
"=",
"AutoInterpretation",
".",
"detect_center",
"(",
"self",
".",
"signal",
".",
"qad",
",",
"max_size",
"=",
"150",
"*",
"self",
".",
"signal",
".",
"bit_len",
")",
"ppseq",
"=",
"grab_pulse_lens",
"(",
"self",
".",
"signal",
".",
"qad",
",",
"self",
".",
"signal",
".",
"qad_center",
",",
"self",
".",
"signal",
".",
"tolerance",
",",
"self",
".",
"signal",
".",
"modulation_type",
",",
"self",
".",
"signal",
".",
"bit_len",
")",
"bit_data",
",",
"pauses",
",",
"bit_sample_pos",
"=",
"self",
".",
"_ppseq_to_bits",
"(",
"ppseq",
",",
"bit_len",
",",
"write_bit_sample_pos",
"=",
"False",
")",
"for",
"bits",
",",
"pause",
"in",
"zip",
"(",
"bit_data",
",",
"pauses",
")",
":",
"message",
"=",
"Message",
"(",
"bits",
",",
"pause",
",",
"bit_len",
"=",
"bit_len",
",",
"message_type",
"=",
"self",
".",
"default_message_type",
",",
"decoder",
"=",
"self",
".",
"decoder",
")",
"self",
".",
"messages",
".",
"append",
"(",
"message",
")",
"self",
".",
"message_sniffed",
".",
"emit",
"(",
"len",
"(",
"self",
".",
"messages",
")",
"-",
"1",
")"
] | Demodulates received IQ data and adds demodulated bits to messages
:param data:
:return: | [
"Demodulates",
"received",
"IQ",
"data",
"and",
"adds",
"demodulated",
"bits",
"to",
"messages",
":",
"param",
"data",
":",
":",
"return",
":"
] | python | train |
cvxopt/chompack | src/python/pybase/cholesky.py | https://github.com/cvxopt/chompack/blob/e07106b58b8055c34f6201e8c954482f86987833/src/python/pybase/cholesky.py#L5-L79 | def cholesky(X):
"""
Supernodal multifrontal Cholesky factorization:
.. math::
X = LL^T
where :math:`L` is lower-triangular. On exit, the argument :math:`X`
contains the Cholesky factor :math:`L`.
:param X: :py:class:`cspmatrix`
"""
assert isinstance(X, cspmatrix) and X.is_factor is False, "X must be a cspmatrix"
n = X.symb.n
snpost = X.symb.snpost
snptr = X.symb.snptr
chptr = X.symb.chptr
chidx = X.symb.chidx
relptr = X.symb.relptr
relidx = X.symb.relidx
blkptr = X.symb.blkptr
blkval = X.blkval
stack = []
for k in snpost:
nn = snptr[k+1]-snptr[k] # |Nk|
na = relptr[k+1]-relptr[k] # |Ak|
nj = na + nn
# build frontal matrix
F = matrix(0.0, (nj, nj))
lapack.lacpy(blkval, F, offsetA = blkptr[k], m = nj, n = nn, ldA = nj, uplo = 'L')
# add update matrices from children to frontal matrix
for i in range(chptr[k+1]-1,chptr[k]-1,-1):
Ui = stack.pop()
frontal_add_update(F, Ui, relidx, relptr, chidx[i])
# factor L_{Nk,Nk}
lapack.potrf(F, n = nn, ldA = nj)
# if supernode k is not a root node, compute and push update matrix onto stack
if na > 0:
# compute L_{Ak,Nk} := A_{Ak,Nk}*inv(L_{Nk,Nk}')
blas.trsm(F, F, m = na, n = nn, ldA = nj,
ldB = nj, offsetB = nn, transA = 'T', side = 'R')
# compute Uk = Uk - L_{Ak,Nk}*inv(D_{Nk,Nk})*L_{Ak,Nk}'
if nn == 1:
blas.syr(F, F, n = na, offsetx = nn, \
offsetA = nn*nj+nn, ldA = nj, alpha = -1.0)
else:
blas.syrk(F, F, k = nn, n = na, offsetA = nn, ldA = nj,
offsetC = nn*nj+nn, ldC = nj, alpha = -1.0, beta = 1.0)
# compute L_{Ak,Nk} := L_{Ak,Nk}*inv(L_{Nk,Nk})
blas.trsm(F, F, m = na, n = nn,\
ldA = nj, ldB = nj, offsetB = nn, side = 'R')
# add Uk to stack
Uk = matrix(0.0,(na,na))
lapack.lacpy(F, Uk, m = na, n = na, uplo = 'L', offsetA = nn*nj+nn, ldA = nj)
stack.append(Uk)
# copy the leading Nk columns of frontal matrix to blkval
lapack.lacpy(F, blkval, uplo = "L", offsetB = blkptr[k], m = nj, n = nn, ldB = nj)
X.is_factor = True
return | [
"def",
"cholesky",
"(",
"X",
")",
":",
"assert",
"isinstance",
"(",
"X",
",",
"cspmatrix",
")",
"and",
"X",
".",
"is_factor",
"is",
"False",
",",
"\"X must be a cspmatrix\"",
"n",
"=",
"X",
".",
"symb",
".",
"n",
"snpost",
"=",
"X",
".",
"symb",
".",
"snpost",
"snptr",
"=",
"X",
".",
"symb",
".",
"snptr",
"chptr",
"=",
"X",
".",
"symb",
".",
"chptr",
"chidx",
"=",
"X",
".",
"symb",
".",
"chidx",
"relptr",
"=",
"X",
".",
"symb",
".",
"relptr",
"relidx",
"=",
"X",
".",
"symb",
".",
"relidx",
"blkptr",
"=",
"X",
".",
"symb",
".",
"blkptr",
"blkval",
"=",
"X",
".",
"blkval",
"stack",
"=",
"[",
"]",
"for",
"k",
"in",
"snpost",
":",
"nn",
"=",
"snptr",
"[",
"k",
"+",
"1",
"]",
"-",
"snptr",
"[",
"k",
"]",
"# |Nk|",
"na",
"=",
"relptr",
"[",
"k",
"+",
"1",
"]",
"-",
"relptr",
"[",
"k",
"]",
"# |Ak|",
"nj",
"=",
"na",
"+",
"nn",
"# build frontal matrix",
"F",
"=",
"matrix",
"(",
"0.0",
",",
"(",
"nj",
",",
"nj",
")",
")",
"lapack",
".",
"lacpy",
"(",
"blkval",
",",
"F",
",",
"offsetA",
"=",
"blkptr",
"[",
"k",
"]",
",",
"m",
"=",
"nj",
",",
"n",
"=",
"nn",
",",
"ldA",
"=",
"nj",
",",
"uplo",
"=",
"'L'",
")",
"# add update matrices from children to frontal matrix",
"for",
"i",
"in",
"range",
"(",
"chptr",
"[",
"k",
"+",
"1",
"]",
"-",
"1",
",",
"chptr",
"[",
"k",
"]",
"-",
"1",
",",
"-",
"1",
")",
":",
"Ui",
"=",
"stack",
".",
"pop",
"(",
")",
"frontal_add_update",
"(",
"F",
",",
"Ui",
",",
"relidx",
",",
"relptr",
",",
"chidx",
"[",
"i",
"]",
")",
"# factor L_{Nk,Nk}",
"lapack",
".",
"potrf",
"(",
"F",
",",
"n",
"=",
"nn",
",",
"ldA",
"=",
"nj",
")",
"# if supernode k is not a root node, compute and push update matrix onto stack",
"if",
"na",
">",
"0",
":",
"# compute L_{Ak,Nk} := A_{Ak,Nk}*inv(L_{Nk,Nk}')",
"blas",
".",
"trsm",
"(",
"F",
",",
"F",
",",
"m",
"=",
"na",
",",
"n",
"=",
"nn",
",",
"ldA",
"=",
"nj",
",",
"ldB",
"=",
"nj",
",",
"offsetB",
"=",
"nn",
",",
"transA",
"=",
"'T'",
",",
"side",
"=",
"'R'",
")",
"# compute Uk = Uk - L_{Ak,Nk}*inv(D_{Nk,Nk})*L_{Ak,Nk}'",
"if",
"nn",
"==",
"1",
":",
"blas",
".",
"syr",
"(",
"F",
",",
"F",
",",
"n",
"=",
"na",
",",
"offsetx",
"=",
"nn",
",",
"offsetA",
"=",
"nn",
"*",
"nj",
"+",
"nn",
",",
"ldA",
"=",
"nj",
",",
"alpha",
"=",
"-",
"1.0",
")",
"else",
":",
"blas",
".",
"syrk",
"(",
"F",
",",
"F",
",",
"k",
"=",
"nn",
",",
"n",
"=",
"na",
",",
"offsetA",
"=",
"nn",
",",
"ldA",
"=",
"nj",
",",
"offsetC",
"=",
"nn",
"*",
"nj",
"+",
"nn",
",",
"ldC",
"=",
"nj",
",",
"alpha",
"=",
"-",
"1.0",
",",
"beta",
"=",
"1.0",
")",
"# compute L_{Ak,Nk} := L_{Ak,Nk}*inv(L_{Nk,Nk})",
"blas",
".",
"trsm",
"(",
"F",
",",
"F",
",",
"m",
"=",
"na",
",",
"n",
"=",
"nn",
",",
"ldA",
"=",
"nj",
",",
"ldB",
"=",
"nj",
",",
"offsetB",
"=",
"nn",
",",
"side",
"=",
"'R'",
")",
"# add Uk to stack",
"Uk",
"=",
"matrix",
"(",
"0.0",
",",
"(",
"na",
",",
"na",
")",
")",
"lapack",
".",
"lacpy",
"(",
"F",
",",
"Uk",
",",
"m",
"=",
"na",
",",
"n",
"=",
"na",
",",
"uplo",
"=",
"'L'",
",",
"offsetA",
"=",
"nn",
"*",
"nj",
"+",
"nn",
",",
"ldA",
"=",
"nj",
")",
"stack",
".",
"append",
"(",
"Uk",
")",
"# copy the leading Nk columns of frontal matrix to blkval",
"lapack",
".",
"lacpy",
"(",
"F",
",",
"blkval",
",",
"uplo",
"=",
"\"L\"",
",",
"offsetB",
"=",
"blkptr",
"[",
"k",
"]",
",",
"m",
"=",
"nj",
",",
"n",
"=",
"nn",
",",
"ldB",
"=",
"nj",
")",
"X",
".",
"is_factor",
"=",
"True",
"return"
] | Supernodal multifrontal Cholesky factorization:
.. math::
X = LL^T
where :math:`L` is lower-triangular. On exit, the argument :math:`X`
contains the Cholesky factor :math:`L`.
:param X: :py:class:`cspmatrix` | [
"Supernodal",
"multifrontal",
"Cholesky",
"factorization",
":"
] | python | train |
bram85/topydo | topydo/ui/columns/Main.py | https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/ui/columns/Main.py#L495-L510 | def _update_view(self, p_data):
""" Creates a view from the data entered in the view widget. """
view = self._viewdata_to_view(p_data)
if self.column_mode == _APPEND_COLUMN or self.column_mode == _COPY_COLUMN:
self._add_column(view)
elif self.column_mode == _INSERT_COLUMN:
self._add_column(view, self.columns.focus_position)
elif self.column_mode == _EDIT_COLUMN:
current_column = self.columns.focus
current_column.title = p_data['title']
current_column.view = view
self._viewwidget_visible = False
self._blur_commandline() | [
"def",
"_update_view",
"(",
"self",
",",
"p_data",
")",
":",
"view",
"=",
"self",
".",
"_viewdata_to_view",
"(",
"p_data",
")",
"if",
"self",
".",
"column_mode",
"==",
"_APPEND_COLUMN",
"or",
"self",
".",
"column_mode",
"==",
"_COPY_COLUMN",
":",
"self",
".",
"_add_column",
"(",
"view",
")",
"elif",
"self",
".",
"column_mode",
"==",
"_INSERT_COLUMN",
":",
"self",
".",
"_add_column",
"(",
"view",
",",
"self",
".",
"columns",
".",
"focus_position",
")",
"elif",
"self",
".",
"column_mode",
"==",
"_EDIT_COLUMN",
":",
"current_column",
"=",
"self",
".",
"columns",
".",
"focus",
"current_column",
".",
"title",
"=",
"p_data",
"[",
"'title'",
"]",
"current_column",
".",
"view",
"=",
"view",
"self",
".",
"_viewwidget_visible",
"=",
"False",
"self",
".",
"_blur_commandline",
"(",
")"
] | Creates a view from the data entered in the view widget. | [
"Creates",
"a",
"view",
"from",
"the",
"data",
"entered",
"in",
"the",
"view",
"widget",
"."
] | python | train |
caktus/django-timepiece | timepiece/management/commands/check_entries.py | https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/management/commands/check_entries.py#L84-L110 | def check_entry(self, entries, *args, **kwargs):
"""
With a list of entries, check each entry against every other
"""
verbosity = kwargs.get('verbosity', 1)
user_total_overlaps = 0
user = ''
for index_a, entry_a in enumerate(entries):
# Show the name the first time through
if index_a == 0:
if args and verbosity >= 1 or verbosity >= 2:
self.show_name(entry_a.user)
user = entry_a.user
for index_b in range(index_a, len(entries)):
entry_b = entries[index_b]
if entry_a.check_overlap(entry_b):
user_total_overlaps += 1
self.show_overlap(entry_a, entry_b, verbosity=verbosity)
if user_total_overlaps and user and verbosity >= 1:
overlap_data = {
'first': user.first_name,
'last': user.last_name,
'total': user_total_overlaps,
}
self.stdout.write('Total overlapping entries for user ' +
'%(first)s %(last)s: %(total)d' % overlap_data)
return user_total_overlaps | [
"def",
"check_entry",
"(",
"self",
",",
"entries",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"verbosity",
"=",
"kwargs",
".",
"get",
"(",
"'verbosity'",
",",
"1",
")",
"user_total_overlaps",
"=",
"0",
"user",
"=",
"''",
"for",
"index_a",
",",
"entry_a",
"in",
"enumerate",
"(",
"entries",
")",
":",
"# Show the name the first time through",
"if",
"index_a",
"==",
"0",
":",
"if",
"args",
"and",
"verbosity",
">=",
"1",
"or",
"verbosity",
">=",
"2",
":",
"self",
".",
"show_name",
"(",
"entry_a",
".",
"user",
")",
"user",
"=",
"entry_a",
".",
"user",
"for",
"index_b",
"in",
"range",
"(",
"index_a",
",",
"len",
"(",
"entries",
")",
")",
":",
"entry_b",
"=",
"entries",
"[",
"index_b",
"]",
"if",
"entry_a",
".",
"check_overlap",
"(",
"entry_b",
")",
":",
"user_total_overlaps",
"+=",
"1",
"self",
".",
"show_overlap",
"(",
"entry_a",
",",
"entry_b",
",",
"verbosity",
"=",
"verbosity",
")",
"if",
"user_total_overlaps",
"and",
"user",
"and",
"verbosity",
">=",
"1",
":",
"overlap_data",
"=",
"{",
"'first'",
":",
"user",
".",
"first_name",
",",
"'last'",
":",
"user",
".",
"last_name",
",",
"'total'",
":",
"user_total_overlaps",
",",
"}",
"self",
".",
"stdout",
".",
"write",
"(",
"'Total overlapping entries for user '",
"+",
"'%(first)s %(last)s: %(total)d'",
"%",
"overlap_data",
")",
"return",
"user_total_overlaps"
] | With a list of entries, check each entry against every other | [
"With",
"a",
"list",
"of",
"entries",
"check",
"each",
"entry",
"against",
"every",
"other"
] | python | train |
ariebovenberg/snug | tutorial/composed0.py | https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/tutorial/composed0.py#L13-L15 | def repo(name: str, owner: str) -> snug.Query[dict]:
"""a repository lookup by owner and name"""
return json.loads((yield f'/repos/{owner}/{name}').content) | [
"def",
"repo",
"(",
"name",
":",
"str",
",",
"owner",
":",
"str",
")",
"->",
"snug",
".",
"Query",
"[",
"dict",
"]",
":",
"return",
"json",
".",
"loads",
"(",
"(",
"yield",
"f'/repos/{owner}/{name}'",
")",
".",
"content",
")"
] | a repository lookup by owner and name | [
"a",
"repository",
"lookup",
"by",
"owner",
"and",
"name"
] | python | train |
StanfordVL/robosuite | robosuite/environments/sawyer_lift.py | https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/environments/sawyer_lift.py#L138-L170 | def _load_model(self):
"""
Loads an xml model, puts it in self.model
"""
super()._load_model()
self.mujoco_robot.set_base_xpos([0, 0, 0])
# load model for table top workspace
self.mujoco_arena = TableArena(
table_full_size=self.table_full_size, table_friction=self.table_friction
)
if self.use_indicator_object:
self.mujoco_arena.add_pos_indicator()
# The sawyer robot has a pedestal, we want to align it with the table
self.mujoco_arena.set_origin([0.16 + self.table_full_size[0] / 2, 0, 0])
# initialize objects of interest
cube = BoxObject(
size_min=[0.020, 0.020, 0.020], # [0.015, 0.015, 0.015],
size_max=[0.022, 0.022, 0.022], # [0.018, 0.018, 0.018])
rgba=[1, 0, 0, 1],
)
self.mujoco_objects = OrderedDict([("cube", cube)])
# task includes arena, robot, and objects of interest
self.model = TableTopTask(
self.mujoco_arena,
self.mujoco_robot,
self.mujoco_objects,
initializer=self.placement_initializer,
)
self.model.place_objects() | [
"def",
"_load_model",
"(",
"self",
")",
":",
"super",
"(",
")",
".",
"_load_model",
"(",
")",
"self",
".",
"mujoco_robot",
".",
"set_base_xpos",
"(",
"[",
"0",
",",
"0",
",",
"0",
"]",
")",
"# load model for table top workspace",
"self",
".",
"mujoco_arena",
"=",
"TableArena",
"(",
"table_full_size",
"=",
"self",
".",
"table_full_size",
",",
"table_friction",
"=",
"self",
".",
"table_friction",
")",
"if",
"self",
".",
"use_indicator_object",
":",
"self",
".",
"mujoco_arena",
".",
"add_pos_indicator",
"(",
")",
"# The sawyer robot has a pedestal, we want to align it with the table",
"self",
".",
"mujoco_arena",
".",
"set_origin",
"(",
"[",
"0.16",
"+",
"self",
".",
"table_full_size",
"[",
"0",
"]",
"/",
"2",
",",
"0",
",",
"0",
"]",
")",
"# initialize objects of interest",
"cube",
"=",
"BoxObject",
"(",
"size_min",
"=",
"[",
"0.020",
",",
"0.020",
",",
"0.020",
"]",
",",
"# [0.015, 0.015, 0.015],",
"size_max",
"=",
"[",
"0.022",
",",
"0.022",
",",
"0.022",
"]",
",",
"# [0.018, 0.018, 0.018])",
"rgba",
"=",
"[",
"1",
",",
"0",
",",
"0",
",",
"1",
"]",
",",
")",
"self",
".",
"mujoco_objects",
"=",
"OrderedDict",
"(",
"[",
"(",
"\"cube\"",
",",
"cube",
")",
"]",
")",
"# task includes arena, robot, and objects of interest",
"self",
".",
"model",
"=",
"TableTopTask",
"(",
"self",
".",
"mujoco_arena",
",",
"self",
".",
"mujoco_robot",
",",
"self",
".",
"mujoco_objects",
",",
"initializer",
"=",
"self",
".",
"placement_initializer",
",",
")",
"self",
".",
"model",
".",
"place_objects",
"(",
")"
] | Loads an xml model, puts it in self.model | [
"Loads",
"an",
"xml",
"model",
"puts",
"it",
"in",
"self",
".",
"model"
] | python | train |
galactics/beyond | beyond/dates/eop.py | https://github.com/galactics/beyond/blob/7a7590ff0fd4c0bac3e8e383ecca03caa98e5742/beyond/dates/eop.py#L282-L293 | def register(cls, klass, name=DEFAULT_DBNAME):
"""Register an Eop Database
The only requirement of this database is that it should have a ``__getitem__``
method accepting MJD as float.
"""
if name in cls._dbs:
msg = "'{}' is already registered for an Eop database. Skipping".format(name)
log.warning(msg)
else:
cls._dbs[name] = klass | [
"def",
"register",
"(",
"cls",
",",
"klass",
",",
"name",
"=",
"DEFAULT_DBNAME",
")",
":",
"if",
"name",
"in",
"cls",
".",
"_dbs",
":",
"msg",
"=",
"\"'{}' is already registered for an Eop database. Skipping\"",
".",
"format",
"(",
"name",
")",
"log",
".",
"warning",
"(",
"msg",
")",
"else",
":",
"cls",
".",
"_dbs",
"[",
"name",
"]",
"=",
"klass"
] | Register an Eop Database
The only requirement of this database is that it should have a ``__getitem__``
method accepting MJD as float. | [
"Register",
"an",
"Eop",
"Database"
] | python | train |
globality-corp/microcosm-flask | microcosm_flask/conventions/discovery.py | https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/conventions/discovery.py#L81-L91 | def configure_discovery(graph):
"""
Build a singleton endpoint that provides a link to all search endpoints.
"""
ns = Namespace(
subject=graph.config.discovery_convention.name,
)
convention = DiscoveryConvention(graph)
convention.configure(ns, discover=tuple())
return ns.subject | [
"def",
"configure_discovery",
"(",
"graph",
")",
":",
"ns",
"=",
"Namespace",
"(",
"subject",
"=",
"graph",
".",
"config",
".",
"discovery_convention",
".",
"name",
",",
")",
"convention",
"=",
"DiscoveryConvention",
"(",
"graph",
")",
"convention",
".",
"configure",
"(",
"ns",
",",
"discover",
"=",
"tuple",
"(",
")",
")",
"return",
"ns",
".",
"subject"
] | Build a singleton endpoint that provides a link to all search endpoints. | [
"Build",
"a",
"singleton",
"endpoint",
"that",
"provides",
"a",
"link",
"to",
"all",
"search",
"endpoints",
"."
] | python | train |
yinkaisheng/Python-UIAutomation-for-Windows | uiautomation/uiautomation.py | https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L4739-L4749 | def RangeFromChild(self, child) -> TextRange:
"""
Call IUIAutomationTextPattern::RangeFromChild.
child: `Control` or its subclass.
Return `TextRange` or None, a text range enclosing a child element such as an image,
hyperlink, Microsoft Excel spreadsheet, or other embedded object.
Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextpattern-rangefromchild
"""
textRange = self.pattern.RangeFromChild(Control.Element)
if textRange:
return TextRange(textRange=textRange) | [
"def",
"RangeFromChild",
"(",
"self",
",",
"child",
")",
"->",
"TextRange",
":",
"textRange",
"=",
"self",
".",
"pattern",
".",
"RangeFromChild",
"(",
"Control",
".",
"Element",
")",
"if",
"textRange",
":",
"return",
"TextRange",
"(",
"textRange",
"=",
"textRange",
")"
] | Call IUIAutomationTextPattern::RangeFromChild.
child: `Control` or its subclass.
Return `TextRange` or None, a text range enclosing a child element such as an image,
hyperlink, Microsoft Excel spreadsheet, or other embedded object.
Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextpattern-rangefromchild | [
"Call",
"IUIAutomationTextPattern",
"::",
"RangeFromChild",
".",
"child",
":",
"Control",
"or",
"its",
"subclass",
".",
"Return",
"TextRange",
"or",
"None",
"a",
"text",
"range",
"enclosing",
"a",
"child",
"element",
"such",
"as",
"an",
"image",
"hyperlink",
"Microsoft",
"Excel",
"spreadsheet",
"or",
"other",
"embedded",
"object",
".",
"Refer",
"https",
":",
"//",
"docs",
".",
"microsoft",
".",
"com",
"/",
"en",
"-",
"us",
"/",
"windows",
"/",
"desktop",
"/",
"api",
"/",
"uiautomationclient",
"/",
"nf",
"-",
"uiautomationclient",
"-",
"iuiautomationtextpattern",
"-",
"rangefromchild"
] | python | valid |
TissueMAPS/TmDeploy | tmdeploy/config.py | https://github.com/TissueMAPS/TmDeploy/blob/f891b4ffb21431988bc4a063ae871da3bf284a45/tmdeploy/config.py#L630-L636 | def tm_group(self):
'''str: TissueMAPS system group (defaults to
:attr:`tm_user <tmdeploy.config.AnsibleHostVariableSection.tm_user>`)
'''
if self._tm_group is None:
self._tm_group = self.tm_user
return self._tm_group | [
"def",
"tm_group",
"(",
"self",
")",
":",
"if",
"self",
".",
"_tm_group",
"is",
"None",
":",
"self",
".",
"_tm_group",
"=",
"self",
".",
"tm_user",
"return",
"self",
".",
"_tm_group"
] | str: TissueMAPS system group (defaults to
:attr:`tm_user <tmdeploy.config.AnsibleHostVariableSection.tm_user>`) | [
"str",
":",
"TissueMAPS",
"system",
"group",
"(",
"defaults",
"to",
":",
"attr",
":",
"tm_user",
"<tmdeploy",
".",
"config",
".",
"AnsibleHostVariableSection",
".",
"tm_user",
">",
")"
] | python | train |
frictionlessdata/tabulator-py | tabulator/helpers.py | https://github.com/frictionlessdata/tabulator-py/blob/06c25845a7139d919326388cc6335f33f909db8c/tabulator/helpers.py#L165-L173 | def extract_options(options, names):
"""Return options for names and remove it from given options in-place.
"""
result = {}
for name, value in copy(options).items():
if name in names:
result[name] = value
del options[name]
return result | [
"def",
"extract_options",
"(",
"options",
",",
"names",
")",
":",
"result",
"=",
"{",
"}",
"for",
"name",
",",
"value",
"in",
"copy",
"(",
"options",
")",
".",
"items",
"(",
")",
":",
"if",
"name",
"in",
"names",
":",
"result",
"[",
"name",
"]",
"=",
"value",
"del",
"options",
"[",
"name",
"]",
"return",
"result"
] | Return options for names and remove it from given options in-place. | [
"Return",
"options",
"for",
"names",
"and",
"remove",
"it",
"from",
"given",
"options",
"in",
"-",
"place",
"."
] | python | train |
Nekroze/librarian | librarian/library.py | https://github.com/Nekroze/librarian/blob/5d3da2980d91a637f80ad7164fbf204a2dd2bd58/librarian/library.py#L11-L52 | def Where_filter_gen(*data):
"""
Generate an sqlite "LIKE" filter generator based on the given data.
This functions arguments should be a N length series of field and data
tuples.
"""
where = []
def Fwhere(field, pattern):
"""Add where filter for the given field with the given pattern."""
where.append("WHERE {0} LIKE '{1}'".format(field, pattern))
def Fstring(field, string):
"""Add a where filter based on a string."""
Fwhere(field, "%{0}%".format(string if not isinstance(string, str)
else str(string)))
def Fdict(field, data):
"""Add where filters to search for dict keys and values."""
for key, value in data.items():
if value == '*':
Fstring(field, key)
else:
Fstring(field, "{0}:%{1}".format(key, value if not
isinstance(value, str)
else str(value)))
def Flist(field, data):
"""Add where filters to search for elements of a list."""
for elem in data:
Fstring(field, elem if not isinstance(elem, str) else
str(elem))
for field, data in data:
if isinstance(data, str):
Fstring(field, data)
elif isinstance(data, dict):
Fdict(field, data)
elif isinstance(data, list):
Flist(field, data)
return ' AND '.join(where) | [
"def",
"Where_filter_gen",
"(",
"*",
"data",
")",
":",
"where",
"=",
"[",
"]",
"def",
"Fwhere",
"(",
"field",
",",
"pattern",
")",
":",
"\"\"\"Add where filter for the given field with the given pattern.\"\"\"",
"where",
".",
"append",
"(",
"\"WHERE {0} LIKE '{1}'\"",
".",
"format",
"(",
"field",
",",
"pattern",
")",
")",
"def",
"Fstring",
"(",
"field",
",",
"string",
")",
":",
"\"\"\"Add a where filter based on a string.\"\"\"",
"Fwhere",
"(",
"field",
",",
"\"%{0}%\"",
".",
"format",
"(",
"string",
"if",
"not",
"isinstance",
"(",
"string",
",",
"str",
")",
"else",
"str",
"(",
"string",
")",
")",
")",
"def",
"Fdict",
"(",
"field",
",",
"data",
")",
":",
"\"\"\"Add where filters to search for dict keys and values.\"\"\"",
"for",
"key",
",",
"value",
"in",
"data",
".",
"items",
"(",
")",
":",
"if",
"value",
"==",
"'*'",
":",
"Fstring",
"(",
"field",
",",
"key",
")",
"else",
":",
"Fstring",
"(",
"field",
",",
"\"{0}:%{1}\"",
".",
"format",
"(",
"key",
",",
"value",
"if",
"not",
"isinstance",
"(",
"value",
",",
"str",
")",
"else",
"str",
"(",
"value",
")",
")",
")",
"def",
"Flist",
"(",
"field",
",",
"data",
")",
":",
"\"\"\"Add where filters to search for elements of a list.\"\"\"",
"for",
"elem",
"in",
"data",
":",
"Fstring",
"(",
"field",
",",
"elem",
"if",
"not",
"isinstance",
"(",
"elem",
",",
"str",
")",
"else",
"str",
"(",
"elem",
")",
")",
"for",
"field",
",",
"data",
"in",
"data",
":",
"if",
"isinstance",
"(",
"data",
",",
"str",
")",
":",
"Fstring",
"(",
"field",
",",
"data",
")",
"elif",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"Fdict",
"(",
"field",
",",
"data",
")",
"elif",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"Flist",
"(",
"field",
",",
"data",
")",
"return",
"' AND '",
".",
"join",
"(",
"where",
")"
] | Generate an sqlite "LIKE" filter generator based on the given data.
This function's arguments should be an N-length series of field and data
tuples. | [
"Generate",
"an",
"sqlite",
"LIKE",
"filter",
"generator",
"based",
"on",
"the",
"given",
"data",
".",
"This",
"functions",
"arguments",
"should",
"be",
"a",
"N",
"length",
"series",
"of",
"field",
"and",
"data",
"tuples",
"."
] | python | train |
tBaxter/tango-photos | build/lib/photos/templatetags/gallery_tags.py | https://github.com/tBaxter/tango-photos/blob/aca52c6d6425cd6016468107a677479216285fc3/build/lib/photos/templatetags/gallery_tags.py#L20-L34 | def get_related_galleries(gallery, count=5):
"""
Gets latest related galleries from same section as originating gallery.
Count defaults to five but can be overridden.
Usage: {% get_related_galleries gallery <10> %}
"""
# just get the first cat. If they assigned to more than one, tough
try:
cat = gallery.sections.all()[0]
related = cat.gallery_categories.filter(published=True).exclude(id=gallery.id).order_by('-id')[:count]
except:
related = None
return {'related': related, 'MEDIA_URL': settings.MEDIA_URL} | [
"def",
"get_related_galleries",
"(",
"gallery",
",",
"count",
"=",
"5",
")",
":",
"# just get the first cat. If they assigned to more than one, tough",
"try",
":",
"cat",
"=",
"gallery",
".",
"sections",
".",
"all",
"(",
")",
"[",
"0",
"]",
"related",
"=",
"cat",
".",
"gallery_categories",
".",
"filter",
"(",
"published",
"=",
"True",
")",
".",
"exclude",
"(",
"id",
"=",
"gallery",
".",
"id",
")",
".",
"order_by",
"(",
"'-id'",
")",
"[",
":",
"count",
"]",
"except",
":",
"related",
"=",
"None",
"return",
"{",
"'related'",
":",
"related",
",",
"'MEDIA_URL'",
":",
"settings",
".",
"MEDIA_URL",
"}"
] | Gets latest related galleries from same section as originating gallery.
Count defaults to five but can be overridden.
Usage: {% get_related_galleries gallery <10> %} | [
"Gets",
"latest",
"related",
"galleries",
"from",
"same",
"section",
"as",
"originating",
"gallery",
"."
] | python | train |
glomex/gcdt | gcdt/yugen_core.py | https://github.com/glomex/gcdt/blob/cd67cf416371337b83cb9ca3f696277125703339/gcdt/yugen_core.py#L139-L159 | def create_api_key(awsclient, api_name, api_key_name):
"""Create a new API key as reference for api.conf.
:param api_name:
:param api_key_name:
:return: api_key
"""
_sleep()
client_api = awsclient.get_client('apigateway')
print('create api key: %s' % api_key_name)
response = client_api.create_api_key(
name=api_key_name,
description='Created for ' + api_name,
enabled=True
)
#print(json2table(response))
print('Add this api key \'%s\' to your api.conf' % response['id'])
return response['id'] | [
"def",
"create_api_key",
"(",
"awsclient",
",",
"api_name",
",",
"api_key_name",
")",
":",
"_sleep",
"(",
")",
"client_api",
"=",
"awsclient",
".",
"get_client",
"(",
"'apigateway'",
")",
"print",
"(",
"'create api key: %s'",
"%",
"api_key_name",
")",
"response",
"=",
"client_api",
".",
"create_api_key",
"(",
"name",
"=",
"api_key_name",
",",
"description",
"=",
"'Created for '",
"+",
"api_name",
",",
"enabled",
"=",
"True",
")",
"#print(json2table(response))",
"print",
"(",
"'Add this api key \\'%s\\' to your api.conf'",
"%",
"response",
"[",
"'id'",
"]",
")",
"return",
"response",
"[",
"'id'",
"]"
] | Create a new API key as reference for api.conf.
:param api_name:
:param api_key_name:
:return: api_key | [
"Create",
"a",
"new",
"API",
"key",
"as",
"reference",
"for",
"api",
".",
"conf",
"."
] | python | train |
rcarmo/pngcanvas | pngcanvas.py | https://github.com/rcarmo/pngcanvas/blob/e2eaa0d5ba353005b3b658f6ee453c1956340670/pngcanvas.py#L164-L172 | def blend_rect(self, x0, y0, x1, y1, dx, dy, destination, alpha=0xff):
"""Blend a rectangle onto the image"""
x0, y0, x1, y1 = self.rect_helper(x0, y0, x1, y1)
for x in range(x0, x1 + 1):
for y in range(y0, y1 + 1):
o = self._offset(x, y)
rgba = self.canvas[o:o + 4]
rgba[3] = alpha
destination.point(dx + x - x0, dy + y - y0, rgba) | [
"def",
"blend_rect",
"(",
"self",
",",
"x0",
",",
"y0",
",",
"x1",
",",
"y1",
",",
"dx",
",",
"dy",
",",
"destination",
",",
"alpha",
"=",
"0xff",
")",
":",
"x0",
",",
"y0",
",",
"x1",
",",
"y1",
"=",
"self",
".",
"rect_helper",
"(",
"x0",
",",
"y0",
",",
"x1",
",",
"y1",
")",
"for",
"x",
"in",
"range",
"(",
"x0",
",",
"x1",
"+",
"1",
")",
":",
"for",
"y",
"in",
"range",
"(",
"y0",
",",
"y1",
"+",
"1",
")",
":",
"o",
"=",
"self",
".",
"_offset",
"(",
"x",
",",
"y",
")",
"rgba",
"=",
"self",
".",
"canvas",
"[",
"o",
":",
"o",
"+",
"4",
"]",
"rgba",
"[",
"3",
"]",
"=",
"alpha",
"destination",
".",
"point",
"(",
"dx",
"+",
"x",
"-",
"x0",
",",
"dy",
"+",
"y",
"-",
"y0",
",",
"rgba",
")"
] | Blend a rectangle onto the image | [
"Blend",
"a",
"rectangle",
"onto",
"the",
"image"
] | python | train |
mental32/spotify.py | spotify/models/player.py | https://github.com/mental32/spotify.py/blob/bb296cac7c3dd289908906b7069bd80f43950515/spotify/models/player.py#L124-L135 | async def set_volume(self, volume: int, *, device: Optional[SomeDevice] = None):
"""Set the volume for the user’s current playback device.
Parameters
----------
volume : int
The volume to set. Must be a value from 0 to 100 inclusive.
device : Optional[:obj:`SomeDevice`]
The Device object or id of the device this command is targeting.
If not supplied, the user’s currently active device is the target.
"""
await self._user.http.set_playback_volume(volume, device_id=str(device)) | [
"async",
"def",
"set_volume",
"(",
"self",
",",
"volume",
":",
"int",
",",
"*",
",",
"device",
":",
"Optional",
"[",
"SomeDevice",
"]",
"=",
"None",
")",
":",
"await",
"self",
".",
"_user",
".",
"http",
".",
"set_playback_volume",
"(",
"volume",
",",
"device_id",
"=",
"str",
"(",
"device",
")",
")"
] | Set the volume for the user’s current playback device.
Parameters
----------
volume : int
The volume to set. Must be a value from 0 to 100 inclusive.
device : Optional[:obj:`SomeDevice`]
The Device object or id of the device this command is targeting.
If not supplied, the user’s currently active device is the target. | [
"Set",
"the",
"volume",
"for",
"the",
"user’s",
"current",
"playback",
"device",
"."
] | python | test |
liamw9534/bt-manager | bt_manager/cod.py | https://github.com/liamw9534/bt-manager/blob/51be2919394ce8134c698359649bfad09eedf4ec/bt_manager/cod.py#L189-L201 | def minor_device_class(self):
"""
Return the minor device class property decoded e.g.,
Scanner, Printer, Loudspeaker, Camera, etc.
"""
minor_device = []
minor_lookup = BTCoD._MINOR_DEVICE_CLASS.get(self.cod &
BTCoD._MAJOR_DEVICE_MASK,
[])
for i in minor_lookup:
minor_value = self.cod & i.get('mask')
minor_device.append(i.get(minor_value, 'Unknown'))
return minor_device | [
"def",
"minor_device_class",
"(",
"self",
")",
":",
"minor_device",
"=",
"[",
"]",
"minor_lookup",
"=",
"BTCoD",
".",
"_MINOR_DEVICE_CLASS",
".",
"get",
"(",
"self",
".",
"cod",
"&",
"BTCoD",
".",
"_MAJOR_DEVICE_MASK",
",",
"[",
"]",
")",
"for",
"i",
"in",
"minor_lookup",
":",
"minor_value",
"=",
"self",
".",
"cod",
"&",
"i",
".",
"get",
"(",
"'mask'",
")",
"minor_device",
".",
"append",
"(",
"i",
".",
"get",
"(",
"minor_value",
",",
"'Unknown'",
")",
")",
"return",
"minor_device"
] | Return the minor device class property decoded e.g.,
Scanner, Printer, Loudspeaker, Camera, etc. | [
"Return",
"the",
"minor",
"device",
"class",
"property",
"decoded",
"e",
".",
"g",
".",
"Scanner",
"Printer",
"Loudspeaker",
"Camera",
"etc",
"."
] | python | train |
has2k1/plotnine | plotnine/guides/guides.py | https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/guides/guides.py#L213-L223 | def validate(self, guide):
"""
Validate guide object
"""
if is_string(guide):
guide = Registry['guide_{}'.format(guide)]()
if not isinstance(guide, guide_class):
raise PlotnineError(
"Unknown guide: {}".format(guide))
return guide | [
"def",
"validate",
"(",
"self",
",",
"guide",
")",
":",
"if",
"is_string",
"(",
"guide",
")",
":",
"guide",
"=",
"Registry",
"[",
"'guide_{}'",
".",
"format",
"(",
"guide",
")",
"]",
"(",
")",
"if",
"not",
"isinstance",
"(",
"guide",
",",
"guide_class",
")",
":",
"raise",
"PlotnineError",
"(",
"\"Unknown guide: {}\"",
".",
"format",
"(",
"guide",
")",
")",
"return",
"guide"
] | Validate guide object | [
"Validate",
"guide",
"object"
] | python | train |
signalfx/signalfx-python | signalfx/ingest.py | https://github.com/signalfx/signalfx-python/blob/650eb9a2b301bcc795e4e3a8c031574ade69849d/signalfx/ingest.py#L199-L207 | def stop(self, msg='Thread stopped'):
"""Stop send thread and flush points for a safe exit."""
with self._lock:
if not self._thread_running:
return
self._thread_running = False
self._queue.put(_BaseSignalFxIngestClient._QUEUE_STOP)
self._send_thread.join()
_logger.debug(msg) | [
"def",
"stop",
"(",
"self",
",",
"msg",
"=",
"'Thread stopped'",
")",
":",
"with",
"self",
".",
"_lock",
":",
"if",
"not",
"self",
".",
"_thread_running",
":",
"return",
"self",
".",
"_thread_running",
"=",
"False",
"self",
".",
"_queue",
".",
"put",
"(",
"_BaseSignalFxIngestClient",
".",
"_QUEUE_STOP",
")",
"self",
".",
"_send_thread",
".",
"join",
"(",
")",
"_logger",
".",
"debug",
"(",
"msg",
")"
] | Stop send thread and flush points for a safe exit. | [
"Stop",
"send",
"thread",
"and",
"flush",
"points",
"for",
"a",
"safe",
"exit",
"."
] | python | train |
bachya/regenmaschine | regenmaschine/controller.py | https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/controller.py#L85-L97 | async def login(self, password):
"""Authenticate against the device (locally)."""
auth_resp = await self._client_request(
'post',
'{0}/auth/login'.format(self._host),
json={
'pwd': password,
'remember': 1
})
self._access_token = auth_resp['access_token']
self._access_token_expiration = datetime.now() + timedelta(
seconds=int(auth_resp['expires_in']) - 10) | [
"async",
"def",
"login",
"(",
"self",
",",
"password",
")",
":",
"auth_resp",
"=",
"await",
"self",
".",
"_client_request",
"(",
"'post'",
",",
"'{0}/auth/login'",
".",
"format",
"(",
"self",
".",
"_host",
")",
",",
"json",
"=",
"{",
"'pwd'",
":",
"password",
",",
"'remember'",
":",
"1",
"}",
")",
"self",
".",
"_access_token",
"=",
"auth_resp",
"[",
"'access_token'",
"]",
"self",
".",
"_access_token_expiration",
"=",
"datetime",
".",
"now",
"(",
")",
"+",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"auth_resp",
"[",
"'expires_in'",
"]",
")",
"-",
"10",
")"
] | Authenticate against the device (locally). | [
"Authenticate",
"against",
"the",
"device",
"(",
"locally",
")",
"."
] | python | train |
geopy/geopy | geopy/geocoders/mapbox.py | https://github.com/geopy/geopy/blob/02c838d965e76497f3c3d61f53808c86b5c58224/geopy/geocoders/mapbox.py#L70-L85 | def _parse_json(self, json, exactly_one=True):
'''Returns location, (latitude, longitude) from json feed.'''
features = json['features']
if features == []:
return None
def parse_feature(feature):
location = feature['place_name']
place = feature['text']
longitude = feature['geometry']['coordinates'][0]
latitude = feature['geometry']['coordinates'][1]
return Location(location, (latitude, longitude), place)
if exactly_one:
return parse_feature(features[0])
else:
return [parse_feature(feature) for feature in features] | [
"def",
"_parse_json",
"(",
"self",
",",
"json",
",",
"exactly_one",
"=",
"True",
")",
":",
"features",
"=",
"json",
"[",
"'features'",
"]",
"if",
"features",
"==",
"[",
"]",
":",
"return",
"None",
"def",
"parse_feature",
"(",
"feature",
")",
":",
"location",
"=",
"feature",
"[",
"'place_name'",
"]",
"place",
"=",
"feature",
"[",
"'text'",
"]",
"longitude",
"=",
"feature",
"[",
"'geometry'",
"]",
"[",
"'coordinates'",
"]",
"[",
"0",
"]",
"latitude",
"=",
"feature",
"[",
"'geometry'",
"]",
"[",
"'coordinates'",
"]",
"[",
"1",
"]",
"return",
"Location",
"(",
"location",
",",
"(",
"latitude",
",",
"longitude",
")",
",",
"place",
")",
"if",
"exactly_one",
":",
"return",
"parse_feature",
"(",
"features",
"[",
"0",
"]",
")",
"else",
":",
"return",
"[",
"parse_feature",
"(",
"feature",
")",
"for",
"feature",
"in",
"features",
"]"
] | Returns location, (latitude, longitude) from json feed. | [
"Returns",
"location",
"(",
"latitude",
"longitude",
")",
"from",
"json",
"feed",
"."
] | python | train |
ciena/afkak | afkak/producer.py | https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/producer.py#L315-L378 | def _send_requests(self, parts_results, requests):
"""Send the requests
We've determined the partition for each message group in the batch, or
got errors for them.
"""
# We use these dictionaries to be able to combine all the messages
# destined to the same topic/partition into one request
# the messages & deferreds, both by topic+partition
reqsByTopicPart = defaultdict(list)
payloadsByTopicPart = defaultdict(list)
deferredsByTopicPart = defaultdict(list)
# We now have a list of (succeeded/failed, partition/None) tuples
# for the partition lookups we did on each message group, zipped with
# the requests
for (success, part_or_failure), req in zip(parts_results, requests):
if req.deferred.called:
# Submitter cancelled the request while we were waiting for
# the topic/partition, skip it
continue
if not success:
# We failed to get a partition for this request, errback to the
# caller with the failure. Maybe this should retry? However,
# since this failure is likely to affect an entire Topic, there
# should be no issues with ordering of messages within a
# partition of a topic getting out of order. Let the caller
# retry the particular request if they like, or they could
# cancel all their outstanding requests in
req.deferred.errback(part_or_failure)
continue
# Ok, we now have a partition for this request, we can add the
# request for this topic/partition to reqsByTopicPart, and the
# caller's deferred to deferredsByTopicPart
topicPart = TopicAndPartition(req.topic, part_or_failure)
reqsByTopicPart[topicPart].append(req)
deferredsByTopicPart[topicPart].append(req.deferred)
# Build list of payloads grouped by topic/partition
# That is, we bundle all the messages destined for a given
# topic/partition, even if they were submitted by different
# requests into a single 'payload', and then we submit all the
# payloads as a list to the client for sending to the various
# brokers. The finest granularity of success/failure is at the
# payload (topic/partition) level.
payloads = []
for (topic, partition), reqs in reqsByTopicPart.items():
msgSet = create_message_set(reqs, self.codec)
req = ProduceRequest(topic, partition, msgSet)
topicPart = TopicAndPartition(topic, partition)
payloads.append(req)
payloadsByTopicPart[topicPart] = req
# Make sure we have some payloads to send
if not payloads:
return
# send the request
d = self.client.send_produce_request(
payloads, acks=self.req_acks, timeout=self.ack_timeout,
fail_on_error=False)
self._req_attempts += 1
# add our handlers
d.addBoth(self._handle_send_response, payloadsByTopicPart,
deferredsByTopicPart)
return d | [
"def",
"_send_requests",
"(",
"self",
",",
"parts_results",
",",
"requests",
")",
":",
"# We use these dictionaries to be able to combine all the messages",
"# destined to the same topic/partition into one request",
"# the messages & deferreds, both by topic+partition",
"reqsByTopicPart",
"=",
"defaultdict",
"(",
"list",
")",
"payloadsByTopicPart",
"=",
"defaultdict",
"(",
"list",
")",
"deferredsByTopicPart",
"=",
"defaultdict",
"(",
"list",
")",
"# We now have a list of (succeeded/failed, partition/None) tuples",
"# for the partition lookups we did on each message group, zipped with",
"# the requests",
"for",
"(",
"success",
",",
"part_or_failure",
")",
",",
"req",
"in",
"zip",
"(",
"parts_results",
",",
"requests",
")",
":",
"if",
"req",
".",
"deferred",
".",
"called",
":",
"# Submitter cancelled the request while we were waiting for",
"# the topic/partition, skip it",
"continue",
"if",
"not",
"success",
":",
"# We failed to get a partition for this request, errback to the",
"# caller with the failure. Maybe this should retry? However,",
"# since this failure is likely to affect an entire Topic, there",
"# should be no issues with ordering of messages within a",
"# partition of a topic getting out of order. Let the caller",
"# retry the particular request if they like, or they could",
"# cancel all their outstanding requests in",
"req",
".",
"deferred",
".",
"errback",
"(",
"part_or_failure",
")",
"continue",
"# Ok, we now have a partition for this request, we can add the",
"# request for this topic/partition to reqsByTopicPart, and the",
"# caller's deferred to deferredsByTopicPart",
"topicPart",
"=",
"TopicAndPartition",
"(",
"req",
".",
"topic",
",",
"part_or_failure",
")",
"reqsByTopicPart",
"[",
"topicPart",
"]",
".",
"append",
"(",
"req",
")",
"deferredsByTopicPart",
"[",
"topicPart",
"]",
".",
"append",
"(",
"req",
".",
"deferred",
")",
"# Build list of payloads grouped by topic/partition",
"# That is, we bundle all the messages destined for a given",
"# topic/partition, even if they were submitted by different",
"# requests into a single 'payload', and then we submit all the",
"# payloads as a list to the client for sending to the various",
"# brokers. The finest granularity of success/failure is at the",
"# payload (topic/partition) level.",
"payloads",
"=",
"[",
"]",
"for",
"(",
"topic",
",",
"partition",
")",
",",
"reqs",
"in",
"reqsByTopicPart",
".",
"items",
"(",
")",
":",
"msgSet",
"=",
"create_message_set",
"(",
"reqs",
",",
"self",
".",
"codec",
")",
"req",
"=",
"ProduceRequest",
"(",
"topic",
",",
"partition",
",",
"msgSet",
")",
"topicPart",
"=",
"TopicAndPartition",
"(",
"topic",
",",
"partition",
")",
"payloads",
".",
"append",
"(",
"req",
")",
"payloadsByTopicPart",
"[",
"topicPart",
"]",
"=",
"req",
"# Make sure we have some payloads to send",
"if",
"not",
"payloads",
":",
"return",
"# send the request",
"d",
"=",
"self",
".",
"client",
".",
"send_produce_request",
"(",
"payloads",
",",
"acks",
"=",
"self",
".",
"req_acks",
",",
"timeout",
"=",
"self",
".",
"ack_timeout",
",",
"fail_on_error",
"=",
"False",
")",
"self",
".",
"_req_attempts",
"+=",
"1",
"# add our handlers",
"d",
".",
"addBoth",
"(",
"self",
".",
"_handle_send_response",
",",
"payloadsByTopicPart",
",",
"deferredsByTopicPart",
")",
"return",
"d"
] | Send the requests
We've determined the partition for each message group in the batch, or
got errors for them. | [
"Send",
"the",
"requests"
] | python | train |
inspirehep/refextract | refextract/references/tag.py | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L1052-L1070 | def identify_ibids(line):
"""Find IBIDs within the line, record their position and length,
and replace them with underscores.
@param line: (string) the working reference line
@return: (tuple) containing 2 dictionaries and a string:
Dictionary: matched IBID text: (Key: position of IBID in
line; Value: matched IBID text)
String: working line with matched IBIDs removed
"""
ibid_match_txt = {}
# Record details of each matched ibid:
for m_ibid in re_ibid.finditer(line):
ibid_match_txt[m_ibid.start()] = m_ibid.group(0)
# Replace matched text in line with underscores:
line = line[0:m_ibid.start()] + \
"_" * len(m_ibid.group(0)) + \
line[m_ibid.end():]
return ibid_match_txt, line | [
"def",
"identify_ibids",
"(",
"line",
")",
":",
"ibid_match_txt",
"=",
"{",
"}",
"# Record details of each matched ibid:",
"for",
"m_ibid",
"in",
"re_ibid",
".",
"finditer",
"(",
"line",
")",
":",
"ibid_match_txt",
"[",
"m_ibid",
".",
"start",
"(",
")",
"]",
"=",
"m_ibid",
".",
"group",
"(",
"0",
")",
"# Replace matched text in line with underscores:",
"line",
"=",
"line",
"[",
"0",
":",
"m_ibid",
".",
"start",
"(",
")",
"]",
"+",
"\"_\"",
"*",
"len",
"(",
"m_ibid",
".",
"group",
"(",
"0",
")",
")",
"+",
"line",
"[",
"m_ibid",
".",
"end",
"(",
")",
":",
"]",
"return",
"ibid_match_txt",
",",
"line"
] | Find IBIDs within the line, record their position and length,
and replace them with underscores.
@param line: (string) the working reference line
@return: (tuple) containing 2 dictionaries and a string:
Dictionary: matched IBID text: (Key: position of IBID in
line; Value: matched IBID text)
String: working line with matched IBIDs removed | [
"Find",
"IBIDs",
"within",
"the",
"line",
"record",
"their",
"position",
"and",
"length",
"and",
"replace",
"them",
"with",
"underscores",
"."
] | python | train |
psss/did | did/plugins/sentry.py | https://github.com/psss/did/blob/04e4ee6f1aa14c0cae3ba9f9803871f3f98279cb/did/plugins/sentry.py#L75-L79 | def activities(self):
""" Return all activites (fetch only once) """
if self._activities is None:
self._activities = self._fetch_activities()
return self._activities | [
"def",
"activities",
"(",
"self",
")",
":",
"if",
"self",
".",
"_activities",
"is",
"None",
":",
"self",
".",
"_activities",
"=",
"self",
".",
"_fetch_activities",
"(",
")",
"return",
"self",
".",
"_activities"
] | Return all activites (fetch only once) | [
"Return",
"all",
"activites",
"(",
"fetch",
"only",
"once",
")"
] | python | train |
HPENetworking/PYHPEIMC | pyhpeimc/objects.py | https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/pyhpeimc/objects.py#L139-L144 | def getipmacarp(self):
"""
Function operates on the IMCDev object and updates the ipmacarp attribute
:return:
"""
self.ipmacarp = get_ip_mac_arp_list(self.auth, self.url, devid = self.devid) | [
"def",
"getipmacarp",
"(",
"self",
")",
":",
"self",
".",
"ipmacarp",
"=",
"get_ip_mac_arp_list",
"(",
"self",
".",
"auth",
",",
"self",
".",
"url",
",",
"devid",
"=",
"self",
".",
"devid",
")"
] | Function operates on the IMCDev object and updates the ipmacarp attribute
:return: | [
"Function",
"operates",
"on",
"the",
"IMCDev",
"object",
"and",
"updates",
"the",
"ipmacarp",
"attribute",
":",
"return",
":"
] | python | train |