repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (sequence) | docstring (string, 1-46.9k chars) | docstring_tokens (sequence) | language (string, 1 distinct value) | partition (string, 3 distinct values)
---|---|---|---|---|---|---|---|---|
pandas-dev/pandas | pandas/core/computation/scope.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/scope.py#L22-L26 | def _ensure_scope(level, global_dict=None, local_dict=None, resolvers=(),
target=None, **kwargs):
"""Ensure that we are grabbing the correct scope."""
return Scope(level + 1, global_dict=global_dict, local_dict=local_dict,
resolvers=resolvers, target=target) | [
"def",
"_ensure_scope",
"(",
"level",
",",
"global_dict",
"=",
"None",
",",
"local_dict",
"=",
"None",
",",
"resolvers",
"=",
"(",
")",
",",
"target",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"Scope",
"(",
"level",
"+",
"1",
",",
"global_dict",
"=",
"global_dict",
",",
"local_dict",
"=",
"local_dict",
",",
"resolvers",
"=",
"resolvers",
",",
"target",
"=",
"target",
")"
] | Ensure that we are grabbing the correct scope. | [
"Ensure",
"that",
"we",
"are",
"grabbing",
"the",
"correct",
"scope",
"."
] | python | train |
ebu/PlugIt | plugit_proxy/utils.py | https://github.com/ebu/PlugIt/blob/de5f1e870f67caaef7a4a58e4bb1ed54d9c5dc53/plugit_proxy/utils.py#L10-L18 | def create_secret(*args, **kwargs):
"""Return a secure key generated from the user and the object. As we load elements fron any class from user imput, this prevent the user to specify arbitrary class"""
to_sign = '-!'.join(args) + '$$'.join(kwargs.values())
key = settings.SECRET_FOR_SIGNS
hashed = hmac.new(key, to_sign, sha1)
return re.sub(r'[\W_]+', '', binascii.b2a_base64(hashed.digest())) | [
"def",
"create_secret",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"to_sign",
"=",
"'-!'",
".",
"join",
"(",
"args",
")",
"+",
"'$$'",
".",
"join",
"(",
"kwargs",
".",
"values",
"(",
")",
")",
"key",
"=",
"settings",
".",
"SECRET_FOR_SIGNS",
"hashed",
"=",
"hmac",
".",
"new",
"(",
"key",
",",
"to_sign",
",",
"sha1",
")",
"return",
"re",
".",
"sub",
"(",
"r'[\\W_]+'",
",",
"''",
",",
"binascii",
".",
"b2a_base64",
"(",
"hashed",
".",
"digest",
"(",
")",
")",
")"
] | Return a secure key generated from the user and the object. As we load elements fron any class from user imput, this prevent the user to specify arbitrary class | [
"Return",
"a",
"secure",
"key",
"generated",
"from",
"the",
"user",
"and",
"the",
"object",
".",
"As",
"we",
"load",
"elements",
"fron",
"any",
"class",
"from",
"user",
"imput",
"this",
"prevent",
"the",
"user",
"to",
"specify",
"arbitrary",
"class"
] | python | train |
praekeltfoundation/seaworthy | seaworthy/stream/matchers.py | https://github.com/praekeltfoundation/seaworthy/blob/6f10a19b45d4ea1dc3bd0553cc4d0438696c079c/seaworthy/stream/matchers.py#L99-L106 | def args_str(self):
"""
Return an args string for the repr.
"""
matched = [str(m) for m in self._matchers[:self._position]]
unmatched = [str(m) for m in self._matchers[self._position:]]
return 'matched=[{}], unmatched=[{}]'.format(
', '.join(matched), ', '.join(unmatched)) | [
"def",
"args_str",
"(",
"self",
")",
":",
"matched",
"=",
"[",
"str",
"(",
"m",
")",
"for",
"m",
"in",
"self",
".",
"_matchers",
"[",
":",
"self",
".",
"_position",
"]",
"]",
"unmatched",
"=",
"[",
"str",
"(",
"m",
")",
"for",
"m",
"in",
"self",
".",
"_matchers",
"[",
"self",
".",
"_position",
":",
"]",
"]",
"return",
"'matched=[{}], unmatched=[{}]'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"matched",
")",
",",
"', '",
".",
"join",
"(",
"unmatched",
")",
")"
] | Return an args string for the repr. | [
"Return",
"an",
"args",
"string",
"for",
"the",
"repr",
"."
] | python | train |
dw/mitogen | mitogen/core.py | https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/core.py#L2953-L2962 | def _call(self, stream, func):
"""
Call `func(self)`, catching any exception that might occur, logging it,
and force-disconnecting the related `stream`.
"""
try:
func(self)
except Exception:
LOG.exception('%r crashed', stream)
stream.on_disconnect(self) | [
"def",
"_call",
"(",
"self",
",",
"stream",
",",
"func",
")",
":",
"try",
":",
"func",
"(",
"self",
")",
"except",
"Exception",
":",
"LOG",
".",
"exception",
"(",
"'%r crashed'",
",",
"stream",
")",
"stream",
".",
"on_disconnect",
"(",
"self",
")"
] | Call `func(self)`, catching any exception that might occur, logging it,
and force-disconnecting the related `stream`. | [
"Call",
"func",
"(",
"self",
")",
"catching",
"any",
"exception",
"that",
"might",
"occur",
"logging",
"it",
"and",
"force",
"-",
"disconnecting",
"the",
"related",
"stream",
"."
] | python | train |
saltstack/salt | salt/cloud/clouds/msazure.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/msazure.py#L415-L692 | def create(vm_):
'''
Create a single VM from a data dict
'''
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'azure',
vm_['profile'],
vm_=vm_) is False:
return False
except AttributeError:
pass
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.info('Creating Cloud VM %s', vm_['name'])
conn = get_conn()
label = vm_.get('label', vm_['name'])
service_name = vm_.get('service_name', vm_['name'])
service_kwargs = {
'service_name': service_name,
'label': label,
'description': vm_.get('desc', vm_['name']),
}
loc_error = False
if 'location' in vm_:
if 'affinity_group' in vm_:
loc_error = True
else:
service_kwargs['location'] = vm_['location']
elif 'affinity_group' in vm_:
service_kwargs['affinity_group'] = vm_['affinity_group']
else:
loc_error = True
if loc_error:
raise SaltCloudSystemExit(
'Either a location or affinity group must be specified, but not both'
)
ssh_port = config.get_cloud_config_value('port', vm_, __opts__,
default=22, search_global=True)
ssh_endpoint = azure.servicemanagement.ConfigurationSetInputEndpoint(
name='SSH',
protocol='TCP',
port=ssh_port,
local_port=22,
)
network_config = azure.servicemanagement.ConfigurationSet()
network_config.input_endpoints.input_endpoints.append(ssh_endpoint)
network_config.configuration_set_type = 'NetworkConfiguration'
if 'win_username' in vm_:
system_config = azure.servicemanagement.WindowsConfigurationSet(
computer_name=vm_['name'],
admin_username=vm_['win_username'],
admin_password=vm_['win_password'],
)
smb_port = '445'
if 'smb_port' in vm_:
smb_port = vm_['smb_port']
smb_endpoint = azure.servicemanagement.ConfigurationSetInputEndpoint(
name='SMB',
protocol='TCP',
port=smb_port,
local_port=smb_port,
)
network_config.input_endpoints.input_endpoints.append(smb_endpoint)
# Domain and WinRM configuration not yet supported by Salt Cloud
system_config.domain_join = None
system_config.win_rm = None
else:
system_config = azure.servicemanagement.LinuxConfigurationSet(
host_name=vm_['name'],
user_name=vm_['ssh_username'],
user_password=vm_['ssh_password'],
disable_ssh_password_authentication=False,
)
# TODO: Might need to create a storage account
media_link = vm_['media_link']
# TODO: Probably better to use more than just the name in the media_link
media_link += '/{0}.vhd'.format(vm_['name'])
os_hd = azure.servicemanagement.OSVirtualHardDisk(vm_['image'], media_link)
vm_kwargs = {
'service_name': service_name,
'deployment_name': service_name,
'deployment_slot': vm_['slot'],
'label': label,
'role_name': vm_['name'],
'system_config': system_config,
'os_virtual_hard_disk': os_hd,
'role_size': vm_['size'],
'network_config': network_config,
}
if 'virtual_network_name' in vm_:
vm_kwargs['virtual_network_name'] = vm_['virtual_network_name']
if 'subnet_name' in vm_:
network_config.subnet_names.append(vm_['subnet_name'])
log.debug('vm_kwargs: %s', vm_kwargs)
event_kwargs = {'service_kwargs': service_kwargs.copy(),
'vm_kwargs': vm_kwargs.copy()}
del event_kwargs['vm_kwargs']['system_config']
del event_kwargs['vm_kwargs']['os_virtual_hard_disk']
del event_kwargs['vm_kwargs']['network_config']
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args=__utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.debug('vm_kwargs: %s', vm_kwargs)
# Azure lets you open winrm on a new VM
# Can open up specific ports in Azure; but not on Windows
try:
conn.create_hosted_service(**service_kwargs)
except AzureConflictHttpError:
log.debug('Cloud service already exists')
except Exception as exc:
error = 'The hosted service name is invalid.'
if error in six.text_type(exc):
log.error(
'Error creating %s on Azure.\n\n'
'The hosted service name is invalid. The name can contain '
'only letters, numbers, and hyphens. The name must start with '
'a letter and must end with a letter or a number.',
vm_['name'],
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
else:
log.error(
'Error creating %s on Azure\n\n'
'The following exception was thrown when trying to '
'run the initial deployment: \n%s',
vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
try:
result = conn.create_virtual_machine_deployment(**vm_kwargs)
log.debug('Request ID for machine: %s', result.request_id)
_wait_for_async(conn, result.request_id)
except AzureConflictHttpError:
log.debug('Conflict error. The deployment may already exist, trying add_role')
# Deleting two useless keywords
del vm_kwargs['deployment_slot']
del vm_kwargs['label']
del vm_kwargs['virtual_network_name']
result = conn.add_role(**vm_kwargs)
_wait_for_async(conn, result.request_id)
except Exception as exc:
error = 'The hosted service name is invalid.'
if error in six.text_type(exc):
log.error(
'Error creating %s on Azure.\n\n'
'The VM name is invalid. The name can contain '
'only letters, numbers, and hyphens. The name must start with '
'a letter and must end with a letter or a number.',
vm_['name'],
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
else:
log.error(
'Error creating %s on Azure.\n\n'
'The Virtual Machine could not be created. If you '
'are using an already existing Cloud Service, '
'make sure you set up the `port` variable corresponding '
'to the SSH port exists and that the port number is not '
'already in use.\nThe following exception was thrown when trying to '
'run the initial deployment: \n%s',
vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
def wait_for_hostname():
'''
Wait for the IP address to become available
'''
try:
conn.get_role(service_name, service_name, vm_['name'])
data = show_instance(vm_['name'], call='action')
if 'url' in data and data['url'] != six.text_type(''):
return data['url']
except AzureMissingResourceHttpError:
pass
time.sleep(1)
return False
hostname = salt.utils.cloud.wait_for_fun(
wait_for_hostname,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
if not hostname:
log.error('Failed to get a value for the hostname.')
return False
vm_['ssh_host'] = hostname.replace('http://', '').replace('/', '')
vm_['password'] = config.get_cloud_config_value(
'ssh_password', vm_, __opts__
)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
# Attaching volumes
volumes = config.get_cloud_config_value(
'volumes', vm_, __opts__, search_global=True
)
if volumes:
__utils__['cloud.fire_event'](
'event',
'attaching volumes',
'salt/cloud/{0}/attaching_volumes'.format(vm_['name']),
args=__utils__['cloud.filter_event']('attaching_volumes', vm_, ['volumes']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.info('Create and attach volumes to node %s', vm_['name'])
created = create_attach_volumes(
vm_['name'],
{
'volumes': volumes,
'service_name': service_name,
'deployment_name': vm_['name'],
'media_link': media_link,
'role_name': vm_['name'],
'del_all_vols_on_destroy': vm_.get('set_del_all_vols_on_destroy', False)
},
call='action'
)
ret['Attached Volumes'] = created
data = show_instance(vm_['name'], call='action')
log.info('Created Cloud VM \'%s\'', vm_)
log.debug('\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data))
ret.update(data)
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret | [
"def",
"create",
"(",
"vm_",
")",
":",
"try",
":",
"# Check for required profile parameters before sending any API calls.",
"if",
"vm_",
"[",
"'profile'",
"]",
"and",
"config",
".",
"is_profile_configured",
"(",
"__opts__",
",",
"__active_provider_name__",
"or",
"'azure'",
",",
"vm_",
"[",
"'profile'",
"]",
",",
"vm_",
"=",
"vm_",
")",
"is",
"False",
":",
"return",
"False",
"except",
"AttributeError",
":",
"pass",
"__utils__",
"[",
"'cloud.fire_event'",
"]",
"(",
"'event'",
",",
"'starting create'",
",",
"'salt/cloud/{0}/creating'",
".",
"format",
"(",
"vm_",
"[",
"'name'",
"]",
")",
",",
"args",
"=",
"__utils__",
"[",
"'cloud.filter_event'",
"]",
"(",
"'creating'",
",",
"vm_",
",",
"[",
"'name'",
",",
"'profile'",
",",
"'provider'",
",",
"'driver'",
"]",
")",
",",
"sock_dir",
"=",
"__opts__",
"[",
"'sock_dir'",
"]",
",",
"transport",
"=",
"__opts__",
"[",
"'transport'",
"]",
")",
"log",
".",
"info",
"(",
"'Creating Cloud VM %s'",
",",
"vm_",
"[",
"'name'",
"]",
")",
"conn",
"=",
"get_conn",
"(",
")",
"label",
"=",
"vm_",
".",
"get",
"(",
"'label'",
",",
"vm_",
"[",
"'name'",
"]",
")",
"service_name",
"=",
"vm_",
".",
"get",
"(",
"'service_name'",
",",
"vm_",
"[",
"'name'",
"]",
")",
"service_kwargs",
"=",
"{",
"'service_name'",
":",
"service_name",
",",
"'label'",
":",
"label",
",",
"'description'",
":",
"vm_",
".",
"get",
"(",
"'desc'",
",",
"vm_",
"[",
"'name'",
"]",
")",
",",
"}",
"loc_error",
"=",
"False",
"if",
"'location'",
"in",
"vm_",
":",
"if",
"'affinity_group'",
"in",
"vm_",
":",
"loc_error",
"=",
"True",
"else",
":",
"service_kwargs",
"[",
"'location'",
"]",
"=",
"vm_",
"[",
"'location'",
"]",
"elif",
"'affinity_group'",
"in",
"vm_",
":",
"service_kwargs",
"[",
"'affinity_group'",
"]",
"=",
"vm_",
"[",
"'affinity_group'",
"]",
"else",
":",
"loc_error",
"=",
"True",
"if",
"loc_error",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'Either a location or affinity group must be specified, but not both'",
")",
"ssh_port",
"=",
"config",
".",
"get_cloud_config_value",
"(",
"'port'",
",",
"vm_",
",",
"__opts__",
",",
"default",
"=",
"22",
",",
"search_global",
"=",
"True",
")",
"ssh_endpoint",
"=",
"azure",
".",
"servicemanagement",
".",
"ConfigurationSetInputEndpoint",
"(",
"name",
"=",
"'SSH'",
",",
"protocol",
"=",
"'TCP'",
",",
"port",
"=",
"ssh_port",
",",
"local_port",
"=",
"22",
",",
")",
"network_config",
"=",
"azure",
".",
"servicemanagement",
".",
"ConfigurationSet",
"(",
")",
"network_config",
".",
"input_endpoints",
".",
"input_endpoints",
".",
"append",
"(",
"ssh_endpoint",
")",
"network_config",
".",
"configuration_set_type",
"=",
"'NetworkConfiguration'",
"if",
"'win_username'",
"in",
"vm_",
":",
"system_config",
"=",
"azure",
".",
"servicemanagement",
".",
"WindowsConfigurationSet",
"(",
"computer_name",
"=",
"vm_",
"[",
"'name'",
"]",
",",
"admin_username",
"=",
"vm_",
"[",
"'win_username'",
"]",
",",
"admin_password",
"=",
"vm_",
"[",
"'win_password'",
"]",
",",
")",
"smb_port",
"=",
"'445'",
"if",
"'smb_port'",
"in",
"vm_",
":",
"smb_port",
"=",
"vm_",
"[",
"'smb_port'",
"]",
"smb_endpoint",
"=",
"azure",
".",
"servicemanagement",
".",
"ConfigurationSetInputEndpoint",
"(",
"name",
"=",
"'SMB'",
",",
"protocol",
"=",
"'TCP'",
",",
"port",
"=",
"smb_port",
",",
"local_port",
"=",
"smb_port",
",",
")",
"network_config",
".",
"input_endpoints",
".",
"input_endpoints",
".",
"append",
"(",
"smb_endpoint",
")",
"# Domain and WinRM configuration not yet supported by Salt Cloud",
"system_config",
".",
"domain_join",
"=",
"None",
"system_config",
".",
"win_rm",
"=",
"None",
"else",
":",
"system_config",
"=",
"azure",
".",
"servicemanagement",
".",
"LinuxConfigurationSet",
"(",
"host_name",
"=",
"vm_",
"[",
"'name'",
"]",
",",
"user_name",
"=",
"vm_",
"[",
"'ssh_username'",
"]",
",",
"user_password",
"=",
"vm_",
"[",
"'ssh_password'",
"]",
",",
"disable_ssh_password_authentication",
"=",
"False",
",",
")",
"# TODO: Might need to create a storage account",
"media_link",
"=",
"vm_",
"[",
"'media_link'",
"]",
"# TODO: Probably better to use more than just the name in the media_link",
"media_link",
"+=",
"'/{0}.vhd'",
".",
"format",
"(",
"vm_",
"[",
"'name'",
"]",
")",
"os_hd",
"=",
"azure",
".",
"servicemanagement",
".",
"OSVirtualHardDisk",
"(",
"vm_",
"[",
"'image'",
"]",
",",
"media_link",
")",
"vm_kwargs",
"=",
"{",
"'service_name'",
":",
"service_name",
",",
"'deployment_name'",
":",
"service_name",
",",
"'deployment_slot'",
":",
"vm_",
"[",
"'slot'",
"]",
",",
"'label'",
":",
"label",
",",
"'role_name'",
":",
"vm_",
"[",
"'name'",
"]",
",",
"'system_config'",
":",
"system_config",
",",
"'os_virtual_hard_disk'",
":",
"os_hd",
",",
"'role_size'",
":",
"vm_",
"[",
"'size'",
"]",
",",
"'network_config'",
":",
"network_config",
",",
"}",
"if",
"'virtual_network_name'",
"in",
"vm_",
":",
"vm_kwargs",
"[",
"'virtual_network_name'",
"]",
"=",
"vm_",
"[",
"'virtual_network_name'",
"]",
"if",
"'subnet_name'",
"in",
"vm_",
":",
"network_config",
".",
"subnet_names",
".",
"append",
"(",
"vm_",
"[",
"'subnet_name'",
"]",
")",
"log",
".",
"debug",
"(",
"'vm_kwargs: %s'",
",",
"vm_kwargs",
")",
"event_kwargs",
"=",
"{",
"'service_kwargs'",
":",
"service_kwargs",
".",
"copy",
"(",
")",
",",
"'vm_kwargs'",
":",
"vm_kwargs",
".",
"copy",
"(",
")",
"}",
"del",
"event_kwargs",
"[",
"'vm_kwargs'",
"]",
"[",
"'system_config'",
"]",
"del",
"event_kwargs",
"[",
"'vm_kwargs'",
"]",
"[",
"'os_virtual_hard_disk'",
"]",
"del",
"event_kwargs",
"[",
"'vm_kwargs'",
"]",
"[",
"'network_config'",
"]",
"__utils__",
"[",
"'cloud.fire_event'",
"]",
"(",
"'event'",
",",
"'requesting instance'",
",",
"'salt/cloud/{0}/requesting'",
".",
"format",
"(",
"vm_",
"[",
"'name'",
"]",
")",
",",
"args",
"=",
"__utils__",
"[",
"'cloud.filter_event'",
"]",
"(",
"'requesting'",
",",
"event_kwargs",
",",
"list",
"(",
"event_kwargs",
")",
")",
",",
"sock_dir",
"=",
"__opts__",
"[",
"'sock_dir'",
"]",
",",
"transport",
"=",
"__opts__",
"[",
"'transport'",
"]",
")",
"log",
".",
"debug",
"(",
"'vm_kwargs: %s'",
",",
"vm_kwargs",
")",
"# Azure lets you open winrm on a new VM",
"# Can open up specific ports in Azure; but not on Windows",
"try",
":",
"conn",
".",
"create_hosted_service",
"(",
"*",
"*",
"service_kwargs",
")",
"except",
"AzureConflictHttpError",
":",
"log",
".",
"debug",
"(",
"'Cloud service already exists'",
")",
"except",
"Exception",
"as",
"exc",
":",
"error",
"=",
"'The hosted service name is invalid.'",
"if",
"error",
"in",
"six",
".",
"text_type",
"(",
"exc",
")",
":",
"log",
".",
"error",
"(",
"'Error creating %s on Azure.\\n\\n'",
"'The hosted service name is invalid. The name can contain '",
"'only letters, numbers, and hyphens. The name must start with '",
"'a letter and must end with a letter or a number.'",
",",
"vm_",
"[",
"'name'",
"]",
",",
"# Show the traceback if the debug logging level is enabled",
"exc_info_on_loglevel",
"=",
"logging",
".",
"DEBUG",
")",
"else",
":",
"log",
".",
"error",
"(",
"'Error creating %s on Azure\\n\\n'",
"'The following exception was thrown when trying to '",
"'run the initial deployment: \\n%s'",
",",
"vm_",
"[",
"'name'",
"]",
",",
"exc",
",",
"# Show the traceback if the debug logging level is enabled",
"exc_info_on_loglevel",
"=",
"logging",
".",
"DEBUG",
")",
"return",
"False",
"try",
":",
"result",
"=",
"conn",
".",
"create_virtual_machine_deployment",
"(",
"*",
"*",
"vm_kwargs",
")",
"log",
".",
"debug",
"(",
"'Request ID for machine: %s'",
",",
"result",
".",
"request_id",
")",
"_wait_for_async",
"(",
"conn",
",",
"result",
".",
"request_id",
")",
"except",
"AzureConflictHttpError",
":",
"log",
".",
"debug",
"(",
"'Conflict error. The deployment may already exist, trying add_role'",
")",
"# Deleting two useless keywords",
"del",
"vm_kwargs",
"[",
"'deployment_slot'",
"]",
"del",
"vm_kwargs",
"[",
"'label'",
"]",
"del",
"vm_kwargs",
"[",
"'virtual_network_name'",
"]",
"result",
"=",
"conn",
".",
"add_role",
"(",
"*",
"*",
"vm_kwargs",
")",
"_wait_for_async",
"(",
"conn",
",",
"result",
".",
"request_id",
")",
"except",
"Exception",
"as",
"exc",
":",
"error",
"=",
"'The hosted service name is invalid.'",
"if",
"error",
"in",
"six",
".",
"text_type",
"(",
"exc",
")",
":",
"log",
".",
"error",
"(",
"'Error creating %s on Azure.\\n\\n'",
"'The VM name is invalid. The name can contain '",
"'only letters, numbers, and hyphens. The name must start with '",
"'a letter and must end with a letter or a number.'",
",",
"vm_",
"[",
"'name'",
"]",
",",
"# Show the traceback if the debug logging level is enabled",
"exc_info_on_loglevel",
"=",
"logging",
".",
"DEBUG",
")",
"else",
":",
"log",
".",
"error",
"(",
"'Error creating %s on Azure.\\n\\n'",
"'The Virtual Machine could not be created. If you '",
"'are using an already existing Cloud Service, '",
"'make sure you set up the `port` variable corresponding '",
"'to the SSH port exists and that the port number is not '",
"'already in use.\\nThe following exception was thrown when trying to '",
"'run the initial deployment: \\n%s'",
",",
"vm_",
"[",
"'name'",
"]",
",",
"exc",
",",
"# Show the traceback if the debug logging level is enabled",
"exc_info_on_loglevel",
"=",
"logging",
".",
"DEBUG",
")",
"return",
"False",
"def",
"wait_for_hostname",
"(",
")",
":",
"'''\n Wait for the IP address to become available\n '''",
"try",
":",
"conn",
".",
"get_role",
"(",
"service_name",
",",
"service_name",
",",
"vm_",
"[",
"'name'",
"]",
")",
"data",
"=",
"show_instance",
"(",
"vm_",
"[",
"'name'",
"]",
",",
"call",
"=",
"'action'",
")",
"if",
"'url'",
"in",
"data",
"and",
"data",
"[",
"'url'",
"]",
"!=",
"six",
".",
"text_type",
"(",
"''",
")",
":",
"return",
"data",
"[",
"'url'",
"]",
"except",
"AzureMissingResourceHttpError",
":",
"pass",
"time",
".",
"sleep",
"(",
"1",
")",
"return",
"False",
"hostname",
"=",
"salt",
".",
"utils",
".",
"cloud",
".",
"wait_for_fun",
"(",
"wait_for_hostname",
",",
"timeout",
"=",
"config",
".",
"get_cloud_config_value",
"(",
"'wait_for_fun_timeout'",
",",
"vm_",
",",
"__opts__",
",",
"default",
"=",
"15",
"*",
"60",
")",
",",
")",
"if",
"not",
"hostname",
":",
"log",
".",
"error",
"(",
"'Failed to get a value for the hostname.'",
")",
"return",
"False",
"vm_",
"[",
"'ssh_host'",
"]",
"=",
"hostname",
".",
"replace",
"(",
"'http://'",
",",
"''",
")",
".",
"replace",
"(",
"'/'",
",",
"''",
")",
"vm_",
"[",
"'password'",
"]",
"=",
"config",
".",
"get_cloud_config_value",
"(",
"'ssh_password'",
",",
"vm_",
",",
"__opts__",
")",
"ret",
"=",
"__utils__",
"[",
"'cloud.bootstrap'",
"]",
"(",
"vm_",
",",
"__opts__",
")",
"# Attaching volumes",
"volumes",
"=",
"config",
".",
"get_cloud_config_value",
"(",
"'volumes'",
",",
"vm_",
",",
"__opts__",
",",
"search_global",
"=",
"True",
")",
"if",
"volumes",
":",
"__utils__",
"[",
"'cloud.fire_event'",
"]",
"(",
"'event'",
",",
"'attaching volumes'",
",",
"'salt/cloud/{0}/attaching_volumes'",
".",
"format",
"(",
"vm_",
"[",
"'name'",
"]",
")",
",",
"args",
"=",
"__utils__",
"[",
"'cloud.filter_event'",
"]",
"(",
"'attaching_volumes'",
",",
"vm_",
",",
"[",
"'volumes'",
"]",
")",
",",
"sock_dir",
"=",
"__opts__",
"[",
"'sock_dir'",
"]",
",",
"transport",
"=",
"__opts__",
"[",
"'transport'",
"]",
")",
"log",
".",
"info",
"(",
"'Create and attach volumes to node %s'",
",",
"vm_",
"[",
"'name'",
"]",
")",
"created",
"=",
"create_attach_volumes",
"(",
"vm_",
"[",
"'name'",
"]",
",",
"{",
"'volumes'",
":",
"volumes",
",",
"'service_name'",
":",
"service_name",
",",
"'deployment_name'",
":",
"vm_",
"[",
"'name'",
"]",
",",
"'media_link'",
":",
"media_link",
",",
"'role_name'",
":",
"vm_",
"[",
"'name'",
"]",
",",
"'del_all_vols_on_destroy'",
":",
"vm_",
".",
"get",
"(",
"'set_del_all_vols_on_destroy'",
",",
"False",
")",
"}",
",",
"call",
"=",
"'action'",
")",
"ret",
"[",
"'Attached Volumes'",
"]",
"=",
"created",
"data",
"=",
"show_instance",
"(",
"vm_",
"[",
"'name'",
"]",
",",
"call",
"=",
"'action'",
")",
"log",
".",
"info",
"(",
"'Created Cloud VM \\'%s\\''",
",",
"vm_",
")",
"log",
".",
"debug",
"(",
"'\\'%s\\' VM creation details:\\n%s'",
",",
"vm_",
"[",
"'name'",
"]",
",",
"pprint",
".",
"pformat",
"(",
"data",
")",
")",
"ret",
".",
"update",
"(",
"data",
")",
"__utils__",
"[",
"'cloud.fire_event'",
"]",
"(",
"'event'",
",",
"'created instance'",
",",
"'salt/cloud/{0}/created'",
".",
"format",
"(",
"vm_",
"[",
"'name'",
"]",
")",
",",
"args",
"=",
"__utils__",
"[",
"'cloud.filter_event'",
"]",
"(",
"'created'",
",",
"vm_",
",",
"[",
"'name'",
",",
"'profile'",
",",
"'provider'",
",",
"'driver'",
"]",
")",
",",
"sock_dir",
"=",
"__opts__",
"[",
"'sock_dir'",
"]",
",",
"transport",
"=",
"__opts__",
"[",
"'transport'",
"]",
")",
"return",
"ret"
] | Create a single VM from a data dict | [
"Create",
"a",
"single",
"VM",
"from",
"a",
"data",
"dict"
] | python | train |
urschrei/pyzotero | pyzotero/zotero.py | https://github.com/urschrei/pyzotero/blob/b378966b30146a952f7953c23202fb5a1ddf81d9/pyzotero/zotero.py#L1448-L1482 | def delete_item(self, payload, last_modified=None):
"""
Delete Items from a Zotero library
Accepts a single argument:
a dict containing item data
OR a list of dicts containing item data
"""
params = None
if isinstance(payload, list):
params = {"itemKey": ",".join([p["key"] for p in payload])}
if last_modified is not None:
modified = last_modified
else:
modified = payload[0]["version"]
url = self.endpoint + "/{t}/{u}/items".format(
t=self.library_type, u=self.library_id
)
else:
ident = payload["key"]
if last_modified is not None:
modified = last_modified
else:
modified = payload["version"]
url = self.endpoint + "/{t}/{u}/items/{c}".format(
t=self.library_type, u=self.library_id, c=ident
)
headers = {"If-Unmodified-Since-Version": str(modified)}
headers.update(self.default_headers())
req = requests.delete(url=url, params=params, headers=headers)
self.request = req
try:
req.raise_for_status()
except requests.exceptions.HTTPError:
error_handler(req)
return True | [
"def",
"delete_item",
"(",
"self",
",",
"payload",
",",
"last_modified",
"=",
"None",
")",
":",
"params",
"=",
"None",
"if",
"isinstance",
"(",
"payload",
",",
"list",
")",
":",
"params",
"=",
"{",
"\"itemKey\"",
":",
"\",\"",
".",
"join",
"(",
"[",
"p",
"[",
"\"key\"",
"]",
"for",
"p",
"in",
"payload",
"]",
")",
"}",
"if",
"last_modified",
"is",
"not",
"None",
":",
"modified",
"=",
"last_modified",
"else",
":",
"modified",
"=",
"payload",
"[",
"0",
"]",
"[",
"\"version\"",
"]",
"url",
"=",
"self",
".",
"endpoint",
"+",
"\"/{t}/{u}/items\"",
".",
"format",
"(",
"t",
"=",
"self",
".",
"library_type",
",",
"u",
"=",
"self",
".",
"library_id",
")",
"else",
":",
"ident",
"=",
"payload",
"[",
"\"key\"",
"]",
"if",
"last_modified",
"is",
"not",
"None",
":",
"modified",
"=",
"last_modified",
"else",
":",
"modified",
"=",
"payload",
"[",
"\"version\"",
"]",
"url",
"=",
"self",
".",
"endpoint",
"+",
"\"/{t}/{u}/items/{c}\"",
".",
"format",
"(",
"t",
"=",
"self",
".",
"library_type",
",",
"u",
"=",
"self",
".",
"library_id",
",",
"c",
"=",
"ident",
")",
"headers",
"=",
"{",
"\"If-Unmodified-Since-Version\"",
":",
"str",
"(",
"modified",
")",
"}",
"headers",
".",
"update",
"(",
"self",
".",
"default_headers",
"(",
")",
")",
"req",
"=",
"requests",
".",
"delete",
"(",
"url",
"=",
"url",
",",
"params",
"=",
"params",
",",
"headers",
"=",
"headers",
")",
"self",
".",
"request",
"=",
"req",
"try",
":",
"req",
".",
"raise_for_status",
"(",
")",
"except",
"requests",
".",
"exceptions",
".",
"HTTPError",
":",
"error_handler",
"(",
"req",
")",
"return",
"True"
] | Delete Items from a Zotero library
Accepts a single argument:
a dict containing item data
OR a list of dicts containing item data | [
"Delete",
"Items",
"from",
"a",
"Zotero",
"library",
"Accepts",
"a",
"single",
"argument",
":",
"a",
"dict",
"containing",
"item",
"data",
"OR",
"a",
"list",
"of",
"dicts",
"containing",
"item",
"data"
] | python | valid |
tBaxter/tango-articles | build/lib/articles/signals.py | https://github.com/tBaxter/tango-articles/blob/93818dcca1b62042a4fc19af63474691b0fe931c/build/lib/articles/signals.py#L10-L52 | def auto_tweet(sender, instance, *args, **kwargs):
"""
Allows auto-tweeting newly created object to twitter
on accounts configured in settings.
You MUST create an app to allow oAuth authentication to work:
-- https://dev.twitter.com/apps/
You also must set the app to "Read and Write" access level,
and create an access token. Whew.
"""
if not twitter or getattr(settings, 'TWITTER_SETTINGS') is False:
#print 'WARNING: Twitter account not configured.'
return False
if not kwargs.get('created'):
return False
twitter_key = settings.TWITTER_SETTINGS
try:
api = twitter.Api(
consumer_key = twitter_key['consumer_key'],
consumer_secret = twitter_key['consumer_secret'],
access_token_key = twitter_key['access_token_key'],
access_token_secret = twitter_key['access_token_secret']
)
except Exception as error:
print("failed to authenticate: {}".format(error))
text = instance.text
if instance.link:
link = instance.link
else:
link = instance.get_absolute_url()
text = '{} {}'.format(text, link)
try:
api.PostUpdate(text)
except Exception as error:
print("Error posting to twitter: {}".format(error)) | [
"def",
"auto_tweet",
"(",
"sender",
",",
"instance",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"twitter",
"or",
"getattr",
"(",
"settings",
",",
"'TWITTER_SETTINGS'",
")",
"is",
"False",
":",
"#print 'WARNING: Twitter account not configured.'",
"return",
"False",
"if",
"not",
"kwargs",
".",
"get",
"(",
"'created'",
")",
":",
"return",
"False",
"twitter_key",
"=",
"settings",
".",
"TWITTER_SETTINGS",
"try",
":",
"api",
"=",
"twitter",
".",
"Api",
"(",
"consumer_key",
"=",
"twitter_key",
"[",
"'consumer_key'",
"]",
",",
"consumer_secret",
"=",
"twitter_key",
"[",
"'consumer_secret'",
"]",
",",
"access_token_key",
"=",
"twitter_key",
"[",
"'access_token_key'",
"]",
",",
"access_token_secret",
"=",
"twitter_key",
"[",
"'access_token_secret'",
"]",
")",
"except",
"Exception",
"as",
"error",
":",
"print",
"(",
"\"failed to authenticate: {}\"",
".",
"format",
"(",
"error",
")",
")",
"text",
"=",
"instance",
".",
"text",
"if",
"instance",
".",
"link",
":",
"link",
"=",
"instance",
".",
"link",
"else",
":",
"link",
"=",
"instance",
".",
"get_absolute_url",
"(",
")",
"text",
"=",
"'{} {}'",
".",
"format",
"(",
"text",
",",
"link",
")",
"try",
":",
"api",
".",
"PostUpdate",
"(",
"text",
")",
"except",
"Exception",
"as",
"error",
":",
"print",
"(",
"\"Error posting to twitter: {}\"",
".",
"format",
"(",
"error",
")",
")"
] | Allows auto-tweeting newly created object to twitter
on accounts configured in settings.
You MUST create an app to allow oAuth authentication to work:
-- https://dev.twitter.com/apps/
You also must set the app to "Read and Write" access level,
and create an access token. Whew. | [
"Allows",
"auto",
"-",
"tweeting",
"newly",
"created",
"object",
"to",
"twitter",
"on",
"accounts",
"configured",
"in",
"settings",
"."
] | python | train |
delfick/harpoon | harpoon/actions.py | https://github.com/delfick/harpoon/blob/a2d39311d6127b7da2e15f40468bf320d598e461/harpoon/actions.py#L159-L169 | def make(collector, image, **kwargs):
"""Just create an image"""
tag = kwargs.get("artifact", NotSpecified)
if tag is NotSpecified:
tag = collector.configuration["harpoon"].tag
if tag is not NotSpecified:
image.tag = tag
Builder().make_image(image, collector.configuration["images"])
print("Created image {0}".format(image.image_name)) | [
"def",
"make",
"(",
"collector",
",",
"image",
",",
"*",
"*",
"kwargs",
")",
":",
"tag",
"=",
"kwargs",
".",
"get",
"(",
"\"artifact\"",
",",
"NotSpecified",
")",
"if",
"tag",
"is",
"NotSpecified",
":",
"tag",
"=",
"collector",
".",
"configuration",
"[",
"\"harpoon\"",
"]",
".",
"tag",
"if",
"tag",
"is",
"not",
"NotSpecified",
":",
"image",
".",
"tag",
"=",
"tag",
"Builder",
"(",
")",
".",
"make_image",
"(",
"image",
",",
"collector",
".",
"configuration",
"[",
"\"images\"",
"]",
")",
"print",
"(",
"\"Created image {0}\"",
".",
"format",
"(",
"image",
".",
"image_name",
")",
")"
] | Just create an image | [
"Just",
"create",
"an",
"image"
] | python | train |
SeabornGames/RequestClient | seaborn/request_client/connection_basic.py | https://github.com/SeabornGames/RequestClient/blob/21aeb951ddfdb6ee453ad0edc896ff224e06425d/seaborn/request_client/connection_basic.py#L37-L51 | def no_history_check(func):
"""
Decorator function to setup a check to see if history has been turned off,
because if it has then the decorated function needs to throw an exception
:param func: function to decorate
:return: original results or exception
"""
def no_history_check_decorator(self, *args, **kwargs):
if ConnectionBasic.max_history is 0:
raise IndexError("ConnectionBasic.max_history is set to 0, "
"therefore this functionality is disabled")
return func(self, *args, **kwargs)
return no_history_check_decorator | [
"def",
"no_history_check",
"(",
"func",
")",
":",
"def",
"no_history_check_decorator",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"ConnectionBasic",
".",
"max_history",
"is",
"0",
":",
"raise",
"IndexError",
"(",
"\"ConnectionBasic.max_history is set to 0, \"",
"\"therefore this functionality is disabled\"",
")",
"return",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"no_history_check_decorator"
] | Decorator function to setup a check to see if history has been turned off,
because if it has then the decorated function needs to throw an exception
:param func: function to decorate
:return: original results or exception | [
"Decorator",
"function",
"to",
"setup",
"a",
"check",
"to",
"see",
"if",
"history",
"has",
"been",
"turned",
"off",
"because",
"if",
"it",
"has",
"then",
"the",
"decorated",
"function",
"needs",
"to",
"throw",
"an",
"exception",
":",
"param",
"func",
":",
"function",
"to",
"decorate",
":",
"return",
":",
"original",
"results",
"or",
"exception"
] | python | train |
gabstopper/smc-python | smc/elements/service.py | https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/elements/service.py#L138-L156 | def create(cls, name, protocol_number, protocol_agent=None, comment=None):
"""
Create the IP Service
:param str name: name of ip-service
:param int protocol_number: ip proto number for this service
:param str,ProtocolAgent protocol_agent: optional protocol agent for
this service
:param str comment: optional comment
:raises CreateElementFailed: failure creating element with reason
:return: instance with meta
:rtype: IPService
"""
json = {'name': name,
'protocol_number': protocol_number,
'protocol_agent_ref': element_resolver(protocol_agent) or None,
'comment': comment}
return ElementCreator(cls, json) | [
"def",
"create",
"(",
"cls",
",",
"name",
",",
"protocol_number",
",",
"protocol_agent",
"=",
"None",
",",
"comment",
"=",
"None",
")",
":",
"json",
"=",
"{",
"'name'",
":",
"name",
",",
"'protocol_number'",
":",
"protocol_number",
",",
"'protocol_agent_ref'",
":",
"element_resolver",
"(",
"protocol_agent",
")",
"or",
"None",
",",
"'comment'",
":",
"comment",
"}",
"return",
"ElementCreator",
"(",
"cls",
",",
"json",
")"
] | Create the IP Service
:param str name: name of ip-service
:param int protocol_number: ip proto number for this service
:param str,ProtocolAgent protocol_agent: optional protocol agent for
this service
:param str comment: optional comment
:raises CreateElementFailed: failure creating element with reason
:return: instance with meta
:rtype: IPService | [
"Create",
"the",
"IP",
"Service"
] | python | train |
saltstack/salt | salt/modules/lxc.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/lxc.py#L4141-L4152 | def _get_md5(name, path):
'''
Get the MD5 checksum of a file from a container
'''
output = run_stdout(name, 'md5sum "{0}"'.format(path),
chroot_fallback=True,
ignore_retcode=True)
try:
return output.split()[0]
except IndexError:
# Destination file does not exist or could not be accessed
return None | [
"def",
"_get_md5",
"(",
"name",
",",
"path",
")",
":",
"output",
"=",
"run_stdout",
"(",
"name",
",",
"'md5sum \"{0}\"'",
".",
"format",
"(",
"path",
")",
",",
"chroot_fallback",
"=",
"True",
",",
"ignore_retcode",
"=",
"True",
")",
"try",
":",
"return",
"output",
".",
"split",
"(",
")",
"[",
"0",
"]",
"except",
"IndexError",
":",
"# Destination file does not exist or could not be accessed",
"return",
"None"
] | Get the MD5 checksum of a file from a container | [
"Get",
"the",
"MD5",
"checksum",
"of",
"a",
"file",
"from",
"a",
"container"
] | python | train |
taskcluster/taskcluster-client.py | taskcluster/queue.py | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/queue.py#L693-L715 | def declareProvisioner(self, *args, **kwargs):
"""
Update a provisioner
Declare a provisioner, supplying some details about it.
`declareProvisioner` allows updating one or more properties of a provisioner as long as the required scopes are
possessed. For example, a request to update the `aws-provisioner-v1`
provisioner with a body `{description: 'This provisioner is great'}` would require you to have the scope
`queue:declare-provisioner:aws-provisioner-v1#description`.
The term "provisioner" is taken broadly to mean anything with a provisionerId.
This does not necessarily mean there is an associated service performing any
provisioning activity.
This method takes input: ``v1/update-provisioner-request.json#``
This method gives output: ``v1/provisioner-response.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["declareProvisioner"], *args, **kwargs) | [
"def",
"declareProvisioner",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_makeApiCall",
"(",
"self",
".",
"funcinfo",
"[",
"\"declareProvisioner\"",
"]",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Update a provisioner
Declare a provisioner, supplying some details about it.
`declareProvisioner` allows updating one or more properties of a provisioner as long as the required scopes are
possessed. For example, a request to update the `aws-provisioner-v1`
provisioner with a body `{description: 'This provisioner is great'}` would require you to have the scope
`queue:declare-provisioner:aws-provisioner-v1#description`.
The term "provisioner" is taken broadly to mean anything with a provisionerId.
This does not necessarily mean there is an associated service performing any
provisioning activity.
This method takes input: ``v1/update-provisioner-request.json#``
This method gives output: ``v1/provisioner-response.json#``
This method is ``experimental`` | [
"Update",
"a",
"provisioner"
] | python | train |
raiden-network/raiden | raiden/network/proxies/token_network_registry.py | https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/network/proxies/token_network_registry.py#L119-L131 | def add_token_without_limits(
self,
token_address: TokenAddress,
) -> Address:
"""
Register token of `token_address` with the token network.
This applies for versions prior to 0.13.0 of raiden-contracts,
since limits were hardcoded into the TokenNetwork contract.
"""
return self._add_token(
token_address=token_address,
additional_arguments=dict(),
) | [
"def",
"add_token_without_limits",
"(",
"self",
",",
"token_address",
":",
"TokenAddress",
",",
")",
"->",
"Address",
":",
"return",
"self",
".",
"_add_token",
"(",
"token_address",
"=",
"token_address",
",",
"additional_arguments",
"=",
"dict",
"(",
")",
",",
")"
] | Register token of `token_address` with the token network.
This applies for versions prior to 0.13.0 of raiden-contracts,
since limits were hardcoded into the TokenNetwork contract. | [
"Register",
"token",
"of",
"token_address",
"with",
"the",
"token",
"network",
".",
"This",
"applies",
"for",
"versions",
"prior",
"to",
"0",
".",
"13",
".",
"0",
"of",
"raiden",
"-",
"contracts",
"since",
"limits",
"were",
"hardcoded",
"into",
"the",
"TokenNetwork",
"contract",
"."
] | python | train |
saltstack/salt | salt/modules/dockermod.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dockermod.py#L521-L556 | def _get_exec_driver():
'''
Get the method to be used in shell commands
'''
contextkey = 'docker.exec_driver'
if contextkey not in __context__:
from_config = __salt__['config.get'](contextkey, None)
# This if block can be removed once we make docker-exec a default
# option, as it is part of the logic in the commented block above.
if from_config is not None:
__context__[contextkey] = from_config
return from_config
# The execution driver was removed in Docker 1.13.1, docker-exec is now
# the default.
driver = info().get('ExecutionDriver', 'docker-exec')
if driver == 'docker-exec':
__context__[contextkey] = driver
elif driver.startswith('lxc-'):
__context__[contextkey] = 'lxc-attach'
elif driver.startswith('native-') and HAS_NSENTER:
__context__[contextkey] = 'nsenter'
elif not driver.strip() and HAS_NSENTER:
log.warning(
'ExecutionDriver from \'docker info\' is blank, falling '
'back to using \'nsenter\'. To squelch this warning, set '
'docker.exec_driver. See the Salt documentation for the '
'docker module for more information.'
)
__context__[contextkey] = 'nsenter'
else:
raise NotImplementedError(
'Unknown docker ExecutionDriver \'{0}\', or didn\'t find '
'command to attach to the container'.format(driver)
)
return __context__[contextkey] | [
"def",
"_get_exec_driver",
"(",
")",
":",
"contextkey",
"=",
"'docker.exec_driver'",
"if",
"contextkey",
"not",
"in",
"__context__",
":",
"from_config",
"=",
"__salt__",
"[",
"'config.get'",
"]",
"(",
"contextkey",
",",
"None",
")",
"# This if block can be removed once we make docker-exec a default",
"# option, as it is part of the logic in the commented block above.",
"if",
"from_config",
"is",
"not",
"None",
":",
"__context__",
"[",
"contextkey",
"]",
"=",
"from_config",
"return",
"from_config",
"# The execution driver was removed in Docker 1.13.1, docker-exec is now",
"# the default.",
"driver",
"=",
"info",
"(",
")",
".",
"get",
"(",
"'ExecutionDriver'",
",",
"'docker-exec'",
")",
"if",
"driver",
"==",
"'docker-exec'",
":",
"__context__",
"[",
"contextkey",
"]",
"=",
"driver",
"elif",
"driver",
".",
"startswith",
"(",
"'lxc-'",
")",
":",
"__context__",
"[",
"contextkey",
"]",
"=",
"'lxc-attach'",
"elif",
"driver",
".",
"startswith",
"(",
"'native-'",
")",
"and",
"HAS_NSENTER",
":",
"__context__",
"[",
"contextkey",
"]",
"=",
"'nsenter'",
"elif",
"not",
"driver",
".",
"strip",
"(",
")",
"and",
"HAS_NSENTER",
":",
"log",
".",
"warning",
"(",
"'ExecutionDriver from \\'docker info\\' is blank, falling '",
"'back to using \\'nsenter\\'. To squelch this warning, set '",
"'docker.exec_driver. See the Salt documentation for the '",
"'docker module for more information.'",
")",
"__context__",
"[",
"contextkey",
"]",
"=",
"'nsenter'",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Unknown docker ExecutionDriver \\'{0}\\', or didn\\'t find '",
"'command to attach to the container'",
".",
"format",
"(",
"driver",
")",
")",
"return",
"__context__",
"[",
"contextkey",
"]"
] | Get the method to be used in shell commands | [
"Get",
"the",
"method",
"to",
"be",
"used",
"in",
"shell",
"commands"
] | python | train |
gwastro/pycbc | pycbc/coordinates.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/coordinates.py#L84-L109 | def cartesian_to_spherical(x, y, z):
""" Maps cartesian coordinates (x,y,z) to spherical coordinates
(rho,phi,theta) where phi is in [0,2*pi] and theta is in [0,pi].
Parameters
----------
x : {numpy.array, float}
X-coordinate.
y : {numpy.array, float}
Y-coordinate.
z : {numpy.array, float}
Z-coordinate.
Returns
-------
rho : {numpy.array, float}
The radial amplitude.
phi : {numpy.array, float}
The azimuthal angle.
theta : {numpy.array, float}
The polar angle.
"""
rho = cartesian_to_spherical_rho(x, y, z)
phi = cartesian_to_spherical_azimuthal(x, y)
theta = cartesian_to_spherical_polar(x, y, z)
return rho, phi, theta | [
"def",
"cartesian_to_spherical",
"(",
"x",
",",
"y",
",",
"z",
")",
":",
"rho",
"=",
"cartesian_to_spherical_rho",
"(",
"x",
",",
"y",
",",
"z",
")",
"phi",
"=",
"cartesian_to_spherical_azimuthal",
"(",
"x",
",",
"y",
")",
"theta",
"=",
"cartesian_to_spherical_polar",
"(",
"x",
",",
"y",
",",
"z",
")",
"return",
"rho",
",",
"phi",
",",
"theta"
] | Maps cartesian coordinates (x,y,z) to spherical coordinates
(rho,phi,theta) where phi is in [0,2*pi] and theta is in [0,pi].
Parameters
----------
x : {numpy.array, float}
X-coordinate.
y : {numpy.array, float}
Y-coordinate.
z : {numpy.array, float}
Z-coordinate.
Returns
-------
rho : {numpy.array, float}
The radial amplitude.
phi : {numpy.array, float}
The azimuthal angle.
theta : {numpy.array, float}
The polar angle. | [
"Maps",
"cartesian",
"coordinates",
"(",
"x",
"y",
"z",
")",
"to",
"spherical",
"coordinates",
"(",
"rho",
"phi",
"theta",
")",
"where",
"phi",
"is",
"in",
"[",
"0",
"2",
"*",
"pi",
"]",
"and",
"theta",
"is",
"in",
"[",
"0",
"pi",
"]",
"."
] | python | train |
cloudmesh/cloudmesh-common | cloudmesh/common/ConfigDict.py | https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/ConfigDict.py#L264-L285 | def load(self, filename):
"""
loads the configuration from the yaml filename
:param filename:
:type filename: string
:return:
"""
# print ("LOAD CONFIGDICT", filename)
self.data = BaseConfigDict(filename=Config.path_expand(filename))
try:
version = str(self.data["meta"]["version"])
if version not in self.versions:
Console.error("The yaml file version must be {}".format(
', '.join(self.versions)))
sys.exit(1)
except Exception as e:
Console.error(
"Your yaml file ~/.cloudmesh/cloudmesh.yaml is not up to date.",
traceflag=False)
Console.error(e.message, traceflag=False)
sys.exit(1) | [
"def",
"load",
"(",
"self",
",",
"filename",
")",
":",
"# print (\"LOAD CONFIGDICT\", filename)",
"self",
".",
"data",
"=",
"BaseConfigDict",
"(",
"filename",
"=",
"Config",
".",
"path_expand",
"(",
"filename",
")",
")",
"try",
":",
"version",
"=",
"str",
"(",
"self",
".",
"data",
"[",
"\"meta\"",
"]",
"[",
"\"version\"",
"]",
")",
"if",
"version",
"not",
"in",
"self",
".",
"versions",
":",
"Console",
".",
"error",
"(",
"\"The yaml file version must be {}\"",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"self",
".",
"versions",
")",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"except",
"Exception",
"as",
"e",
":",
"Console",
".",
"error",
"(",
"\"Your yaml file ~/.cloudmesh/cloudmesh.yaml is not up to date.\"",
",",
"traceflag",
"=",
"False",
")",
"Console",
".",
"error",
"(",
"e",
".",
"message",
",",
"traceflag",
"=",
"False",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | loads the configuration from the yaml filename
:param filename:
:type filename: string
:return: | [
"loads",
"the",
"configuration",
"from",
"the",
"yaml",
"filename",
":",
"param",
"filename",
":",
":",
"type",
"filename",
":",
"string",
":",
"return",
":"
] | python | train |
nerdvegas/rez | src/rez/vendor/version/version.py | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/version/version.py#L861-L874 | def inverse(self):
"""Calculate the inverse of the range.
Returns:
New VersionRange object representing the inverse of this range, or
None if there is no inverse (ie, this range is the any range).
"""
if self.is_any():
return None
else:
bounds = self._inverse(self.bounds)
range = VersionRange(None)
range.bounds = bounds
return range | [
"def",
"inverse",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_any",
"(",
")",
":",
"return",
"None",
"else",
":",
"bounds",
"=",
"self",
".",
"_inverse",
"(",
"self",
".",
"bounds",
")",
"range",
"=",
"VersionRange",
"(",
"None",
")",
"range",
".",
"bounds",
"=",
"bounds",
"return",
"range"
] | Calculate the inverse of the range.
Returns:
New VersionRange object representing the inverse of this range, or
None if there is no inverse (ie, this range is the any range). | [
"Calculate",
"the",
"inverse",
"of",
"the",
"range",
"."
] | python | train |
odlgroup/odl | odl/tomo/geometry/parallel.py | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/geometry/parallel.py#L947-L1013 | def det_axes(self, angles):
"""Return the detector axes tuple at ``angles``.
Parameters
----------
angles : `array-like` or sequence
Euler angles in radians describing the rotation of the detector.
The length of the provided argument (along the first axis in
case of an array) must be equal to the number of Euler angles
in this geometry.
Returns
-------
axes : `numpy.ndarray`
Unit vector(s) along which the detector is aligned.
If ``angles`` is a single pair (or triplet) of Euler angles,
the returned array has shape ``(2, 3)``, otherwise
``broadcast(*angles).shape + (2, 3)``.
Notes
-----
To get an array that enumerates the detector axes in the first
dimension, move the second-to-last axis to the first position:
axes = det_axes(angle)
axes_enumeration = np.moveaxis(deriv, -2, 0)
Examples
--------
Calling the method with a single set of angles produces a
``(2, 3)`` array of vertically stacked vectors:
>>> apart = odl.uniform_partition([0, 0], [np.pi, 2 * np.pi],
... (10, 20))
>>> dpart = odl.uniform_partition([-1, -1], [1, 1], (20, 20))
>>> geom = Parallel3dEulerGeometry(apart, dpart)
>>> geom.det_axes([0, 0])
array([[ 1., 0., 0.],
[ 0., 0., 1.]])
>>> np.allclose(geom.det_axes([np.pi / 2, 0]), [[0, 1, 0],
... [0, 0, 1]])
True
The method is vectorized, i.e., it can be called with multiple
angle parameters at once. Each of the angle arrays can have
different shapes and will be broadcast against each other to
determine the final shape:
>>> # The first axis enumerates the angles
>>> np.allclose(geom.det_axes(([0, np.pi / 2], [0, 0])),
... [[[1, 0, 0],
... [0, 0, 1]],
... [[0, 1, 0],
... [0, 0, 1]]])
True
>>> # Pairs of Euler angles in a (4, 5) array each
>>> geom.det_axes((np.zeros((4, 5)), np.zeros((4, 5)))).shape
(4, 5, 2, 3)
>>> # Using broadcasting for "outer product" type result
>>> geom.det_axes((np.zeros((4, 1)), np.zeros((1, 5)))).shape
(4, 5, 2, 3)
"""
# Transpose to take dot along axis 1
axes = self.rotation_matrix(angles).dot(self.det_axes_init.T)
# `axes` has shape (a, 3, 2), need to roll the last dimensions
# to the second to last place
return np.rollaxis(axes, -1, -2) | [
"def",
"det_axes",
"(",
"self",
",",
"angles",
")",
":",
"# Transpose to take dot along axis 1",
"axes",
"=",
"self",
".",
"rotation_matrix",
"(",
"angles",
")",
".",
"dot",
"(",
"self",
".",
"det_axes_init",
".",
"T",
")",
"# `axes` has shape (a, 3, 2), need to roll the last dimensions",
"# to the second to last place",
"return",
"np",
".",
"rollaxis",
"(",
"axes",
",",
"-",
"1",
",",
"-",
"2",
")"
] | Return the detector axes tuple at ``angles``.
Parameters
----------
angles : `array-like` or sequence
Euler angles in radians describing the rotation of the detector.
The length of the provided argument (along the first axis in
case of an array) must be equal to the number of Euler angles
in this geometry.
Returns
-------
axes : `numpy.ndarray`
Unit vector(s) along which the detector is aligned.
If ``angles`` is a single pair (or triplet) of Euler angles,
the returned array has shape ``(2, 3)``, otherwise
``broadcast(*angles).shape + (2, 3)``.
Notes
-----
To get an array that enumerates the detector axes in the first
dimension, move the second-to-last axis to the first position:
axes = det_axes(angle)
axes_enumeration = np.moveaxis(deriv, -2, 0)
Examples
--------
Calling the method with a single set of angles produces a
``(2, 3)`` array of vertically stacked vectors:
>>> apart = odl.uniform_partition([0, 0], [np.pi, 2 * np.pi],
... (10, 20))
>>> dpart = odl.uniform_partition([-1, -1], [1, 1], (20, 20))
>>> geom = Parallel3dEulerGeometry(apart, dpart)
>>> geom.det_axes([0, 0])
array([[ 1., 0., 0.],
[ 0., 0., 1.]])
>>> np.allclose(geom.det_axes([np.pi / 2, 0]), [[0, 1, 0],
... [0, 0, 1]])
True
The method is vectorized, i.e., it can be called with multiple
angle parameters at once. Each of the angle arrays can have
different shapes and will be broadcast against each other to
determine the final shape:
>>> # The first axis enumerates the angles
>>> np.allclose(geom.det_axes(([0, np.pi / 2], [0, 0])),
... [[[1, 0, 0],
... [0, 0, 1]],
... [[0, 1, 0],
... [0, 0, 1]]])
True
>>> # Pairs of Euler angles in a (4, 5) array each
>>> geom.det_axes((np.zeros((4, 5)), np.zeros((4, 5)))).shape
(4, 5, 2, 3)
>>> # Using broadcasting for "outer product" type result
>>> geom.det_axes((np.zeros((4, 1)), np.zeros((1, 5)))).shape
(4, 5, 2, 3) | [
"Return",
"the",
"detector",
"axes",
"tuple",
"at",
"angles",
"."
] | python | train |
peri-source/peri | peri/comp/objs.py | https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/comp/objs.py#L160-L163 | def _tile(self, n):
"""Get the update tile surrounding particle `n` """
pos = self._trans(self.pos[n])
return Tile(pos, pos).pad(self.support_pad) | [
"def",
"_tile",
"(",
"self",
",",
"n",
")",
":",
"pos",
"=",
"self",
".",
"_trans",
"(",
"self",
".",
"pos",
"[",
"n",
"]",
")",
"return",
"Tile",
"(",
"pos",
",",
"pos",
")",
".",
"pad",
"(",
"self",
".",
"support_pad",
")"
] | Get the update tile surrounding particle `n` | [
"Get",
"the",
"update",
"tile",
"surrounding",
"particle",
"n"
] | python | valid |
OpenTreeOfLife/peyotl | peyotl/nexson_syntax/__init__.py | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/nexson_syntax/__init__.py#L973-L1053 | def nexson_frag_write_newick(out,
edges,
nodes,
otu_group,
label_key,
leaf_labels,
root_id,
needs_quotes_pattern=NEWICK_NEEDING_QUOTING,
ingroup_id=None,
bracket_ingroup=False,
with_edge_lengths=True):
"""`label_key` is a string (a key in the otu object) or a callable that takes two arguments:
the node, and the otu (which may be None for an internal node)
If `leaf_labels` is not None, it shoulr be a (list, dict) pair which will be filled. The list will
hold the order encountered,
and the dict will map name to index in the list
"""
unlabeled_counter = 0
curr_node_id = root_id
assert curr_node_id
curr_edge = None
curr_sib_list = []
curr_stack = []
going_tipward = True
while True:
if going_tipward:
outgoing_edges = edges.get(curr_node_id)
if outgoing_edges is None:
curr_node = nodes[curr_node_id]
assert curr_node_id is not None
assert curr_node_id is not None
unlabeled_counter = _write_newick_leaf_label(out,
curr_node_id,
curr_node,
otu_group,
label_key,
leaf_labels,
unlabeled_counter,
needs_quotes_pattern)
if with_edge_lengths:
_write_newick_edge_len(out, curr_edge)
going_tipward = False
else:
te = [(i, e) for i, e in outgoing_edges.items()]
te.sort() # produce a consistent rotation... Necessary?
if bracket_ingroup and (ingroup_id == curr_node_id):
out.write('[pre-ingroup-marker]')
out.write('(')
next_p = te.pop(0)
curr_stack.append((curr_edge, curr_node_id, curr_sib_list))
curr_edge, curr_sib_list = next_p[1], te
curr_node_id = curr_edge['@target']
if not going_tipward:
next_up_edge_id = None
while True:
if curr_sib_list:
out.write(',')
next_up_edge_id, next_up_edge = curr_sib_list.pop(0)
break
if curr_stack:
curr_edge, curr_node_id, curr_sib_list = curr_stack.pop(-1)
curr_node = nodes[curr_node_id]
out.write(')')
_write_newick_internal_label(out,
curr_node_id,
curr_node,
otu_group,
label_key,
needs_quotes_pattern)
if with_edge_lengths:
_write_newick_edge_len(out, curr_edge)
if bracket_ingroup and (ingroup_id == curr_node_id):
out.write('[post-ingroup-marker]')
else:
break
if next_up_edge_id is None:
break
curr_edge = next_up_edge
curr_node_id = curr_edge['@target']
going_tipward = True
out.write(';') | [
"def",
"nexson_frag_write_newick",
"(",
"out",
",",
"edges",
",",
"nodes",
",",
"otu_group",
",",
"label_key",
",",
"leaf_labels",
",",
"root_id",
",",
"needs_quotes_pattern",
"=",
"NEWICK_NEEDING_QUOTING",
",",
"ingroup_id",
"=",
"None",
",",
"bracket_ingroup",
"=",
"False",
",",
"with_edge_lengths",
"=",
"True",
")",
":",
"unlabeled_counter",
"=",
"0",
"curr_node_id",
"=",
"root_id",
"assert",
"curr_node_id",
"curr_edge",
"=",
"None",
"curr_sib_list",
"=",
"[",
"]",
"curr_stack",
"=",
"[",
"]",
"going_tipward",
"=",
"True",
"while",
"True",
":",
"if",
"going_tipward",
":",
"outgoing_edges",
"=",
"edges",
".",
"get",
"(",
"curr_node_id",
")",
"if",
"outgoing_edges",
"is",
"None",
":",
"curr_node",
"=",
"nodes",
"[",
"curr_node_id",
"]",
"assert",
"curr_node_id",
"is",
"not",
"None",
"assert",
"curr_node_id",
"is",
"not",
"None",
"unlabeled_counter",
"=",
"_write_newick_leaf_label",
"(",
"out",
",",
"curr_node_id",
",",
"curr_node",
",",
"otu_group",
",",
"label_key",
",",
"leaf_labels",
",",
"unlabeled_counter",
",",
"needs_quotes_pattern",
")",
"if",
"with_edge_lengths",
":",
"_write_newick_edge_len",
"(",
"out",
",",
"curr_edge",
")",
"going_tipward",
"=",
"False",
"else",
":",
"te",
"=",
"[",
"(",
"i",
",",
"e",
")",
"for",
"i",
",",
"e",
"in",
"outgoing_edges",
".",
"items",
"(",
")",
"]",
"te",
".",
"sort",
"(",
")",
"# produce a consistent rotation... Necessary?",
"if",
"bracket_ingroup",
"and",
"(",
"ingroup_id",
"==",
"curr_node_id",
")",
":",
"out",
".",
"write",
"(",
"'[pre-ingroup-marker]'",
")",
"out",
".",
"write",
"(",
"'('",
")",
"next_p",
"=",
"te",
".",
"pop",
"(",
"0",
")",
"curr_stack",
".",
"append",
"(",
"(",
"curr_edge",
",",
"curr_node_id",
",",
"curr_sib_list",
")",
")",
"curr_edge",
",",
"curr_sib_list",
"=",
"next_p",
"[",
"1",
"]",
",",
"te",
"curr_node_id",
"=",
"curr_edge",
"[",
"'@target'",
"]",
"if",
"not",
"going_tipward",
":",
"next_up_edge_id",
"=",
"None",
"while",
"True",
":",
"if",
"curr_sib_list",
":",
"out",
".",
"write",
"(",
"','",
")",
"next_up_edge_id",
",",
"next_up_edge",
"=",
"curr_sib_list",
".",
"pop",
"(",
"0",
")",
"break",
"if",
"curr_stack",
":",
"curr_edge",
",",
"curr_node_id",
",",
"curr_sib_list",
"=",
"curr_stack",
".",
"pop",
"(",
"-",
"1",
")",
"curr_node",
"=",
"nodes",
"[",
"curr_node_id",
"]",
"out",
".",
"write",
"(",
"')'",
")",
"_write_newick_internal_label",
"(",
"out",
",",
"curr_node_id",
",",
"curr_node",
",",
"otu_group",
",",
"label_key",
",",
"needs_quotes_pattern",
")",
"if",
"with_edge_lengths",
":",
"_write_newick_edge_len",
"(",
"out",
",",
"curr_edge",
")",
"if",
"bracket_ingroup",
"and",
"(",
"ingroup_id",
"==",
"curr_node_id",
")",
":",
"out",
".",
"write",
"(",
"'[post-ingroup-marker]'",
")",
"else",
":",
"break",
"if",
"next_up_edge_id",
"is",
"None",
":",
"break",
"curr_edge",
"=",
"next_up_edge",
"curr_node_id",
"=",
"curr_edge",
"[",
"'@target'",
"]",
"going_tipward",
"=",
"True",
"out",
".",
"write",
"(",
"';'",
")"
] | `label_key` is a string (a key in the otu object) or a callable that takes two arguments:
the node, and the otu (which may be None for an internal node)
If `leaf_labels` is not None, it should be a (list, dict) pair which will be filled. The list will
hold the order encountered,
and the dict will map name to index in the list | [
"label_key",
"is",
"a",
"string",
"(",
"a",
"key",
"in",
"the",
"otu",
"object",
")",
"or",
"a",
"callable",
"that",
"takes",
"two",
"arguments",
":",
"the",
"node",
"and",
"the",
"otu",
"(",
"which",
"may",
"be",
"None",
"for",
"an",
"internal",
"node",
")",
"If",
"leaf_labels",
"is",
"not",
"None",
"it",
"shoulr",
"be",
"a",
"(",
"list",
"dict",
")",
"pair",
"which",
"will",
"be",
"filled",
".",
"The",
"list",
"will",
"hold",
"the",
"order",
"encountered",
"and",
"the",
"dict",
"will",
"map",
"name",
"to",
"index",
"in",
"the",
"list"
] | python | train |
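A minimal usage sketch for the newick writer documented above. The edge/node/otu dictionaries below are hand-built assumptions that only mirror the fields the function reads ('@target' on edges, otus looked up through label_key); real NexSON documents are larger, and the exact otu property name is a guess.

from io import StringIO   # io.StringIO shown; under Python 2 a cStringIO sink would be the natural choice
# Hypothetical two-leaf tree: root 'n0' with child edges to leaves 'n1' and 'n2'.
edges = {'n0': {'e1': {'@target': 'n1'}, 'e2': {'@target': 'n2'}}}
nodes = {'n0': {}, 'n1': {'@otu': 'o1'}, 'n2': {'@otu': 'o2'}}
otus = {'o1': {'^ot:originalLabel': 'A'}, 'o2': {'^ot:originalLabel': 'B'}}
out = StringIO()
leaf_labels = ([], {})  # order list + name->index dict, filled during the traversal
nexson_frag_write_newick(out, edges, nodes, otus,
                         label_key='^ot:originalLabel',
                         leaf_labels=leaf_labels,
                         root_id='n0',
                         with_edge_lengths=False)
print(out.getvalue())  # something like '(A,B);' depending on rotation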
alantygel/ckanext-semantictags | ckanext/semantictags/db.py | https://github.com/alantygel/ckanext-semantictags/blob/10bb31d29f34b2b5a6feae693961842f93007ce1/ckanext/semantictags/db.py#L252-L260 | def list_unique(cls):
'''Return all unique namespaces
:returns: a list of all predicates
:rtype: list of ckan.model.semantictag.Predicate objects
'''
query = meta.Session.query(Predicate).distinct(Predicate.namespace)
return query.all() | [
"def",
"list_unique",
"(",
"cls",
")",
":",
"query",
"=",
"meta",
".",
"Session",
".",
"query",
"(",
"Predicate",
")",
".",
"distinct",
"(",
"Predicate",
".",
"namespace",
")",
"return",
"query",
".",
"all",
"(",
")"
] | Return all unique namespaces
:returns: a list of all predicates
:rtype: list of ckan.model.semantictag.Predicate objects | [
"Return",
"all",
"unique",
"namespaces"
] | python | train |
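A short, hedged call-site illustration for list_unique above; it assumes the method is registered as a classmethod (as its cls argument suggests) and that CKAN has already configured meta.Session.

# One Predicate row per distinct namespace, e.g. for building a dropdown:
for predicate in Predicate.list_unique():
    print(predicate.namespace)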
lrq3000/pyFileFixity | pyFileFixity/lib/brownanrs/rs.py | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/rs.py#L248-L371 | def decode(self, r, nostrip=False, k=None, erasures_pos=None, only_erasures=False, return_string=True):
'''Given a received string or byte array or list r of values between
0 and gf2_charac, attempts to decode it. If it's a valid codeword, or
if there are no more than (n-k)/2 errors, the repaired message is returned.
A message always has k bytes, if a message contained less it is left
padded with null bytes. When decoded, these leading null bytes are
stripped, but that can cause problems if decoding binary data. When
nostrip is True, messages returned are always k bytes long. This is
useful to make sure no data is lost when decoding binary data.
Theoretically, we have R(x) = C(x) + E(x) + V(x), where R is the received message, C is the correct message without errors nor erasures, E are the errors and V the erasures. Thus the goal is to compute E and V from R, so that we can compute: C(x) = R(x) - E(x) - V(x), and then we have our original message! The main problem of decoding is to solve the so-called Key Equation, here we use Berlekamp-Massey.
When stated in the language of spectral estimation, decoding consists of a Fourier transform (syndrome computer), followed by a spectral analysis (Berlekamp-Massey or Euclidean algorithm), followed by an inverse Fourier transform (Chien search).
(see Blahut, "Algebraic Codes for Data Transmission", 2003, chapter 7.6 Decoding in Time Domain).
'''
n = self.n
if not k: k = self.k
# If we were given a string, convert to a list (important to support fields above 2^8)
if isinstance(r, _str):
r = [ord(x) for x in r]
# Turn r into a polynomial
rp = Polynomial([GF2int(x) for x in r])
if erasures_pos:
# Convert string positions to coefficients positions for the algebra to work (see _find_erasures_locator(), ecc characters represent the first coefficients while the message is put last, so it's exactly the reverse of the string positions where the message is first and the ecc is last, thus it's just like if you read the message+ecc string in reverse)
erasures_pos = [len(r)-1-x for x in erasures_pos]
# Set erasures characters to null bytes
# Note that you can just leave the original characters as they are, you don't need to set erased characters to null bytes for the decoding to work, but note that it won't help either (ie, fake erasures, meaning characters that were detected as erasures but actually aren't, will still "consume" one ecc symbol, even if you don't set them to null byte, this is because the syndrome is limited to n-k and thus you can't decode above this bound without a clever trick).
# Example string containing a fake erasure: "hello sam" -> "ooooo sam" with erasures_pos = [0, 1, 2, 3, 4]. Here in fact the last erasure is fake because the original character also was "o" so if we detect "o" as an erasure, we will end up with one fake erasure. But setting it to null byte or not, it will still use up one ecc symbol, it will always be counted as a real erasure. If you're below the n-k bound, then the decoding will be ok. If you're above, then you can't do anything, the decoding won't work. Maybe todo: try to find a clever list decoding algorithm to account for fake erasures....
# Note: commented out so that the resulting omega (error evaluator polynomial) is the same as the erasure evaluator polynomial when decoding the same number of errors or erasures (ie, decoding 3 erasures only will give the same result as 3 errors only, with of course the errors/erasures on the same characters).
#for erasure in erasures_pos:
#rp[erasure] = GF2int(0)
# Compute the syndromes:
sz = self._syndromes(rp, k=k)
if sz.coefficients.count(GF2int(0)) == len(sz): # the code is already valid, there's nothing to do
# The last n-k bytes are parity
ret = r[:-(n-k)]
ecc = r[-(n-k):]
if not nostrip:
ret = self._list_lstrip(r[:-(n-k)], 0)
if return_string and self.gf2_charac < 256:
ret = self._list2str(ret)
ecc = self._list2str(ecc)
return ret, ecc
# Erasures locator polynomial computation
erasures_loc = None
erasures_eval = None
erasures_count = 0
if erasures_pos:
erasures_count = len(erasures_pos)
# Compute the erasure locator polynomial
erasures_loc = self._find_erasures_locator(erasures_pos)
# Compute the erasure evaluator polynomial
erasures_eval = self._find_error_evaluator(sz, erasures_loc, k=k)
if only_erasures:
sigma = erasures_loc
omega = erasures_eval
else:
# Find the error locator polynomial and error evaluator polynomial
# using the Berlekamp-Massey algorithm
# if erasures were supplied, BM will generate the errata (errors-and-erasures) locator and evaluator polynomials
sigma, omega = self._berlekamp_massey(sz, k=k, erasures_loc=erasures_loc, erasures_eval=erasures_eval, erasures_count=erasures_count)
omega = self._find_error_evaluator(sz, sigma, k=k) # we want to make sure that omega is correct (we know that sigma is always correct, but omega not really)
# Now use Chien's procedure to find the error locations
# j is an array of integers representing the positions of the errors, 0
# being the rightmost byte
# X is a corresponding array of GF(2^8) values where X_i = alpha^(j_i)
X, j = self._chien_search(sigma)
# Sanity check: Cannot guarantee correct decoding of more than n-k errata (Singleton Bound, n-k being the minimum distance), and we cannot even check if it's correct (the syndrome will always be all 0 if we try to decode above the bound), thus it's better to just return the input as-is.
if len(j) > n-k:
ret = r[:-(n-k)]
ecc = r[-(n-k):]
if not nostrip:
ret = self._list_lstrip(r[:-(n-k)], 0)
if return_string and self.gf2_charac < 256:
ret = self._list2str(ret)
ecc = self._list2str(ecc)
return ret, ecc
# And finally, find the error magnitudes with Forney's Formula
# Y is an array of GF(2^8) values corresponding to the error magnitude
# at the position given by the j array
Y = self._forney(omega, X)
# Put the error and locations together to form the error polynomial
# Note that an alternative would be to compute the error-spectrum polynomial E(x) which satisfies E(x)*Sigma(x) = 0 (mod x^n - 1) = Omega(x)(x^n - 1) -- see Blahut, Algebraic codes for data transmission
Elist = [GF2int(0)] * self.gf2_charac
if len(Y) >= len(j): # failsafe: if the number of erratas is higher than the number of coefficients in the magnitude polynomial, we failed!
for i in _range(self.gf2_charac): # FIXME? is this really necessary to go to self.gf2_charac? len(rp) wouldn't be just enough? (since the goal is anyway to subtract E from rp)
if i in j:
Elist[i] = Y[j.index(i)]
E = Polynomial( Elist[::-1] ) # reverse the list because we used the coefficient degrees (j) instead of the error positions
else:
E = Polynomial()
# And we get our real codeword!
c = rp - E # Remember what we wrote above: R(x) = C(x) + E(x), so here to get back the original codeword C(x) = R(x) - E(x) ! (V(x) the erasures are here is included inside E(x))
if len(c) > len(r): c = rp # failsafe: in case the correction went totally wrong (we repaired padded null bytes instead of the message! thus we end up with a longer message than what we should have), then we just return the uncorrected message. Note: we compare the length of c with r on purpose, that's not an error: if we compare with rp, if the first few characters were erased (null bytes) in r, then in rp the Polynomial will automatically skip them, thus the length will always be smaller in that case.
# Split the polynomial into two parts: the corrected message and the corrected ecc
ret = c.coefficients[:-(n-k)]
ecc = c.coefficients[-(n-k):]
if nostrip:
# Polynomial objects don't store leading 0 coefficients, so we
# actually need to pad this to k bytes
ret = self._list_rjust(ret, k, 0)
if return_string and self.gf2_charac < 256: # automatically disable return_string if the field is above 255 (chr would fail, so it's up to the user to define the mapping)
# Form it back into a string
ret = self._list2str(ret)
ecc = self._list2str(ecc)
return ret, ecc | [
"def",
"decode",
"(",
"self",
",",
"r",
",",
"nostrip",
"=",
"False",
",",
"k",
"=",
"None",
",",
"erasures_pos",
"=",
"None",
",",
"only_erasures",
"=",
"False",
",",
"return_string",
"=",
"True",
")",
":",
"n",
"=",
"self",
".",
"n",
"if",
"not",
"k",
":",
"k",
"=",
"self",
".",
"k",
"# If we were given a string, convert to a list (important to support fields above 2^8)",
"if",
"isinstance",
"(",
"r",
",",
"_str",
")",
":",
"r",
"=",
"[",
"ord",
"(",
"x",
")",
"for",
"x",
"in",
"r",
"]",
"# Turn r into a polynomial",
"rp",
"=",
"Polynomial",
"(",
"[",
"GF2int",
"(",
"x",
")",
"for",
"x",
"in",
"r",
"]",
")",
"if",
"erasures_pos",
":",
"# Convert string positions to coefficients positions for the algebra to work (see _find_erasures_locator(), ecc characters represent the first coefficients while the message is put last, so it's exactly the reverse of the string positions where the message is first and the ecc is last, thus it's just like if you read the message+ecc string in reverse)",
"erasures_pos",
"=",
"[",
"len",
"(",
"r",
")",
"-",
"1",
"-",
"x",
"for",
"x",
"in",
"erasures_pos",
"]",
"# Set erasures characters to null bytes",
"# Note that you can just leave the original characters as they are, you don't need to set erased characters to null bytes for the decoding to work, but note that it won't help either (ie, fake erasures, meaning characters that were detected as erasures but actually aren't, will still \"consume\" one ecc symbol, even if you don't set them to null byte, this is because the syndrome is limited to n-k and thus you can't decode above this bound without a clever trick).",
"# Example string containing a fake erasure: \"hello sam\" -> \"ooooo sam\" with erasures_pos = [0, 1, 2, 3, 4]. Here in fact the last erasure is fake because the original character also was \"o\" so if we detect \"o\" as an erasure, we will end up with one fake erasure. But setting it to null byte or not, it will still use up one ecc symbol, it will always be counted as a real erasure. If you're below the n-k bound, then the doceding will be ok. If you're above, then you can't do anything, the decoding won't work. Maybe todo: try to find a clever list decoding algorithm to account for fake erasures....",
"# Note: commented out so that the resulting omega (error evaluator polynomial) is the same as the erasure evaluator polynomial when decoding the same number of errors or erasures (ie, decoding 3 erasures only will give the same result as 3 errors only, with of course the errors/erasures on the same characters).",
"#for erasure in erasures_pos:",
"#rp[erasure] = GF2int(0)",
"# Compute the syndromes:",
"sz",
"=",
"self",
".",
"_syndromes",
"(",
"rp",
",",
"k",
"=",
"k",
")",
"if",
"sz",
".",
"coefficients",
".",
"count",
"(",
"GF2int",
"(",
"0",
")",
")",
"==",
"len",
"(",
"sz",
")",
":",
"# the code is already valid, there's nothing to do",
"# The last n-k bytes are parity",
"ret",
"=",
"r",
"[",
":",
"-",
"(",
"n",
"-",
"k",
")",
"]",
"ecc",
"=",
"r",
"[",
"-",
"(",
"n",
"-",
"k",
")",
":",
"]",
"if",
"not",
"nostrip",
":",
"ret",
"=",
"self",
".",
"_list_lstrip",
"(",
"r",
"[",
":",
"-",
"(",
"n",
"-",
"k",
")",
"]",
",",
"0",
")",
"if",
"return_string",
"and",
"self",
".",
"gf2_charac",
"<",
"256",
":",
"ret",
"=",
"self",
".",
"_list2str",
"(",
"ret",
")",
"ecc",
"=",
"self",
".",
"_list2str",
"(",
"ecc",
")",
"return",
"ret",
",",
"ecc",
"# Erasures locator polynomial computation",
"erasures_loc",
"=",
"None",
"erasures_eval",
"=",
"None",
"erasures_count",
"=",
"0",
"if",
"erasures_pos",
":",
"erasures_count",
"=",
"len",
"(",
"erasures_pos",
")",
"# Compute the erasure locator polynomial",
"erasures_loc",
"=",
"self",
".",
"_find_erasures_locator",
"(",
"erasures_pos",
")",
"# Compute the erasure evaluator polynomial",
"erasures_eval",
"=",
"self",
".",
"_find_error_evaluator",
"(",
"sz",
",",
"erasures_loc",
",",
"k",
"=",
"k",
")",
"if",
"only_erasures",
":",
"sigma",
"=",
"erasures_loc",
"omega",
"=",
"erasures_eval",
"else",
":",
"# Find the error locator polynomial and error evaluator polynomial",
"# using the Berlekamp-Massey algorithm",
"# if erasures were supplied, BM will generate the errata (errors-and-erasures) locator and evaluator polynomials",
"sigma",
",",
"omega",
"=",
"self",
".",
"_berlekamp_massey",
"(",
"sz",
",",
"k",
"=",
"k",
",",
"erasures_loc",
"=",
"erasures_loc",
",",
"erasures_eval",
"=",
"erasures_eval",
",",
"erasures_count",
"=",
"erasures_count",
")",
"omega",
"=",
"self",
".",
"_find_error_evaluator",
"(",
"sz",
",",
"sigma",
",",
"k",
"=",
"k",
")",
"# we want to make sure that omega is correct (we know that sigma is always correct, but omega not really)",
"# Now use Chien's procedure to find the error locations",
"# j is an array of integers representing the positions of the errors, 0",
"# being the rightmost byte",
"# X is a corresponding array of GF(2^8) values where X_i = alpha^(j_i)",
"X",
",",
"j",
"=",
"self",
".",
"_chien_search",
"(",
"sigma",
")",
"# Sanity check: Cannot guarantee correct decoding of more than n-k errata (Singleton Bound, n-k being the minimum distance), and we cannot even check if it's correct (the syndrome will always be all 0 if we try to decode above the bound), thus it's better to just return the input as-is.",
"if",
"len",
"(",
"j",
")",
">",
"n",
"-",
"k",
":",
"ret",
"=",
"r",
"[",
":",
"-",
"(",
"n",
"-",
"k",
")",
"]",
"ecc",
"=",
"r",
"[",
"-",
"(",
"n",
"-",
"k",
")",
":",
"]",
"if",
"not",
"nostrip",
":",
"ret",
"=",
"self",
".",
"_list_lstrip",
"(",
"r",
"[",
":",
"-",
"(",
"n",
"-",
"k",
")",
"]",
",",
"0",
")",
"if",
"return_string",
"and",
"self",
".",
"gf2_charac",
"<",
"256",
":",
"ret",
"=",
"self",
".",
"_list2str",
"(",
"ret",
")",
"ecc",
"=",
"self",
".",
"_list2str",
"(",
"ecc",
")",
"return",
"ret",
",",
"ecc",
"# And finally, find the error magnitudes with Forney's Formula",
"# Y is an array of GF(2^8) values corresponding to the error magnitude",
"# at the position given by the j array",
"Y",
"=",
"self",
".",
"_forney",
"(",
"omega",
",",
"X",
")",
"# Put the error and locations together to form the error polynomial",
"# Note that an alternative would be to compute the error-spectrum polynomial E(x) which satisfies E(x)*Sigma(x) = 0 (mod x^n - 1) = Omega(x)(x^n - 1) -- see Blahut, Algebraic codes for data transmission",
"Elist",
"=",
"[",
"GF2int",
"(",
"0",
")",
"]",
"*",
"self",
".",
"gf2_charac",
"if",
"len",
"(",
"Y",
")",
">=",
"len",
"(",
"j",
")",
":",
"# failsafe: if the number of erratas is higher than the number of coefficients in the magnitude polynomial, we failed!",
"for",
"i",
"in",
"_range",
"(",
"self",
".",
"gf2_charac",
")",
":",
"# FIXME? is this really necessary to go to self.gf2_charac? len(rp) wouldn't be just enough? (since the goal is anyway to substract E to rp)",
"if",
"i",
"in",
"j",
":",
"Elist",
"[",
"i",
"]",
"=",
"Y",
"[",
"j",
".",
"index",
"(",
"i",
")",
"]",
"E",
"=",
"Polynomial",
"(",
"Elist",
"[",
":",
":",
"-",
"1",
"]",
")",
"# reverse the list because we used the coefficient degrees (j) instead of the error positions",
"else",
":",
"E",
"=",
"Polynomial",
"(",
")",
"# And we get our real codeword!",
"c",
"=",
"rp",
"-",
"E",
"# Remember what we wrote above: R(x) = C(x) + E(x), so here to get back the original codeword C(x) = R(x) - E(x) ! (V(x) the erasures are here is included inside E(x))",
"if",
"len",
"(",
"c",
")",
">",
"len",
"(",
"r",
")",
":",
"c",
"=",
"rp",
"# failsafe: in case the correction went totally wrong (we repaired padded null bytes instead of the message! thus we end up with a longer message than what we should have), then we just return the uncorrected message. Note: we compare the length of c with r on purpose, that's not an error: if we compare with rp, if the first few characters were erased (null bytes) in r, then in rp the Polynomial will automatically skip them, thus the length will always be smaller in that case.",
"# Split the polynomial into two parts: the corrected message and the corrected ecc",
"ret",
"=",
"c",
".",
"coefficients",
"[",
":",
"-",
"(",
"n",
"-",
"k",
")",
"]",
"ecc",
"=",
"c",
".",
"coefficients",
"[",
"-",
"(",
"n",
"-",
"k",
")",
":",
"]",
"if",
"nostrip",
":",
"# Polynomial objects don't store leading 0 coefficients, so we",
"# actually need to pad this to k bytes",
"ret",
"=",
"self",
".",
"_list_rjust",
"(",
"ret",
",",
"k",
",",
"0",
")",
"if",
"return_string",
"and",
"self",
".",
"gf2_charac",
"<",
"256",
":",
"# automatically disable return_string if the field is above 255 (chr would fail, so it's up to the user to define the mapping)",
"# Form it back into a string ",
"ret",
"=",
"self",
".",
"_list2str",
"(",
"ret",
")",
"ecc",
"=",
"self",
".",
"_list2str",
"(",
"ecc",
")",
"return",
"ret",
",",
"ecc"
] | Given a received string or byte array or list r of values between
0 and gf2_charac, attempts to decode it. If it's a valid codeword, or
if there are no more than (n-k)/2 errors, the repaired message is returned.
A message always has k bytes, if a message contained less it is left
padded with null bytes. When decoded, these leading null bytes are
stripped, but that can cause problems if decoding binary data. When
nostrip is True, messages returned are always k bytes long. This is
useful to make sure no data is lost when decoding binary data.
Theoretically, we have R(x) = C(x) + E(x) + V(x), where R is the received message, C is the correct message without errors nor erasures, E are the errors and V the erasures. Thus the goal is to compute E and V from R, so that we can compute: C(x) = R(x) - E(x) - V(x), and then we have our original message! The main problem of decoding is to solve the so-called Key Equation, here we use Berlekamp-Massey.
When stated in the language of spectral estimation, decoding consists of a Fourier transform (syndrome computer), followed by a spectral analysis (Berlekamp-Massey or Euclidean algorithm), followed by an inverse Fourier transform (Chien search).
(see Blahut, "Algebraic Codes for Data Transmission", 2003, chapter 7.6 Decoding in Time Domain). | [
"Given",
"a",
"received",
"string",
"or",
"byte",
"array",
"or",
"list",
"r",
"of",
"values",
"between",
"0",
"and",
"gf2_charac",
"attempts",
"to",
"decode",
"it",
".",
"If",
"it",
"s",
"a",
"valid",
"codeword",
"or",
"if",
"there",
"are",
"no",
"more",
"than",
"(",
"n",
"-",
"k",
")",
"/",
"2",
"errors",
"the",
"repaired",
"message",
"is",
"returned",
"."
] | python | train |
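An illustrative round trip with the decoder above, assuming the enclosing class is instantiated as RSCoder(n, k) and offers the companion encode() from the same module; the values are a sketch rather than captured output.

coder = RSCoder(255, 223)           # n=255 codeword symbols, k=223 message symbols
codeword = coder.encode('hello world')
# Corrupt two symbols, well under the (n-k)/2 = 16 error-correction bound:
corrupted = 'XX' + codeword[2:]
decoded, ecc = coder.decode(corrupted)
assert decoded == 'hello world'     # leading null padding is stripped unless nostrip=True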
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_firmware_ext.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_firmware_ext.py#L53-L65 | def show_firmware_version_output_show_firmware_version_os_version(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_firmware_version = ET.Element("show_firmware_version")
config = show_firmware_version
output = ET.SubElement(show_firmware_version, "output")
show_firmware_version = ET.SubElement(output, "show-firmware-version")
os_version = ET.SubElement(show_firmware_version, "os-version")
os_version.text = kwargs.pop('os_version')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"show_firmware_version_output_show_firmware_version_os_version",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"show_firmware_version",
"=",
"ET",
".",
"Element",
"(",
"\"show_firmware_version\"",
")",
"config",
"=",
"show_firmware_version",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"show_firmware_version",
",",
"\"output\"",
")",
"show_firmware_version",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"show-firmware-version\"",
")",
"os_version",
"=",
"ET",
".",
"SubElement",
"(",
"show_firmware_version",
",",
"\"os-version\"",
")",
"os_version",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'os_version'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
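Because the method above is auto-generated and only builds XML before handing it to a callback, one way to inspect what it produces is to pass a callback that returns the element tree; 'obj' and the version string here are placeholders, not part of the pynos API.

import xml.etree.ElementTree as ET

def debug_callback(config):
    return ET.tostring(config)   # return the serialized <config> instead of sending it

# obj is a hypothetical instance of the class that defines this method
xml_bytes = obj.show_firmware_version_output_show_firmware_version_os_version(
    os_version='7.0.1', callback=debug_callback)
print(xml_bytes)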
senaite/senaite.core | bika/lims/browser/analyses/view.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/analyses/view.py#L296-L320 | def is_result_edition_allowed(self, analysis_brain):
"""Checks if the edition of the result field is allowed
:param analysis_brain: Brain that represents an analysis
:return: True if the user can edit the result field, otherwise False
"""
# Always check general edition first
if not self.is_analysis_edition_allowed(analysis_brain):
return False
# Get the analysis object
obj = api.get_object(analysis_brain)
if not obj.getDetectionLimitOperand():
# This is a regular result (not a detection limit)
return True
# Detection limit selector is enabled in the Analysis Service
if obj.getDetectionLimitSelector():
# Manual detection limit entry is *not* allowed
if not obj.getAllowManualDetectionLimit():
return False
return True | [
"def",
"is_result_edition_allowed",
"(",
"self",
",",
"analysis_brain",
")",
":",
"# Always check general edition first",
"if",
"not",
"self",
".",
"is_analysis_edition_allowed",
"(",
"analysis_brain",
")",
":",
"return",
"False",
"# Get the ananylsis object",
"obj",
"=",
"api",
".",
"get_object",
"(",
"analysis_brain",
")",
"if",
"not",
"obj",
".",
"getDetectionLimitOperand",
"(",
")",
":",
"# This is a regular result (not a detection limit)",
"return",
"True",
"# Detection limit selector is enabled in the Analysis Service",
"if",
"obj",
".",
"getDetectionLimitSelector",
"(",
")",
":",
"# Manual detection limit entry is *not* allowed",
"if",
"not",
"obj",
".",
"getAllowManualDetectionLimit",
"(",
")",
":",
"return",
"False",
"return",
"True"
] | Checks if the edition of the result field is allowed
:param analysis_brain: Brain that represents an analysis
:return: True if the user can edit the result field, otherwise False | [
"Checks",
"if",
"the",
"edition",
"of",
"the",
"result",
"field",
"is",
"allowed"
] | python | train |
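The branch structure of is_result_edition_allowed can be easier to follow as a condensed, standalone sketch; the boolean parameter names below are invented stand-ins for the object calls made in the real method.

def can_edit_result(edition_allowed, has_dl_operand, dl_selector, manual_dl_allowed):
    # General edition gate first, then the detection-limit rules.
    if not edition_allowed:
        return False
    if not has_dl_operand:
        return True          # a regular result, not a detection limit
    if dl_selector and not manual_dl_allowed:
        return False         # selector enabled but manual entry forbidden
    return True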
GoogleCloudPlatform/appengine-pipelines | python/src/pipeline/pipeline.py | https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1989-L2359 | def evaluate(self, pipeline_key, purpose=None, attempt=0):
"""Evaluates the given Pipeline and enqueues sub-stages for execution.
Args:
pipeline_key: The db.Key or stringified key of the _PipelineRecord to run.
purpose: Why evaluate was called ('start', 'finalize', or 'abort').
attempt: The attempt number that should be tried.
"""
After._thread_init()
InOrder._thread_init()
InOrder._local._activated = False
if not isinstance(pipeline_key, db.Key):
pipeline_key = db.Key(pipeline_key)
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
logging.error('Pipeline ID "%s" does not exist.', pipeline_key.name())
return
if pipeline_record.status not in (
_PipelineRecord.WAITING, _PipelineRecord.RUN):
# If we're attempting to abort an already aborted pipeline,
# we silently advance. #50
if (pipeline_record.status == _PipelineRecord.ABORTED and
purpose == _BarrierRecord.ABORT):
return
logging.error('Pipeline ID "%s" in bad state for purpose "%s": "%s"',
pipeline_key.name(), purpose or _BarrierRecord.START,
pipeline_record.status)
return
params = pipeline_record.params
root_pipeline_key = \
_PipelineRecord.root_pipeline.get_value_for_datastore(pipeline_record)
default_slot_key = db.Key(params['output_slots']['default'])
default_slot_record, root_pipeline_record = db.get([
default_slot_key, root_pipeline_key])
if default_slot_record is None:
logging.error('Pipeline ID "%s" default slot "%s" does not exist.',
pipeline_key.name(), default_slot_key)
return
if root_pipeline_record is None:
logging.error('Pipeline ID "%s" root pipeline ID "%s" is missing.',
pipeline_key.name(), root_pipeline_key.name())
return
# Always finalize if we're aborting so pipelines have a chance to cleanup
# before they terminate. Pipelines must access 'was_aborted' to find
# out how their finalization should work.
abort_signal = (
purpose == _BarrierRecord.ABORT or
root_pipeline_record.abort_requested == True)
finalize_signal = (
(default_slot_record.status == _SlotRecord.FILLED and
purpose == _BarrierRecord.FINALIZE) or abort_signal)
try:
pipeline_func_class = mr_util.for_name(pipeline_record.class_path)
except ImportError, e:
# This means something is wrong with the deployed code. Rely on the
# taskqueue system to do retries.
retry_message = '%s: %s' % (e.__class__.__name__, str(e))
logging.exception(
'Could not locate %s#%s. %s',
pipeline_record.class_path, pipeline_key.name(), retry_message)
raise
try:
pipeline_func = pipeline_func_class.from_id(
pipeline_key.name(),
resolve_outputs=finalize_signal,
_pipeline_record=pipeline_record)
except SlotNotFilledError, e:
logging.exception(
'Could not resolve arguments for %s#%s. Most likely this means there '
'is a bug in the Pipeline runtime or some intermediate data has been '
'deleted from the Datastore. Giving up.',
pipeline_record.class_path, pipeline_key.name())
self.transition_aborted(pipeline_key)
return
except Exception, e:
retry_message = '%s: %s' % (e.__class__.__name__, str(e))
logging.exception(
'Instantiating %s#%s raised exception. %s',
pipeline_record.class_path, pipeline_key.name(), retry_message)
self.transition_retry(pipeline_key, retry_message)
if pipeline_record.params['task_retry']:
raise
else:
return
else:
pipeline_generator = mr_util.is_generator_function(
pipeline_func_class.run)
caller_output = pipeline_func.outputs
if (abort_signal and pipeline_func.async and
pipeline_record.status == _PipelineRecord.RUN
and not pipeline_func.try_cancel()):
logging.warning(
'Could not cancel and abort mid-flight async pipeline: %r#%s',
pipeline_func, pipeline_key.name())
return
if finalize_signal:
try:
pipeline_func._finalized_internal(
self, pipeline_key, root_pipeline_key,
caller_output, abort_signal)
except Exception, e:
# This means something is wrong with the deployed finalization code.
# Rely on the taskqueue system to do retries.
retry_message = '%s: %s' % (e.__class__.__name__, str(e))
logging.exception('Finalizing %r#%s raised exception. %s',
pipeline_func, pipeline_key.name(), retry_message)
raise
else:
if not abort_signal:
self.transition_complete(pipeline_key)
return
if abort_signal:
logging.debug('Marking as aborted %s#%s', pipeline_func,
pipeline_key.name())
self.transition_aborted(pipeline_key)
return
if pipeline_record.current_attempt != attempt:
logging.error(
'Received evaluation task for pipeline ID "%s" attempt %d but '
'current pending attempt is %d', pipeline_key.name(), attempt,
pipeline_record.current_attempt)
return
if pipeline_record.current_attempt >= pipeline_record.max_attempts:
logging.error(
'Received evaluation task for pipeline ID "%s" on attempt %d '
'but that exceeds max attempts %d', pipeline_key.name(), attempt,
pipeline_record.max_attempts)
return
if pipeline_record.next_retry_time is not None:
retry_time = pipeline_record.next_retry_time - _RETRY_WIGGLE_TIMEDELTA
if self._gettime() <= retry_time:
detail_message = (
'Received evaluation task for pipeline ID "%s" on attempt %d, '
'which will not be ready until: %s' % (pipeline_key.name(),
pipeline_record.current_attempt, pipeline_record.next_retry_time))
logging.warning(detail_message)
raise UnexpectedPipelineError(detail_message)
if pipeline_record.status == _PipelineRecord.RUN and pipeline_generator:
if (default_slot_record.status == _SlotRecord.WAITING and
not pipeline_record.fanned_out):
# This properly handles the yield-less generator case when the
# RUN state transition worked properly but outputting to the default
# slot failed.
self.fill_slot(pipeline_key, caller_output.default, None)
return
if (pipeline_record.status == _PipelineRecord.WAITING and
pipeline_func.async):
self.transition_run(pipeline_key)
try:
result = pipeline_func._run_internal(
self, pipeline_key, root_pipeline_key, caller_output)
except Exception, e:
if self.handle_run_exception(pipeline_key, pipeline_func, e):
raise
else:
return
if pipeline_func.async:
return
if not pipeline_generator:
# Catch any exceptions that are thrown when the pipeline's return
# value is being serialized. This ensures that serialization errors
# will cause normal abort/retry behavior.
try:
self.fill_slot(pipeline_key, caller_output.default, result)
except Exception, e:
retry_message = 'Bad return value. %s: %s' % (
e.__class__.__name__, str(e))
logging.exception(
'Generator %r#%s caused exception while serializing return '
'value %r. %s', pipeline_func, pipeline_key.name(), result,
retry_message)
self.transition_retry(pipeline_key, retry_message)
if pipeline_func.task_retry:
raise
else:
return
expected_outputs = set(caller_output._output_dict.iterkeys())
found_outputs = self.session_filled_output_names
if expected_outputs != found_outputs:
exception = SlotNotFilledError(
'Outputs %r for pipeline ID "%s" were never filled by "%s".' % (
expected_outputs - found_outputs,
pipeline_key.name(), pipeline_func._class_path))
if self.handle_run_exception(pipeline_key, pipeline_func, exception):
raise exception
return
pipeline_iter = result
next_value = None
last_sub_stage = None
sub_stage = None
sub_stage_dict = {}
sub_stage_ordering = []
while True:
try:
yielded = pipeline_iter.send(next_value)
except StopIteration:
break
except Exception, e:
if self.handle_run_exception(pipeline_key, pipeline_func, e):
raise
else:
return
if isinstance(yielded, Pipeline):
if yielded in sub_stage_dict:
raise UnexpectedPipelineError(
'Already yielded pipeline object %r with pipeline ID %s' %
(yielded, yielded.pipeline_id))
last_sub_stage = yielded
next_value = PipelineFuture(yielded.output_names)
next_value._after_all_pipelines.update(After._local._after_all_futures)
next_value._after_all_pipelines.update(InOrder._local._in_order_futures)
sub_stage_dict[yielded] = next_value
sub_stage_ordering.append(yielded)
InOrder._add_future(next_value)
# To aid local testing, the task_retry flag (which instructs the
# evaluator to raise all exceptions back up to the task queue) is
# inherited by all children from the root down.
yielded.task_retry = pipeline_func.task_retry
else:
raise UnexpectedPipelineError(
'Yielded a disallowed value: %r' % yielded)
if last_sub_stage:
# Final yielded stage inherits outputs from calling pipeline that were not
# already filled during the generator's execution.
inherited_outputs = params['output_slots']
for slot_name in self.session_filled_output_names:
del inherited_outputs[slot_name]
sub_stage_dict[last_sub_stage]._inherit_outputs(
pipeline_record.class_path, inherited_outputs)
else:
# Here the generator has yielded nothing, and thus acts as a synchronous
# function. We can skip the rest of the generator steps completely and
# fill the default output slot to cause finalizing.
expected_outputs = set(caller_output._output_dict.iterkeys())
expected_outputs.remove('default')
found_outputs = self.session_filled_output_names
if expected_outputs != found_outputs:
exception = SlotNotFilledError(
'Outputs %r for pipeline ID "%s" were never filled by "%s".' % (
expected_outputs - found_outputs,
pipeline_key.name(), pipeline_func._class_path))
if self.handle_run_exception(pipeline_key, pipeline_func, exception):
raise exception
else:
self.fill_slot(pipeline_key, caller_output.default, None)
self.transition_run(pipeline_key)
return
# Allocate any SlotRecords that do not yet exist.
entities_to_put = []
for future in sub_stage_dict.itervalues():
for slot in future._output_dict.itervalues():
if not slot._exists:
entities_to_put.append(_SlotRecord(
key=slot.key, root_pipeline=root_pipeline_key))
# Allocate PipelineRecords and BarrierRecords for generator-run Pipelines.
pipelines_to_run = set()
all_children_keys = []
all_output_slots = set()
for sub_stage in sub_stage_ordering:
future = sub_stage_dict[sub_stage]
# Catch any exceptions that are thrown when the pipeline's parameters
# are being serialized. This ensures that serialization errors will
# cause normal retry/abort behavior.
try:
dependent_slots, output_slots, params_text, params_blob = \
_generate_args(sub_stage, future, self.queue_name, self.base_path)
except Exception, e:
retry_message = 'Bad child arguments. %s: %s' % (
e.__class__.__name__, str(e))
logging.exception(
'Generator %r#%s caused exception while serializing args for '
'child pipeline %r. %s', pipeline_func, pipeline_key.name(),
sub_stage, retry_message)
self.transition_retry(pipeline_key, retry_message)
if pipeline_func.task_retry:
raise
else:
return
child_pipeline_key = db.Key.from_path(
_PipelineRecord.kind(), uuid.uuid4().hex)
all_output_slots.update(output_slots)
all_children_keys.append(child_pipeline_key)
child_pipeline = _PipelineRecord(
key=child_pipeline_key,
root_pipeline=root_pipeline_key,
# Bug in DB means we need to use the storage name here,
# not the local property name.
params=params_text,
params_blob=params_blob,
class_path=sub_stage._class_path,
max_attempts=sub_stage.max_attempts)
entities_to_put.append(child_pipeline)
if not dependent_slots:
# This child pipeline will run immediately.
pipelines_to_run.add(child_pipeline_key)
child_pipeline.start_time = self._gettime()
else:
entities_to_put.extend(_PipelineContext._create_barrier_entities(
root_pipeline_key,
child_pipeline_key,
_BarrierRecord.START,
dependent_slots))
entities_to_put.extend(_PipelineContext._create_barrier_entities(
root_pipeline_key,
child_pipeline_key,
_BarrierRecord.FINALIZE,
output_slots))
# This generator pipeline's finalization barrier must include all of the
# outputs of any child pipelines that it runs. This ensures the finalized
# calls will not happen until all child pipelines have completed.
#
# The transition_run() call below will update the FINALIZE _BarrierRecord
# for this generator pipeline to include all of these child outputs in
# its list of blocking_slots. That update is done transactionally to
# make sure the _BarrierRecord only lists the slots that matter.
#
# However, the notify_barriers() method doesn't find _BarrierRecords
# through the blocking_slots field. It finds them through _BarrierIndexes
# entities. Thus, before we update the FINALIZE _BarrierRecord in
# transition_run(), we need to write _BarrierIndexes for all child outputs.
barrier_entities = _PipelineContext._create_barrier_entities(
root_pipeline_key,
pipeline_key,
_BarrierRecord.FINALIZE,
all_output_slots)
# Ignore the first element which is the _BarrierRecord. That entity must
# have already been created and put in the datastore for the parent
# pipeline before this code generated child pipelines.
barrier_indexes = barrier_entities[1:]
entities_to_put.extend(barrier_indexes)
db.put(entities_to_put)
self.transition_run(pipeline_key,
blocking_slot_keys=all_output_slots,
fanned_out_pipelines=all_children_keys,
pipelines_to_run=pipelines_to_run) | [
"def",
"evaluate",
"(",
"self",
",",
"pipeline_key",
",",
"purpose",
"=",
"None",
",",
"attempt",
"=",
"0",
")",
":",
"After",
".",
"_thread_init",
"(",
")",
"InOrder",
".",
"_thread_init",
"(",
")",
"InOrder",
".",
"_local",
".",
"_activated",
"=",
"False",
"if",
"not",
"isinstance",
"(",
"pipeline_key",
",",
"db",
".",
"Key",
")",
":",
"pipeline_key",
"=",
"db",
".",
"Key",
"(",
"pipeline_key",
")",
"pipeline_record",
"=",
"db",
".",
"get",
"(",
"pipeline_key",
")",
"if",
"pipeline_record",
"is",
"None",
":",
"logging",
".",
"error",
"(",
"'Pipeline ID \"%s\" does not exist.'",
",",
"pipeline_key",
".",
"name",
"(",
")",
")",
"return",
"if",
"pipeline_record",
".",
"status",
"not",
"in",
"(",
"_PipelineRecord",
".",
"WAITING",
",",
"_PipelineRecord",
".",
"RUN",
")",
":",
"# If we're attempting to abort an already aborted pipeline,",
"# we silently advance. #50",
"if",
"(",
"pipeline_record",
".",
"status",
"==",
"_PipelineRecord",
".",
"ABORTED",
"and",
"purpose",
"==",
"_BarrierRecord",
".",
"ABORT",
")",
":",
"return",
"logging",
".",
"error",
"(",
"'Pipeline ID \"%s\" in bad state for purpose \"%s\": \"%s\"'",
",",
"pipeline_key",
".",
"name",
"(",
")",
",",
"purpose",
"or",
"_BarrierRecord",
".",
"START",
",",
"pipeline_record",
".",
"status",
")",
"return",
"params",
"=",
"pipeline_record",
".",
"params",
"root_pipeline_key",
"=",
"_PipelineRecord",
".",
"root_pipeline",
".",
"get_value_for_datastore",
"(",
"pipeline_record",
")",
"default_slot_key",
"=",
"db",
".",
"Key",
"(",
"params",
"[",
"'output_slots'",
"]",
"[",
"'default'",
"]",
")",
"default_slot_record",
",",
"root_pipeline_record",
"=",
"db",
".",
"get",
"(",
"[",
"default_slot_key",
",",
"root_pipeline_key",
"]",
")",
"if",
"default_slot_record",
"is",
"None",
":",
"logging",
".",
"error",
"(",
"'Pipeline ID \"%s\" default slot \"%s\" does not exist.'",
",",
"pipeline_key",
".",
"name",
"(",
")",
",",
"default_slot_key",
")",
"return",
"if",
"root_pipeline_record",
"is",
"None",
":",
"logging",
".",
"error",
"(",
"'Pipeline ID \"%s\" root pipeline ID \"%s\" is missing.'",
",",
"pipeline_key",
".",
"name",
"(",
")",
",",
"root_pipeline_key",
".",
"name",
"(",
")",
")",
"return",
"# Always finalize if we're aborting so pipelines have a chance to cleanup",
"# before they terminate. Pipelines must access 'was_aborted' to find",
"# out how their finalization should work.",
"abort_signal",
"=",
"(",
"purpose",
"==",
"_BarrierRecord",
".",
"ABORT",
"or",
"root_pipeline_record",
".",
"abort_requested",
"==",
"True",
")",
"finalize_signal",
"=",
"(",
"(",
"default_slot_record",
".",
"status",
"==",
"_SlotRecord",
".",
"FILLED",
"and",
"purpose",
"==",
"_BarrierRecord",
".",
"FINALIZE",
")",
"or",
"abort_signal",
")",
"try",
":",
"pipeline_func_class",
"=",
"mr_util",
".",
"for_name",
"(",
"pipeline_record",
".",
"class_path",
")",
"except",
"ImportError",
",",
"e",
":",
"# This means something is wrong with the deployed code. Rely on the",
"# taskqueue system to do retries.",
"retry_message",
"=",
"'%s: %s'",
"%",
"(",
"e",
".",
"__class__",
".",
"__name__",
",",
"str",
"(",
"e",
")",
")",
"logging",
".",
"exception",
"(",
"'Could not locate %s#%s. %s'",
",",
"pipeline_record",
".",
"class_path",
",",
"pipeline_key",
".",
"name",
"(",
")",
",",
"retry_message",
")",
"raise",
"try",
":",
"pipeline_func",
"=",
"pipeline_func_class",
".",
"from_id",
"(",
"pipeline_key",
".",
"name",
"(",
")",
",",
"resolve_outputs",
"=",
"finalize_signal",
",",
"_pipeline_record",
"=",
"pipeline_record",
")",
"except",
"SlotNotFilledError",
",",
"e",
":",
"logging",
".",
"exception",
"(",
"'Could not resolve arguments for %s#%s. Most likely this means there '",
"'is a bug in the Pipeline runtime or some intermediate data has been '",
"'deleted from the Datastore. Giving up.'",
",",
"pipeline_record",
".",
"class_path",
",",
"pipeline_key",
".",
"name",
"(",
")",
")",
"self",
".",
"transition_aborted",
"(",
"pipeline_key",
")",
"return",
"except",
"Exception",
",",
"e",
":",
"retry_message",
"=",
"'%s: %s'",
"%",
"(",
"e",
".",
"__class__",
".",
"__name__",
",",
"str",
"(",
"e",
")",
")",
"logging",
".",
"exception",
"(",
"'Instantiating %s#%s raised exception. %s'",
",",
"pipeline_record",
".",
"class_path",
",",
"pipeline_key",
".",
"name",
"(",
")",
",",
"retry_message",
")",
"self",
".",
"transition_retry",
"(",
"pipeline_key",
",",
"retry_message",
")",
"if",
"pipeline_record",
".",
"params",
"[",
"'task_retry'",
"]",
":",
"raise",
"else",
":",
"return",
"else",
":",
"pipeline_generator",
"=",
"mr_util",
".",
"is_generator_function",
"(",
"pipeline_func_class",
".",
"run",
")",
"caller_output",
"=",
"pipeline_func",
".",
"outputs",
"if",
"(",
"abort_signal",
"and",
"pipeline_func",
".",
"async",
"and",
"pipeline_record",
".",
"status",
"==",
"_PipelineRecord",
".",
"RUN",
"and",
"not",
"pipeline_func",
".",
"try_cancel",
"(",
")",
")",
":",
"logging",
".",
"warning",
"(",
"'Could not cancel and abort mid-flight async pipeline: %r#%s'",
",",
"pipeline_func",
",",
"pipeline_key",
".",
"name",
"(",
")",
")",
"return",
"if",
"finalize_signal",
":",
"try",
":",
"pipeline_func",
".",
"_finalized_internal",
"(",
"self",
",",
"pipeline_key",
",",
"root_pipeline_key",
",",
"caller_output",
",",
"abort_signal",
")",
"except",
"Exception",
",",
"e",
":",
"# This means something is wrong with the deployed finalization code.",
"# Rely on the taskqueue system to do retries.",
"retry_message",
"=",
"'%s: %s'",
"%",
"(",
"e",
".",
"__class__",
".",
"__name__",
",",
"str",
"(",
"e",
")",
")",
"logging",
".",
"exception",
"(",
"'Finalizing %r#%s raised exception. %s'",
",",
"pipeline_func",
",",
"pipeline_key",
".",
"name",
"(",
")",
",",
"retry_message",
")",
"raise",
"else",
":",
"if",
"not",
"abort_signal",
":",
"self",
".",
"transition_complete",
"(",
"pipeline_key",
")",
"return",
"if",
"abort_signal",
":",
"logging",
".",
"debug",
"(",
"'Marking as aborted %s#%s'",
",",
"pipeline_func",
",",
"pipeline_key",
".",
"name",
"(",
")",
")",
"self",
".",
"transition_aborted",
"(",
"pipeline_key",
")",
"return",
"if",
"pipeline_record",
".",
"current_attempt",
"!=",
"attempt",
":",
"logging",
".",
"error",
"(",
"'Received evaluation task for pipeline ID \"%s\" attempt %d but '",
"'current pending attempt is %d'",
",",
"pipeline_key",
".",
"name",
"(",
")",
",",
"attempt",
",",
"pipeline_record",
".",
"current_attempt",
")",
"return",
"if",
"pipeline_record",
".",
"current_attempt",
">=",
"pipeline_record",
".",
"max_attempts",
":",
"logging",
".",
"error",
"(",
"'Received evaluation task for pipeline ID \"%s\" on attempt %d '",
"'but that exceeds max attempts %d'",
",",
"pipeline_key",
".",
"name",
"(",
")",
",",
"attempt",
",",
"pipeline_record",
".",
"max_attempts",
")",
"return",
"if",
"pipeline_record",
".",
"next_retry_time",
"is",
"not",
"None",
":",
"retry_time",
"=",
"pipeline_record",
".",
"next_retry_time",
"-",
"_RETRY_WIGGLE_TIMEDELTA",
"if",
"self",
".",
"_gettime",
"(",
")",
"<=",
"retry_time",
":",
"detail_message",
"=",
"(",
"'Received evaluation task for pipeline ID \"%s\" on attempt %d, '",
"'which will not be ready until: %s'",
"%",
"(",
"pipeline_key",
".",
"name",
"(",
")",
",",
"pipeline_record",
".",
"current_attempt",
",",
"pipeline_record",
".",
"next_retry_time",
")",
")",
"logging",
".",
"warning",
"(",
"detail_message",
")",
"raise",
"UnexpectedPipelineError",
"(",
"detail_message",
")",
"if",
"pipeline_record",
".",
"status",
"==",
"_PipelineRecord",
".",
"RUN",
"and",
"pipeline_generator",
":",
"if",
"(",
"default_slot_record",
".",
"status",
"==",
"_SlotRecord",
".",
"WAITING",
"and",
"not",
"pipeline_record",
".",
"fanned_out",
")",
":",
"# This properly handles the yield-less generator case when the",
"# RUN state transition worked properly but outputting to the default",
"# slot failed.",
"self",
".",
"fill_slot",
"(",
"pipeline_key",
",",
"caller_output",
".",
"default",
",",
"None",
")",
"return",
"if",
"(",
"pipeline_record",
".",
"status",
"==",
"_PipelineRecord",
".",
"WAITING",
"and",
"pipeline_func",
".",
"async",
")",
":",
"self",
".",
"transition_run",
"(",
"pipeline_key",
")",
"try",
":",
"result",
"=",
"pipeline_func",
".",
"_run_internal",
"(",
"self",
",",
"pipeline_key",
",",
"root_pipeline_key",
",",
"caller_output",
")",
"except",
"Exception",
",",
"e",
":",
"if",
"self",
".",
"handle_run_exception",
"(",
"pipeline_key",
",",
"pipeline_func",
",",
"e",
")",
":",
"raise",
"else",
":",
"return",
"if",
"pipeline_func",
".",
"async",
":",
"return",
"if",
"not",
"pipeline_generator",
":",
"# Catch any exceptions that are thrown when the pipeline's return",
"# value is being serialized. This ensures that serialization errors",
"# will cause normal abort/retry behavior.",
"try",
":",
"self",
".",
"fill_slot",
"(",
"pipeline_key",
",",
"caller_output",
".",
"default",
",",
"result",
")",
"except",
"Exception",
",",
"e",
":",
"retry_message",
"=",
"'Bad return value. %s: %s'",
"%",
"(",
"e",
".",
"__class__",
".",
"__name__",
",",
"str",
"(",
"e",
")",
")",
"logging",
".",
"exception",
"(",
"'Generator %r#%s caused exception while serializing return '",
"'value %r. %s'",
",",
"pipeline_func",
",",
"pipeline_key",
".",
"name",
"(",
")",
",",
"result",
",",
"retry_message",
")",
"self",
".",
"transition_retry",
"(",
"pipeline_key",
",",
"retry_message",
")",
"if",
"pipeline_func",
".",
"task_retry",
":",
"raise",
"else",
":",
"return",
"expected_outputs",
"=",
"set",
"(",
"caller_output",
".",
"_output_dict",
".",
"iterkeys",
"(",
")",
")",
"found_outputs",
"=",
"self",
".",
"session_filled_output_names",
"if",
"expected_outputs",
"!=",
"found_outputs",
":",
"exception",
"=",
"SlotNotFilledError",
"(",
"'Outputs %r for pipeline ID \"%s\" were never filled by \"%s\".'",
"%",
"(",
"expected_outputs",
"-",
"found_outputs",
",",
"pipeline_key",
".",
"name",
"(",
")",
",",
"pipeline_func",
".",
"_class_path",
")",
")",
"if",
"self",
".",
"handle_run_exception",
"(",
"pipeline_key",
",",
"pipeline_func",
",",
"exception",
")",
":",
"raise",
"exception",
"return",
"pipeline_iter",
"=",
"result",
"next_value",
"=",
"None",
"last_sub_stage",
"=",
"None",
"sub_stage",
"=",
"None",
"sub_stage_dict",
"=",
"{",
"}",
"sub_stage_ordering",
"=",
"[",
"]",
"while",
"True",
":",
"try",
":",
"yielded",
"=",
"pipeline_iter",
".",
"send",
"(",
"next_value",
")",
"except",
"StopIteration",
":",
"break",
"except",
"Exception",
",",
"e",
":",
"if",
"self",
".",
"handle_run_exception",
"(",
"pipeline_key",
",",
"pipeline_func",
",",
"e",
")",
":",
"raise",
"else",
":",
"return",
"if",
"isinstance",
"(",
"yielded",
",",
"Pipeline",
")",
":",
"if",
"yielded",
"in",
"sub_stage_dict",
":",
"raise",
"UnexpectedPipelineError",
"(",
"'Already yielded pipeline object %r with pipeline ID %s'",
"%",
"(",
"yielded",
",",
"yielded",
".",
"pipeline_id",
")",
")",
"last_sub_stage",
"=",
"yielded",
"next_value",
"=",
"PipelineFuture",
"(",
"yielded",
".",
"output_names",
")",
"next_value",
".",
"_after_all_pipelines",
".",
"update",
"(",
"After",
".",
"_local",
".",
"_after_all_futures",
")",
"next_value",
".",
"_after_all_pipelines",
".",
"update",
"(",
"InOrder",
".",
"_local",
".",
"_in_order_futures",
")",
"sub_stage_dict",
"[",
"yielded",
"]",
"=",
"next_value",
"sub_stage_ordering",
".",
"append",
"(",
"yielded",
")",
"InOrder",
".",
"_add_future",
"(",
"next_value",
")",
"# To aid local testing, the task_retry flag (which instructs the",
"# evaluator to raise all exceptions back up to the task queue) is",
"# inherited by all children from the root down.",
"yielded",
".",
"task_retry",
"=",
"pipeline_func",
".",
"task_retry",
"else",
":",
"raise",
"UnexpectedPipelineError",
"(",
"'Yielded a disallowed value: %r'",
"%",
"yielded",
")",
"if",
"last_sub_stage",
":",
"# Final yielded stage inherits outputs from calling pipeline that were not",
"# already filled during the generator's execution.",
"inherited_outputs",
"=",
"params",
"[",
"'output_slots'",
"]",
"for",
"slot_name",
"in",
"self",
".",
"session_filled_output_names",
":",
"del",
"inherited_outputs",
"[",
"slot_name",
"]",
"sub_stage_dict",
"[",
"last_sub_stage",
"]",
".",
"_inherit_outputs",
"(",
"pipeline_record",
".",
"class_path",
",",
"inherited_outputs",
")",
"else",
":",
"# Here the generator has yielded nothing, and thus acts as a synchronous",
"# function. We can skip the rest of the generator steps completely and",
"# fill the default output slot to cause finalizing.",
"expected_outputs",
"=",
"set",
"(",
"caller_output",
".",
"_output_dict",
".",
"iterkeys",
"(",
")",
")",
"expected_outputs",
".",
"remove",
"(",
"'default'",
")",
"found_outputs",
"=",
"self",
".",
"session_filled_output_names",
"if",
"expected_outputs",
"!=",
"found_outputs",
":",
"exception",
"=",
"SlotNotFilledError",
"(",
"'Outputs %r for pipeline ID \"%s\" were never filled by \"%s\".'",
"%",
"(",
"expected_outputs",
"-",
"found_outputs",
",",
"pipeline_key",
".",
"name",
"(",
")",
",",
"pipeline_func",
".",
"_class_path",
")",
")",
"if",
"self",
".",
"handle_run_exception",
"(",
"pipeline_key",
",",
"pipeline_func",
",",
"exception",
")",
":",
"raise",
"exception",
"else",
":",
"self",
".",
"fill_slot",
"(",
"pipeline_key",
",",
"caller_output",
".",
"default",
",",
"None",
")",
"self",
".",
"transition_run",
"(",
"pipeline_key",
")",
"return",
"# Allocate any SlotRecords that do not yet exist.",
"entities_to_put",
"=",
"[",
"]",
"for",
"future",
"in",
"sub_stage_dict",
".",
"itervalues",
"(",
")",
":",
"for",
"slot",
"in",
"future",
".",
"_output_dict",
".",
"itervalues",
"(",
")",
":",
"if",
"not",
"slot",
".",
"_exists",
":",
"entities_to_put",
".",
"append",
"(",
"_SlotRecord",
"(",
"key",
"=",
"slot",
".",
"key",
",",
"root_pipeline",
"=",
"root_pipeline_key",
")",
")",
"# Allocate PipelineRecords and BarrierRecords for generator-run Pipelines.",
"pipelines_to_run",
"=",
"set",
"(",
")",
"all_children_keys",
"=",
"[",
"]",
"all_output_slots",
"=",
"set",
"(",
")",
"for",
"sub_stage",
"in",
"sub_stage_ordering",
":",
"future",
"=",
"sub_stage_dict",
"[",
"sub_stage",
"]",
"# Catch any exceptions that are thrown when the pipeline's parameters",
"# are being serialized. This ensures that serialization errors will",
"# cause normal retry/abort behavior.",
"try",
":",
"dependent_slots",
",",
"output_slots",
",",
"params_text",
",",
"params_blob",
"=",
"_generate_args",
"(",
"sub_stage",
",",
"future",
",",
"self",
".",
"queue_name",
",",
"self",
".",
"base_path",
")",
"except",
"Exception",
",",
"e",
":",
"retry_message",
"=",
"'Bad child arguments. %s: %s'",
"%",
"(",
"e",
".",
"__class__",
".",
"__name__",
",",
"str",
"(",
"e",
")",
")",
"logging",
".",
"exception",
"(",
"'Generator %r#%s caused exception while serializing args for '",
"'child pipeline %r. %s'",
",",
"pipeline_func",
",",
"pipeline_key",
".",
"name",
"(",
")",
",",
"sub_stage",
",",
"retry_message",
")",
"self",
".",
"transition_retry",
"(",
"pipeline_key",
",",
"retry_message",
")",
"if",
"pipeline_func",
".",
"task_retry",
":",
"raise",
"else",
":",
"return",
"child_pipeline_key",
"=",
"db",
".",
"Key",
".",
"from_path",
"(",
"_PipelineRecord",
".",
"kind",
"(",
")",
",",
"uuid",
".",
"uuid4",
"(",
")",
".",
"hex",
")",
"all_output_slots",
".",
"update",
"(",
"output_slots",
")",
"all_children_keys",
".",
"append",
"(",
"child_pipeline_key",
")",
"child_pipeline",
"=",
"_PipelineRecord",
"(",
"key",
"=",
"child_pipeline_key",
",",
"root_pipeline",
"=",
"root_pipeline_key",
",",
"# Bug in DB means we need to use the storage name here,",
"# not the local property name.",
"params",
"=",
"params_text",
",",
"params_blob",
"=",
"params_blob",
",",
"class_path",
"=",
"sub_stage",
".",
"_class_path",
",",
"max_attempts",
"=",
"sub_stage",
".",
"max_attempts",
")",
"entities_to_put",
".",
"append",
"(",
"child_pipeline",
")",
"if",
"not",
"dependent_slots",
":",
"# This child pipeline will run immediately.",
"pipelines_to_run",
".",
"add",
"(",
"child_pipeline_key",
")",
"child_pipeline",
".",
"start_time",
"=",
"self",
".",
"_gettime",
"(",
")",
"else",
":",
"entities_to_put",
".",
"extend",
"(",
"_PipelineContext",
".",
"_create_barrier_entities",
"(",
"root_pipeline_key",
",",
"child_pipeline_key",
",",
"_BarrierRecord",
".",
"START",
",",
"dependent_slots",
")",
")",
"entities_to_put",
".",
"extend",
"(",
"_PipelineContext",
".",
"_create_barrier_entities",
"(",
"root_pipeline_key",
",",
"child_pipeline_key",
",",
"_BarrierRecord",
".",
"FINALIZE",
",",
"output_slots",
")",
")",
"# This generator pipeline's finalization barrier must include all of the",
"# outputs of any child pipelines that it runs. This ensures the finalized",
"# calls will not happen until all child pipelines have completed.",
"#",
"# The transition_run() call below will update the FINALIZE _BarrierRecord",
"# for this generator pipeline to include all of these child outputs in",
"# its list of blocking_slots. That update is done transactionally to",
"# make sure the _BarrierRecord only lists the slots that matter.",
"#",
"# However, the notify_barriers() method doesn't find _BarrierRecords",
"# through the blocking_slots field. It finds them through _BarrierIndexes",
"# entities. Thus, before we update the FINALIZE _BarrierRecord in",
"# transition_run(), we need to write _BarrierIndexes for all child outputs.",
"barrier_entities",
"=",
"_PipelineContext",
".",
"_create_barrier_entities",
"(",
"root_pipeline_key",
",",
"pipeline_key",
",",
"_BarrierRecord",
".",
"FINALIZE",
",",
"all_output_slots",
")",
"# Ignore the first element which is the _BarrierRecord. That entity must",
"# have already been created and put in the datastore for the parent",
"# pipeline before this code generated child pipelines.",
"barrier_indexes",
"=",
"barrier_entities",
"[",
"1",
":",
"]",
"entities_to_put",
".",
"extend",
"(",
"barrier_indexes",
")",
"db",
".",
"put",
"(",
"entities_to_put",
")",
"self",
".",
"transition_run",
"(",
"pipeline_key",
",",
"blocking_slot_keys",
"=",
"all_output_slots",
",",
"fanned_out_pipelines",
"=",
"all_children_keys",
",",
"pipelines_to_run",
"=",
"pipelines_to_run",
")"
] | Evaluates the given Pipeline and enqueues sub-stages for execution.
Args:
pipeline_key: The db.Key or stringified key of the _PipelineRecord to run.
purpose: Why evaluate was called ('start', 'finalize', or 'abort').
attempt: The attempt number that should be tried. | [
"Evaluates",
"the",
"given",
"Pipeline",
"and",
"enqueues",
"sub",
"-",
"stages",
"for",
"execution",
"."
] | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/pkg_resources.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/pkg_resources.py#L1663-L1689 | def get_importer(path_item):
"""Retrieve a PEP 302 "importer" for the given path item
If there is no importer, this returns a wrapper around the builtin import
machinery. The returned importer is only cached if it was created by a
path hook.
"""
try:
importer = sys.path_importer_cache[path_item]
except KeyError:
for hook in sys.path_hooks:
try:
importer = hook(path_item)
except ImportError:
pass
else:
break
else:
importer = None
sys.path_importer_cache.setdefault(path_item,importer)
if importer is None:
try:
importer = ImpWrapper(path_item)
except ImportError:
pass
return importer | [
"def",
"get_importer",
"(",
"path_item",
")",
":",
"try",
":",
"importer",
"=",
"sys",
".",
"path_importer_cache",
"[",
"path_item",
"]",
"except",
"KeyError",
":",
"for",
"hook",
"in",
"sys",
".",
"path_hooks",
":",
"try",
":",
"importer",
"=",
"hook",
"(",
"path_item",
")",
"except",
"ImportError",
":",
"pass",
"else",
":",
"break",
"else",
":",
"importer",
"=",
"None",
"sys",
".",
"path_importer_cache",
".",
"setdefault",
"(",
"path_item",
",",
"importer",
")",
"if",
"importer",
"is",
"None",
":",
"try",
":",
"importer",
"=",
"ImpWrapper",
"(",
"path_item",
")",
"except",
"ImportError",
":",
"pass",
"return",
"importer"
] | Retrieve a PEP 302 "importer" for the given path item
If there is no importer, this returns a wrapper around the builtin import
machinery. The returned importer is only cached if it was created by a
path hook. | [
"Retrieve",
"a",
"PEP",
"302",
"importer",
"for",
"the",
"given",
"path",
"item"
] | python | test |
balloob/pychromecast | pychromecast/__init__.py | https://github.com/balloob/pychromecast/blob/831b09c4fed185a7bffe0ea330b7849d5f4e36b6/pychromecast/__init__.py#L306-L313 | def volume_down(self, delta=0.1):
""" Decrement the volume by 0.1 (or delta) unless it is already 0.
Returns the new volume.
"""
if delta <= 0:
raise ValueError(
"volume delta must be greater than zero, not {}".format(delta))
return self.set_volume(self.status.volume_level - delta) | [
"def",
"volume_down",
"(",
"self",
",",
"delta",
"=",
"0.1",
")",
":",
"if",
"delta",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"volume delta must be greater than zero, not {}\"",
".",
"format",
"(",
"delta",
")",
")",
"return",
"self",
".",
"set_volume",
"(",
"self",
".",
"status",
".",
"volume_level",
"-",
"delta",
")"
] | Decrement the volume by 0.1 (or delta) unless it is already 0.
Returns the new volume. | [
"Decrement",
"the",
"volume",
"by",
"0",
".",
"1",
"(",
"or",
"delta",
")",
"unless",
"it",
"is",
"already",
"0",
".",
"Returns",
"the",
"new",
"volume",
"."
] | python | train |
geophysics-ubonn/crtomo_tools | lib/crtomo/grid.py | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/grid.py#L160-L229 | def _read_elem_nodes(self, fid):
""" Read the nodes from an opened elem.dat file. Correct for CutMcK
transformations.
We store three typed of nodes in the dict 'nodes':
* "raw" : as read from the elem.dat file
* "presort" : pre-sorted so we can directly read node numbers from
a elec.dat file and use them as indices.
* "sorted" : completely sorted as in the original grid (before any
CutMcK)
For completeness, we also store the following keys:
* "cutmck_index" : Array containing the indices in "presort" to
obtain the "sorted" values:
nodes['sorted'] = nodes['presort'] [nodes['cutmck_index'], :]
* "rev_cutmck_index" : argsort(cutmck_index)
"""
nodes = {}
# # prepare nodes
# nodes_sorted = np.zeros((number_of_nodes, 3), dtype=float)
# nodes = np.zeros((number_of_nodes, 3), dtype=float)
# read in nodes
nodes_raw = np.empty((self.header['nr_nodes'], 3), dtype=float)
for nr in range(0, self.header['nr_nodes']):
node_line = fid.readline().lstrip()
nodes_raw[nr, :] = np.fromstring(
node_line, dtype=float, sep=' ')
# round node coordinates to 5th decimal point. Sometimes this is
# important when we deal with mal-formatted node data
nodes_raw[:, 1:3] = np.round(nodes_raw[:, 1:3], 5)
# check for CutMcK
# The check is based on the first node, but if one node was renumbered,
# so were all the others.
if(nodes_raw[:, 0] != list(range(1, nodes_raw.shape[0]))):
self.header['cutmck'] = True
print(
'This grid was sorted using CutMcK. The nodes were resorted!')
else:
self.header['cutmck'] = False
# Rearrange nodes when CutMcK was used.
if(self.header['cutmck']):
nodes_cutmck = np.empty_like(nodes_raw)
nodes_cutmck_index = np.zeros(nodes_raw.shape[0], dtype=int)
for node in range(0, self.header['nr_nodes']):
new_index = np.where(nodes_raw[:, 0].astype(int) == (node + 1))
nodes_cutmck[new_index[0], 1:3] = nodes_raw[node, 1:3]
nodes_cutmck[new_index[0], 0] = new_index[0]
nodes_cutmck_index[node] = new_index[0]
# sort them
nodes_sorted = nodes_cutmck[nodes_cutmck_index, :]
nodes['presort'] = nodes_cutmck
nodes['cutmck_index'] = nodes_cutmck_index
nodes['rev_cutmck_index'] = np.argsort(nodes_cutmck_index)
else:
nodes_sorted = nodes_raw
nodes['presort'] = nodes_raw
# prepare node dict
nodes['raw'] = nodes_raw
nodes['sorted'] = nodes_sorted
self.nodes = nodes
self.nr_of_nodes = nodes['raw'].shape[0] | [
"def",
"_read_elem_nodes",
"(",
"self",
",",
"fid",
")",
":",
"nodes",
"=",
"{",
"}",
"# # prepare nodes",
"# nodes_sorted = np.zeros((number_of_nodes, 3), dtype=float)",
"# nodes = np.zeros((number_of_nodes, 3), dtype=float)",
"# read in nodes",
"nodes_raw",
"=",
"np",
".",
"empty",
"(",
"(",
"self",
".",
"header",
"[",
"'nr_nodes'",
"]",
",",
"3",
")",
",",
"dtype",
"=",
"float",
")",
"for",
"nr",
"in",
"range",
"(",
"0",
",",
"self",
".",
"header",
"[",
"'nr_nodes'",
"]",
")",
":",
"node_line",
"=",
"fid",
".",
"readline",
"(",
")",
".",
"lstrip",
"(",
")",
"nodes_raw",
"[",
"nr",
",",
":",
"]",
"=",
"np",
".",
"fromstring",
"(",
"node_line",
",",
"dtype",
"=",
"float",
",",
"sep",
"=",
"' '",
")",
"# round node coordinates to 5th decimal point. Sometimes this is",
"# important when we deal with mal-formatted node data",
"nodes_raw",
"[",
":",
",",
"1",
":",
"3",
"]",
"=",
"np",
".",
"round",
"(",
"nodes_raw",
"[",
":",
",",
"1",
":",
"3",
"]",
",",
"5",
")",
"# check for CutMcK",
"# The check is based on the first node, but if one node was renumbered,",
"# so were all the others.",
"if",
"(",
"nodes_raw",
"[",
":",
",",
"0",
"]",
"!=",
"list",
"(",
"range",
"(",
"1",
",",
"nodes_raw",
".",
"shape",
"[",
"0",
"]",
")",
")",
")",
":",
"self",
".",
"header",
"[",
"'cutmck'",
"]",
"=",
"True",
"print",
"(",
"'This grid was sorted using CutMcK. The nodes were resorted!'",
")",
"else",
":",
"self",
".",
"header",
"[",
"'cutmck'",
"]",
"=",
"False",
"# Rearrange nodes when CutMcK was used.",
"if",
"(",
"self",
".",
"header",
"[",
"'cutmck'",
"]",
")",
":",
"nodes_cutmck",
"=",
"np",
".",
"empty_like",
"(",
"nodes_raw",
")",
"nodes_cutmck_index",
"=",
"np",
".",
"zeros",
"(",
"nodes_raw",
".",
"shape",
"[",
"0",
"]",
",",
"dtype",
"=",
"int",
")",
"for",
"node",
"in",
"range",
"(",
"0",
",",
"self",
".",
"header",
"[",
"'nr_nodes'",
"]",
")",
":",
"new_index",
"=",
"np",
".",
"where",
"(",
"nodes_raw",
"[",
":",
",",
"0",
"]",
".",
"astype",
"(",
"int",
")",
"==",
"(",
"node",
"+",
"1",
")",
")",
"nodes_cutmck",
"[",
"new_index",
"[",
"0",
"]",
",",
"1",
":",
"3",
"]",
"=",
"nodes_raw",
"[",
"node",
",",
"1",
":",
"3",
"]",
"nodes_cutmck",
"[",
"new_index",
"[",
"0",
"]",
",",
"0",
"]",
"=",
"new_index",
"[",
"0",
"]",
"nodes_cutmck_index",
"[",
"node",
"]",
"=",
"new_index",
"[",
"0",
"]",
"# sort them",
"nodes_sorted",
"=",
"nodes_cutmck",
"[",
"nodes_cutmck_index",
",",
":",
"]",
"nodes",
"[",
"'presort'",
"]",
"=",
"nodes_cutmck",
"nodes",
"[",
"'cutmck_index'",
"]",
"=",
"nodes_cutmck_index",
"nodes",
"[",
"'rev_cutmck_index'",
"]",
"=",
"np",
".",
"argsort",
"(",
"nodes_cutmck_index",
")",
"else",
":",
"nodes_sorted",
"=",
"nodes_raw",
"nodes",
"[",
"'presort'",
"]",
"=",
"nodes_raw",
"# prepare node dict",
"nodes",
"[",
"'raw'",
"]",
"=",
"nodes_raw",
"nodes",
"[",
"'sorted'",
"]",
"=",
"nodes_sorted",
"self",
".",
"nodes",
"=",
"nodes",
"self",
".",
"nr_of_nodes",
"=",
"nodes",
"[",
"'raw'",
"]",
".",
"shape",
"[",
"0",
"]"
] | Read the nodes from an opened elem.dat file. Correct for CutMcK
transformations.
We store three typed of nodes in the dict 'nodes':
* "raw" : as read from the elem.dat file
* "presort" : pre-sorted so we can directly read node numbers from
a elec.dat file and use them as indices.
* "sorted" : completely sorted as in the original grid (before any
CutMcK)
For completeness, we also store the following keys:
* "cutmck_index" : Array containing the indices in "presort" to
obtain the "sorted" values:
nodes['sorted'] = nodes['presort'] [nodes['cutmck_index'], :]
* "rev_cutmck_index" : argsort(cutmck_index) | [
"Read",
"the",
"nodes",
"from",
"an",
"opened",
"elem",
".",
"dat",
"file",
".",
"Correct",
"for",
"CutMcK",
"transformations",
"."
] | python | train |
StackStorm/pybind | pybind/slxos/v17r_1_01a/isis_state/interface_detail/isis_intf/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/isis_state/interface_detail/isis_intf/__init__.py#L837-L860 | def _set_ldp_sync_info(self, v, load=False):
"""
Setter method for ldp_sync_info, mapped from YANG variable /isis_state/interface_detail/isis_intf/ldp_sync_info (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ldp_sync_info is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ldp_sync_info() directly.
YANG Description: ISIS LDP sync info
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ldp_sync_info.ldp_sync_info, is_container='container', presence=False, yang_name="ldp-sync-info", rest_name="ldp-sync-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-isis-ldp-sync-info', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ldp_sync_info must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=ldp_sync_info.ldp_sync_info, is_container='container', presence=False, yang_name="ldp-sync-info", rest_name="ldp-sync-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-isis-ldp-sync-info', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""",
})
self.__ldp_sync_info = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_ldp_sync_info",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"ldp_sync_info",
".",
"ldp_sync_info",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"ldp-sync-info\"",
",",
"rest_name",
"=",
"\"ldp-sync-info\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'callpoint'",
":",
"u'isis-isis-ldp-sync-info'",
",",
"u'cli-suppress-show-path'",
":",
"None",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-isis-operational'",
",",
"defining_module",
"=",
"'brocade-isis-operational'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"False",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"ldp_sync_info must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=ldp_sync_info.ldp_sync_info, is_container='container', presence=False, yang_name=\"ldp-sync-info\", rest_name=\"ldp-sync-info\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-isis-ldp-sync-info', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)\"\"\"",
",",
"}",
")",
"self",
".",
"__ldp_sync_info",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | Setter method for ldp_sync_info, mapped from YANG variable /isis_state/interface_detail/isis_intf/ldp_sync_info (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ldp_sync_info is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ldp_sync_info() directly.
YANG Description: ISIS LDP sync info | [
"Setter",
"method",
"for",
"ldp_sync_info",
"mapped",
"from",
"YANG",
"variable",
"/",
"isis_state",
"/",
"interface_detail",
"/",
"isis_intf",
"/",
"ldp_sync_info",
"(",
"container",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_ldp_sync_info",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_ldp_sync_info",
"()",
"directly",
"."
] | python | train |
lambdalisue/django-permission | src/permission/logics/oneself.py | https://github.com/lambdalisue/django-permission/blob/580f7a1f857701d06ccf41163f188ac04fbc4fac/src/permission/logics/oneself.py#L58-L118 | def has_perm(self, user_obj, perm, obj=None):
"""
Check if user have permission of himself
If the user_obj is not authenticated, it return ``False``.
If no object is specified, it return ``True`` when the corresponding
permission was specified to ``True`` (changed from v0.7.0).
This behavior is based on the django system.
https://code.djangoproject.com/wiki/RowLevelPermissions
If an object is specified, it will return ``True`` if the object is the
user.
So users can change or delete themselves (you can change this behavior
to set ``any_permission``, ``change_permissino`` or
``delete_permission`` attributes of this instance).
Parameters
----------
user_obj : django user model instance
A django user model instance which be checked
perm : string
`app_label.codename` formatted permission string
obj : None or django model instance
None or django model instance for object permission
Returns
-------
boolean
Whether the specified user have specified permission (of specified
object).
"""
if not is_authenticated(user_obj):
return False
# construct the permission full name
change_permission = self.get_full_permission_string('change')
delete_permission = self.get_full_permission_string('delete')
# check if the user is authenticated
if obj is None:
# object permission without obj should return True
# Ref: https://code.djangoproject.com/wiki/RowLevelPermissions
if self.any_permission:
return True
if self.change_permission and perm == change_permission:
return True
if self.delete_permission and perm == delete_permission:
return True
return False
elif user_obj.is_active:
# check if the user trying to interact with himself
if obj == user_obj:
if self.any_permission:
# have any kind of permissions to himself
return True
if (self.change_permission and
perm == change_permission):
return True
if (self.delete_permission and
perm == delete_permission):
return True
return False | [
"def",
"has_perm",
"(",
"self",
",",
"user_obj",
",",
"perm",
",",
"obj",
"=",
"None",
")",
":",
"if",
"not",
"is_authenticated",
"(",
"user_obj",
")",
":",
"return",
"False",
"# construct the permission full name",
"change_permission",
"=",
"self",
".",
"get_full_permission_string",
"(",
"'change'",
")",
"delete_permission",
"=",
"self",
".",
"get_full_permission_string",
"(",
"'delete'",
")",
"# check if the user is authenticated",
"if",
"obj",
"is",
"None",
":",
"# object permission without obj should return True",
"# Ref: https://code.djangoproject.com/wiki/RowLevelPermissions",
"if",
"self",
".",
"any_permission",
":",
"return",
"True",
"if",
"self",
".",
"change_permission",
"and",
"perm",
"==",
"change_permission",
":",
"return",
"True",
"if",
"self",
".",
"delete_permission",
"and",
"perm",
"==",
"delete_permission",
":",
"return",
"True",
"return",
"False",
"elif",
"user_obj",
".",
"is_active",
":",
"# check if the user trying to interact with himself",
"if",
"obj",
"==",
"user_obj",
":",
"if",
"self",
".",
"any_permission",
":",
"# have any kind of permissions to himself",
"return",
"True",
"if",
"(",
"self",
".",
"change_permission",
"and",
"perm",
"==",
"change_permission",
")",
":",
"return",
"True",
"if",
"(",
"self",
".",
"delete_permission",
"and",
"perm",
"==",
"delete_permission",
")",
":",
"return",
"True",
"return",
"False"
] | Check if user have permission of himself
If the user_obj is not authenticated, it return ``False``.
If no object is specified, it return ``True`` when the corresponding
permission was specified to ``True`` (changed from v0.7.0).
This behavior is based on the django system.
https://code.djangoproject.com/wiki/RowLevelPermissions
If an object is specified, it will return ``True`` if the object is the
user.
So users can change or delete themselves (you can change this behavior
to set ``any_permission``, ``change_permissino`` or
``delete_permission`` attributes of this instance).
Parameters
----------
user_obj : django user model instance
A django user model instance which be checked
perm : string
`app_label.codename` formatted permission string
obj : None or django model instance
None or django model instance for object permission
Returns
-------
boolean
Whether the specified user have specified permission (of specified
object). | [
"Check",
"if",
"user",
"have",
"permission",
"of",
"himself"
] | python | train |
kennethreitz/requests-html | requests_html.py | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L291-L304 | def links(self) -> _Links:
"""All found links on page, in as–is form."""
def gen():
for link in self.find('a'):
try:
href = link.attrs['href'].strip()
if href and not (href.startswith('#') and self.skip_anchors) and not href.startswith(('javascript:', 'mailto:')):
yield href
except KeyError:
pass
return set(gen()) | [
"def",
"links",
"(",
"self",
")",
"->",
"_Links",
":",
"def",
"gen",
"(",
")",
":",
"for",
"link",
"in",
"self",
".",
"find",
"(",
"'a'",
")",
":",
"try",
":",
"href",
"=",
"link",
".",
"attrs",
"[",
"'href'",
"]",
".",
"strip",
"(",
")",
"if",
"href",
"and",
"not",
"(",
"href",
".",
"startswith",
"(",
"'#'",
")",
"and",
"self",
".",
"skip_anchors",
")",
"and",
"not",
"href",
".",
"startswith",
"(",
"(",
"'javascript:'",
",",
"'mailto:'",
")",
")",
":",
"yield",
"href",
"except",
"KeyError",
":",
"pass",
"return",
"set",
"(",
"gen",
"(",
")",
")"
] | All found links on page, in as–is form. | [
"All",
"found",
"links",
"on",
"page",
"in",
"as–is",
"form",
"."
] | python | train |
adubkov/py-cloudwatch | pycloudwatch/sender.py | https://github.com/adubkov/py-cloudwatch/blob/755bac7c153f75c4f0aa73ce14ca333cc4affb36/pycloudwatch/sender.py#L14-L23 | def put(self, metrics):
"""
Put metrics to cloudwatch. Metric shoult be instance or list of
instances of CloudWatchMetric
"""
if type(metrics) == list:
for metric in metrics:
self.c.put_metric_data(**metric)
else:
self.c.put_metric_data(**metrics) | [
"def",
"put",
"(",
"self",
",",
"metrics",
")",
":",
"if",
"type",
"(",
"metrics",
")",
"==",
"list",
":",
"for",
"metric",
"in",
"metrics",
":",
"self",
".",
"c",
".",
"put_metric_data",
"(",
"*",
"*",
"metric",
")",
"else",
":",
"self",
".",
"c",
".",
"put_metric_data",
"(",
"*",
"*",
"metrics",
")"
] | Put metrics to cloudwatch. Metric shoult be instance or list of
instances of CloudWatchMetric | [
"Put",
"metrics",
"to",
"cloudwatch",
".",
"Metric",
"shoult",
"be",
"instance",
"or",
"list",
"of",
"instances",
"of",
"CloudWatchMetric"
] | python | valid |
artefactual-labs/mets-reader-writer | metsrw/metadata.py | https://github.com/artefactual-labs/mets-reader-writer/blob/d95939cabdfdc25cb1bf67df0c84bd0d6e6a73ff/metsrw/metadata.py#L370-L400 | def parse(cls, root):
"""
Create a new SubSection by parsing root.
:param root: Element or ElementTree to be parsed into an object.
:raises exceptions.ParseError: If root's tag is not in :const:`SubSection.ALLOWED_SUBSECTIONS`.
:raises exceptions.ParseError: If the first child of root is not mdRef or mdWrap.
"""
subsection = root.tag.replace(utils.lxmlns("mets"), "", 1)
if subsection not in cls.ALLOWED_SUBSECTIONS:
raise exceptions.ParseError(
"SubSection can only parse elements with tag in %s with METS namespace"
% (cls.ALLOWED_SUBSECTIONS,)
)
section_id = root.get("ID")
created = root.get("CREATED", "")
status = root.get("STATUS", "")
child = root[0]
if child.tag == utils.lxmlns("mets") + "mdWrap":
mdwrap = MDWrap.parse(child)
obj = cls(subsection, mdwrap, section_id)
elif child.tag == utils.lxmlns("mets") + "mdRef":
mdref = MDRef.parse(child)
obj = cls(subsection, mdref, section_id)
else:
raise exceptions.ParseError(
"Child of %s must be mdWrap or mdRef" % subsection
)
obj.created = created
obj.status = status
return obj | [
"def",
"parse",
"(",
"cls",
",",
"root",
")",
":",
"subsection",
"=",
"root",
".",
"tag",
".",
"replace",
"(",
"utils",
".",
"lxmlns",
"(",
"\"mets\"",
")",
",",
"\"\"",
",",
"1",
")",
"if",
"subsection",
"not",
"in",
"cls",
".",
"ALLOWED_SUBSECTIONS",
":",
"raise",
"exceptions",
".",
"ParseError",
"(",
"\"SubSection can only parse elements with tag in %s with METS namespace\"",
"%",
"(",
"cls",
".",
"ALLOWED_SUBSECTIONS",
",",
")",
")",
"section_id",
"=",
"root",
".",
"get",
"(",
"\"ID\"",
")",
"created",
"=",
"root",
".",
"get",
"(",
"\"CREATED\"",
",",
"\"\"",
")",
"status",
"=",
"root",
".",
"get",
"(",
"\"STATUS\"",
",",
"\"\"",
")",
"child",
"=",
"root",
"[",
"0",
"]",
"if",
"child",
".",
"tag",
"==",
"utils",
".",
"lxmlns",
"(",
"\"mets\"",
")",
"+",
"\"mdWrap\"",
":",
"mdwrap",
"=",
"MDWrap",
".",
"parse",
"(",
"child",
")",
"obj",
"=",
"cls",
"(",
"subsection",
",",
"mdwrap",
",",
"section_id",
")",
"elif",
"child",
".",
"tag",
"==",
"utils",
".",
"lxmlns",
"(",
"\"mets\"",
")",
"+",
"\"mdRef\"",
":",
"mdref",
"=",
"MDRef",
".",
"parse",
"(",
"child",
")",
"obj",
"=",
"cls",
"(",
"subsection",
",",
"mdref",
",",
"section_id",
")",
"else",
":",
"raise",
"exceptions",
".",
"ParseError",
"(",
"\"Child of %s must be mdWrap or mdRef\"",
"%",
"subsection",
")",
"obj",
".",
"created",
"=",
"created",
"obj",
".",
"status",
"=",
"status",
"return",
"obj"
] | Create a new SubSection by parsing root.
:param root: Element or ElementTree to be parsed into an object.
:raises exceptions.ParseError: If root's tag is not in :const:`SubSection.ALLOWED_SUBSECTIONS`.
:raises exceptions.ParseError: If the first child of root is not mdRef or mdWrap. | [
"Create",
"a",
"new",
"SubSection",
"by",
"parsing",
"root",
"."
] | python | train |
codelv/enaml-native | src/enamlnative/android/android_spinner.py | https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_spinner.py#L65-L75 | def init_widget(self):
""" Initialize the underlying widget.
"""
w = self.widget
# Selection listener
w.setAdapter(self.adapter)
w.setOnItemSelectedListener(w.getId())
w.onItemSelected.connect(self.on_item_selected)
w.onNothingSelected.connect(self.on_nothing_selected)
super(AndroidSpinner, self).init_widget() | [
"def",
"init_widget",
"(",
"self",
")",
":",
"w",
"=",
"self",
".",
"widget",
"# Selection listener",
"w",
".",
"setAdapter",
"(",
"self",
".",
"adapter",
")",
"w",
".",
"setOnItemSelectedListener",
"(",
"w",
".",
"getId",
"(",
")",
")",
"w",
".",
"onItemSelected",
".",
"connect",
"(",
"self",
".",
"on_item_selected",
")",
"w",
".",
"onNothingSelected",
".",
"connect",
"(",
"self",
".",
"on_nothing_selected",
")",
"super",
"(",
"AndroidSpinner",
",",
"self",
")",
".",
"init_widget",
"(",
")"
] | Initialize the underlying widget. | [
"Initialize",
"the",
"underlying",
"widget",
"."
] | python | train |
ramses-tech/nefertari | nefertari/utils/utils.py | https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/utils/utils.py#L95-L107 | def maybe_dotted(module, throw=True):
""" If ``module`` is a dotted string pointing to the module,
imports and returns the module object.
"""
try:
return Configurator().maybe_dotted(module)
except ImportError as e:
err = '%s not found. %s' % (module, e)
if throw:
raise ImportError(err)
else:
log.error(err)
return None | [
"def",
"maybe_dotted",
"(",
"module",
",",
"throw",
"=",
"True",
")",
":",
"try",
":",
"return",
"Configurator",
"(",
")",
".",
"maybe_dotted",
"(",
"module",
")",
"except",
"ImportError",
"as",
"e",
":",
"err",
"=",
"'%s not found. %s'",
"%",
"(",
"module",
",",
"e",
")",
"if",
"throw",
":",
"raise",
"ImportError",
"(",
"err",
")",
"else",
":",
"log",
".",
"error",
"(",
"err",
")",
"return",
"None"
] | If ``module`` is a dotted string pointing to the module,
imports and returns the module object. | [
"If",
"module",
"is",
"a",
"dotted",
"string",
"pointing",
"to",
"the",
"module",
"imports",
"and",
"returns",
"the",
"module",
"object",
"."
] | python | train |
genialis/django-rest-framework-reactive | src/rest_framework_reactive/decorators.py | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/decorators.py#L10-L84 | def observable(
_method_or_viewset=None, poll_interval=None, primary_key=None, dependencies=None
):
"""Make ViewSet or ViewSet method observable.
Decorating a ViewSet class is the same as decorating its `list` method.
If decorated method returns a response containing a list of items, it must
use the provided `LimitOffsetPagination` for any pagination. In case a
non-list response is returned, the resulting item will be wrapped into a
list.
When multiple decorators are used, `observable` must be the first one to be
applied as it needs access to the method name.
:param poll_interval: Configure given observable as a polling observable
:param primary_key: Primary key for tracking observable items
:param dependencies: List of ORM to register as dependencies for
orm_notify. If None the observer will subscribe to notifications from
the queryset model.
"""
if poll_interval and dependencies:
raise ValueError('Only one of poll_interval and dependencies arguments allowed')
def decorator_observable(method_or_viewset):
if inspect.isclass(method_or_viewset):
list_method = getattr(method_or_viewset, 'list', None)
if list_method is not None:
method_or_viewset.list = observable(list_method)
return method_or_viewset
# Do not decorate an already observable method twice.
if getattr(method_or_viewset, 'is_observable', False):
return method_or_viewset
@functools.wraps(method_or_viewset)
def wrapper(self, request, *args, **kwargs):
if observer_request.OBSERVABLE_QUERY_PARAMETER in request.query_params:
# TODO: Validate the session identifier.
session_id = request.query_params[
observer_request.OBSERVABLE_QUERY_PARAMETER
]
# Create request and subscribe the session to given observer.
request = observer_request.Request(
self.__class__, method_or_viewset.__name__, request, args, kwargs
)
# Initialize observer and subscribe.
instance = observer.QueryObserver(request)
data = instance.subscribe(session_id, dependencies)
return response.Response({'observer': instance.id, 'items': data})
else:
# Non-reactive API.
return method_or_viewset(self, request, *args, **kwargs)
wrapper.is_observable = True
if poll_interval is not None:
wrapper.observable_change_detection = observer.Options.CHANGE_DETECTION_POLL
wrapper.observable_poll_interval = poll_interval
if primary_key is not None:
wrapper.observable_primary_key = primary_key
return wrapper
if _method_or_viewset is None:
return decorator_observable
else:
return decorator_observable(_method_or_viewset) | [
"def",
"observable",
"(",
"_method_or_viewset",
"=",
"None",
",",
"poll_interval",
"=",
"None",
",",
"primary_key",
"=",
"None",
",",
"dependencies",
"=",
"None",
")",
":",
"if",
"poll_interval",
"and",
"dependencies",
":",
"raise",
"ValueError",
"(",
"'Only one of poll_interval and dependencies arguments allowed'",
")",
"def",
"decorator_observable",
"(",
"method_or_viewset",
")",
":",
"if",
"inspect",
".",
"isclass",
"(",
"method_or_viewset",
")",
":",
"list_method",
"=",
"getattr",
"(",
"method_or_viewset",
",",
"'list'",
",",
"None",
")",
"if",
"list_method",
"is",
"not",
"None",
":",
"method_or_viewset",
".",
"list",
"=",
"observable",
"(",
"list_method",
")",
"return",
"method_or_viewset",
"# Do not decorate an already observable method twice.",
"if",
"getattr",
"(",
"method_or_viewset",
",",
"'is_observable'",
",",
"False",
")",
":",
"return",
"method_or_viewset",
"@",
"functools",
".",
"wraps",
"(",
"method_or_viewset",
")",
"def",
"wrapper",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"observer_request",
".",
"OBSERVABLE_QUERY_PARAMETER",
"in",
"request",
".",
"query_params",
":",
"# TODO: Validate the session identifier.",
"session_id",
"=",
"request",
".",
"query_params",
"[",
"observer_request",
".",
"OBSERVABLE_QUERY_PARAMETER",
"]",
"# Create request and subscribe the session to given observer.",
"request",
"=",
"observer_request",
".",
"Request",
"(",
"self",
".",
"__class__",
",",
"method_or_viewset",
".",
"__name__",
",",
"request",
",",
"args",
",",
"kwargs",
")",
"# Initialize observer and subscribe.",
"instance",
"=",
"observer",
".",
"QueryObserver",
"(",
"request",
")",
"data",
"=",
"instance",
".",
"subscribe",
"(",
"session_id",
",",
"dependencies",
")",
"return",
"response",
".",
"Response",
"(",
"{",
"'observer'",
":",
"instance",
".",
"id",
",",
"'items'",
":",
"data",
"}",
")",
"else",
":",
"# Non-reactive API.",
"return",
"method_or_viewset",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"wrapper",
".",
"is_observable",
"=",
"True",
"if",
"poll_interval",
"is",
"not",
"None",
":",
"wrapper",
".",
"observable_change_detection",
"=",
"observer",
".",
"Options",
".",
"CHANGE_DETECTION_POLL",
"wrapper",
".",
"observable_poll_interval",
"=",
"poll_interval",
"if",
"primary_key",
"is",
"not",
"None",
":",
"wrapper",
".",
"observable_primary_key",
"=",
"primary_key",
"return",
"wrapper",
"if",
"_method_or_viewset",
"is",
"None",
":",
"return",
"decorator_observable",
"else",
":",
"return",
"decorator_observable",
"(",
"_method_or_viewset",
")"
] | Make ViewSet or ViewSet method observable.
Decorating a ViewSet class is the same as decorating its `list` method.
If decorated method returns a response containing a list of items, it must
use the provided `LimitOffsetPagination` for any pagination. In case a
non-list response is returned, the resulting item will be wrapped into a
list.
When multiple decorators are used, `observable` must be the first one to be
applied as it needs access to the method name.
:param poll_interval: Configure given observable as a polling observable
:param primary_key: Primary key for tracking observable items
:param dependencies: List of ORM to register as dependencies for
orm_notify. If None the observer will subscribe to notifications from
the queryset model. | [
"Make",
"ViewSet",
"or",
"ViewSet",
"method",
"observable",
"."
] | python | train |
uuazed/numerapi | numerapi/numerapi.py | https://github.com/uuazed/numerapi/blob/fc9dcc53b32ede95bfda1ceeb62aec1d67d26697/numerapi/numerapi.py#L728-L777 | def get_submission_filenames(self, tournament=None, round_num=None):
"""Get filenames of the submission of the user.
Args:
tournament (int): optionally filter by ID of the tournament
round_num (int): optionally filter round number
Returns:
list: list of user filenames (`dict`)
Each filenames in the list as the following structure:
* filename (`str`)
* round_num (`int`)
* tournament (`int`)
Example:
>>> NumerAPI().get_submission_filenames(3, 111)
[{'filename': 'model57-dMpHpYMPIUAF.csv',
'round_num': 111,
'tournament': 3}]
"""
query = '''
query {
user {
submissions {
filename
selected
round {
tournament
number
}
}
}
}
'''
data = self.raw_query(query, authorization=True)['data']['user']
filenames = [{"round_num": item['round']['number'],
"tournament": item['round']['tournament'],
"filename": item['filename']}
for item in data['submissions'] if item['selected']]
if round_num is not None:
filenames = [f for f in filenames if f['round_num'] == round_num]
if tournament is not None:
filenames = [f for f in filenames if f['tournament'] == tournament]
filenames.sort(key=lambda f: (f['round_num'], f['tournament']))
return filenames | [
"def",
"get_submission_filenames",
"(",
"self",
",",
"tournament",
"=",
"None",
",",
"round_num",
"=",
"None",
")",
":",
"query",
"=",
"'''\n query {\n user {\n submissions {\n filename\n selected\n round {\n tournament\n number\n }\n }\n }\n }\n '''",
"data",
"=",
"self",
".",
"raw_query",
"(",
"query",
",",
"authorization",
"=",
"True",
")",
"[",
"'data'",
"]",
"[",
"'user'",
"]",
"filenames",
"=",
"[",
"{",
"\"round_num\"",
":",
"item",
"[",
"'round'",
"]",
"[",
"'number'",
"]",
",",
"\"tournament\"",
":",
"item",
"[",
"'round'",
"]",
"[",
"'tournament'",
"]",
",",
"\"filename\"",
":",
"item",
"[",
"'filename'",
"]",
"}",
"for",
"item",
"in",
"data",
"[",
"'submissions'",
"]",
"if",
"item",
"[",
"'selected'",
"]",
"]",
"if",
"round_num",
"is",
"not",
"None",
":",
"filenames",
"=",
"[",
"f",
"for",
"f",
"in",
"filenames",
"if",
"f",
"[",
"'round_num'",
"]",
"==",
"round_num",
"]",
"if",
"tournament",
"is",
"not",
"None",
":",
"filenames",
"=",
"[",
"f",
"for",
"f",
"in",
"filenames",
"if",
"f",
"[",
"'tournament'",
"]",
"==",
"tournament",
"]",
"filenames",
".",
"sort",
"(",
"key",
"=",
"lambda",
"f",
":",
"(",
"f",
"[",
"'round_num'",
"]",
",",
"f",
"[",
"'tournament'",
"]",
")",
")",
"return",
"filenames"
] | Get filenames of the submission of the user.
Args:
tournament (int): optionally filter by ID of the tournament
round_num (int): optionally filter round number
Returns:
list: list of user filenames (`dict`)
Each filenames in the list as the following structure:
* filename (`str`)
* round_num (`int`)
* tournament (`int`)
Example:
>>> NumerAPI().get_submission_filenames(3, 111)
[{'filename': 'model57-dMpHpYMPIUAF.csv',
'round_num': 111,
'tournament': 3}] | [
"Get",
"filenames",
"of",
"the",
"submission",
"of",
"the",
"user",
"."
] | python | train |
MisterY/pydatum | pydatum/datum.py | https://github.com/MisterY/pydatum/blob/4b39f43040e31a95bcf219603b6429078a9ba3c2/pydatum/datum.py#L84-L89 | def from_iso_date_string(self, date_str: str) -> datetime:
""" Parse ISO date string (YYYY-MM-DD) """
assert isinstance(date_str, str)
self.value = datetime.strptime(date_str, ISO_DATE_FORMAT)
return self.value | [
"def",
"from_iso_date_string",
"(",
"self",
",",
"date_str",
":",
"str",
")",
"->",
"datetime",
":",
"assert",
"isinstance",
"(",
"date_str",
",",
"str",
")",
"self",
".",
"value",
"=",
"datetime",
".",
"strptime",
"(",
"date_str",
",",
"ISO_DATE_FORMAT",
")",
"return",
"self",
".",
"value"
] | Parse ISO date string (YYYY-MM-DD) | [
"Parse",
"ISO",
"date",
"string",
"(",
"YYYY",
"-",
"MM",
"-",
"DD",
")"
] | python | train |
chaoss/grimoirelab-perceval | perceval/backends/core/nntp.py | https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/nntp.py#L303-L315 | def _fetch_article(self, article_id):
"""Fetch article data
:param article_id: id of the article to fetch
"""
fetched_data = self.handler.article(article_id)
data = {
'number': fetched_data[1].number,
'message_id': fetched_data[1].message_id,
'lines': fetched_data[1].lines
}
return data | [
"def",
"_fetch_article",
"(",
"self",
",",
"article_id",
")",
":",
"fetched_data",
"=",
"self",
".",
"handler",
".",
"article",
"(",
"article_id",
")",
"data",
"=",
"{",
"'number'",
":",
"fetched_data",
"[",
"1",
"]",
".",
"number",
",",
"'message_id'",
":",
"fetched_data",
"[",
"1",
"]",
".",
"message_id",
",",
"'lines'",
":",
"fetched_data",
"[",
"1",
"]",
".",
"lines",
"}",
"return",
"data"
] | Fetch article data
:param article_id: id of the article to fetch | [
"Fetch",
"article",
"data"
] | python | test |
readbeyond/aeneas | thirdparty/mfcc.py | https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/thirdparty/mfcc.py#L162-L172 | def dctmat(N,K,freqstep,orthogonalize=True):
"""Return the orthogonal DCT-II/DCT-III matrix of size NxK.
For computing or inverting MFCCs, N is the number of
log-power-spectrum bins while K is the number of cepstra."""
cosmat = numpy.zeros((N, K), 'double')
for n in range(0,N):
for k in range(0, K):
cosmat[n,k] = numpy.cos(freqstep * (n + 0.5) * k)
if orthogonalize:
cosmat[:,0] = cosmat[:,0] * 1./numpy.sqrt(2)
return cosmat | [
"def",
"dctmat",
"(",
"N",
",",
"K",
",",
"freqstep",
",",
"orthogonalize",
"=",
"True",
")",
":",
"cosmat",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"N",
",",
"K",
")",
",",
"'double'",
")",
"for",
"n",
"in",
"range",
"(",
"0",
",",
"N",
")",
":",
"for",
"k",
"in",
"range",
"(",
"0",
",",
"K",
")",
":",
"cosmat",
"[",
"n",
",",
"k",
"]",
"=",
"numpy",
".",
"cos",
"(",
"freqstep",
"*",
"(",
"n",
"+",
"0.5",
")",
"*",
"k",
")",
"if",
"orthogonalize",
":",
"cosmat",
"[",
":",
",",
"0",
"]",
"=",
"cosmat",
"[",
":",
",",
"0",
"]",
"*",
"1.",
"/",
"numpy",
".",
"sqrt",
"(",
"2",
")",
"return",
"cosmat"
] | Return the orthogonal DCT-II/DCT-III matrix of size NxK.
For computing or inverting MFCCs, N is the number of
log-power-spectrum bins while K is the number of cepstra. | [
"Return",
"the",
"orthogonal",
"DCT",
"-",
"II",
"/",
"DCT",
"-",
"III",
"matrix",
"of",
"size",
"NxK",
".",
"For",
"computing",
"or",
"inverting",
"MFCCs",
"N",
"is",
"the",
"number",
"of",
"log",
"-",
"power",
"-",
"spectrum",
"bins",
"while",
"K",
"is",
"the",
"number",
"of",
"cepstra",
"."
] | python | train |
gbiggs/rtctree | rtctree/manager.py | https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/manager.py#L215-L234 | def load_module(self, path, init_func):
'''Load a shared library.
Call this function to load a shared library (DLL file under Windows,
shared object under UNIX) into the manager.
@param path The path to the shared library.
@param init_func The name entry function in the library.
@raises FailedToLoadModuleError
'''
try:
with self._mutex:
if self._obj.load_module(path, init_func) != RTC.RTC_OK:
raise exceptions.FailedToLoadModuleError(path)
except CORBA.UNKNOWN as e:
if e.args[0] == UNKNOWN_UserException:
raise exceptions.FailedToLoadModuleError(path, 'CORBA User Exception')
else:
raise | [
"def",
"load_module",
"(",
"self",
",",
"path",
",",
"init_func",
")",
":",
"try",
":",
"with",
"self",
".",
"_mutex",
":",
"if",
"self",
".",
"_obj",
".",
"load_module",
"(",
"path",
",",
"init_func",
")",
"!=",
"RTC",
".",
"RTC_OK",
":",
"raise",
"exceptions",
".",
"FailedToLoadModuleError",
"(",
"path",
")",
"except",
"CORBA",
".",
"UNKNOWN",
"as",
"e",
":",
"if",
"e",
".",
"args",
"[",
"0",
"]",
"==",
"UNKNOWN_UserException",
":",
"raise",
"exceptions",
".",
"FailedToLoadModuleError",
"(",
"path",
",",
"'CORBA User Exception'",
")",
"else",
":",
"raise"
] | Load a shared library.
Call this function to load a shared library (DLL file under Windows,
shared object under UNIX) into the manager.
@param path The path to the shared library.
@param init_func The name entry function in the library.
@raises FailedToLoadModuleError | [
"Load",
"a",
"shared",
"library",
"."
] | python | train |
sosy-lab/benchexec | benchexec/container.py | https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/container.py#L226-L244 | def get_mount_points():
"""Get all current mount points of the system.
Changes to the mount points during iteration may be reflected in the result.
@return a generator of (source, target, fstype, options),
where options is a list of bytes instances, and the others are bytes instances
(this avoids encoding problems with mount points with problematic characters).
"""
def decode_path(path):
# Replace tab, space, newline, and backslash escapes with actual characters.
# According to man 5 fstab, only tab and space escaped, but Linux escapes more:
# https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/fs/proc_namespace.c?id=12a54b150fb5b6c2f3da932dc0e665355f8a5a48#n85
return path.replace(br"\011", b"\011").replace(br"\040", b"\040").replace(br"\012", b"\012").replace(br"\134", b"\134")
with open("/proc/self/mounts", "rb") as mounts:
# The format of this file is the same as of /etc/fstab (cf. man 5 fstab)
for mount in mounts:
source, target, fstype, options, unused1, unused2 = mount.split(b" ")
options = set(options.split(b","))
yield (decode_path(source), decode_path(target), fstype, options) | [
"def",
"get_mount_points",
"(",
")",
":",
"def",
"decode_path",
"(",
"path",
")",
":",
"# Replace tab, space, newline, and backslash escapes with actual characters.",
"# According to man 5 fstab, only tab and space escaped, but Linux escapes more:",
"# https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/fs/proc_namespace.c?id=12a54b150fb5b6c2f3da932dc0e665355f8a5a48#n85",
"return",
"path",
".",
"replace",
"(",
"br\"\\011\"",
",",
"b\"\\011\"",
")",
".",
"replace",
"(",
"br\"\\040\"",
",",
"b\"\\040\"",
")",
".",
"replace",
"(",
"br\"\\012\"",
",",
"b\"\\012\"",
")",
".",
"replace",
"(",
"br\"\\134\"",
",",
"b\"\\134\"",
")",
"with",
"open",
"(",
"\"/proc/self/mounts\"",
",",
"\"rb\"",
")",
"as",
"mounts",
":",
"# The format of this file is the same as of /etc/fstab (cf. man 5 fstab)",
"for",
"mount",
"in",
"mounts",
":",
"source",
",",
"target",
",",
"fstype",
",",
"options",
",",
"unused1",
",",
"unused2",
"=",
"mount",
".",
"split",
"(",
"b\" \"",
")",
"options",
"=",
"set",
"(",
"options",
".",
"split",
"(",
"b\",\"",
")",
")",
"yield",
"(",
"decode_path",
"(",
"source",
")",
",",
"decode_path",
"(",
"target",
")",
",",
"fstype",
",",
"options",
")"
] | Get all current mount points of the system.
Changes to the mount points during iteration may be reflected in the result.
@return a generator of (source, target, fstype, options),
where options is a list of bytes instances, and the others are bytes instances
(this avoids encoding problems with mount points with problematic characters). | [
"Get",
"all",
"current",
"mount",
"points",
"of",
"the",
"system",
".",
"Changes",
"to",
"the",
"mount",
"points",
"during",
"iteration",
"may",
"be",
"reflected",
"in",
"the",
"result",
"."
] | python | train |
chaosmail/python-fs | fs/fs.py | https://github.com/chaosmail/python-fs/blob/2567922ced9387e327e65f3244caff3b7af35684/fs/fs.py#L315-L320 | def addpath(path):
"""Add *path* to system path"""
import sys
if not exists(path):
raise ValueError('Path %s does not exist' % path)
sys.path.insert(1, path) | [
"def",
"addpath",
"(",
"path",
")",
":",
"import",
"sys",
"if",
"not",
"exists",
"(",
"path",
")",
":",
"raise",
"ValueError",
"(",
"'Path %s does not exist'",
"%",
"path",
")",
"sys",
".",
"path",
".",
"insert",
"(",
"1",
",",
"path",
")"
] | Add *path* to system path | [
"Add",
"*",
"path",
"*",
"to",
"system",
"path"
] | python | train |
glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/ext/_bundled/decorator.py | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/ext/_bundled/decorator.py#L140-L167 | def make(self, src_templ, evaldict=None, addsource=False, **attrs):
"Make a new function from a given template and update the signature"
src = src_templ % vars(self) # expand name and signature
evaldict = evaldict or {}
mo = DEF.match(src)
if mo is None:
raise SyntaxError('not a valid function template\n%s' % src)
name = mo.group(1) # extract the function name
names = set([name] + [arg.strip(' *') for arg in
self.shortsignature.split(',')])
for n in names:
if n in ('_func_', '_call_'):
raise NameError('%s is overridden in\n%s' % (n, src))
if not src.endswith('\n'): # add a newline just for safety
src += '\n' # this is needed in old versions of Python
try:
code = compile(src, '<string>', 'single')
# print >> sys.stderr, 'Compiling %s' % src
exec(code, evaldict)
except:
print('Error in generated code:', file=sys.stderr)
print(src, file=sys.stderr)
raise
func = evaldict[name]
if addsource:
attrs['__source__'] = src
self.update(func, **attrs)
return func | [
"def",
"make",
"(",
"self",
",",
"src_templ",
",",
"evaldict",
"=",
"None",
",",
"addsource",
"=",
"False",
",",
"*",
"*",
"attrs",
")",
":",
"src",
"=",
"src_templ",
"%",
"vars",
"(",
"self",
")",
"# expand name and signature",
"evaldict",
"=",
"evaldict",
"or",
"{",
"}",
"mo",
"=",
"DEF",
".",
"match",
"(",
"src",
")",
"if",
"mo",
"is",
"None",
":",
"raise",
"SyntaxError",
"(",
"'not a valid function template\\n%s'",
"%",
"src",
")",
"name",
"=",
"mo",
".",
"group",
"(",
"1",
")",
"# extract the function name",
"names",
"=",
"set",
"(",
"[",
"name",
"]",
"+",
"[",
"arg",
".",
"strip",
"(",
"' *'",
")",
"for",
"arg",
"in",
"self",
".",
"shortsignature",
".",
"split",
"(",
"','",
")",
"]",
")",
"for",
"n",
"in",
"names",
":",
"if",
"n",
"in",
"(",
"'_func_'",
",",
"'_call_'",
")",
":",
"raise",
"NameError",
"(",
"'%s is overridden in\\n%s'",
"%",
"(",
"n",
",",
"src",
")",
")",
"if",
"not",
"src",
".",
"endswith",
"(",
"'\\n'",
")",
":",
"# add a newline just for safety",
"src",
"+=",
"'\\n'",
"# this is needed in old versions of Python",
"try",
":",
"code",
"=",
"compile",
"(",
"src",
",",
"'<string>'",
",",
"'single'",
")",
"# print >> sys.stderr, 'Compiling %s' % src",
"exec",
"(",
"code",
",",
"evaldict",
")",
"except",
":",
"print",
"(",
"'Error in generated code:'",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"print",
"(",
"src",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"raise",
"func",
"=",
"evaldict",
"[",
"name",
"]",
"if",
"addsource",
":",
"attrs",
"[",
"'__source__'",
"]",
"=",
"src",
"self",
".",
"update",
"(",
"func",
",",
"*",
"*",
"attrs",
")",
"return",
"func"
] | Make a new function from a given template and update the signature | [
"Make",
"a",
"new",
"function",
"from",
"a",
"given",
"template",
"and",
"update",
"the",
"signature"
] | python | train |
JoeVirtual/KonFoo | konfoo/core.py | https://github.com/JoeVirtual/KonFoo/blob/0c62ef5c2bed4deaf908b34082e4de2544532fdc/konfoo/core.py#L273-L303 | def to_dict(self, *attributes, **options):
""" Returns a **flatten** :class:`ordered dictionary <collections.OrderedDict>`
of ``{'field path': attribute}`` or ``{'field path': tuple(attributes)}``
pairs for each :class:`Field` *nested* in the `Container`.
:param str attributes: selected :class:`Field` attributes.
Fallback is the field :attr:`~Field.value`.
:keyword str name: name of the `Container`.
Default is the class name of the instance.
:keyword bool nested: if ``True`` all :class:`Pointer` fields in the
`Container` lists their referenced :attr:`~Pointer.data` object
field attributes as well (chained method call).
"""
# Name of the Container
name = options.pop('name', self.__class__.__name__)
# Save to file
save = options.pop('save', False)
fields = OrderedDict()
fields[name] = OrderedDict()
if attributes:
field_getter = attrgetter(*attributes)
else:
field_getter = attrgetter('value')
for item in self.field_items(**options):
field_path, field = item
if save and field_path.startswith('['):
# Sequence element
field_path = '_' + field_path
fields[name][field_path] = field_getter(field)
return fields | [
"def",
"to_dict",
"(",
"self",
",",
"*",
"attributes",
",",
"*",
"*",
"options",
")",
":",
"# Name of the Container",
"name",
"=",
"options",
".",
"pop",
"(",
"'name'",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
"# Save to file",
"save",
"=",
"options",
".",
"pop",
"(",
"'save'",
",",
"False",
")",
"fields",
"=",
"OrderedDict",
"(",
")",
"fields",
"[",
"name",
"]",
"=",
"OrderedDict",
"(",
")",
"if",
"attributes",
":",
"field_getter",
"=",
"attrgetter",
"(",
"*",
"attributes",
")",
"else",
":",
"field_getter",
"=",
"attrgetter",
"(",
"'value'",
")",
"for",
"item",
"in",
"self",
".",
"field_items",
"(",
"*",
"*",
"options",
")",
":",
"field_path",
",",
"field",
"=",
"item",
"if",
"save",
"and",
"field_path",
".",
"startswith",
"(",
"'['",
")",
":",
"# Sequence element",
"field_path",
"=",
"'_'",
"+",
"field_path",
"fields",
"[",
"name",
"]",
"[",
"field_path",
"]",
"=",
"field_getter",
"(",
"field",
")",
"return",
"fields"
] | Returns a **flatten** :class:`ordered dictionary <collections.OrderedDict>`
of ``{'field path': attribute}`` or ``{'field path': tuple(attributes)}``
pairs for each :class:`Field` *nested* in the `Container`.
:param str attributes: selected :class:`Field` attributes.
Fallback is the field :attr:`~Field.value`.
:keyword str name: name of the `Container`.
Default is the class name of the instance.
:keyword bool nested: if ``True`` all :class:`Pointer` fields in the
`Container` lists their referenced :attr:`~Pointer.data` object
field attributes as well (chained method call). | [
"Returns",
"a",
"**",
"flatten",
"**",
":",
"class",
":",
"ordered",
"dictionary",
"<collections",
".",
"OrderedDict",
">",
"of",
"{",
"field",
"path",
":",
"attribute",
"}",
"or",
"{",
"field",
"path",
":",
"tuple",
"(",
"attributes",
")",
"}",
"pairs",
"for",
"each",
":",
"class",
":",
"Field",
"*",
"nested",
"*",
"in",
"the",
"Container",
"."
] | python | train |
ciena/afkak | afkak/brokerclient.py | https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/brokerclient.py#L322-L332 | def cancelRequest(self, requestId, reason=None, _=None):
"""Cancel a request: remove it from requests, & errback the deferred.
NOTE: Attempts to cancel a request which is no longer tracked
(expectResponse == False and already sent, or response already
received) will raise KeyError
"""
if reason is None:
reason = CancelledError()
tReq = self.requests.pop(requestId)
tReq.d.errback(reason) | [
"def",
"cancelRequest",
"(",
"self",
",",
"requestId",
",",
"reason",
"=",
"None",
",",
"_",
"=",
"None",
")",
":",
"if",
"reason",
"is",
"None",
":",
"reason",
"=",
"CancelledError",
"(",
")",
"tReq",
"=",
"self",
".",
"requests",
".",
"pop",
"(",
"requestId",
")",
"tReq",
".",
"d",
".",
"errback",
"(",
"reason",
")"
] | Cancel a request: remove it from requests, & errback the deferred.
NOTE: Attempts to cancel a request which is no longer tracked
(expectResponse == False and already sent, or response already
received) will raise KeyError | [
"Cancel",
"a",
"request",
":",
"remove",
"it",
"from",
"requests",
"&",
"errback",
"the",
"deferred",
"."
] | python | train |
saltstack/salt | salt/beacons/glxinfo.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/beacons/glxinfo.py#L49-L82 | def beacon(config):
'''
Emit the status of a connected display to the minion
Mainly this is used to detect when the display fails to connect
for whatever reason.
.. code-block:: yaml
beacons:
glxinfo:
- user: frank
- screen_event: True
'''
log.trace('glxinfo beacon starting')
ret = []
_config = {}
list(map(_config.update, config))
retcode = __salt__['cmd.retcode']('DISPLAY=:0 glxinfo',
runas=_config['user'], python_shell=True)
if 'screen_event' in _config and _config['screen_event']:
last_value = last_state.get('screen_available', False)
screen_available = retcode == 0
if last_value != screen_available or 'screen_available' not in last_state:
ret.append({'tag': 'screen_event', 'screen_available': screen_available})
last_state['screen_available'] = screen_available
return ret | [
"def",
"beacon",
"(",
"config",
")",
":",
"log",
".",
"trace",
"(",
"'glxinfo beacon starting'",
")",
"ret",
"=",
"[",
"]",
"_config",
"=",
"{",
"}",
"list",
"(",
"map",
"(",
"_config",
".",
"update",
",",
"config",
")",
")",
"retcode",
"=",
"__salt__",
"[",
"'cmd.retcode'",
"]",
"(",
"'DISPLAY=:0 glxinfo'",
",",
"runas",
"=",
"_config",
"[",
"'user'",
"]",
",",
"python_shell",
"=",
"True",
")",
"if",
"'screen_event'",
"in",
"_config",
"and",
"_config",
"[",
"'screen_event'",
"]",
":",
"last_value",
"=",
"last_state",
".",
"get",
"(",
"'screen_available'",
",",
"False",
")",
"screen_available",
"=",
"retcode",
"==",
"0",
"if",
"last_value",
"!=",
"screen_available",
"or",
"'screen_available'",
"not",
"in",
"last_state",
":",
"ret",
".",
"append",
"(",
"{",
"'tag'",
":",
"'screen_event'",
",",
"'screen_available'",
":",
"screen_available",
"}",
")",
"last_state",
"[",
"'screen_available'",
"]",
"=",
"screen_available",
"return",
"ret"
] | Emit the status of a connected display to the minion
Mainly this is used to detect when the display fails to connect
for whatever reason.
.. code-block:: yaml
beacons:
glxinfo:
- user: frank
- screen_event: True | [
"Emit",
"the",
"status",
"of",
"a",
"connected",
"display",
"to",
"the",
"minion"
] | python | train |
okfn/ofs | ofs/local/storedjson.py | https://github.com/okfn/ofs/blob/c110cbecd7d0ae7e877963914a1a5af030cd6d45/ofs/local/storedjson.py#L33-L49 | def revert(self):
"""Revert the state to the version stored on disc."""
if self.filepath:
if path.isfile(self.filepath):
serialised_file = open(self.filepath, "r")
try:
self.state = json.load(serialised_file)
except ValueError:
print("No JSON information could be read from the persistence file - could be empty: %s" % self.filepath)
self.state = {}
finally:
serialised_file.close()
else:
print("The persistence file has not yet been created or does not exist, so the state cannot be read from it yet.")
else:
print("Filepath to the persistence file is not set. State cannot be read.")
return False | [
"def",
"revert",
"(",
"self",
")",
":",
"if",
"self",
".",
"filepath",
":",
"if",
"path",
".",
"isfile",
"(",
"self",
".",
"filepath",
")",
":",
"serialised_file",
"=",
"open",
"(",
"self",
".",
"filepath",
",",
"\"r\"",
")",
"try",
":",
"self",
".",
"state",
"=",
"json",
".",
"load",
"(",
"serialised_file",
")",
"except",
"ValueError",
":",
"print",
"(",
"\"No JSON information could be read from the persistence file - could be empty: %s\"",
"%",
"self",
".",
"filepath",
")",
"self",
".",
"state",
"=",
"{",
"}",
"finally",
":",
"serialised_file",
".",
"close",
"(",
")",
"else",
":",
"print",
"(",
"\"The persistence file has not yet been created or does not exist, so the state cannot be read from it yet.\"",
")",
"else",
":",
"print",
"(",
"\"Filepath to the persistence file is not set. State cannot be read.\"",
")",
"return",
"False"
] | Revert the state to the version stored on disc. | [
"Revert",
"the",
"state",
"to",
"the",
"version",
"stored",
"on",
"disc",
"."
] | python | train |
fr33jc/bang | bang/providers/hpcloud/v12/__init__.py | https://github.com/fr33jc/bang/blob/8f000713f88d2a9a8c1193b63ca10a6578560c16/bang/providers/hpcloud/v12/__init__.py#L50-L71 | def create_server(self, *args, **kwargs):
"""
Wraps :meth:`bang.providers.openstack.Nova.create_server` to apply
hpcloud specialization, namely pulling IP addresses from the hpcloud's
non-standard return values.
"""
# hpcloud's management console stuffs all of its tags in a "tags" tag.
# populate it with the stack and role values here only at server
# creation time. what users do with it after server creation is up to
# them.
tags = kwargs['tags']
tags[A.tags.TAGS] = ','.join([
tags.get(A.tags.STACK, ''),
tags.get(A.tags.ROLE, ''),
])
# Don't create an explicit floating IP; gets one
# automatically
if 'floating_ip' not in kwargs:
kwargs['floating_ip'] = False
s = super(HPNova, self).create_server(*args, **kwargs)
return fix_hp_addrs(s) | [
"def",
"create_server",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# hpcloud's management console stuffs all of its tags in a \"tags\" tag.",
"# populate it with the stack and role values here only at server",
"# creation time. what users do with it after server creation is up to",
"# them.",
"tags",
"=",
"kwargs",
"[",
"'tags'",
"]",
"tags",
"[",
"A",
".",
"tags",
".",
"TAGS",
"]",
"=",
"','",
".",
"join",
"(",
"[",
"tags",
".",
"get",
"(",
"A",
".",
"tags",
".",
"STACK",
",",
"''",
")",
",",
"tags",
".",
"get",
"(",
"A",
".",
"tags",
".",
"ROLE",
",",
"''",
")",
",",
"]",
")",
"# Don't create an explicit floating IP; gets one ",
"# automatically",
"if",
"'floating_ip'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'floating_ip'",
"]",
"=",
"False",
"s",
"=",
"super",
"(",
"HPNova",
",",
"self",
")",
".",
"create_server",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"fix_hp_addrs",
"(",
"s",
")"
] | Wraps :meth:`bang.providers.openstack.Nova.create_server` to apply
hpcloud specialization, namely pulling IP addresses from the hpcloud's
non-standard return values. | [
"Wraps",
":",
"meth",
":",
"bang",
".",
"providers",
".",
"openstack",
".",
"Nova",
".",
"create_server",
"to",
"apply",
"hpcloud",
"specialization",
"namely",
"pulling",
"IP",
"addresses",
"from",
"the",
"hpcloud",
"s",
"non",
"-",
"standard",
"return",
"values",
"."
] | python | train |
bluedynamics/cone.ugm | src/cone/ugm/model/localmanager.py | https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/model/localmanager.py#L118-L125 | def local_manager_rule(self):
"""Return rule for local manager.
"""
adm_gid = self.local_manager_gid
if not adm_gid:
return None
config = self.root['settings']['ugm_localmanager'].attrs
return config[adm_gid] | [
"def",
"local_manager_rule",
"(",
"self",
")",
":",
"adm_gid",
"=",
"self",
".",
"local_manager_gid",
"if",
"not",
"adm_gid",
":",
"return",
"None",
"config",
"=",
"self",
".",
"root",
"[",
"'settings'",
"]",
"[",
"'ugm_localmanager'",
"]",
".",
"attrs",
"return",
"config",
"[",
"adm_gid",
"]"
] | Return rule for local manager. | [
"Return",
"rule",
"for",
"local",
"manager",
"."
] | python | train |
tensorflow/tensor2tensor | tensor2tensor/layers/transformer_memory.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/transformer_memory.py#L31-L45 | def pre_attention(self, segment, query_antecedent, memory_antecedent, bias):
"""Called prior to self-attention, to incorporate memory items.
Args:
segment: an integer Tensor with shape [batch]
query_antecedent: a Tensor with shape [batch, length_q, channels]
memory_antecedent: must be None. Attention normally allows this to be a
Tensor with shape [batch, length_m, channels], but we currently only
support memory for decoder-side self-attention.
bias: bias Tensor (see attention_bias())
Returns:
(data, new_query_antecedent, new_memory_antecedent, new_bias)
"""
del segment
return None, query_antecedent, memory_antecedent, bias | [
"def",
"pre_attention",
"(",
"self",
",",
"segment",
",",
"query_antecedent",
",",
"memory_antecedent",
",",
"bias",
")",
":",
"del",
"segment",
"return",
"None",
",",
"query_antecedent",
",",
"memory_antecedent",
",",
"bias"
] | Called prior to self-attention, to incorporate memory items.
Args:
segment: an integer Tensor with shape [batch]
query_antecedent: a Tensor with shape [batch, length_q, channels]
memory_antecedent: must be None. Attention normally allows this to be a
Tensor with shape [batch, length_m, channels], but we currently only
support memory for decoder-side self-attention.
bias: bias Tensor (see attention_bias())
Returns:
(data, new_query_antecedent, new_memory_antecedent, new_bias) | [
"Called",
"prior",
"to",
"self",
"-",
"attention",
"to",
"incorporate",
"memory",
"items",
"."
] | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/frontend/qt/console/console_widget.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/console_widget.py#L952-L976 | def _context_menu_make(self, pos):
""" Creates a context menu for the given QPoint (in widget coordinates).
"""
menu = QtGui.QMenu(self)
self.cut_action = menu.addAction('Cut', self.cut)
self.cut_action.setEnabled(self.can_cut())
self.cut_action.setShortcut(QtGui.QKeySequence.Cut)
self.copy_action = menu.addAction('Copy', self.copy)
self.copy_action.setEnabled(self.can_copy())
self.copy_action.setShortcut(QtGui.QKeySequence.Copy)
self.paste_action = menu.addAction('Paste', self.paste)
self.paste_action.setEnabled(self.can_paste())
self.paste_action.setShortcut(QtGui.QKeySequence.Paste)
menu.addSeparator()
menu.addAction(self.select_all_action)
menu.addSeparator()
menu.addAction(self.export_action)
menu.addAction(self.print_action)
return menu | [
"def",
"_context_menu_make",
"(",
"self",
",",
"pos",
")",
":",
"menu",
"=",
"QtGui",
".",
"QMenu",
"(",
"self",
")",
"self",
".",
"cut_action",
"=",
"menu",
".",
"addAction",
"(",
"'Cut'",
",",
"self",
".",
"cut",
")",
"self",
".",
"cut_action",
".",
"setEnabled",
"(",
"self",
".",
"can_cut",
"(",
")",
")",
"self",
".",
"cut_action",
".",
"setShortcut",
"(",
"QtGui",
".",
"QKeySequence",
".",
"Cut",
")",
"self",
".",
"copy_action",
"=",
"menu",
".",
"addAction",
"(",
"'Copy'",
",",
"self",
".",
"copy",
")",
"self",
".",
"copy_action",
".",
"setEnabled",
"(",
"self",
".",
"can_copy",
"(",
")",
")",
"self",
".",
"copy_action",
".",
"setShortcut",
"(",
"QtGui",
".",
"QKeySequence",
".",
"Copy",
")",
"self",
".",
"paste_action",
"=",
"menu",
".",
"addAction",
"(",
"'Paste'",
",",
"self",
".",
"paste",
")",
"self",
".",
"paste_action",
".",
"setEnabled",
"(",
"self",
".",
"can_paste",
"(",
")",
")",
"self",
".",
"paste_action",
".",
"setShortcut",
"(",
"QtGui",
".",
"QKeySequence",
".",
"Paste",
")",
"menu",
".",
"addSeparator",
"(",
")",
"menu",
".",
"addAction",
"(",
"self",
".",
"select_all_action",
")",
"menu",
".",
"addSeparator",
"(",
")",
"menu",
".",
"addAction",
"(",
"self",
".",
"export_action",
")",
"menu",
".",
"addAction",
"(",
"self",
".",
"print_action",
")",
"return",
"menu"
] | Creates a context menu for the given QPoint (in widget coordinates). | [
"Creates",
"a",
"context",
"menu",
"for",
"the",
"given",
"QPoint",
"(",
"in",
"widget",
"coordinates",
")",
"."
] | python | test |
TomasTomecek/sen | sen/docker_backend.py | https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/docker_backend.py#L612-L624 | def net(self):
"""
get ACTIVE port mappings of a container
:return: dict:
{
"host_port": "container_port"
}
"""
try:
return NetData(self.inspect(cached=True).response)
except docker.errors.NotFound:
raise NotAvailableAnymore() | [
"def",
"net",
"(",
"self",
")",
":",
"try",
":",
"return",
"NetData",
"(",
"self",
".",
"inspect",
"(",
"cached",
"=",
"True",
")",
".",
"response",
")",
"except",
"docker",
".",
"errors",
".",
"NotFound",
":",
"raise",
"NotAvailableAnymore",
"(",
")"
] | get ACTIVE port mappings of a container
:return: dict:
{
"host_port": "container_port"
} | [
"get",
"ACTIVE",
"port",
"mappings",
"of",
"a",
"container"
] | python | train |
asweigart/pytweening | pytweening/__init__.py | https://github.com/asweigart/pytweening/blob/20d74368e53dc7d0f77c810b624b2c90994f099d/pytweening/__init__.py#L156-L170 | def easeInOutQuad(n):
"""A quadratic tween function that accelerates, reaches the midpoint, and then decelerates.
Args:
n (float): The time progress, starting at 0.0 and ending at 1.0.
Returns:
(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
"""
_checkRange(n)
if n < 0.5:
return 2 * n**2
else:
n = n * 2 - 1
return -0.5 * (n*(n-2) - 1) | [
"def",
"easeInOutQuad",
"(",
"n",
")",
":",
"_checkRange",
"(",
"n",
")",
"if",
"n",
"<",
"0.5",
":",
"return",
"2",
"*",
"n",
"**",
"2",
"else",
":",
"n",
"=",
"n",
"*",
"2",
"-",
"1",
"return",
"-",
"0.5",
"*",
"(",
"n",
"*",
"(",
"n",
"-",
"2",
")",
"-",
"1",
")"
] | A quadratic tween function that accelerates, reaches the midpoint, and then decelerates.
Args:
n (float): The time progress, starting at 0.0 and ending at 1.0.
Returns:
(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine(). | [
"A",
"quadratic",
"tween",
"function",
"that",
"accelerates",
"reaches",
"the",
"midpoint",
"and",
"then",
"decelerates",
"."
] | python | train |
r-barnes/richdem | wrappers/pyrichdem/richdem/__init__.py | https://github.com/r-barnes/richdem/blob/abc04d81216d7cf5b57ad7a1e04b5699369b6f58/wrappers/pyrichdem/richdem/__init__.py#L289-L319 | def SaveGDAL(filename, rda):
"""Save a GDAL file.
Saves a RichDEM array to a data file in GeoTIFF format.
If you need to do something more complicated, look at the source of this
function.
Args:
filename (str): Name of the raster file to be created
rda (rdarray): Data to save.
Returns:
No Return
"""
if type(rda) is not rdarray:
raise Exception("A richdem.rdarray or numpy.ndarray is required!")
if not GDAL_AVAILABLE:
raise Exception("richdem.SaveGDAL() requires GDAL.")
driver = gdal.GetDriverByName('GTiff')
data_type = gdal.GDT_Float32 #TODO
data_set = driver.Create(filename, xsize=rda.shape[1], ysize=rda.shape[0], bands=1, eType=data_type)
data_set.SetGeoTransform(rda.geotransform)
data_set.SetProjection(rda.projection)
band = data_set.GetRasterBand(1)
band.SetNoDataValue(rda.no_data)
band.WriteArray(np.array(rda))
for k,v in rda.metadata.items():
data_set.SetMetadataItem(str(k),str(v)) | [
"def",
"SaveGDAL",
"(",
"filename",
",",
"rda",
")",
":",
"if",
"type",
"(",
"rda",
")",
"is",
"not",
"rdarray",
":",
"raise",
"Exception",
"(",
"\"A richdem.rdarray or numpy.ndarray is required!\"",
")",
"if",
"not",
"GDAL_AVAILABLE",
":",
"raise",
"Exception",
"(",
"\"richdem.SaveGDAL() requires GDAL.\"",
")",
"driver",
"=",
"gdal",
".",
"GetDriverByName",
"(",
"'GTiff'",
")",
"data_type",
"=",
"gdal",
".",
"GDT_Float32",
"#TODO",
"data_set",
"=",
"driver",
".",
"Create",
"(",
"filename",
",",
"xsize",
"=",
"rda",
".",
"shape",
"[",
"1",
"]",
",",
"ysize",
"=",
"rda",
".",
"shape",
"[",
"0",
"]",
",",
"bands",
"=",
"1",
",",
"eType",
"=",
"data_type",
")",
"data_set",
".",
"SetGeoTransform",
"(",
"rda",
".",
"geotransform",
")",
"data_set",
".",
"SetProjection",
"(",
"rda",
".",
"projection",
")",
"band",
"=",
"data_set",
".",
"GetRasterBand",
"(",
"1",
")",
"band",
".",
"SetNoDataValue",
"(",
"rda",
".",
"no_data",
")",
"band",
".",
"WriteArray",
"(",
"np",
".",
"array",
"(",
"rda",
")",
")",
"for",
"k",
",",
"v",
"in",
"rda",
".",
"metadata",
".",
"items",
"(",
")",
":",
"data_set",
".",
"SetMetadataItem",
"(",
"str",
"(",
"k",
")",
",",
"str",
"(",
"v",
")",
")"
] | Save a GDAL file.
Saves a RichDEM array to a data file in GeoTIFF format.
If you need to do something more complicated, look at the source of this
function.
Args:
filename (str): Name of the raster file to be created
rda (rdarray): Data to save.
Returns:
No Return | [
"Save",
"a",
"GDAL",
"file",
"."
] | python | train |
hhatto/autopep8 | autopep8.py | https://github.com/hhatto/autopep8/blob/fda3bb39181437b6b8a0aa0185f21ae5f14385dd/autopep8.py#L2211-L2248 | def _prevent_default_initializer_splitting(self, item, indent_amt):
"""Prevent splitting between a default initializer.
When there is a default initializer, it's best to keep it all on
the same line. It's nicer and more readable, even if it goes
over the maximum allowable line length. This goes back along the
current line to determine if we have a default initializer, and,
if so, to remove extraneous whitespaces and add a line
break/indent before it if needed.
"""
if unicode(item) == '=':
# This is the assignment in the initializer. Just remove spaces for
# now.
self._delete_whitespace()
return
if (not self._prev_item or not self._prev_prev_item or
unicode(self._prev_item) != '='):
return
self._delete_whitespace()
prev_prev_index = self._lines.index(self._prev_prev_item)
if (
isinstance(self._lines[prev_prev_index - 1], self._Indent) or
self.fits_on_current_line(item.size + 1)
):
# The default initializer is already the only item on this line.
# Don't insert a newline here.
return
# Replace the space with a newline/indent combo.
if isinstance(self._lines[prev_prev_index - 1], self._Space):
del self._lines[prev_prev_index - 1]
self.add_line_break_at(self._lines.index(self._prev_prev_item),
indent_amt) | [
"def",
"_prevent_default_initializer_splitting",
"(",
"self",
",",
"item",
",",
"indent_amt",
")",
":",
"if",
"unicode",
"(",
"item",
")",
"==",
"'='",
":",
"# This is the assignment in the initializer. Just remove spaces for",
"# now.",
"self",
".",
"_delete_whitespace",
"(",
")",
"return",
"if",
"(",
"not",
"self",
".",
"_prev_item",
"or",
"not",
"self",
".",
"_prev_prev_item",
"or",
"unicode",
"(",
"self",
".",
"_prev_item",
")",
"!=",
"'='",
")",
":",
"return",
"self",
".",
"_delete_whitespace",
"(",
")",
"prev_prev_index",
"=",
"self",
".",
"_lines",
".",
"index",
"(",
"self",
".",
"_prev_prev_item",
")",
"if",
"(",
"isinstance",
"(",
"self",
".",
"_lines",
"[",
"prev_prev_index",
"-",
"1",
"]",
",",
"self",
".",
"_Indent",
")",
"or",
"self",
".",
"fits_on_current_line",
"(",
"item",
".",
"size",
"+",
"1",
")",
")",
":",
"# The default initializer is already the only item on this line.",
"# Don't insert a newline here.",
"return",
"# Replace the space with a newline/indent combo.",
"if",
"isinstance",
"(",
"self",
".",
"_lines",
"[",
"prev_prev_index",
"-",
"1",
"]",
",",
"self",
".",
"_Space",
")",
":",
"del",
"self",
".",
"_lines",
"[",
"prev_prev_index",
"-",
"1",
"]",
"self",
".",
"add_line_break_at",
"(",
"self",
".",
"_lines",
".",
"index",
"(",
"self",
".",
"_prev_prev_item",
")",
",",
"indent_amt",
")"
] | Prevent splitting between a default initializer.
When there is a default initializer, it's best to keep it all on
the same line. It's nicer and more readable, even if it goes
over the maximum allowable line length. This goes back along the
current line to determine if we have a default initializer, and,
if so, to remove extraneous whitespaces and add a line
break/indent before it if needed. | [
"Prevent",
"splitting",
"between",
"a",
"default",
"initializer",
"."
] | python | train |
edeposit/edeposit.amqp.harvester | src/edeposit/amqp/harvester/scrappers/grada_cz.py | https://github.com/edeposit/edeposit.amqp.harvester/blob/38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e/src/edeposit/amqp/harvester/scrappers/grada_cz.py#L54-L80 | def _parse_title_url(html_chunk):
"""
Parse title/name of the book and URL of the book.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
tuple: (title, url), both as strings.
"""
title = html_chunk.find("div", {"class": "comment"})
if not title:
return _parse_alt_title(html_chunk), None
title = title[0].find("h2")
if not title:
return _parse_alt_title(html_chunk), None
# look for the url of the book if present
url = None
url_tag = title[0].find("a")
if url_tag:
url = url_tag[0].params.get("href", None)
title = url_tag
return title[0].getContent(), normalize_url(BASE_URL, url) | [
"def",
"_parse_title_url",
"(",
"html_chunk",
")",
":",
"title",
"=",
"html_chunk",
".",
"find",
"(",
"\"div\"",
",",
"{",
"\"class\"",
":",
"\"comment\"",
"}",
")",
"if",
"not",
"title",
":",
"return",
"_parse_alt_title",
"(",
"html_chunk",
")",
",",
"None",
"title",
"=",
"title",
"[",
"0",
"]",
".",
"find",
"(",
"\"h2\"",
")",
"if",
"not",
"title",
":",
"return",
"_parse_alt_title",
"(",
"html_chunk",
")",
",",
"None",
"# look for the url of the book if present",
"url",
"=",
"None",
"url_tag",
"=",
"title",
"[",
"0",
"]",
".",
"find",
"(",
"\"a\"",
")",
"if",
"url_tag",
":",
"url",
"=",
"url_tag",
"[",
"0",
"]",
".",
"params",
".",
"get",
"(",
"\"href\"",
",",
"None",
")",
"title",
"=",
"url_tag",
"return",
"title",
"[",
"0",
"]",
".",
"getContent",
"(",
")",
",",
"normalize_url",
"(",
"BASE_URL",
",",
"url",
")"
] | Parse title/name of the book and URL of the book.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
tuple: (title, url), both as strings. | [
"Parse",
"title",
"/",
"name",
"of",
"the",
"book",
"and",
"URL",
"of",
"the",
"book",
"."
] | python | train |
GNS3/gns3-server | gns3server/controller/project.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/project.py#L922-L929 | def suspend_all(self):
"""
Suspend all nodes
"""
pool = Pool(concurrency=3)
for node in self.nodes.values():
pool.append(node.suspend)
yield from pool.join() | [
"def",
"suspend_all",
"(",
"self",
")",
":",
"pool",
"=",
"Pool",
"(",
"concurrency",
"=",
"3",
")",
"for",
"node",
"in",
"self",
".",
"nodes",
".",
"values",
"(",
")",
":",
"pool",
".",
"append",
"(",
"node",
".",
"suspend",
")",
"yield",
"from",
"pool",
".",
"join",
"(",
")"
] | Suspend all nodes | [
"Suspend",
"all",
"nodes"
] | python | train |
d0c-s4vage/pfp | pfp/interp.py | https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/interp.py#L2336-L2385 | def _resolve_to_field_class(self, names, scope):
"""Resolve the names to a class in fields.py, resolving past
typedefs, etc
:names: TODO
:scope: TODO
:ctxt: TODO
:returns: TODO
"""
switch = {
"char" : "Char",
"int" : "Int",
"long" : "Int",
"int64" : "Int64",
"uint64" : "UInt64",
"short" : "Short",
"double" : "Double",
"float" : "Float",
"void" : "Void",
"string" : "String",
"wstring" : "WString"
}
core = names[-1]
if core not in switch:
# will return a list of resolved names
type_info = scope.get_type(core)
if type(type_info) is type and issubclass(type_info, fields.Field):
return type_info
resolved_names = type_info
if resolved_names is None:
raise errors.UnresolvedType(self._coord, " ".join(names), " ")
if resolved_names[-1] not in switch:
raise errors.UnresolvedType(self._coord, " ".join(names), " ".join(resolved_names))
names = copy.copy(names)
names.pop()
names += resolved_names
if len(names) >= 2 and names[-1] == names[-2] and names[-1] == "long":
res = "Int64"
else:
res = switch[names[-1]]
if names[-1] in ["char", "short", "int", "long"] and "unsigned" in names[:-1]:
res = "U" + res
cls = getattr(fields, res)
return cls | [
"def",
"_resolve_to_field_class",
"(",
"self",
",",
"names",
",",
"scope",
")",
":",
"switch",
"=",
"{",
"\"char\"",
":",
"\"Char\"",
",",
"\"int\"",
":",
"\"Int\"",
",",
"\"long\"",
":",
"\"Int\"",
",",
"\"int64\"",
":",
"\"Int64\"",
",",
"\"uint64\"",
":",
"\"UInt64\"",
",",
"\"short\"",
":",
"\"Short\"",
",",
"\"double\"",
":",
"\"Double\"",
",",
"\"float\"",
":",
"\"Float\"",
",",
"\"void\"",
":",
"\"Void\"",
",",
"\"string\"",
":",
"\"String\"",
",",
"\"wstring\"",
":",
"\"WString\"",
"}",
"core",
"=",
"names",
"[",
"-",
"1",
"]",
"if",
"core",
"not",
"in",
"switch",
":",
"# will return a list of resolved names",
"type_info",
"=",
"scope",
".",
"get_type",
"(",
"core",
")",
"if",
"type",
"(",
"type_info",
")",
"is",
"type",
"and",
"issubclass",
"(",
"type_info",
",",
"fields",
".",
"Field",
")",
":",
"return",
"type_info",
"resolved_names",
"=",
"type_info",
"if",
"resolved_names",
"is",
"None",
":",
"raise",
"errors",
".",
"UnresolvedType",
"(",
"self",
".",
"_coord",
",",
"\" \"",
".",
"join",
"(",
"names",
")",
",",
"\" \"",
")",
"if",
"resolved_names",
"[",
"-",
"1",
"]",
"not",
"in",
"switch",
":",
"raise",
"errors",
".",
"UnresolvedType",
"(",
"self",
".",
"_coord",
",",
"\" \"",
".",
"join",
"(",
"names",
")",
",",
"\" \"",
".",
"join",
"(",
"resolved_names",
")",
")",
"names",
"=",
"copy",
".",
"copy",
"(",
"names",
")",
"names",
".",
"pop",
"(",
")",
"names",
"+=",
"resolved_names",
"if",
"len",
"(",
"names",
")",
">=",
"2",
"and",
"names",
"[",
"-",
"1",
"]",
"==",
"names",
"[",
"-",
"2",
"]",
"and",
"names",
"[",
"-",
"1",
"]",
"==",
"\"long\"",
":",
"res",
"=",
"\"Int64\"",
"else",
":",
"res",
"=",
"switch",
"[",
"names",
"[",
"-",
"1",
"]",
"]",
"if",
"names",
"[",
"-",
"1",
"]",
"in",
"[",
"\"char\"",
",",
"\"short\"",
",",
"\"int\"",
",",
"\"long\"",
"]",
"and",
"\"unsigned\"",
"in",
"names",
"[",
":",
"-",
"1",
"]",
":",
"res",
"=",
"\"U\"",
"+",
"res",
"cls",
"=",
"getattr",
"(",
"fields",
",",
"res",
")",
"return",
"cls"
] | Resolve the names to a class in fields.py, resolving past
typedefs, etc
:names: TODO
:scope: TODO
:ctxt: TODO
:returns: TODO | [
"Resolve",
"the",
"names",
"to",
"a",
"class",
"in",
"fields",
".",
"py",
"resolving",
"past",
"typedefs",
"etc"
] | python | train |
xapple/plumbing | plumbing/common.py | https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/common.py#L286-L296 | def natural_sort(item):
"""
Sort strings that contain numbers correctly. Works in Python 2 and 3.
>>> l = ['v1.3.12', 'v1.3.3', 'v1.2.5', 'v1.2.15', 'v1.2.3', 'v1.2.1']
>>> l.sort(key=natural_sort)
>>> l.__repr__()
"['v1.2.1', 'v1.2.3', 'v1.2.5', 'v1.2.15', 'v1.3.3', 'v1.3.12']"
"""
dre = re.compile(r'(\d+)')
return [int(s) if s.isdigit() else s.lower() for s in re.split(dre, item)] | [
"def",
"natural_sort",
"(",
"item",
")",
":",
"dre",
"=",
"re",
".",
"compile",
"(",
"r'(\\d+)'",
")",
"return",
"[",
"int",
"(",
"s",
")",
"if",
"s",
".",
"isdigit",
"(",
")",
"else",
"s",
".",
"lower",
"(",
")",
"for",
"s",
"in",
"re",
".",
"split",
"(",
"dre",
",",
"item",
")",
"]"
] | Sort strings that contain numbers correctly. Works in Python 2 and 3.
>>> l = ['v1.3.12', 'v1.3.3', 'v1.2.5', 'v1.2.15', 'v1.2.3', 'v1.2.1']
>>> l.sort(key=natural_sort)
>>> l.__repr__()
"['v1.2.1', 'v1.2.3', 'v1.2.5', 'v1.2.15', 'v1.3.3', 'v1.3.12']" | [
"Sort",
"strings",
"that",
"contain",
"numbers",
"correctly",
".",
"Works",
"in",
"Python",
"2",
"and",
"3",
"."
] | python | train |
coded-by-hand/mass | env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/commands/zip.py | https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/commands/zip.py#L58-L86 | def paths(self):
"""All the entries of sys.path, possibly restricted by --path"""
if not self.select_paths:
return sys.path
result = []
match_any = set()
for path in sys.path:
path = os.path.normcase(os.path.abspath(path))
for match in self.select_paths:
match = os.path.normcase(os.path.abspath(match))
if '*' in match:
if re.search(fnmatch.translate(match+'*'), path):
result.append(path)
match_any.add(match)
break
else:
if path.startswith(match):
result.append(path)
match_any.add(match)
break
else:
logger.debug("Skipping path %s because it doesn't match %s"
% (path, ', '.join(self.select_paths)))
for match in self.select_paths:
if match not in match_any and '*' not in match:
result.append(match)
logger.debug("Adding path %s because it doesn't match anything already on sys.path"
% match)
return result | [
"def",
"paths",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"select_paths",
":",
"return",
"sys",
".",
"path",
"result",
"=",
"[",
"]",
"match_any",
"=",
"set",
"(",
")",
"for",
"path",
"in",
"sys",
".",
"path",
":",
"path",
"=",
"os",
".",
"path",
".",
"normcase",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
")",
"for",
"match",
"in",
"self",
".",
"select_paths",
":",
"match",
"=",
"os",
".",
"path",
".",
"normcase",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"match",
")",
")",
"if",
"'*'",
"in",
"match",
":",
"if",
"re",
".",
"search",
"(",
"fnmatch",
".",
"translate",
"(",
"match",
"+",
"'*'",
")",
",",
"path",
")",
":",
"result",
".",
"append",
"(",
"path",
")",
"match_any",
".",
"add",
"(",
"match",
")",
"break",
"else",
":",
"if",
"path",
".",
"startswith",
"(",
"match",
")",
":",
"result",
".",
"append",
"(",
"path",
")",
"match_any",
".",
"add",
"(",
"match",
")",
"break",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Skipping path %s because it doesn't match %s\"",
"%",
"(",
"path",
",",
"', '",
".",
"join",
"(",
"self",
".",
"select_paths",
")",
")",
")",
"for",
"match",
"in",
"self",
".",
"select_paths",
":",
"if",
"match",
"not",
"in",
"match_any",
"and",
"'*'",
"not",
"in",
"match",
":",
"result",
".",
"append",
"(",
"match",
")",
"logger",
".",
"debug",
"(",
"\"Adding path %s because it doesn't match anything already on sys.path\"",
"%",
"match",
")",
"return",
"result"
] | All the entries of sys.path, possibly restricted by --path | [
"All",
"the",
"entries",
"of",
"sys",
".",
"path",
"possibly",
"restricted",
"by",
"--",
"path"
] | python | train |
janpipek/physt | physt/special.py | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/special.py#L331-L345 | def _prepare_data(data, transformed, klass, *args, **kwargs):
"""Transform data for binning.
Returns
-------
np.ndarray
"""
# TODO: Maybe include in the class itself?
data = np.asarray(data)
if not transformed:
data = klass.transform(data)
dropna = kwargs.get("dropna", False)
if dropna:
data = data[~np.isnan(data).any(axis=1)]
return data | [
"def",
"_prepare_data",
"(",
"data",
",",
"transformed",
",",
"klass",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# TODO: Maybe include in the class itself?",
"data",
"=",
"np",
".",
"asarray",
"(",
"data",
")",
"if",
"not",
"transformed",
":",
"data",
"=",
"klass",
".",
"transform",
"(",
"data",
")",
"dropna",
"=",
"kwargs",
".",
"get",
"(",
"\"dropna\"",
",",
"False",
")",
"if",
"dropna",
":",
"data",
"=",
"data",
"[",
"~",
"np",
".",
"isnan",
"(",
"data",
")",
".",
"any",
"(",
"axis",
"=",
"1",
")",
"]",
"return",
"data"
] | Transform data for binning.
Returns
-------
np.ndarray | [
"Transform",
"data",
"for",
"binning",
"."
] | python | train |
sdispater/pendulum | pendulum/time.py | https://github.com/sdispater/pendulum/blob/94d28b0d3cb524ae02361bd1ed7ea03e2e655e4e/pendulum/time.py#L193-L221 | def diff(self, dt=None, abs=True):
"""
Returns the difference between two Time objects as a Duration.
:type dt: Time or None
:param abs: Whether to return an absolute interval or not
:type abs: bool
:rtype: Duration
"""
if dt is None:
dt = pendulum.now().time()
else:
dt = self.__class__(dt.hour, dt.minute, dt.second, dt.microsecond)
us1 = (
self.hour * SECS_PER_HOUR + self.minute * SECS_PER_MIN + self.second
) * USECS_PER_SEC
us2 = (
dt.hour * SECS_PER_HOUR + dt.minute * SECS_PER_MIN + dt.second
) * USECS_PER_SEC
klass = Duration
if abs:
klass = AbsoluteDuration
return klass(microseconds=us2 - us1) | [
"def",
"diff",
"(",
"self",
",",
"dt",
"=",
"None",
",",
"abs",
"=",
"True",
")",
":",
"if",
"dt",
"is",
"None",
":",
"dt",
"=",
"pendulum",
".",
"now",
"(",
")",
".",
"time",
"(",
")",
"else",
":",
"dt",
"=",
"self",
".",
"__class__",
"(",
"dt",
".",
"hour",
",",
"dt",
".",
"minute",
",",
"dt",
".",
"second",
",",
"dt",
".",
"microsecond",
")",
"us1",
"=",
"(",
"self",
".",
"hour",
"*",
"SECS_PER_HOUR",
"+",
"self",
".",
"minute",
"*",
"SECS_PER_MIN",
"+",
"self",
".",
"second",
")",
"*",
"USECS_PER_SEC",
"us2",
"=",
"(",
"dt",
".",
"hour",
"*",
"SECS_PER_HOUR",
"+",
"dt",
".",
"minute",
"*",
"SECS_PER_MIN",
"+",
"dt",
".",
"second",
")",
"*",
"USECS_PER_SEC",
"klass",
"=",
"Duration",
"if",
"abs",
":",
"klass",
"=",
"AbsoluteDuration",
"return",
"klass",
"(",
"microseconds",
"=",
"us2",
"-",
"us1",
")"
] | Returns the difference between two Time objects as a Duration.
:type dt: Time or None
:param abs: Whether to return an absolute interval or not
:type abs: bool
:rtype: Duration | [
"Returns",
"the",
"difference",
"between",
"two",
"Time",
"objects",
"as",
"an",
"Duration",
"."
] | python | train |
cyface/django-termsandconditions | termsandconditions/signals.py | https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/signals.py#L23-L31 | def terms_updated(sender, **kwargs):
"""Called when terms and conditions is changed - to force cache clearing"""
LOGGER.debug("T&C Updated Signal Handler")
cache.delete('tandc.active_terms_ids')
cache.delete('tandc.active_terms_list')
if kwargs.get('instance').slug:
cache.delete('tandc.active_terms_' + kwargs.get('instance').slug)
for utandc in UserTermsAndConditions.objects.all():
cache.delete('tandc.not_agreed_terms_' + utandc.user.get_username()) | [
"def",
"terms_updated",
"(",
"sender",
",",
"*",
"*",
"kwargs",
")",
":",
"LOGGER",
".",
"debug",
"(",
"\"T&C Updated Signal Handler\"",
")",
"cache",
".",
"delete",
"(",
"'tandc.active_terms_ids'",
")",
"cache",
".",
"delete",
"(",
"'tandc.active_terms_list'",
")",
"if",
"kwargs",
".",
"get",
"(",
"'instance'",
")",
".",
"slug",
":",
"cache",
".",
"delete",
"(",
"'tandc.active_terms_'",
"+",
"kwargs",
".",
"get",
"(",
"'instance'",
")",
".",
"slug",
")",
"for",
"utandc",
"in",
"UserTermsAndConditions",
".",
"objects",
".",
"all",
"(",
")",
":",
"cache",
".",
"delete",
"(",
"'tandc.not_agreed_terms_'",
"+",
"utandc",
".",
"user",
".",
"get_username",
"(",
")",
")"
] | Called when terms and conditions is changed - to force cache clearing | [
"Called",
"when",
"terms",
"and",
"conditions",
"is",
"changed",
"-",
"to",
"force",
"cache",
"clearing"
] | python | train |
Jammy2211/PyAutoLens | autolens/model/galaxy/galaxy.py | https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/galaxy/galaxy.py#L293-L305 | def einstein_mass_in_units(self, unit_mass='angular', critical_surface_density=None):
"""The Einstein Mass of this galaxy, which is the sum of Einstein Radii of its mass profiles.
If the galaxy is composed of multiple elliptical profiles with different axis-ratios, this Einstein Mass \
may be inaccurate. This is because the differently oriented ellipses of each mass profile """
if self.has_mass_profile:
return sum(
map(lambda p: p.einstein_mass_in_units(unit_mass=unit_mass,
critical_surface_density=critical_surface_density),
self.mass_profiles))
else:
return None | [
"def",
"einstein_mass_in_units",
"(",
"self",
",",
"unit_mass",
"=",
"'angular'",
",",
"critical_surface_density",
"=",
"None",
")",
":",
"if",
"self",
".",
"has_mass_profile",
":",
"return",
"sum",
"(",
"map",
"(",
"lambda",
"p",
":",
"p",
".",
"einstein_mass_in_units",
"(",
"unit_mass",
"=",
"unit_mass",
",",
"critical_surface_density",
"=",
"critical_surface_density",
")",
",",
"self",
".",
"mass_profiles",
")",
")",
"else",
":",
"return",
"None"
] | The Einstein Mass of this galaxy, which is the sum of Einstein Radii of its mass profiles.
If the galaxy is composed of multiple elliptical profiles with different axis-ratios, this Einstein Mass \
may be inaccurate. This is because the differently oriented ellipses of each mass profile | [
"The",
"Einstein",
"Mass",
"of",
"this",
"galaxy",
"which",
"is",
"the",
"sum",
"of",
"Einstein",
"Radii",
"of",
"its",
"mass",
"profiles",
"."
] | python | valid |
underworldcode/stripy | stripy-src/stripy/spherical.py | https://github.com/underworldcode/stripy/blob/d4c3480c3e58c88489ded695eadbe7cd5bf94b48/stripy-src/stripy/spherical.py#L128-L138 | def _generate_permutation(self, npoints):
"""
Create shuffle and deshuffle vectors
"""
i = np.arange(0, npoints)
# permutation
p = np.random.permutation(npoints)
ip = np.empty_like(p)
# inverse permutation
ip[p[i]] = i
return p, ip | [
"def",
"_generate_permutation",
"(",
"self",
",",
"npoints",
")",
":",
"i",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"npoints",
")",
"# permutation",
"p",
"=",
"np",
".",
"random",
".",
"permutation",
"(",
"npoints",
")",
"ip",
"=",
"np",
".",
"empty_like",
"(",
"p",
")",
"# inverse permutation",
"ip",
"[",
"p",
"[",
"i",
"]",
"]",
"=",
"i",
"return",
"p",
",",
"ip"
] | Create shuffle and deshuffle vectors | [
"Create",
"shuffle",
"and",
"deshuffle",
"vectors"
] | python | train |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/internal/utils.py | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/utils.py#L251-L268 | def encrypt_put_item(encrypt_method, crypto_config_method, write_method, **kwargs):
# type: (Callable, Callable, Callable, **Any) -> Dict
# TODO: narrow this down
"""Transparently encrypt an item before putting it to the table.
:param callable encrypt_method: Method to use to encrypt items
:param callable crypto_config_method: Method that accepts ``kwargs`` and provides a :class:`CryptoConfig`
:param callable write_method: Method that writes to the table
:param **kwargs: Keyword arguments to pass to ``write_method``
:return: DynamoDB response
:rtype: dict
"""
crypto_config, ddb_kwargs = crypto_config_method(**kwargs)
ddb_kwargs["Item"] = encrypt_method(
item=ddb_kwargs["Item"],
crypto_config=crypto_config.with_item(_item_transformer(encrypt_method)(ddb_kwargs["Item"])),
)
return write_method(**ddb_kwargs) | [
"def",
"encrypt_put_item",
"(",
"encrypt_method",
",",
"crypto_config_method",
",",
"write_method",
",",
"*",
"*",
"kwargs",
")",
":",
"# type: (Callable, Callable, Callable, **Any) -> Dict",
"# TODO: narrow this down",
"crypto_config",
",",
"ddb_kwargs",
"=",
"crypto_config_method",
"(",
"*",
"*",
"kwargs",
")",
"ddb_kwargs",
"[",
"\"Item\"",
"]",
"=",
"encrypt_method",
"(",
"item",
"=",
"ddb_kwargs",
"[",
"\"Item\"",
"]",
",",
"crypto_config",
"=",
"crypto_config",
".",
"with_item",
"(",
"_item_transformer",
"(",
"encrypt_method",
")",
"(",
"ddb_kwargs",
"[",
"\"Item\"",
"]",
")",
")",
",",
")",
"return",
"write_method",
"(",
"*",
"*",
"ddb_kwargs",
")"
] | Transparently encrypt an item before putting it to the table.
:param callable encrypt_method: Method to use to encrypt items
:param callable crypto_config_method: Method that accepts ``kwargs`` and provides a :class:`CryptoConfig`
:param callable write_method: Method that writes to the table
:param **kwargs: Keyword arguments to pass to ``write_method``
:return: DynamoDB response
:rtype: dict | [
"Transparently",
"encrypt",
"an",
"item",
"before",
"putting",
"it",
"to",
"the",
"table",
"."
] | python | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/io.py | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/io.py#L50-L65 | def table_to_csv(table, engine, filepath, chunksize=1000, overwrite=False):
"""
Export entire table to a csv file.
:param table: :class:`sqlalchemy.Table` instance.
:param engine: :class:`sqlalchemy.engine.base.Engine`.
:param filepath: file path.
:param chunksize: number of rows to write to csv each time.
:param overwrite: bool, if True, avoid overwriting an existing file.
**Chinese documentation**
Writes all of the data in the table to a csv file.
"""
sql = select([table])
sql_to_csv(sql, engine, filepath, chunksize) | [
"def",
"table_to_csv",
"(",
"table",
",",
"engine",
",",
"filepath",
",",
"chunksize",
"=",
"1000",
",",
"overwrite",
"=",
"False",
")",
":",
"sql",
"=",
"select",
"(",
"[",
"table",
"]",
")",
"sql_to_csv",
"(",
"sql",
",",
"engine",
",",
"filepath",
",",
"chunksize",
")"
] | Export entire table to a csv file.
:param table: :class:`sqlalchemy.Table` instance.
:param engine: :class:`sqlalchemy.engine.base.Engine`.
:param filepath: file path.
:param chunksize: number of rows to write to csv each time.
:param overwrite: bool, if True, avoid overwriting an existing file.
**Chinese documentation**
Writes all of the data in the table to a csv file. | [
"Export",
"entire",
"table",
"to",
"a",
"csv",
"file",
"."
] | python | train |
vpelletier/python-libusb1 | usb1/__init__.py | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L2308-L2337 | def getDeviceIterator(self, skip_on_error=False):
"""
Return an iterator over all USB devices currently plugged in, as USBDevice
instances.
skip_on_error (bool)
If True, ignore devices which raise USBError.
"""
device_p_p = libusb1.libusb_device_p_p()
libusb_device_p = libusb1.libusb_device_p
device_list_len = libusb1.libusb_get_device_list(self.__context_p,
byref(device_p_p))
mayRaiseUSBError(device_list_len)
try:
for device_p in device_p_p[:device_list_len]:
try:
# Instantiate our own libusb_device_p object so we can free
# libusb-provided device list. Is this a bug in ctypes that
# it doesn't copy pointer value (=pointed memory address) ?
# At least, it's not so convenient and forces using such
# weird code.
device = USBDevice(self, libusb_device_p(device_p.contents))
except USBError:
if not skip_on_error:
raise
else:
self.__close_set.add(device)
yield device
finally:
libusb1.libusb_free_device_list(device_p_p, 1) | [
"def",
"getDeviceIterator",
"(",
"self",
",",
"skip_on_error",
"=",
"False",
")",
":",
"device_p_p",
"=",
"libusb1",
".",
"libusb_device_p_p",
"(",
")",
"libusb_device_p",
"=",
"libusb1",
".",
"libusb_device_p",
"device_list_len",
"=",
"libusb1",
".",
"libusb_get_device_list",
"(",
"self",
".",
"__context_p",
",",
"byref",
"(",
"device_p_p",
")",
")",
"mayRaiseUSBError",
"(",
"device_list_len",
")",
"try",
":",
"for",
"device_p",
"in",
"device_p_p",
"[",
":",
"device_list_len",
"]",
":",
"try",
":",
"# Instanciate our own libusb_device_p object so we can free",
"# libusb-provided device list. Is this a bug in ctypes that",
"# it doesn't copy pointer value (=pointed memory address) ?",
"# At least, it's not so convenient and forces using such",
"# weird code.",
"device",
"=",
"USBDevice",
"(",
"self",
",",
"libusb_device_p",
"(",
"device_p",
".",
"contents",
")",
")",
"except",
"USBError",
":",
"if",
"not",
"skip_on_error",
":",
"raise",
"else",
":",
"self",
".",
"__close_set",
".",
"add",
"(",
"device",
")",
"yield",
"device",
"finally",
":",
"libusb1",
".",
"libusb_free_device_list",
"(",
"device_p_p",
",",
"1",
")"
] | Return an iterator over all USB devices currently plugged in, as USBDevice
instances.
skip_on_error (bool)
If True, ignore devices which raise USBError. | [
"Return",
"an",
"iterator",
"over",
"all",
"USB",
"devices",
"currently",
"plugged",
"in",
"as",
"USBDevice",
"instances",
"."
] | python | train |
facelessuser/backrefs | setup.py | https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/setup.py#L60-L82 | def generate_unicode_table():
"""Generate the Unicode table for the given Python version."""
uver = get_unicodedata()
fail = False
path = os.path.join(os.path.dirname(__file__), 'tools')
fp, pathname, desc = imp.find_module('unipropgen', [path])
try:
unipropgen = imp.load_module('unipropgen', fp, pathname, desc)
unipropgen.build_tables(
os.path.join(
os.path.dirname(__file__),
'backrefs', 'uniprops', 'unidata'
),
uver
)
except Exception:
print(traceback.format_exc())
fail = True
finally:
fp.close()
assert not fail, "Failed uniprops.py generation!" | [
"def",
"generate_unicode_table",
"(",
")",
":",
"uver",
"=",
"get_unicodedata",
"(",
")",
"fail",
"=",
"False",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'tools'",
")",
"fp",
",",
"pathname",
",",
"desc",
"=",
"imp",
".",
"find_module",
"(",
"'unipropgen'",
",",
"[",
"path",
"]",
")",
"try",
":",
"unipropgen",
"=",
"imp",
".",
"load_module",
"(",
"'unipropgen'",
",",
"fp",
",",
"pathname",
",",
"desc",
")",
"unipropgen",
".",
"build_tables",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'backrefs'",
",",
"'uniprops'",
",",
"'unidata'",
")",
",",
"uver",
")",
"except",
"Exception",
":",
"print",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"fail",
"=",
"True",
"finally",
":",
"fp",
".",
"close",
"(",
")",
"assert",
"not",
"fail",
",",
"\"Failed uniprops.py generation!\""
] | Generate the Unicode table for the given Python version. | [
"Generate",
"the",
"Unicode",
"table",
"for",
"the",
"given",
"Python",
"version",
"."
] | python | train |
sparknetworks/pgpm | pgpm/utils/config.py | https://github.com/sparknetworks/pgpm/blob/1a060df46a886095181f692ea870a73a32510a2e/pgpm/utils/config.py#L63-L87 | def get_list_connections(self, environment, product, unique_name_list=None, is_except=False):
"""
Gets list of connections that satisfy the filter by environment, product and (optionally) unique DB names
:param environment: Environment name
:param product: Product name
:param unique_name_list: list of unique db aliases
:param is_except: take the connections with aliases provided or, the other way around, take all the rest
:return: list of dictionaries with connections
"""
return_list = []
for item in self.connection_sets:
if unique_name_list:
if item['unique_name']:
if is_except:
if item['environment'] == environment and item['product'] == product and \
(item['unique_name'] not in unique_name_list):
return_list.append(item)
elif not is_except:
if item['environment'] == environment and item['product'] == product and \
(item['unique_name'] in unique_name_list):
return_list.append(item)
else:
if item['environment'] == environment and item['product'] == product:
return_list.append(item)
return return_list | [
"def",
"get_list_connections",
"(",
"self",
",",
"environment",
",",
"product",
",",
"unique_name_list",
"=",
"None",
",",
"is_except",
"=",
"False",
")",
":",
"return_list",
"=",
"[",
"]",
"for",
"item",
"in",
"self",
".",
"connection_sets",
":",
"if",
"unique_name_list",
":",
"if",
"item",
"[",
"'unique_name'",
"]",
":",
"if",
"is_except",
":",
"if",
"item",
"[",
"'environment'",
"]",
"==",
"environment",
"and",
"item",
"[",
"'product'",
"]",
"==",
"product",
"and",
"(",
"item",
"[",
"'unique_name'",
"]",
"not",
"in",
"unique_name_list",
")",
":",
"return_list",
".",
"append",
"(",
"item",
")",
"elif",
"not",
"is_except",
":",
"if",
"item",
"[",
"'environment'",
"]",
"==",
"environment",
"and",
"item",
"[",
"'product'",
"]",
"==",
"product",
"and",
"(",
"item",
"[",
"'unique_name'",
"]",
"in",
"unique_name_list",
")",
":",
"return_list",
".",
"append",
"(",
"item",
")",
"else",
":",
"if",
"item",
"[",
"'environment'",
"]",
"==",
"environment",
"and",
"item",
"[",
"'product'",
"]",
"==",
"product",
":",
"return_list",
".",
"append",
"(",
"item",
")",
"return",
"return_list"
] | Gets list of connections that satisfy the filter by environment, product and (optionally) unique DB names
:param environment: Environment name
:param product: Product name
:param unique_name_list: list of unique db aliases
:param is_except: take the connections with aliases provided or, the other way around, take all the rest
:return: list of dictionaries with connections | [
"Gets",
"list",
"of",
"connections",
"that",
"satisfy",
"the",
"filter",
"by",
"environment",
"product",
"and",
"(",
"optionally",
")",
"unique",
"DB",
"names",
":",
"param",
"environment",
":",
"Environment",
"name",
":",
"param",
"product",
":",
"Product",
"name",
":",
"param",
"unique_name_list",
":",
"list",
"of",
"unique",
"db",
"aliases",
":",
"param",
"is_except",
":",
"take",
"the",
"connections",
"with",
"aliases",
"provided",
"or",
"the",
"other",
"wat",
"around",
"take",
"all",
"the",
"rest",
":",
"return",
":",
"list",
"of",
"dictionaries",
"with",
"connections"
] | python | train |
log2timeline/dftimewolf | dftimewolf/lib/state.py | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/state.py#L121-L157 | def run_modules(self):
"""Performs the actual processing for each module in the module pool."""
def _run_module_thread(module_description):
"""Runs the module's process() function.
Waits for any blockers to have finished before running process(), then
sets an Event flag declaring the module has completed.
"""
for blocker in module_description['wants']:
self.events[blocker].wait()
module = self._module_pool[module_description['name']]
try:
module.process()
except DFTimewolfError as error:
self.add_error(error.message, critical=True)
except Exception as error: # pylint: disable=broad-except
self.add_error(
'An unknown error occurred: {0!s}\nFull traceback:\n{1:s}'.format(
error, traceback.format_exc()),
critical=True)
print('Module {0:s} completed'.format(module_description['name']))
self.events[module_description['name']].set()
self.cleanup()
threads = []
for module_description in self.recipe['modules']:
t = threading.Thread(
target=_run_module_thread,
args=(module_description, )
)
threads.append(t)
t.start()
for t in threads:
t.join()
self.check_errors(is_global=True) | [
"def",
"run_modules",
"(",
"self",
")",
":",
"def",
"_run_module_thread",
"(",
"module_description",
")",
":",
"\"\"\"Runs the module's process() function.\n\n Waits for any blockers to have finished before running process(), then\n sets an Event flag declaring the module has completed.\n \"\"\"",
"for",
"blocker",
"in",
"module_description",
"[",
"'wants'",
"]",
":",
"self",
".",
"events",
"[",
"blocker",
"]",
".",
"wait",
"(",
")",
"module",
"=",
"self",
".",
"_module_pool",
"[",
"module_description",
"[",
"'name'",
"]",
"]",
"try",
":",
"module",
".",
"process",
"(",
")",
"except",
"DFTimewolfError",
"as",
"error",
":",
"self",
".",
"add_error",
"(",
"error",
".",
"message",
",",
"critical",
"=",
"True",
")",
"except",
"Exception",
"as",
"error",
":",
"# pylint: disable=broad-except",
"self",
".",
"add_error",
"(",
"'An unknown error occurred: {0!s}\\nFull traceback:\\n{1:s}'",
".",
"format",
"(",
"error",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
",",
"critical",
"=",
"True",
")",
"print",
"(",
"'Module {0:s} completed'",
".",
"format",
"(",
"module_description",
"[",
"'name'",
"]",
")",
")",
"self",
".",
"events",
"[",
"module_description",
"[",
"'name'",
"]",
"]",
".",
"set",
"(",
")",
"self",
".",
"cleanup",
"(",
")",
"threads",
"=",
"[",
"]",
"for",
"module_description",
"in",
"self",
".",
"recipe",
"[",
"'modules'",
"]",
":",
"t",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"_run_module_thread",
",",
"args",
"=",
"(",
"module_description",
",",
")",
")",
"threads",
".",
"append",
"(",
"t",
")",
"t",
".",
"start",
"(",
")",
"for",
"t",
"in",
"threads",
":",
"t",
".",
"join",
"(",
")",
"self",
".",
"check_errors",
"(",
"is_global",
"=",
"True",
")"
] | Performs the actual processing for each module in the module pool. | [
"Performs",
"the",
"actual",
"processing",
"for",
"each",
"module",
"in",
"the",
"module",
"pool",
"."
] | python | train |
GibbsConsulting/django-plotly-dash | django_plotly_dash/views.py | https://github.com/GibbsConsulting/django-plotly-dash/blob/773ed081fc2ea3cc7607590322a14686a7a79bc5/django_plotly_dash/views.py#L120-L129 | def component_suites(request, resource=None, component=None, extra_element="", **kwargs):
'Return part of a client-side component, served locally for some reason'
get_params = request.GET.urlencode()
if get_params and False:
redone_url = "/static/dash/component/%s/%s%s?%s" %(component, extra_element, resource, get_params)
else:
redone_url = "/static/dash/component/%s/%s%s" %(component, extra_element, resource)
return HttpResponseRedirect(redirect_to=redone_url) | [
"def",
"component_suites",
"(",
"request",
",",
"resource",
"=",
"None",
",",
"component",
"=",
"None",
",",
"extra_element",
"=",
"\"\"",
",",
"*",
"*",
"kwargs",
")",
":",
"get_params",
"=",
"request",
".",
"GET",
".",
"urlencode",
"(",
")",
"if",
"get_params",
"and",
"False",
":",
"redone_url",
"=",
"\"/static/dash/component/%s/%s%s?%s\"",
"%",
"(",
"component",
",",
"extra_element",
",",
"resource",
",",
"get_params",
")",
"else",
":",
"redone_url",
"=",
"\"/static/dash/component/%s/%s%s\"",
"%",
"(",
"component",
",",
"extra_element",
",",
"resource",
")",
"return",
"HttpResponseRedirect",
"(",
"redirect_to",
"=",
"redone_url",
")"
] | Return part of a client-side component, served locally for some reason | [
"Return",
"part",
"of",
"a",
"client",
"-",
"side",
"component",
"served",
"locally",
"for",
"some",
"reason"
] | python | train |
rameshg87/pyremotevbox | pyremotevbox/ZSI/wstools/logging.py | https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/logging.py#L185-L209 | def gridLog(**kw):
"""Send GLRecord, Distributed Logging Utilities
If the scheme is passed as a keyword parameter
the value is expected to be a callable function
that takes 2 parameters: url, outputStr
GRIDLOG_ON -- turn grid logging on
GRIDLOG_DEST -- provide URL destination
"""
import os
if not bool( int(os.environ.get('GRIDLOG_ON', 0)) ):
return
url = os.environ.get('GRIDLOG_DEST')
if url is None:
return
## NOTE: urlparse problem w/customized schemes
try:
scheme = url[:url.find('://')]
send = GLRegistry[scheme]
send( url, str(GLRecord(**kw)), )
except Exception, ex:
print >>sys.stderr, "*** gridLog failed -- %s" %(str(kw)) | [
"def",
"gridLog",
"(",
"*",
"*",
"kw",
")",
":",
"import",
"os",
"if",
"not",
"bool",
"(",
"int",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'GRIDLOG_ON'",
",",
"0",
")",
")",
")",
":",
"return",
"url",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'GRIDLOG_DEST'",
")",
"if",
"url",
"is",
"None",
":",
"return",
"## NOTE: urlparse problem w/customized schemes ",
"try",
":",
"scheme",
"=",
"url",
"[",
":",
"url",
".",
"find",
"(",
"'://'",
")",
"]",
"send",
"=",
"GLRegistry",
"[",
"scheme",
"]",
"send",
"(",
"url",
",",
"str",
"(",
"GLRecord",
"(",
"*",
"*",
"kw",
")",
")",
",",
")",
"except",
"Exception",
",",
"ex",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"\"*** gridLog failed -- %s\"",
"%",
"(",
"str",
"(",
"kw",
")",
")"
] | Send GLRecord, Distributed Logging Utilities
If the scheme is passed as a keyword parameter
the value is expected to be a callable function
that takes 2 parameters: url, outputStr
GRIDLOG_ON -- turn grid logging on
GRIDLOG_DEST -- provide URL destination | [
"Send",
"GLRecord",
"Distributed",
"Logging",
"Utilities",
"If",
"the",
"scheme",
"is",
"passed",
"as",
"a",
"keyword",
"parameter",
"the",
"value",
"is",
"expected",
"to",
"be",
"a",
"callable",
"function",
"that",
"takes",
"2",
"parameters",
":",
"url",
"outputStr"
] | python | train |
pysal/mapclassify | mapclassify/classifiers.py | https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L476-L618 | def make(cls, *args, **kwargs):
"""
Configure and create a classifier that will consume data and produce
classifications, given the configuration options specified by this
function.
Note that this is like a *partial application* of the relevant class
constructor. `make` creates a function that returns classifications; it
does not actually do the classification.
If you want to classify data directly, use the appropriate class
constructor, like Quantiles, Max_Breaks, etc.
If you *have* a classifier object, but want to find which bins new data
falls into, use find_bin.
Parameters
----------
*args : required positional arguments
all positional arguments required by the classifier,
excluding the input data.
rolling : bool
a boolean configuring the outputted classifier to use
a rolling classifier rather than a new classifier for
each input. If rolling, this adds the current data to
all of the previous data in the classifier, and
rebalances the bins, like a running median
computation.
return_object : bool
a boolean configuring the outputted classifier to
return the classifier object or not
return_bins : bool
a boolean configuring the outputted classifier to
return the bins/breaks or not
return_counts : bool
a boolean configuring the outputted classifier to
return the histogram of objects falling into each bin
or not
Returns
-------
A function that consumes data and returns their bins (and object,
bins/breaks, or counts, if requested).
Note
----
This is most useful when you want to run a classifier many times
with a given configuration, such as when classifying many columns of an
array or dataframe using the same configuration.
Examples
--------
>>> import libpysal as ps
>>> import mapclassify as mc
>>> import geopandas as gpd
>>> df = gpd.read_file(ps.examples.get_path('columbus.dbf'))
>>> classifier = mc.Quantiles.make(k=9)
>>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier)
>>> cl["HOVAL"].values[:10]
array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8])
>>> cl["CRIME"].values[:10]
array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4])
>>> cl["INC"].values[:10]
array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4])
>>> import pandas as pd; from numpy import linspace as lsp
>>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)]
>>> data = pd.DataFrame(data).T
>>> data
0 1 2
0 3.000000 10.000000 -5.000000
1 3.555556 8.888889 -2.777778
2 4.111111 7.777778 -0.555556
3 4.666667 6.666667 1.666667
4 5.222222 5.555556 3.888889
5 5.777778 4.444444 6.111111
6 6.333333 3.333333 8.333333
7 6.888889 2.222222 10.555556
8 7.444444 1.111111 12.777778
9 8.000000 0.000000 15.000000
>>> data.apply(mc.Quantiles.make(rolling=True))
0 1 2
0 0 4 0
1 0 4 0
2 1 4 0
3 1 3 0
4 2 2 1
5 2 1 2
6 3 0 4
7 3 0 4
8 4 0 4
9 4 0 4
>>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf'))
>>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT')
>>> my_bins = [1, 10, 20, 40, 80]
>>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T]
>>> len(cl)
3
>>> cl[0][:10]
array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5])
"""
# only flag overrides return flag
to_annotate = copy.deepcopy(kwargs)
return_object = kwargs.pop('return_object', False)
return_bins = kwargs.pop('return_bins', False)
return_counts = kwargs.pop('return_counts', False)
rolling = kwargs.pop('rolling', False)
if rolling:
# just initialize a fake classifier
data = list(range(10))
cls_instance = cls(data, *args, **kwargs)
# and empty it, since we'll be using the update
cls_instance.y = np.array([])
else:
cls_instance = None
# wrap init in a closure to make a consumer.
# Qc Na: "Objects/Closures are poor man's Closures/Objects"
def classifier(data, cls_instance=cls_instance):
if rolling:
cls_instance.update(data, inplace=True, **kwargs)
yb = cls_instance.find_bin(data)
else:
cls_instance = cls(data, *args, **kwargs)
yb = cls_instance.yb
outs = [yb, None, None, None]
outs[1] = cls_instance if return_object else None
outs[2] = cls_instance.bins if return_bins else None
outs[3] = cls_instance.counts if return_counts else None
outs = [a for a in outs if a is not None]
if len(outs) == 1:
return outs[0]
else:
return outs
# for debugging/jic, keep around the kwargs.
# in future, we might want to make this a thin class, so that we can
# set a custom repr. Call the class `Binner` or something, that's a
# pre-configured Classifier that just consumes data, bins it, &
# possibly updates the bins.
classifier._options = to_annotate
return classifier | [
"def",
"make",
"(",
"cls",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# only flag overrides return flag",
"to_annotate",
"=",
"copy",
".",
"deepcopy",
"(",
"kwargs",
")",
"return_object",
"=",
"kwargs",
".",
"pop",
"(",
"'return_object'",
",",
"False",
")",
"return_bins",
"=",
"kwargs",
".",
"pop",
"(",
"'return_bins'",
",",
"False",
")",
"return_counts",
"=",
"kwargs",
".",
"pop",
"(",
"'return_counts'",
",",
"False",
")",
"rolling",
"=",
"kwargs",
".",
"pop",
"(",
"'rolling'",
",",
"False",
")",
"if",
"rolling",
":",
"# just initialize a fake classifier",
"data",
"=",
"list",
"(",
"range",
"(",
"10",
")",
")",
"cls_instance",
"=",
"cls",
"(",
"data",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# and empty it, since we'll be using the update",
"cls_instance",
".",
"y",
"=",
"np",
".",
"array",
"(",
"[",
"]",
")",
"else",
":",
"cls_instance",
"=",
"None",
"# wrap init in a closure to make a consumer.",
"# Qc Na: \"Objects/Closures are poor man's Closures/Objects\"",
"def",
"classifier",
"(",
"data",
",",
"cls_instance",
"=",
"cls_instance",
")",
":",
"if",
"rolling",
":",
"cls_instance",
".",
"update",
"(",
"data",
",",
"inplace",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"yb",
"=",
"cls_instance",
".",
"find_bin",
"(",
"data",
")",
"else",
":",
"cls_instance",
"=",
"cls",
"(",
"data",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"yb",
"=",
"cls_instance",
".",
"yb",
"outs",
"=",
"[",
"yb",
",",
"None",
",",
"None",
",",
"None",
"]",
"outs",
"[",
"1",
"]",
"=",
"cls_instance",
"if",
"return_object",
"else",
"None",
"outs",
"[",
"2",
"]",
"=",
"cls_instance",
".",
"bins",
"if",
"return_bins",
"else",
"None",
"outs",
"[",
"3",
"]",
"=",
"cls_instance",
".",
"counts",
"if",
"return_counts",
"else",
"None",
"outs",
"=",
"[",
"a",
"for",
"a",
"in",
"outs",
"if",
"a",
"is",
"not",
"None",
"]",
"if",
"len",
"(",
"outs",
")",
"==",
"1",
":",
"return",
"outs",
"[",
"0",
"]",
"else",
":",
"return",
"outs",
"# for debugging/jic, keep around the kwargs.",
"# in future, we might want to make this a thin class, so that we can",
"# set a custom repr. Call the class `Binner` or something, that's a",
"# pre-configured Classifier that just consumes data, bins it, &",
"# possibly updates the bins.",
"classifier",
".",
"_options",
"=",
"to_annotate",
"return",
"classifier"
] | Configure and create a classifier that will consume data and produce
classifications, given the configuration options specified by this
function.
Note that this is like a *partial application* of the relevant class
constructor. `make` creates a function that returns classifications; it
does not actually do the classification.
If you want to classify data directly, use the appropriate class
constructor, like Quantiles, Max_Breaks, etc.
If you *have* a classifier object, but want to find which bins new data
falls into, use find_bin.
Parameters
----------
*args : required positional arguments
all positional arguments required by the classifier,
excluding the input data.
rolling : bool
a boolean configuring the outputted classifier to use
a rolling classifier rather than a new classifier for
each input. If rolling, this adds the current data to
all of the previous data in the classifier, and
rebalances the bins, like a running median
computation.
return_object : bool
a boolean configuring the outputted classifier to
return the classifier object or not
return_bins : bool
a boolean configuring the outputted classifier to
return the bins/breaks or not
return_counts : bool
a boolean configuring the outputted classifier to
return the histogram of objects falling into each bin
or not
Returns
-------
A function that consumes data and returns their bins (and object,
bins/breaks, or counts, if requested).
Note
----
This is most useful when you want to run a classifier many times
with a given configuration, such as when classifying many columns of an
array or dataframe using the same configuration.
Examples
--------
>>> import libpysal as ps
>>> import mapclassify as mc
>>> import geopandas as gpd
>>> df = gpd.read_file(ps.examples.get_path('columbus.dbf'))
>>> classifier = mc.Quantiles.make(k=9)
>>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier)
>>> cl["HOVAL"].values[:10]
array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8])
>>> cl["CRIME"].values[:10]
array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4])
>>> cl["INC"].values[:10]
array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4])
>>> import pandas as pd; from numpy import linspace as lsp
>>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)]
>>> data = pd.DataFrame(data).T
>>> data
0 1 2
0 3.000000 10.000000 -5.000000
1 3.555556 8.888889 -2.777778
2 4.111111 7.777778 -0.555556
3 4.666667 6.666667 1.666667
4 5.222222 5.555556 3.888889
5 5.777778 4.444444 6.111111
6 6.333333 3.333333 8.333333
7 6.888889 2.222222 10.555556
8 7.444444 1.111111 12.777778
9 8.000000 0.000000 15.000000
>>> data.apply(mc.Quantiles.make(rolling=True))
0 1 2
0 0 4 0
1 0 4 0
2 1 4 0
3 1 3 0
4 2 2 1
5 2 1 2
6 3 0 4
7 3 0 4
8 4 0 4
9 4 0 4
>>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf'))
>>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT')
>>> my_bins = [1, 10, 20, 40, 80]
>>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T]
>>> len(cl)
3
>>> cl[0][:10]
array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5]) | [
"Configure",
"and",
"create",
"a",
"classifier",
"that",
"will",
"consume",
"data",
"and",
"produce",
"classifications",
"given",
"the",
"configuration",
"options",
"specified",
"by",
"this",
"function",
"."
] | python | train |
llllllllll/codetransformer | codetransformer/utils/immutable.py | https://github.com/llllllllll/codetransformer/blob/c5f551e915df45adc7da7e0b1b635f0cc6a1bb27/codetransformer/utils/immutable.py#L264-L287 | def _check_missing_slots(ob):
"""Check that all slots have been initialized when a custom __init__ method
is provided.
Parameters
----------
ob : immutable
The instance that was just initialized.
Raises
------
TypeError
Raised when the instance has not set values that are named in the
__slots__.
"""
missing_slots = tuple(
filter(lambda s: not hasattr(ob, s), ob.__slots__),
)
if missing_slots:
raise TypeError(
'not all slots initialized in __init__, missing: {0}'.format(
missing_slots,
),
) | [
"def",
"_check_missing_slots",
"(",
"ob",
")",
":",
"missing_slots",
"=",
"tuple",
"(",
"filter",
"(",
"lambda",
"s",
":",
"not",
"hasattr",
"(",
"ob",
",",
"s",
")",
",",
"ob",
".",
"__slots__",
")",
",",
")",
"if",
"missing_slots",
":",
"raise",
"TypeError",
"(",
"'not all slots initialized in __init__, missing: {0}'",
".",
"format",
"(",
"missing_slots",
",",
")",
",",
")"
] | Check that all slots have been initialized when a custom __init__ method
is provided.
Parameters
----------
ob : immutable
The instance that was just initialized.
Raises
------
TypeError
Raised when the instance has not set values that are named in the
__slots__. | [
"Check",
"that",
"all",
"slots",
"have",
"been",
"initialized",
"when",
"a",
"custom",
"__init__",
"method",
"is",
"provided",
"."
] | python | train |
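A minimal sketch of the failure mode this helper guards against (the Point class below is hypothetical, not part of the codetransformer repo): an __init__ that forgets to assign one of the declared slots.
class Point(object):
    __slots__ = ('x', 'y')
    def __init__(self, x):
        self.x = x  # 'y' is never assigned

p = Point(1)
# _check_missing_slots(p) would raise TypeError here, because 'y' was never initialized:
missing = tuple(s for s in Point.__slots__ if not hasattr(p, s))
assert missing == ('y',)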
psd-tools/psd-tools | src/psd_tools/utils.py | https://github.com/psd-tools/psd-tools/blob/4952b57bcf1cf2c1f16fd9d6d51d4fa0b53bce4e/src/psd_tools/utils.py#L41-L51 | def write_fmt(fp, fmt, *args):
"""
Writes data to ``fp`` according to ``fmt``.
"""
fmt = str(">" + fmt)
fmt_size = struct.calcsize(fmt)
written = write_bytes(fp, struct.pack(fmt, *args))
assert written == fmt_size, 'written=%d, expected=%d' % (
written, fmt_size
)
return written | [
"def",
"write_fmt",
"(",
"fp",
",",
"fmt",
",",
"*",
"args",
")",
":",
"fmt",
"=",
"str",
"(",
"\">\"",
"+",
"fmt",
")",
"fmt_size",
"=",
"struct",
".",
"calcsize",
"(",
"fmt",
")",
"written",
"=",
"write_bytes",
"(",
"fp",
",",
"struct",
".",
"pack",
"(",
"fmt",
",",
"*",
"args",
")",
")",
"assert",
"written",
"==",
"fmt_size",
",",
"'written=%d, expected=%d'",
"%",
"(",
"written",
",",
"fmt_size",
")",
"return",
"written"
] | Writes data to ``fp`` according to ``fmt``. | [
"Writes",
"data",
"to",
"fp",
"according",
"to",
"fmt",
"."
] | python | train |
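A hedged usage sketch: pack two big-endian integers into an in-memory buffer. The import path is assumed from the record's file path, and io.BytesIO stands in for a real PSD file handle.
import io
from psd_tools.utils import write_fmt  # module path assumed

buf = io.BytesIO()
written = write_fmt(buf, 'HI', 1, 2)   # '>' is prepended internally: unsigned short + unsigned int
assert written == 6
assert buf.getvalue() == b'\x00\x01\x00\x00\x00\x02'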
msoulier/tftpy | tftpy/TftpContexts.py | https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpContexts.py#L146-L150 | def sethost(self, host):
"""Setter method that also sets the address property as a result
of the host that is set."""
self.__host = host
self.address = socket.gethostbyname(host) | [
"def",
"sethost",
"(",
"self",
",",
"host",
")",
":",
"self",
".",
"__host",
"=",
"host",
"self",
".",
"address",
"=",
"socket",
".",
"gethostbyname",
"(",
"host",
")"
] | Setter method that also sets the address property as a result
of the host that is set. | [
"Setter",
"method",
"that",
"also",
"sets",
"the",
"address",
"property",
"as",
"a",
"result",
"of",
"the",
"host",
"that",
"is",
"set",
"."
] | python | train |
twisted/epsilon | epsilon/hotfixes/filepath_copyTo.py | https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/hotfixes/filepath_copyTo.py#L96-L105 | def preauthChild(self, path):
"""
Use me if `path' might have slashes in it, but you know they're safe.
(NOT slashes at the beginning. It still needs to be a _child_).
"""
newpath = abspath(joinpath(self.path, normpath(path)))
if not newpath.startswith(self.path):
raise InsecurePath("%s is not a child of %s" % (newpath, self.path))
return self.clonePath(newpath) | [
"def",
"preauthChild",
"(",
"self",
",",
"path",
")",
":",
"newpath",
"=",
"abspath",
"(",
"joinpath",
"(",
"self",
".",
"path",
",",
"normpath",
"(",
"path",
")",
")",
")",
"if",
"not",
"newpath",
".",
"startswith",
"(",
"self",
".",
"path",
")",
":",
"raise",
"InsecurePath",
"(",
"\"%s is not a child of %s\"",
"%",
"(",
"newpath",
",",
"self",
".",
"path",
")",
")",
"return",
"self",
".",
"clonePath",
"(",
"newpath",
")"
] | Use me if `path' might have slashes in it, but you know they're safe.
(NOT slashes at the beginning. It still needs to be a _child_). | [
"Use",
"me",
"if",
"path",
"might",
"have",
"slashes",
"in",
"it",
"but",
"you",
"know",
"they",
"re",
"safe",
"."
] | python | train |
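A hedged sketch of the contract (assumes the patched twisted-style FilePath this hotfix targets): multi-segment child paths are allowed, but anything that escapes the base path raises InsecurePath.
base = FilePath('/srv/data')                  # FilePath assumed to carry this patched method
ok = base.preauthChild('logs/2019/app.log')   # stays under /srv/data, returned as a new FilePath
# base.preauthChild('../../etc/passwd')       # would raise InsecurePath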
nion-software/nionswift-io | nionswift_plugin/TIFF_IO/tifffile.py | https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L680-L740 | def memmap(filename, shape=None, dtype=None, page=None, series=0, mode='r+',
**kwargs):
"""Return memory-mapped numpy array stored in TIFF file.
Memory-mapping requires data stored in native byte order, without tiling,
compression, predictors, etc.
If 'shape' and 'dtype' are provided, existing files will be overwritten or
appended to depending on the 'append' parameter.
Otherwise the image data of a specified page or series in an existing
file will be memory-mapped. By default, the image data of the first page
series is memory-mapped.
Call flush() to write any changes in the array to the file.
Raise ValueError if the image data in the file is not memory-mappable.
Parameters
----------
filename : str
Name of the TIFF file which stores the array.
shape : tuple
Shape of the empty array.
dtype : numpy.dtype
Data-type of the empty array.
page : int
Index of the page which image data to memory-map.
series : int
Index of the page series which image data to memory-map.
mode : {'r+', 'r', 'c'}
The file open mode. Default is to open existing file for reading and
writing ('r+').
kwargs : dict
Additional parameters passed to imwrite() or TiffFile().
"""
if shape is not None and dtype is not None:
# create a new, empty array
kwargs.update(data=None, shape=shape, dtype=dtype, returnoffset=True,
align=TIFF.ALLOCATIONGRANULARITY)
result = imwrite(filename, **kwargs)
if result is None:
# TODO: fail before creating file or writing data
raise ValueError('image data are not memory-mappable')
offset = result[0]
else:
# use existing file
with TiffFile(filename, **kwargs) as tif:
if page is not None:
page = tif.pages[page]
if not page.is_memmappable:
raise ValueError('image data are not memory-mappable')
offset, _ = page.is_contiguous
shape = page.shape
dtype = page.dtype
else:
series = tif.series[series]
if series.offset is None:
raise ValueError('image data are not memory-mappable')
shape = series.shape
dtype = series.dtype
offset = series.offset
dtype = tif.byteorder + dtype.char
return numpy.memmap(filename, dtype, mode, offset, shape, 'C') | [
"def",
"memmap",
"(",
"filename",
",",
"shape",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"page",
"=",
"None",
",",
"series",
"=",
"0",
",",
"mode",
"=",
"'r+'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"shape",
"is",
"not",
"None",
"and",
"dtype",
"is",
"not",
"None",
":",
"# create a new, empty array",
"kwargs",
".",
"update",
"(",
"data",
"=",
"None",
",",
"shape",
"=",
"shape",
",",
"dtype",
"=",
"dtype",
",",
"returnoffset",
"=",
"True",
",",
"align",
"=",
"TIFF",
".",
"ALLOCATIONGRANULARITY",
")",
"result",
"=",
"imwrite",
"(",
"filename",
",",
"*",
"*",
"kwargs",
")",
"if",
"result",
"is",
"None",
":",
"# TODO: fail before creating file or writing data",
"raise",
"ValueError",
"(",
"'image data are not memory-mappable'",
")",
"offset",
"=",
"result",
"[",
"0",
"]",
"else",
":",
"# use existing file",
"with",
"TiffFile",
"(",
"filename",
",",
"*",
"*",
"kwargs",
")",
"as",
"tif",
":",
"if",
"page",
"is",
"not",
"None",
":",
"page",
"=",
"tif",
".",
"pages",
"[",
"page",
"]",
"if",
"not",
"page",
".",
"is_memmappable",
":",
"raise",
"ValueError",
"(",
"'image data are not memory-mappable'",
")",
"offset",
",",
"_",
"=",
"page",
".",
"is_contiguous",
"shape",
"=",
"page",
".",
"shape",
"dtype",
"=",
"page",
".",
"dtype",
"else",
":",
"series",
"=",
"tif",
".",
"series",
"[",
"series",
"]",
"if",
"series",
".",
"offset",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'image data are not memory-mappable'",
")",
"shape",
"=",
"series",
".",
"shape",
"dtype",
"=",
"series",
".",
"dtype",
"offset",
"=",
"series",
".",
"offset",
"dtype",
"=",
"tif",
".",
"byteorder",
"+",
"dtype",
".",
"char",
"return",
"numpy",
".",
"memmap",
"(",
"filename",
",",
"dtype",
",",
"mode",
",",
"offset",
",",
"shape",
",",
"'C'",
")"
] | Return memory-mapped numpy array stored in TIFF file.
Memory-mapping requires data stored in native byte order, without tiling,
compression, predictors, etc.
If 'shape' and 'dtype' are provided, existing files will be overwritten or
appended to depending on the 'append' parameter.
Otherwise the image data of a specified page or series in an existing
file will be memory-mapped. By default, the image data of the first page
series is memory-mapped.
Call flush() to write any changes in the array to the file.
Raise ValueError if the image data in the file is not memory-mappable.
Parameters
----------
filename : str
Name of the TIFF file which stores the array.
shape : tuple
Shape of the empty array.
dtype : numpy.dtype
Data-type of the empty array.
page : int
Index of the page which image data to memory-map.
series : int
Index of the page series which image data to memory-map.
mode : {'r+', 'r', 'c'}
The file open mode. Default is to open existing file for reading and
writing ('r+').
kwargs : dict
Additional parameters passed to imwrite() or TiffFile(). | [
"Return",
"memory",
"-",
"mapped",
"numpy",
"array",
"stored",
"in",
"TIFF",
"file",
"."
] | python | train |
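A hedged usage sketch (the file name is illustrative and the import assumes the upstream tifffile module rather than this vendored copy): create an empty memory-mapped TIFF, modify it, then map the existing first page again.
from tifffile import memmap   # import path assumed

im = memmap('temp.tif', shape=(256, 256), dtype='float32')
im[64, 64] = 1.0
im.flush()                          # write the change back to disk
page0 = memmap('temp.tif', page=0)  # map image data of an existing page
assert page0[64, 64] == 1.0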
NASA-AMMOS/AIT-Core | ait/core/tlm.py | https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/tlm.py#L1035-L1038 | def create(self, name, data=None):
"""Creates a new packet with the given definition and raw data.
"""
return createPacket(self[name], data) if name in self else None | [
"def",
"create",
"(",
"self",
",",
"name",
",",
"data",
"=",
"None",
")",
":",
"return",
"createPacket",
"(",
"self",
"[",
"name",
"]",
",",
"data",
")",
"if",
"name",
"in",
"self",
"else",
"None"
] | Creates a new packet with the given definition and raw data. | [
"Creates",
"a",
"new",
"packet",
"with",
"the",
"given",
"definition",
"and",
"raw",
"data",
"."
] | python | train |
pkgw/pwkit | pwkit/cli/latexdriver.py | https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/cli/latexdriver.py#L319-L329 | def just_smart_bibtools(bib_style, aux, bib):
"""Tectonic has taken over most of the features that this tool used to provide,
but here's a hack to keep my smart .bib file generation working.
"""
extradir = Path('.bibtex')
extradir.ensure_dir(parents=True)
bib_export(bib_style, aux, extradir / 'ZZ_bibtools.bib',
no_tool_ok=True, quiet=True, ignore_missing=True)
merge_bibtex_with_aux(aux, bib, extradir) | [
"def",
"just_smart_bibtools",
"(",
"bib_style",
",",
"aux",
",",
"bib",
")",
":",
"extradir",
"=",
"Path",
"(",
"'.bibtex'",
")",
"extradir",
".",
"ensure_dir",
"(",
"parents",
"=",
"True",
")",
"bib_export",
"(",
"bib_style",
",",
"aux",
",",
"extradir",
"/",
"'ZZ_bibtools.bib'",
",",
"no_tool_ok",
"=",
"True",
",",
"quiet",
"=",
"True",
",",
"ignore_missing",
"=",
"True",
")",
"merge_bibtex_with_aux",
"(",
"aux",
",",
"bib",
",",
"extradir",
")"
] | Tectonic has taken over most of the features that this tool used to provide,
but here's a hack to keep my smart .bib file generation working. | [
"Tectonic",
"has",
"taken",
"over",
"most",
"of",
"the",
"features",
"that",
"this",
"tool",
"used",
"to",
"provide",
"but",
"here",
"s",
"a",
"hack",
"to",
"keep",
"my",
"smart",
".",
"bib",
"file",
"generation",
"working",
"."
] | python | train |
jamescooke/flake8-aaa | src/flake8_aaa/helpers.py | https://github.com/jamescooke/flake8-aaa/blob/29938b96845fe32ced4358ba66af3b3be2a37794/src/flake8_aaa/helpers.py#L105-L109 | def node_is_noop(node: ast.AST) -> bool:
"""
Node does nothing.
"""
return isinstance(node.value, ast.Str) if isinstance(node, ast.Expr) else isinstance(node, ast.Pass) | [
"def",
"node_is_noop",
"(",
"node",
":",
"ast",
".",
"AST",
")",
"->",
"bool",
":",
"return",
"isinstance",
"(",
"node",
".",
"value",
",",
"ast",
".",
"Str",
")",
"if",
"isinstance",
"(",
"node",
",",
"ast",
".",
"Expr",
")",
"else",
"isinstance",
"(",
"node",
",",
"ast",
".",
"Pass",
")"
] | Node does nothing. | [
"Node",
"does",
"nothing",
"."
] | python | train |
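A hedged sketch of what the check treats as a no-op: bare docstring expressions and pass statements, but not real statements.
import ast

body = ast.parse('"""doc"""\npass\nx = 1').body
# node_is_noop would report [True, True, False] for these three nodes
flags = [isinstance(n.value, ast.Str) if isinstance(n, ast.Expr) else isinstance(n, ast.Pass)
         for n in body]
assert flags == [True, True, False]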
pip-services3-python/pip-services3-commons-python | pip_services3_commons/data/StringValueMap.py | https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/data/StringValueMap.py#L480-L505 | def from_tuples_array(tuples):
"""
Creates a new StringValueMap from a list of key-value pairs called tuples.
The method is similar to [[fromTuples]] but tuples are passed as array instead of parameters.
:param tuples: a list of values where odd elements are keys and the following even elements are values
:return: a newly created StringValueMap.
"""
result = StringValueMap()
if tuples == None or len(tuples) == 0:
return result
index = 0
while index < len(tuples):
if index + 1 >= len(tuples):
break
key = StringConverter.to_string(tuples[index])
value = StringConverter.to_nullable_string(tuples[index + 1])
index += 2
result.put(key, value)
return result | [
"def",
"from_tuples_array",
"(",
"tuples",
")",
":",
"result",
"=",
"StringValueMap",
"(",
")",
"if",
"tuples",
"==",
"None",
"or",
"len",
"(",
"tuples",
")",
"==",
"0",
":",
"return",
"result",
"index",
"=",
"0",
"while",
"index",
"<",
"len",
"(",
"tuples",
")",
":",
"if",
"index",
"+",
"1",
">=",
"len",
"(",
"tuples",
")",
":",
"break",
"key",
"=",
"StringConverter",
".",
"to_string",
"(",
"tuples",
"[",
"index",
"]",
")",
"value",
"=",
"StringConverter",
".",
"to_nullable_string",
"(",
"tuples",
"[",
"index",
"+",
"1",
"]",
")",
"index",
"+=",
"2",
"result",
".",
"put",
"(",
"key",
",",
"value",
")",
"return",
"result"
] | Creates a new StringValueMap from a list of key-value pairs called tuples.
The method is similar to [[fromTuples]] but tuples are passed as array instead of parameters.
:param tuples: a list of values where odd elements are keys and the following even elements are values
:return: a newly created StringValueMap. | [
"Creates",
"a",
"new",
"StringValueMap",
"from",
"a",
"list",
"of",
"key",
"-",
"value",
"pairs",
"called",
"tuples",
".",
"The",
"method",
"is",
"similar",
"to",
"[[",
"fromTuples",
"]]",
"but",
"tuples",
"are",
"passed",
"as",
"array",
"instead",
"of",
"parameters",
"."
] | python | train |
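A hedged usage sketch (assumes the method is exposed as a static helper, as the missing self suggests): odd positions are keys, even positions are values, and a trailing key without a value is dropped by the bounds check.
value_map = StringValueMap.from_tuples_array(['host', 'localhost', 'port', 8080, 'dangling'])
# keys and values are converted to strings, so the map now holds {'host': 'localhost', 'port': '8080'};
# 'dangling' has no paired value and is ignored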
genepattern/nbtools | nbtools/jsobject/utils.py | https://github.com/genepattern/nbtools/blob/2f74703f59926d8565f9714b1458dc87da8f8574/nbtools/jsobject/utils.py#L30-L33 | def _try_then(self):
"""Check to see if self has been resolved yet, if so invoke then."""
if self._cached is not None and self._callback is not None:
self._callback(*self._cached[0], **self._cached[1]) | [
"def",
"_try_then",
"(",
"self",
")",
":",
"if",
"self",
".",
"_cached",
"is",
"not",
"None",
"and",
"self",
".",
"_callback",
"is",
"not",
"None",
":",
"self",
".",
"_callback",
"(",
"*",
"self",
".",
"_cached",
"[",
"0",
"]",
",",
"*",
"*",
"self",
".",
"_cached",
"[",
"1",
"]",
")"
] | Check to see if self has been resolved yet, if so invoke then. | [
"Check",
"to",
"see",
"if",
"self",
"has",
"been",
"resolved",
"yet",
"if",
"so",
"invoke",
"then",
"."
] | python | train |
lvjiyong/configreset | configreset/__init__.py | https://github.com/lvjiyong/configreset/blob/cde0a426e993a6aa483d6934358e61750c944de9/configreset/__init__.py#L53-L87 | def load_package(package_dir, package=None, exclude=None, default_section=_DEFAULT_SECTION):
"""
Load configuration files from a directory
:param package_dir:
:param package:
:param exclude:
:param default_section:
:return:
"""
init_py = '__init__.py'
py_ext = '.py'
files = os.listdir(package_dir)
if init_py in files:
files = [f for f in files if f != init_py]
if package:
files.insert(0, package)
def init_package(item):
if str(item).endswith(py_ext):
item = item[:-3]
if package:
item = '{package}.{item}'.format(package=package, item=item)
elif _is_conf(item):
item = '{package_dir}/{item}'.format(package_dir=package_dir, item=item)
else:
item = package
return str(item)
logger.debug(files)
files = [init_package(f) for f in files]
if exclude:
files = [f for f in files if f not in exclude]
settings = load(files, default_section)
return merge(settings) | [
"def",
"load_package",
"(",
"package_dir",
",",
"package",
"=",
"None",
",",
"exclude",
"=",
"None",
",",
"default_section",
"=",
"_DEFAULT_SECTION",
")",
":",
"init_py",
"=",
"'__init__.py'",
"py_ext",
"=",
"'.py'",
"files",
"=",
"os",
".",
"listdir",
"(",
"package_dir",
")",
"if",
"init_py",
"in",
"files",
":",
"files",
"=",
"[",
"f",
"for",
"f",
"in",
"files",
"if",
"f",
"!=",
"init_py",
"]",
"if",
"package",
":",
"files",
".",
"insert",
"(",
"0",
",",
"package",
")",
"def",
"init_package",
"(",
"item",
")",
":",
"if",
"str",
"(",
"item",
")",
".",
"endswith",
"(",
"py_ext",
")",
":",
"item",
"=",
"item",
"[",
":",
"-",
"3",
"]",
"if",
"package",
":",
"item",
"=",
"'{package}.{item}'",
".",
"format",
"(",
"package",
"=",
"package",
",",
"item",
"=",
"item",
")",
"elif",
"_is_conf",
"(",
"item",
")",
":",
"item",
"=",
"'{package_dir}/{item}'",
".",
"format",
"(",
"package_dir",
"=",
"package_dir",
",",
"item",
"=",
"item",
")",
"else",
":",
"item",
"=",
"package",
"return",
"str",
"(",
"item",
")",
"logger",
".",
"debug",
"(",
"files",
")",
"files",
"=",
"[",
"init_package",
"(",
"f",
")",
"for",
"f",
"in",
"files",
"]",
"if",
"exclude",
":",
"files",
"=",
"[",
"f",
"for",
"f",
"in",
"files",
"if",
"f",
"not",
"in",
"exclude",
"]",
"settings",
"=",
"load",
"(",
"files",
",",
"default_section",
")",
"return",
"merge",
"(",
"settings",
")"
] | Load configuration files from a directory
:param package_dir:
:param package:
:param exclude:
:param default_section:
:return: | [
"从目录中载入配置文件",
":",
"param",
"package_dir",
":",
":",
"param",
"package",
":",
":",
"param",
"exclude",
":",
":",
"param",
"default_section",
":",
":",
"return",
":"
] | python | train |
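A hedged usage sketch (directory and package names are illustrative): merge every settings module and config file found under a package directory into one settings object.
from configreset import load_package   # module name assumed from the record's path

settings = load_package('myproject/conf', package='myproject.conf')
# 'settings' is the merged result of all .py modules and ini-style files found in myproject/conf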
AustralianSynchrotron/lightflow | lightflow/models/workflow.py | https://github.com/AustralianSynchrotron/lightflow/blob/dc53dbc1d961e20fb144273baca258060705c03e/lightflow/models/workflow.py#L361-L381 | def _handle_stop_dag(self, request):
""" The handler for the stop_dag request.
The stop_dag request adds a dag to the list of dags that should be stopped.
The dag will then stop queueing new tasks and will eventually stop running.
Args:
request (Request): Reference to a request object containing the
incoming request. The payload has to contain the
following fields:
'name': the name of the dag that should be stopped
Returns:
Response: A response object containing the following fields:
- success: True if the dag was added successfully to the list
of dags that should be stopped.
"""
if (request.payload['name'] is not None) and \
(request.payload['name'] not in self._stop_dags):
self._stop_dags.append(request.payload['name'])
return Response(success=True, uid=request.uid) | [
"def",
"_handle_stop_dag",
"(",
"self",
",",
"request",
")",
":",
"if",
"(",
"request",
".",
"payload",
"[",
"'name'",
"]",
"is",
"not",
"None",
")",
"and",
"(",
"request",
".",
"payload",
"[",
"'name'",
"]",
"not",
"in",
"self",
".",
"_stop_dags",
")",
":",
"self",
".",
"_stop_dags",
".",
"append",
"(",
"request",
".",
"payload",
"[",
"'name'",
"]",
")",
"return",
"Response",
"(",
"success",
"=",
"True",
",",
"uid",
"=",
"request",
".",
"uid",
")"
] | The handler for the stop_dag request.
The stop_dag request adds a dag to the list of dags that should be stopped.
The dag will then stop queueing new tasks and will eventually stop running.
Args:
request (Request): Reference to a request object containing the
incoming request. The payload has to contain the
following fields:
'name': the name of the dag that should be stopped
Returns:
Response: A response object containing the following fields:
- success: True if the dag was added successfully to the list
of dags that should be stopped. | [
"The",
"handler",
"for",
"the",
"stop_dag",
"request",
"."
] | python | train |
cortical-io/retina-sdk.py | retinasdk/client/terms_api.py | https://github.com/cortical-io/retina-sdk.py/blob/474c13ad399fe1e974d2650335537608f4456b07/retinasdk/client/terms_api.py#L76-L104 | def getSimilarTerms(self, retina_name, term, context_id=None, pos_type=None, get_fingerprint=None, start_index=0, max_results=10):
"""Get the similar terms of a given term
Args:
retina_name, str: The retina name (required)
term, str: A term in the retina (required)
context_id, int: The identifier of a context (optional) (optional)
pos_type, str: Part of speech (optional) (optional)
get_fingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
start_index, int: The start-index for pagination (optional) (optional)
max_results, int: Max results per page (optional) (optional)
Returns: Array[Term]
"""
resourcePath = '/terms/similar_terms'
method = 'GET'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['term'] = term
queryParams['context_id'] = context_id
queryParams['start_index'] = start_index
queryParams['max_results'] = max_results
queryParams['pos_type'] = pos_type
queryParams['get_fingerprint'] = get_fingerprint
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [Term(**r) for r in response.json()] | [
"def",
"getSimilarTerms",
"(",
"self",
",",
"retina_name",
",",
"term",
",",
"context_id",
"=",
"None",
",",
"pos_type",
"=",
"None",
",",
"get_fingerprint",
"=",
"None",
",",
"start_index",
"=",
"0",
",",
"max_results",
"=",
"10",
")",
":",
"resourcePath",
"=",
"'/terms/similar_terms'",
"method",
"=",
"'GET'",
"queryParams",
"=",
"{",
"}",
"headerParams",
"=",
"{",
"'Accept'",
":",
"'Application/json'",
",",
"'Content-Type'",
":",
"'application/json'",
"}",
"postData",
"=",
"None",
"queryParams",
"[",
"'retina_name'",
"]",
"=",
"retina_name",
"queryParams",
"[",
"'term'",
"]",
"=",
"term",
"queryParams",
"[",
"'context_id'",
"]",
"=",
"context_id",
"queryParams",
"[",
"'start_index'",
"]",
"=",
"start_index",
"queryParams",
"[",
"'max_results'",
"]",
"=",
"max_results",
"queryParams",
"[",
"'pos_type'",
"]",
"=",
"pos_type",
"queryParams",
"[",
"'get_fingerprint'",
"]",
"=",
"get_fingerprint",
"response",
"=",
"self",
".",
"apiClient",
".",
"_callAPI",
"(",
"resourcePath",
",",
"method",
",",
"queryParams",
",",
"postData",
",",
"headerParams",
")",
"return",
"[",
"Term",
"(",
"*",
"*",
"r",
")",
"for",
"r",
"in",
"response",
".",
"json",
"(",
")",
"]"
] | Get the similar terms of a given term
Args:
retina_name, str: The retina name (required)
term, str: A term in the retina (required)
context_id, int: The identifier of a context (optional) (optional)
pos_type, str: Part of speech (optional) (optional)
get_fingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
start_index, int: The start-index for pagination (optional) (optional)
max_results, int: Max results per page (optional) (optional)
Returns: Array[Term] | [
"Get",
"the",
"similar",
"terms",
"of",
"a",
"given",
"term",
"Args",
":",
"retina_name",
"str",
":",
"The",
"retina",
"name",
"(",
"required",
")",
"term",
"str",
":",
"A",
"term",
"in",
"the",
"retina",
"(",
"required",
")",
"context_id",
"int",
":",
"The",
"identifier",
"of",
"a",
"context",
"(",
"optional",
")",
"(",
"optional",
")",
"pos_type",
"str",
":",
"Part",
"of",
"speech",
"(",
"optional",
")",
"(",
"optional",
")",
"get_fingerprint",
"bool",
":",
"Configure",
"if",
"the",
"fingerprint",
"should",
"be",
"returned",
"as",
"part",
"of",
"the",
"results",
"(",
"optional",
")",
"start_index",
"int",
":",
"The",
"start",
"-",
"index",
"for",
"pagination",
"(",
"optional",
")",
"(",
"optional",
")",
"max_results",
"int",
":",
"Max",
"results",
"per",
"page",
"(",
"optional",
")",
"(",
"optional",
")",
"Returns",
":",
"Array",
"[",
"Term",
"]"
] | python | train |
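A hedged usage sketch: fetch the terms most similar to "apple", assuming terms_api is an already-constructed TermsApi bound to an API client and that "en_associative" is an available retina.
similar = terms_api.getSimilarTerms('en_associative', 'apple',
                                    pos_type='NOUN', get_fingerprint=False,
                                    max_results=10)
for t in similar:
    print(t.term)   # attribute name on the Term model is assumed here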
rs/domcheck | domcheck/strategies.py | https://github.com/rs/domcheck/blob/43e10c345320564a1236778e8577e2b8ef825925/domcheck/strategies.py#L65-L86 | def check_meta_tag(domain, prefix, code):
"""
Validates a domain by checking the existence of a <meta name="{prefix}" content="{code}">
tag in the <head> of the home page of the domain using either HTTP or HTTPs protocols.
Returns true if verification succeeded.
"""
url = '://{}'.format(domain)
for proto in ('http', 'https'):
try:
req = Request(proto + url, headers={'User-Agent': 'Mozilla/5.0; Domcheck/1.0'})
res = urlopen(req, timeout=2)
if res.code == 200:
# Expect the </head> to be found in the first 100k of the page
content = str(res.read(100000))
res.close()
return search_meta_tag(content, prefix, code)
else:
res.close()
except:
logger.debug('', exc_info=True)
return False | [
"def",
"check_meta_tag",
"(",
"domain",
",",
"prefix",
",",
"code",
")",
":",
"url",
"=",
"'://{}'",
".",
"format",
"(",
"domain",
")",
"for",
"proto",
"in",
"(",
"'http'",
",",
"'https'",
")",
":",
"try",
":",
"req",
"=",
"Request",
"(",
"proto",
"+",
"url",
",",
"headers",
"=",
"{",
"'User-Agent'",
":",
"'Mozilla/5.0; Domcheck/1.0'",
"}",
")",
"res",
"=",
"urlopen",
"(",
"req",
",",
"timeout",
"=",
"2",
")",
"if",
"res",
".",
"code",
"==",
"200",
":",
"# Expect the </head> to be found in the first 100k of the page",
"content",
"=",
"str",
"(",
"res",
".",
"read",
"(",
"100000",
")",
")",
"res",
".",
"close",
"(",
")",
"return",
"search_meta_tag",
"(",
"content",
",",
"prefix",
",",
"code",
")",
"else",
":",
"res",
".",
"close",
"(",
")",
"except",
":",
"logger",
".",
"debug",
"(",
"''",
",",
"exc_info",
"=",
"True",
")",
"return",
"False"
] | Validates a domain by checking the existence of a <meta name="{prefix}" content="{code}">
tag in the <head> of the home page of the domain using either HTTP or HTTPs protocols.
Returns true if verification succeeded. | [
"Validates",
"a",
"domain",
"by",
"checking",
"the",
"existance",
"of",
"a",
"<meta",
"name",
"=",
"{",
"prefix",
"}",
"content",
"=",
"{",
"code",
"}",
">",
"tag",
"in",
"the",
"<head",
">",
"of",
"the",
"home",
"page",
"of",
"the",
"domain",
"using",
"either",
"HTTP",
"or",
"HTTPs",
"protocols",
"."
] | python | train |
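A hedged usage sketch (domain and code are illustrative): the target site must serve a <meta name="myapp-site-verification" content="..."> tag inside <head> over HTTP or HTTPS for verification to pass.
from domcheck.strategies import check_meta_tag   # import path taken from the record

ok = check_meta_tag('example.com', 'myapp-site-verification', 'a1b2c3d4')
print('verified' if ok else 'not verified')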
saltstack/salt | salt/client/ssh/__init__.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/__init__.py#L932-L943 | def __arg_comps(self):
'''
Return the function name and the arg list
'''
fun = self.argv[0] if self.argv else ''
parsed = salt.utils.args.parse_input(
self.argv[1:],
condition=False,
no_parse=self.opts.get('no_parse', []))
args = parsed[0]
kws = parsed[1]
return fun, args, kws | [
"def",
"__arg_comps",
"(",
"self",
")",
":",
"fun",
"=",
"self",
".",
"argv",
"[",
"0",
"]",
"if",
"self",
".",
"argv",
"else",
"''",
"parsed",
"=",
"salt",
".",
"utils",
".",
"args",
".",
"parse_input",
"(",
"self",
".",
"argv",
"[",
"1",
":",
"]",
",",
"condition",
"=",
"False",
",",
"no_parse",
"=",
"self",
".",
"opts",
".",
"get",
"(",
"'no_parse'",
",",
"[",
"]",
")",
")",
"args",
"=",
"parsed",
"[",
"0",
"]",
"kws",
"=",
"parsed",
"[",
"1",
"]",
"return",
"fun",
",",
"args",
",",
"kws"
] | Return the function name and the arg list | [
"Return",
"the",
"function",
"name",
"and",
"the",
"arg",
"list"
] | python | train |
dcos/shakedown | shakedown/dcos/security.py | https://github.com/dcos/shakedown/blob/e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e/shakedown/dcos/security.py#L175-L196 | def add_group(id, description=None):
""" Adds group to the DCOS Enterprise. If not description
is provided the id will be used for the description.
:param id: group id
:type id: str
:param description: description of the group
:type description: str
"""
if not description:
description = id
data = {
'description': description
}
acl_url = urljoin(_acl_url(), 'groups/{}'.format(id))
try:
r = http.put(acl_url, json=data)
assert r.status_code == 201
except DCOSHTTPException as e:
if e.response.status_code != 409:
raise | [
"def",
"add_group",
"(",
"id",
",",
"description",
"=",
"None",
")",
":",
"if",
"not",
"description",
":",
"description",
"=",
"id",
"data",
"=",
"{",
"'description'",
":",
"description",
"}",
"acl_url",
"=",
"urljoin",
"(",
"_acl_url",
"(",
")",
",",
"'groups/{}'",
".",
"format",
"(",
"id",
")",
")",
"try",
":",
"r",
"=",
"http",
".",
"put",
"(",
"acl_url",
",",
"json",
"=",
"data",
")",
"assert",
"r",
".",
"status_code",
"==",
"201",
"except",
"DCOSHTTPException",
"as",
"e",
":",
"if",
"e",
".",
"response",
".",
"status_code",
"!=",
"409",
":",
"raise"
] | Adds group to the DCOS Enterprise. If no description
is provided, the id will be used for the description.
:param id: group id
:type id: str
:param description: description of the group
:type description: str | [
"Adds",
"group",
"to",
"the",
"DCOS",
"Enterprise",
".",
"If",
"not",
"description",
"is",
"provided",
"the",
"id",
"will",
"be",
"used",
"for",
"the",
"description",
"."
] | python | train |
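A hedged usage sketch (requires an Enterprise DC/OS cluster and shakedown configured with a valid superuser token): create a group, silently tolerating the 409 returned when it already exists.
from shakedown.dcos.security import add_group   # import path taken from the record

add_group('data-eng', description='Data engineering team')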
materialsproject/pymatgen | pymatgen/electronic_structure/cohp.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/electronic_structure/cohp.py#L362-L383 | def get_cohp_by_label(self, label):
"""
Get specific COHP object.
Args:
label: string (for newer Lobster versions: a number)
Returns:
Returns the COHP object to simplify plotting
"""
if label.lower() == "average":
return Cohp(efermi=self.efermi, energies=self.energies,
cohp=self.cohp, are_coops=self.are_coops, icohp=self.icohp)
else:
try:
return Cohp(efermi=self.efermi, energies=self.energies,
cohp=self.all_cohps[label].get_cohp(spin=None, integrated=False),
are_coops=self.are_coops,
icohp=self.all_cohps[label].get_icohp(spin=None))
except KeyError:
print("The label does not exist") | [
"def",
"get_cohp_by_label",
"(",
"self",
",",
"label",
")",
":",
"if",
"label",
".",
"lower",
"(",
")",
"==",
"\"average\"",
":",
"return",
"Cohp",
"(",
"efermi",
"=",
"self",
".",
"efermi",
",",
"energies",
"=",
"self",
".",
"energies",
",",
"cohp",
"=",
"self",
".",
"cohp",
",",
"are_coops",
"=",
"self",
".",
"are_coops",
",",
"icohp",
"=",
"self",
".",
"icohp",
")",
"else",
":",
"try",
":",
"return",
"Cohp",
"(",
"efermi",
"=",
"self",
".",
"efermi",
",",
"energies",
"=",
"self",
".",
"energies",
",",
"cohp",
"=",
"self",
".",
"all_cohps",
"[",
"label",
"]",
".",
"get_cohp",
"(",
"spin",
"=",
"None",
",",
"integrated",
"=",
"False",
")",
",",
"are_coops",
"=",
"self",
".",
"are_coops",
",",
"icohp",
"=",
"self",
".",
"all_cohps",
"[",
"label",
"]",
".",
"get_icohp",
"(",
"spin",
"=",
"None",
")",
")",
"except",
"KeyError",
":",
"print",
"(",
"\"The label does not exist\"",
")"
] | Get specific COHP object.
Args:
label: string (for newer Lobster versions: a number)
Returns:
Returns the COHP object to simplify plotting | [
"Get",
"specific",
"COHP",
"object",
"."
] | python | train |
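A hedged usage sketch: complete_cohp is assumed to be a CompleteCohp built from LOBSTER output (for example via CompleteCohp.from_file('LOBSTER', filename='COHPCAR.lobster', structure_file='POSCAR')).
avg = complete_cohp.get_cohp_by_label('average')      # averaged COHP over all bonds
bond1 = complete_cohp.get_cohp_by_label('1')          # COHP for the bond labelled "1"
curve = bond1.get_cohp(spin=None, integrated=False)   # non-integrated populations, e.g. for plotting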
marl/jams | jams/core.py | https://github.com/marl/jams/blob/b16778399b9528efbd71434842a079f7691a7a66/jams/core.py#L87-L152 | def _open(name_or_fdesc, mode='r', fmt='auto'):
'''An intelligent wrapper for ``open``.
Parameters
----------
name_or_fdesc : string-type or open file descriptor
If a string type, refers to the path to a file on disk.
If an open file descriptor, it is returned as-is.
mode : string
The mode with which to open the file.
See ``open`` for details.
fmt : string ['auto', 'jams', 'json', 'jamz']
The encoding for the input/output stream.
If `auto`, the format is inferred from the filename extension.
Otherwise, use the specified coding.
See Also
--------
open
gzip.open
'''
open_map = {'jams': open,
'json': open,
'jamz': gzip.open,
'gz': gzip.open}
# If we've been given an open descriptor, do the right thing
if hasattr(name_or_fdesc, 'read') or hasattr(name_or_fdesc, 'write'):
yield name_or_fdesc
elif isinstance(name_or_fdesc, six.string_types):
# Infer the opener from the extension
if fmt == 'auto':
_, ext = os.path.splitext(name_or_fdesc)
# Pull off the extension separator
ext = ext[1:]
else:
ext = fmt
try:
ext = ext.lower()
# Force text mode if we're using gzip
if ext in ['jamz', 'gz'] and 't' not in mode:
mode = '{:s}t'.format(mode)
with open_map[ext](name_or_fdesc, mode=mode) as fdesc:
yield fdesc
except KeyError:
raise ParameterError('Unknown JAMS extension '
'format: "{:s}"'.format(ext))
else:
# Don't know how to handle this. Raise a parameter error
raise ParameterError('Invalid filename or '
'descriptor: {}'.format(name_or_fdesc)) | [
"def",
"_open",
"(",
"name_or_fdesc",
",",
"mode",
"=",
"'r'",
",",
"fmt",
"=",
"'auto'",
")",
":",
"open_map",
"=",
"{",
"'jams'",
":",
"open",
",",
"'json'",
":",
"open",
",",
"'jamz'",
":",
"gzip",
".",
"open",
",",
"'gz'",
":",
"gzip",
".",
"open",
"}",
"# If we've been given an open descriptor, do the right thing",
"if",
"hasattr",
"(",
"name_or_fdesc",
",",
"'read'",
")",
"or",
"hasattr",
"(",
"name_or_fdesc",
",",
"'write'",
")",
":",
"yield",
"name_or_fdesc",
"elif",
"isinstance",
"(",
"name_or_fdesc",
",",
"six",
".",
"string_types",
")",
":",
"# Infer the opener from the extension",
"if",
"fmt",
"==",
"'auto'",
":",
"_",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"name_or_fdesc",
")",
"# Pull off the extension separator",
"ext",
"=",
"ext",
"[",
"1",
":",
"]",
"else",
":",
"ext",
"=",
"fmt",
"try",
":",
"ext",
"=",
"ext",
".",
"lower",
"(",
")",
"# Force text mode if we're using gzip",
"if",
"ext",
"in",
"[",
"'jamz'",
",",
"'gz'",
"]",
"and",
"'t'",
"not",
"in",
"mode",
":",
"mode",
"=",
"'{:s}t'",
".",
"format",
"(",
"mode",
")",
"with",
"open_map",
"[",
"ext",
"]",
"(",
"name_or_fdesc",
",",
"mode",
"=",
"mode",
")",
"as",
"fdesc",
":",
"yield",
"fdesc",
"except",
"KeyError",
":",
"raise",
"ParameterError",
"(",
"'Unknown JAMS extension '",
"'format: \"{:s}\"'",
".",
"format",
"(",
"ext",
")",
")",
"else",
":",
"# Don't know how to handle this. Raise a parameter error",
"raise",
"ParameterError",
"(",
"'Invalid filename or '",
"'descriptor: {}'",
".",
"format",
"(",
"name_or_fdesc",
")",
")"
] | An intelligent wrapper for ``open``.
Parameters
----------
name_or_fdesc : string-type or open file descriptor
If a string type, refers to the path to a file on disk.
If an open file descriptor, it is returned as-is.
mode : string
The mode with which to open the file.
See ``open`` for details.
fmt : string ['auto', 'jams', 'json', 'jamz']
The encoding for the input/output stream.
If `auto`, the format is inferred from the filename extension.
Otherwise, use the specified coding.
See Also
--------
open
gzip.open | [
"An",
"intelligent",
"wrapper",
"for",
"open",
"."
] | python | valid |
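A hedged usage sketch (filenames are illustrative; _open is used as the context manager it yields for): the extension selects the opener, so .jamz transparently goes through gzip in text mode while .jams and .json use the plain built-in open.
with _open('annotations.jamz', mode='w') as fdesc:
    fdesc.write('{}')        # transparently gzip-compressed text

with _open('annotations.jams', mode='r') as fdesc:
    text = fdesc.read()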
apple/turicreate | src/unity/python/turicreate/toolkits/topic_model/topic_model.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/topic_model/topic_model.py#L35-L271 | def create(dataset,
num_topics=10,
initial_topics=None,
alpha=None,
beta=.1,
num_iterations=10,
num_burnin=5,
associations=None,
verbose=False,
print_interval=10,
validation_set=None,
method='auto'):
"""
Create a topic model from the given data set. A topic model assumes each
document is a mixture of a set of topics, where for each topic some words
are more likely than others. One statistical approach to do this is called a
"topic model". This method learns a topic model for the given document
collection.
Parameters
----------
dataset : SArray of type dict or SFrame with a single column of type dict
A bag of words representation of a document corpus.
Each element is a dictionary representing a single document, where
the keys are words and the values are the number of times that word
occurs in that document.
num_topics : int, optional
The number of topics to learn.
initial_topics : SFrame, optional
An SFrame with a column of unique words representing the vocabulary
and a column of dense vectors representing
probability of that word given each topic. When provided,
these values are used to initialize the algorithm.
alpha : float, optional
Hyperparameter that controls the diversity of topics in a document.
Smaller values encourage fewer topics per document.
Provided value must be positive. Default value is 50/num_topics.
beta : float, optional
Hyperparameter that controls the diversity of words in a topic.
Smaller values encourage fewer words per topic. Provided value
must be positive.
num_iterations : int, optional
The number of iterations to perform.
num_burnin : int, optional
The number of iterations to perform when inferring the topics for
documents at prediction time.
verbose : bool, optional
When True, print most probable words for each topic while printing
progress.
print_interval : int, optional
The number of iterations to wait between progress reports.
associations : SFrame, optional
An SFrame with two columns named "word" and "topic" containing words
and the topic id that the word should be associated with. These words
are not considered during learning.
validation_set : SArray of type dict or SFrame with a single column
A bag of words representation of a document corpus, similar to the
format required for `dataset`. This will be used to monitor model
performance during training. Each document in the provided validation
set is randomly split: the first portion is used estimate which topic
each document belongs to, and the second portion is used to estimate
the model's performance at predicting the unseen words in the test data.
method : {'cgs', 'alias'}, optional
The algorithm used for learning the model.
- *cgs:* Collapsed Gibbs sampling
- *alias:* AliasLDA method.
Returns
-------
out : TopicModel
A fitted topic model. This can be used with
:py:func:`~TopicModel.get_topics()` and
:py:func:`~TopicModel.predict()`. While fitting is in progress, several
metrics are shown, including:
+------------------+---------------------------------------------------+
| Field | Description |
+==================+===================================================+
| Elapsed Time | The number of elapsed seconds. |
+------------------+---------------------------------------------------+
| Tokens/second | The number of unique words processed per second |
+------------------+---------------------------------------------------+
| Est. Perplexity | An estimate of the model's ability to model the |
| | training data. See the documentation on evaluate. |
+------------------+---------------------------------------------------+
See Also
--------
TopicModel, TopicModel.get_topics, TopicModel.predict,
turicreate.SArray.dict_trim_by_keys, TopicModel.evaluate
References
----------
- `Wikipedia - Latent Dirichlet allocation
<http://en.wikipedia.org/wiki/Latent_Dirichlet_allocation>`_
- Alias method: Li, A. et al. (2014) `Reducing the Sampling Complexity of
Topic Models. <http://www.sravi.org/pubs/fastlda-kdd2014.pdf>`_.
KDD 2014.
Examples
--------
The following example includes an SArray of documents, where
each element represents a document in "bag of words" representation
-- a dictionary with word keys and whose values are the number of times
that word occurred in the document:
>>> docs = turicreate.SArray('https://static.turi.com/datasets/nytimes')
Once in this form, it is straightforward to learn a topic model.
>>> m = turicreate.topic_model.create(docs)
It is also easy to create a new topic model from an old one -- whether
it was created using Turi Create or another package.
>>> m2 = turicreate.topic_model.create(docs, initial_topics=m['topics'])
To manually fix several words to always be assigned to a topic, use
the `associations` argument. The following will ensure that topic 0
has the most probability for each of the provided words:
>>> from turicreate import SFrame
>>> associations = SFrame({'word':['hurricane', 'wind', 'storm'],
'topic': [0, 0, 0]})
>>> m = turicreate.topic_model.create(docs,
associations=associations)
More advanced usage allows you to control aspects of the model and the
learning method.
>>> import turicreate as tc
>>> m = tc.topic_model.create(docs,
num_topics=20, # number of topics
num_iterations=10, # algorithm parameters
alpha=.01, beta=.1) # hyperparameters
To evaluate the model's ability to generalize, we can create a train/test
split where a portion of the words in each document are held out from
training.
>>> train, test = tc.text_analytics.random_split(docs, .8)
>>> m = tc.topic_model.create(train)
>>> results = m.evaluate(test)
>>> print results['perplexity']
"""
dataset = _check_input(dataset)
_check_categorical_option_type("method", method, ['auto', 'cgs', 'alias'])
if method == 'cgs' or method == 'auto':
model_name = 'cgs_topic_model'
else:
model_name = 'alias_topic_model'
# If associations are provided, check they are in the proper format
if associations is None:
associations = _turicreate.SFrame({'word': [], 'topic': []})
if isinstance(associations, _turicreate.SFrame) and \
associations.num_rows() > 0:
assert set(associations.column_names()) == set(['word', 'topic']), \
"Provided associations must be an SFrame containing a word column\
and a topic column."
assert associations['word'].dtype == str, \
"Words must be strings."
assert associations['topic'].dtype == int, \
"Topic ids must be of int type."
if alpha is None:
alpha = float(50) / num_topics
if validation_set is not None:
_check_input(validation_set) # Must be a single column
if isinstance(validation_set, _turicreate.SFrame):
column_name = validation_set.column_names()[0]
validation_set = validation_set[column_name]
(validation_train, validation_test) = _random_split(validation_set)
else:
validation_train = _SArray()
validation_test = _SArray()
opts = {'model_name': model_name,
'data': dataset,
'num_topics': num_topics,
'num_iterations': num_iterations,
'print_interval': print_interval,
'alpha': alpha,
'beta': beta,
'num_burnin': num_burnin,
'associations': associations}
# Initialize the model with basic parameters
response = _turicreate.extensions._text.topicmodel_init(opts)
m = TopicModel(response['model'])
# If initial_topics provided, load it into the model
if isinstance(initial_topics, _turicreate.SFrame):
assert set(['vocabulary', 'topic_probabilities']) == \
set(initial_topics.column_names()), \
"The provided initial_topics does not have the proper format, \
e.g. wrong column names."
observed_topics = initial_topics['topic_probabilities'].apply(lambda x: len(x))
assert all(observed_topics == num_topics), \
"Provided num_topics value does not match the number of provided initial_topics."
# Rough estimate of total number of words
weight = len(dataset) * 1000
opts = {'model': m.__proxy__,
'topics': initial_topics['topic_probabilities'],
'vocabulary': initial_topics['vocabulary'],
'weight': weight}
response = _turicreate.extensions._text.topicmodel_set_topics(opts)
m = TopicModel(response['model'])
# Train the model on the given data set and retrieve predictions
opts = {'model': m.__proxy__,
'data': dataset,
'verbose': verbose,
'validation_train': validation_train,
'validation_test': validation_test}
response = _turicreate.extensions._text.topicmodel_train(opts)
m = TopicModel(response['model'])
return m | [
"def",
"create",
"(",
"dataset",
",",
"num_topics",
"=",
"10",
",",
"initial_topics",
"=",
"None",
",",
"alpha",
"=",
"None",
",",
"beta",
"=",
".1",
",",
"num_iterations",
"=",
"10",
",",
"num_burnin",
"=",
"5",
",",
"associations",
"=",
"None",
",",
"verbose",
"=",
"False",
",",
"print_interval",
"=",
"10",
",",
"validation_set",
"=",
"None",
",",
"method",
"=",
"'auto'",
")",
":",
"dataset",
"=",
"_check_input",
"(",
"dataset",
")",
"_check_categorical_option_type",
"(",
"\"method\"",
",",
"method",
",",
"[",
"'auto'",
",",
"'cgs'",
",",
"'alias'",
"]",
")",
"if",
"method",
"==",
"'cgs'",
"or",
"method",
"==",
"'auto'",
":",
"model_name",
"=",
"'cgs_topic_model'",
"else",
":",
"model_name",
"=",
"'alias_topic_model'",
"# If associations are provided, check they are in the proper format",
"if",
"associations",
"is",
"None",
":",
"associations",
"=",
"_turicreate",
".",
"SFrame",
"(",
"{",
"'word'",
":",
"[",
"]",
",",
"'topic'",
":",
"[",
"]",
"}",
")",
"if",
"isinstance",
"(",
"associations",
",",
"_turicreate",
".",
"SFrame",
")",
"and",
"associations",
".",
"num_rows",
"(",
")",
">",
"0",
":",
"assert",
"set",
"(",
"associations",
".",
"column_names",
"(",
")",
")",
"==",
"set",
"(",
"[",
"'word'",
",",
"'topic'",
"]",
")",
",",
"\"Provided associations must be an SFrame containing a word column\\\n and a topic column.\"",
"assert",
"associations",
"[",
"'word'",
"]",
".",
"dtype",
"==",
"str",
",",
"\"Words must be strings.\"",
"assert",
"associations",
"[",
"'topic'",
"]",
".",
"dtype",
"==",
"int",
",",
"\"Topic ids must be of int type.\"",
"if",
"alpha",
"is",
"None",
":",
"alpha",
"=",
"float",
"(",
"50",
")",
"/",
"num_topics",
"if",
"validation_set",
"is",
"not",
"None",
":",
"_check_input",
"(",
"validation_set",
")",
"# Must be a single column",
"if",
"isinstance",
"(",
"validation_set",
",",
"_turicreate",
".",
"SFrame",
")",
":",
"column_name",
"=",
"validation_set",
".",
"column_names",
"(",
")",
"[",
"0",
"]",
"validation_set",
"=",
"validation_set",
"[",
"column_name",
"]",
"(",
"validation_train",
",",
"validation_test",
")",
"=",
"_random_split",
"(",
"validation_set",
")",
"else",
":",
"validation_train",
"=",
"_SArray",
"(",
")",
"validation_test",
"=",
"_SArray",
"(",
")",
"opts",
"=",
"{",
"'model_name'",
":",
"model_name",
",",
"'data'",
":",
"dataset",
",",
"'num_topics'",
":",
"num_topics",
",",
"'num_iterations'",
":",
"num_iterations",
",",
"'print_interval'",
":",
"print_interval",
",",
"'alpha'",
":",
"alpha",
",",
"'beta'",
":",
"beta",
",",
"'num_burnin'",
":",
"num_burnin",
",",
"'associations'",
":",
"associations",
"}",
"# Initialize the model with basic parameters",
"response",
"=",
"_turicreate",
".",
"extensions",
".",
"_text",
".",
"topicmodel_init",
"(",
"opts",
")",
"m",
"=",
"TopicModel",
"(",
"response",
"[",
"'model'",
"]",
")",
"# If initial_topics provided, load it into the model",
"if",
"isinstance",
"(",
"initial_topics",
",",
"_turicreate",
".",
"SFrame",
")",
":",
"assert",
"set",
"(",
"[",
"'vocabulary'",
",",
"'topic_probabilities'",
"]",
")",
"==",
"set",
"(",
"initial_topics",
".",
"column_names",
"(",
")",
")",
",",
"\"The provided initial_topics does not have the proper format, \\\n e.g. wrong column names.\"",
"observed_topics",
"=",
"initial_topics",
"[",
"'topic_probabilities'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"len",
"(",
"x",
")",
")",
"assert",
"all",
"(",
"observed_topics",
"==",
"num_topics",
")",
",",
"\"Provided num_topics value does not match the number of provided initial_topics.\"",
"# Rough estimate of total number of words",
"weight",
"=",
"len",
"(",
"dataset",
")",
"*",
"1000",
"opts",
"=",
"{",
"'model'",
":",
"m",
".",
"__proxy__",
",",
"'topics'",
":",
"initial_topics",
"[",
"'topic_probabilities'",
"]",
",",
"'vocabulary'",
":",
"initial_topics",
"[",
"'vocabulary'",
"]",
",",
"'weight'",
":",
"weight",
"}",
"response",
"=",
"_turicreate",
".",
"extensions",
".",
"_text",
".",
"topicmodel_set_topics",
"(",
"opts",
")",
"m",
"=",
"TopicModel",
"(",
"response",
"[",
"'model'",
"]",
")",
"# Train the model on the given data set and retrieve predictions",
"opts",
"=",
"{",
"'model'",
":",
"m",
".",
"__proxy__",
",",
"'data'",
":",
"dataset",
",",
"'verbose'",
":",
"verbose",
",",
"'validation_train'",
":",
"validation_train",
",",
"'validation_test'",
":",
"validation_test",
"}",
"response",
"=",
"_turicreate",
".",
"extensions",
".",
"_text",
".",
"topicmodel_train",
"(",
"opts",
")",
"m",
"=",
"TopicModel",
"(",
"response",
"[",
"'model'",
"]",
")",
"return",
"m"
] | Create a topic model from the given data set. A topic model assumes each
document is a mixture of a set of topics, where for each topic some words
are more likely than others. One statistical approach to do this is called a
"topic model". This method learns a topic model for the given document
collection.
Parameters
----------
dataset : SArray of type dict or SFrame with a single column of type dict
A bag of words representation of a document corpus.
Each element is a dictionary representing a single document, where
the keys are words and the values are the number of times that word
occurs in that document.
num_topics : int, optional
The number of topics to learn.
initial_topics : SFrame, optional
An SFrame with a column of unique words representing the vocabulary
and a column of dense vectors representing
probability of that word given each topic. When provided,
these values are used to initialize the algorithm.
alpha : float, optional
Hyperparameter that controls the diversity of topics in a document.
Smaller values encourage fewer topics per document.
Provided value must be positive. Default value is 50/num_topics.
beta : float, optional
Hyperparameter that controls the diversity of words in a topic.
Smaller values encourage fewer words per topic. Provided value
must be positive.
num_iterations : int, optional
The number of iterations to perform.
num_burnin : int, optional
The number of iterations to perform when inferring the topics for
documents at prediction time.
verbose : bool, optional
When True, print most probable words for each topic while printing
progress.
print_interval : int, optional
The number of iterations to wait between progress reports.
associations : SFrame, optional
An SFrame with two columns named "word" and "topic" containing words
and the topic id that the word should be associated with. These words
are not considered during learning.
validation_set : SArray of type dict or SFrame with a single column
A bag of words representation of a document corpus, similar to the
format required for `dataset`. This will be used to monitor model
performance during training. Each document in the provided validation
set is randomly split: the first portion is used estimate which topic
each document belongs to, and the second portion is used to estimate
the model's performance at predicting the unseen words in the test data.
method : {'cgs', 'alias'}, optional
The algorithm used for learning the model.
- *cgs:* Collapsed Gibbs sampling
- *alias:* AliasLDA method.
Returns
-------
out : TopicModel
A fitted topic model. This can be used with
:py:func:`~TopicModel.get_topics()` and
:py:func:`~TopicModel.predict()`. While fitting is in progress, several
metrics are shown, including:
+------------------+---------------------------------------------------+
| Field | Description |
+==================+===================================================+
| Elapsed Time | The number of elapsed seconds. |
+------------------+---------------------------------------------------+
| Tokens/second | The number of unique words processed per second |
+------------------+---------------------------------------------------+
| Est. Perplexity | An estimate of the model's ability to model the |
| | training data. See the documentation on evaluate. |
+------------------+---------------------------------------------------+
See Also
--------
TopicModel, TopicModel.get_topics, TopicModel.predict,
turicreate.SArray.dict_trim_by_keys, TopicModel.evaluate
References
----------
- `Wikipedia - Latent Dirichlet allocation
<http://en.wikipedia.org/wiki/Latent_Dirichlet_allocation>`_
- Alias method: Li, A. et al. (2014) `Reducing the Sampling Complexity of
Topic Models. <http://www.sravi.org/pubs/fastlda-kdd2014.pdf>`_.
KDD 2014.
Examples
--------
The following example includes an SArray of documents, where
each element represents a document in "bag of words" representation
-- a dictionary with word keys and whose values are the number of times
that word occurred in the document:
>>> docs = turicreate.SArray('https://static.turi.com/datasets/nytimes')
Once in this form, it is straightforward to learn a topic model.
>>> m = turicreate.topic_model.create(docs)
It is also easy to create a new topic model from an old one -- whether
it was created using Turi Create or another package.
>>> m2 = turicreate.topic_model.create(docs, initial_topics=m['topics'])
To manually fix several words to always be assigned to a topic, use
the `associations` argument. The following will ensure that topic 0
has the most probability for each of the provided words:
>>> from turicreate import SFrame
>>> associations = SFrame({'word':['hurricane', 'wind', 'storm'],
'topic': [0, 0, 0]})
>>> m = turicreate.topic_model.create(docs,
associations=associations)
More advanced usage allows you to control aspects of the model and the
learning method.
>>> import turicreate as tc
>>> m = tc.topic_model.create(docs,
num_topics=20, # number of topics
num_iterations=10, # algorithm parameters
alpha=.01, beta=.1) # hyperparameters
To evaluate the model's ability to generalize, we can create a train/test
split where a portion of the words in each document are held out from
training.
>>> train, test = tc.text_analytics.random_split(docs, .8)
>>> m = tc.topic_model.create(train)
>>> results = m.evaluate(test)
>>> print results['perplexity'] | [
"Create",
"a",
"topic",
"model",
"from",
"the",
"given",
"data",
"set",
".",
"A",
"topic",
"model",
"assumes",
"each",
"document",
"is",
"a",
"mixture",
"of",
"a",
"set",
"of",
"topics",
"where",
"for",
"each",
"topic",
"some",
"words",
"are",
"more",
"likely",
"than",
"others",
".",
"One",
"statistical",
"approach",
"to",
"do",
"this",
"is",
"called",
"a",
"topic",
"model",
".",
"This",
"method",
"learns",
"a",
"topic",
"model",
"for",
"the",
"given",
"document",
"collection",
"."
] | python | train |
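Editorial note: the record above shows usage only in fragments; the sketch below strings them into one end-to-end script. It is illustrative, not part of the dataset row: it assumes turicreate is installed and the nytimes dataset URL is reachable, and the hyperparameter values are simply those from the docstring's own examples.

import turicreate as tc

# Bag-of-words documents: one {word: count} dictionary per element.
docs = tc.SArray('https://static.turi.com/datasets/nytimes')

# Hold out a random 20% of the words in each document for evaluation.
train, test = tc.text_analytics.random_split(docs, 0.8)

# Fit a 20-topic model with collapsed Gibbs sampling.
m = tc.topic_model.create(train, num_topics=20, num_iterations=10,
                          alpha=0.01, beta=0.1, method='cgs')

print(m.get_topics(num_words=5))        # most probable words per topic
print(m.predict(test))                  # most likely topic per held-out document
print(m.evaluate(test)['perplexity'])   # held-out perplexity (lower is better)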
Erotemic/utool | utool/util_hash.py | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L726-L763 | def convert_bytes_to_bigbase(bytes_, alphabet=ALPHABET_27):
r"""
Args:
bytes_ (bytes):
Returns:
str:
Ignore:
CommandLine:
python -m utool.util_hash convert_bytes_to_bigbase
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_hash import * # NOQA
>>> import utool as ut
>>> bytes_ = b('9999999999999999999999999999999999')
>>> alphabet = ALPHABET_27
>>> result = convert_bytes_to_bigbase(bytes_, alphabet)
>>> print(result)
fervudwhpustklnptklklcgswbmvtustqocdpgiwkgrvwytvneardkpytd
"""
x = _bytes_to_int(bytes_)
if x == 0:
return '0'
sign = 1 if x > 0 else -1
x *= sign
digits = []
bigbase = len(alphabet)
while x:
digits.append(alphabet[x % bigbase])
x //= bigbase
if sign < 0:
digits.append('-')
digits.reverse()
newbase_str = ''.join(digits)
return newbase_str | [
"def",
"convert_bytes_to_bigbase",
"(",
"bytes_",
",",
"alphabet",
"=",
"ALPHABET_27",
")",
":",
"x",
"=",
"_bytes_to_int",
"(",
"bytes_",
")",
"if",
"x",
"==",
"0",
":",
"return",
"'0'",
"sign",
"=",
"1",
"if",
"x",
">",
"0",
"else",
"-",
"1",
"x",
"*=",
"sign",
"digits",
"=",
"[",
"]",
"bigbase",
"=",
"len",
"(",
"alphabet",
")",
"while",
"x",
":",
"digits",
".",
"append",
"(",
"alphabet",
"[",
"x",
"%",
"bigbase",
"]",
")",
"x",
"//=",
"bigbase",
"if",
"sign",
"<",
"0",
":",
"digits",
".",
"append",
"(",
"'-'",
")",
"digits",
".",
"reverse",
"(",
")",
"newbase_str",
"=",
"''",
".",
"join",
"(",
"digits",
")",
"return",
"newbase_str"
] | r"""
Args:
bytes_ (bytes):
Returns:
str:
Ignore:
CommandLine:
python -m utool.util_hash convert_bytes_to_bigbase
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_hash import * # NOQA
>>> import utool as ut
>>> bytes_ = b('9999999999999999999999999999999999')
>>> alphabet = ALPHABET_27
>>> result = convert_bytes_to_bigbase(bytes_, alphabet)
>>> print(result)
fervudwhpustklnptklklcgswbmvtustqocdpgiwkgrvwytvneardkpytd | [
"r",
"Args",
":",
"bytes_",
"(",
"bytes",
")",
":"
] | python | train |
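Editorial note: a self-contained sketch of the same bytes-to-big-base idea, useful for seeing what the helper does without importing utool. The 27-character alphabet below is a stand-in (utool's real ALPHABET_27 may use different characters, so the output string will differ), and reading the bytes big-endian is an assumption about _bytes_to_int.

# Stand-in alphabet; utool's actual ALPHABET_27 may differ.
ALPHABET_27 = 'abcdefghijklmnopqrstuvwxyz0'

def to_bigbase(data, alphabet=ALPHABET_27):
    x = int.from_bytes(data, 'big')      # assumption: big-endian byte order
    if x == 0:
        return '0'
    digits = []
    base = len(alphabet)
    while x:
        digits.append(alphabet[x % base])
        x //= base
    digits.reverse()
    return ''.join(digits)

print(to_bigbase(b'9999999999999999999999999999999999'))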
blockstack/blockstack-core | blockstack/blockstackd.py | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L208-L244 | def get_name_cost( db, name ):
"""
Get the cost of a name, given the fully-qualified name.
Do so by finding the namespace it belongs to (even if the namespace is being imported).
Return {'amount': ..., 'units': ...} on success
Return None if the namespace has not been declared
"""
lastblock = db.lastblock
namespace_id = get_namespace_from_name( name )
if namespace_id is None or len(namespace_id) == 0:
log.debug("No namespace '%s'" % namespace_id)
return None
namespace = db.get_namespace( namespace_id )
if namespace is None:
# maybe importing?
log.debug("Namespace '{}' is being revealed".format(namespace_id))
namespace = db.get_namespace_reveal( namespace_id )
if namespace is None:
# no such namespace
log.debug("No namespace '%s'" % namespace_id)
return None
name_fee = price_name( get_name_from_fq_name( name ), namespace, lastblock )
name_fee_units = None
if namespace['version'] == NAMESPACE_VERSION_PAY_WITH_STACKS:
name_fee_units = TOKEN_TYPE_STACKS
else:
name_fee_units = 'BTC'
name_fee = int(math.ceil(name_fee))
log.debug("Cost of '%s' at %s is %s units of %s" % (name, lastblock, name_fee, name_fee_units))
return {'amount': name_fee, 'units': name_fee_units} | [
"def",
"get_name_cost",
"(",
"db",
",",
"name",
")",
":",
"lastblock",
"=",
"db",
".",
"lastblock",
"namespace_id",
"=",
"get_namespace_from_name",
"(",
"name",
")",
"if",
"namespace_id",
"is",
"None",
"or",
"len",
"(",
"namespace_id",
")",
"==",
"0",
":",
"log",
".",
"debug",
"(",
"\"No namespace '%s'\"",
"%",
"namespace_id",
")",
"return",
"None",
"namespace",
"=",
"db",
".",
"get_namespace",
"(",
"namespace_id",
")",
"if",
"namespace",
"is",
"None",
":",
"# maybe importing?",
"log",
".",
"debug",
"(",
"\"Namespace '{}' is being revealed\"",
".",
"format",
"(",
"namespace_id",
")",
")",
"namespace",
"=",
"db",
".",
"get_namespace_reveal",
"(",
"namespace_id",
")",
"if",
"namespace",
"is",
"None",
":",
"# no such namespace",
"log",
".",
"debug",
"(",
"\"No namespace '%s'\"",
"%",
"namespace_id",
")",
"return",
"None",
"name_fee",
"=",
"price_name",
"(",
"get_name_from_fq_name",
"(",
"name",
")",
",",
"namespace",
",",
"lastblock",
")",
"name_fee_units",
"=",
"None",
"if",
"namespace",
"[",
"'version'",
"]",
"==",
"NAMESPACE_VERSION_PAY_WITH_STACKS",
":",
"name_fee_units",
"=",
"TOKEN_TYPE_STACKS",
"else",
":",
"name_fee_units",
"=",
"'BTC'",
"name_fee",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"name_fee",
")",
")",
"log",
".",
"debug",
"(",
"\"Cost of '%s' at %s is %s units of %s\"",
"%",
"(",
"name",
",",
"lastblock",
",",
"name_fee",
",",
"name_fee_units",
")",
")",
"return",
"{",
"'amount'",
":",
"name_fee",
",",
"'units'",
":",
"name_fee_units",
"}"
] | Get the cost of a name, given the fully-qualified name.
Do so by finding the namespace it belongs to (even if the namespace is being imported).
Return {'amount': ..., 'units': ...} on success
Return None if the namespace has not been declared | [
"Get",
"the",
"cost",
"of",
"a",
"name",
"given",
"the",
"fully",
"-",
"qualified",
"name",
".",
"Do",
"so",
"by",
"finding",
"the",
"namespace",
"it",
"belongs",
"to",
"(",
"even",
"if",
"the",
"namespace",
"is",
"being",
"imported",
")",
"."
] | python | train |
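Editorial note: a minimal, self-contained sketch of the lookup-then-price control flow in get_name_cost() and of its return shape. Everything here is hypothetical -- the namespace table, base fee, version constant, and 'STACKS' unit string stand in for BlockstackDB, price_name(), NAMESPACE_VERSION_PAY_WITH_STACKS, and TOKEN_TYPE_STACKS.

import math

STACKS_VERSION = 2                          # stand-in for NAMESPACE_VERSION_PAY_WITH_STACKS
NAMESPACES = {'id': {'version': 2, 'price_multiplier': 1.5}}   # hypothetical table

def sketch_name_cost(name):
    namespace_id = name.split('.')[-1] if '.' in name else ''
    if not namespace_id:
        return None                         # no namespace in the name
    namespace = NAMESPACES.get(namespace_id)
    if namespace is None:
        return None                         # namespace not declared
    base_fee = 640000                       # hypothetical flat price
    fee = int(math.ceil(base_fee * namespace['price_multiplier']))
    units = 'STACKS' if namespace['version'] == STACKS_VERSION else 'BTC'
    return {'amount': fee, 'units': units}

print(sketch_name_cost('alice.id'))         # {'amount': 960000, 'units': 'STACKS'}
print(sketch_name_cost('alice.nosuch'))     # None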
openthread/openthread | tools/harness-thci/OpenThread_WpanCtl.py | https://github.com/openthread/openthread/blob/0208d10563aa21c518092985c78ecf9cd223ab74/tools/harness-thci/OpenThread_WpanCtl.py#L439-L459 | def __setAddressfilterMode(self, mode):
"""set address filter mode
Returns:
True: successful to set address filter mode.
False: fail to set address filter mode.
"""
print 'call setAddressFilterMode() ' + mode
try:
if re.match('list', mode, re.M|re.I):
cmd = WPANCTL_CMD + 'setprop MAC:' + mode + ':Enabled 1'
elif mode == 'disabled':
cmd = WPANCTL_CMD + 'setprop MAC:' + mode + ':Enabled 0'
else:
print 'no such option'
return False
if self.__sendCommand(cmd)[0] != 'Fail':
return True
return False
except Exception, e:
ModuleHelper.WriteIntoDebugLogger('__setAddressFilterMode() Error: ' + str(e)) | [
"def",
"__setAddressfilterMode",
"(",
"self",
",",
"mode",
")",
":",
"print",
"'call setAddressFilterMode() '",
"+",
"mode",
"try",
":",
"if",
"re",
".",
"match",
"(",
"'list'",
",",
"mode",
",",
"re",
".",
"M",
"|",
"re",
".",
"I",
")",
":",
"cmd",
"=",
"WPANCTL_CMD",
"+",
"'setprop MAC:'",
"+",
"mode",
"+",
"':Enabled 1'",
"elif",
"mode",
"==",
"'disabled'",
":",
"cmd",
"=",
"WPANCTL_CMD",
"+",
"'setprop MAC:'",
"+",
"mode",
"+",
"':Enabled 0'",
"else",
":",
"print",
"'no such option'",
"return",
"False",
"if",
"self",
".",
"__sendCommand",
"(",
"cmd",
")",
"[",
"0",
"]",
"!=",
"'Fail'",
":",
"return",
"True",
"return",
"False",
"except",
"Exception",
",",
"e",
":",
"ModuleHelper",
".",
"WriteIntoDebugLogger",
"(",
"'__setAddressFilterMode() Error: '",
"+",
"str",
"(",
"e",
")",
")"
] | set address filter mode
Returns:
True: successful to set address filter mode.
False: fail to set address filter mode. | [
"set",
"address",
"filter",
"mode"
] | python | train |
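Editorial note: a small sketch of the wpanctl command strings the method above assembles. The WPANCTL_CMD prefix and the mode names are placeholders; note that re.match() anchors at the start of the string, so only modes that begin with 'list' take the first branch.

import re

WPANCTL_CMD = 'wpanctl '                    # placeholder; the harness supplies the real prefix

def build_filter_cmd(mode):
    if re.match('list', mode, re.M | re.I):
        return WPANCTL_CMD + 'setprop MAC:' + mode + ':Enabled 1'
    elif mode == 'disabled':
        return WPANCTL_CMD + 'setprop MAC:' + mode + ':Enabled 0'
    return None                             # the "no such option" branch

print(build_filter_cmd('listfilter'))       # wpanctl setprop MAC:listfilter:Enabled 1
print(build_filter_cmd('disabled'))         # wpanctl setprop MAC:disabled:Enabled 0
print(build_filter_cmd('Whitelist'))        # None - re.match() does not find 'list' at the start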
KnuVerse/knuverse-sdk-python | knuverse/knufactor.py | https://github.com/KnuVerse/knuverse-sdk-python/blob/00f1275a452a4dcf9bc92ef345f6985504226d8e/knuverse/knufactor.py#L728-L752 | def report_events(self, start_date, end_date, type="system"):
"""
Create a report for all client events or all system events.
Uses GET to /reports/events/{clients,system} interface
:Args:
* *start_date*: (datetime) Start time for report generation
* *end_date*: (datetime) End time for report generation
:Kwargs:
* *type*: (str) Type of event report to create. "system" or "clients"
:Returns: (list) List of events in the input range
"""
start_str, end_str = self._format_input_dates(start_date, end_date)
params = {
"start_date": start_str,
"end_date": end_str
}
endpoint = url.reports_events_clients if type == "clients" else url.reports_events_system
response = self._get(endpoint, params=params)
self._check_response(response, 200)
return self._create_response(response).get("events") | [
"def",
"report_events",
"(",
"self",
",",
"start_date",
",",
"end_date",
",",
"type",
"=",
"\"system\"",
")",
":",
"start_str",
",",
"end_str",
"=",
"self",
".",
"_format_input_dates",
"(",
"start_date",
",",
"end_date",
")",
"params",
"=",
"{",
"\"start_date\"",
":",
"start_str",
",",
"\"end_date\"",
":",
"end_str",
"}",
"endpoint",
"=",
"url",
".",
"reports_events_clients",
"if",
"type",
"==",
"\"clients\"",
"else",
"url",
".",
"reports_events_system",
"response",
"=",
"self",
".",
"_get",
"(",
"endpoint",
",",
"params",
"=",
"params",
")",
"self",
".",
"_check_response",
"(",
"response",
",",
"200",
")",
"return",
"self",
".",
"_create_response",
"(",
"response",
")",
".",
"get",
"(",
"\"events\"",
")"
] | Create a report for all client events or all system events.
Uses GET to /reports/events/{clients,system} interface
:Args:
* *start_date*: (datetime) Start time for report generation
* *end_date*: (datetime) End time for report generation
:Kwargs:
* *type*: (str) Type of event report to create. "system" or "clients"
:Returns: (list) List of events in the input range | [
"Create",
"a",
"report",
"for",
"all",
"client",
"events",
"or",
"all",
"system",
"events",
".",
"Uses",
"GET",
"to",
"/",
"reports",
"/",
"events",
"/",
"{",
"clients",
"system",
"}",
"interface"
] | python | train |
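Editorial note: a hedged usage sketch for report_events(). The client argument stands for an already-authenticated Knufactor instance (its construction is omitted because the constructor arguments depend on the deployment); only the datetime argument shapes follow the docstring above.

from datetime import datetime, timedelta

def last_week_events(client, kind="system"):
    # Build the start/end datetimes the method formats internally.
    end = datetime.utcnow()
    start = end - timedelta(days=7)
    return client.report_events(start, end, type=kind)

# events = last_week_events(client, kind="clients")
# for event in events:
#     print(event)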
saltstack/salt | salt/client/ssh/state.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/state.py#L92-L119 | def _master_tops(self):
'''
Evaluate master_tops locally
'''
if 'id' not in self.opts:
log.error('Received call for external nodes without an id')
return {}
if not salt.utils.verify.valid_id(self.opts, self.opts['id']):
return {}
# Evaluate all configured master_tops interfaces
grains = {}
ret = {}
if 'grains' in self.opts:
grains = self.opts['grains']
for fun in self.tops:
if fun not in self.opts.get('master_tops', {}):
continue
try:
ret.update(self.tops[fun](opts=self.opts, grains=grains))
except Exception as exc:
# If anything happens in the top generation, log it and move on
log.error(
'Top function %s failed with error %s for minion %s',
fun, exc, self.opts['id']
)
return ret | [
"def",
"_master_tops",
"(",
"self",
")",
":",
"if",
"'id'",
"not",
"in",
"self",
".",
"opts",
":",
"log",
".",
"error",
"(",
"'Received call for external nodes without an id'",
")",
"return",
"{",
"}",
"if",
"not",
"salt",
".",
"utils",
".",
"verify",
".",
"valid_id",
"(",
"self",
".",
"opts",
",",
"self",
".",
"opts",
"[",
"'id'",
"]",
")",
":",
"return",
"{",
"}",
"# Evaluate all configured master_tops interfaces",
"grains",
"=",
"{",
"}",
"ret",
"=",
"{",
"}",
"if",
"'grains'",
"in",
"self",
".",
"opts",
":",
"grains",
"=",
"self",
".",
"opts",
"[",
"'grains'",
"]",
"for",
"fun",
"in",
"self",
".",
"tops",
":",
"if",
"fun",
"not",
"in",
"self",
".",
"opts",
".",
"get",
"(",
"'master_tops'",
",",
"{",
"}",
")",
":",
"continue",
"try",
":",
"ret",
".",
"update",
"(",
"self",
".",
"tops",
"[",
"fun",
"]",
"(",
"opts",
"=",
"self",
".",
"opts",
",",
"grains",
"=",
"grains",
")",
")",
"except",
"Exception",
"as",
"exc",
":",
"# If anything happens in the top generation, log it and move on",
"log",
".",
"error",
"(",
"'Top function %s failed with error %s for minion %s'",
",",
"fun",
",",
"exc",
",",
"self",
".",
"opts",
"[",
"'id'",
"]",
")",
"return",
"ret"
] | Evaluate master_tops locally | [
"Evaluate",
"master_tops",
"locally"
] | python | train |
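Editorial note: an illustrative stub of the merge loop in _master_tops(). The module name, grains, and environments are hypothetical; the point is the shape of the dictionary each enabled tops interface returns and how the results are merged.

def fake_ext_nodes(opts=None, grains=None):
    # A real master_tops module would query an external node classifier here.
    return {'base': ['core', 'users'], 'dev': ['testing']}

tops = {'ext_nodes': fake_ext_nodes}
opts = {'id': 'minion01',
        'grains': {'os': 'Ubuntu'},
        'master_tops': {'ext_nodes': {}}}

ret = {}
for fun in tops:
    if fun not in opts.get('master_tops', {}):
        continue
    ret.update(tops[fun](opts=opts, grains=opts['grains']))

print(ret)   # {'base': ['core', 'users'], 'dev': ['testing']}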
tchellomello/raincloudy | raincloudy/controller.py | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/controller.py#L48-L55 | def _assign_faucets(self, faucets):
"""Assign RainCloudyFaucet objects to self.faucets."""
if not faucets:
raise TypeError("Controller does not have a faucet assigned.")
for faucet_id in faucets:
self.faucets.append(
RainCloudyFaucet(self._parent, self, faucet_id)) | [
"def",
"_assign_faucets",
"(",
"self",
",",
"faucets",
")",
":",
"if",
"not",
"faucets",
":",
"raise",
"TypeError",
"(",
"\"Controller does not have a faucet assigned.\"",
")",
"for",
"faucet_id",
"in",
"faucets",
":",
"self",
".",
"faucets",
".",
"append",
"(",
"RainCloudyFaucet",
"(",
"self",
".",
"_parent",
",",
"self",
",",
"faucet_id",
")",
")"
] | Assign RainCloudyFaucet objects to self.faucets. | [
"Assign",
"RainCloudyFaucet",
"objects",
"to",
"self",
".",
"faucets",
"."
] | python | train |