Fields per record: repo | path | url | code (Python, with docstring) | partition (train / valid / test)

repo: django-auth-ldap/django-auth-ldap | path: django_auth_ldap/backend.py | language: python | partition: train
url: https://github.com/django-auth-ldap/django-auth-ldap/blob/9ce3c2825527f8faa1793958b041816e63d839af/django_auth_ldap/backend.py#L526-L543

```python
def _search_for_user_dn(self):
"""
Searches the directory for a user matching AUTH_LDAP_USER_SEARCH.
Populates self._user_dn and self._user_attrs.
"""
search = self.settings.USER_SEARCH
if search is None:
raise ImproperlyConfigured(
"AUTH_LDAP_USER_SEARCH must be an LDAPSearch instance."
)
results = search.execute(self.connection, {"user": self._username})
if results is not None and len(results) == 1:
(user_dn, self._user_attrs) = next(iter(results))
else:
user_dn = None
    return user_dn
```
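
For context, a hedged sketch of the `AUTH_LDAP_USER_SEARCH` setting this method consumes; the base DN and filter below are assumptions, not values taken from this repo:

```python
# Hypothetical Django settings; base DN and filter are assumptions.
import ldap
from django_auth_ldap.config import LDAPSearch

AUTH_LDAP_USER_SEARCH = LDAPSearch(
    "ou=users,dc=example,dc=com",   # assumed base DN
    ldap.SCOPE_SUBTREE,
    "(uid=%(user)s)",               # %(user)s is filled from self._username
)
```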

repo: tensorpack/tensorpack | path: examples/DynamicFilterNetwork/steering-filter.py | language: python | partition: train
url: https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/DynamicFilterNetwork/steering-filter.py#L24-L59

```python
def DynamicConvFilter(inputs, filters, out_channel,
kernel_shape,
stride=1,
padding='SAME'):
""" see "Dynamic Filter Networks" (NIPS 2016)
by Bert De Brabandere*, Xu Jia*, Tinne Tuytelaars and Luc Van Gool
Remarks:
This is the convolution version of a dynamic filter.
Args:
inputs : unfiltered input [b, h, w, 1] only grayscale images.
filters : learned filters of [b, k, k, 1] (dynamically generated by the network).
out_channel (int): number of output channel.
        kernel_shape: (h, w) tuple or an int.
        stride: (h, w) tuple or an int.
padding (str): 'valid' or 'same'. Case insensitive.
Returns
tf.Tensor named ``output``.
"""
# tf.unstack only works with known batch_size :-(
batch_size, h, w, in_channel = inputs.get_shape().as_list()
stride = shape4d(stride)
inputs = tf.unstack(inputs)
filters = tf.reshape(filters, [batch_size] + shape2d(kernel_shape) + [in_channel, out_channel])
filters = tf.unstack(filters)
# this is ok as TF uses the cuda stream context
rsl = [tf.nn.conv2d(tf.reshape(d, [1, h, w, in_channel]),
tf.reshape(k, [kernel_shape, kernel_shape, in_channel, out_channel]),
stride, padding="SAME") for d, k in zip(inputs, filters)]
rsl = tf.concat(rsl, axis=0, name='output')
    return rsl
```
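
A minimal call sketch, assuming TF1-style placeholders; the batch size must be statically known because the function uses `tf.unstack`, and all shapes here are illustrative:

```python
# Illustrative shapes only; batch size must be known statically.
import tensorflow as tf

images  = tf.placeholder(tf.float32, [8, 32, 32, 1])  # [b, h, w, 1] grayscale
kernels = tf.placeholder(tf.float32, [8, 9, 9, 1])    # one 9x9 filter per image
output = DynamicConvFilter(images, kernels, out_channel=1, kernel_shape=9)
```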

repo: readbeyond/aeneas | path: aeneas/globalfunctions.py | language: python | partition: train
url: https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/globalfunctions.py#L446-L469

```python
def config_dict_to_string(dictionary):
"""
Convert a given config dictionary ::
dictionary[key_1] = value_1
dictionary[key_2] = value_2
...
dictionary[key_n] = value_n
into the corresponding string ::
key_1=value_1|key_2=value_2|...|key_n=value_n
:param dict dictionary: the config dictionary
:rtype: string
"""
parameters = []
for key in dictionary:
parameters.append(u"%s%s%s" % (
key,
gc.CONFIG_STRING_ASSIGNMENT_SYMBOL,
dictionary[key]
))
    return gc.CONFIG_STRING_SEPARATOR_SYMBOL.join(parameters)
```
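
A quick behavior sketch, assuming the assignment and separator symbols are `=` and `|` as the docstring's example implies:

```python
# Assuming gc.CONFIG_STRING_ASSIGNMENT_SYMBOL == "=" and
# gc.CONFIG_STRING_SEPARATOR_SYMBOL == "|":
config_dict_to_string({"key_1": "value_1", "key_2": "value_2"})
# -> 'key_1=value_1|key_2=value_2'  (key order follows dict iteration)
```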

repo: languitar/pass-git-helper | path: passgithelper.py | language: python | partition: train
url: https://github.com/languitar/pass-git-helper/blob/f84376d9ed6f7c47454a499da103da6fc2575a25/passgithelper.py#L206-L214

```python
def get_value(self,
entry_name: Text,
entry_lines: Sequence[Text]) -> Optional[Text]:
"""See base class method."""
raw_value = self._get_raw(entry_name, entry_lines)
if raw_value is not None:
return raw_value[self._prefix_length:]
else:
        return None
```

repo: kobejohn/PQHelper | path: pqhelper/versus.py | language: python | partition: train
url: https://github.com/kobejohn/PQHelper/blob/d2b78a22dcb631794295e6a159b06f39c3f10db6/pqhelper/versus.py#L114-L130

```python
def _summarize_result(self, root_action, leaf_eot):
    """Return a base.Summary with useful information that summarizes this action."""
root_board = root_action.parent.board
action_detail = root_action.position_pair
score = self._relative_score(root_action, leaf_eot,
root_action.parent.player,
root_action.parent.opponent)
# mana drain info
total_leaves = 0
mana_drain_leaves = 0
for leaf in root_action.leaves():
total_leaves += 1
if leaf.is_mana_drain:
mana_drain_leaves += 1
summary = base.Summary(root_board, action_detail, score,
mana_drain_leaves, total_leaves)
    return summary
```

repo: saltstack/salt | path: salt/cloud/__init__.py | language: python | partition: train
url: https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/__init__.py#L532-L543

```python
def get_configured_providers(self):
'''
Return the configured providers
'''
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
if len(drivers) > 1:
for driver in drivers:
providers.add('{0}:{1}'.format(alias, driver))
continue
providers.add(alias)
    return providers
```
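
A sketch of the resulting set for a hypothetical `opts['providers']` mapping: aliases with one driver appear bare, aliases with several appear as `alias:driver`:

```python
# Hypothetical provider configuration.
opts = {'providers': {
    'my-ec2': {'ec2': {}},             # single driver -> bare alias
    'multi':  {'ec2': {}, 'gce': {}},  # several drivers -> alias:driver
}}
# get_configured_providers() would return:
# {'my-ec2', 'multi:ec2', 'multi:gce'}
```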

repo: atlassian-api/atlassian-python-api | path: examples/confluence-trash-cleaner.py | language: python | partition: train
url: https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/examples/confluence-trash-cleaner.py#L33-L51

```python
def clean_all_trash_pages_from_all_spaces(confluence):
"""
    Main function: retrieve all space keys and pass each space to the cleaner.
:param confluence:
:return:
"""
limit = 50
flag = True
i = 0
while flag:
space_lists = confluence.get_all_spaces(start=i * limit, limit=limit)
if space_lists and len(space_lists) != 0:
i += 1
for space_list in space_lists:
print("Start review the space with key = " + space_list['key'])
clean_pages_from_space(confluence=confluence, space_key=space_list['key'])
else:
flag = False
    return 0
```
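
A minimal invocation sketch; the URL and credentials are placeholders:

```python
from atlassian import Confluence

confluence = Confluence(url='https://confluence.example.com',  # placeholder
                        username='admin', password='secret')   # placeholders
clean_all_trash_pages_from_all_spaces(confluence)
```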

repo: UCL-INGI/INGInious | path: inginious/client/_zeromq_client.py | language: python | partition: train
url: https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/client/_zeromq_client.py#L152-L178

```python
async def _reconnect(self):
"""
    Called when the remote server is inaccessible and the connection has to be restarted
"""
# 1. Close all transactions
for msg_class in self._transactions:
_1, _2, _3, coroutine_abrt, _4 = self._msgs_registered[msg_class]
if coroutine_abrt is not None:
for key in self._transactions[msg_class]:
for args, kwargs in self._transactions[msg_class][key]:
self._loop.create_task(coroutine_abrt(key, *args, **kwargs))
self._transactions[msg_class] = {}
# 2. Call on_disconnect
await self._on_disconnect()
# 3. Stop tasks
for task in self._restartable_tasks:
task.cancel()
self._restartable_tasks = []
# 4. Restart socket
self._socket.disconnect(self._router_addr)
# 5. Re-do start sequence
    await self.client_start()
```

repo: python-gitlab/python-gitlab | path: gitlab/v4/objects.py | language: python | partition: train
url: https://github.com/python-gitlab/python-gitlab/blob/16de1b03fde3dbbe8f851614dd1d8c09de102fe5/gitlab/v4/objects.py#L3776-L3791

```python
def transfer_project(self, to_namespace, **kwargs):
"""Transfer a project to the given namespace ID
Args:
to_namespace (str): ID or path of the namespace to transfer the
project to
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTransferProjectError: If the project could not be transfered
"""
path = '/projects/%s/transfer' % (self.id,)
self.manager.gitlab.http_put(path,
post_data={"namespace": to_namespace},
                                 **kwargs)
```
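
A hedged usage sketch; the server URL, token, project id, and target namespace are placeholders:

```python
import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='TOKEN')
project = gl.projects.get(42)               # hypothetical project id
project.transfer_project('new-namespace')   # move it under another namespace
```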

repo: pennlabs/penn-sdk-python | path: penn/studyspaces.py | language: python | partition: train
url: https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/studyspaces.py#L30-L48

```python
def _obtain_token(self):
"""Obtain an auth token from client id and client secret."""
# don't renew token if hasn't expired yet
if self.expiration and self.expiration > datetime.datetime.now():
return
resp = requests.post("{}/1.1/oauth/token".format(API_URL), data={
"client_id": self.client_id,
"client_secret": self.client_secret,
"grant_type": "client_credentials"
}).json()
if "error" in resp:
raise APIError("LibCal Auth Failed: {}, {}".format(resp["error"], resp.get("error_description")))
self.expiration = datetime.datetime.now() + datetime.timedelta(seconds=resp["expires_in"])
self.token = resp["access_token"]
    print(self.token)
```

repo: rwl/godot | path: godot/run.py | language: python | partition: test
url: https://github.com/rwl/godot/blob/013687c9e8983d2aa2ceebb8a76c5c4f1e37c90f/godot/run.py#L25-L35

```python
def main():
""" Runs Godot.
"""
application = GodotApplication( id="godot",
plugins=[CorePlugin(),
PuddlePlugin(),
WorkbenchPlugin(),
ResourcePlugin(),
GodotPlugin()] )
    application.run()
```

repo: peri-source/peri | path: peri/conf.py | language: python | partition: valid
url: https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/conf.py#L55-L61

```python
def read_environment():
""" Read all environment variables to see if they contain PERI """
out = {}
for k,v in iteritems(os.environ):
if transform(k) in default_conf:
out[transform(k)] = v
    return out
```

repo: mcs07/ChemDataExtractor | path: chemdataextractor/cli/__init__.py | language: python | partition: train
url: https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/__init__.py#L60-L66

```python
def read(ctx, input, output):
"""Output processed document elements."""
log.info('chemdataextractor.read')
log.info('Reading %s' % input.name)
doc = Document.from_file(input)
for element in doc.elements:
        output.write(u'%s : %s\n=====\n' % (element.__class__.__name__, six.text_type(element)))
```
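
A programmatic sketch of what this click command does, bypassing the CLI; the input file name is a placeholder:

```python
from chemdataextractor import Document

with open('paper.html', 'rb') as f:        # placeholder file
    doc = Document.from_file(f)
for element in doc.elements:
    print('%s : %s' % (element.__class__.__name__, element))
```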

repo: PaulHancock/Aegean | path: AegeanTools/angle_tools.py | language: python | partition: train
url: https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/angle_tools.py#L38-L59

```python
def dec2dec(dec):
"""
    Convert a sexagesimal Dec string into a float in degrees.
Parameters
----------
dec : string
        A sexagesimal string representing the Dec.
        Expected format is `[+- ]dd:mm[:ss.s]`.
        Colons can be replaced with any whitespace character.
Returns
-------
dec : float
The Dec in degrees.
"""
d = dec.replace(':', ' ').split()
if len(d) == 2:
d.append(0.0)
if d[0].startswith('-') or float(d[0]) < 0:
return float(d[0]) - float(d[1]) / 60.0 - float(d[2]) / 3600.0
    return float(d[0]) + float(d[1]) / 60.0 + float(d[2]) / 3600.0
```
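
A worked check of the arithmetic, with illustrative inputs:

```python
dec2dec('-30:15:30')  # -30 - 15/60 - 30/3600 = -30.258333...
dec2dec('12:30')      # missing seconds default to 0 -> 12.5
```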

repo: spencerahill/aospy | path: aospy/calc.py | language: python | partition: train
url: https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L297-L329

```python
def _get_input_data(self, var, start_date, end_date):
"""Get the data for a single variable over the desired date range."""
logging.info(self._print_verbose("Getting input data:", var))
if isinstance(var, (float, int)):
return var
else:
cond_pfull = ((not hasattr(self, internal_names.PFULL_STR))
and var.def_vert and
self.dtype_in_vert == internal_names.ETA_STR)
data = self.data_loader.recursively_compute_variable(
var, start_date, end_date, self.time_offset, self.model,
**self.data_loader_attrs)
name = data.name
data = self._add_grid_attributes(data.to_dataset(name=data.name))
data = data[name]
if cond_pfull:
try:
self.pfull_coord = data[internal_names.PFULL_STR]
except KeyError:
pass
# Force all data to be at full pressure levels, not half levels.
bool_to_pfull = (self.dtype_in_vert == internal_names.ETA_STR and
var.def_vert == internal_names.PHALF_STR)
if bool_to_pfull:
data = utils.vertcoord.to_pfull_from_phalf(data,
self.pfull_coord)
if var.def_time:
# Restrict to the desired dates within each year.
if self.dtype_in_time != 'av':
return self._to_desired_dates(data)
else:
            return data
```

repo: thespacedoctor/polyglot | path: polyglot/printpdf.py | language: python | partition: train
url: https://github.com/thespacedoctor/polyglot/blob/98038d746aa67e343b73b3ccee1e02d31dab81ec/polyglot/printpdf.py#L169-L203

```python
def _print_original_webpage(
self):
"""*print the original webpage*
**Return:**
- ``pdfPath`` -- the path to the generated PDF
"""
self.log.debug('starting the ``_print_original_webpage`` method')
if not self.title:
r = requests.get(self.url)
title = bs4.BeautifulSoup(r.text).title.text
print title
else:
title = self.title
# CONVERT TO PDF WITH ELECTON PDF
url = self.url
pdfPath = self.folderpath + "/" + title + self.append + ".pdf"
electron = self.settings["executables"]["electron path"]
cmd = """%(electron)s -i "%(url)s" -o "%(pdfPath)s" --printBackground """ % locals()
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = p.communicate()
self.log.debug('output: %(stdout)s' % locals())
if len(stderr):
print stderr
exists = os.path.exists(pdfPath)
if not exists:
print "%(pdfPath)s was not generated for some reason - please investigate" % locals()
sys.exit(0)
self.log.debug('completed the ``_print_original_webpage`` method')
    return pdfPath
```

repo: raiden-network/raiden | path: raiden/waiting.py | language: python | partition: train
url: https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/waiting.py#L70-L105

```python
def wait_for_participant_newbalance(
        raiden: 'RaidenService',
        payment_network_id: PaymentNetworkID,
        token_address: TokenAddress,
        partner_address: Address,
        target_address: Address,
        target_balance: TokenAmount,
        retry_timeout: float,
) -> None:
    """Wait until a given channel's balance exceeds the target balance.
Note:
This does not time out, use gevent.Timeout.
"""
if target_address == raiden.address:
balance = lambda channel_state: channel_state.our_state.contract_balance
elif target_address == partner_address:
balance = lambda channel_state: channel_state.partner_state.contract_balance
else:
raise ValueError('target_address must be one of the channel participants')
channel_state = views.get_channelstate_for(
views.state_from_raiden(raiden),
payment_network_id,
token_address,
partner_address,
)
while balance(channel_state) < target_balance:
gevent.sleep(retry_timeout)
channel_state = views.get_channelstate_for(
views.state_from_raiden(raiden),
payment_network_id,
token_address,
partner_address,
        )
```
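
A hedged usage sketch; as the docstring notes, callers typically wrap this in `gevent.Timeout`, and every identifier and value below is a placeholder from the caller's context:

```python
import gevent

# raiden service, network/token/partner addresses, and the target
# balance are all placeholders.
with gevent.Timeout(120):
    wait_for_participant_newbalance(
        raiden, payment_network_id, token_address, partner_address,
        target_address=raiden.address, target_balance=1000,
        retry_timeout=0.5,
    )
```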

repo: launchdarkly/relayCommander | path: relay_commander/validator.py | language: python | partition: train
url: https://github.com/launchdarkly/relayCommander/blob/eee7fa22f04edc3854dd53c3ec2db8c599ad1e89/relay_commander/validator.py#L61-L74

```python
def valid_env_vars() -> bool:
"""Validate that required env vars exist.
:returns: True if required env vars exist.
.. versionadded:: 0.0.12
"""
for envvar in _REQUIRED_ENV_VARS:
try:
_check_env_var(envvar)
except KeyError as ex:
LOG.error(ex)
sys.exit(1)
    return True
```

repo: cackharot/suds-py3 | path: suds/bindings/binding.py | language: python | partition: train
url: https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/bindings/binding.py#L206-L245

```python
def replycomposite(self, rtypes, nodes):
"""
Construct a I{composite} reply. This method is called when it has been
detected that the reply has multiple root nodes.
@param rtypes: A list of known return I{types}.
@type rtypes: [L{suds.xsd.sxbase.SchemaObject},...]
@param nodes: A collection of XML nodes.
@type nodes: [L{Element},...]
@return: The I{unmarshalled} composite object.
@rtype: L{Object},...
"""
dictionary = {}
for rt in rtypes:
dictionary[rt.name] = rt
unmarshaller = self.unmarshaller()
composite = Factory.object('reply')
for node in nodes:
tag = node.name
rt = dictionary.get(tag, None)
if rt is None:
if node.get('id') is None:
raise Exception('<%s/> not mapped to message part' % tag)
else:
continue
resolved = rt.resolve(nobuiltin=True)
sobject = unmarshaller.process(node, resolved)
value = getattr(composite, tag, None)
if value is None:
if rt.unbounded():
value = []
setattr(composite, tag, value)
value.append(sobject)
else:
setattr(composite, tag, sobject)
else:
if not isinstance(value, list):
value = [value, ]
setattr(composite, tag, value)
value.append(sobject)
    return composite
```

repo: CyberZHG/keras-word-char-embd | path: keras_wc_embd/wrapper.py | language: python | partition: train
url: https://github.com/CyberZHG/keras-word-char-embd/blob/cca6ddff01b6264dd0d12613bb9ed308e1367b8c/keras_wc_embd/wrapper.py#L30-L36

```python
def update_dicts(self, sentence):
"""Add new sentence to generate dictionaries.
:param sentence: A list of strings representing the sentence.
"""
self.dict_generator(sentence=sentence)
    self.word_dict, self.char_dict = None, None
```

repo: twidi/py-dataql | path: dataql/solvers/filters.py | language: python | partition: train
url: https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/solvers/filters.py#L153-L187

```python
def solve(self, value, filter_):
"""Returns the value of an attribute of the value, or the result of a call to a function.
Arguments
---------
value : ?
A value to solve in combination with the given filter.
filter_ : dataql.resource.Filter
An instance of ``Filter`` to solve with the given value.
Returns
-------
Depending on the source, the filter may ask for an attribute of the value, or for the
result of a call to a standalone function taking the value as first argument.
This method returns this attribute or result.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> from datetime import date
>>> registry.register(date, ['day', 'strftime'])
>>> solver = FilterSolver(registry)
>>> solver.solve(date(2015, 6, 1), Filter(name='day'))
1
>>> from dataql.resources import PosArg
>>> solver.solve(date(2015, 6, 1), Filter(name='strftime', args=[PosArg('%F')]))
'2015-06-01'
"""
args, kwargs = filter_.get_args_and_kwargs()
source = self.registry[value]
    return source.solve(value, filter_.name, args, kwargs)
```

repo: XuShaohua/bcloud | path: bcloud/DownloadPage.py | language: python | partition: train
url: https://github.com/XuShaohua/bcloud/blob/4b54e0fdccf2b3013285fef05c97354cfa31697b/bcloud/DownloadPage.py#L288-L318

```python
def init_db(self):
    '''This task database is only read in when the program starts, and is
    exported when it closes. Gtk has no SQLite integration as convenient as
    Qt's, so all the data must be read into a liststore.
'''
cache_path = os.path.join(Config.CACHE_DIR,
self.app.profile['username'])
if not os.path.exists(cache_path):
os.makedirs(cache_path, exist_ok=True)
db = os.path.join(cache_path, TASK_FILE)
self.conn = sqlite3.connect(db)
self.cursor = self.conn.cursor()
sql = '''CREATE TABLE IF NOT EXISTS tasks (
name CHAR NOT NULL,
path CHAR NOT NULL,
fsid CHAR NOT NULL,
size INTEGER NOT NULL,
currsize INTEGER NOT NULL,
link CHAR,
isdir INTEGER,
savename CHAR NOT NULL,
savedir CHAR NOT NULL,
state INT NOT NULL,
statename CHAR NOT NULL,
humansize CHAR NOT NULL,
percent INT NOT NULL,
tooltip CHAR
)
'''
    self.cursor.execute(sql)
```

repo: sam-cox/pytides | path: pytides/tide.py | language: python | partition: train
url: https://github.com/sam-cox/pytides/blob/63a2507299002f1979ea55a17a82561158d685f7/pytides/tide.py#L271-L406

```python
def decompose(
cls,
heights,
t = None,
t0 = None,
interval = None,
constituents = constituent.noaa,
initial = None,
n_period = 2,
callback = None,
full_output = False
):
"""
Return an instance of Tide which has been fitted to a series of tidal observations.
Arguments:
It is not necessary to provide t0 or interval if t is provided.
heights -- ndarray of tidal observation heights
t -- ndarray of tidal observation times
t0 -- datetime representing the time at which heights[0] was recorded
interval -- hourly interval between readings
constituents -- list of constituents to use in the fit (default: constituent.noaa)
initial -- optional Tide instance to use as first guess for least squares solver
n_period -- only include constituents which complete at least this many periods (default: 2)
callback -- optional function to be called at each iteration of the solver
full_output -- whether to return the output of scipy's leastsq solver (default: False)
"""
if t is not None:
if isinstance(t[0], datetime):
hours = Tide._hours(t[0], t)
t0 = t[0]
elif t0 is not None:
hours = t
else:
raise ValueError("t can be an array of datetimes, or an array "
"of hours since t0 in which case t0 must be "
"specified.")
elif None not in [t0, interval]:
hours = np.arange(len(heights)) * interval
else:
raise ValueError("Must provide t(datetimes), or t(hours) and "
"t0(datetime), or interval(hours) and t0(datetime) "
"so that each height can be identified with an "
"instant in time.")
#Remove duplicate constituents (those which travel at exactly the same
#speed, irrespective of phase)
constituents = list(OrderedDict.fromkeys(constituents))
#No need for least squares to find the mean water level constituent z0,
#work relative to mean
constituents = [c for c in constituents if not c == constituent._Z0]
z0 = np.mean(heights)
heights = heights - z0
#Only analyse frequencies which complete at least n_period cycles over
#the data period.
constituents = [
c for c in constituents
if 360.0 * n_period < hours[-1] * c.speed(astro(t0))
]
n = len(constituents)
sort = np.argsort(hours)
hours = hours[sort]
heights = heights[sort]
#We partition our time/height data into intervals over which we consider
#the values of u and f to assume a constant value (that is, their true
#value at the midpoint of the interval). Constituent
#speeds change much more slowly than the node factors, so we will
#consider these constant and equal to their speed at t0, regardless of
#the length of the time series.
partition = 240.0
t = Tide._partition(hours, partition)
times = Tide._times(t0, [(i + 0.5)*partition for i in range(len(t))])
speed, u, f, V0 = Tide._prepare(constituents, t0, times, radians = True)
#Residual to be minimised by variation of parameters (amplitudes, phases)
def residual(hp):
H, p = hp[:n, np.newaxis], hp[n:, np.newaxis]
s = np.concatenate([
Tide._tidal_series(t_i, H, p, speed, u_i, f_i, V0)
for t_i, u_i, f_i in izip(t, u, f)
])
res = heights - s
if callback:
callback(res)
return res
#Analytic Jacobian of the residual - this makes solving significantly
#faster than just using gradient approximation, especially with many
#measurements / constituents.
def D_residual(hp):
H, p = hp[:n, np.newaxis], hp[n:, np.newaxis]
ds_dH = np.concatenate([
f_i*np.cos(speed*t_i+u_i+V0-p)
for t_i, u_i, f_i in izip(t, u, f)],
axis = 1)
ds_dp = np.concatenate([
H*f_i*np.sin(speed*t_i+u_i+V0-p)
for t_i, u_i, f_i in izip(t, u, f)],
axis = 1)
return np.append(-ds_dH, -ds_dp, axis=0)
#Initial guess for solver, haven't done any analysis on this since the
#solver seems to converge well regardless of the initial guess We do
#however scale the initial amplitude guess with some measure of the
#variation
amplitudes = np.ones(n) * (np.sqrt(np.dot(heights, heights)) / len(heights))
phases = np.ones(n)
if initial:
for (c0, amplitude, phase) in initial.model:
for i, c in enumerate(constituents):
if c0 == c:
amplitudes[i] = amplitude
phases[i] = d2r*phase
initial = np.append(amplitudes, phases)
lsq = leastsq(residual, initial, Dfun=D_residual, col_deriv=True, ftol=1e-7)
model = np.zeros(1+n, dtype=cls.dtype)
model[0] = (constituent._Z0, z0, 0)
model[1:]['constituent'] = constituents[:]
model[1:]['amplitude'] = lsq[0][:n]
model[1:]['phase'] = lsq[0][n:]
if full_output:
return cls(model = model, radians = True), lsq
    return cls(model = model, radians = True)
```
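
A hedged fitting sketch; the observation array is a stand-in for real hourly water-level readings:

```python
import numpy as np
from datetime import datetime

t0 = datetime(2015, 1, 1)
heights = np.random.randn(24 * 30)  # placeholder: 30 days of hourly levels
tide = Tide.decompose(heights, t0=t0, interval=1.0)
```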

repo: diffeo/rejester | path: rejester/workers.py | language: python | partition: train
url: https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/workers.py#L934-L948

```python
def stop_gracefully(self):
'''Refuse to start more processes.
This runs in response to SIGINT or SIGTERM; if this isn't a
background process, control-C and a normal ``kill`` command
cause this.
'''
if self.shutting_down:
self.log(logging.INFO,
'second shutdown request, shutting down now')
self.scram()
else:
self.log(logging.INFO, 'shutting down after current jobs finish')
        self.shutting_down = True
```

repo: ashmastaflash/kal-wrapper | path: kalibrate/fn.py | language: python | partition: train
url: https://github.com/ashmastaflash/kal-wrapper/blob/80ee03ab7bd3172ac26b769d6b442960f3424b0e/kalibrate/fn.py#L28-L35

```python
def build_kal_scan_channel_string(kal_bin, channel, args):
"""Return string for CLI invocation of kal, for channel scan."""
option_mapping = {"gain": "-g",
"device": "-d",
"error": "-e"}
base_string = "%s -v -c %s" % (kal_bin, channel)
base_string += options_string_builder(option_mapping, args)
    return(base_string)
```
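
Illustrative behavior only; `options_string_builder` is not shown here, so how `args` is consumed is an assumption:

```python
# Assuming options_string_builder appends each mapped flag present in args.
args = {"gain": 40, "device": 0}
build_kal_scan_channel_string("/usr/bin/kal", 17, args)
# -> roughly '/usr/bin/kal -v -c 17 -g 40 -d 0'
```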

repo: Julius2342/pyvlx | path: pyvlx/frames/frame_command_send.py | language: python | partition: train
url: https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frames/frame_command_send.py#L52-L65

```python
def from_payload(self, payload):
"""Init frame from binary data."""
self.session_id = payload[0]*256 + payload[1]
self.originator = Originator(payload[2])
self.priority = Priority(payload[3])
len_node_ids = payload[41]
if len_node_ids > 20:
raise PyVLXException("command_send_request_wrong_node_length")
self.node_ids = []
for i in range(len_node_ids):
self.node_ids.append(payload[42] + i)
self.parameter = Parameter(payload[7:9]) | [
"def",
"from_payload",
"(",
"self",
",",
"payload",
")",
":",
"self",
".",
"session_id",
"=",
"payload",
"[",
"0",
"]",
"*",
"256",
"+",
"payload",
"[",
"1",
"]",
"self",
".",
"originator",
"=",
"Originator",
"(",
"payload",
"[",
"2",
"]",
")",
"self",
".",
"priority",
"=",
"Priority",
"(",
"payload",
"[",
"3",
"]",
")",
"len_node_ids",
"=",
"payload",
"[",
"41",
"]",
"if",
"len_node_ids",
">",
"20",
":",
"raise",
"PyVLXException",
"(",
"\"command_send_request_wrong_node_length\"",
")",
"self",
".",
"node_ids",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len_node_ids",
")",
":",
"self",
".",
"node_ids",
".",
"append",
"(",
"payload",
"[",
"42",
"]",
"+",
"i",
")",
"self",
".",
"parameter",
"=",
"Parameter",
"(",
"payload",
"[",
"7",
":",
"9",
"]",
")"
] | Init frame from binary data. | [
"Init",
"frame",
"from",
"binary",
"data",
"."
] | python | train |
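An illustration of the byte offsets parsed above, using a hand-built payload. The values at offsets 2 and 3 would need to be valid Originator/Priority enum values in real use; the call itself is left commented because those enums are not shown here.

payload = bytearray(64)
payload[0:2] = bytes((0x12, 0x34))  # session_id -> 0x1234
payload[41] = 2                     # two node ids follow
payload[42] = 7                     # first node id
payload[43] = 9                     # second node id
# frame.from_payload(payload) would then yield node_ids == [7, 9]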
inveniosoftware/invenio-oauthclient | invenio_oauthclient/contrib/github.py | https://github.com/inveniosoftware/invenio-oauthclient/blob/2500dc6935738107617aeade79e050d7608004bb/invenio_oauthclient/contrib/github.py#L110-L113 | def _extract_email(gh):
"""Get user email from github."""
return next(
(x.email for x in gh.emails() if x.verified and x.primary), None) | [
"def",
"_extract_email",
"(",
"gh",
")",
":",
"return",
"next",
"(",
"(",
"x",
".",
"email",
"for",
"x",
"in",
"gh",
".",
"emails",
"(",
")",
"if",
"x",
".",
"verified",
"and",
"x",
".",
"primary",
")",
",",
"None",
")"
] | Get user email from github. | [
"Get",
"user",
"email",
"from",
"github",
"."
] | python | train |
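The function is a one-liner around the next(generator, default) idiom; a self-contained demonstration with stand-in objects:

from collections import namedtuple

Email = namedtuple("Email", "email verified primary")
emails = [Email("a@x.org", True, False), Email("b@x.org", True, True)]
primary = next((e.email for e in emails if e.verified and e.primary), None)
# primary == "b@x.org"; None when no verified primary address exists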
numba/llvmlite | llvmlite/binding/module.py | https://github.com/numba/llvmlite/blob/fcadf8af11947f3fd041c5d6526c5bf231564883/llvmlite/binding/module.py#L184-L190 | def functions(self):
"""
Return an iterator over this module's functions.
The iterator will yield a ValueRef for each function.
"""
it = ffi.lib.LLVMPY_ModuleFunctionsIter(self)
return _FunctionsIterator(it, dict(module=self)) | [
"def",
"functions",
"(",
"self",
")",
":",
"it",
"=",
"ffi",
".",
"lib",
".",
"LLVMPY_ModuleFunctionsIter",
"(",
"self",
")",
"return",
"_FunctionsIterator",
"(",
"it",
",",
"dict",
"(",
"module",
"=",
"self",
")",
")"
] | Return an iterator over this module's functions.
The iterator will yield a ValueRef for each function. | [
"Return",
"an",
"iterator",
"over",
"this",
"module",
"s",
"functions",
".",
"The",
"iterator",
"will",
"yield",
"a",
"ValueRef",
"for",
"each",
"function",
"."
] | python | train |
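A hedged usage sketch against llvmlite's public binding API (in released llvmlite, functions is exposed as a property on ModuleRef); the IR string is a minimal hand-written module.

import llvmlite.binding as llvm

llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()
mod = llvm.parse_assembly("define i32 @answer() { ret i32 42 }")
for fn in mod.functions:
    print(fn.name)  # -> answer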
asifpy/django-crudbuilder | crudbuilder/helpers.py | https://github.com/asifpy/django-crudbuilder/blob/9de1c6fa555086673dd7ccc351d4b771c6192489/crudbuilder/helpers.py#L19-L80 | def plural(text):
"""
>>> plural('activity')
'activities'
"""
aberrant = {
'knife': 'knives',
'self': 'selves',
'elf': 'elves',
'life': 'lives',
'hoof': 'hooves',
'leaf': 'leaves',
'echo': 'echoes',
'embargo': 'embargoes',
'hero': 'heroes',
'potato': 'potatoes',
'tomato': 'tomatoes',
'torpedo': 'torpedoes',
'veto': 'vetoes',
'child': 'children',
'woman': 'women',
'man': 'men',
'person': 'people',
'goose': 'geese',
'mouse': 'mice',
'barracks': 'barracks',
'deer': 'deer',
'nucleus': 'nuclei',
'syllabus': 'syllabi',
'focus': 'foci',
'fungus': 'fungi',
'cactus': 'cacti',
'phenomenon': 'phenomena',
'index': 'indices',
'appendix': 'appendices',
'criterion': 'criteria',
}
if text in aberrant:
result = '%s' % aberrant[text]
else:
postfix = 's'
if len(text) > 2:
vowels = 'aeiou'
if text[-2:] in ('ch', 'sh'):
postfix = 'es'
elif text[-1:] == 'y':
if (text[-2:-1] in vowels) or (text[0] in string.ascii_uppercase):
postfix = 's'
else:
postfix = 'ies'
text = text[:-1]
elif text[-2:] == 'is':
postfix = 'es'
text = text[:-2]
elif text[-1:] in ('s', 'z', 'x'):
postfix = 'es'
result = '%s%s' % (text, postfix)
return result | [
"def",
"plural",
"(",
"text",
")",
":",
"aberrant",
"=",
"{",
"'knife'",
":",
"'knives'",
",",
"'self'",
":",
"'selves'",
",",
"'elf'",
":",
"'elves'",
",",
"'life'",
":",
"'lives'",
",",
"'hoof'",
":",
"'hooves'",
",",
"'leaf'",
":",
"'leaves'",
",",
"'echo'",
":",
"'echoes'",
",",
"'embargo'",
":",
"'embargoes'",
",",
"'hero'",
":",
"'heroes'",
",",
"'potato'",
":",
"'potatoes'",
",",
"'tomato'",
":",
"'tomatoes'",
",",
"'torpedo'",
":",
"'torpedoes'",
",",
"'veto'",
":",
"'vetoes'",
",",
"'child'",
":",
"'children'",
",",
"'woman'",
":",
"'women'",
",",
"'man'",
":",
"'men'",
",",
"'person'",
":",
"'people'",
",",
"'goose'",
":",
"'geese'",
",",
"'mouse'",
":",
"'mice'",
",",
"'barracks'",
":",
"'barracks'",
",",
"'deer'",
":",
"'deer'",
",",
"'nucleus'",
":",
"'nuclei'",
",",
"'syllabus'",
":",
"'syllabi'",
",",
"'focus'",
":",
"'foci'",
",",
"'fungus'",
":",
"'fungi'",
",",
"'cactus'",
":",
"'cacti'",
",",
"'phenomenon'",
":",
"'phenomena'",
",",
"'index'",
":",
"'indices'",
",",
"'appendix'",
":",
"'appendices'",
",",
"'criterion'",
":",
"'criteria'",
",",
"}",
"if",
"text",
"in",
"aberrant",
":",
"result",
"=",
"'%s'",
"%",
"aberrant",
"[",
"text",
"]",
"else",
":",
"postfix",
"=",
"'s'",
"if",
"len",
"(",
"text",
")",
">",
"2",
":",
"vowels",
"=",
"'aeiou'",
"if",
"text",
"[",
"-",
"2",
":",
"]",
"in",
"(",
"'ch'",
",",
"'sh'",
")",
":",
"postfix",
"=",
"'es'",
"elif",
"text",
"[",
"-",
"1",
":",
"]",
"==",
"'y'",
":",
"if",
"(",
"text",
"[",
"-",
"2",
":",
"-",
"1",
"]",
"in",
"vowels",
")",
"or",
"(",
"text",
"[",
"0",
"]",
"in",
"string",
".",
"ascii_uppercase",
")",
":",
"postfix",
"=",
"'s'",
"else",
":",
"postfix",
"=",
"'ies'",
"text",
"=",
"text",
"[",
":",
"-",
"1",
"]",
"elif",
"text",
"[",
"-",
"2",
":",
"]",
"==",
"'is'",
":",
"postfix",
"=",
"'es'",
"text",
"=",
"text",
"[",
":",
"-",
"2",
"]",
"elif",
"text",
"[",
"-",
"1",
":",
"]",
"in",
"(",
"'s'",
",",
"'z'",
",",
"'x'",
")",
":",
"postfix",
"=",
"'es'",
"result",
"=",
"'%s%s'",
"%",
"(",
"text",
",",
"postfix",
")",
"return",
"result"
] | >>> plural('activity')
'activities' | [
">>>",
"plural",
"(",
"activity",
")",
"activities"
] | python | train |
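A few more invocations tracing the branches above (lookup table, -ch, -is, -x, and consonant + y), all derivable from the code as shown:

assert plural('knife') == 'knives'         # aberrant lookup table
assert plural('church') == 'churches'      # ends in 'ch'
assert plural('analysis') == 'analyses'    # ends in 'is'
assert plural('box') == 'boxes'            # ends in 'x'
assert plural('activity') == 'activities'  # consonant + 'y'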
biolink/ontobio | bin/qbiogolr.py | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/bin/qbiogolr.py#L26-L155 | def main():
"""
Wrapper for OGR
"""
parser = argparse.ArgumentParser(
description='Command line interface to python-ontobio.golr library'
"""
Provides command line interface onto the ontobio.golr python library, a high level
abstraction layer over Monarch and GO solr indices.
""",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-r', '--resource', type=str, required=False,
help='Name of ontology')
parser.add_argument('-d', '--display', type=str, default='o', required=False,
help='What to display: some combination of o, s, r. o=object ancestors, s=subject ancestors. If r present, draws s<->o relations ')
parser.add_argument('-o', '--outfile', type=str, required=False,
help='Path to output file')
parser.add_argument('-t', '--to', type=str, required=False,
help='Output to (tree, dot, ...)')
parser.add_argument('-C', '--category', type=str, required=False,
help='Category')
parser.add_argument('-c', '--container_properties', nargs='*', type=str, required=False,
help='Properties to nest in graph')
parser.add_argument('-s', '--species', type=str, required=False,
help='NCBITaxon ID')
parser.add_argument('-e', '--evidence', type=str, required=False,
help='ECO ID')
parser.add_argument('-G', '--graph', type=str, default='', required=False,
help='Graph type. m=minimal')
parser.add_argument('-S', '--slim', nargs='*', type=str, required=False,
help='Slim IDs')
parser.add_argument('-M', '--mapids', type=str, required=False,
help='Map identifiers to this ID space, e.g. ENSEMBL')
parser.add_argument('-p', '--properties', nargs='*', type=str, required=False,
help='Properties')
parser.add_argument('-v', '--verbosity', default=0, action='count',
help='Increase output verbosity')
parser.add_argument('ids',nargs='*')
# ontology
args = parser.parse_args()
if args.verbosity >= 2:
logging.basicConfig(level=logging.DEBUG)
elif args.verbosity == 1:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.WARNING)
logging.info("Welcome!")
ont = None
g = None
handle = args.resource
if handle is not None:
logging.info("Handle: {}".format(handle))
factory = OntologyFactory()
logging.info("Factory: {}".format(factory))
ont = factory.create(handle)
logging.info("Created ont: {}".format(ont))
g = ont.get_filtered_graph(relations=args.properties)
w = GraphRenderer.create(args.to)
nodes = set()
display = args.display
# query all IDs, gathering associations
assocs = []
for id in args.ids:
this_assocs, facets = search_golr_wrap(id,
args.category,
subject_taxon=args.species,
rows=1000,
slim=args.slim,
evidence=args.evidence,
map_identifiers=args.mapids)
assocs += this_assocs
logging.info("Num assocs: {}".format(len(assocs)))
for a in assocs:
print("{}\t{}\t{}\t{}".format(a['subject'],
a['subject_label'],
a['relation'],
";".join(a['objects'])))
if ont is not None:
# gather all ontology classes used
for a in assocs:
objs = a['objects']
if display.find('r') > -1:
pass
if display.find('o') > -1:
for obj in objs:
nodes.add(obj)
if ont is not None:
nodes.update(ont.ancestors(obj))
if display.find('s') > -1:
sub = a['subject']
nodes.add(sub)
if ont is not None:
nodes.update(ont.ancestors(sub))
# create a subgraph
subg = g.subgraph(nodes)
# optionally add edges between subj and obj nodes
if display.find('r') > -1:
for a in assocs:
rel = a['relation']
sub = a['subject']
objs = a['objects']
if rel is None:
rel = 'rdfs:seeAlso'
for obj in objs:
logging.info("Adding assoc rel {} {} {}".format(sub,obj,rel))
subg.add_edge(obj,sub,pred=rel)
# display tree/graph
show_graph(subg, nodes, objs, args) | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Command line interface to python-ontobio.golr library'",
"\"\"\"\n\n Provides command line interface onto the ontobio.golr python library, a high level\n abstraction layer over Monarch and GO solr indices.\n \"\"\"",
",",
"formatter_class",
"=",
"argparse",
".",
"RawTextHelpFormatter",
")",
"parser",
".",
"add_argument",
"(",
"'-r'",
",",
"'--resource'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'Name of ontology'",
")",
"parser",
".",
"add_argument",
"(",
"'-d'",
",",
"'--display'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'o'",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'What to display: some combination of o, s, r. o=object ancestors, s=subject ancestors. If r present, draws s<->o relations '",
")",
"parser",
".",
"add_argument",
"(",
"'-o'",
",",
"'--outfile'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'Path to output file'",
")",
"parser",
".",
"add_argument",
"(",
"'-t'",
",",
"'--to'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'Output to (tree, dot, ...)'",
")",
"parser",
".",
"add_argument",
"(",
"'-C'",
",",
"'--category'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'Category'",
")",
"parser",
".",
"add_argument",
"(",
"'-c'",
",",
"'--container_properties'",
",",
"nargs",
"=",
"'*'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'Properties to nest in graph'",
")",
"parser",
".",
"add_argument",
"(",
"'-s'",
",",
"'--species'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'NCBITaxon ID'",
")",
"parser",
".",
"add_argument",
"(",
"'-e'",
",",
"'--evidence'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'ECO ID'",
")",
"parser",
".",
"add_argument",
"(",
"'-G'",
",",
"'--graph'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"''",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'Graph type. m=minimal'",
")",
"parser",
".",
"add_argument",
"(",
"'-S'",
",",
"'--slim'",
",",
"nargs",
"=",
"'*'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'Slim IDs'",
")",
"parser",
".",
"add_argument",
"(",
"'-M'",
",",
"'--mapids'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'Map identifiers to this ID space, e.g. ENSEMBL'",
")",
"parser",
".",
"add_argument",
"(",
"'-p'",
",",
"'--properties'",
",",
"nargs",
"=",
"'*'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'Properties'",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--verbosity'",
",",
"default",
"=",
"0",
",",
"action",
"=",
"'count'",
",",
"help",
"=",
"'Increase output verbosity'",
")",
"parser",
".",
"add_argument",
"(",
"'ids'",
",",
"nargs",
"=",
"'*'",
")",
"# ontology",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"args",
".",
"verbosity",
">=",
"2",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
")",
"elif",
"args",
".",
"verbosity",
"==",
"1",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"INFO",
")",
"else",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"WARNING",
")",
"logging",
".",
"info",
"(",
"\"Welcome!\"",
")",
"ont",
"=",
"None",
"g",
"=",
"None",
"handle",
"=",
"args",
".",
"resource",
"if",
"handle",
"is",
"not",
"None",
":",
"logging",
".",
"info",
"(",
"\"Handle: {}\"",
".",
"format",
"(",
"handle",
")",
")",
"factory",
"=",
"OntologyFactory",
"(",
")",
"logging",
".",
"info",
"(",
"\"Factory: {}\"",
".",
"format",
"(",
"factory",
")",
")",
"ont",
"=",
"factory",
".",
"create",
"(",
"handle",
")",
"logging",
".",
"info",
"(",
"\"Created ont: {}\"",
".",
"format",
"(",
"ont",
")",
")",
"g",
"=",
"ont",
".",
"get_filtered_graph",
"(",
"relations",
"=",
"args",
".",
"properties",
")",
"w",
"=",
"GraphRenderer",
".",
"create",
"(",
"args",
".",
"to",
")",
"nodes",
"=",
"set",
"(",
")",
"display",
"=",
"args",
".",
"display",
"# query all IDs, gathering associations",
"assocs",
"=",
"[",
"]",
"for",
"id",
"in",
"args",
".",
"ids",
":",
"this_assocs",
",",
"facets",
"=",
"search_golr_wrap",
"(",
"id",
",",
"args",
".",
"category",
",",
"subject_taxon",
"=",
"args",
".",
"species",
",",
"rows",
"=",
"1000",
",",
"slim",
"=",
"args",
".",
"slim",
",",
"evidence",
"=",
"args",
".",
"evidence",
",",
"map_identifiers",
"=",
"args",
".",
"mapids",
")",
"assocs",
"+=",
"this_assocs",
"logging",
".",
"info",
"(",
"\"Num assocs: {}\"",
".",
"format",
"(",
"len",
"(",
"assocs",
")",
")",
")",
"for",
"a",
"in",
"assocs",
":",
"print",
"(",
"\"{}\\t{}\\t{}\\t{}\"",
".",
"format",
"(",
"a",
"[",
"'subject'",
"]",
",",
"a",
"[",
"'subject_label'",
"]",
",",
"a",
"[",
"'relation'",
"]",
",",
"\";\"",
".",
"join",
"(",
"a",
"[",
"'objects'",
"]",
")",
")",
")",
"if",
"ont",
"is",
"not",
"None",
":",
"# gather all ontology classes used",
"for",
"a",
"in",
"assocs",
":",
"objs",
"=",
"a",
"[",
"'objects'",
"]",
"if",
"display",
".",
"find",
"(",
"'r'",
")",
">",
"-",
"1",
":",
"pass",
"if",
"display",
".",
"find",
"(",
"'o'",
")",
">",
"-",
"1",
":",
"for",
"obj",
"in",
"objs",
":",
"nodes",
".",
"add",
"(",
"obj",
")",
"if",
"ont",
"is",
"not",
"None",
":",
"nodes",
".",
"update",
"(",
"ont",
".",
"ancestors",
"(",
"obj",
")",
")",
"if",
"display",
".",
"find",
"(",
"'s'",
")",
">",
"-",
"1",
":",
"sub",
"=",
"a",
"[",
"'subject'",
"]",
"nodes",
".",
"add",
"(",
"sub",
")",
"if",
"ont",
"is",
"not",
"None",
":",
"nodes",
".",
"update",
"(",
"ont",
".",
"ancestors",
"(",
"sub",
")",
")",
"# create a subgraph",
"subg",
"=",
"g",
".",
"subgraph",
"(",
"nodes",
")",
"# optionally add edges between subj and obj nodes",
"if",
"display",
".",
"find",
"(",
"'r'",
")",
">",
"-",
"1",
":",
"for",
"a",
"in",
"assocs",
":",
"rel",
"=",
"a",
"[",
"'relation'",
"]",
"sub",
"=",
"a",
"[",
"'subject'",
"]",
"objs",
"=",
"a",
"[",
"'objects'",
"]",
"if",
"rel",
"is",
"None",
":",
"rel",
"=",
"'rdfs:seeAlso'",
"for",
"obj",
"in",
"objs",
":",
"logging",
".",
"info",
"(",
"\"Adding assoc rel {} {} {}\"",
".",
"format",
"(",
"sub",
",",
"obj",
",",
"rel",
")",
")",
"subg",
".",
"add_edge",
"(",
"obj",
",",
"sub",
",",
"pred",
"=",
"rel",
")",
"# display tree/graph",
"show_graph",
"(",
"subg",
",",
"nodes",
",",
"objs",
",",
"args",
")"
] | Wrapper for OGR | [
"Wrapper",
"for",
"OGR"
] | python | train |
tomplus/kubernetes_asyncio | kubernetes_asyncio/client/api/core_v1_api.py | https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/core_v1_api.py#L2363-L2385 | def connect_options_namespaced_pod_proxy(self, name, namespace, **kwargs): # noqa: E501
"""connect_options_namespaced_pod_proxy # noqa: E501
connect OPTIONS requests to proxy of Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_options_namespaced_pod_proxy(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: Path is the URL path to use for the current proxy request to pod.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connect_options_namespaced_pod_proxy_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.connect_options_namespaced_pod_proxy_with_http_info(name, namespace, **kwargs) # noqa: E501
return data | [
"def",
"connect_options_namespaced_pod_proxy",
"(",
"self",
",",
"name",
",",
"namespace",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"connect_options_namespaced_pod_proxy_with_http_info",
"(",
"name",
",",
"namespace",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"connect_options_namespaced_pod_proxy_with_http_info",
"(",
"name",
",",
"namespace",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | connect_options_namespaced_pod_proxy # noqa: E501
connect OPTIONS requests to proxy of Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_options_namespaced_pod_proxy(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: Path is the URL path to use for the current proxy request to pod.
:return: str
If the method is called asynchronously,
returns the request thread. | [
"connect_options_namespaced_pod_proxy",
"#",
"noqa",
":",
"E501"
] | python | train |
pypa/pipenv | pipenv/vendor/cerberus/schema.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/cerberus/schema.py#L116-L133 | def _expand_logical_shortcuts(cls, schema):
""" Expand agglutinated rules in a definition-schema.
:param schema: The schema-definition to expand.
:return: The expanded schema-definition.
"""
def is_of_rule(x):
return isinstance(x, _str_type) and \
x.startswith(('allof_', 'anyof_', 'noneof_', 'oneof_'))
for field in schema:
for of_rule in (x for x in schema[field] if is_of_rule(x)):
operator, rule = of_rule.split('_')
schema[field].update({operator: []})
for value in schema[field][of_rule]:
schema[field][operator].append({rule: value})
del schema[field][of_rule]
return schema | [
"def",
"_expand_logical_shortcuts",
"(",
"cls",
",",
"schema",
")",
":",
"def",
"is_of_rule",
"(",
"x",
")",
":",
"return",
"isinstance",
"(",
"x",
",",
"_str_type",
")",
"and",
"x",
".",
"startswith",
"(",
"(",
"'allof_'",
",",
"'anyof_'",
",",
"'noneof_'",
",",
"'oneof_'",
")",
")",
"for",
"field",
"in",
"schema",
":",
"for",
"of_rule",
"in",
"(",
"x",
"for",
"x",
"in",
"schema",
"[",
"field",
"]",
"if",
"is_of_rule",
"(",
"x",
")",
")",
":",
"operator",
",",
"rule",
"=",
"of_rule",
".",
"split",
"(",
"'_'",
")",
"schema",
"[",
"field",
"]",
".",
"update",
"(",
"{",
"operator",
":",
"[",
"]",
"}",
")",
"for",
"value",
"in",
"schema",
"[",
"field",
"]",
"[",
"of_rule",
"]",
":",
"schema",
"[",
"field",
"]",
"[",
"operator",
"]",
".",
"append",
"(",
"{",
"rule",
":",
"value",
"}",
")",
"del",
"schema",
"[",
"field",
"]",
"[",
"of_rule",
"]",
"return",
"schema"
] | Expand agglutinated rules in a definition-schema.
:param schema: The schema-definition to expand.
:return: The expanded schema-definition. | [
"Expand",
"agglutinated",
"rules",
"in",
"a",
"definition",
"-",
"schema",
"."
] | python | train |
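What the expansion does, re-implemented standalone for illustration (the real classmethod additionally guards on string type via basestring and mutates the schema it is given):

def expand(schema):
    for field, rules in schema.items():
        of_rules = [r for r in list(rules)
                    if r.startswith(('allof_', 'anyof_', 'noneof_', 'oneof_'))]
        for of_rule in of_rules:
            operator, rule = of_rule.split('_')
            rules[operator] = [{rule: v} for v in rules[of_rule]]
            del rules[of_rule]
    return schema

print(expand({'prop': {'anyof_type': ['string', 'integer']}}))
# {'prop': {'anyof': [{'type': 'string'}, {'type': 'integer'}]}}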
dossier/dossier.store | dossier/store/store.py | https://github.com/dossier/dossier.store/blob/b22ffe2470bba9fcc98a30cb55b437bfa1521e7f/dossier/store/store.py#L490-L501 | def _index(self, name):
'''Returns index transforms for ``name``.
:type name: unicode
:rtype: ``{ create |--> function, transform |--> function }``
'''
name = name.decode('utf-8')
try:
return self._indexes[name]
except KeyError:
raise KeyError('Index "%s" has not been registered with '
'this FC store.' % name) | [
"def",
"_index",
"(",
"self",
",",
"name",
")",
":",
"name",
"=",
"name",
".",
"decode",
"(",
"'utf-8'",
")",
"try",
":",
"return",
"self",
".",
"_indexes",
"[",
"name",
"]",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"'Index \"%s\" has not been registered with '",
"'this FC store.'",
"%",
"name",
")"
] | Returns index transforms for ``name``.
:type name: unicode
:rtype: ``{ create |--> function, transform |--> function }`` | [
"Returns",
"index",
"transforms",
"for",
"name",
"."
] | python | test |
zyga/python-glibc | pyglibc/selectors.py | https://github.com/zyga/python-glibc/blob/d6fdb306b123a995471584a5201155c60a34448a/pyglibc/selectors.py#L229-L238 | def get_epoll_events(self):
"""
Create a bit mask using ``EPOLL*`` family of constants.
"""
epoll_events = 0
if self & EVENT_READ:
epoll_events |= select.EPOLLIN
if self & EVENT_WRITE:
epoll_events |= select.EPOLLOUT
return epoll_events | [
"def",
"get_epoll_events",
"(",
"self",
")",
":",
"epoll_events",
"=",
"0",
"if",
"self",
"&",
"EVENT_READ",
":",
"epoll_events",
"|=",
"select",
".",
"EPOLLIN",
"if",
"self",
"&",
"EVENT_WRITE",
":",
"epoll_events",
"|=",
"select",
".",
"EPOLLOUT",
"return",
"epoll_events"
] | Create a bit mask using ``EPOLL*`` family of constants. | [
"Create",
"a",
"bit",
"mask",
"using",
"EPOLL",
"*",
"family",
"of",
"constants",
"."
] | python | train |
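The same mapping applied to a plain int mask; EVENT_READ and EVENT_WRITE values here mirror the stdlib selectors module, and the select.EPOLL* constants are Linux-only.

import select

EVENT_READ, EVENT_WRITE = 1, 2
mask = EVENT_READ | EVENT_WRITE
epoll_events = 0
if mask & EVENT_READ:
    epoll_events |= select.EPOLLIN
if mask & EVENT_WRITE:
    epoll_events |= select.EPOLLOUT
assert epoll_events == select.EPOLLIN | select.EPOLLOUT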
bionikspoon/pureyaml | pureyaml/grammar/productions.py | https://github.com/bionikspoon/pureyaml/blob/784830b907ca14525c4cecdb6ae35306f6f8a877/pureyaml/grammar/productions.py#L275-L287 | def p_scalar_group(self, p):
"""
scalar_group : SCALAR
| scalar_group SCALAR
"""
if len(p) == 2:
p[0] = (str(p[1]),)
if len(p) == 3:
p[0] = p[1] + (str(p[2]),)
if len(p) == 4:
p[0] = p[1] + (str(p[3]),) | [
"def",
"p_scalar_group",
"(",
"self",
",",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
"==",
"2",
":",
"p",
"[",
"0",
"]",
"=",
"(",
"str",
"(",
"p",
"[",
"1",
"]",
")",
",",
")",
"if",
"len",
"(",
"p",
")",
"==",
"3",
":",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
"1",
"]",
"+",
"(",
"str",
"(",
"p",
"[",
"2",
"]",
")",
",",
")",
"if",
"len",
"(",
"p",
")",
"==",
"4",
":",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
"1",
"]",
"+",
"(",
"str",
"(",
"p",
"[",
"3",
"]",
")",
",",
")"
] | scalar_group : SCALAR
| scalar_group SCALAR | [
"scalar_group",
":",
"SCALAR",
"|",
"scalar_group",
"SCALAR"
] | python | train |
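How the left-recursive rule accumulates a tuple of scalars, simulated by hand; note the len(p) == 4 branch can never fire for the two productions listed in the rule's grammar, so only the first two cases are exercised.

group = ('a',)           # scalar_group : SCALAR
group = group + ('b',)   # scalar_group : scalar_group SCALAR
group = group + ('c',)
assert group == ('a', 'b', 'c')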
postlund/pyatv | pyatv/__main__.py | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/__main__.py#L242-L319 | async def cli_handler(loop):
"""Application starts here."""
parser = argparse.ArgumentParser()
parser.add_argument('command', nargs='+',
help='commands, help, ...')
parser.add_argument('--name', help='apple tv name',
dest='name', default='Apple TV')
parser.add_argument('--address', help='device ip address or hostname',
dest='address', default=None)
parser.add_argument('--protocol', action=TransformProtocol,
help='protocol to use (values: dmap, mrp)',
dest='protocol', default=None)
parser.add_argument('--port', help='port when connecting',
dest='port', type=_in_range(0, 65535),
default=0)
parser.add_argument('-t', '--scan-timeout', help='timeout when scanning',
dest='scan_timeout', type=_in_range(1, 100),
metavar='TIMEOUT', default=3)
parser.add_argument('--version', action='version',
help='version of atvremote and pyatv',
version='%(prog)s {0}'.format(const.__version__))
pairing = parser.add_argument_group('pairing')
pairing.add_argument('--remote-name', help='remote pairing name',
dest='remote_name', default='pyatv')
pairing.add_argument('-p', '--pin', help='pairing pin code',
dest='pin_code', metavar='PIN', default=1234,
type=_in_range(0, 9999, allow_none=True))
pairing.add_argument('--pairing-guid',
help='pairing guid (16 chars hex)',
dest='pairing_guid', default=None)
parser.add_argument('-a', '--autodiscover', action='store_true',
help='automatically find a device',
dest='autodiscover', default=False)
parser.add_argument('--device_credentials', help='credentials to device',
dest='device_credentials', default=None)
airplay = parser.add_argument_group('airplay')
airplay.add_argument('--airplay_credentials',
help='credentials for airplay',
dest='airplay_credentials', default=None)
debug = parser.add_argument_group('debugging')
debug.add_argument('-v', '--verbose', help='increase output verbosity',
action='store_true', dest='verbose')
debug.add_argument('--debug', help='print debug information',
action='store_true', dest='debug')
args = parser.parse_args()
loglevel = logging.WARNING
if args.verbose:
loglevel = logging.INFO
if args.debug:
loglevel = logging.DEBUG
logging.basicConfig(level=loglevel,
format='%(levelname)s: %(message)s')
logging.getLogger('requests').setLevel(logging.WARNING)
cmds = retrieve_commands(GlobalCommands)
if args.command[0] in cmds:
glob_cmds = GlobalCommands(args, loop)
return (await _exec_command(
glob_cmds, args.command[0], print_result=False))
if args.autodiscover:
if not await _autodiscover_device(args, loop):
return 1
return await _handle_commands(args, loop)
if args.address:
return await _handle_commands(args, loop)
logging.error('To autodiscover an Apple TV, add -a')
return 1 | [
"async",
"def",
"cli_handler",
"(",
"loop",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'command'",
",",
"nargs",
"=",
"'+'",
",",
"help",
"=",
"'commands, help, ...'",
")",
"parser",
".",
"add_argument",
"(",
"'--name'",
",",
"help",
"=",
"'apple tv name'",
",",
"dest",
"=",
"'name'",
",",
"default",
"=",
"'Apple TV'",
")",
"parser",
".",
"add_argument",
"(",
"'--address'",
",",
"help",
"=",
"'device ip address or hostname'",
",",
"dest",
"=",
"'address'",
",",
"default",
"=",
"None",
")",
"parser",
".",
"add_argument",
"(",
"'--protocol'",
",",
"action",
"=",
"TransformProtocol",
",",
"help",
"=",
"'protocol to use (values: dmap, mrp)'",
",",
"dest",
"=",
"'protocol'",
",",
"default",
"=",
"None",
")",
"parser",
".",
"add_argument",
"(",
"'--port'",
",",
"help",
"=",
"'port when connecting'",
",",
"dest",
"=",
"'port'",
",",
"type",
"=",
"_in_range",
"(",
"0",
",",
"65535",
")",
",",
"default",
"=",
"0",
")",
"parser",
".",
"add_argument",
"(",
"'-t'",
",",
"'--scan-timeout'",
",",
"help",
"=",
"'timeout when scanning'",
",",
"dest",
"=",
"'scan_timeout'",
",",
"type",
"=",
"_in_range",
"(",
"1",
",",
"100",
")",
",",
"metavar",
"=",
"'TIMEOUT'",
",",
"default",
"=",
"3",
")",
"parser",
".",
"add_argument",
"(",
"'--version'",
",",
"action",
"=",
"'version'",
",",
"help",
"=",
"'version of atvremote and pyatv'",
",",
"version",
"=",
"'%(prog)s {0}'",
".",
"format",
"(",
"const",
".",
"__version__",
")",
")",
"pairing",
"=",
"parser",
".",
"add_argument_group",
"(",
"'pairing'",
")",
"pairing",
".",
"add_argument",
"(",
"'--remote-name'",
",",
"help",
"=",
"'remote pairing name'",
",",
"dest",
"=",
"'remote_name'",
",",
"default",
"=",
"'pyatv'",
")",
"pairing",
".",
"add_argument",
"(",
"'-p'",
",",
"'--pin'",
",",
"help",
"=",
"'pairing pin code'",
",",
"dest",
"=",
"'pin_code'",
",",
"metavar",
"=",
"'PIN'",
",",
"default",
"=",
"1234",
",",
"type",
"=",
"_in_range",
"(",
"0",
",",
"9999",
",",
"allow_none",
"=",
"True",
")",
")",
"pairing",
".",
"add_argument",
"(",
"'--pairing-guid'",
",",
"help",
"=",
"'pairing guid (16 chars hex)'",
",",
"dest",
"=",
"'pairing_guid'",
",",
"default",
"=",
"None",
")",
"parser",
".",
"add_argument",
"(",
"'-a'",
",",
"'--autodiscover'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'automatically find a device'",
",",
"dest",
"=",
"'autodiscover'",
",",
"default",
"=",
"False",
")",
"parser",
".",
"add_argument",
"(",
"'--device_credentials'",
",",
"help",
"=",
"'credentials to device'",
",",
"dest",
"=",
"'device_credentials'",
",",
"default",
"=",
"None",
")",
"airplay",
"=",
"parser",
".",
"add_argument_group",
"(",
"'airplay'",
")",
"airplay",
".",
"add_argument",
"(",
"'--airplay_credentials'",
",",
"help",
"=",
"'credentials for airplay'",
",",
"dest",
"=",
"'airplay_credentials'",
",",
"default",
"=",
"None",
")",
"debug",
"=",
"parser",
".",
"add_argument_group",
"(",
"'debugging'",
")",
"debug",
".",
"add_argument",
"(",
"'-v'",
",",
"'--verbose'",
",",
"help",
"=",
"'increase output verbosity'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'verbose'",
")",
"debug",
".",
"add_argument",
"(",
"'--debug'",
",",
"help",
"=",
"'print debug information'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'debug'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"loglevel",
"=",
"logging",
".",
"WARNING",
"if",
"args",
".",
"verbose",
":",
"loglevel",
"=",
"logging",
".",
"INFO",
"if",
"args",
".",
"debug",
":",
"loglevel",
"=",
"logging",
".",
"DEBUG",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"loglevel",
",",
"format",
"=",
"'%(levelname)s: %(message)s'",
")",
"logging",
".",
"getLogger",
"(",
"'requests'",
")",
".",
"setLevel",
"(",
"logging",
".",
"WARNING",
")",
"cmds",
"=",
"retrieve_commands",
"(",
"GlobalCommands",
")",
"if",
"args",
".",
"command",
"[",
"0",
"]",
"in",
"cmds",
":",
"glob_cmds",
"=",
"GlobalCommands",
"(",
"args",
",",
"loop",
")",
"return",
"(",
"await",
"_exec_command",
"(",
"glob_cmds",
",",
"args",
".",
"command",
"[",
"0",
"]",
",",
"print_result",
"=",
"False",
")",
")",
"if",
"args",
".",
"autodiscover",
":",
"if",
"not",
"await",
"_autodiscover_device",
"(",
"args",
",",
"loop",
")",
":",
"return",
"1",
"return",
"await",
"_handle_commands",
"(",
"args",
",",
"loop",
")",
"if",
"args",
".",
"address",
":",
"return",
"await",
"_handle_commands",
"(",
"args",
",",
"loop",
")",
"logging",
".",
"error",
"(",
"'To autodiscover an Apple TV, add -a'",
")",
"return",
"1"
] | Application starts here. | [
"Application",
"starts",
"here",
"."
] | python | train |
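A hedged driver sketch using only flags defined in the parser above; 'playing' is a hypothetical command name, and hand-patching sys.argv is just a convenient way to exercise the handler.

import asyncio
import sys

sys.argv = ['atvremote', '--address', '10.0.0.2', '--protocol', 'mrp', 'playing']
loop = asyncio.get_event_loop()
exit_code = loop.run_until_complete(cli_handler(loop))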
PmagPy/PmagPy | programs/magic_gui.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/magic_gui.py#L446-L460 | def highlight_button(self, event):
"""
Draw a red highlight line around the event object
"""
wind = event.GetEventObject()
pos = wind.GetPosition()
size = wind.GetSize()
try:
dc = wx.PaintDC(self)
except wx._core.PyAssertionError:
# if it's not a native paint event, we can't us wx.PaintDC
dc = wx.ClientDC(self)
dc.SetPen(wx.Pen('red', 5, wx.SOLID))
dc.DrawRectangle(pos[0], pos[1], size[0], size[1])
event.Skip() | [
"def",
"highlight_button",
"(",
"self",
",",
"event",
")",
":",
"wind",
"=",
"event",
".",
"GetEventObject",
"(",
")",
"pos",
"=",
"wind",
".",
"GetPosition",
"(",
")",
"size",
"=",
"wind",
".",
"GetSize",
"(",
")",
"try",
":",
"dc",
"=",
"wx",
".",
"PaintDC",
"(",
"self",
")",
"except",
"wx",
".",
"_core",
".",
"PyAssertionError",
":",
"# if it's not a native paint event, we can't us wx.PaintDC",
"dc",
"=",
"wx",
".",
"ClientDC",
"(",
"self",
")",
"dc",
".",
"SetPen",
"(",
"wx",
".",
"Pen",
"(",
"'red'",
",",
"5",
",",
"wx",
".",
"SOLID",
")",
")",
"dc",
".",
"DrawRectangle",
"(",
"pos",
"[",
"0",
"]",
",",
"pos",
"[",
"1",
"]",
",",
"size",
"[",
"0",
"]",
",",
"size",
"[",
"1",
"]",
")",
"event",
".",
"Skip",
"(",
")"
] | Draw a red highlight line around the event object | [
"Draw",
"a",
"red",
"highlight",
"line",
"around",
"the",
"event",
"object"
] | python | train |
oscarbranson/latools | latools/latools.py | https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L3506-L3594 | def sample_stats(self, analytes=None, filt=True,
stats=['mean', 'std'],
eachtrace=True, csf_dict={}):
"""
Calculate sample statistics.
Returns samples, analytes, and arrays of statistics
of shape (samples, analytes). Statistics are calculated
from the 'focus' data variable, so output depends on how
the data have been processed.
Included stat functions:
* :func:`~latools.stat_fns.mean`: arithmetic mean
* :func:`~latools.stat_fns.std`: arithmetic standard deviation
* :func:`~latools.stat_fns.se`: arithmetic standard error
* :func:`~latools.stat_fns.H15_mean`: Huber mean (outlier removal)
* :func:`~latools.stat_fns.H15_std`: Huber standard deviation (outlier removal)
* :func:`~latools.stat_fns.H15_se`: Huber standard error (outlier removal)
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to calculate statistics for. Defaults to
all analytes.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
        stats : array_like
            list of functions or names (see above) or functions that
            take a single array_like input, and return a single statistic.
            Function should be able to cope with NaN values.
        eachtrace : bool
            Whether to calculate the statistics for each analysis
            spot individually, or to produce per-sample means.
Default is True.
Returns
-------
None
Adds dict to analyse object containing samples, analytes and
functions and data.
"""
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
self.stats = Bunch()
self.stats_calced = []
stat_fns = Bunch()
stat_dict = {'mean': np.nanmean,
'std': np.nanstd,
'nanmean': np.nanmean,
'nanstd': np.nanstd,
'se': stderr,
'H15_mean': H15_mean,
'H15_std': H15_std,
'H15_se': H15_se}
for s in stats:
if isinstance(s, str):
if s in stat_dict.keys():
self.stats_calced.append(s)
stat_fns[s] = stat_dict[s]
if s in csf_dict.keys():
self.stats_calced.append(s)
exec(csf_dict[s])
stat_fns[s] = eval(s)
elif callable(s):
self.stats_calced.append(s.__name__)
stat_fns[s.__name__] = s
if not hasattr(self, 'custom_stat_functions'):
self.custom_stat_functions = ''
self.custom_stat_functions += inspect.getsource(s) + '\n\n\n\n'
# calculate stats for each sample
with self.pbar.set(total=len(self.samples), desc='Calculating Stats') as prog:
for s in self.samples:
if self.srm_identifier not in s:
self.data[s].sample_stats(analytes, filt=filt,
stat_fns=stat_fns,
eachtrace=eachtrace)
self.stats[s] = self.data[s].stats
prog.update() | [
"def",
"sample_stats",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"filt",
"=",
"True",
",",
"stats",
"=",
"[",
"'mean'",
",",
"'std'",
"]",
",",
"eachtrace",
"=",
"True",
",",
"csf_dict",
"=",
"{",
"}",
")",
":",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"self",
".",
"analytes",
"elif",
"isinstance",
"(",
"analytes",
",",
"str",
")",
":",
"analytes",
"=",
"[",
"analytes",
"]",
"self",
".",
"stats",
"=",
"Bunch",
"(",
")",
"self",
".",
"stats_calced",
"=",
"[",
"]",
"stat_fns",
"=",
"Bunch",
"(",
")",
"stat_dict",
"=",
"{",
"'mean'",
":",
"np",
".",
"nanmean",
",",
"'std'",
":",
"np",
".",
"nanstd",
",",
"'nanmean'",
":",
"np",
".",
"nanmean",
",",
"'nanstd'",
":",
"np",
".",
"nanstd",
",",
"'se'",
":",
"stderr",
",",
"'H15_mean'",
":",
"H15_mean",
",",
"'H15_std'",
":",
"H15_std",
",",
"'H15_se'",
":",
"H15_se",
"}",
"for",
"s",
"in",
"stats",
":",
"if",
"isinstance",
"(",
"s",
",",
"str",
")",
":",
"if",
"s",
"in",
"stat_dict",
".",
"keys",
"(",
")",
":",
"self",
".",
"stats_calced",
".",
"append",
"(",
"s",
")",
"stat_fns",
"[",
"s",
"]",
"=",
"stat_dict",
"[",
"s",
"]",
"if",
"s",
"in",
"csf_dict",
".",
"keys",
"(",
")",
":",
"self",
".",
"stats_calced",
".",
"append",
"(",
"s",
")",
"exec",
"(",
"csf_dict",
"[",
"s",
"]",
")",
"stat_fns",
"[",
"s",
"]",
"=",
"eval",
"(",
"s",
")",
"elif",
"callable",
"(",
"s",
")",
":",
"self",
".",
"stats_calced",
".",
"append",
"(",
"s",
".",
"__name__",
")",
"stat_fns",
"[",
"s",
".",
"__name__",
"]",
"=",
"s",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'custom_stat_functions'",
")",
":",
"self",
".",
"custom_stat_functions",
"=",
"''",
"self",
".",
"custom_stat_functions",
"+=",
"inspect",
".",
"getsource",
"(",
"s",
")",
"+",
"'\\n\\n\\n\\n'",
"# calculate stats for each sample",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"self",
".",
"samples",
")",
",",
"desc",
"=",
"'Calculating Stats'",
")",
"as",
"prog",
":",
"for",
"s",
"in",
"self",
".",
"samples",
":",
"if",
"self",
".",
"srm_identifier",
"not",
"in",
"s",
":",
"self",
".",
"data",
"[",
"s",
"]",
".",
"sample_stats",
"(",
"analytes",
",",
"filt",
"=",
"filt",
",",
"stat_fns",
"=",
"stat_fns",
",",
"eachtrace",
"=",
"eachtrace",
")",
"self",
".",
"stats",
"[",
"s",
"]",
"=",
"self",
".",
"data",
"[",
"s",
"]",
".",
"stats",
"prog",
".",
"update",
"(",
")"
] | Calculate sample statistics.
Returns samples, analytes, and arrays of statistics
of shape (samples, analytes). Statistics are calculated
from the 'focus' data variable, so output depends on how
the data have been processed.
Included stat functions:
* :func:`~latools.stat_fns.mean`: arithmetic mean
* :func:`~latools.stat_fns.std`: arithmetic standard deviation
* :func:`~latools.stat_fns.se`: arithmetic standard error
* :func:`~latools.stat_fns.H15_mean`: Huber mean (outlier removal)
* :func:`~latools.stat_fns.H15_std`: Huber standard deviation (outlier removal)
* :func:`~latools.stat_fns.H15_se`: Huber standard error (outlier removal)
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to calculate statistics for. Defaults to
all analytes.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
stats : array_like
    list of functions or names (see above) or functions that
    take a single array_like input, and return a single statistic.
    Function should be able to cope with NaN values.
eachtrace : bool
    Whether to calculate the statistics for each analysis
    spot individually, or to produce per-sample means.
Default is True.
Returns
-------
None
Adds dict to analyse object containing samples, analytes and
functions and data. | [
"Calculate",
"sample",
"statistics",
"."
] | python | test |
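A hedged sketch of passing a custom stat function, which the method accepts alongside built-in names; `dat` stands in for a hypothetical latools.analyse object, so the call is left commented.

import numpy as np

def nanmedian(a):
    # one array_like in, one statistic out, NaN-tolerant
    return np.nanmedian(a)

# dat.sample_stats(analytes=['Mg24'], stats=['mean', 'std', nanmedian])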
ouroboroscoding/format-oc-python | FormatOC/__init__.py | https://github.com/ouroboroscoding/format-oc-python/blob/c160b46fe4ff2c92333c776991c712de23991225/FormatOC/__init__.py#L1200-L1378 | def minmax(self, minimum=None, maximum=None):
"""Min/Max
Sets or gets the minimum and/or maximum values for the Node. For
getting, returns {"minimum":mixed,"maximum":mixed}
Arguments:
minimum {mixed} -- The minimum value
maximum {mixed} -- The maximum value
Raises:
TypeError, ValueError
Returns:
None | dict
"""
# If neither min or max is set, this is a getter
if minimum is None and maximum is None:
return {"minimum": self._minimum, "maximum": self._maximum};
# If the minimum is set
if minimum != None:
# If the current type is a date, datetime, ip, or time
if self._type in ['base64', 'date', 'datetime', 'ip', 'time']:
# Make sure the value is valid for the type
if not isinstance(minimum, basestring) \
or not _typeToRegex[self._type].match(minimum):
raise ValueError('__minimum__')
# Else if the type is an int (unsigned, timestamp), or a string in
# which the min/max are lengths
elif self._type in ['int', 'string', 'timestamp', 'uint']:
# If the value is not a valid int or long
if not isinstance(minimum, (int, long)):
# If it's a valid representation of an integer
if isinstance(minimum, basestring) \
and _typeToRegex['int'].match(minimum):
# Convert it
minimum = int(minimum, 0)
# Else, raise an error
else:
raise ValueError('__minimum__')
# If the type is meant to be unsigned
if self._type in ['base64', 'string', 'timestamp', 'uint']:
# And it's below zero
if minimum < 0:
raise ValueError('__minimum__')
# Else if the type is decimal
elif self._type == 'decimal':
# Store it if it's valid, else throw a ValueError
try:
minimum = Decimal(minimum)
except ValueError:
raise ValueError('__minimum__')
# Else if the type is float
elif self._type == 'float':
# Store it if it's valid, else throw a ValueError
try:
minimum = float(minimum)
except ValueError:
raise ValueError('__minimum__')
# Else if the type is price
elif self._type == 'price':
# If it's not a valid representation of a price
if not isinstance(minimum, basestring) or not _typeToRegex['price'].match(minimum):
raise ValueError('__minimum__')
# Store it as a Decimal
minimum = Decimal(minimum)
# Else we can't have a minimum
else:
raise TypeError('can not set __minimum__ for ' + self._type)
# Store the minimum
self._minimum = minimum
# If the maximum is set
if maximum != None:
# If the current type is a date, datetime, ip, or time
if self._type in ['date', 'datetime', 'ip', 'time']:
# Make sure the value is valid for the type
if not isinstance(maximum, basestring) \
or not _typeToRegex[self._type].match(maximum):
raise ValueError('__maximum__')
# Else if the type is an int (unsigned, timestamp), or a string in
# which the min/max are lengths
elif self._type in ['int', 'string', 'timestamp', 'uint']:
# If the value is not a valid int or long
if not isinstance(maximum, (int, long)):
# If it's a valid representation of an integer
if isinstance(maximum, basestring) \
and _typeToRegex['int'].match(maximum):
# Convert it
maximum = int(maximum, 0)
# Else, raise an error
else:
						raise ValueError('__maximum__')
# If the type is meant to be unsigned
if self._type in ['string', 'timestamp', 'uint']:
# And it's below zero
if maximum < 0:
raise ValueError('__maximum__')
# Else if the type is decimal
elif self._type == 'decimal':
# Store it if it's valid, else throw a ValueError
try:
maximum = Decimal(maximum)
except ValueError:
raise ValueError('__maximum__')
# Else if the type is float
elif self._type == 'float':
# Store it if it's valid, else throw a ValueError
try:
					maximum = float(maximum)
except ValueError:
raise ValueError('__maximum__')
# Else if the type is price
elif self._type == 'price':
# If it's not a valid representation of a price
if not isinstance(maximum, basestring) or not _typeToRegex['price'].match(maximum):
raise ValueError('__maximum__')
# Store it as a Decimal
maximum = Decimal(maximum)
# Else we can't have a maximum
else:
raise TypeError('can not set __maximum__ for ' + self._type)
# If we also have a minimum
if self._minimum is not None:
# If the type is an IP
if self._type == 'ip':
# If the min is above the max, we have a problem
if self.__compare_ips(self._minimum, maximum) == 1:
raise ValueError('__maximum__')
# Else any other data type
else:
# If the min is above the max, we have a problem
if self._minimum > maximum:
raise ValueError('__maximum__')
# Store the maximum
self._maximum = maximum | [
"def",
"minmax",
"(",
"self",
",",
"minimum",
"=",
"None",
",",
"maximum",
"=",
"None",
")",
":",
"# If neither min or max is set, this is a getter",
"if",
"minimum",
"is",
"None",
"and",
"maximum",
"is",
"None",
":",
"return",
"{",
"\"minimum\"",
":",
"self",
".",
"_minimum",
",",
"\"maximum\"",
":",
"self",
".",
"_maximum",
"}",
"# If the minimum is set",
"if",
"minimum",
"!=",
"None",
":",
"# If the current type is a date, datetime, ip, or time",
"if",
"self",
".",
"_type",
"in",
"[",
"'base64'",
",",
"'date'",
",",
"'datetime'",
",",
"'ip'",
",",
"'time'",
"]",
":",
"# Make sure the value is valid for the type",
"if",
"not",
"isinstance",
"(",
"minimum",
",",
"basestring",
")",
"or",
"not",
"_typeToRegex",
"[",
"self",
".",
"_type",
"]",
".",
"match",
"(",
"minimum",
")",
":",
"raise",
"ValueError",
"(",
"'__minimum__'",
")",
"# Else if the type is an int (unsigned, timestamp), or a string in",
"# \twhich the min/max are lengths",
"elif",
"self",
".",
"_type",
"in",
"[",
"'int'",
",",
"'string'",
",",
"'timestamp'",
",",
"'uint'",
"]",
":",
"# If the value is not a valid int or long",
"if",
"not",
"isinstance",
"(",
"minimum",
",",
"(",
"int",
",",
"long",
")",
")",
":",
"# If it's a valid representation of an integer",
"if",
"isinstance",
"(",
"minimum",
",",
"basestring",
")",
"and",
"_typeToRegex",
"[",
"'int'",
"]",
".",
"match",
"(",
"minimum",
")",
":",
"# Convert it",
"minimum",
"=",
"int",
"(",
"minimum",
",",
"0",
")",
"# Else, raise an error",
"else",
":",
"raise",
"ValueError",
"(",
"'__minimum__'",
")",
"# If the type is meant to be unsigned",
"if",
"self",
".",
"_type",
"in",
"[",
"'base64'",
",",
"'string'",
",",
"'timestamp'",
",",
"'uint'",
"]",
":",
"# And it's below zero",
"if",
"minimum",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'__minimum__'",
")",
"# Else if the type is decimal",
"elif",
"self",
".",
"_type",
"==",
"'decimal'",
":",
"# Store it if it's valid, else throw a ValueError",
"try",
":",
"minimum",
"=",
"Decimal",
"(",
"minimum",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'__minimum__'",
")",
"# Else if the type is float",
"elif",
"self",
".",
"_type",
"==",
"'float'",
":",
"# Store it if it's valid, else throw a ValueError",
"try",
":",
"minimum",
"=",
"float",
"(",
"minimum",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'__minimum__'",
")",
"# Else if the type is price",
"elif",
"self",
".",
"_type",
"==",
"'price'",
":",
"# If it's not a valid representation of a price",
"if",
"not",
"isinstance",
"(",
"minimum",
",",
"basestring",
")",
"or",
"not",
"_typeToRegex",
"[",
"'price'",
"]",
".",
"match",
"(",
"minimum",
")",
":",
"raise",
"ValueError",
"(",
"'__minimum__'",
")",
"# Store it as a Decimal",
"minimum",
"=",
"Decimal",
"(",
"minimum",
")",
"# Else we can't have a minimum",
"else",
":",
"raise",
"TypeError",
"(",
"'can not set __minimum__ for '",
"+",
"self",
".",
"_type",
")",
"# Store the minimum",
"self",
".",
"_minimum",
"=",
"minimum",
"# If the maximum is set",
"if",
"maximum",
"!=",
"None",
":",
"# If the current type is a date, datetime, ip, or time",
"if",
"self",
".",
"_type",
"in",
"[",
"'date'",
",",
"'datetime'",
",",
"'ip'",
",",
"'time'",
"]",
":",
"# Make sure the value is valid for the type",
"if",
"not",
"isinstance",
"(",
"maximum",
",",
"basestring",
")",
"or",
"not",
"_typeToRegex",
"[",
"self",
".",
"_type",
"]",
".",
"match",
"(",
"maximum",
")",
":",
"raise",
"ValueError",
"(",
"'__maximum__'",
")",
"# Else if the type is an int (unsigned, timestamp), or a string in",
"# \twhich the min/max are lengths",
"elif",
"self",
".",
"_type",
"in",
"[",
"'int'",
",",
"'string'",
",",
"'timestamp'",
",",
"'uint'",
"]",
":",
"# If the value is not a valid int or long",
"if",
"not",
"isinstance",
"(",
"maximum",
",",
"(",
"int",
",",
"long",
")",
")",
":",
"# If it's a valid representation of an integer",
"if",
"isinstance",
"(",
"maximum",
",",
"basestring",
")",
"and",
"_typeToRegex",
"[",
"'int'",
"]",
".",
"match",
"(",
"maximum",
")",
":",
"# Convert it",
"maximum",
"=",
"int",
"(",
"maximum",
",",
"0",
")",
"# Else, raise an error",
"else",
":",
"raise",
"ValueError",
"(",
"'__minimum__'",
")",
"# If the type is meant to be unsigned",
"if",
"self",
".",
"_type",
"in",
"[",
"'string'",
",",
"'timestamp'",
",",
"'uint'",
"]",
":",
"# And it's below zero",
"if",
"maximum",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'__maximum__'",
")",
"# Else if the type is decimal",
"elif",
"self",
".",
"_type",
"==",
"'decimal'",
":",
"# Store it if it's valid, else throw a ValueError",
"try",
":",
"maximum",
"=",
"Decimal",
"(",
"maximum",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'__maximum__'",
")",
"# Else if the type is float",
"elif",
"self",
".",
"_type",
"==",
"'float'",
":",
"# Store it if it's valid, else throw a ValueError",
"try",
":",
"minimum",
"=",
"float",
"(",
"minimum",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'__maximum__'",
")",
"# Else if the type is price",
"elif",
"self",
".",
"_type",
"==",
"'price'",
":",
"# If it's not a valid representation of a price",
"if",
"not",
"isinstance",
"(",
"maximum",
",",
"basestring",
")",
"or",
"not",
"_typeToRegex",
"[",
"'price'",
"]",
".",
"match",
"(",
"maximum",
")",
":",
"raise",
"ValueError",
"(",
"'__maximum__'",
")",
"# Store it as a Decimal",
"maximum",
"=",
"Decimal",
"(",
"maximum",
")",
"# Else we can't have a maximum",
"else",
":",
"raise",
"TypeError",
"(",
"'can not set __maximum__ for '",
"+",
"self",
".",
"_type",
")",
"# If we also have a minimum",
"if",
"self",
".",
"_minimum",
"is",
"not",
"None",
":",
"# If the type is an IP",
"if",
"self",
".",
"_type",
"==",
"'ip'",
":",
"# If the min is above the max, we have a problem",
"if",
"self",
".",
"__compare_ips",
"(",
"self",
".",
"_minimum",
",",
"maximum",
")",
"==",
"1",
":",
"raise",
"ValueError",
"(",
"'__maximum__'",
")",
"# Else any other data type",
"else",
":",
"# If the min is above the max, we have a problem",
"if",
"self",
".",
"_minimum",
">",
"maximum",
":",
"raise",
"ValueError",
"(",
"'__maximum__'",
")",
"# Store the maximum",
"self",
".",
"_maximum",
"=",
"maximum"
] | Min/Max
Sets or gets the minimum and/or maximum values for the Node. For
getting, returns {"minimum":mixed,"maximum":mixed}
Arguments:
minimum {mixed} -- The minimum value
maximum {mixed} -- The maximum value
Raises:
TypeError, ValueError
Returns:
None | dict | [
"Min",
"/",
"Max"
] | python | train |
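Hypothetical getter/setter usage; the constructor dict below is an assumption about FormatOC's schema format, not taken from this entry.

from FormatOC import Node

node = Node({"__type__": "uint"})
node.minmax(minimum=0, maximum=100)
assert node.minmax() == {"minimum": 0, "maximum": 100}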
rflamary/POT | ot/bregman.py | https://github.com/rflamary/POT/blob/c5108efc7b6702e1af3928bef1032e6b37734d1c/ot/bregman.py#L796-L965 | def sinkhorn_epsilon_scaling(a, b, M, reg, numItermax=100, epsilon0=1e4, numInnerItermax=100,
tau=1e3, stopThr=1e-9, warmstart=None, verbose=False, print_period=10, log=False, **kwargs):
"""
Solve the entropic regularization optimal transport problem with log
stabilization and epsilon scaling.
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- M is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are source and target weights (sum to 1)
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix
scaling algorithm as proposed in [2]_ but with the log stabilization
proposed in [10]_ and the log scaling proposed in [9]_ algorithm 3.2
Parameters
----------
a : np.ndarray (ns,)
samples weights in the source domain
b : np.ndarray (nt,)
samples in the target domain
M : np.ndarray (ns,nt)
loss matrix
reg : float
Regularization term >0
    tau : float
        threshold for max value in u or v for log scaling
    warmstart : tuple of vectors
        if given then starting values for alpha and beta log scalings
numItermax : int, optional
Max number of iterations
numInnerItermax : int, optional
        Max number of iterations in the inner log-stabilized sinkhorn
    epsilon0 : float, optional
        first epsilon regularization value (then exponential decrease to reg)
    stopThr : float, optional
        Stop threshold on error (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : (ns x nt) ndarray
Optimal transportation matrix for the given parameters
log : dict
log dictionary return only if log==True in parameters
Examples
--------
>>> import ot
>>> a=[.5,.5]
>>> b=[.5,.5]
>>> M=[[0.,1.],[1.,0.]]
>>> ot.bregman.sinkhorn_epsilon_scaling(a,b,M,1)
array([[ 0.36552929, 0.13447071],
[ 0.13447071, 0.36552929]])
References
----------
.. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
.. [9] Schmitzer, B. (2016). Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. arXiv preprint arXiv:1610.06519.
See Also
--------
ot.lp.emd : Unregularized OT
ot.optim.cg : General regularized OT
"""
a = np.asarray(a, dtype=np.float64)
b = np.asarray(b, dtype=np.float64)
M = np.asarray(M, dtype=np.float64)
if len(a) == 0:
a = np.ones((M.shape[0],), dtype=np.float64) / M.shape[0]
if len(b) == 0:
b = np.ones((M.shape[1],), dtype=np.float64) / M.shape[1]
# init data
na = len(a)
nb = len(b)
    # relative numerical precision with 64 bits
numItermin = 35
    numItermax = max(numItermin, numItermax)  # ensure that last value is exact
cpt = 0
if log:
log = {'err': []}
# we assume that no distances are null except those of the diagonal of
# distances
if warmstart is None:
alpha, beta = np.zeros(na), np.zeros(nb)
else:
alpha, beta = warmstart
def get_K(alpha, beta):
"""log space computation"""
return np.exp(-(M - alpha.reshape((na, 1))
- beta.reshape((1, nb))) / reg)
# print(np.min(K))
def get_reg(n): # exponential decreasing
return (epsilon0 - reg) * np.exp(-n) + reg
loop = 1
cpt = 0
err = 1
while loop:
regi = get_reg(cpt)
G, logi = sinkhorn_stabilized(a, b, M, regi, numItermax=numInnerItermax, stopThr=1e-9, warmstart=(
alpha, beta), verbose=False, print_period=20, tau=tau, log=True)
alpha = logi['alpha']
beta = logi['beta']
if cpt >= numItermax:
loop = False
        if cpt % (print_period) == 0:  # epsilon nearly converged
# we can speed up the process by checking for the error only all
# the 10th iterations
transp = G
err = np.linalg.norm(
(np.sum(transp, axis=0) - b))**2 + np.linalg.norm((np.sum(transp, axis=1) - a))**2
if log:
log['err'].append(err)
if verbose:
if cpt % (print_period * 10) == 0:
print(
'{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19)
print('{:5d}|{:8e}|'.format(cpt, err))
if err <= stopThr and cpt > numItermin:
loop = False
cpt = cpt + 1
# print('err=',err,' cpt=',cpt)
if log:
log['alpha'] = alpha
log['beta'] = beta
log['warmstart'] = (log['alpha'], log['beta'])
return G, log
else:
return G | [
"def",
"sinkhorn_epsilon_scaling",
"(",
"a",
",",
"b",
",",
"M",
",",
"reg",
",",
"numItermax",
"=",
"100",
",",
"epsilon0",
"=",
"1e4",
",",
"numInnerItermax",
"=",
"100",
",",
"tau",
"=",
"1e3",
",",
"stopThr",
"=",
"1e-9",
",",
"warmstart",
"=",
"None",
",",
"verbose",
"=",
"False",
",",
"print_period",
"=",
"10",
",",
"log",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"a",
"=",
"np",
".",
"asarray",
"(",
"a",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"b",
"=",
"np",
".",
"asarray",
"(",
"b",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"M",
"=",
"np",
".",
"asarray",
"(",
"M",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"if",
"len",
"(",
"a",
")",
"==",
"0",
":",
"a",
"=",
"np",
".",
"ones",
"(",
"(",
"M",
".",
"shape",
"[",
"0",
"]",
",",
")",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"/",
"M",
".",
"shape",
"[",
"0",
"]",
"if",
"len",
"(",
"b",
")",
"==",
"0",
":",
"b",
"=",
"np",
".",
"ones",
"(",
"(",
"M",
".",
"shape",
"[",
"1",
"]",
",",
")",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"/",
"M",
".",
"shape",
"[",
"1",
"]",
"# init data",
"na",
"=",
"len",
"(",
"a",
")",
"nb",
"=",
"len",
"(",
"b",
")",
"# nrelative umerical precision with 64 bits",
"numItermin",
"=",
"35",
"numItermax",
"=",
"max",
"(",
"numItermin",
",",
"numItermax",
")",
"# ensure that last velue is exact",
"cpt",
"=",
"0",
"if",
"log",
":",
"log",
"=",
"{",
"'err'",
":",
"[",
"]",
"}",
"# we assume that no distances are null except those of the diagonal of",
"# distances",
"if",
"warmstart",
"is",
"None",
":",
"alpha",
",",
"beta",
"=",
"np",
".",
"zeros",
"(",
"na",
")",
",",
"np",
".",
"zeros",
"(",
"nb",
")",
"else",
":",
"alpha",
",",
"beta",
"=",
"warmstart",
"def",
"get_K",
"(",
"alpha",
",",
"beta",
")",
":",
"\"\"\"log space computation\"\"\"",
"return",
"np",
".",
"exp",
"(",
"-",
"(",
"M",
"-",
"alpha",
".",
"reshape",
"(",
"(",
"na",
",",
"1",
")",
")",
"-",
"beta",
".",
"reshape",
"(",
"(",
"1",
",",
"nb",
")",
")",
")",
"/",
"reg",
")",
"# print(np.min(K))",
"def",
"get_reg",
"(",
"n",
")",
":",
"# exponential decreasing",
"return",
"(",
"epsilon0",
"-",
"reg",
")",
"*",
"np",
".",
"exp",
"(",
"-",
"n",
")",
"+",
"reg",
"loop",
"=",
"1",
"cpt",
"=",
"0",
"err",
"=",
"1",
"while",
"loop",
":",
"regi",
"=",
"get_reg",
"(",
"cpt",
")",
"G",
",",
"logi",
"=",
"sinkhorn_stabilized",
"(",
"a",
",",
"b",
",",
"M",
",",
"regi",
",",
"numItermax",
"=",
"numInnerItermax",
",",
"stopThr",
"=",
"1e-9",
",",
"warmstart",
"=",
"(",
"alpha",
",",
"beta",
")",
",",
"verbose",
"=",
"False",
",",
"print_period",
"=",
"20",
",",
"tau",
"=",
"tau",
",",
"log",
"=",
"True",
")",
"alpha",
"=",
"logi",
"[",
"'alpha'",
"]",
"beta",
"=",
"logi",
"[",
"'beta'",
"]",
"if",
"cpt",
">=",
"numItermax",
":",
"loop",
"=",
"False",
"if",
"cpt",
"%",
"(",
"print_period",
")",
"==",
"0",
":",
"# spsion nearly converged",
"# we can speed up the process by checking for the error only all",
"# the 10th iterations",
"transp",
"=",
"G",
"err",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"(",
"np",
".",
"sum",
"(",
"transp",
",",
"axis",
"=",
"0",
")",
"-",
"b",
")",
")",
"**",
"2",
"+",
"np",
".",
"linalg",
".",
"norm",
"(",
"(",
"np",
".",
"sum",
"(",
"transp",
",",
"axis",
"=",
"1",
")",
"-",
"a",
")",
")",
"**",
"2",
"if",
"log",
":",
"log",
"[",
"'err'",
"]",
".",
"append",
"(",
"err",
")",
"if",
"verbose",
":",
"if",
"cpt",
"%",
"(",
"print_period",
"*",
"10",
")",
"==",
"0",
":",
"print",
"(",
"'{:5s}|{:12s}'",
".",
"format",
"(",
"'It.'",
",",
"'Err'",
")",
"+",
"'\\n'",
"+",
"'-'",
"*",
"19",
")",
"print",
"(",
"'{:5d}|{:8e}|'",
".",
"format",
"(",
"cpt",
",",
"err",
")",
")",
"if",
"err",
"<=",
"stopThr",
"and",
"cpt",
">",
"numItermin",
":",
"loop",
"=",
"False",
"cpt",
"=",
"cpt",
"+",
"1",
"# print('err=',err,' cpt=',cpt)",
"if",
"log",
":",
"log",
"[",
"'alpha'",
"]",
"=",
"alpha",
"log",
"[",
"'beta'",
"]",
"=",
"beta",
"log",
"[",
"'warmstart'",
"]",
"=",
"(",
"log",
"[",
"'alpha'",
"]",
",",
"log",
"[",
"'beta'",
"]",
")",
"return",
"G",
",",
"log",
"else",
":",
"return",
"G"
] | Solve the entropic regularization optimal transport problem with log
stabilization and epsilon scaling.
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- M is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are source and target weights (sum to 1)
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix
scaling algorithm as proposed in [2]_ but with the log stabilization
proposed in [10]_ and the log scaling proposed in [9]_ algorithm 3.2
Parameters
----------
a : np.ndarray (ns,)
samples weights in the source domain
b : np.ndarray (nt,)
samples in the target domain
M : np.ndarray (ns,nt)
loss matrix
reg : float
Regularization term >0
tau : float
threshold for max value in u or v for log scaling
warmstart : tuple of vectors
if given then starting values for alpha and beta log scalings
numItermax : int, optional
Max number of iterations
numInnerItermax : int, optional
Max number of iterations in the inner log stabilized sinkhorn
epsilon0 : int, optional
first epsilon regularization value (then exponential decrease to reg)
stopThr : float, optional
Stop threshold on error (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : (ns x nt) ndarray
Optimal transportation matrix for the given parameters
log : dict
log dictionary return only if log==True in parameters
Examples
--------
>>> import ot
>>> a=[.5,.5]
>>> b=[.5,.5]
>>> M=[[0.,1.],[1.,0.]]
>>> ot.bregman.sinkhorn_epsilon_scaling(a,b,M,1)
array([[ 0.36552929, 0.13447071],
[ 0.13447071, 0.36552929]])
References
----------
.. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
.. [9] Schmitzer, B. (2016). Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. arXiv preprint arXiv:1610.06519.
.. [10] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F.-X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816.
See Also
--------
ot.lp.emd : Unregularized OT
ot.optim.cg : General regularized OT | [
"Solve",
"the",
"entropic",
"regularization",
"optimal",
"transport",
"problem",
"with",
"log",
"stabilization",
"and",
"epsilon",
"scaling",
"."
] | python | train |
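A runnable usage sketch for the record above, following its docstring's Examples section; the weights and cost matrix are illustrative:

import numpy as np
import ot

a = np.array([.5, .5])              # source weights (sum to 1)
b = np.array([.5, .5])              # target weights (sum to 1)
M = np.array([[0., 1.], [1., 0.]])  # pairwise cost matrix
# log=True additionally returns the error history and warmstart scalings
G, log = ot.bregman.sinkhorn_epsilon_scaling(a, b, M, 1, log=True)
print(G)               # transport plan; rows sum to a, columns sum to b
print(log['err'][-1])  # last recorded marginal violation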
fuzeman/PyUPnP | pyupnp/util.py | https://github.com/fuzeman/PyUPnP/blob/6dea64be299952346a14300ab6cc7dac42736433/pyupnp/util.py#L24-L32 | def twisted_absolute_path(path, request):
"""Hack to fix twisted not accepting absolute URIs"""
parsed = urlparse.urlparse(request.uri)
if parsed.scheme != '':
path_parts = parsed.path.lstrip('/').split('/')
request.prepath = path_parts[0:1]
request.postpath = path_parts[1:]
path = request.prepath[0]
return path, request | [
"def",
"twisted_absolute_path",
"(",
"path",
",",
"request",
")",
":",
"parsed",
"=",
"urlparse",
".",
"urlparse",
"(",
"request",
".",
"uri",
")",
"if",
"parsed",
".",
"scheme",
"!=",
"''",
":",
"path_parts",
"=",
"parsed",
".",
"path",
".",
"lstrip",
"(",
"'/'",
")",
".",
"split",
"(",
"'/'",
")",
"request",
".",
"prepath",
"=",
"path_parts",
"[",
"0",
":",
"1",
"]",
"request",
".",
"postpath",
"=",
"path_parts",
"[",
"1",
":",
"]",
"path",
"=",
"request",
".",
"prepath",
"[",
"0",
"]",
"return",
"path",
",",
"request"
] | Hack to fix twisted not accepting absolute URIs | [
"Hack",
"to",
"fix",
"twisted",
"not",
"accepting",
"absolute",
"URIs"
] | python | train |
michael-lazar/rtv | rtv/packages/praw/objects.py | https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/objects.py#L1281-L1292 | def lock(self):
"""Lock thread.
Requires that the currently authenticated user has the modposts oauth
scope or has user/password authentication as a mod of the subreddit.
:returns: The json response from the server.
"""
url = self.reddit_session.config['lock']
data = {'id': self.fullname}
return self.reddit_session.request_json(url, data=data) | [
"def",
"lock",
"(",
"self",
")",
":",
"url",
"=",
"self",
".",
"reddit_session",
".",
"config",
"[",
"'lock'",
"]",
"data",
"=",
"{",
"'id'",
":",
"self",
".",
"fullname",
"}",
"return",
"self",
".",
"reddit_session",
".",
"request_json",
"(",
"url",
",",
"data",
"=",
"data",
")"
] | Lock thread.
Requires that the currently authenticated user has the modposts oauth
scope or has user/password authentication as a mod of the subreddit.
:returns: The json response from the server. | [
"Lock",
"thread",
"."
] | python | train |
PlaidWeb/Publ | publ/index.py | https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/index.py#L169-L173 | def on_modified(self, event):
""" on_modified handler """
logger.debug("file modified: %s", event.src_path)
if not event.is_directory:
self.update_file(event.src_path) | [
"def",
"on_modified",
"(",
"self",
",",
"event",
")",
":",
"logger",
".",
"debug",
"(",
"\"file modified: %s\"",
",",
"event",
".",
"src_path",
")",
"if",
"not",
"event",
".",
"is_directory",
":",
"self",
".",
"update_file",
"(",
"event",
".",
"src_path",
")"
] | on_modified handler | [
"on_modified",
"handler"
] | python | train |
quantumlib/Cirq | cirq/circuits/text_diagram_drawer.py | https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/circuits/text_diagram_drawer.py#L134-L142 | def horizontal_line(self,
y: Union[int, float],
x1: Union[int, float],
x2: Union[int, float],
emphasize: bool = False
) -> None:
"""Adds a line from (x1, y) to (x2, y)."""
x1, x2 = sorted([x1, x2])
self.horizontal_lines.append(_HorizontalLine(y, x1, x2, emphasize)) | [
"def",
"horizontal_line",
"(",
"self",
",",
"y",
":",
"Union",
"[",
"int",
",",
"float",
"]",
",",
"x1",
":",
"Union",
"[",
"int",
",",
"float",
"]",
",",
"x2",
":",
"Union",
"[",
"int",
",",
"float",
"]",
",",
"emphasize",
":",
"bool",
"=",
"False",
")",
"->",
"None",
":",
"x1",
",",
"x2",
"=",
"sorted",
"(",
"[",
"x1",
",",
"x2",
"]",
")",
"self",
".",
"horizontal_lines",
".",
"append",
"(",
"_HorizontalLine",
"(",
"y",
",",
"x1",
",",
"x2",
",",
"emphasize",
")",
")"
] | Adds a line from (x1, y) to (x2, y). | [
"Adds",
"a",
"line",
"from",
"(",
"x1",
"y",
")",
"to",
"(",
"x2",
"y",
")",
"."
] | python | train |
cltk/cltk | cltk/tokenize/word.py | https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/tokenize/word.py#L433-L445 | def tokenize_middle_high_german_words(text):
"""Tokenizes MHG text"""
assert isinstance(text, str)
# As far as I know, hyphens were never used for compounds, so the tokenizer treats all hyphens as line-breaks
text = re.sub(r'-\n',r'-', text)
text = re.sub(r'\n', r' ', text)
text = re.sub(r'(?<=.)(?=[\.\";\,\:\[\]\(\)!&?])',r' ', text)
text = re.sub(r'(?<=[\.\";\,\:\[\]\(\)!&?])(?=.)',r' ', text)
text = re.sub(r'\s+',r' ', text)
text = str.split(text)
return text | [
"def",
"tokenize_middle_high_german_words",
"(",
"text",
")",
":",
"assert",
"isinstance",
"(",
"text",
",",
"str",
")",
"# As far as I know, hyphens were never used for compounds, so the tokenizer treats all hyphens as line-breaks",
"text",
"=",
"re",
".",
"sub",
"(",
"r'-\\n'",
",",
"r'-'",
",",
"text",
")",
"text",
"=",
"re",
".",
"sub",
"(",
"r'\\n'",
",",
"r' '",
",",
"text",
")",
"text",
"=",
"re",
".",
"sub",
"(",
"r'(?<=.)(?=[\\.\\\";\\,\\:\\[\\]\\(\\)!&?])'",
",",
"r' '",
",",
"text",
")",
"text",
"=",
"re",
".",
"sub",
"(",
"r'(?<=[\\.\\\";\\,\\:\\[\\]\\(\\)!&?])(?=.)'",
",",
"r' '",
",",
"text",
")",
"text",
"=",
"re",
".",
"sub",
"(",
"r'\\s+'",
",",
"r' '",
",",
"text",
")",
"text",
"=",
"str",
".",
"split",
"(",
"text",
")",
"return",
"text"
] | Tokenizes MHG text | [
"Tokenizes",
"MHG",
"text"
] | python | train |
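A short illustration of the tokenizer above; the sample line of Middle High German is only for demonstration:

text = "Uns ist in alten mæren wunders vil geseit."
tokenize_middle_high_german_words(text)
# -> ['Uns', 'ist', 'in', 'alten', 'mæren', 'wunders', 'vil', 'geseit', '.']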
Becksteinlab/GromacsWrapper | gromacs/config.py | https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/config.py#L356-L361 | def resource_basename(resource):
"""Last component of a resource (which always uses '/' as sep)."""
if resource.endswith('/'):
resource = resource[:-1]
parts = resource.split('/')
return parts[-1] | [
"def",
"resource_basename",
"(",
"resource",
")",
":",
"if",
"resource",
".",
"endswith",
"(",
"'/'",
")",
":",
"resource",
"=",
"resource",
"[",
":",
"-",
"1",
"]",
"parts",
"=",
"resource",
".",
"split",
"(",
"'/'",
")",
"return",
"parts",
"[",
"-",
"1",
"]"
] | Last component of a resource (which always uses '/' as sep). | [
"Last",
"component",
"of",
"a",
"resource",
"(",
"which",
"always",
"uses",
"/",
"as",
"sep",
")",
"."
] | python | valid |
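Behavior sketch for the helper above: a trailing slash is stripped first, then the last '/'-separated component is returned.

resource_basename('templates/water/')  # -> 'water'
resource_basename('top.itp')           # -> 'top.itp'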
nion-software/nionswift-io | nionswift_plugin/TIFF_IO/tifffile.py | https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4808-L4846 | def _gettags(self, codes=None, lock=None):
"""Return list of (code, TiffTag) from file."""
fh = self.parent.filehandle
tiff = self.parent.tiff
unpack = struct.unpack
lock = NullContext() if lock is None else lock
tags = []
with lock:
fh.seek(self.offset)
try:
tagno = unpack(tiff.tagnoformat, fh.read(tiff.tagnosize))[0]
if tagno > 4096:
raise TiffFileError('suspicious number of tags')
except Exception:
raise TiffFileError(
'corrupted page list at offset %i' % self.offset)
tagoffset = self.offset + tiff.tagnosize # fh.tell()
tagsize = tiff.tagsize
tagindex = -tagsize
codeformat = tiff.tagformat1[:2]
tagbytes = fh.read(tagsize * tagno)
for _ in range(tagno):
tagindex += tagsize
code = unpack(codeformat, tagbytes[tagindex:tagindex+2])[0]
if codes and code not in codes:
continue
try:
tag = TiffTag(self.parent,
tagbytes[tagindex:tagindex+tagsize],
tagoffset+tagindex)
except TiffFileError as exc:
log.warning('%s: %s', exc.__class__.__name__, exc)
continue
tags.append((code, tag))
return tags | [
"def",
"_gettags",
"(",
"self",
",",
"codes",
"=",
"None",
",",
"lock",
"=",
"None",
")",
":",
"fh",
"=",
"self",
".",
"parent",
".",
"filehandle",
"tiff",
"=",
"self",
".",
"parent",
".",
"tiff",
"unpack",
"=",
"struct",
".",
"unpack",
"lock",
"=",
"NullContext",
"(",
")",
"if",
"lock",
"is",
"None",
"else",
"lock",
"tags",
"=",
"[",
"]",
"with",
"lock",
":",
"fh",
".",
"seek",
"(",
"self",
".",
"offset",
")",
"try",
":",
"tagno",
"=",
"unpack",
"(",
"tiff",
".",
"tagnoformat",
",",
"fh",
".",
"read",
"(",
"tiff",
".",
"tagnosize",
")",
")",
"[",
"0",
"]",
"if",
"tagno",
">",
"4096",
":",
"raise",
"TiffFileError",
"(",
"'suspicious number of tags'",
")",
"except",
"Exception",
":",
"raise",
"TiffFileError",
"(",
"'corrupted page list at offset %i'",
"%",
"self",
".",
"offset",
")",
"tagoffset",
"=",
"self",
".",
"offset",
"+",
"tiff",
".",
"tagnosize",
"# fh.tell()",
"tagsize",
"=",
"tiff",
".",
"tagsize",
"tagindex",
"=",
"-",
"tagsize",
"codeformat",
"=",
"tiff",
".",
"tagformat1",
"[",
":",
"2",
"]",
"tagbytes",
"=",
"fh",
".",
"read",
"(",
"tagsize",
"*",
"tagno",
")",
"for",
"_",
"in",
"range",
"(",
"tagno",
")",
":",
"tagindex",
"+=",
"tagsize",
"code",
"=",
"unpack",
"(",
"codeformat",
",",
"tagbytes",
"[",
"tagindex",
":",
"tagindex",
"+",
"2",
"]",
")",
"[",
"0",
"]",
"if",
"codes",
"and",
"code",
"not",
"in",
"codes",
":",
"continue",
"try",
":",
"tag",
"=",
"TiffTag",
"(",
"self",
".",
"parent",
",",
"tagbytes",
"[",
"tagindex",
":",
"tagindex",
"+",
"tagsize",
"]",
",",
"tagoffset",
"+",
"tagindex",
")",
"except",
"TiffFileError",
"as",
"exc",
":",
"log",
".",
"warning",
"(",
"'%s: %s'",
",",
"exc",
".",
"__class__",
".",
"__name__",
",",
"exc",
")",
"continue",
"tags",
".",
"append",
"(",
"(",
"code",
",",
"tag",
")",
")",
"return",
"tags"
] | Return list of (code, TiffTag) from file. | [
"Return",
"list",
"of",
"(",
"code",
"TiffTag",
")",
"from",
"file",
"."
] | python | train |
rndusr/torf | torf/_torrent.py | https://github.com/rndusr/torf/blob/df0363232daacd3f8c91aafddaa0623b8c28cbd2/torf/_torrent.py#L555-L559 | def infohash_base32(self):
"""Base32 encoded SHA1 info hash"""
self.validate()
info = self.convert()[b'info']
return b32encode(sha1(bencode(info)).digest()) | [
"def",
"infohash_base32",
"(",
"self",
")",
":",
"self",
".",
"validate",
"(",
")",
"info",
"=",
"self",
".",
"convert",
"(",
")",
"[",
"b'info'",
"]",
"return",
"b32encode",
"(",
"sha1",
"(",
"bencode",
"(",
"info",
")",
")",
".",
"digest",
"(",
")",
")"
] | Base32 encoded SHA1 info hash | [
"Base32",
"encoded",
"SHA1",
"info",
"hash"
] | python | train |
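A hedged usage sketch; Torrent and generate() are taken from the torf API suggested by the record's path, and the content path is hypothetical:

import torf

t = torf.Torrent(path='my_file.dat')  # hypothetical content path
t.generate()                          # hash the pieces so validation passes
print(t.infohash_base32)              # 32-character base32 form of the SHA1 info hash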
Infinidat/infi.clickhouse_orm | src/infi/clickhouse_orm/database.py | https://github.com/Infinidat/infi.clickhouse_orm/blob/595f2023e334e3925a5c3fbfdd6083a5992a7169/src/infi/clickhouse_orm/database.py#L154-L161 | def does_table_exist(self, model_class):
'''
Checks whether a table for the given model class already exists.
Note that this only checks for existence of a table with the expected name.
'''
sql = "SELECT count() FROM system.tables WHERE database = '%s' AND name = '%s'"
r = self._send(sql % (self.db_name, model_class.table_name()))
return r.text.strip() == '1' | [
"def",
"does_table_exist",
"(",
"self",
",",
"model_class",
")",
":",
"sql",
"=",
"\"SELECT count() FROM system.tables WHERE database = '%s' AND name = '%s'\"",
"r",
"=",
"self",
".",
"_send",
"(",
"sql",
"%",
"(",
"self",
".",
"db_name",
",",
"model_class",
".",
"table_name",
"(",
")",
")",
")",
"return",
"r",
".",
"text",
".",
"strip",
"(",
")",
"==",
"'1'"
] | Checks whether a table for the given model class already exists.
Note that this only checks for existence of a table with the expected name. | [
"Checks",
"whether",
"a",
"table",
"for",
"the",
"given",
"model",
"class",
"already",
"exists",
".",
"Note",
"that",
"this",
"only",
"checks",
"for",
"existence",
"of",
"a",
"table",
"with",
"the",
"expected",
"name",
"."
] | python | train |
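A hedged sketch of the check in use; Database and create_table come from the same ORM, and MyModel stands for some Model subclass defined elsewhere:

from infi.clickhouse_orm.database import Database

db = Database('default')
if not db.does_table_exist(MyModel):  # MyModel: hypothetical Model subclass
    db.create_table(MyModel)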
SUSE-Enceladus/ipa | ipa/ipa_distro.py | https://github.com/SUSE-Enceladus/ipa/blob/0845eed0ea25a27dbb059ad1016105fa60002228/ipa/ipa_distro.py#L150-L168 | def update(self, client):
"""Execute update command on instance."""
update_cmd = "{sudo} '{refresh};{update}'".format(
sudo=self.get_sudo_exec_wrapper(),
refresh=self.get_refresh_repo_cmd(),
update=self.get_update_cmd()
)
out = ''
try:
out = ipa_utils.execute_ssh_command(
client,
update_cmd
)
except Exception as error:
raise IpaDistroException(
'An error occurred updating instance: %s' % error
)
return out | [
"def",
"update",
"(",
"self",
",",
"client",
")",
":",
"update_cmd",
"=",
"\"{sudo} '{refresh};{update}'\"",
".",
"format",
"(",
"sudo",
"=",
"self",
".",
"get_sudo_exec_wrapper",
"(",
")",
",",
"refresh",
"=",
"self",
".",
"get_refresh_repo_cmd",
"(",
")",
",",
"update",
"=",
"self",
".",
"get_update_cmd",
"(",
")",
")",
"out",
"=",
"''",
"try",
":",
"out",
"=",
"ipa_utils",
".",
"execute_ssh_command",
"(",
"client",
",",
"update_cmd",
")",
"except",
"Exception",
"as",
"error",
":",
"raise",
"IpaDistroException",
"(",
"'An error occurred updating instance: %s'",
"%",
"error",
")",
"return",
"out"
] | Execute update command on instance. | [
"Execute",
"update",
"command",
"on",
"instance",
"."
] | python | train |
jrspruitt/ubi_reader | ubireader/ubi/block/layout.py | https://github.com/jrspruitt/ubi_reader/blob/7079dd380c1c9896bced30d6d34e8780b9181597/ubireader/ubi/block/layout.py#L74-L91 | def associate_blocks(blocks, layout_pairs, start_peb_num):
"""Group block indexes with appropriate layout pairs
Arguments:
List:blocks -- List of block objects
List:layout_pairs -- List of grouped layout blocks
Int:start_peb_num -- Number of the PEB to start from.
Returns:
List -- Layout block pairs grouped with associated block ranges.
"""
seq_blocks = []
for layout_pair in layout_pairs:
seq_blocks = sort.by_image_seq(blocks, blocks[layout_pair[0]].ec_hdr.image_seq)
layout_pair.append(seq_blocks)
return layout_pairs | [
"def",
"associate_blocks",
"(",
"blocks",
",",
"layout_pairs",
",",
"start_peb_num",
")",
":",
"seq_blocks",
"=",
"[",
"]",
"for",
"layout_pair",
"in",
"layout_pairs",
":",
"seq_blocks",
"=",
"sort",
".",
"by_image_seq",
"(",
"blocks",
",",
"blocks",
"[",
"layout_pair",
"[",
"0",
"]",
"]",
".",
"ec_hdr",
".",
"image_seq",
")",
"layout_pair",
".",
"append",
"(",
"seq_blocks",
")",
"return",
"layout_pairs"
] | Group block indexes with appropriate layout pairs
Arguments:
List:blocks -- List of block objects
List:layout_pairs -- List of grouped layout blocks
Int:start_peb_num -- Number of the PEB to start from.
Returns:
List -- Layout block pairs grouped with associated block ranges. | [
"Group",
"block",
"indexes",
"with",
"appropriate",
"layout",
"pairs"
] | python | train |
rocky/python3-trepan | trepan/post_mortem.py | https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/post_mortem.py#L80-L168 | def post_mortem(exc=None, frameno=1, dbg=None):
"""Enter debugger read loop after your program has crashed.
exc is a triple like you get back from sys.exc_info. If no exc
parameter, is supplied, the values from sys.last_type,
sys.last_value, sys.last_traceback are used. And if these don't
exist either we'll assume that sys.exc_info() contains what we
want and frameno is the index location of where we want to start.
'frameno' specifies how many frames to ignore in the traceback.
The default is 1, that is, we don't need to show the immediate
call into post_mortem. If you have wrapper functions that call
this one, you may want to increase frameno.
"""
if dbg is None:
# Check for a global debugger object
if Mdebugger.debugger_obj is None:
Mdebugger.debugger_obj = Mdebugger.Trepan()
pass
dbg = Mdebugger.debugger_obj
pass
re_bogus_file = re.compile("^<.+>$")
if exc[0] is None:
# frameno+1 because we are about to add one more level of call
# in get_last_or_frame_exception
exc = get_last_or_frame_exception()
if exc[0] is None:
print("Can't find traceback for post_mortem "
"in sys.last_traceback or sys.exec_info()")
return
pass
exc_type, exc_value, exc_tb = exc
dbg.core.execution_status = ('Terminated with unhandled exception %s'
% exc_type)
# tb has least-recent traceback entry first. We want the most-recent
# entry. Also we'll pick out a mainpyfile name if it hasn't previously
# been set.
if exc_tb is not None:
while exc_tb.tb_next is not None:
filename = exc_tb.tb_frame.f_code.co_filename
if (dbg.mainpyfile and 0 == len(dbg.mainpyfile)
and not re_bogus_file.match(filename)):
dbg.mainpyfile = filename
pass
exc_tb = exc_tb.tb_next
pass
dbg.core.processor.curframe = exc_tb.tb_frame
pass
if 0 == len(dbg.program_sys_argv):
# Fake program (run command) args since we weren't called with any
dbg.program_sys_argv = list(sys.argv[1:])
dbg.program_sys_argv[:0] = [dbg.mainpyfile]
# if 0 == len(dbg._sys_argv):
# # Fake script invocation (restart) args since we don't have any
# dbg._sys_argv = list(dbg.program_sys_argv)
# dbg._sys_argv[:0] = [__title__]
try:
# # FIXME: This can be called from except hook in which case we
# # need this. Dunno why though.
# try:
# _pydb_trace.set_trace(t.tb_frame)
# except:
# pass
# Possibly a bug in Python 2.5. Why f.f_lineno is
# not always equal to t.tb_lineno, I don't know.
f = exc_tb.tb_frame
if f and f.f_lineno != exc_tb.tb_lineno : f = f.f_back
dbg.core.processor.event_processor(f, 'exception', exc, 'Trepan3k:pm')
except DebuggerRestart:
while True:
sys.argv = list(dbg._program_sys_argv)
dbg.msg("Restarting %s with arguments:\n\t%s"
% (dbg.filename(dbg.mainpyfile),
" ".join(dbg._program_sys_argv[1:])))
try:
dbg.run_script(dbg.mainpyfile)
except DebuggerRestart:
pass
pass
except DebuggerQuit:
pass
return | [
"def",
"post_mortem",
"(",
"exc",
"=",
"None",
",",
"frameno",
"=",
"1",
",",
"dbg",
"=",
"None",
")",
":",
"if",
"dbg",
"is",
"None",
":",
"# Check for a global debugger object",
"if",
"Mdebugger",
".",
"debugger_obj",
"is",
"None",
":",
"Mdebugger",
".",
"debugger_obj",
"=",
"Mdebugger",
".",
"Trepan",
"(",
")",
"pass",
"dbg",
"=",
"Mdebugger",
".",
"debugger_obj",
"pass",
"re_bogus_file",
"=",
"re",
".",
"compile",
"(",
"\"^<.+>$\"",
")",
"if",
"exc",
"[",
"0",
"]",
"is",
"None",
":",
"# frameno+1 because we are about to add one more level of call",
"# in get_last_or_frame_exception",
"exc",
"=",
"get_last_or_frame_exception",
"(",
")",
"if",
"exc",
"[",
"0",
"]",
"is",
"None",
":",
"print",
"(",
"\"Can't find traceback for post_mortem \"",
"\"in sys.last_traceback or sys.exec_info()\"",
")",
"return",
"pass",
"exc_type",
",",
"exc_value",
",",
"exc_tb",
"=",
"exc",
"dbg",
".",
"core",
".",
"execution_status",
"=",
"(",
"'Terminated with unhandled exception %s'",
"%",
"exc_type",
")",
"# tb has least-recent traceback entry first. We want the most-recent",
"# entry. Also we'll pick out a mainpyfile name if it hasn't previously",
"# been set.",
"if",
"exc_tb",
"is",
"not",
"None",
":",
"while",
"exc_tb",
".",
"tb_next",
"is",
"not",
"None",
":",
"filename",
"=",
"exc_tb",
".",
"tb_frame",
".",
"f_code",
".",
"co_filename",
"if",
"(",
"dbg",
".",
"mainpyfile",
"and",
"0",
"==",
"len",
"(",
"dbg",
".",
"mainpyfile",
")",
"and",
"not",
"re_bogus_file",
".",
"match",
"(",
"filename",
")",
")",
":",
"dbg",
".",
"mainpyfile",
"=",
"filename",
"pass",
"exc_tb",
"=",
"exc_tb",
".",
"tb_next",
"pass",
"dbg",
".",
"core",
".",
"processor",
".",
"curframe",
"=",
"exc_tb",
".",
"tb_frame",
"pass",
"if",
"0",
"==",
"len",
"(",
"dbg",
".",
"program_sys_argv",
")",
":",
"# Fake program (run command) args since we weren't called with any",
"dbg",
".",
"program_sys_argv",
"=",
"list",
"(",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
")",
"dbg",
".",
"program_sys_argv",
"[",
":",
"0",
"]",
"=",
"[",
"dbg",
".",
"mainpyfile",
"]",
"# if 0 == len(dbg._sys_argv):",
"# # Fake script invocation (restart) args since we don't have any",
"# dbg._sys_argv = list(dbg.program_sys_argv)",
"# dbg._sys_argv[:0] = [__title__]",
"try",
":",
"# # FIXME: This can be called from except hook in which case we",
"# # need this. Dunno why though.",
"# try:",
"# _pydb_trace.set_trace(t.tb_frame)",
"# except:",
"# pass",
"# Possibly a bug in Python 2.5. Why f.f_lineno is",
"# not always equal to t.tb_lineno, I don't know.",
"f",
"=",
"exc_tb",
".",
"tb_frame",
"if",
"f",
"and",
"f",
".",
"f_lineno",
"!=",
"exc_tb",
".",
"tb_lineno",
":",
"f",
"=",
"f",
".",
"f_back",
"dbg",
".",
"core",
".",
"processor",
".",
"event_processor",
"(",
"f",
",",
"'exception'",
",",
"exc",
",",
"'Trepan3k:pm'",
")",
"except",
"DebuggerRestart",
":",
"while",
"True",
":",
"sys",
".",
"argv",
"=",
"list",
"(",
"dbg",
".",
"_program_sys_argv",
")",
"dbg",
".",
"msg",
"(",
"\"Restarting %s with arguments:\\n\\t%s\"",
"%",
"(",
"dbg",
".",
"filename",
"(",
"dbg",
".",
"mainpyfile",
")",
",",
"\" \"",
".",
"join",
"(",
"dbg",
".",
"_program_sys_argv",
"[",
"1",
":",
"]",
")",
")",
")",
"try",
":",
"dbg",
".",
"run_script",
"(",
"dbg",
".",
"mainpyfile",
")",
"except",
"DebuggerRestart",
":",
"pass",
"pass",
"except",
"DebuggerQuit",
":",
"pass",
"return"
] | Enter debugger read loop after your program has crashed.
exc is a triple like you get back from sys.exc_info. If no exc
parameter, is supplied, the values from sys.last_type,
sys.last_value, sys.last_traceback are used. And if these don't
exist either we'll assume that sys.exc_info() contains what we
want and frameno is the index location of where we want to start.
'frameno' specifies how many frames to ignore in the traceback.
The default is 1, that is, we don't need to show the immediate
call into post_mortem. If you have wrapper functions that call
this one, you may want to increase frameno. | [
"Enter",
"debugger",
"read",
"loop",
"after",
"your",
"program",
"has",
"crashed",
"."
] | python | test |
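Typical invocation, per the docstring: call it from an except block with the sys.exc_info() triple.

import sys

try:
    broken()                     # hypothetical failing call
except Exception:
    post_mortem(sys.exc_info())  # enter the debugger at the crash frame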
dmaust/rounding | rounding/stochastic.py | https://github.com/dmaust/rounding/blob/06731dff803c30c0741e3199888e7e5266ad99cc/rounding/stochastic.py#L58-L66 | def sround(x, precision=0):
"""
Round a single number using default non-deterministic generator.
@param x: to round.
@param precision: decimal places to round.
"""
sr = StochasticRound(precision=precision)
return sr.round(x) | [
"def",
"sround",
"(",
"x",
",",
"precision",
"=",
"0",
")",
":",
"sr",
"=",
"StochasticRound",
"(",
"precision",
"=",
"precision",
")",
"return",
"sr",
".",
"round",
"(",
"x",
")"
] | Round a single number using default non-deterministic generator.
@param x: to round.
@param precision: decimal places to round. | [
"Round",
"a",
"single",
"number",
"using",
"default",
"non",
"-",
"deterministic",
"generator",
"."
] | python | train |
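A quick demonstration of the unbiased behavior; each call returns one of the two neighboring values, so the mean converges to the input:

vals = [sround(2.5) for _ in range(10000)]
print(set(vals))              # typically {2.0, 3.0}
print(sum(vals) / len(vals))  # close to 2.5 on average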
exosite-labs/pyonep | pyonep/onep.py | https://github.com/exosite-labs/pyonep/blob/d27b621b00688a542e0adcc01f3e3354c05238a1/pyonep/onep.py#L501-L512 | def wait(self, auth, resource, options, defer=False):
""" This is a HTTP Long Polling API which allows a user to wait on specific resources to be
updated.
Args:
auth: <cik> for authentication
resource: <ResourceID> to specify what resource to wait on.
options: Options for the wait including a timeout (in ms), (max 5min) and start time
(null acts as when the request is received)
"""
# let the server control the timeout
return self._call('wait', auth, [resource, options], defer, notimeout=True) | [
"def",
"wait",
"(",
"self",
",",
"auth",
",",
"resource",
",",
"options",
",",
"defer",
"=",
"False",
")",
":",
"# let the server control the timeout",
"return",
"self",
".",
"_call",
"(",
"'wait'",
",",
"auth",
",",
"[",
"resource",
",",
"options",
"]",
",",
"defer",
",",
"notimeout",
"=",
"True",
")"
] | This is a HTTP Long Polling API which allows a user to wait on specific resources to be
updated.
Args:
auth: <cik> for authentication
resource: <ResourceID> to specify what resource to wait on.
options: Options for the wait including a timeout (in ms), (max 5min) and start time
(null acts as when the request is received)
"This",
"is",
"a",
"HTTP",
"Long",
"Polling",
"API",
"which",
"allows",
"a",
"user",
"to",
"wait",
"on",
"specific",
"resources",
"to",
"be",
"updated",
"."
] | python | train |
commontk/ctk-cli | ctk_cli/module.py | https://github.com/commontk/ctk-cli/blob/ddd8de62b586491ad6e6750133cc1f0e11f37b11/ctk_cli/module.py#L259-L265 | def parseValue(self, value):
"""Parse the given value and return result."""
if self.isVector():
return list(map(self._pythonType, value.split(',')))
if self.typ == 'boolean':
return _parseBool(value)
return self._pythonType(value) | [
"def",
"parseValue",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"isVector",
"(",
")",
":",
"return",
"list",
"(",
"map",
"(",
"self",
".",
"_pythonType",
",",
"value",
".",
"split",
"(",
"','",
")",
")",
")",
"if",
"self",
".",
"typ",
"==",
"'boolean'",
":",
"return",
"_parseBool",
"(",
"value",
")",
"return",
"self",
".",
"_pythonType",
"(",
"value",
")"
] | Parse the given value and return result. | [
"Parse",
"the",
"given",
"value",
"and",
"return",
"result",
"."
] | python | train |
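A behavior sketch under stated assumptions: p is a parameter object from this module whose typ, _pythonType and isVector() are configured as noted in the comments.

p.parseValue('1,2,3')  # if isVector() holds and _pythonType is int -> [1, 2, 3]
p.parseValue('true')   # if typ == 'boolean' -> True via _parseBool
p.parseValue('2.5')    # otherwise -> _pythonType('2.5'), e.g. 2.5 for float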
dddomodossola/remi | remi/gui.py | https://github.com/dddomodossola/remi/blob/85206f62220662bb7ecd471042268def71ccad28/remi/gui.py#L955-L967 | def repr(self, changed_widgets=None):
"""It is used to automatically represent the object to HTML format
packs all the attributes, children and so on.
Args:
changed_widgets (dict): A dictionary containing a collection of tags that have to be updated.
The tag that have to be updated is the key, and the value is its textual repr.
"""
if changed_widgets is None:
changed_widgets={}
local_changed_widgets = {}
self._set_updated()
return ''.join(('<', self.type, '>\n', self.innerHTML(local_changed_widgets), '\n</', self.type, '>')) | [
"def",
"repr",
"(",
"self",
",",
"changed_widgets",
"=",
"None",
")",
":",
"if",
"changed_widgets",
"is",
"None",
":",
"changed_widgets",
"=",
"{",
"}",
"local_changed_widgets",
"=",
"{",
"}",
"self",
".",
"_set_updated",
"(",
")",
"return",
"''",
".",
"join",
"(",
"(",
"'<'",
",",
"self",
".",
"type",
",",
"'>\\n'",
",",
"self",
".",
"innerHTML",
"(",
"local_changed_widgets",
")",
",",
"'\\n</'",
",",
"self",
".",
"type",
",",
"'>'",
")",
")"
] | It is used to automatically represent the object to HTML format
packs all the attributes, children and so on.
Args:
changed_widgets (dict): A dictionary containing a collection of tags that have to be updated.
The tag that have to be updated is the key, and the value is its textual repr. | [
"It",
"is",
"used",
"to",
"automatically",
"represent",
"the",
"object",
"to",
"HTML",
"format",
"packs",
"all",
"the",
"attributes",
"children",
"and",
"so",
"on",
"."
] | python | train |
hover2pi/svo_filters | svo_filters/svo.py | https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L393-L415 | def flux_units(self, units):
"""
A setter for the flux units
Parameters
----------
units: str, astropy.units.core.PrefixUnit
The desired units of the zeropoint flux density
"""
# Check that the units are valid
dtypes = (q.core.PrefixUnit, q.quantity.Quantity, q.core.CompositeUnit)
if not isinstance(units, dtypes):
raise ValueError(units, "units not understood.")
# Check that the units changed
if units != self.flux_units:
# Convert to new units
sfd = q.spectral_density(self.wave_eff)
self.zp = self.zp.to(units, equivalencies=sfd)
# Store new units
self._flux_units = units | [
"def",
"flux_units",
"(",
"self",
",",
"units",
")",
":",
"# Check that the units are valid",
"dtypes",
"=",
"(",
"q",
".",
"core",
".",
"PrefixUnit",
",",
"q",
".",
"quantity",
".",
"Quantity",
",",
"q",
".",
"core",
".",
"CompositeUnit",
")",
"if",
"not",
"isinstance",
"(",
"units",
",",
"dtypes",
")",
":",
"raise",
"ValueError",
"(",
"units",
",",
"\"units not understood.\"",
")",
"# Check that the units changed",
"if",
"units",
"!=",
"self",
".",
"flux_units",
":",
"# Convert to new units",
"sfd",
"=",
"q",
".",
"spectral_density",
"(",
"self",
".",
"wave_eff",
")",
"self",
".",
"zp",
"=",
"self",
".",
"zp",
".",
"to",
"(",
"units",
",",
"equivalencies",
"=",
"sfd",
")",
"# Store new units",
"self",
".",
"_flux_units",
"=",
"units"
] | A setter for the flux units
Parameters
----------
units: str, astropy.units.core.PrefixUnit
The desired units of the zeropoint flux density | [
"A",
"setter",
"for",
"the",
"flux",
"units"
] | python | train |
apache/incubator-heron | heron/tools/tracker/src/python/handlers/topologieshandler.py | https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/tracker/src/python/handlers/topologieshandler.py#L60-L108 | def get(self):
""" get method """
# Get all the values for parameter "cluster".
clusters = self.get_arguments(constants.PARAM_CLUSTER)
# Get all the values for parameter "environ".
environs = self.get_arguments(constants.PARAM_ENVIRON)
# Get role
role = self.get_argument_role()
ret = {}
topologies = self.tracker.topologies
for topology in topologies:
cluster = topology.cluster
environ = topology.environ
execution_state = topology.execution_state
if not cluster or not execution_state or not environ:
continue
topo_role = execution_state.role
if not topo_role:
continue
# This cluster is not asked for.
# Note that "if not clusters", then
# we show for all the clusters.
if clusters and cluster not in clusters:
continue
# This environ is not asked for.
# Note that "if not environs", then
# we show for all the environs.
if environs and environ not in environs:
continue
# This role is not asked for.
# Note that "if not role", then
# we show for all the roles.
if role and role != topo_role:
continue
if cluster not in ret:
ret[cluster] = {}
if topo_role not in ret[cluster]:
ret[cluster][topo_role] = {}
if environ not in ret[cluster][topo_role]:
ret[cluster][topo_role][environ] = []
ret[cluster][topo_role][environ].append(topology.name)
self.write_success_response(ret) | [
"def",
"get",
"(",
"self",
")",
":",
"# Get all the values for parameter \"cluster\".",
"clusters",
"=",
"self",
".",
"get_arguments",
"(",
"constants",
".",
"PARAM_CLUSTER",
")",
"# Get all the values for parameter \"environ\".",
"environs",
"=",
"self",
".",
"get_arguments",
"(",
"constants",
".",
"PARAM_ENVIRON",
")",
"# Get role",
"role",
"=",
"self",
".",
"get_argument_role",
"(",
")",
"ret",
"=",
"{",
"}",
"topologies",
"=",
"self",
".",
"tracker",
".",
"topologies",
"for",
"topology",
"in",
"topologies",
":",
"cluster",
"=",
"topology",
".",
"cluster",
"environ",
"=",
"topology",
".",
"environ",
"execution_state",
"=",
"topology",
".",
"execution_state",
"if",
"not",
"cluster",
"or",
"not",
"execution_state",
"or",
"not",
"environ",
":",
"continue",
"topo_role",
"=",
"execution_state",
".",
"role",
"if",
"not",
"topo_role",
":",
"continue",
"# This cluster is not asked for.",
"# Note that \"if not clusters\", then",
"# we show for all the clusters.",
"if",
"clusters",
"and",
"cluster",
"not",
"in",
"clusters",
":",
"continue",
"# This environ is not asked for.",
"# Note that \"if not environs\", then",
"# we show for all the environs.",
"if",
"environs",
"and",
"environ",
"not",
"in",
"environs",
":",
"continue",
"# This role is not asked for.",
"# Note that \"if not role\", then",
"# we show for all the roles.",
"if",
"role",
"and",
"role",
"!=",
"topo_role",
":",
"continue",
"if",
"cluster",
"not",
"in",
"ret",
":",
"ret",
"[",
"cluster",
"]",
"=",
"{",
"}",
"if",
"topo_role",
"not",
"in",
"ret",
"[",
"cluster",
"]",
":",
"ret",
"[",
"cluster",
"]",
"[",
"topo_role",
"]",
"=",
"{",
"}",
"if",
"environ",
"not",
"in",
"ret",
"[",
"cluster",
"]",
"[",
"topo_role",
"]",
":",
"ret",
"[",
"cluster",
"]",
"[",
"topo_role",
"]",
"[",
"environ",
"]",
"=",
"[",
"]",
"ret",
"[",
"cluster",
"]",
"[",
"topo_role",
"]",
"[",
"environ",
"]",
".",
"append",
"(",
"topology",
".",
"name",
")",
"self",
".",
"write_success_response",
"(",
"ret",
")"
] | get method | [
"get",
"method"
] | python | valid |
jmfederico/django-use-email-as-username | django_use_email_as_username/management/commands/create_custom_user_app.py | https://github.com/jmfederico/django-use-email-as-username/blob/401e404b822f7ba5b3ef34b06ce095e564f32912/django_use_email_as_username/management/commands/create_custom_user_app.py#L26-L30 | def handle(self, **options):
"""Call "startapp" to generate app with custom user model."""
template = os.path.dirname(os.path.abspath(__file__)) + "/app_template"
name = options.pop("name")
call_command("startapp", name, template=template, **options) | [
"def",
"handle",
"(",
"self",
",",
"*",
"*",
"options",
")",
":",
"template",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
"+",
"\"/app_template\"",
"name",
"=",
"options",
".",
"pop",
"(",
"\"name\"",
")",
"call_command",
"(",
"\"startapp\"",
",",
"name",
",",
"template",
"=",
"template",
",",
"*",
"*",
"options",
")"
] | Call "startapp" to generate app with custom user model. | [
"Call",
"startapp",
"to",
"generate",
"app",
"with",
"custom",
"user",
"model",
"."
] | python | train |
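The command simply proxies Django's startapp with a bundled template; a typical invocation (the app name is your choice) would be:

python manage.py create_custom_user_app custom_user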
matousc89/padasip | padasip/filters/ap.py | https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/filters/ap.py#L229-L285 | def run(self, d, x):
"""
This function filters multiple samples in a row.
**Args:**
* `d` : desired value (1 dimensional array)
* `x` : input matrix (2-dimensional array). Rows are samples,
columns are input arrays.
**Returns:**
* `y` : output value (1 dimensional array).
The size corresponds with the desired value.
* `e` : filter error for every sample (1 dimensional array).
The size corresponds with the desired value.
* `w` : history of all weights (2 dimensional array).
Every row is set of the weights for given sample.
"""
# measure the data and check if the dimensions agree
N = len(x)
if not len(d) == N:
raise ValueError('The length of vector d and matrix x must agree.')
self.n = len(x[0])
# prepare data
try:
x = np.array(x)
d = np.array(d)
except:
raise ValueError('Impossible to convert x or d to a numpy array')
# create empty arrays
y = np.zeros(N)
e = np.zeros(N)
self.w_history = np.zeros((N,self.n))
# adaptation loop
for k in range(N):
self.w_history[k,:] = self.w
# create input matrix and target vector
self.x_mem[:,1:] = self.x_mem[:,:-1]
self.x_mem[:,0] = x[k]
self.d_mem[1:] = self.d_mem[:-1]
self.d_mem[0] = d[k]
# estimate output and error
self.y_mem = np.dot(self.x_mem.T, self.w)
self.e_mem = self.d_mem - self.y_mem
y[k] = self.y_mem[0]
e[k] = self.e_mem[0]
# update
dw_part1 = np.dot(self.x_mem.T, self.x_mem) + self.ide_eps
dw_part2 = np.linalg.solve(dw_part1, self.ide)
dw = np.dot(self.x_mem, np.dot(dw_part2, self.e_mem))
self.w += self.mu * dw
return y, e, self.w_history | [
"def",
"run",
"(",
"self",
",",
"d",
",",
"x",
")",
":",
"# measure the data and check if the dimmension agree",
"N",
"=",
"len",
"(",
"x",
")",
"if",
"not",
"len",
"(",
"d",
")",
"==",
"N",
":",
"raise",
"ValueError",
"(",
"'The length of vector d and matrix x must agree.'",
")",
"self",
".",
"n",
"=",
"len",
"(",
"x",
"[",
"0",
"]",
")",
"# prepare data",
"try",
":",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
"d",
"=",
"np",
".",
"array",
"(",
"d",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'Impossible to convert x or d to a numpy array'",
")",
"# create empty arrays",
"y",
"=",
"np",
".",
"zeros",
"(",
"N",
")",
"e",
"=",
"np",
".",
"zeros",
"(",
"N",
")",
"self",
".",
"w_history",
"=",
"np",
".",
"zeros",
"(",
"(",
"N",
",",
"self",
".",
"n",
")",
")",
"# adaptation loop",
"for",
"k",
"in",
"range",
"(",
"N",
")",
":",
"self",
".",
"w_history",
"[",
"k",
",",
":",
"]",
"=",
"self",
".",
"w",
"# create input matrix and target vector",
"self",
".",
"x_mem",
"[",
":",
",",
"1",
":",
"]",
"=",
"self",
".",
"x_mem",
"[",
":",
",",
":",
"-",
"1",
"]",
"self",
".",
"x_mem",
"[",
":",
",",
"0",
"]",
"=",
"x",
"[",
"k",
"]",
"self",
".",
"d_mem",
"[",
"1",
":",
"]",
"=",
"self",
".",
"d_mem",
"[",
":",
"-",
"1",
"]",
"self",
".",
"d_mem",
"[",
"0",
"]",
"=",
"d",
"[",
"k",
"]",
"# estimate output and error",
"self",
".",
"y_mem",
"=",
"np",
".",
"dot",
"(",
"self",
".",
"x_mem",
".",
"T",
",",
"self",
".",
"w",
")",
"self",
".",
"e_mem",
"=",
"self",
".",
"d_mem",
"-",
"self",
".",
"y_mem",
"y",
"[",
"k",
"]",
"=",
"self",
".",
"y_mem",
"[",
"0",
"]",
"e",
"[",
"k",
"]",
"=",
"self",
".",
"e_mem",
"[",
"0",
"]",
"# update",
"dw_part1",
"=",
"np",
".",
"dot",
"(",
"self",
".",
"x_mem",
".",
"T",
",",
"self",
".",
"x_mem",
")",
"+",
"self",
".",
"ide_eps",
"dw_part2",
"=",
"np",
".",
"linalg",
".",
"solve",
"(",
"dw_part1",
",",
"self",
".",
"ide",
")",
"dw",
"=",
"np",
".",
"dot",
"(",
"self",
".",
"x_mem",
",",
"np",
".",
"dot",
"(",
"dw_part2",
",",
"self",
".",
"e_mem",
")",
")",
"self",
".",
"w",
"+=",
"self",
".",
"mu",
"*",
"dw",
"return",
"y",
",",
"e",
",",
"self",
".",
"w_history"
] | This function filters multiple samples in a row.
**Args:**
* `d` : desired value (1 dimensional array)
* `x` : input matrix (2-dimensional array). Rows are samples,
columns are input arrays.
**Returns:**
* `y` : output value (1 dimensional array).
The size corresponds with the desired value.
* `e` : filter error for every sample (1 dimensional array).
The size corresponds with the desired value.
* `w` : history of all weights (2 dimensional array).
Every row is set of the weights for given sample. | [
"This",
"function",
"filters",
"multiple",
"samples",
"in",
"a",
"row",
"."
] | python | train |
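A hedged end-to-end sketch with synthetic data; FilterAP construction follows the padasip convention suggested by the record (n is the input row length) and mu is an illustrative step size:

import numpy as np
import padasip as pa

N, n = 500, 4
x = np.random.normal(0, 1, (N, n))  # rows are input samples
d = 2*x[:, 0] - x[:, 3] + 0.01*np.random.normal(0, 1, N)  # noisy target
f = pa.filters.FilterAP(n=n, mu=0.5)  # affine projection filter
y, e, w = f.run(d, x)  # outputs, errors, weight history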
GearPlug/payu-python | payu/recurring.py | https://github.com/GearPlug/payu-python/blob/47ec5c9fc89f1f89a53ec0a68c84f358bbe3394e/payu/recurring.py#L240-L303 | def create_subscription(self, *, customer_id, credit_card_token, plan_code, quantity=None, installments=None,
trial_days=None, immediate_payment=None, extra1=None, extra2=None, delivery_address=None,
notify_url=None, recurring_bill_items=None):
"""
Creating a new subscription of a client to a plan.
Args:
customer_id: Customer that will be associated to the subscription.
You can find more information in the "Customer" section of this page.
credit_card_token: Customer's credit card that is selected to make the payment.
You can find more information in the "Credit card" section of this page.
plan_code: Plan that will be associated to the subscription.
You can find more information in the "Plan" section of this page.
quantity: Total amount of plans that will be acquired with the subscription.
Numeric.
installments: Total amount of installments to defer the payment.
Numeric.
trial_days: Total amount of trial days of the subscription.
This variable has preference over the plan's trial days.
Numeric.
immediate_payment:
extra1:
extra2:
delivery_address:
notify_url:
recurring_bill_items:
Returns:
"""
payload = {
"quantity": quantity,
"installments": installments,
"trialDays": trial_days,
"immediatePayment": immediate_payment,
"extra1": extra1,
"extra2": extra2,
"customer": {
"id": customer_id,
"creditCards": [
{
"token": credit_card_token
}
]
},
"plan": {
"planCode": plan_code
},
"deliveryAddress": delivery_address,
"notifyUrl": notify_url,
"recurringBillItems": recurring_bill_items
}
return self.client._post(self.url + 'subscriptions', json=payload, headers=self.get_headers()) | [
"def",
"create_subscription",
"(",
"self",
",",
"*",
",",
"customer_id",
",",
"credit_card_token",
",",
"plan_code",
",",
"quantity",
"=",
"None",
",",
"installments",
"=",
"None",
",",
"trial_days",
"=",
"None",
",",
"immediate_payment",
"=",
"None",
",",
"extra1",
"=",
"None",
",",
"extra2",
"=",
"None",
",",
"delivery_address",
"=",
"None",
",",
"notify_url",
"=",
"None",
",",
"recurring_bill_items",
"=",
"None",
")",
":",
"payload",
"=",
"{",
"\"quantity\"",
":",
"quantity",
",",
"\"installments\"",
":",
"installments",
",",
"\"trialDays\"",
":",
"trial_days",
",",
"\"immediatePayment\"",
":",
"immediate_payment",
",",
"\"extra1\"",
":",
"extra1",
",",
"\"extra2\"",
":",
"extra2",
",",
"\"customer\"",
":",
"{",
"\"id\"",
":",
"customer_id",
",",
"\"creditCards\"",
":",
"[",
"{",
"\"token\"",
":",
"credit_card_token",
"}",
"]",
"}",
",",
"\"plan\"",
":",
"{",
"\"planCode\"",
":",
"plan_code",
"}",
",",
"\"deliveryAddress\"",
":",
"delivery_address",
",",
"\"notifyUrl\"",
":",
"notify_url",
",",
"\"recurringBillItems\"",
":",
"recurring_bill_items",
"}",
"return",
"self",
".",
"client",
".",
"_post",
"(",
"self",
".",
"url",
"+",
"'subscriptions'",
",",
"json",
"=",
"payload",
",",
"headers",
"=",
"self",
".",
"get_headers",
"(",
")",
")"
] | Creating a new subscription of a client to a plan.
Args:
customer_id: Customer that will be associated to the subscription.
You can find more information in the "Customer" section of this page.
credit_card_token: Customer's credit card that is selected to make the payment.
You can find more information in the "Credit card" section of this page.
plan_code: Plan that will be associated to the subscription.
You can find more information in the "Plan" section of this page.
quantity: Total amount of plans that will be acquired with the subscription.
Numeric.
installments: Total amount of installments to defer the payment.
Numeric.
trial_days: Total amount of trial days of the subscription.
This variable has preference over the plan's trial days.
Numeric.
immediate_payment:
extra1:
extra2:
delivery_address:
notify_url:
recurring_bill_items:
Returns: | [
"Creating",
"a",
"new",
"subscription",
"of",
"a",
"client",
"to",
"a",
"plan",
"."
] | python | train |
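A hedged call sketch; recurring stands for an instance of the class this method belongs to, and all ids, tokens and codes are hypothetical:

subscription = recurring.create_subscription(
    customer_id='abc123',
    credit_card_token='tok-456',
    plan_code='monthly-basic',
    quantity=1,
    installments=1,
)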
jwodder/txtble | txtble/util.py | https://github.com/jwodder/txtble/blob/31d39ed6c15df13599c704a757cd36e1cd57cdd1/txtble/util.py#L94-L111 | def with_color_stripped(f):
"""
A function decorator for applying to `len` or imitators thereof that strips
ANSI color sequences from a string before passing it on. If any color
sequences are not followed by a reset sequence, an `UnterminatedColorError`
is raised.
"""
@wraps(f)
def colored_len(s):
s2 = re.sub(
COLOR_BEGIN_RGX + '(.*?)' + COLOR_END_RGX,
lambda m: re.sub(COLOR_BEGIN_RGX, '', m.group(1)),
s,
)
if re.search(COLOR_BEGIN_RGX, s2):
raise UnterminatedColorError(s)
return f(re.sub(COLOR_END_RGX, '', s2))
return colored_len | [
"def",
"with_color_stripped",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"colored_len",
"(",
"s",
")",
":",
"s2",
"=",
"re",
".",
"sub",
"(",
"COLOR_BEGIN_RGX",
"+",
"'(.*?)'",
"+",
"COLOR_END_RGX",
",",
"lambda",
"m",
":",
"re",
".",
"sub",
"(",
"COLOR_BEGIN_RGX",
",",
"''",
",",
"m",
".",
"group",
"(",
"1",
")",
")",
",",
"s",
",",
")",
"if",
"re",
".",
"search",
"(",
"COLOR_BEGIN_RGX",
",",
"s2",
")",
":",
"raise",
"UnterminatedColorError",
"(",
"s",
")",
"return",
"f",
"(",
"re",
".",
"sub",
"(",
"COLOR_END_RGX",
",",
"''",
",",
"s2",
")",
")",
"return",
"colored_len"
] | A function decorator for applying to `len` or imitators thereof that strips
ANSI color sequences from a string before passing it on. If any color
sequences are not followed by a reset sequence, an `UnterminatedColorError`
is raised. | [
"A",
"function",
"decorator",
"for",
"applying",
"to",
"len",
"or",
"imitators",
"thereof",
"that",
"strips",
"ANSI",
"color",
"sequences",
"from",
"a",
"string",
"before",
"passing",
"it",
"on",
".",
"If",
"any",
"color",
"sequences",
"are",
"not",
"followed",
"by",
"a",
"reset",
"sequence",
"an",
"UnterminatedColorError",
"is",
"raised",
"."
] | python | train |
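Usage sketch for the decorator above; COLOR_BEGIN_RGX and COLOR_END_RGX are defined elsewhere in the same module and are assumed to match ANSI color-start and reset sequences:

colored_len = with_color_stripped(len)
colored_len('\033[31mred\033[0m')  # -> 3; the escape sequences are not counted
colored_len('\033[31mred')         # raises UnterminatedColorError (no reset)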
spacetelescope/acstools | acstools/acszpt.py | https://github.com/spacetelescope/acstools/blob/bbf8dd080cefcbf88529ec87c420f9e1b8002554/acstools/acszpt.py#L206-L248 | def _check_inputs(self):
"""Check the inputs to ensure they are valid.
Returns
-------
status : bool
True if all inputs are valid, False if one is not.
"""
valid_detector = True
valid_filter = True
valid_date = True
# Determine if the submitted detector is valid
if self.detector not in self._valid_detectors:
msg = ('{} is not a valid detector option.\n'
'Please choose one of the following:\n{}\n'
'{}'.format(self.detector,
'\n'.join(self._valid_detectors),
self._msg_div))
LOG.error(msg)
valid_detector = False
# Determine if the submitted filter is valid
if (self.filt is not None and valid_detector and
self.filt not in self.valid_filters[self.detector]):
msg = ('{} is not a valid filter for {}\n'
'Please choose one of the following:\n{}\n'
'{}'.format(self.filt, self.detector,
'\n'.join(self.valid_filters[self.detector]),
self._msg_div))
LOG.error(msg)
valid_filter = False
# Determine if the submitted date is valid
date_check = self._check_date()
if date_check is not None:
LOG.error('{}\n{}'.format(date_check, self._msg_div))
valid_date = False
if not valid_detector or not valid_filter or not valid_date:
return False
return True | [
"def",
"_check_inputs",
"(",
"self",
")",
":",
"valid_detector",
"=",
"True",
"valid_filter",
"=",
"True",
"valid_date",
"=",
"True",
"# Determine the submitted detector is valid",
"if",
"self",
".",
"detector",
"not",
"in",
"self",
".",
"_valid_detectors",
":",
"msg",
"=",
"(",
"'{} is not a valid detector option.\\n'",
"'Please choose one of the following:\\n{}\\n'",
"'{}'",
".",
"format",
"(",
"self",
".",
"detector",
",",
"'\\n'",
".",
"join",
"(",
"self",
".",
"_valid_detectors",
")",
",",
"self",
".",
"_msg_div",
")",
")",
"LOG",
".",
"error",
"(",
"msg",
")",
"valid_detector",
"=",
"False",
"# Determine if the submitted filter is valid",
"if",
"(",
"self",
".",
"filt",
"is",
"not",
"None",
"and",
"valid_detector",
"and",
"self",
".",
"filt",
"not",
"in",
"self",
".",
"valid_filters",
"[",
"self",
".",
"detector",
"]",
")",
":",
"msg",
"=",
"(",
"'{} is not a valid filter for {}\\n'",
"'Please choose one of the following:\\n{}\\n'",
"'{}'",
".",
"format",
"(",
"self",
".",
"filt",
",",
"self",
".",
"detector",
",",
"'\\n'",
".",
"join",
"(",
"self",
".",
"valid_filters",
"[",
"self",
".",
"detector",
"]",
")",
",",
"self",
".",
"_msg_div",
")",
")",
"LOG",
".",
"error",
"(",
"msg",
")",
"valid_filter",
"=",
"False",
"# Determine if the submitted date is valid",
"date_check",
"=",
"self",
".",
"_check_date",
"(",
")",
"if",
"date_check",
"is",
"not",
"None",
":",
"LOG",
".",
"error",
"(",
"'{}\\n{}'",
".",
"format",
"(",
"date_check",
",",
"self",
".",
"_msg_div",
")",
")",
"valid_date",
"=",
"False",
"if",
"not",
"valid_detector",
"or",
"not",
"valid_filter",
"or",
"not",
"valid_date",
":",
"return",
"False",
"return",
"True"
] | Check the inputs to ensure they are valid.
Returns
-------
status : bool
True if all inputs are valid, False if one is not. | [
"Check",
"the",
"inputs",
"to",
"ensure",
"they",
"are",
"valid",
"."
] | python | train |
productml/blurr | blurr/runner/spark_runner.py | https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/runner/spark_runner.py#L95-L115 | def get_record_rdd_from_json_files(self,
json_files: List[str],
data_processor: DataProcessor = SimpleJsonDataProcessor(),
spark_session: Optional['SparkSession'] = None) -> 'RDD':
"""
Reads the data from the given json_files path and converts them into the `Record`s format for
processing. `data_processor` is used to process the per event data in those files to convert
them into `Record`.
:param json_files: List of json file paths. Regular Spark path wildcards are accepted.
:param data_processor: `DataProcessor` to process each event in the json files.
:param spark_session: `SparkSession` to use for execution. If None is provided then a basic
`SparkSession` is created.
:return: RDD containing Tuple[Identity, List[TimeAndRecord]] which can be used in
`execute()`
"""
spark_context = get_spark_session(spark_session).sparkContext
raw_records: 'RDD' = spark_context.union(
[spark_context.textFile(file) for file in json_files])
return raw_records.mapPartitions(
lambda x: self.get_per_identity_records(x, data_processor)).groupByKey().mapValues(list) | [
"def",
"get_record_rdd_from_json_files",
"(",
"self",
",",
"json_files",
":",
"List",
"[",
"str",
"]",
",",
"data_processor",
":",
"DataProcessor",
"=",
"SimpleJsonDataProcessor",
"(",
")",
",",
"spark_session",
":",
"Optional",
"[",
"'SparkSession'",
"]",
"=",
"None",
")",
"->",
"'RDD'",
":",
"spark_context",
"=",
"get_spark_session",
"(",
"spark_session",
")",
".",
"sparkContext",
"raw_records",
":",
"'RDD'",
"=",
"spark_context",
".",
"union",
"(",
"[",
"spark_context",
".",
"textFile",
"(",
"file",
")",
"for",
"file",
"in",
"json_files",
"]",
")",
"return",
"raw_records",
".",
"mapPartitions",
"(",
"lambda",
"x",
":",
"self",
".",
"get_per_identity_records",
"(",
"x",
",",
"data_processor",
")",
")",
".",
"groupByKey",
"(",
")",
".",
"mapValues",
"(",
"list",
")"
] | Reads the data from the given json_files path and converts them into the `Record`s format for
processing. `data_processor` is used to process the per event data in those files to convert
them into `Record`.
:param json_files: List of json file paths. Regular Spark path wildcards are accepted.
:param data_processor: `DataProcessor` to process each event in the json files.
:param spark_session: `SparkSession` to use for execution. If None is provided then a basic
`SparkSession` is created.
:return: RDD containing Tuple[Identity, List[TimeAndRecord]] which can be used in
`execute()` | [
"Reads",
"the",
"data",
"from",
"the",
"given",
"json_files",
"path",
"and",
"converts",
"them",
"into",
"the",
"Record",
"s",
"format",
"for",
"processing",
".",
"data_processor",
"is",
"used",
"to",
"process",
"the",
"per",
"event",
"data",
"in",
"those",
"files",
"to",
"convert",
"them",
"into",
"Record",
"."
] | python | train |
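A hedged pipeline sketch; runner stands for the SparkRunner instance this method belongs to, and the input glob is hypothetical:

records = runner.get_record_rdd_from_json_files(['data/events-*.json'])
result = runner.execute(records)  # execute() consumes the grouped per-identity records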
frictionlessdata/datapackage-pipelines | datapackage_pipelines/web/server.py | https://github.com/frictionlessdata/datapackage-pipelines/blob/3a34bbdf042d13c3bec5eef46ff360ee41403874/datapackage_pipelines/web/server.py#L292-L326 | def badge_collection(pipeline_path):
'''Status badge for a collection of pipelines.'''
all_pipeline_ids = sorted(status.all_pipeline_ids())
if not pipeline_path.startswith('./'):
pipeline_path = './' + pipeline_path
# Filter pipeline ids to only include those that start with pipeline_path.
path_pipeline_ids = \
[p for p in all_pipeline_ids if p.startswith(pipeline_path)]
statuses = []
for pipeline_id in path_pipeline_ids:
pipeline_status = status.get(pipeline_id)
if pipeline_status is None:
abort(404)
status_text = pipeline_status.state().lower()
statuses.append(status_text)
status_color = 'lightgray'
status_counter = Counter(statuses)
if status_counter:
if len(status_counter) == 1 and status_counter['succeeded'] > 0:
status_color = 'brightgreen'
elif status_counter['failed'] > 0:
status_color = 'red'
elif status_counter['failed'] == 0:
status_color = 'yellow'
status_text = \
', '.join(['{} {}'.format(v, k)
for k, v in status_counter.items()])
else:
status_text = "not found"
return _make_badge_response('pipelines', status_text, status_color) | [
"def",
"badge_collection",
"(",
"pipeline_path",
")",
":",
"all_pipeline_ids",
"=",
"sorted",
"(",
"status",
".",
"all_pipeline_ids",
"(",
")",
")",
"if",
"not",
"pipeline_path",
".",
"startswith",
"(",
"'./'",
")",
":",
"pipeline_path",
"=",
"'./'",
"+",
"pipeline_path",
"# Filter pipeline ids to only include those that start with pipeline_path.",
"path_pipeline_ids",
"=",
"[",
"p",
"for",
"p",
"in",
"all_pipeline_ids",
"if",
"p",
".",
"startswith",
"(",
"pipeline_path",
")",
"]",
"statuses",
"=",
"[",
"]",
"for",
"pipeline_id",
"in",
"path_pipeline_ids",
":",
"pipeline_status",
"=",
"status",
".",
"get",
"(",
"pipeline_id",
")",
"if",
"pipeline_status",
"is",
"None",
":",
"abort",
"(",
"404",
")",
"status_text",
"=",
"pipeline_status",
".",
"state",
"(",
")",
".",
"lower",
"(",
")",
"statuses",
".",
"append",
"(",
"status_text",
")",
"status_color",
"=",
"'lightgray'",
"status_counter",
"=",
"Counter",
"(",
"statuses",
")",
"if",
"status_counter",
":",
"if",
"len",
"(",
"status_counter",
")",
"==",
"1",
"and",
"status_counter",
"[",
"'succeeded'",
"]",
">",
"0",
":",
"status_color",
"=",
"'brightgreen'",
"elif",
"status_counter",
"[",
"'failed'",
"]",
">",
"0",
":",
"status_color",
"=",
"'red'",
"elif",
"status_counter",
"[",
"'failed'",
"]",
"==",
"0",
":",
"status_color",
"=",
"'yellow'",
"status_text",
"=",
"', '",
".",
"join",
"(",
"[",
"'{} {}'",
".",
"format",
"(",
"v",
",",
"k",
")",
"for",
"k",
",",
"v",
"in",
"status_counter",
".",
"items",
"(",
")",
"]",
")",
"else",
":",
"status_text",
"=",
"\"not found\"",
"return",
"_make_badge_response",
"(",
"'pipelines'",
",",
"status_text",
",",
"status_color",
")"
] | Status badge for a collection of pipelines. | [
"Status",
"badge",
"for",
"a",
"collection",
"of",
"pipelines",
"."
] | python | train |
mwarkentin/django-watchman | watchman/decorators.py | https://github.com/mwarkentin/django-watchman/blob/6ef98ba54dc52f27e7b42d42028b59dc67550268/watchman/decorators.py#L57-L111 | def token_required(view_func):
"""
Decorator which ensures that one of the WATCHMAN_TOKENS is provided if set.
WATCHMAN_TOKEN_NAME can also be set if the token GET parameter must be
customized.
"""
def _parse_auth_header(auth_header):
"""
Parse the `Authorization` header
Expected format: `WATCHMAN-TOKEN Token="ABC123"`
"""
# TODO: Figure out full set of allowed characters
# http://stackoverflow.com/questions/19028068/illegal-characters-in-http-headers
# https://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
# https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
reg = re.compile('(\w+)[=] ?"?([\w-]+)"?')
header_dict = dict(reg.findall(auth_header))
return header_dict['Token']
def _get_passed_token(request):
"""
Try to get the passed token, starting with the header and fall back to `GET` param
"""
try:
auth_header = request.META['HTTP_AUTHORIZATION']
token = _parse_auth_header(auth_header)
except KeyError:
token = request.GET.get(settings.WATCHMAN_TOKEN_NAME)
return token
def _validate_token(request):
if settings.WATCHMAN_TOKENS:
watchman_tokens = settings.WATCHMAN_TOKENS.split(',')
elif settings.WATCHMAN_TOKEN:
watchman_tokens = [settings.WATCHMAN_TOKEN, ]
else:
return True
return _get_passed_token(request) in watchman_tokens
@csrf_exempt
@wraps(view_func)
def _wrapped_view(request, *args, **kwargs):
if _validate_token(request):
return view_func(request, *args, **kwargs)
return HttpResponseForbidden()
return _wrapped_view | [
"def",
"token_required",
"(",
"view_func",
")",
":",
"def",
"_parse_auth_header",
"(",
"auth_header",
")",
":",
"\"\"\"\n Parse the `Authorization` header\n\n Expected format: `WATCHMAN-TOKEN Token=\"ABC123\"`\n \"\"\"",
"# TODO: Figure out full set of allowed characters",
"# http://stackoverflow.com/questions/19028068/illegal-characters-in-http-headers",
"# https://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2",
"# https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2",
"reg",
"=",
"re",
".",
"compile",
"(",
"'(\\w+)[=] ?\"?([\\w-]+)\"?'",
")",
"header_dict",
"=",
"dict",
"(",
"reg",
".",
"findall",
"(",
"auth_header",
")",
")",
"return",
"header_dict",
"[",
"'Token'",
"]",
"def",
"_get_passed_token",
"(",
"request",
")",
":",
"\"\"\"\n Try to get the passed token, starting with the header and fall back to `GET` param\n \"\"\"",
"try",
":",
"auth_header",
"=",
"request",
".",
"META",
"[",
"'HTTP_AUTHORIZATION'",
"]",
"token",
"=",
"_parse_auth_header",
"(",
"auth_header",
")",
"except",
"KeyError",
":",
"token",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"settings",
".",
"WATCHMAN_TOKEN_NAME",
")",
"return",
"token",
"def",
"_validate_token",
"(",
"request",
")",
":",
"if",
"settings",
".",
"WATCHMAN_TOKENS",
":",
"watchman_tokens",
"=",
"settings",
".",
"WATCHMAN_TOKENS",
".",
"split",
"(",
"','",
")",
"elif",
"settings",
".",
"WATCHMAN_TOKEN",
":",
"watchman_tokens",
"=",
"[",
"settings",
".",
"WATCHMAN_TOKEN",
",",
"]",
"else",
":",
"return",
"True",
"return",
"_get_passed_token",
"(",
"request",
")",
"in",
"watchman_tokens",
"@",
"csrf_exempt",
"@",
"wraps",
"(",
"view_func",
")",
"def",
"_wrapped_view",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"_validate_token",
"(",
"request",
")",
":",
"return",
"view_func",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"HttpResponseForbidden",
"(",
")",
"return",
"_wrapped_view"
] | Decorator which ensures that one of the WATCHMAN_TOKENS is provided if set.
WATCHMAN_TOKEN_NAME can also be set if the token GET parameter must be
customized. | [
"Decorator",
"which",
"ensures",
"that",
"one",
"of",
"the",
"WATCHMAN_TOKENS",
"is",
"provided",
"if",
"set",
"."
] | python | test |
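The Authorization-header format this decorator parses, exercised standalone (raw-string form of the same regex; the header value is an example):

import re

reg = re.compile(r'(\w+)[=] ?"?([\w-]+)"?')
header = 'WATCHMAN-TOKEN Token="ABC123"'
print(dict(reg.findall(header))['Token'])  # -> ABC123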
DataBiosphere/dsub | dsub/lib/dsub_util.py | https://github.com/DataBiosphere/dsub/blob/443ce31daa6023dc2fd65ef2051796e19d18d5a7/dsub/lib/dsub_util.py#L233-L246 | def file_exists(file_path, credentials=None):
"""Check whether the file exists, on local disk or GCS.
Args:
file_path: The target file path; should have the 'gs://' prefix if in gcs.
credentials: Optional credential to be used to load the file from gcs.
Returns:
True if the file's there.
"""
if file_path.startswith('gs://'):
return _file_exists_in_gcs(file_path, credentials)
else:
return os.path.isfile(file_path) | [
"def",
"file_exists",
"(",
"file_path",
",",
"credentials",
"=",
"None",
")",
":",
"if",
"file_path",
".",
"startswith",
"(",
"'gs://'",
")",
":",
"return",
"_file_exists_in_gcs",
"(",
"file_path",
",",
"credentials",
")",
"else",
":",
"return",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")"
] | Check whether the file exists, on local disk or GCS.
Args:
file_path: The target file path; should have the 'gs://' prefix if in gcs.
credentials: Optional credential to be used to load the file from gcs.
Returns:
True if the file's there. | [
"Check",
"whether",
"the",
"file",
"exists",
"on",
"local",
"disk",
"or",
"GCS",
"."
] | python | valid |
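A small check of the local branch; the gs:// branch is skipped here because it needs Google credentials.

import os
import tempfile

def file_exists_local_only(file_path):
    # Mirrors the non-GCS branch of file_exists above.
    return os.path.isfile(file_path)

with tempfile.NamedTemporaryFile() as tmp:
    assert file_exists_local_only(tmp.name)
assert not file_exists_local_only('no-such-file.txt')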
sci-bots/pygtkhelpers | pygtkhelpers/ui/extra_dialogs.py | https://github.com/sci-bots/pygtkhelpers/blob/3a6e6d6340221c686229cd1c951d7537dae81b07/pygtkhelpers/ui/extra_dialogs.py#L27-L39 | def combobox_set_model_from_list(cb, items):
"""Setup a ComboBox or ComboBoxEntry based on a list of strings."""
cb.clear()
model = gtk.ListStore(str)
for i in items:
model.append([i])
cb.set_model(model)
if type(cb) == gtk.ComboBoxEntry:
cb.set_text_column(0)
elif type(cb) == gtk.ComboBox:
cell = gtk.CellRendererText()
cb.pack_start(cell, True)
cb.add_attribute(cell, 'text', 0) | [
"def",
"combobox_set_model_from_list",
"(",
"cb",
",",
"items",
")",
":",
"cb",
".",
"clear",
"(",
")",
"model",
"=",
"gtk",
".",
"ListStore",
"(",
"str",
")",
"for",
"i",
"in",
"items",
":",
"model",
".",
"append",
"(",
"[",
"i",
"]",
")",
"cb",
".",
"set_model",
"(",
"model",
")",
"if",
"type",
"(",
"cb",
")",
"==",
"gtk",
".",
"ComboBoxEntry",
":",
"cb",
".",
"set_text_column",
"(",
"0",
")",
"elif",
"type",
"(",
"cb",
")",
"==",
"gtk",
".",
"ComboBox",
":",
"cell",
"=",
"gtk",
".",
"CellRendererText",
"(",
")",
"cb",
".",
"pack_start",
"(",
"cell",
",",
"True",
")",
"cb",
".",
"add_attribute",
"(",
"cell",
",",
"'text'",
",",
"0",
")"
] | Setup a ComboBox or ComboBoxEntry based on a list of strings. | [
"Setup",
"a",
"ComboBox",
"or",
"ComboBoxEntry",
"based",
"on",
"a",
"list",
"of",
"strings",
"."
] | python | train |
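A sketch of the intended call (PyGTK 2.x, which this module targets); shown rather than run, since it needs a display server.

import gtk
from pygtkhelpers.ui.extra_dialogs import combobox_set_model_from_list

combo = gtk.ComboBox()
combobox_set_model_from_list(combo, ['red', 'green', 'blue'])
combo.set_active(0)  # selects 'red'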
pyopenapi/pyswagger | pyswagger/spec/base.py | https://github.com/pyopenapi/pyswagger/blob/333c4ca08e758cd2194943d9904a3eda3fe43977/pyswagger/spec/base.py#L246-L257 | def update_field(self, f, obj):
""" update a field
:param str f: name of field to be updated.
:param obj: value of field to be updated.
"""
n = self.get_private_name(f)
if not hasattr(self, n):
raise AttributeError('{0} is not in {1}'.format(n, self.__class__.__name__))
setattr(self, n, obj)
self.__origin_keys.add(f) | [
"def",
"update_field",
"(",
"self",
",",
"f",
",",
"obj",
")",
":",
"n",
"=",
"self",
".",
"get_private_name",
"(",
"f",
")",
"if",
"not",
"hasattr",
"(",
"self",
",",
"n",
")",
":",
"raise",
"AttributeError",
"(",
"'{0} is not in {1}'",
".",
"format",
"(",
"n",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"setattr",
"(",
"self",
",",
"n",
",",
"obj",
")",
"self",
".",
"__origin_keys",
".",
"add",
"(",
"f",
")"
] | update a field
:param str f: name of field to be updated.
:param obj: value of field to be updated. | [
"update",
"a",
"field"
] | python | train |
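What update_field expects, in miniature: the private attribute must already exist, otherwise AttributeError is raised. Obj below is a toy stand-in, not the real pyswagger base class, and its get_private_name mapping is an assumption.

class Obj:
    def __init__(self):
        self._title = None
        self._origin_keys = set()

    def get_private_name(self, f):
        return '_' + f  # the real mapping lives in the pyswagger base class

    def update_field(self, f, obj):
        n = self.get_private_name(f)
        if not hasattr(self, n):
            raise AttributeError('{0} is not in {1}'.format(n, type(self).__name__))
        setattr(self, n, obj)
        self._origin_keys.add(f)

o = Obj()
o.update_field('title', 'Pet Store')
print(o._title)  # Pet Store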
polysquare/polysquare-setuptools-lint | polysquare_setuptools_lint/__init__.py | https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L161-L166 | def _run_flake8(filename, stamp_file_name, show_lint_files):
"""Run flake8, cached by stamp_file_name."""
_debug_linter_status("flake8", filename, show_lint_files)
return _stamped_deps(stamp_file_name,
_run_flake8_internal,
filename) | [
"def",
"_run_flake8",
"(",
"filename",
",",
"stamp_file_name",
",",
"show_lint_files",
")",
":",
"_debug_linter_status",
"(",
"\"flake8\"",
",",
"filename",
",",
"show_lint_files",
")",
"return",
"_stamped_deps",
"(",
"stamp_file_name",
",",
"_run_flake8_internal",
",",
"filename",
")"
] | Run flake8, cached by stamp_file_name. | [
"Run",
"flake8",
"cached",
"by",
"stamp_file_name",
"."
] | python | train |
cltk/cltk | cltk/prosody/old_norse/verse.py | https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/old_norse/verse.py#L293-L314 | def to_phonetics(self):
"""
Transcribing words in verse helps find alliteration.
"""
if len(self.long_lines) == 0:
logger.error("No text was imported")
self.syllabified_text = []
else:
transcriber = Transcriber(DIPHTHONGS_IPA, DIPHTHONGS_IPA_class, IPA_class, old_norse_rules)
transcribed_text = []
phonological_features_text = []
for i, long_line in enumerate(self.long_lines):
transcribed_text.append([])
phonological_features_text.append([])
for short_line in long_line:
assert isinstance(short_line, ShortLine) or isinstance(short_line, LongLine)
short_line.to_phonetics(transcriber)
transcribed_text[i].append(short_line.transcribed)
phonological_features_text[i].append(short_line.phonological_features_text)
self.transcribed_text = transcribed_text
self.phonological_features_text = phonological_features_text | [
"def",
"to_phonetics",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"long_lines",
")",
"==",
"0",
":",
"logger",
".",
"error",
"(",
"\"No text was imported\"",
")",
"self",
".",
"syllabified_text",
"=",
"[",
"]",
"else",
":",
"transcriber",
"=",
"Transcriber",
"(",
"DIPHTHONGS_IPA",
",",
"DIPHTHONGS_IPA_class",
",",
"IPA_class",
",",
"old_norse_rules",
")",
"transcribed_text",
"=",
"[",
"]",
"phonological_features_text",
"=",
"[",
"]",
"for",
"i",
",",
"long_line",
"in",
"enumerate",
"(",
"self",
".",
"long_lines",
")",
":",
"transcribed_text",
".",
"append",
"(",
"[",
"]",
")",
"phonological_features_text",
".",
"append",
"(",
"[",
"]",
")",
"for",
"short_line",
"in",
"long_line",
":",
"assert",
"isinstance",
"(",
"short_line",
",",
"ShortLine",
")",
"or",
"isinstance",
"(",
"short_line",
",",
"LongLine",
")",
"short_line",
".",
"to_phonetics",
"(",
"transcriber",
")",
"transcribed_text",
"[",
"i",
"]",
".",
"append",
"(",
"short_line",
".",
"transcribed",
")",
"phonological_features_text",
"[",
"i",
"]",
".",
"append",
"(",
"short_line",
".",
"phonological_features_text",
")",
"self",
".",
"transcribed_text",
"=",
"transcribed_text",
"self",
".",
"phonological_features_text",
"=",
"phonological_features_text"
] | Transcribing words in verse helps find alliteration. | [
"Transcribing",
"words",
"in",
"verse",
"helps",
"find",
"alliteration",
"."
] | python | train |
has2k1/plotnine | plotnine/guides/guide_colorbar.py | https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/guides/guide_colorbar.py#L295-L321 | def add_segmented_colorbar(da, colors, direction):
"""
Add 'non-rastered' colorbar to DrawingArea
"""
nbreak = len(colors)
if direction == 'vertical':
linewidth = da.height/nbreak
verts = [None] * nbreak
x1, x2 = 0, da.width
for i, color in enumerate(colors):
y1 = i * linewidth
y2 = y1 + linewidth
verts[i] = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
else:
linewidth = da.width/nbreak
verts = [None] * nbreak
y1, y2 = 0, da.height
for i, color in enumerate(colors):
x1 = i * linewidth
x2 = x1 + linewidth
verts[i] = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
coll = mcoll.PolyCollection(verts,
facecolors=colors,
linewidth=0,
antialiased=False)
da.add_artist(coll) | [
"def",
"add_segmented_colorbar",
"(",
"da",
",",
"colors",
",",
"direction",
")",
":",
"nbreak",
"=",
"len",
"(",
"colors",
")",
"if",
"direction",
"==",
"'vertical'",
":",
"linewidth",
"=",
"da",
".",
"height",
"/",
"nbreak",
"verts",
"=",
"[",
"None",
"]",
"*",
"nbreak",
"x1",
",",
"x2",
"=",
"0",
",",
"da",
".",
"width",
"for",
"i",
",",
"color",
"in",
"enumerate",
"(",
"colors",
")",
":",
"y1",
"=",
"i",
"*",
"linewidth",
"y2",
"=",
"y1",
"+",
"linewidth",
"verts",
"[",
"i",
"]",
"=",
"(",
"(",
"x1",
",",
"y1",
")",
",",
"(",
"x1",
",",
"y2",
")",
",",
"(",
"x2",
",",
"y2",
")",
",",
"(",
"x2",
",",
"y1",
")",
")",
"else",
":",
"linewidth",
"=",
"da",
".",
"width",
"/",
"nbreak",
"verts",
"=",
"[",
"None",
"]",
"*",
"nbreak",
"y1",
",",
"y2",
"=",
"0",
",",
"da",
".",
"height",
"for",
"i",
",",
"color",
"in",
"enumerate",
"(",
"colors",
")",
":",
"x1",
"=",
"i",
"*",
"linewidth",
"x2",
"=",
"x1",
"+",
"linewidth",
"verts",
"[",
"i",
"]",
"=",
"(",
"(",
"x1",
",",
"y1",
")",
",",
"(",
"x1",
",",
"y2",
")",
",",
"(",
"x2",
",",
"y2",
")",
",",
"(",
"x2",
",",
"y1",
")",
")",
"coll",
"=",
"mcoll",
".",
"PolyCollection",
"(",
"verts",
",",
"facecolors",
"=",
"colors",
",",
"linewidth",
"=",
"0",
",",
"antialiased",
"=",
"False",
")",
"da",
".",
"add_artist",
"(",
"coll",
")"
] | Add 'non-rastered' colorbar to DrawingArea | [
"Add",
"non",
"-",
"rastered",
"colorbar",
"to",
"DrawingArea"
] | python | train |
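The vertical branch of the vertex math checked standalone, with no matplotlib dependency; the dimensions are arbitrary.

def strip_verts(width, height, n):
    # One equal-height rectangle per color, stacked bottom to top.
    lw = height / n
    return [((0, i * lw), (0, i * lw + lw), (width, i * lw + lw), (width, i * lw))
            for i in range(n)]

for v in strip_verts(10, 30, 3):
    print(v)
# ((0, 0.0), (0, 10.0), (10, 10.0), (10, 0.0)) ... up to y = 30.0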
psss/did | did/utils.py | https://github.com/psss/did/blob/04e4ee6f1aa14c0cae3ba9f9803871f3f98279cb/did/utils.py#L382-L387 | def enabled(self):
""" True if coloring is currently enabled """
# In auto-detection mode color enabled when terminal attached
if self._mode == COLOR_AUTO:
return sys.stdout.isatty()
return self._mode == COLOR_ON | [
"def",
"enabled",
"(",
"self",
")",
":",
"# In auto-detection mode color enabled when terminal attached",
"if",
"self",
".",
"_mode",
"==",
"COLOR_AUTO",
":",
"return",
"sys",
".",
"stdout",
".",
"isatty",
"(",
")",
"return",
"self",
".",
"_mode",
"==",
"COLOR_ON"
] | True if coloring is currently enabled | [
"True",
"if",
"coloring",
"is",
"currently",
"enabled"
] | python | train |
ellmetha/django-machina | machina/apps/forum_conversation/views.py | https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_conversation/views.py#L756-L776 | def get_success_url(self):
""" Returns the URL to redirect the user to upon valid form processing. """
messages.success(self.request, self.success_message)
if self.object.is_topic_head and self.object.is_topic_tail:
return reverse(
'forum:forum',
kwargs={
'slug': self.object.topic.forum.slug, 'pk': self.object.topic.forum.pk,
},
)
return reverse(
'forum_conversation:topic',
kwargs={
'forum_slug': self.object.topic.forum.slug,
'forum_pk': self.object.topic.forum.pk,
'slug': self.object.topic.slug,
'pk': self.object.topic.pk,
},
) | [
"def",
"get_success_url",
"(",
"self",
")",
":",
"messages",
".",
"success",
"(",
"self",
".",
"request",
",",
"self",
".",
"success_message",
")",
"if",
"self",
".",
"object",
".",
"is_topic_head",
"and",
"self",
".",
"object",
".",
"is_topic_tail",
":",
"return",
"reverse",
"(",
"'forum:forum'",
",",
"kwargs",
"=",
"{",
"'slug'",
":",
"self",
".",
"object",
".",
"topic",
".",
"forum",
".",
"slug",
",",
"'pk'",
":",
"self",
".",
"object",
".",
"topic",
".",
"forum",
".",
"pk",
",",
"}",
",",
")",
"return",
"reverse",
"(",
"'forum_conversation:topic'",
",",
"kwargs",
"=",
"{",
"'forum_slug'",
":",
"self",
".",
"object",
".",
"topic",
".",
"forum",
".",
"slug",
",",
"'forum_pk'",
":",
"self",
".",
"object",
".",
"topic",
".",
"forum",
".",
"pk",
",",
"'slug'",
":",
"self",
".",
"object",
".",
"topic",
".",
"slug",
",",
"'pk'",
":",
"self",
".",
"object",
".",
"topic",
".",
"pk",
",",
"}",
",",
")"
] | Returns the URL to redirect the user to upon valid form processing. | [
"Returns",
"the",
"URL",
"to",
"redirect",
"the",
"user",
"to",
"upon",
"valid",
"form",
"processing",
"."
] | python | train |
emory-libraries/eulxml | eulxml/xmlmap/cerp.py | https://github.com/emory-libraries/eulxml/blob/17d71c7d98c0cebda9932b7f13e72093805e1fe2/eulxml/xmlmap/cerp.py#L219-L314 | def from_email_message(cls, message, local_id=None):
'''
Convert an :class:`email.message.Message` or compatible message
object into a CERP XML :class:`eulxml.xmlmap.cerp.Message`. If an
id is specified, it will be stored in the Message <LocalId>.
:param message: `email.message.Message` object
:param id: optional message id to be set as `local_id`
:returns: :class:`eulxml.xmlmap.cerp.Message` instance populated
with message information
'''
result = cls()
if local_id is not None:
result.local_id = local_id
message_id = message.get('Message-Id')
if message_id:
result.message_id_supplied = True
result.message_id = message_id
result.mime_version = message.get('MIME-Version')
dates = message.get_all('Date', [])
result.orig_date_list.extend([parse_mail_date(d) for d in dates])
result.from_list.extend(message.get_all('From', []))
result.sender_list.extend(message.get_all('From', []))
try:
result.to_list.extend(message.get_all('To', []))
except UnicodeError:
print(repr(message['To']))
raise
result.cc_list.extend(message.get_all('Cc', []))
result.bcc_list.extend(message.get_all('Bcc', []))
result.in_reply_to_list.extend(message.get_all('In-Reply-To', []))
result.references_list.extend(message.get_all('References', []))
result.subject_list.extend(message.get_all('Subject', []))
result.comments_list.extend(message.get_all('Comments', []))
result.keywords_list.extend(message.get_all('Keywords', []))
headers = [ Header(name=key, value=val) for key, val in message.items() ]
result.headers.extend(headers)
# FIXME: skip multipart messages for now
if not message.is_multipart():
result.create_single_body()
# FIXME: this is a small subset of the actual elements CERP allows.
# we should add the rest of them, too.
# message.get_content_type() always returns something. only
# put it in the CERP if a Content-Type was explicitly specified.
if message['Content-Type']:
result.single_body.content_type_list.append(message.get_content_type())
if message.get_content_charset():
result.single_body.charset_list.append(message.get_content_charset())
if message.get_filename():
result.single_body.content_name_list.append(message.get_filename())
# FIXME: attaching the body_content only makes sense for text
# content types. we'll eventually need a better solution for
# non-text messages
result.single_body.create_body_content()
payload = message.get_payload(decode=False)
# if not unicode, attempt to convert
if isinstance(payload, six.binary_type):
charset = message.get_charset()
# decode according to the specified character set, if any
if charset is not None:
charset_decoder = codecs.getdecoder(str(charset))
payload, length = charset_decoder(payload)
# otherwise, just try to convert
else:
payload = u(payload)
# remove any control characters not allowed in XML
control_char_map = dict.fromkeys(range(32))
for i in [9, 10, 13]: # preserve horizontal tab, line feed, carriage return
del control_char_map[i]
payload = u(payload).translate(control_char_map)
result.single_body.body_content.content = payload
else:
# TODO: handle multipart
logger.warn('CERP conversion does not yet handle multipart')
# assume we've normalized newlines:
result.eol = EOLMAP[os.linesep]
return result | [
"def",
"from_email_message",
"(",
"cls",
",",
"message",
",",
"local_id",
"=",
"None",
")",
":",
"result",
"=",
"cls",
"(",
")",
"if",
"local_id",
"is",
"not",
"None",
":",
"result",
".",
"local_id",
"=",
"id",
"message_id",
"=",
"message",
".",
"get",
"(",
"'Message-Id'",
")",
"if",
"message_id",
":",
"result",
".",
"message_id_supplied",
"=",
"True",
"result",
".",
"message_id",
"=",
"message_id",
"result",
".",
"mime_version",
"=",
"message",
".",
"get",
"(",
"'MIME-Version'",
")",
"dates",
"=",
"message",
".",
"get_all",
"(",
"'Date'",
",",
"[",
"]",
")",
"result",
".",
"orig_date_list",
".",
"extend",
"(",
"[",
"parse_mail_date",
"(",
"d",
")",
"for",
"d",
"in",
"dates",
"]",
")",
"result",
".",
"from_list",
".",
"extend",
"(",
"message",
".",
"get_all",
"(",
"'From'",
",",
"[",
"]",
")",
")",
"result",
".",
"sender_list",
".",
"extend",
"(",
"message",
".",
"get_all",
"(",
"'From'",
",",
"[",
"]",
")",
")",
"try",
":",
"result",
".",
"to_list",
".",
"extend",
"(",
"message",
".",
"get_all",
"(",
"'To'",
",",
"[",
"]",
")",
")",
"except",
"UnicodeError",
":",
"print",
"(",
"repr",
"(",
"message",
"[",
"'To'",
"]",
")",
")",
"raise",
"result",
".",
"cc_list",
".",
"extend",
"(",
"message",
".",
"get_all",
"(",
"'Cc'",
",",
"[",
"]",
")",
")",
"result",
".",
"bcc_list",
".",
"extend",
"(",
"message",
".",
"get_all",
"(",
"'Bcc'",
",",
"[",
"]",
")",
")",
"result",
".",
"in_reply_to_list",
".",
"extend",
"(",
"message",
".",
"get_all",
"(",
"'In-Reply-To'",
",",
"[",
"]",
")",
")",
"result",
".",
"references_list",
".",
"extend",
"(",
"message",
".",
"get_all",
"(",
"'References'",
",",
"[",
"]",
")",
")",
"result",
".",
"subject_list",
".",
"extend",
"(",
"message",
".",
"get_all",
"(",
"'Subject'",
",",
"[",
"]",
")",
")",
"result",
".",
"comments_list",
".",
"extend",
"(",
"message",
".",
"get_all",
"(",
"'Comments'",
",",
"[",
"]",
")",
")",
"result",
".",
"keywords_list",
".",
"extend",
"(",
"message",
".",
"get_all",
"(",
"'Keywords'",
",",
"[",
"]",
")",
")",
"headers",
"=",
"[",
"Header",
"(",
"name",
"=",
"key",
",",
"value",
"=",
"val",
")",
"for",
"key",
",",
"val",
"in",
"message",
".",
"items",
"(",
")",
"]",
"result",
".",
"headers",
".",
"extend",
"(",
"headers",
")",
"# FIXME: skip multipart messages for now",
"if",
"not",
"message",
".",
"is_multipart",
"(",
")",
":",
"result",
".",
"create_single_body",
"(",
")",
"# FIXME: this is a small subset of the actual elements CERP allows.",
"# we should add the rest of them, too.",
"# message.get_content_type() always returns something. only",
"# put it in the CERP if a Content-Type was explicitly specified.",
"if",
"message",
"[",
"'Content-Type'",
"]",
":",
"result",
".",
"single_body",
".",
"content_type_list",
".",
"append",
"(",
"message",
".",
"get_content_type",
"(",
")",
")",
"if",
"message",
".",
"get_content_charset",
"(",
")",
":",
"result",
".",
"single_body",
".",
"charset_list",
".",
"append",
"(",
"message",
".",
"get_content_charset",
"(",
")",
")",
"if",
"message",
".",
"get_filename",
"(",
")",
":",
"result",
".",
"single_body",
".",
"content_name_list",
".",
"append",
"(",
"message",
".",
"get_filename",
"(",
")",
")",
"# FIXME: attaching the body_content only makes sense for text",
"# content types. we'll eventually need a better solution for",
"# non-text messages",
"result",
".",
"single_body",
".",
"create_body_content",
"(",
")",
"payload",
"=",
"message",
".",
"get_payload",
"(",
"decode",
"=",
"False",
")",
"# if not unicode, attempt to convert",
"if",
"isinstance",
"(",
"payload",
",",
"six",
".",
"binary_type",
")",
":",
"charset",
"=",
"message",
".",
"get_charset",
"(",
")",
"# decode according to the specified character set, if any",
"if",
"charset",
"is",
"not",
"None",
":",
"charset_decoder",
"=",
"codecs",
".",
"getdecoder",
"(",
"str",
"(",
"charset",
")",
")",
"payload",
",",
"length",
"=",
"charset_decoder",
"(",
"payload",
")",
"# otherwise, just try to convert",
"else",
":",
"payload",
"=",
"u",
"(",
"payload",
")",
"# remove any control characters not allowed in XML",
"control_char_map",
"=",
"dict",
".",
"fromkeys",
"(",
"range",
"(",
"32",
")",
")",
"for",
"i",
"in",
"[",
"9",
",",
"10",
",",
"13",
"]",
":",
"# preserve horizontal tab, line feed, carriage return",
"del",
"control_char_map",
"[",
"i",
"]",
"payload",
"=",
"u",
"(",
"payload",
")",
".",
"translate",
"(",
"control_char_map",
")",
"result",
".",
"single_body",
".",
"body_content",
".",
"content",
"=",
"payload",
"else",
":",
"# TODO: handle multipart",
"logger",
".",
"warn",
"(",
"'CERP conversion does not yet handle multipart'",
")",
"# assume we've normalized newlines:",
"result",
".",
"eol",
"=",
"EOLMAP",
"[",
"os",
".",
"linesep",
"]",
"return",
"result"
] | Convert an :class:`email.message.Message` or compatible message
object into a CERP XML :class:`eulxml.xmlmap.cerp.Message`. If an
id is specified, it will be stored in the Message <LocalId>.
:param message: `email.message.Message` object
:param local_id: optional message id to be set as the local id
:returns: :class:`eulxml.xmlmap.cerp.Message` instance populated
with message information | [
"Convert",
"an",
":",
"class",
":",
"email",
".",
"message",
".",
"Message",
"or",
"compatible",
"message",
"object",
"into",
"a",
"CERP",
"XML",
":",
"class",
":",
"eulxml",
".",
"xmlmap",
".",
"cerp",
".",
"Message",
".",
"If",
"an",
"id",
"is",
"specified",
"it",
"will",
"be",
"stored",
"in",
"the",
"Message",
"<LocalId",
">",
"."
] | python | train |
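A hedged call-shape sketch: building a stdlib message and converting it. It requires the eulxml package installed; the header values are made up and the output comment is the expected value, not verified here.

from email.message import Message
from eulxml.xmlmap import cerp

msg = Message()
msg['Message-Id'] = '<1@example.com>'
msg['From'] = 'a@example.com'
msg['To'] = 'b@example.com'
msg['Subject'] = 'hello'
msg.set_payload('body text')

cerp_msg = cerp.Message.from_email_message(msg, local_id=1)
print(cerp_msg.message_id)  # <1@example.com>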
jupyterhub/jupyter-server-proxy | jupyter_server_proxy/handlers.py | https://github.com/jupyterhub/jupyter-server-proxy/blob/f12a090babe3c6e37a777b7e54c7b415de5c7e18/jupyter_server_proxy/handlers.py#L343-L377 | async def ensure_process(self):
"""
Start the process
"""
# We don't want multiple requests trying to start the process at the same time
# FIXME: Make sure this times out properly?
# Invariant here should be: when lock isn't being held, either 'proc' is in state &
# running, or not.
with (await self.state['proc_lock']):
if 'proc' not in self.state:
# FIXME: Prevent races here
# FIXME: Handle graceful exits of spawned processes here
cmd = self.get_cmd()
server_env = os.environ.copy()
# Set up extra environment variables for process
server_env.update(self.get_env())
timeout = self.get_timeout()
proc = SupervisedProcess(self.name, *cmd, env=server_env, ready_func=self._http_ready_func, ready_timeout=timeout, log=self.log)
self.state['proc'] = proc
try:
await proc.start()
is_ready = await proc.ready()
if not is_ready:
await proc.kill()
raise web.HTTPError(500, 'could not start {} in time'.format(self.name))
except:
# Make sure we remove proc from state in any error condition
del self.state['proc']
raise | [
"async",
"def",
"ensure_process",
"(",
"self",
")",
":",
"# We don't want multiple requests trying to start the process at the same time",
"# FIXME: Make sure this times out properly?",
"# Invariant here should be: when lock isn't being held, either 'proc' is in state &",
"# running, or not.",
"with",
"(",
"await",
"self",
".",
"state",
"[",
"'proc_lock'",
"]",
")",
":",
"if",
"'proc'",
"not",
"in",
"self",
".",
"state",
":",
"# FIXME: Prevent races here",
"# FIXME: Handle graceful exits of spawned processes here",
"cmd",
"=",
"self",
".",
"get_cmd",
"(",
")",
"server_env",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"# Set up extra environment variables for process",
"server_env",
".",
"update",
"(",
"self",
".",
"get_env",
"(",
")",
")",
"timeout",
"=",
"self",
".",
"get_timeout",
"(",
")",
"proc",
"=",
"SupervisedProcess",
"(",
"self",
".",
"name",
",",
"*",
"cmd",
",",
"env",
"=",
"server_env",
",",
"ready_func",
"=",
"self",
".",
"_http_ready_func",
",",
"ready_timeout",
"=",
"timeout",
",",
"log",
"=",
"self",
".",
"log",
")",
"self",
".",
"state",
"[",
"'proc'",
"]",
"=",
"proc",
"try",
":",
"await",
"proc",
".",
"start",
"(",
")",
"is_ready",
"=",
"await",
"proc",
".",
"ready",
"(",
")",
"if",
"not",
"is_ready",
":",
"await",
"proc",
".",
"kill",
"(",
")",
"raise",
"web",
".",
"HTTPError",
"(",
"500",
",",
"'could not start {} in time'",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"except",
":",
"# Make sure we remove proc from state in any error condition",
"del",
"self",
".",
"state",
"[",
"'proc'",
"]",
"raise"
] | Start the process | [
"Start",
"the",
"process"
] | python | train |
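The start-once locking pattern in miniature, using plain asyncio in place of the Tornado-compatible lock kept in shared state:

import asyncio

state = {'proc_lock': asyncio.Lock()}

async def ensure_once(start):
    # First caller starts the process; the rest wait on the lock, then reuse it.
    async with state['proc_lock']:
        if 'proc' not in state:
            state['proc'] = await start()
    return state['proc']

async def main():
    calls = []
    async def start():
        calls.append(1)
        return 'proc-handle'
    print(await asyncio.gather(*(ensure_once(start) for _ in range(3))))
    print(len(calls))  # 1 -- start() ran exactly once

asyncio.run(main())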
BerkeleyAutomation/perception | perception/kinect2_sensor.py | https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/kinect2_sensor.py#L279-L339 | def _frames_and_index_map(self, skip_registration=False):
"""Retrieve a new frame from the Kinect and return a ColorImage,
DepthImage, IrImage, and a map from depth pixels to color pixel indices.
Parameters
----------
skip_registration : bool
If True, the registration step is skipped.
Returns
-------
:obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
The ColorImage, DepthImage, and IrImage of the current frame, and an
ndarray that maps pixels of the depth image to the index of the
corresponding pixel in the color image.
Raises
------
RuntimeError
If the Kinect stream is not running.
"""
if not self._running:
raise RuntimeError('Kinect2 device %s not running. Cannot read frames' %(self._device_num))
# read frames
frames = self._listener.waitForNewFrame()
unregistered_color = frames['color']
distorted_depth = frames['depth']
ir = frames['ir']
# apply color to depth registration
color_frame = self._color_frame
color = unregistered_color
depth = distorted_depth
color_depth_map = np.zeros([depth.height, depth.width]).astype(np.int32).ravel()
if not skip_registration and self._registration_mode == Kinect2RegistrationMode.COLOR_TO_DEPTH:
color_frame = self._ir_frame
depth = lf2.Frame(depth.width, depth.height, 4, lf2.FrameType.Depth)
color = lf2.Frame(depth.width, depth.height, 4, lf2.FrameType.Color)
self._registration.apply(unregistered_color, distorted_depth, depth, color, color_depth_map=color_depth_map)
# convert to array (copy needed to prevent reference of deleted data)
color_arr = copy.copy(color.asarray())
color_arr[:,:,[0,2]] = color_arr[:,:,[2,0]] # convert BGR to RGB
color_arr[:,:,0] = np.fliplr(color_arr[:,:,0])
color_arr[:,:,1] = np.fliplr(color_arr[:,:,1])
color_arr[:,:,2] = np.fliplr(color_arr[:,:,2])
color_arr[:,:,3] = np.fliplr(color_arr[:,:,3])
depth_arr = np.fliplr(copy.copy(depth.asarray()))
ir_arr = np.fliplr(copy.copy(ir.asarray()))
# convert meters
if self._depth_mode == Kinect2DepthMode.METERS:
depth_arr = depth_arr * MM_TO_METERS
# Release and return
self._listener.release(frames)
return (ColorImage(color_arr[:,:,:3], color_frame),
DepthImage(depth_arr, self._ir_frame),
IrImage(ir_arr.astype(np.uint16), self._ir_frame),
color_depth_map) | [
"def",
"_frames_and_index_map",
"(",
"self",
",",
"skip_registration",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"_running",
":",
"raise",
"RuntimeError",
"(",
"'Kinect2 device %s not runnning. Cannot read frames'",
"%",
"(",
"self",
".",
"_device_num",
")",
")",
"# read frames",
"frames",
"=",
"self",
".",
"_listener",
".",
"waitForNewFrame",
"(",
")",
"unregistered_color",
"=",
"frames",
"[",
"'color'",
"]",
"distorted_depth",
"=",
"frames",
"[",
"'depth'",
"]",
"ir",
"=",
"frames",
"[",
"'ir'",
"]",
"# apply color to depth registration",
"color_frame",
"=",
"self",
".",
"_color_frame",
"color",
"=",
"unregistered_color",
"depth",
"=",
"distorted_depth",
"color_depth_map",
"=",
"np",
".",
"zeros",
"(",
"[",
"depth",
".",
"height",
",",
"depth",
".",
"width",
"]",
")",
".",
"astype",
"(",
"np",
".",
"int32",
")",
".",
"ravel",
"(",
")",
"if",
"not",
"skip_registration",
"and",
"self",
".",
"_registration_mode",
"==",
"Kinect2RegistrationMode",
".",
"COLOR_TO_DEPTH",
":",
"color_frame",
"=",
"self",
".",
"_ir_frame",
"depth",
"=",
"lf2",
".",
"Frame",
"(",
"depth",
".",
"width",
",",
"depth",
".",
"height",
",",
"4",
",",
"lf2",
".",
"FrameType",
".",
"Depth",
")",
"color",
"=",
"lf2",
".",
"Frame",
"(",
"depth",
".",
"width",
",",
"depth",
".",
"height",
",",
"4",
",",
"lf2",
".",
"FrameType",
".",
"Color",
")",
"self",
".",
"_registration",
".",
"apply",
"(",
"unregistered_color",
",",
"distorted_depth",
",",
"depth",
",",
"color",
",",
"color_depth_map",
"=",
"color_depth_map",
")",
"# convert to array (copy needed to prevent reference of deleted data",
"color_arr",
"=",
"copy",
".",
"copy",
"(",
"color",
".",
"asarray",
"(",
")",
")",
"color_arr",
"[",
":",
",",
":",
",",
"[",
"0",
",",
"2",
"]",
"]",
"=",
"color_arr",
"[",
":",
",",
":",
",",
"[",
"2",
",",
"0",
"]",
"]",
"# convert BGR to RGB",
"color_arr",
"[",
":",
",",
":",
",",
"0",
"]",
"=",
"np",
".",
"fliplr",
"(",
"color_arr",
"[",
":",
",",
":",
",",
"0",
"]",
")",
"color_arr",
"[",
":",
",",
":",
",",
"1",
"]",
"=",
"np",
".",
"fliplr",
"(",
"color_arr",
"[",
":",
",",
":",
",",
"1",
"]",
")",
"color_arr",
"[",
":",
",",
":",
",",
"2",
"]",
"=",
"np",
".",
"fliplr",
"(",
"color_arr",
"[",
":",
",",
":",
",",
"2",
"]",
")",
"color_arr",
"[",
":",
",",
":",
",",
"3",
"]",
"=",
"np",
".",
"fliplr",
"(",
"color_arr",
"[",
":",
",",
":",
",",
"3",
"]",
")",
"depth_arr",
"=",
"np",
".",
"fliplr",
"(",
"copy",
".",
"copy",
"(",
"depth",
".",
"asarray",
"(",
")",
")",
")",
"ir_arr",
"=",
"np",
".",
"fliplr",
"(",
"copy",
".",
"copy",
"(",
"ir",
".",
"asarray",
"(",
")",
")",
")",
"# convert meters",
"if",
"self",
".",
"_depth_mode",
"==",
"Kinect2DepthMode",
".",
"METERS",
":",
"depth_arr",
"=",
"depth_arr",
"*",
"MM_TO_METERS",
"# Release and return",
"self",
".",
"_listener",
".",
"release",
"(",
"frames",
")",
"return",
"(",
"ColorImage",
"(",
"color_arr",
"[",
":",
",",
":",
",",
":",
"3",
"]",
",",
"color_frame",
")",
",",
"DepthImage",
"(",
"depth_arr",
",",
"self",
".",
"_ir_frame",
")",
",",
"IrImage",
"(",
"ir_arr",
".",
"astype",
"(",
"np",
".",
"uint16",
")",
",",
"self",
".",
"_ir_frame",
")",
",",
"color_depth_map",
")"
] | Retrieve a new frame from the Kinect and return a ColorImage,
DepthImage, IrImage, and a map from depth pixels to color pixel indices.
Parameters
----------
skip_registration : bool
If True, the registration step is skipped.
Returns
-------
:obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
The ColorImage, DepthImage, and IrImage of the current frame, and an
ndarray that maps pixels of the depth image to the index of the
corresponding pixel in the color image.
Raises
------
RuntimeError
If the Kinect stream is not running. | [
"Retrieve",
"a",
"new",
"frame",
"from",
"the",
"Kinect",
"and",
"return",
"a",
"ColorImage",
"DepthImage",
"IrImage",
"and",
"a",
"map",
"from",
"depth",
"pixels",
"to",
"color",
"pixel",
"indices",
"."
] | python | train |
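The two numpy-level steps that need no hardware, isolated: the horizontal flip and the millimeter-to-meter conversion. MM_TO_METERS is assumed to be 0.001; it is a module constant not shown in this record.

import numpy as np

MM_TO_METERS = 0.001  # assumed value of the module constant

depth_mm = np.array([[1000., 2000.],
                     [1500.,  500.]])
depth_m = np.fliplr(depth_mm.copy()) * MM_TO_METERS
print(depth_m)
# [[2.  1. ]
#  [0.5 1.5]]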
Josef-Friedrich/phrydy | phrydy/mediafile.py | https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L2189-L2207 | def bitrate(self):
"""The number of bits per seconds used in the audio coding (an
int). If this is provided explicitly by the compressed file
format, this is a precise reflection of the encoding. Otherwise,
it is estimated from the on-disk file size. In this case, some
imprecision is possible because the file header is incorporated
in the file size.
"""
if hasattr(self.mgfile.info, 'bitrate') and self.mgfile.info.bitrate:
# Many formats provide it explicitly.
return self.mgfile.info.bitrate
else:
# Otherwise, we calculate bitrate from the file size. (This
# is the case for all of the lossless formats.)
if not self.length:
# Avoid division by zero if length is not available.
return 0
size = os.path.getsize(self.path)
return int(size * 8 / self.length) | [
"def",
"bitrate",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
".",
"mgfile",
".",
"info",
",",
"'bitrate'",
")",
"and",
"self",
".",
"mgfile",
".",
"info",
".",
"bitrate",
":",
"# Many formats provide it explicitly.",
"return",
"self",
".",
"mgfile",
".",
"info",
".",
"bitrate",
"else",
":",
"# Otherwise, we calculate bitrate from the file size. (This",
"# is the case for all of the lossless formats.)",
"if",
"not",
"self",
".",
"length",
":",
"# Avoid division by zero if length is not available.",
"return",
"0",
"size",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"self",
".",
"path",
")",
"return",
"int",
"(",
"size",
"*",
"8",
"/",
"self",
".",
"length",
")"
] | The number of bits per second used in the audio coding (an
int). If this is provided explicitly by the compressed file
format, this is a precise reflection of the encoding. Otherwise,
it is estimated from the on-disk file size. In this case, some
imprecision is possible because the file header is incorporated
in the file size. | [
"The",
"number",
"of",
"bits",
"per",
"seconds",
"used",
"in",
"the",
"audio",
"coding",
"(",
"an",
"int",
")",
".",
"If",
"this",
"is",
"provided",
"explicitly",
"by",
"the",
"compressed",
"file",
"format",
"this",
"is",
"a",
"precise",
"reflection",
"of",
"the",
"encoding",
".",
"Otherwise",
"it",
"is",
"estimated",
"from",
"the",
"on",
"-",
"disk",
"file",
"size",
".",
"In",
"this",
"case",
"some",
"imprecision",
"is",
"possible",
"because",
"the",
"file",
"header",
"is",
"incorporated",
"in",
"the",
"file",
"size",
"."
] | python | train |
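The fallback estimate on its own; the numbers are made up.

def estimated_bitrate(size_bytes, length_seconds):
    # Mirrors the lossless-format fallback above: total bits / duration.
    if not length_seconds:
        return 0
    return int(size_bytes * 8 / length_seconds)

print(estimated_bitrate(4_000_000, 200))  # 160000 bits per second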
omza/azurestoragewrap | azurestoragewrap/queue.py | https://github.com/omza/azurestoragewrap/blob/976878e95d82ff0f7d8a00a5e4a7a3fb6268ab08/azurestoragewrap/queue.py#L62-L73 | def getmessage(self) -> str:
""" parse self into unicode string as message content """
image = {}
for key, default in vars(self.__class__).items():
if not key.startswith('_') and key !='' and (not key in vars(QueueMessage).items()):
if isinstance(default, datetime.date):
image[key] = safe_cast(getattr(self, key, default), str, dformat=self._dateformat)
if isinstance(default, datetime.datetime):
image[key] = safe_cast(getattr(self, key, default), str, dformat=self._datetimeformat)
else:
image[key] = getattr(self, key, default)
return str(image) | [
"def",
"getmessage",
"(",
"self",
")",
"->",
"str",
":",
"image",
"=",
"{",
"}",
"for",
"key",
",",
"default",
"in",
"vars",
"(",
"self",
".",
"__class__",
")",
".",
"items",
"(",
")",
":",
"if",
"not",
"key",
".",
"startswith",
"(",
"'_'",
")",
"and",
"key",
"!=",
"''",
"and",
"(",
"not",
"key",
"in",
"vars",
"(",
"QueueMessage",
")",
".",
"items",
"(",
")",
")",
":",
"if",
"isinstance",
"(",
"default",
",",
"datetime",
".",
"date",
")",
":",
"image",
"[",
"key",
"]",
"=",
"safe_cast",
"(",
"getattr",
"(",
"self",
",",
"key",
",",
"default",
")",
",",
"str",
",",
"dformat",
"=",
"self",
".",
"_dateformat",
")",
"if",
"isinstance",
"(",
"default",
",",
"datetime",
".",
"datetime",
")",
":",
"image",
"[",
"key",
"]",
"=",
"safe_cast",
"(",
"getattr",
"(",
"self",
",",
"key",
",",
"default",
")",
",",
"str",
",",
"dformat",
"=",
"self",
".",
"_datetimeformat",
")",
"else",
":",
"image",
"[",
"key",
"]",
"=",
"getattr",
"(",
"self",
",",
"key",
",",
"default",
")",
"return",
"str",
"(",
"image",
")"
] | parse self into unicode string as message content | [
"parse",
"self",
"into",
"unicode",
"string",
"as",
"message",
"content"
] | python | train |
djgagne/hagelslag | hagelslag/processing/ObjectMatcher.py | https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/ObjectMatcher.py#L334-L348 | def nonoverlap(item_a, time_a, item_b, time_b, max_value):
"""
Percentage of pixels in each object that do not overlap with the other object
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
"""
return np.minimum(1 - item_a.count_overlap(time_a, item_b, time_b), max_value) / float(max_value) | [
"def",
"nonoverlap",
"(",
"item_a",
",",
"time_a",
",",
"item_b",
",",
"time_b",
",",
"max_value",
")",
":",
"return",
"np",
".",
"minimum",
"(",
"1",
"-",
"item_a",
".",
"count_overlap",
"(",
"time_a",
",",
"item_b",
",",
"time_b",
")",
",",
"max_value",
")",
"/",
"float",
"(",
"max_value",
")"
] | Percentage of pixels in each object that do not overlap with the other object
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1. | [
"Percentage",
"of",
"pixels",
"in",
"each",
"object",
"that",
"do",
"not",
"overlap",
"with",
"the",
"other",
"object"
] | python | train |
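A numeric check of the distance formula with the overlap already computed (stubbing out the STObject machinery):

import numpy as np

def nonoverlap_distance(overlap_fraction, max_value):
    # Same expression as above: cap at max_value, then rescale to [0, 1].
    return np.minimum(1 - overlap_fraction, max_value) / float(max_value)

print(nonoverlap_distance(0.7, 1.0))  # 0.3
print(nonoverlap_distance(0.1, 0.5))  # 1.0 -- capped, then scaled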
allenai/allennlp | allennlp/semparse/domain_languages/nlvr_language.py | https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/nlvr_language.py#L125-L199 | def get_agenda_for_sentence(self, sentence: str) -> List[str]:
"""
Given a ``sentence``, returns a list of actions the sentence triggers as an ``agenda``. The
``agenda`` can be used while by a parser to guide the decoder. sequences as possible. This
is a simplistic mapping at this point, and can be expanded.
Parameters
----------
sentence : ``str``
The sentence for which an agenda will be produced.
"""
agenda = []
sentence = sentence.lower()
if sentence.startswith("there is a box") or sentence.startswith("there is a tower "):
agenda.append(self.terminal_productions["box_exists"])
elif sentence.startswith("there is a "):
agenda.append(self.terminal_productions["object_exists"])
if "<Set[Box]:bool> -> box_exists" not in agenda:
# These are object filters and do not apply if we have a box_exists at the top.
if "touch" in sentence:
if "top" in sentence:
agenda.append(self.terminal_productions["touch_top"])
elif "bottom" in sentence or "base" in sentence:
agenda.append(self.terminal_productions["touch_bottom"])
elif "corner" in sentence:
agenda.append(self.terminal_productions["touch_corner"])
elif "right" in sentence:
agenda.append(self.terminal_productions["touch_right"])
elif "left" in sentence:
agenda.append(self.terminal_productions["touch_left"])
elif "wall" in sentence or "edge" in sentence:
agenda.append(self.terminal_productions["touch_wall"])
else:
agenda.append(self.terminal_productions["touch_object"])
else:
# The words "top" and "bottom" may be referring to top and bottom blocks in a tower.
if "top" in sentence:
agenda.append(self.terminal_productions["top"])
elif "bottom" in sentence or "base" in sentence:
agenda.append(self.terminal_productions["bottom"])
if " not " in sentence:
agenda.append(self.terminal_productions["negate_filter"])
if " contains " in sentence or " has " in sentence:
agenda.append(self.terminal_productions["all_boxes"])
# This takes care of shapes, colors, top, bottom, big, small etc.
for constant, production in self.terminal_productions.items():
# TODO(pradeep): Deal with constant names with underscores.
if "top" in constant or "bottom" in constant:
# We already dealt with top, bottom, touch_top and touch_bottom above.
continue
if constant in sentence:
if "<Set[Object]:Set[Object]> ->" in production and "<Set[Box]:bool> -> box_exists" in agenda:
if constant in ["square", "circle", "triangle"]:
agenda.append(self.terminal_productions[f"shape_{constant}"])
elif constant in ["yellow", "blue", "black"]:
agenda.append(self.terminal_productions[f"color_{constant}"])
else:
continue
else:
agenda.append(production)
# TODO (pradeep): Rules for "member_*" productions ("tower" or "box" followed by a color,
# shape or number...)
number_productions = self._get_number_productions(sentence)
for production in number_productions:
agenda.append(production)
if not agenda:
# None of the rules above was triggered!
if "box" in sentence:
agenda.append(self.terminal_productions["all_boxes"])
else:
agenda.append(self.terminal_productions["all_objects"])
return agenda | [
"def",
"get_agenda_for_sentence",
"(",
"self",
",",
"sentence",
":",
"str",
")",
"->",
"List",
"[",
"str",
"]",
":",
"agenda",
"=",
"[",
"]",
"sentence",
"=",
"sentence",
".",
"lower",
"(",
")",
"if",
"sentence",
".",
"startswith",
"(",
"\"there is a box\"",
")",
"or",
"sentence",
".",
"startswith",
"(",
"\"there is a tower \"",
")",
":",
"agenda",
".",
"append",
"(",
"self",
".",
"terminal_productions",
"[",
"\"box_exists\"",
"]",
")",
"elif",
"sentence",
".",
"startswith",
"(",
"\"there is a \"",
")",
":",
"agenda",
".",
"append",
"(",
"self",
".",
"terminal_productions",
"[",
"\"object_exists\"",
"]",
")",
"if",
"\"<Set[Box]:bool> -> box_exists\"",
"not",
"in",
"agenda",
":",
"# These are object filters and do not apply if we have a box_exists at the top.",
"if",
"\"touch\"",
"in",
"sentence",
":",
"if",
"\"top\"",
"in",
"sentence",
":",
"agenda",
".",
"append",
"(",
"self",
".",
"terminal_productions",
"[",
"\"touch_top\"",
"]",
")",
"elif",
"\"bottom\"",
"in",
"sentence",
"or",
"\"base\"",
"in",
"sentence",
":",
"agenda",
".",
"append",
"(",
"self",
".",
"terminal_productions",
"[",
"\"touch_bottom\"",
"]",
")",
"elif",
"\"corner\"",
"in",
"sentence",
":",
"agenda",
".",
"append",
"(",
"self",
".",
"terminal_productions",
"[",
"\"touch_corner\"",
"]",
")",
"elif",
"\"right\"",
"in",
"sentence",
":",
"agenda",
".",
"append",
"(",
"self",
".",
"terminal_productions",
"[",
"\"touch_right\"",
"]",
")",
"elif",
"\"left\"",
"in",
"sentence",
":",
"agenda",
".",
"append",
"(",
"self",
".",
"terminal_productions",
"[",
"\"touch_left\"",
"]",
")",
"elif",
"\"wall\"",
"in",
"sentence",
"or",
"\"edge\"",
"in",
"sentence",
":",
"agenda",
".",
"append",
"(",
"self",
".",
"terminal_productions",
"[",
"\"touch_wall\"",
"]",
")",
"else",
":",
"agenda",
".",
"append",
"(",
"self",
".",
"terminal_productions",
"[",
"\"touch_object\"",
"]",
")",
"else",
":",
"# The words \"top\" and \"bottom\" may be referring to top and bottom blocks in a tower.",
"if",
"\"top\"",
"in",
"sentence",
":",
"agenda",
".",
"append",
"(",
"self",
".",
"terminal_productions",
"[",
"\"top\"",
"]",
")",
"elif",
"\"bottom\"",
"in",
"sentence",
"or",
"\"base\"",
"in",
"sentence",
":",
"agenda",
".",
"append",
"(",
"self",
".",
"terminal_productions",
"[",
"\"bottom\"",
"]",
")",
"if",
"\" not \"",
"in",
"sentence",
":",
"agenda",
".",
"append",
"(",
"self",
".",
"terminal_productions",
"[",
"\"negate_filter\"",
"]",
")",
"if",
"\" contains \"",
"in",
"sentence",
"or",
"\" has \"",
"in",
"sentence",
":",
"agenda",
".",
"append",
"(",
"self",
".",
"terminal_productions",
"[",
"\"all_boxes\"",
"]",
")",
"# This takes care of shapes, colors, top, bottom, big, small etc.",
"for",
"constant",
",",
"production",
"in",
"self",
".",
"terminal_productions",
".",
"items",
"(",
")",
":",
"# TODO(pradeep): Deal with constant names with underscores.",
"if",
"\"top\"",
"in",
"constant",
"or",
"\"bottom\"",
"in",
"constant",
":",
"# We already dealt with top, bottom, touch_top and touch_bottom above.",
"continue",
"if",
"constant",
"in",
"sentence",
":",
"if",
"\"<Set[Object]:Set[Object]> ->\"",
"in",
"production",
"and",
"\"<Set[Box]:bool> -> box_exists\"",
"in",
"agenda",
":",
"if",
"constant",
"in",
"[",
"\"square\"",
",",
"\"circle\"",
",",
"\"triangle\"",
"]",
":",
"agenda",
".",
"append",
"(",
"self",
".",
"terminal_productions",
"[",
"f\"shape_{constant}\"",
"]",
")",
"elif",
"constant",
"in",
"[",
"\"yellow\"",
",",
"\"blue\"",
",",
"\"black\"",
"]",
":",
"agenda",
".",
"append",
"(",
"self",
".",
"terminal_productions",
"[",
"f\"color_{constant}\"",
"]",
")",
"else",
":",
"continue",
"else",
":",
"agenda",
".",
"append",
"(",
"production",
")",
"# TODO (pradeep): Rules for \"member_*\" productions (\"tower\" or \"box\" followed by a color,",
"# shape or number...)",
"number_productions",
"=",
"self",
".",
"_get_number_productions",
"(",
"sentence",
")",
"for",
"production",
"in",
"number_productions",
":",
"agenda",
".",
"append",
"(",
"production",
")",
"if",
"not",
"agenda",
":",
"# None of the rules above was triggered!",
"if",
"\"box\"",
"in",
"sentence",
":",
"agenda",
".",
"append",
"(",
"self",
".",
"terminal_productions",
"[",
"\"all_boxes\"",
"]",
")",
"else",
":",
"agenda",
".",
"append",
"(",
"self",
".",
"terminal_productions",
"[",
"\"all_objects\"",
"]",
")",
"return",
"agenda"
] | Given a ``sentence``, returns a list of actions the sentence triggers as an ``agenda``. The
``agenda`` can be used by a parser to guide the decoder. This
is a simplistic mapping at this point, and can be expanded.
Parameters
----------
sentence : ``str``
The sentence for which an agenda will be produced. | [
"Given",
"a",
"sentence",
"returns",
"a",
"list",
"of",
"actions",
"the",
"sentence",
"triggers",
"as",
"an",
"agenda",
".",
"The",
"agenda",
"can",
"be",
"used",
"while",
"by",
"a",
"parser",
"to",
"guide",
"the",
"decoder",
".",
"sequences",
"as",
"possible",
".",
"This",
"is",
"a",
"simplistic",
"mapping",
"at",
"this",
"point",
"and",
"can",
"be",
"expanded",
"."
] | python | train |
Jaymon/prom | prom/interface/base.py | https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/interface/base.py#L233-L253 | def transaction(self, connection=None, **kwargs):
"""
a simple context manager useful for when you want to wrap a bunch of db calls in a transaction
http://docs.python.org/2/library/contextlib.html
http://docs.python.org/release/2.5/whatsnew/pep-343.html
example --
with self.transaction():
# do a bunch of calls
# those db calls will be committed by this line
"""
with self.connection(connection) as connection:
name = connection.transaction_name()
connection.transaction_start(name)
try:
yield connection
connection.transaction_stop()
except Exception as e:
connection.transaction_fail(name)
self.raise_error(e) | [
"def",
"transaction",
"(",
"self",
",",
"connection",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"self",
".",
"connection",
"(",
"connection",
")",
"as",
"connection",
":",
"name",
"=",
"connection",
".",
"transaction_name",
"(",
")",
"connection",
".",
"transaction_start",
"(",
"name",
")",
"try",
":",
"yield",
"connection",
"connection",
".",
"transaction_stop",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"connection",
".",
"transaction_fail",
"(",
"name",
")",
"self",
".",
"raise_error",
"(",
"e",
")"
] | a simple context manager useful for when you want to wrap a bunch of db calls in a transaction
http://docs.python.org/2/library/contextlib.html
http://docs.python.org/release/2.5/whatsnew/pep-343.html
example --
with self.transaction():
# do a bunch of calls
# those db calls will be committed by this line | [
"a",
"simple",
"context",
"manager",
"useful",
"for",
"when",
"you",
"want",
"to",
"wrap",
"a",
"bunch",
"of",
"db",
"calls",
"in",
"a",
"transaction",
"http",
":",
"//",
"docs",
".",
"python",
".",
"org",
"/",
"2",
"/",
"library",
"/",
"contextlib",
".",
"html",
"http",
":",
"//",
"docs",
".",
"python",
".",
"org",
"/",
"release",
"/",
"2",
".",
"5",
"/",
"whatsnew",
"/",
"pep",
"-",
"343",
".",
"html"
] | python | train |
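The commit/rollback call order the context manager produces, demonstrated with a minimal stand-in connection (FakeConnection is invented for this sketch):

class FakeConnection:
    # Stand-in exposing only the methods the context manager calls.
    def transaction_name(self): return 'tx1'
    def transaction_start(self, name): print('start', name)
    def transaction_stop(self): print('commit')
    def transaction_fail(self, name): print('rollback', name)

conn = FakeConnection()
name = conn.transaction_name()
conn.transaction_start(name)
try:
    pass  # the wrapped db calls would run here
    conn.transaction_stop()
except Exception:
    conn.transaction_fail(name)
    raise
# prints: start tx1 / commit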
ethereum/pyethereum | ethereum/slogging.py | https://github.com/ethereum/pyethereum/blob/b704a5c6577863edc539a1ec3d2620a443b950fb/ethereum/slogging.py#L349-L356 | def DEBUG(msg, *args, **kwargs):
"""temporary logger during development that is always on"""
logger = getLogger("DEBUG")
if len(logger.handlers) == 0:
logger.addHandler(StreamHandler())
logger.propagate = False
logger.setLevel(logging.DEBUG)
logger.DEV(msg, *args, **kwargs) | [
"def",
"DEBUG",
"(",
"msg",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"logger",
"=",
"getLogger",
"(",
"\"DEBUG\"",
")",
"if",
"len",
"(",
"logger",
".",
"handlers",
")",
"==",
"0",
":",
"logger",
".",
"addHandler",
"(",
"StreamHandler",
"(",
")",
")",
"logger",
".",
"propagate",
"=",
"False",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"logger",
".",
"DEV",
"(",
"msg",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | temporary logger during development that is always on | [
"temporary",
"logger",
"during",
"development",
"that",
"is",
"always",
"on"
] | python | train |
mabuchilab/QNET | src/qnet/__init__.py | https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/__init__.py#L23-L52 | def _git_version():
"""If installed with 'pip installe -e .' from inside a git repo, the
current git revision as a string"""
import subprocess
import os
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
FNULL = open(os.devnull, 'w')
cwd = os.path.dirname(os.path.realpath(__file__))
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=FNULL, env=env, cwd=cwd)
out = proc.communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
return out.strip().decode('ascii')
except OSError:
return "unknown" | [
"def",
"_git_version",
"(",
")",
":",
"import",
"subprocess",
"import",
"os",
"def",
"_minimal_ext_cmd",
"(",
"cmd",
")",
":",
"# construct minimal environment",
"env",
"=",
"{",
"}",
"for",
"k",
"in",
"[",
"'SYSTEMROOT'",
",",
"'PATH'",
"]",
":",
"v",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"k",
")",
"if",
"v",
"is",
"not",
"None",
":",
"env",
"[",
"k",
"]",
"=",
"v",
"# LANGUAGE is used on win32",
"env",
"[",
"'LANGUAGE'",
"]",
"=",
"'C'",
"env",
"[",
"'LANG'",
"]",
"=",
"'C'",
"env",
"[",
"'LC_ALL'",
"]",
"=",
"'C'",
"FNULL",
"=",
"open",
"(",
"os",
".",
"devnull",
",",
"'w'",
")",
"cwd",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"FNULL",
",",
"env",
"=",
"env",
",",
"cwd",
"=",
"cwd",
")",
"out",
"=",
"proc",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
"return",
"out",
"try",
":",
"out",
"=",
"_minimal_ext_cmd",
"(",
"[",
"'git'",
",",
"'rev-parse'",
",",
"'HEAD'",
"]",
")",
"return",
"out",
".",
"strip",
"(",
")",
".",
"decode",
"(",
"'ascii'",
")",
"except",
"OSError",
":",
"return",
"\"unknown\""
] | If installed with 'pip install -e .' from inside a git repo, the
current git revision as a string | [
"If",
"installed",
"with",
"pip",
"installe",
"-",
"e",
".",
"from",
"inside",
"a",
"git",
"repo",
"the",
"current",
"git",
"revision",
"as",
"a",
"string"
] | python | train |
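The core subprocess call, runnable on its own inside any git checkout. This simplified form also catches CalledProcessError, which the original avoids by never checking the exit code of Popen.communicate.

import subprocess

try:
    out = subprocess.check_output(['git', 'rev-parse', 'HEAD'],
                                  stderr=subprocess.DEVNULL)
    print(out.strip().decode('ascii'))  # e.g. 'cc20d26dad78...'
except (OSError, subprocess.CalledProcessError):
    print('unknown')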
tensorflow/probability | tensorflow_probability/python/distributions/onehot_categorical.py | https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/onehot_categorical.py#L241-L258 | def _kl_categorical_categorical(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a, b OneHotCategorical.
Args:
a: instance of a OneHotCategorical distribution object.
b: instance of a OneHotCategorical distribution object.
name: (optional) Name to use for created operations.
default is "kl_categorical_categorical".
Returns:
Batchwise KL(a || b)
"""
with tf.name_scope(name or "kl_categorical_categorical"):
# sum(p ln(p / q))
return tf.reduce_sum(
input_tensor=tf.nn.softmax(a.logits) *
(tf.nn.log_softmax(a.logits) - tf.nn.log_softmax(b.logits)),
axis=-1) | [
"def",
"_kl_categorical_categorical",
"(",
"a",
",",
"b",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"kl_categorical_categorical\"",
")",
":",
"# sum(p ln(p / q))",
"return",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"tf",
".",
"nn",
".",
"softmax",
"(",
"a",
".",
"logits",
")",
"*",
"(",
"tf",
".",
"nn",
".",
"log_softmax",
"(",
"a",
".",
"logits",
")",
"-",
"tf",
".",
"nn",
".",
"log_softmax",
"(",
"b",
".",
"logits",
")",
")",
",",
"axis",
"=",
"-",
"1",
")"
] | Calculate the batched KL divergence KL(a || b) with a, b OneHotCategorical.
Args:
a: instance of a OneHotCategorical distribution object.
b: instance of a OneHotCategorical distribution object.
name: (optional) Name to use for created operations.
default is "kl_categorical_categorical".
Returns:
Batchwise KL(a || b) | [
"Calculate",
"the",
"batched",
"KL",
"divergence",
"KL",
"(",
"a",
"||",
"b",
")",
"with",
"a",
"b",
"OneHotCategorical",
"."
] | python | test |
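The same KL computation in plain numpy for a 3-class example: softmax both logit vectors, then sum p * (log p - log q).

import numpy as np

def softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

p = softmax(np.array([1.0, 0.0, -1.0]))
q = softmax(np.array([0.0, 0.0, 0.0]))  # uniform distribution
kl = np.sum(p * (np.log(p) - np.log(q)))
print(kl)  # ~0.2662 nats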
awslabs/sockeye | sockeye/inference.py | https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/inference.py#L2209-L2231 | def _get_best_word_indices_for_kth_hypotheses(ks: np.ndarray, all_hyp_indices: np.ndarray) -> np.ndarray:
"""
Traverses the matrix of best hypotheses indices collected during beam search in reversed order by
using the kth hypotheses index as a backpointer.
Returns an array containing the indices into the best_word_indices collected during beam search to extract
the kth hypotheses.
:param ks: The kth-best hypotheses to extract. Supports multiple for batch_size > 1. Shape: (batch,).
:param all_hyp_indices: All best hypotheses indices list collected in beam search. Shape: (batch * beam, steps).
:return: Array of indices into the best_word_indices collected in beam search
that extract the kth-best hypothesis. Shape: (batch,).
"""
batch_size = ks.shape[0]
num_steps = all_hyp_indices.shape[1]
result = np.zeros((batch_size, num_steps - 1), dtype=all_hyp_indices.dtype)
# first index into the history of the desired hypotheses.
pointer = all_hyp_indices[ks, -1]
# for each column/step follow the pointer, starting from the penultimate column/step
num_steps = all_hyp_indices.shape[1]
for step in range(num_steps - 2, -1, -1):
result[:, step] = pointer
pointer = all_hyp_indices[pointer, step]
return result | [
"def",
"_get_best_word_indices_for_kth_hypotheses",
"(",
"ks",
":",
"np",
".",
"ndarray",
",",
"all_hyp_indices",
":",
"np",
".",
"ndarray",
")",
"->",
"np",
".",
"ndarray",
":",
"batch_size",
"=",
"ks",
".",
"shape",
"[",
"0",
"]",
"num_steps",
"=",
"all_hyp_indices",
".",
"shape",
"[",
"1",
"]",
"result",
"=",
"np",
".",
"zeros",
"(",
"(",
"batch_size",
",",
"num_steps",
"-",
"1",
")",
",",
"dtype",
"=",
"all_hyp_indices",
".",
"dtype",
")",
"# first index into the history of the desired hypotheses.",
"pointer",
"=",
"all_hyp_indices",
"[",
"ks",
",",
"-",
"1",
"]",
"# for each column/step follow the pointer, starting from the penultimate column/step",
"num_steps",
"=",
"all_hyp_indices",
".",
"shape",
"[",
"1",
"]",
"for",
"step",
"in",
"range",
"(",
"num_steps",
"-",
"2",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"result",
"[",
":",
",",
"step",
"]",
"=",
"pointer",
"pointer",
"=",
"all_hyp_indices",
"[",
"pointer",
",",
"step",
"]",
"return",
"result"
] | Traverses the matrix of best hypotheses indices collected during beam search in reversed order by
using the kth hypotheses index as a backpointer.
Returns an array containing the indices into the best_word_indices collected during beam search to extract
the kth hypotheses.
:param ks: The kth-best hypotheses to extract. Supports multiple for batch_size > 1. Shape: (batch,).
:param all_hyp_indices: All best hypotheses indices list collected in beam search. Shape: (batch * beam, steps).
:return: Array of indices into the best_word_indices collected in beam search
that extract the kth-best hypothesis. Shape: (batch,). | [
"Traverses",
"the",
"matrix",
"of",
"best",
"hypotheses",
"indices",
"collected",
"during",
"beam",
"search",
"in",
"reversed",
"order",
"by",
"using",
"the",
"kth",
"hypotheses",
"index",
"as",
"a",
"backpointer",
".",
"Returns",
"an",
"array",
"containing",
"the",
"indices",
"into",
"the",
"best_word_indices",
"collected",
"during",
"beam",
"search",
"to",
"extract",
"the",
"kth",
"hypotheses",
"."
] | python | train |
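To make the backpointer traversal concrete, here is a self-contained NumPy demo of the same logic on a hand-built matrix (batch size 1, beam size 3; the entries are invented):

import numpy as np

def backtrack(ks, all_hyp_indices):
    # Same traversal as above: walk the pointer matrix right-to-left.
    num_steps = all_hyp_indices.shape[1]
    result = np.zeros((ks.shape[0], num_steps - 1), dtype=all_hyp_indices.dtype)
    pointer = all_hyp_indices[ks, -1]
    for step in range(num_steps - 2, -1, -1):
        result[:, step] = pointer
        pointer = all_hyp_indices[pointer, step]
    return result

# Entry (i, t) is the beam slot at step t-1 that hypothesis i at step t extends.
all_hyp_indices = np.array([[0, 1, 0, 2],
                            [1, 0, 2, 0],
                            [2, 2, 1, 1]])
print(backtrack(np.array([0]), all_hyp_indices))  # [[0 1 2]]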
Miserlou/Zappa | zappa/core.py | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L1985-L2014 | def undeploy_api_gateway(self, lambda_name, domain_name=None, base_path=None):
"""
Delete a deployed REST API Gateway.
"""
print("Deleting API Gateway..")
api_id = self.get_api_id(lambda_name)
if domain_name:
# XXX - Remove Route53 smartly here?
# XXX - This doesn't raise, but doesn't work either.
try:
self.apigateway_client.delete_base_path_mapping(
domainName=domain_name,
basePath='(none)' if base_path is None else base_path
)
except Exception as e:
# We may not have actually set up the domain.
pass
was_deleted = self.delete_stack(lambda_name, wait=True)
if not was_deleted:
# try erasing it with the older method
for api in self.get_rest_apis(lambda_name):
self.apigateway_client.delete_rest_api(
restApiId=api['id']
) | [
"def",
"undeploy_api_gateway",
"(",
"self",
",",
"lambda_name",
",",
"domain_name",
"=",
"None",
",",
"base_path",
"=",
"None",
")",
":",
"print",
"(",
"\"Deleting API Gateway..\"",
")",
"api_id",
"=",
"self",
".",
"get_api_id",
"(",
"lambda_name",
")",
"if",
"domain_name",
":",
"# XXX - Remove Route53 smartly here?",
"# XXX - This doesn't raise, but doesn't work either.",
"try",
":",
"self",
".",
"apigateway_client",
".",
"delete_base_path_mapping",
"(",
"domainName",
"=",
"domain_name",
",",
"basePath",
"=",
"'(none)'",
"if",
"base_path",
"is",
"None",
"else",
"base_path",
")",
"except",
"Exception",
"as",
"e",
":",
"# We may not have actually set up the domain.",
"pass",
"was_deleted",
"=",
"self",
".",
"delete_stack",
"(",
"lambda_name",
",",
"wait",
"=",
"True",
")",
"if",
"not",
"was_deleted",
":",
"# try erasing it with the older method",
"for",
"api",
"in",
"self",
".",
"get_rest_apis",
"(",
"lambda_name",
")",
":",
"self",
".",
"apigateway_client",
".",
"delete_rest_api",
"(",
"restApiId",
"=",
"api",
"[",
"'id'",
"]",
")"
] | Delete a deployed REST API Gateway. | [
"Delete",
"a",
"deployed",
"REST",
"API",
"Gateway",
"."
] | python | train |
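A typical call site would look roughly like the sketch below; the stack name and domain are placeholders, and the constructor arguments are abbreviated assumptions (real use needs AWS credentials configured):

from zappa.core import Zappa

zappa = Zappa(aws_region='us-east-1')      # assumed minimal construction
zappa.undeploy_api_gateway(
    'my-app-dev',                          # Lambda/stack name (placeholder)
    domain_name='api.example.com',         # optional custom-domain mapping
    base_path=None,                        # None deletes the '(none)' mapping
)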
cloud9ers/gurumate | environment/share/doc/ipython/examples/parallel/davinci/pwordfreq.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/share/doc/ipython/examples/parallel/davinci/pwordfreq.py#L20-L37 | def pwordfreq(view, fnames):
"""Parallel word frequency counter.
view - An IPython DirectView
fnames - The filenames containing the split data.
"""
assert len(fnames) == len(view.targets)
view.scatter('fname', fnames, flatten=True)
ar = view.apply(wordfreq, Reference('fname'))
freqs_list = ar.get()
word_set = set()
for f in freqs_list:
word_set.update(f.keys())
freqs = dict(zip(word_set, repeat(0)))
for f in freqs_list:
for word, count in f.iteritems():
freqs[word] += count
return freqs | [
"def",
"pwordfreq",
"(",
"view",
",",
"fnames",
")",
":",
"assert",
"len",
"(",
"fnames",
")",
"==",
"len",
"(",
"view",
".",
"targets",
")",
"view",
".",
"scatter",
"(",
"'fname'",
",",
"fnames",
",",
"flatten",
"=",
"True",
")",
"ar",
"=",
"view",
".",
"apply",
"(",
"wordfreq",
",",
"Reference",
"(",
"'fname'",
")",
")",
"freqs_list",
"=",
"ar",
".",
"get",
"(",
")",
"word_set",
"=",
"set",
"(",
")",
"for",
"f",
"in",
"freqs_list",
":",
"word_set",
".",
"update",
"(",
"f",
".",
"keys",
"(",
")",
")",
"freqs",
"=",
"dict",
"(",
"zip",
"(",
"word_set",
",",
"repeat",
"(",
"0",
")",
")",
")",
"for",
"f",
"in",
"freqs_list",
":",
"for",
"word",
",",
"count",
"in",
"f",
".",
"iteritems",
"(",
")",
":",
"freqs",
"[",
"word",
"]",
"+=",
"count",
"return",
"freqs"
] | Parallel word frequency counter.
view - An IPython DirectView
fnames - The filenames containing the split data. | [
"Parallel",
"word",
"frequency",
"counter",
".",
"view",
"-",
"An",
"IPython",
"DirectView",
"fnames",
"-",
"The",
"filenames",
"containing",
"the",
"split",
"data",
"."
] | python | test |
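The `wordfreq` helper that the engines execute is not part of this snippet; a minimal serial counterpart might look like the sketch below (whitespace tokenization is an assumption). `pwordfreq` then merges these per-file dicts into one.

from collections import Counter

def wordfreq(fname):
    # Count word occurrences in one file; runs remotely on each engine.
    with open(fname) as f:
        return Counter(f.read().split())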
materialsproject/pymatgen | pymatgen/io/qchem/outputs.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/qchem/outputs.py#L586-L624 | def _read_pcm_information(self):
"""
Parses information from PCM solvent calculations.
"""
temp_dict = read_pattern(
self.text, {
"g_electrostatic": r"\s*G_electrostatic\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
"g_cavitation": r"\s*G_cavitation\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
"g_dispersion": r"\s*G_dispersion\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
"g_repulsion": r"\s*G_repulsion\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
"total_contribution_pcm": r"\s*Total\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
}
)
if temp_dict.get("g_electrostatic") is None:
self.data["g_electrostatic"] = None
else:
self.data["g_electrostatic"] = float(temp_dict.get("g_electrostatic")[0][0])
if temp_dict.get("g_cavitation") is None:
self.data["g_cavitation"] = None
else:
self.data["g_cavitation"] = float(temp_dict.get("g_cavitation")[0][0])
if temp_dict.get("g_dispersion") is None:
self.data["g_dispersion"] = None
else:
self.data["g_dispersion"] = float(temp_dict.get("g_dispersion")[0][0])
if temp_dict.get("g_repulsion") is None:
self.data["g_repulsion"] = None
else:
self.data["g_repulsion"] = float(temp_dict.get("g_repulsion")[0][0])
if temp_dict.get("total_contribution_pcm") is None:
self.data["total_contribution_pcm"] = []
else:
self.data["total_contribution_pcm"] = float(temp_dict.get("total_contribution_pcm")[0][0]) | [
"def",
"_read_pcm_information",
"(",
"self",
")",
":",
"temp_dict",
"=",
"read_pattern",
"(",
"self",
".",
"text",
",",
"{",
"\"g_electrostatic\"",
":",
"r\"\\s*G_electrostatic\\s+=\\s+([\\d\\-\\.]+)\\s+hartree\\s+=\\s+([\\d\\-\\.]+)\\s+kcal/mol\\s*\"",
",",
"\"g_cavitation\"",
":",
"r\"\\s*G_cavitation\\s+=\\s+([\\d\\-\\.]+)\\s+hartree\\s+=\\s+([\\d\\-\\.]+)\\s+kcal/mol\\s*\"",
",",
"\"g_dispersion\"",
":",
"r\"\\s*G_dispersion\\s+=\\s+([\\d\\-\\.]+)\\s+hartree\\s+=\\s+([\\d\\-\\.]+)\\s+kcal/mol\\s*\"",
",",
"\"g_repulsion\"",
":",
"r\"\\s*G_repulsion\\s+=\\s+([\\d\\-\\.]+)\\s+hartree\\s+=\\s+([\\d\\-\\.]+)\\s+kcal/mol\\s*\"",
",",
"\"total_contribution_pcm\"",
":",
"r\"\\s*Total\\s+=\\s+([\\d\\-\\.]+)\\s+hartree\\s+=\\s+([\\d\\-\\.]+)\\s+kcal/mol\\s*\"",
",",
"}",
")",
"if",
"temp_dict",
".",
"get",
"(",
"\"g_electrostatic\"",
")",
"is",
"None",
":",
"self",
".",
"data",
"[",
"\"g_electrostatic\"",
"]",
"=",
"None",
"else",
":",
"self",
".",
"data",
"[",
"\"g_electrostatic\"",
"]",
"=",
"float",
"(",
"temp_dict",
".",
"get",
"(",
"\"g_electrostatic\"",
")",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"if",
"temp_dict",
".",
"get",
"(",
"\"g_cavitation\"",
")",
"is",
"None",
":",
"self",
".",
"data",
"[",
"\"g_cavitation\"",
"]",
"=",
"None",
"else",
":",
"self",
".",
"data",
"[",
"\"g_cavitation\"",
"]",
"=",
"float",
"(",
"temp_dict",
".",
"get",
"(",
"\"g_cavitation\"",
")",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"if",
"temp_dict",
".",
"get",
"(",
"\"g_dispersion\"",
")",
"is",
"None",
":",
"self",
".",
"data",
"[",
"\"g_dispersion\"",
"]",
"=",
"None",
"else",
":",
"self",
".",
"data",
"[",
"\"g_dispersion\"",
"]",
"=",
"float",
"(",
"temp_dict",
".",
"get",
"(",
"\"g_dispersion\"",
")",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"if",
"temp_dict",
".",
"get",
"(",
"\"g_repulsion\"",
")",
"is",
"None",
":",
"self",
".",
"data",
"[",
"\"g_repulsion\"",
"]",
"=",
"None",
"else",
":",
"self",
".",
"data",
"[",
"\"g_repulsion\"",
"]",
"=",
"float",
"(",
"temp_dict",
".",
"get",
"(",
"\"g_repulsion\"",
")",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"if",
"temp_dict",
".",
"get",
"(",
"\"total_contribution_pcm\"",
")",
"is",
"None",
":",
"self",
".",
"data",
"[",
"\"total_contribution_pcm\"",
"]",
"=",
"[",
"]",
"else",
":",
"self",
".",
"data",
"[",
"\"total_contribution_pcm\"",
"]",
"=",
"float",
"(",
"temp_dict",
".",
"get",
"(",
"\"total_contribution_pcm\"",
")",
"[",
"0",
"]",
"[",
"0",
"]",
")"
] | Parses information from PCM solvent calculations. | [
"Parses",
"information",
"from",
"PCM",
"solvent",
"calculations",
"."
] | python | train |
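`read_pattern` here amounts to collecting regex capture groups from the output text; a stripped-down illustration on an invented Q-Chem output line:

import re

text = " G_electrostatic  =  -0.012345 hartree  =  -7.747337 kcal/mol "
pattern = (r"\s*G_electrostatic\s+=\s+([\d\-\.]+)\s+hartree"
           r"\s+=\s+([\d\-\.]+)\s+kcal/mol\s*")
matches = re.findall(pattern, text)          # read_pattern behaves similarly
print(matches)                               # [('-0.012345', '-7.747337')]
g_electrostatic = float(matches[0][0]) if matches else None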
jupyter-widgets/ipywidgets | ipywidgets/widgets/widget.py | https://github.com/jupyter-widgets/ipywidgets/blob/36fe37594cd5a268def228709ca27e37b99ac606/ipywidgets/widgets/widget.py#L266-L284 | def register(name=''):
"For backwards compatibility, we support @register(name) syntax."
def reg(widget):
"""A decorator registering a widget class in the widget registry."""
w = widget.class_traits()
Widget.widget_types.register(w['_model_module'].default_value,
w['_model_module_version'].default_value,
w['_model_name'].default_value,
w['_view_module'].default_value,
w['_view_module_version'].default_value,
w['_view_name'].default_value,
widget)
return widget
if isinstance(name, string_types):
import warnings
warnings.warn("Widget registration using a string name has been deprecated. Widget registration now uses a plain `@register` decorator.", DeprecationWarning)
return reg
else:
return reg(name) | [
"def",
"register",
"(",
"name",
"=",
"''",
")",
":",
"def",
"reg",
"(",
"widget",
")",
":",
"\"\"\"A decorator registering a widget class in the widget registry.\"\"\"",
"w",
"=",
"widget",
".",
"class_traits",
"(",
")",
"Widget",
".",
"widget_types",
".",
"register",
"(",
"w",
"[",
"'_model_module'",
"]",
".",
"default_value",
",",
"w",
"[",
"'_model_module_version'",
"]",
".",
"default_value",
",",
"w",
"[",
"'_model_name'",
"]",
".",
"default_value",
",",
"w",
"[",
"'_view_module'",
"]",
".",
"default_value",
",",
"w",
"[",
"'_view_module_version'",
"]",
".",
"default_value",
",",
"w",
"[",
"'_view_name'",
"]",
".",
"default_value",
",",
"widget",
")",
"return",
"widget",
"if",
"isinstance",
"(",
"name",
",",
"string_types",
")",
":",
"import",
"warnings",
"warnings",
".",
"warn",
"(",
"\"Widget registration using a string name has been deprecated. Widget registration now uses a plain `@register` decorator.\"",
",",
"DeprecationWarning",
")",
"return",
"reg",
"else",
":",
"return",
"reg",
"(",
"name",
")"
] | For backwards compatibility, we support @register(name) syntax. | [
"For",
"backwards",
"compatibility",
"we",
"support"
] | python | train |
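In current usage the decorator is applied bare to a widget subclass; a hedged sketch (the `hello_widget` front-end module and trait values are hypothetical):

from ipywidgets import DOMWidget, register
from traitlets import Unicode

@register  # plain decorator; the @register('name') string form is deprecated
class HelloWidget(DOMWidget):
    _view_name = Unicode('HelloView').tag(sync=True)
    _view_module = Unicode('hello_widget').tag(sync=True)
    _view_module_version = Unicode('0.1.0').tag(sync=True)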
Karaage-Cluster/karaage | karaage/common/create_update.py | https://github.com/Karaage-Cluster/karaage/blob/2f4c8b4e2d728b3fcbb151160c49000f1c04f5c9/karaage/common/create_update.py#L12-L21 | def apply_extra_context(extra_context, context):
"""
Adds items from extra_context dict to context. If a value in extra_context
is callable, then it is called and the result is added to context.
"""
for key, value in six.iteritems(extra_context):
if callable(value):
context[key] = value()
else:
context[key] = value | [
"def",
"apply_extra_context",
"(",
"extra_context",
",",
"context",
")",
":",
"for",
"key",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"extra_context",
")",
":",
"if",
"callable",
"(",
"value",
")",
":",
"context",
"[",
"key",
"]",
"=",
"value",
"(",
")",
"else",
":",
"context",
"[",
"key",
"]",
"=",
"value"
] | Adds items from extra_context dict to context. If a value in extra_context
is callable, then it is called and the result is added to context. | [
"Adds",
"items",
"from",
"extra_context",
"dict",
"to",
"context",
".",
"If",
"a",
"value",
"in",
"extra_context",
"is",
"callable",
"then",
"it",
"is",
"called",
"and",
"the",
"result",
"is",
"added",
"to",
"context",
"."
] | python | train |
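A quick standalone demonstration of the callable-versus-value behaviour (Python 3 `.items()` replaces `six.iteritems` here; the keys are invented):

import datetime

def apply_extra_context(extra_context, context):
    # Python 3 equivalent of the helper above.
    for key, value in extra_context.items():
        context[key] = value() if callable(value) else value

context = {}
apply_extra_context(
    {'site_name': 'Example', 'now': datetime.datetime.now},
    context,
)
print(context['site_name'])  # 'Example', plain value copied through
print(context['now'])        # a datetime instance, the callable was called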
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_lag.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_lag.py#L84-L96 | def get_port_channel_detail_output_lacp_aggregator_mode(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_channel_detail = ET.Element("get_port_channel_detail")
config = get_port_channel_detail
output = ET.SubElement(get_port_channel_detail, "output")
lacp = ET.SubElement(output, "lacp")
aggregator_mode = ET.SubElement(lacp, "aggregator-mode")
aggregator_mode.text = kwargs.pop('aggregator_mode')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"get_port_channel_detail_output_lacp_aggregator_mode",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_port_channel_detail",
"=",
"ET",
".",
"Element",
"(",
"\"get_port_channel_detail\"",
")",
"config",
"=",
"get_port_channel_detail",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"get_port_channel_detail",
",",
"\"output\"",
")",
"lacp",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"lacp\"",
")",
"aggregator_mode",
"=",
"ET",
".",
"SubElement",
"(",
"lacp",
",",
"\"aggregator-mode\"",
")",
"aggregator_mode",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'aggregator_mode'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
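The method only assembles an XML filter and hands it to the callback; the document it builds can be reproduced standalone (the aggregator mode value is invented):

import xml.etree.ElementTree as ET

get_detail = ET.Element("get_port_channel_detail")
output = ET.SubElement(get_detail, "output")
lacp = ET.SubElement(output, "lacp")
mode = ET.SubElement(lacp, "aggregator-mode")
mode.text = "standard"                        # hypothetical mode value
print(ET.tostring(get_detail).decode())
# <get_port_channel_detail><output><lacp><aggregator-mode>standard</aggregator-mode>...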
pytroll/satpy | satpy/readers/goes_imager_hrit.py | https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/goes_imager_hrit.py#L430-L443 | def _calibrate(self, data):
"""Calibrate *data*."""
idx = self.mda['calibration_parameters']['indices']
val = self.mda['calibration_parameters']['values']
data.data = da.where(data.data == 0, np.nan, data.data)
ddata = data.data.map_blocks(np.interp, idx, val, dtype=val.dtype)
res = xr.DataArray(ddata,
dims=data.dims, attrs=data.attrs,
coords=data.coords)
res = res.clip(min=0)
units = {'percent': '%'}
unit = self.mda['calibration_parameters'][b'_UNIT']
res.attrs['units'] = units.get(unit, unit)
return res | [
"def",
"_calibrate",
"(",
"self",
",",
"data",
")",
":",
"idx",
"=",
"self",
".",
"mda",
"[",
"'calibration_parameters'",
"]",
"[",
"'indices'",
"]",
"val",
"=",
"self",
".",
"mda",
"[",
"'calibration_parameters'",
"]",
"[",
"'values'",
"]",
"data",
".",
"data",
"=",
"da",
".",
"where",
"(",
"data",
".",
"data",
"==",
"0",
",",
"np",
".",
"nan",
",",
"data",
".",
"data",
")",
"ddata",
"=",
"data",
".",
"data",
".",
"map_blocks",
"(",
"np",
".",
"interp",
",",
"idx",
",",
"val",
",",
"dtype",
"=",
"val",
".",
"dtype",
")",
"res",
"=",
"xr",
".",
"DataArray",
"(",
"ddata",
",",
"dims",
"=",
"data",
".",
"dims",
",",
"attrs",
"=",
"data",
".",
"attrs",
",",
"coords",
"=",
"data",
".",
"coords",
")",
"res",
"=",
"res",
".",
"clip",
"(",
"min",
"=",
"0",
")",
"units",
"=",
"{",
"'percent'",
":",
"'%'",
"}",
"unit",
"=",
"self",
".",
"mda",
"[",
"'calibration_parameters'",
"]",
"[",
"b'_UNIT'",
"]",
"res",
".",
"attrs",
"[",
"'units'",
"]",
"=",
"units",
".",
"get",
"(",
"unit",
",",
"unit",
")",
"return",
"res"
] | Calibrate *data*. | [
"Calibrate",
"*",
"data",
"*",
"."
] | python | train |
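The heart of the calibration is a piecewise-linear lookup through `np.interp`, followed by clipping; a standalone sketch with invented calibration points:

import numpy as np

counts = np.array([0.0, 100.0, 500.0, 1023.0])   # raw detector counts
idx = np.array([0.0, 1023.0])                    # calibration indices
val = np.array([0.0, 100.0])                     # calibrated values, in %
calibrated = np.interp(counts, idx, val).clip(min=0)
print(calibrated)   # approx [0.  9.78  48.88  100.]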
Alignak-monitoring/alignak | alignak/objects/service.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/service.py#L1756-L1785 | def register_service_dependencies(service, servicedependencies):
"""
Registers a service's dependencies.
:param service: The service to register
:type service:
:param servicedependencies: The servicedependencies container
:type servicedependencies:
:return: None
"""
# We explode service_dependencies into Servicedependency
# We just create serviceDep with goods values (as STRING!),
# the link pass will be done after
sdeps = [d.strip() for d in getattr(service, "service_dependencies", [])]
# %2=0 are for hosts, !=0 are for service_description
i = 0
hname = ''
for elt in sdeps:
if i % 2 == 0: # host
hname = elt
else: # description
desc = elt
# we can register it (service) (depend on) -> (hname, desc)
# If we do not have enough data for service, it'service no use
if hasattr(service, 'service_description') and hasattr(service, 'host_name'):
if hname == '':
hname = service.host_name
servicedependencies.add_service_dependency(
service.host_name, service.service_description, hname, desc)
i += 1 | [
"def",
"register_service_dependencies",
"(",
"service",
",",
"servicedependencies",
")",
":",
"# We explode service_dependencies into Servicedependency",
"# We just create serviceDep with goods values (as STRING!),",
"# the link pass will be done after",
"sdeps",
"=",
"[",
"d",
".",
"strip",
"(",
")",
"for",
"d",
"in",
"getattr",
"(",
"service",
",",
"\"service_dependencies\"",
",",
"[",
"]",
")",
"]",
"# %2=0 are for hosts, !=0 are for service_description",
"i",
"=",
"0",
"hname",
"=",
"''",
"for",
"elt",
"in",
"sdeps",
":",
"if",
"i",
"%",
"2",
"==",
"0",
":",
"# host",
"hname",
"=",
"elt",
"else",
":",
"# description",
"desc",
"=",
"elt",
"# we can register it (service) (depend on) -> (hname, desc)",
"# If we do not have enough data for service, it'service no use",
"if",
"hasattr",
"(",
"service",
",",
"'service_description'",
")",
"and",
"hasattr",
"(",
"service",
",",
"'host_name'",
")",
":",
"if",
"hname",
"==",
"''",
":",
"hname",
"=",
"service",
".",
"host_name",
"servicedependencies",
".",
"add_service_dependency",
"(",
"service",
".",
"host_name",
",",
"service",
".",
"service_description",
",",
"hname",
",",
"desc",
")",
"i",
"+=",
"1"
] | Registers a service's dependencies.
:param service: The service to register
:type service:
:param servicedependencies: The servicedependencies container
:type servicedependencies:
:return: None | [
"Registers",
"a",
"service",
"dependencies",
"."
] | python | train |
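The flat `service_dependencies` list alternates host names and service descriptions; the pairing logic reduces to the sketch below (names invented; an empty host name means "same host as the dependent service"):

sdeps = ['web01', 'HTTP', '', 'DNS']
# Even indices are host names, odd indices are service descriptions.
pairs = list(zip(sdeps[0::2], sdeps[1::2]))
print(pairs)   # [('web01', 'HTTP'), ('', 'DNS')]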
rande/python-simple-ioc | ioc/locator.py | https://github.com/rande/python-simple-ioc/blob/36ddf667c1213a07a53cd4cdd708d02494e5190b/ioc/locator.py#L26-L38 | def split_resource_path(resource):
"""Split a path into segments and perform a sanity check. If it detects
'..' in the path it will raise a `TemplateNotFound` error.
"""
pieces = []
for piece in resource.split('/'):
if path.sep in piece \
or (path.altsep and path.altsep in piece) or \
piece == path.pardir:
raise ResourceNotFound(resource)
elif piece and piece != '.':
pieces.append(piece)
return pieces | [
"def",
"split_resource_path",
"(",
"resource",
")",
":",
"pieces",
"=",
"[",
"]",
"for",
"piece",
"in",
"resource",
".",
"split",
"(",
"'/'",
")",
":",
"if",
"path",
".",
"sep",
"in",
"piece",
"or",
"(",
"path",
".",
"altsep",
"and",
"path",
".",
"altsep",
"in",
"piece",
")",
"or",
"piece",
"==",
"path",
".",
"pardir",
":",
"raise",
"ResourceNotFound",
"(",
"resource",
")",
"elif",
"piece",
"and",
"piece",
"!=",
"'.'",
":",
"pieces",
".",
"append",
"(",
"piece",
")",
"return",
"pieces"
] | Split a path into segments and perform a sanity check. If it detects
'..' in the path it will raise a `TemplateNotFound` error. | [
"Split",
"a",
"path",
"into",
"segments",
"and",
"perform",
"a",
"sanity",
"check",
".",
"If",
"it",
"detects",
"..",
"in",
"the",
"path",
"it",
"will",
"raise",
"a",
"TemplateNotFound",
"error",
"."
] | python | train |
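Assuming `ResourceNotFound` is exported from the same `ioc.locator` module, the behaviour is:

from ioc.locator import ResourceNotFound, split_resource_path

print(split_resource_path('templates//./mail/body.txt'))
# ['templates', 'mail', 'body.txt'], empty segments and '.' are dropped

try:
    split_resource_path('templates/../secrets')
except ResourceNotFound:
    print('parent-directory traversal rejected')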
explosion/spaCy | spacy/_ml.py | https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/_ml.py#L693-L709 | def masked_language_model(vocab, model, mask_prob=0.15):
"""Convert a model into a BERT-style masked language model"""
random_words = _RandomWords(vocab)
def mlm_forward(docs, drop=0.0):
mask, docs = _apply_mask(docs, random_words, mask_prob=mask_prob)
mask = model.ops.asarray(mask).reshape((mask.shape[0], 1))
output, backprop = model.begin_update(docs, drop=drop)
def mlm_backward(d_output, sgd=None):
d_output *= 1 - mask
return backprop(d_output, sgd=sgd)
return output, mlm_backward
return wrap(mlm_forward, model) | [
"def",
"masked_language_model",
"(",
"vocab",
",",
"model",
",",
"mask_prob",
"=",
"0.15",
")",
":",
"random_words",
"=",
"_RandomWords",
"(",
"vocab",
")",
"def",
"mlm_forward",
"(",
"docs",
",",
"drop",
"=",
"0.0",
")",
":",
"mask",
",",
"docs",
"=",
"_apply_mask",
"(",
"docs",
",",
"random_words",
",",
"mask_prob",
"=",
"mask_prob",
")",
"mask",
"=",
"model",
".",
"ops",
".",
"asarray",
"(",
"mask",
")",
".",
"reshape",
"(",
"(",
"mask",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
")",
"output",
",",
"backprop",
"=",
"model",
".",
"begin_update",
"(",
"docs",
",",
"drop",
"=",
"drop",
")",
"def",
"mlm_backward",
"(",
"d_output",
",",
"sgd",
"=",
"None",
")",
":",
"d_output",
"*=",
"1",
"-",
"mask",
"return",
"backprop",
"(",
"d_output",
",",
"sgd",
"=",
"sgd",
")",
"return",
"output",
",",
"mlm_backward",
"return",
"wrap",
"(",
"mlm_forward",
",",
"model",
")"
] | Convert a model into a BERT-style masked language model | [
"Convert",
"a",
"model",
"into",
"a",
"BERT",
"-",
"style",
"masked",
"language",
"model"
] | python | train |
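The backward pass multiplies the incoming gradient by `1 - mask`, zeroing it wherever `mask` is 1; a toy numeric check of that gating (shapes and values invented):

import numpy as np

mask = np.array([[1.], [0.], [1.]])   # per-token flag, broadcast over features
d_output = np.ones((3, 4))            # fake upstream gradient
d_output *= 1 - mask                  # rows with mask == 1 are zeroed
print(d_output.sum(axis=1))           # [0. 4. 0.]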
reorx/torext | torext/sql.py | https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/sql.py#L378-L382 | def coerce(cls, key, value):
"""Convert plain list to MutationList"""
self = MutationList((MutationObj.coerce(key, v) for v in value))
self._key = key
return self | [
"def",
"coerce",
"(",
"cls",
",",
"key",
",",
"value",
")",
":",
"self",
"=",
"MutationList",
"(",
"(",
"MutationObj",
".",
"coerce",
"(",
"key",
",",
"v",
")",
"for",
"v",
"in",
"value",
")",
")",
"self",
".",
"_key",
"=",
"key",
"return",
"self"
] | Convert plain list to MutationList | [
"Convert",
"plain",
"list",
"to",
"MutationList"
] | python | train |
Pytwitcher/pytwitcherapi | src/pytwitcherapi/session.py | https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L543-L556 | def get_channel_access_token(self, channel):
"""Return the token and sig for the given channel
:param channel: the channel or channel name to get the access token for
:type channel: :class:`channel` | :class:`str`
:returns: The token and sig for the given channel
:rtype: (:class:`unicode`, :class:`unicode`)
:raises: None
"""
if isinstance(channel, models.Channel):
channel = channel.name
r = self.oldapi_request(
'GET', 'channels/%s/access_token' % channel).json()
return r['token'], r['sig'] | [
"def",
"get_channel_access_token",
"(",
"self",
",",
"channel",
")",
":",
"if",
"isinstance",
"(",
"channel",
",",
"models",
".",
"Channel",
")",
":",
"channel",
"=",
"channel",
".",
"name",
"r",
"=",
"self",
".",
"oldapi_request",
"(",
"'GET'",
",",
"'channels/%s/access_token'",
"%",
"channel",
")",
".",
"json",
"(",
")",
"return",
"r",
"[",
"'token'",
"]",
",",
"r",
"[",
"'sig'",
"]"
] | Return the token and sig for the given channel
:param channel: the channel or channel name to get the access token for
:type channel: :class:`Channel` | :class:`str`
:returns: The token and sig for the given channel
:rtype: (:class:`unicode`, :class:`unicode`)
:raises: None | [
"Return",
"the",
"token",
"and",
"sig",
"for",
"the",
"given",
"channel"
] | python | train |
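A usage sketch; the session class name and channel are assumptions, and the endpoint requires network access:

from pytwitcherapi import TwitchSession   # class name assumed from the package

session = TwitchSession()
token, sig = session.get_channel_access_token('some_channel')
# token and sig are embedded in the usher URL that serves the HLS playlist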
kibitzr/kibitzr | kibitzr/storage.py | https://github.com/kibitzr/kibitzr/blob/749da312488f1dda1ed1093cf4c95aaac0a604f7/kibitzr/storage.py#L62-L67 | def write(self, content):
"""Save content on disk"""
with io.open(self.target, 'w', encoding='utf-8') as fp:
fp.write(content)
if not content.endswith(u'\n'):
fp.write(u'\n') | [
"def",
"write",
"(",
"self",
",",
"content",
")",
":",
"with",
"io",
".",
"open",
"(",
"self",
".",
"target",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"fp",
":",
"fp",
".",
"write",
"(",
"content",
")",
"if",
"not",
"content",
".",
"endswith",
"(",
"u'\\n'",
")",
":",
"fp",
".",
"write",
"(",
"u'\\n'",
")"
] | Save content on disk | [
"Save",
"content",
"on",
"disk"
] | python | train |
envi-idl/envipyarclib | envipyarclib/gptool/parameter/builder.py | https://github.com/envi-idl/envipyarclib/blob/90135652510c3d53c5f51177252c1fea2639bf22/envipyarclib/gptool/parameter/builder.py#L100-L163 | def create_param_info(task_params, parameter_map):
"""
Builds the code block for the GPTool GetParameterInfo method based on the input task_params.
:param task_params: A list of task parameters to map to GPTool parameters.
:return: A string representing the code block for the GPTool GetParameterInfo method.
"""
gp_params = []
gp_param_list = []
gp_param_idx_list = []
gp_param_idx = 0
for task_param in task_params:
# Setup to gp_param dictionary used to substitute against the parameter info template.
gp_param = {}
# Convert DataType
data_type = task_param['type'].upper()
if 'dimensions' in task_param:
if len(task_param['dimensions'].split(',')) > 1:
raise UnknownDataTypeError('Only one-dimensional arrays are supported.')
data_type += 'ARRAY'
if data_type in parameter_map:
gp_param['dataType'] = parameter_map[data_type].data_type
else:
# No Mapping exists for this data type!
raise UnknownDataTypeError('Unable to map task datatype: ' +
data_type +
'. A template must be created.')
gp_param['name'] = task_param['name']
gp_param['displayName'] = task_param['display_name']
gp_param['direction'] = _DIRECTION_MAP[task_param['direction']]
gp_param['paramType'] = 'Required' if task_param['required'] else 'Optional'
# ENVI/IDL output type translates to a derived output type in Arc
if gp_param['direction'] == 'Output':
gp_param['paramType'] = 'Derived'
gp_param['multiValue'] = True if 'dimensions' in task_param else False
# Substitute values into the template
gp_params.append(parameter_map[data_type].get_parameter(task_param).substitute(gp_param))
# Convert the default value
if 'default_value' in task_param:
gp_param['defaultValue'] = task_param['default_value']
gp_params.append(parameter_map[data_type].default_value().substitute(gp_param))
# Convert any choicelist
if 'choice_list' in task_param:
gp_param['choiceList'] = task_param['choice_list']
gp_params.append(_CHOICELIST_TEMPLATE.substitute(gp_param))
# Construct the parameter list and indices for future reference
for param_name in parameter_map[data_type].parameter_names(task_param):
gp_param_list.append(param_name.substitute(gp_param))
gp_param_idx_list.append(_PARAM_INDEX_TEMPLATE.substitute(
{'name': param_name.substitute(gp_param),
'idx': gp_param_idx}))
gp_param_idx += 1
# Construct the final parameter string
gp_params.append(_PARAM_RETURN_TEMPLATE.substitute({'paramList': convert_list(gp_param_list)}))
return ''.join((''.join(gp_params), ''.join(gp_param_idx_list))) | [
"def",
"create_param_info",
"(",
"task_params",
",",
"parameter_map",
")",
":",
"gp_params",
"=",
"[",
"]",
"gp_param_list",
"=",
"[",
"]",
"gp_param_idx_list",
"=",
"[",
"]",
"gp_param_idx",
"=",
"0",
"for",
"task_param",
"in",
"task_params",
":",
"# Setup to gp_param dictionary used to substitute against the parameter info template.",
"gp_param",
"=",
"{",
"}",
"# Convert DataType",
"data_type",
"=",
"task_param",
"[",
"'type'",
"]",
".",
"upper",
"(",
")",
"if",
"'dimensions'",
"in",
"task_param",
":",
"if",
"len",
"(",
"task_param",
"[",
"'dimensions'",
"]",
".",
"split",
"(",
"','",
")",
")",
">",
"1",
":",
"raise",
"UnknownDataTypeError",
"(",
"'Only one-dimensional arrays are supported.'",
")",
"data_type",
"+=",
"'ARRAY'",
"if",
"data_type",
"in",
"parameter_map",
":",
"gp_param",
"[",
"'dataType'",
"]",
"=",
"parameter_map",
"[",
"data_type",
"]",
".",
"data_type",
"else",
":",
"# No Mapping exists for this data type!",
"raise",
"UnknownDataTypeError",
"(",
"'Unable to map task datatype: '",
"+",
"data_type",
"+",
"'. A template must be created.'",
")",
"gp_param",
"[",
"'name'",
"]",
"=",
"task_param",
"[",
"'name'",
"]",
"gp_param",
"[",
"'displayName'",
"]",
"=",
"task_param",
"[",
"'display_name'",
"]",
"gp_param",
"[",
"'direction'",
"]",
"=",
"_DIRECTION_MAP",
"[",
"task_param",
"[",
"'direction'",
"]",
"]",
"gp_param",
"[",
"'paramType'",
"]",
"=",
"'Required'",
"if",
"task_param",
"[",
"'required'",
"]",
"else",
"'Optional'",
"# ENVI/IDL output type translates to a derived output type in Arc",
"if",
"gp_param",
"[",
"'direction'",
"]",
"is",
"'Output'",
":",
"gp_param",
"[",
"'paramType'",
"]",
"=",
"'Derived'",
"gp_param",
"[",
"'multiValue'",
"]",
"=",
"True",
"if",
"'dimensions'",
"in",
"task_param",
"else",
"False",
"# Substitute values into the template",
"gp_params",
".",
"append",
"(",
"parameter_map",
"[",
"data_type",
"]",
".",
"get_parameter",
"(",
"task_param",
")",
".",
"substitute",
"(",
"gp_param",
")",
")",
"# Convert the default value",
"if",
"'default_value'",
"in",
"task_param",
":",
"gp_param",
"[",
"'defaultValue'",
"]",
"=",
"task_param",
"[",
"'default_value'",
"]",
"gp_params",
".",
"append",
"(",
"parameter_map",
"[",
"data_type",
"]",
".",
"default_value",
"(",
")",
".",
"substitute",
"(",
"gp_param",
")",
")",
"# Convert any choicelist",
"if",
"'choice_list'",
"in",
"task_param",
":",
"gp_param",
"[",
"'choiceList'",
"]",
"=",
"task_param",
"[",
"'choice_list'",
"]",
"gp_params",
".",
"append",
"(",
"_CHOICELIST_TEMPLATE",
".",
"substitute",
"(",
"gp_param",
")",
")",
"# Construct the parameter list and indicies for future reference",
"for",
"param_name",
"in",
"parameter_map",
"[",
"data_type",
"]",
".",
"parameter_names",
"(",
"task_param",
")",
":",
"gp_param_list",
".",
"append",
"(",
"param_name",
".",
"substitute",
"(",
"gp_param",
")",
")",
"gp_param_idx_list",
".",
"append",
"(",
"_PARAM_INDEX_TEMPLATE",
".",
"substitute",
"(",
"{",
"'name'",
":",
"param_name",
".",
"substitute",
"(",
"gp_param",
")",
",",
"'idx'",
":",
"gp_param_idx",
"}",
")",
")",
"gp_param_idx",
"+=",
"1",
"# Construct the final parameter string",
"gp_params",
".",
"append",
"(",
"_PARAM_RETURN_TEMPLATE",
".",
"substitute",
"(",
"{",
"'paramList'",
":",
"convert_list",
"(",
"gp_param_list",
")",
"}",
")",
")",
"return",
"''",
".",
"join",
"(",
"(",
"''",
".",
"join",
"(",
"gp_params",
")",
",",
"''",
".",
"join",
"(",
"gp_param_idx_list",
")",
")",
")"
] | Builds the code block for the GPTool GetParameterInfo method based on the input task_params.
:param task_params: A list of task parameters to map to GPTool parameters.
:return: A string representing the code block for the GPTool GetParameterInfo method. | [
"Builds",
"the",
"code",
"block",
"for",
"the",
"GPTool",
"GetParameterInfo",
"method",
"based",
"on",
"the",
"input",
"task_params",
"."
] | python | train |
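The keys each `task_params` entry must carry can be read off the lookups above; an invented example entry:

task_param = {
    'name': 'input_raster',
    'display_name': 'Input Raster',
    'type': 'ENVIRASTER',        # upper-cased, then looked up in parameter_map
    'direction': 'input',        # translated through _DIRECTION_MAP
    'required': True,
    # optional keys, read only when present:
    # 'dimensions': '[*]',       # exactly one dimension is supported
    # 'default_value': '...',
    # 'choice_list': ['a', 'b'],
}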