repo | path | url | code | code_tokens | docstring | docstring_tokens | language | partition
---|---|---|---|---|---|---|---|---|
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/frontend/qt/console/history_console_widget.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/history_console_widget.py#L135-L164 | def history_previous(self, substring='', as_prefix=True):
""" If possible, set the input buffer to a previous history item.
Parameters:
-----------
substring : str, optional
If specified, search for an item with this substring.
as_prefix : bool, optional
If True, the substring must match at the beginning (default).
Returns:
--------
Whether the input buffer was changed.
"""
index = self._history_index
replace = False
while index > 0:
index -= 1
history = self._get_edited_history(index)
if (as_prefix and history.startswith(substring)) \
or (not as_prefix and substring in history):
replace = True
break
if replace:
self._store_edits()
self._history_index = index
self.input_buffer = history
return replace | [
"def",
"history_previous",
"(",
"self",
",",
"substring",
"=",
"''",
",",
"as_prefix",
"=",
"True",
")",
":",
"index",
"=",
"self",
".",
"_history_index",
"replace",
"=",
"False",
"while",
"index",
">",
"0",
":",
"index",
"-=",
"1",
"history",
"=",
"self",
".",
"_get_edited_history",
"(",
"index",
")",
"if",
"(",
"as_prefix",
"and",
"history",
".",
"startswith",
"(",
"substring",
")",
")",
"or",
"(",
"not",
"as_prefix",
"and",
"substring",
"in",
"history",
")",
":",
"replace",
"=",
"True",
"break",
"if",
"replace",
":",
"self",
".",
"_store_edits",
"(",
")",
"self",
".",
"_history_index",
"=",
"index",
"self",
".",
"input_buffer",
"=",
"history",
"return",
"replace"
] | If possible, set the input buffer to a previous history item.
Parameters:
-----------
substring : str, optional
If specified, search for an item with this substring.
as_prefix : bool, optional
If True, the substring must match at the beginning (default).
Returns:
--------
Whether the input buffer was changed. | [
"If",
"possible",
"set",
"the",
"input",
"buffer",
"to",
"a",
"previous",
"history",
"item",
"."
] | python | test |
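
Note: the loop above scans backwards through history, treating `substring` as either a required prefix or a plain substring. A minimal, self-contained sketch of the same matching rule (the `find_previous` helper and the sample `history` list are illustrative, not part of IPython):

```python
def find_previous(history, current_index, substring='', as_prefix=True):
    """Return the index of the closest earlier matching item, or None."""
    index = current_index
    while index > 0:
        index -= 1
        item = history[index]
        if (as_prefix and item.startswith(substring)) \
                or (not as_prefix and substring in item):
            return index
    return None

history = ['print(1)', 'plot(x)', 'print(2)']
assert find_previous(history, 3, 'print') == 2                  # prefix match
assert find_previous(history, 2, 'lot', as_prefix=False) == 1   # substring match
```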
zeaphoo/reston | reston/core/apk.py | https://github.com/zeaphoo/reston/blob/96502487b2259572df55237c9526f92627465088/reston/core/apk.py#L536-L569 | def get_main_activity(self):
"""
Return the name of the main activity
:rtype: string
"""
x = set()
y = set()
for i in self.xml:
activities_and_aliases = self.xml[i].getElementsByTagName("activity") + \
self.xml[i].getElementsByTagName("activity-alias")
for item in activities_and_aliases:
# Some applications have more than one MAIN activity.
# For example: paid and free content
activityEnabled = item.getAttributeNS(NS_ANDROID_URI, "enabled")
if activityEnabled is not None and activityEnabled != "" and activityEnabled == "false":
continue
for sitem in item.getElementsByTagName("action"):
val = sitem.getAttributeNS(NS_ANDROID_URI, "name")
if val == "android.intent.action.MAIN":
x.add(item.getAttributeNS(NS_ANDROID_URI, "name"))
for sitem in item.getElementsByTagName("category"):
val = sitem.getAttributeNS(NS_ANDROID_URI, "name")
if val == "android.intent.category.LAUNCHER":
y.add(item.getAttributeNS(NS_ANDROID_URI, "name"))
z = x.intersection(y)
if len(z) > 0:
return self.format_value(z.pop())
return None | [
"def",
"get_main_activity",
"(",
"self",
")",
":",
"x",
"=",
"set",
"(",
")",
"y",
"=",
"set",
"(",
")",
"for",
"i",
"in",
"self",
".",
"xml",
":",
"activities_and_aliases",
"=",
"self",
".",
"xml",
"[",
"i",
"]",
".",
"getElementsByTagName",
"(",
"\"activity\"",
")",
"+",
"self",
".",
"xml",
"[",
"i",
"]",
".",
"getElementsByTagName",
"(",
"\"activity-alias\"",
")",
"for",
"item",
"in",
"activities_and_aliases",
":",
"# Some applications have more than one MAIN activity.",
"# For example: paid and free content",
"activityEnabled",
"=",
"item",
".",
"getAttributeNS",
"(",
"NS_ANDROID_URI",
",",
"\"enabled\"",
")",
"if",
"activityEnabled",
"is",
"not",
"None",
"and",
"activityEnabled",
"!=",
"\"\"",
"and",
"activityEnabled",
"==",
"\"false\"",
":",
"continue",
"for",
"sitem",
"in",
"item",
".",
"getElementsByTagName",
"(",
"\"action\"",
")",
":",
"val",
"=",
"sitem",
".",
"getAttributeNS",
"(",
"NS_ANDROID_URI",
",",
"\"name\"",
")",
"if",
"val",
"==",
"\"android.intent.action.MAIN\"",
":",
"x",
".",
"add",
"(",
"item",
".",
"getAttributeNS",
"(",
"NS_ANDROID_URI",
",",
"\"name\"",
")",
")",
"for",
"sitem",
"in",
"item",
".",
"getElementsByTagName",
"(",
"\"category\"",
")",
":",
"val",
"=",
"sitem",
".",
"getAttributeNS",
"(",
"NS_ANDROID_URI",
",",
"\"name\"",
")",
"if",
"val",
"==",
"\"android.intent.category.LAUNCHER\"",
":",
"y",
".",
"add",
"(",
"item",
".",
"getAttributeNS",
"(",
"NS_ANDROID_URI",
",",
"\"name\"",
")",
")",
"z",
"=",
"x",
".",
"intersection",
"(",
"y",
")",
"if",
"len",
"(",
"z",
")",
">",
"0",
":",
"return",
"self",
".",
"format_value",
"(",
"z",
".",
"pop",
"(",
")",
")",
"return",
"None"
] | Return the name of the main activity
:rtype: string | [
"Return",
"the",
"name",
"of",
"the",
"main",
"activity"
] | python | train |
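
Note: the method above boils down to a set intersection — an activity counts as the main activity only if it declares both the `android.intent.action.MAIN` action and the `android.intent.category.LAUNCHER` category. A toy illustration with hypothetical activity names:

```python
# x in the code above: activities declaring the MAIN action
main_action = {'com.example.Main', 'com.example.Paid'}
# y in the code above: activities declaring the LAUNCHER category
launcher_category = {'com.example.Main', 'com.example.Settings'}

candidates = main_action & launcher_category  # same as x.intersection(y)
print(candidates)  # {'com.example.Main'}
```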
lemieuxl/pyGenClean | pyGenClean/run_data_clean_up.py | https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/run_data_clean_up.py#L3280-L3293 | def all_files_exist(file_list):
"""Check if all files exist.
:param file_list: the names of files to check.
:type file_list: list
:returns: ``True`` if all files exist, ``False`` otherwise.
"""
all_exist = True
for filename in file_list:
all_exist = all_exist and os.path.isfile(filename)
return all_exist | [
"def",
"all_files_exist",
"(",
"file_list",
")",
":",
"all_exist",
"=",
"True",
"for",
"filename",
"in",
"file_list",
":",
"all_exist",
"=",
"all_exist",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
"return",
"all_exist"
] | Check if all files exist.
:param file_list: the names of files to check.
:type file_list: list
:returns: ``True`` if all files exist, ``False`` otherwise. | [
"Check",
"if",
"all",
"files",
"exist",
"."
] | python | train |
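
Note: since the loop above accumulates a boolean with `and`, it can be written more idiomatically with `all()`, which also short-circuits at the first missing file. A sketch (the sample paths are hypothetical):

```python
import os.path

def all_files_exist(file_list):
    """Equivalent check that stops at the first missing file."""
    return all(os.path.isfile(filename) for filename in file_list)

print(all_files_exist(['/etc/hosts', '/no/such/file']))  # False on most systems
```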
intake/intake | intake/gui/source/gui.py | https://github.com/intake/intake/blob/277b96bfdee39d8a3048ea5408c6d6716d568336/intake/gui/source/gui.py#L112-L121 | def callback(self, sources):
"""When a source is selected, enable widgets that depend on that condition
and do done_callback"""
enable = bool(sources)
if not enable:
self.plot_widget.value = False
enable_widget(self.plot_widget, enable)
if self.done_callback:
self.done_callback(sources) | [
"def",
"callback",
"(",
"self",
",",
"sources",
")",
":",
"enable",
"=",
"bool",
"(",
"sources",
")",
"if",
"not",
"enable",
":",
"self",
".",
"plot_widget",
".",
"value",
"=",
"False",
"enable_widget",
"(",
"self",
".",
"plot_widget",
",",
"enable",
")",
"if",
"self",
".",
"done_callback",
":",
"self",
".",
"done_callback",
"(",
"sources",
")"
] | When a source is selected, enable widgets that depend on that condition
and do done_callback | [
"When",
"a",
"source",
"is",
"selected",
"enable",
"widgets",
"that",
"depend",
"on",
"that",
"condition",
"and",
"do",
"done_callback"
] | python | train |
LionelR/pyair | pyair/xair.py | https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/xair.py#L73-L90 | def _format(noms):
"""
Formate une donnée d'entrée pour être exploitable dans les fonctions liste_*
et get_*.
Paramètres:
noms: chaîne de caractère, liste ou tuples de chaînes de caractères ou
pandas.Series de chaînes de caractères.
Retourne:
Une chaînes de caractères dont chaque élément est séparé du suivant par les
caractères ',' (simples quotes comprises)
"""
if isinstance(noms, (list, tuple, pd.Series)):
noms = ','.join(noms)
noms = noms.replace(",", "','")
return noms | [
"def",
"_format",
"(",
"noms",
")",
":",
"if",
"isinstance",
"(",
"noms",
",",
"(",
"list",
",",
"tuple",
",",
"pd",
".",
"Series",
")",
")",
":",
"noms",
"=",
"','",
".",
"join",
"(",
"noms",
")",
"noms",
"=",
"noms",
".",
"replace",
"(",
"\",\"",
",",
"\"','\"",
")",
"return",
"noms"
] | Formate une donnée d'entrée pour être exploitable dans les fonctions liste_*
et get_*.
Paramètres:
noms: chaîne de caractère, liste ou tuples de chaînes de caractères ou
pandas.Series de chaînes de caractères.
Retourne:
Une chaînes de caractères dont chaque élément est séparé du suivant par les
caractères ',' (simples quotes comprises) | [
"Formate",
"une",
"donnée",
"d",
"entrée",
"pour",
"être",
"exploitable",
"dans",
"les",
"fonctions",
"liste_",
"*",
"et",
"get_",
"*",
"."
] | python | valid |
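
Note: the helper's output is meant to be embedded between quotes in a SQL `IN (...)` clause. Here is what it produces on hypothetical measurement names, reusing the function above (pandas is assumed to be installed):

```python
print(_format(['NO2', 'O3', 'PM10']))   # NO2','O3','PM10
print("IN ('%s')" % _format('NO2,O3'))  # IN ('NO2','O3')
```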
Capitains/flask-capitains-nemo | flask_nemo/query/interface.py | https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/query/interface.py#L42-L56 | def process(self, nemo):
""" Register nemo and parses annotations
.. note:: Process parses the annotation and extends information about the target URNs by retrieving resources in range
:param nemo: Nemo
"""
self.__nemo__ = nemo
for annotation in self.__annotations__:
annotation.target.expanded = frozenset(
self.__getinnerreffs__(
objectId=annotation.target.objectId,
subreference=annotation.target.subreference
)
) | [
"def",
"process",
"(",
"self",
",",
"nemo",
")",
":",
"self",
".",
"__nemo__",
"=",
"nemo",
"for",
"annotation",
"in",
"self",
".",
"__annotations__",
":",
"annotation",
".",
"target",
".",
"expanded",
"=",
"frozenset",
"(",
"self",
".",
"__getinnerreffs__",
"(",
"objectId",
"=",
"annotation",
".",
"target",
".",
"objectId",
",",
"subreference",
"=",
"annotation",
".",
"target",
".",
"subreference",
")",
")"
] | Register nemo and parses annotations
.. note:: Process parses the annotation and extends informations about the target URNs by retrieving resource in range
:param nemo: Nemo | [
"Register",
"nemo",
"and",
"parses",
"annotations"
] | python | valid |
mar10/wsgidav | wsgidav/dav_provider.py | https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/dav_provider.py#L432-L479 | def get_descendants(
self,
collections=True,
resources=True,
depth_first=False,
depth="infinity",
add_self=False,
):
"""Return a list _DAVResource objects of a collection (children,
grand-children, ...).
This default implementation calls self.get_member_list() recursively.
This function may also be called for non-collections (with add_self=True).
:Parameters:
depth_first : bool
use <False>, to list containers before content.
(e.g. when moving / copying branches.)
Use <True>, to list content before containers.
(e.g. when deleting branches.)
depth : string
'0' | '1' | 'infinity'
"""
assert depth in ("0", "1", "infinity")
res = []
if add_self and not depth_first:
res.append(self)
if depth != "0" and self.is_collection:
for child in self.get_member_list():
if not child:
self.get_member_list()
want = (collections and child.is_collection) or (
resources and not child.is_collection
)
if want and not depth_first:
res.append(child)
if child.is_collection and depth == "infinity":
res.extend(
child.get_descendants(
collections, resources, depth_first, depth, add_self=False
)
)
if want and depth_first:
res.append(child)
if add_self and depth_first:
res.append(self)
return res | [
"def",
"get_descendants",
"(",
"self",
",",
"collections",
"=",
"True",
",",
"resources",
"=",
"True",
",",
"depth_first",
"=",
"False",
",",
"depth",
"=",
"\"infinity\"",
",",
"add_self",
"=",
"False",
",",
")",
":",
"assert",
"depth",
"in",
"(",
"\"0\"",
",",
"\"1\"",
",",
"\"infinity\"",
")",
"res",
"=",
"[",
"]",
"if",
"add_self",
"and",
"not",
"depth_first",
":",
"res",
".",
"append",
"(",
"self",
")",
"if",
"depth",
"!=",
"\"0\"",
"and",
"self",
".",
"is_collection",
":",
"for",
"child",
"in",
"self",
".",
"get_member_list",
"(",
")",
":",
"if",
"not",
"child",
":",
"self",
".",
"get_member_list",
"(",
")",
"want",
"=",
"(",
"collections",
"and",
"child",
".",
"is_collection",
")",
"or",
"(",
"resources",
"and",
"not",
"child",
".",
"is_collection",
")",
"if",
"want",
"and",
"not",
"depth_first",
":",
"res",
".",
"append",
"(",
"child",
")",
"if",
"child",
".",
"is_collection",
"and",
"depth",
"==",
"\"infinity\"",
":",
"res",
".",
"extend",
"(",
"child",
".",
"get_descendants",
"(",
"collections",
",",
"resources",
",",
"depth_first",
",",
"depth",
",",
"add_self",
"=",
"False",
")",
")",
"if",
"want",
"and",
"depth_first",
":",
"res",
".",
"append",
"(",
"child",
")",
"if",
"add_self",
"and",
"depth_first",
":",
"res",
".",
"append",
"(",
"self",
")",
"return",
"res"
] | Return a list _DAVResource objects of a collection (children,
grand-children, ...).
This default implementation calls self.get_member_list() recursively.
This function may also be called for non-collections (with add_self=True).
:Parameters:
depth_first : bool
use <False>, to list containers before content.
(e.g. when moving / copying branches.)
Use <True>, to list content before containers.
(e.g. when deleting branches.)
depth : string
'0' | '1' | 'infinity' | [
"Return",
"a",
"list",
"_DAVResource",
"objects",
"of",
"a",
"collection",
"(",
"children",
"grand",
"-",
"children",
"...",
")",
"."
] | python | valid |
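
Note: the `depth_first` flag only changes ordering, not membership — parents-first suits copying a branch (targets exist before their children), children-first suits deleting one (each collection is already empty when removed). A toy stand-in tree, not part of WsgiDAV, showing the two orders:

```python
class Node:
    """Minimal stand-in for a DAV resource, just to show the orderings."""
    def __init__(self, name, children=()):
        self.name, self.children = name, list(children)
    def get_descendants(self, depth_first=False, add_self=False):
        res = [self] if add_self and not depth_first else []
        for child in self.children:
            if not depth_first:
                res.append(child)
            res.extend(child.get_descendants(depth_first))
            if depth_first:
                res.append(child)
        if add_self and depth_first:
            res.append(self)
        return res

root = Node('/', [Node('a', [Node('a/1')]), Node('b')])
print([n.name for n in root.get_descendants(add_self=True)])
# ['/', 'a', 'a/1', 'b']   -- parents first: safe order for copying
print([n.name for n in root.get_descendants(depth_first=True, add_self=True)])
# ['a/1', 'a', 'b', '/']   -- children first: safe order for deleting
```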
avihad/twistes | twistes/client.py | https://github.com/avihad/twistes/blob/9ab8f5aa088b8886aefe3dec85a400e5035e034a/twistes/client.py#L380-L405 | def create(self, index, doc_type, body, id=None, **query_params):
"""
Adds a typed JSON document in a specific index, making it searchable.
Behind the scenes this method calls index(..., op_type='create')
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html>`_
:param index: The name of the index
:param doc_type: The type of the document
:param body: The document
:param id: Document ID
:arg consistency: Explicit write consistency setting for the operation,
valid choices are: 'one', 'quorum', 'all'
:arg op_type: Explicit operation type, default 'index', valid choices
are: 'index', 'create'
:arg parent: ID of the parent document
:arg refresh: Refresh the index after performing the operation
:arg routing: Specific routing value
:arg timeout: Explicit operation timeout
:arg timestamp: Explicit timestamp for the document
:arg ttl: Expiration time for the document
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
query_params['op_type'] = 'create'
result = yield self.index(index, doc_type, body, id=id, params=query_params)
returnValue(result) | [
"def",
"create",
"(",
"self",
",",
"index",
",",
"doc_type",
",",
"body",
",",
"id",
"=",
"None",
",",
"*",
"*",
"query_params",
")",
":",
"query_params",
"[",
"'op_type'",
"]",
"=",
"'create'",
"result",
"=",
"yield",
"self",
".",
"index",
"(",
"index",
",",
"doc_type",
",",
"body",
",",
"id",
"=",
"id",
",",
"params",
"=",
"query_params",
")",
"returnValue",
"(",
"result",
")"
] | Adds a typed JSON document in a specific index, making it searchable.
Behind the scenes this method calls index(..., op_type='create')
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html>`_
:param index: The name of the index
:param doc_type: The type of the document
:param body: The document
:param id: Document ID
:arg consistency: Explicit write consistency setting for the operation,
valid choices are: 'one', 'quorum', 'all'
:arg op_type: Explicit operation type, default 'index', valid choices
are: 'index', 'create'
:arg parent: ID of the parent document
:arg refresh: Refresh the index after performing the operation
:arg routing: Specific routing value
:arg timeout: Explicit operation timeout
:arg timestamp: Explicit timestamp for the document
:arg ttl: Expiration time for the document
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force' | [
"Adds",
"a",
"typed",
"JSON",
"document",
"in",
"a",
"specific",
"index",
"making",
"it",
"searchable",
".",
"Behind",
"the",
"scenes",
"this",
"method",
"calls",
"index",
"(",
"...",
"op_type",
"=",
"create",
")",
"<http",
":",
"//",
"www",
".",
"elastic",
".",
"co",
"/",
"guide",
"/",
"en",
"/",
"elasticsearch",
"/",
"reference",
"/",
"current",
"/",
"docs",
"-",
"index_",
".",
"html",
">",
"_",
":",
"param",
"index",
":",
"The",
"name",
"of",
"the",
"index",
":",
"param",
"doc_type",
":",
"The",
"type",
"of",
"the",
"document",
":",
"param",
"body",
":",
"The",
"document",
":",
"param",
"id",
":",
"Document",
"ID",
":",
"arg",
"consistency",
":",
"Explicit",
"write",
"consistency",
"setting",
"for",
"the",
"operation",
"valid",
"choices",
"are",
":",
"one",
"quorum",
"all",
":",
"arg",
"op_type",
":",
"Explicit",
"operation",
"type",
"default",
"index",
"valid",
"choices",
"are",
":",
"index",
"create",
":",
"arg",
"parent",
":",
"ID",
"of",
"the",
"parent",
"document",
":",
"arg",
"refresh",
":",
"Refresh",
"the",
"index",
"after",
"performing",
"the",
"operation",
":",
"arg",
"routing",
":",
"Specific",
"routing",
"value",
":",
"arg",
"timeout",
":",
"Explicit",
"operation",
"timeout",
":",
"arg",
"timestamp",
":",
"Explicit",
"timestamp",
"for",
"the",
"document",
":",
"arg",
"ttl",
":",
"Expiration",
"time",
"for",
"the",
"document",
":",
"arg",
"version",
":",
"Explicit",
"version",
"number",
"for",
"concurrency",
"control",
":",
"arg",
"version_type",
":",
"Specific",
"version",
"type",
"valid",
"choices",
"are",
":",
"internal",
"external",
"external_gte",
"force"
] | python | train |
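
Note: because the client is Twisted-based, `create` returns a `Deferred`. A hedged usage sketch — `es` is assumed to be an already-constructed twistes Elasticsearch client, and the index/document values are hypothetical:

```python
from twisted.internet import defer

@defer.inlineCallbacks
def store_user(es):
    result = yield es.create(index='users', doc_type='user',
                             body={'name': 'alice'}, id='u-1')
    defer.returnValue(result)
```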
raiden-network/raiden | raiden/storage/migrations/v20_to_v21.py | https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/storage/migrations/v20_to_v21.py#L52-L100 | def _update_statechanges(storage: SQLiteStorage):
"""
Update each ContractReceiveChannelNew's channel_state member
by setting the `mediation_fee` that was added to the NettingChannelState
"""
batch_size = 50
batch_query = storage.batch_query_state_changes(
batch_size=batch_size,
filters=[
('_type', 'raiden.transfer.state_change.ContractReceiveChannelNew'),
],
)
for state_changes_batch in batch_query:
updated_state_changes = list()
for state_change in state_changes_batch:
data = json.loads(state_change.data)
msg = 'v20 ContractReceiveChannelNew channel state should not contain mediation_fee'
assert 'mediation_fee' not in data['channel_state'], msg
data['channel_state']['mediation_fee'] = '0'
updated_state_changes.append((
json.dumps(data),
state_change.state_change_identifier,
))
storage.update_state_changes(updated_state_changes)
batch_query = storage.batch_query_state_changes(
batch_size=batch_size,
filters=[
('_type', 'raiden.transfer.mediated_transfer.state_change.ActionInitInitiator'),
],
)
for state_changes_batch in batch_query:
updated_state_changes = list()
for state_change in state_changes_batch:
data = json.loads(state_change.data)
msg = 'v20 ActionInitInitiator transfer should not contain allocated_fee'
assert 'allocated_fee' not in data['transfer'], msg
data['transfer']['allocated_fee'] = '0'
updated_state_changes.append((
json.dumps(data),
state_change.state_change_identifier,
))
storage.update_state_changes(updated_state_changes) | [
"def",
"_update_statechanges",
"(",
"storage",
":",
"SQLiteStorage",
")",
":",
"batch_size",
"=",
"50",
"batch_query",
"=",
"storage",
".",
"batch_query_state_changes",
"(",
"batch_size",
"=",
"batch_size",
",",
"filters",
"=",
"[",
"(",
"'_type'",
",",
"'raiden.transfer.state_change.ContractReceiveChannelNew'",
")",
",",
"]",
",",
")",
"for",
"state_changes_batch",
"in",
"batch_query",
":",
"updated_state_changes",
"=",
"list",
"(",
")",
"for",
"state_change",
"in",
"state_changes_batch",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"state_change",
".",
"data",
")",
"msg",
"=",
"'v20 ContractReceiveChannelNew channel state should not contain medation_fee'",
"assert",
"'mediation_fee'",
"not",
"in",
"data",
"[",
"'channel_state'",
"]",
",",
"msg",
"data",
"[",
"'channel_state'",
"]",
"[",
"'mediation_fee'",
"]",
"=",
"'0'",
"updated_state_changes",
".",
"append",
"(",
"(",
"json",
".",
"dumps",
"(",
"data",
")",
",",
"state_change",
".",
"state_change_identifier",
",",
")",
")",
"storage",
".",
"update_state_changes",
"(",
"updated_state_changes",
")",
"batch_query",
"=",
"storage",
".",
"batch_query_state_changes",
"(",
"batch_size",
"=",
"batch_size",
",",
"filters",
"=",
"[",
"(",
"'_type'",
",",
"'raiden.transfer.mediated_transfer.state_change.ActionInitInitiator'",
")",
",",
"]",
",",
")",
"for",
"state_changes_batch",
"in",
"batch_query",
":",
"updated_state_changes",
"=",
"list",
"(",
")",
"for",
"state_change",
"in",
"state_changes_batch",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"state_change",
".",
"data",
")",
"msg",
"=",
"'v20 ActionInitInitiator transfer should not contain allocated_fee'",
"assert",
"'allocated_fee'",
"not",
"in",
"data",
"[",
"'transfer'",
"]",
",",
"msg",
"data",
"[",
"'transfer'",
"]",
"[",
"'allocated_fee'",
"]",
"=",
"'0'",
"updated_state_changes",
".",
"append",
"(",
"(",
"json",
".",
"dumps",
"(",
"data",
")",
",",
"state_change",
".",
"state_change_identifier",
",",
")",
")",
"storage",
".",
"update_state_changes",
"(",
"updated_state_changes",
")"
] | Update each ContractReceiveChannelNew's channel_state member
by setting the `mediation_fee` that was added to the NettingChannelState | [
"Update",
"each",
"ContractReceiveChannelNew",
"s",
"channel_state",
"member",
"by",
"setting",
"the",
"mediation_fee",
"that",
"was",
"added",
"to",
"the",
"NettingChannelState"
] | python | train |
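
Note: both loops follow the same batched read-modify-write pattern — load each row's JSON, assert the new field is absent, set its default, and write the row back. A generic, storage-free sketch of that pattern (the `rows` list is a hypothetical stand-in for `SQLiteStorage`):

```python
import json

rows = [('{"channel_state": {}}', 1), ('{"channel_state": {}}', 2)]

updated = []
for data_json, row_id in rows:
    data = json.loads(data_json)
    assert 'mediation_fee' not in data['channel_state']
    data['channel_state']['mediation_fee'] = '0'  # new field, default value
    updated.append((json.dumps(data), row_id))

print(updated[0][0])  # {"channel_state": {"mediation_fee": "0"}}
```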
buriburisuri/sugartensor | sugartensor/sg_activation.py | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_activation.py#L29-L40 | def sg_leaky_relu(x, opt):
r""""See [Xu, et al. 2015](https://arxiv.org/pdf/1505.00853v2.pdf)
Args:
x: A tensor
opt:
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type and shape as `x`.
"""
return tf.where(tf.greater(x, 0), x, 0.01 * x, name=opt.name) | [
"def",
"sg_leaky_relu",
"(",
"x",
",",
"opt",
")",
":",
"return",
"tf",
".",
"where",
"(",
"tf",
".",
"greater",
"(",
"x",
",",
"0",
")",
",",
"x",
",",
"0.01",
"*",
"x",
",",
"name",
"=",
"opt",
".",
"name",
")"
] | r""""See [Xu, et al. 2015](https://arxiv.org/pdf/1505.00853v2.pdf)
Args:
x: A tensor
opt:
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type and shape as `x`. | [
"r",
"See",
"[",
"Xu",
"et",
"al",
".",
"2015",
"]",
"(",
"https",
":",
"//",
"arxiv",
".",
"org",
"/",
"pdf",
"/",
"1505",
".",
"00853v2",
".",
"pdf",
")"
] | python | train |
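
Note: the op computes the piecewise function f(x) = x for x > 0 and 0.01·x otherwise. A NumPy check of the same rule on sample values:

```python
import numpy as np

x = np.array([-2.0, -0.5, 0.0, 3.0])
leaky = np.where(x > 0, x, 0.01 * x)
print(leaky)  # [-0.02  -0.005  0.     3.   ]
```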
Parsely/birding | src/birding/follow.py | https://github.com/Parsely/birding/blob/c7f6eee56424234e361b1a455595de202e744dac/src/birding/follow.py#L58-L68 | def follow_fd(fd):
"""Dump each line of input to stdio."""
dump = Dump()
for line in fd:
if not line.strip():
continue
with flushing(sys.stdout, sys.stderr):
status = load(line)
if status:
dump(status) | [
"def",
"follow_fd",
"(",
"fd",
")",
":",
"dump",
"=",
"Dump",
"(",
")",
"for",
"line",
"in",
"fd",
":",
"if",
"not",
"line",
".",
"strip",
"(",
")",
":",
"continue",
"with",
"flushing",
"(",
"sys",
".",
"stdout",
",",
"sys",
".",
"stderr",
")",
":",
"status",
"=",
"load",
"(",
"line",
")",
"if",
"status",
":",
"dump",
"(",
"status",
")"
] | Dump each line of input to stdio. | [
"Dump",
"each",
"line",
"of",
"input",
"to",
"stdio",
"."
] | python | train |
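
Note: a plausible entry point, wiring the function to standard input so statuses can be piped in. The import path follows the row's `src/birding/follow.py` location; the shell command in the comment is illustrative:

```python
import sys
from birding.follow import follow_fd

if __name__ == '__main__':
    follow_fd(sys.stdin)  # e.g. `tail -f statuses.jsonl | python follow_statuses.py`
```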
boriel/zxbasic | asmparse.py | https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/asmparse.py#L1394-L1397 | def p_preprocessor_line_line(p):
""" preproc_line : _LINE INTEGER
"""
p.lexer.lineno = int(p[2]) + p.lexer.lineno - p.lineno(2) | [
"def",
"p_preprocessor_line_line",
"(",
"p",
")",
":",
"p",
".",
"lexer",
".",
"lineno",
"=",
"int",
"(",
"p",
"[",
"2",
"]",
")",
"+",
"p",
".",
"lexer",
".",
"lineno",
"-",
"p",
".",
"lineno",
"(",
"2",
")"
] | preproc_line : _LINE INTEGER | [
"preproc_line",
":",
"_LINE",
"INTEGER"
] | python | train |
diging/tethne | tethne/readers/wos.py | https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L242-L253 | def postprocess_subject(self, entry):
"""
Parse subject keywords.
Subject keywords are usually semicolon-delimited.
"""
if type(entry.subject) not in [str, unicode]:
subject = u' '.join([unicode(k) for k in entry.subject])
else:
subject = entry.subject
entry.subject = [k.strip().upper() for k in subject.split(';')] | [
"def",
"postprocess_subject",
"(",
"self",
",",
"entry",
")",
":",
"if",
"type",
"(",
"entry",
".",
"subject",
")",
"not",
"in",
"[",
"str",
",",
"unicode",
"]",
":",
"subject",
"=",
"u' '",
".",
"join",
"(",
"[",
"unicode",
"(",
"k",
")",
"for",
"k",
"in",
"entry",
".",
"subject",
"]",
")",
"else",
":",
"subject",
"=",
"entry",
".",
"subject",
"entry",
".",
"subject",
"=",
"[",
"k",
".",
"strip",
"(",
")",
".",
"upper",
"(",
")",
"for",
"k",
"in",
"subject",
".",
"split",
"(",
"';'",
")",
"]"
] | Parse subject keywords.
Subject keywords are usually semicolon-delimited. | [
"Parse",
"subject",
"keywords",
"."
] | python | train |
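
Note: the split-strip-uppercase rule above, shown on a hypothetical subject string:

```python
subject = 'Genetics; molecular biology ;ecology'
keywords = [k.strip().upper() for k in subject.split(';')]
print(keywords)  # ['GENETICS', 'MOLECULAR BIOLOGY', 'ECOLOGY']
```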
payplug/payplug-python | payplug/notifications.py | https://github.com/payplug/payplug-python/blob/42dec9d6bff420dd0c26e51a84dd000adff04331/payplug/notifications.py#L8-L34 | def treat(request_body):
"""
Treat a notification and guarantee its authenticity.
:param request_body: The request body in plain text.
:type request_body: string
:return: A safe APIResource
:rtype: APIResource
"""
# Python 3+ support
if isinstance(request_body, six.binary_type):
request_body = request_body.decode('utf-8')
try:
data = json.loads(request_body)
except ValueError:
raise exceptions.UnknownAPIResource('Request body is malformed JSON.')
unsafe_api_resource = APIResource.factory(data)
try:
consistent_api_resource = unsafe_api_resource.get_consistent_resource()
except AttributeError:
raise exceptions.UnknownAPIResource('The API resource provided is invalid.')
return consistent_api_resource | [
"def",
"treat",
"(",
"request_body",
")",
":",
"# Python 3+ support",
"if",
"isinstance",
"(",
"request_body",
",",
"six",
".",
"binary_type",
")",
":",
"request_body",
"=",
"request_body",
".",
"decode",
"(",
"'utf-8'",
")",
"try",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"request_body",
")",
"except",
"ValueError",
":",
"raise",
"exceptions",
".",
"UnknownAPIResource",
"(",
"'Request body is malformed JSON.'",
")",
"unsafe_api_resource",
"=",
"APIResource",
".",
"factory",
"(",
"data",
")",
"try",
":",
"consistent_api_resource",
"=",
"unsafe_api_resource",
".",
"get_consistent_resource",
"(",
")",
"except",
"AttributeError",
":",
"raise",
"exceptions",
".",
"UnknownAPIResource",
"(",
"'The API resource provided is invalid.'",
")",
"return",
"consistent_api_resource"
] | Treat a notification and guarantee its authenticity.
:param request_body: The request body in plain text.
:type request_body: string
:return: A safe APIResource
:rtype: APIResource | [
"Treat",
"a",
"notification",
"and",
"guarantee",
"its",
"authenticity",
"."
] | python | train |
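
Note: a hedged webhook-handler sketch around `treat`. The `payment`/`is_paid` attribute checks and the `mark_order_paid` hook are assumptions about the surrounding application, not part of this module:

```python
from payplug import exceptions, notifications

def handle_notification(request_body):
    try:
        resource = notifications.treat(request_body)
    except exceptions.UnknownAPIResource:
        return 400  # payload could not be authenticated; reject it
    if resource.object == 'payment' and resource.is_paid:  # assumed attributes
        mark_order_paid(resource.id)  # hypothetical application hook
    return 200
```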
bastibe/PySoundCard | pysoundcard.py | https://github.com/bastibe/PySoundCard/blob/fb16460b75a1bb416089ebecdf700fa954faa5b7/pysoundcard.py#L427-L438 | def stop(self):
"""Terminate audio processing.
This waits until all pending audio buffers have been played
before it returns. If successful, the stream is considered
inactive.
"""
err = _pa.Pa_StopStream(self._stream)
if err == _pa.paStreamIsStopped:
return
self._handle_error(err) | [
"def",
"stop",
"(",
"self",
")",
":",
"err",
"=",
"_pa",
".",
"Pa_StopStream",
"(",
"self",
".",
"_stream",
")",
"if",
"err",
"==",
"_pa",
".",
"paStreamIsStopped",
":",
"return",
"self",
".",
"_handle_error",
"(",
"err",
")"
] | Terminate audio processing.
This waits until all pending audio buffers have been played
before it returns. If successful, the stream is considered
inactive. | [
"Terminate",
"audio",
"processing",
"."
] | python | train |
OnroerendErfgoed/skosprovider_getty | skosprovider_getty/providers.py | https://github.com/OnroerendErfgoed/skosprovider_getty/blob/5aa0b5a8525d607e07b631499ff31bac7a0348b7/skosprovider_getty/providers.py#L227-L275 | def _get_answer(self, query, **kwargs):
# send request to getty
""" Returns the results of the Sparql query to a :class:`lst` of concepts and collections.
The return :class:`lst` can be empty.
:param query (str): Sparql query
:returns: A :class:`lst` of concepts and collections. Each of these
is a dict with the following keys:
* id: id within the conceptscheme
* uri: :term:`uri` of the concept or collection
* type: concept or collection
* label: A label to represent the concept or collection.
"""
request = self.base_url + "sparql.json"
try:
res = self.session.get(request, params={"query": query})
except ConnectionError as e:
raise ProviderUnavailableException("Request could not be executed - Request: %s - Params: %s" % (request, query))
if res.status_code == 404:
raise ProviderUnavailableException("Service not found (status_code 404) - Request: %s - Params: %s" % (request, query))
if not res.encoding:
res.encoding = 'utf-8'
r = res.json()
d = {}
for result in r["results"]["bindings"]:
uri = result["Subject"]["value"]
if "Term" in result:
label = result["Term"]["value"]
else:
label = "<not available>"
item = {
'id': result["Id"]["value"],
'uri': uri,
'type': result["Type"]["value"].rsplit('#', 1)[1],
'label': label,
'lang': result["Lang"]["value"]
}
if uri not in d:
d[uri] = item
if tags.tag(d[uri]['lang']).format == tags.tag(self._get_language(**kwargs)).format:
pass
elif tags.tag(item['lang']).format == tags.tag(self._get_language(**kwargs)).format:
d[uri] = item
elif tags.tag(item['lang']).language and (tags.tag(item['lang']).language.format == tags.tag(self._get_language()).language.format):
d[uri] = item
elif tags.tag(item['lang']).format == tags.tag('en').format:
d[uri] = item
return list(d.values()) | [
"def",
"_get_answer",
"(",
"self",
",",
"query",
",",
"*",
"*",
"kwargs",
")",
":",
"# send request to getty",
"request",
"=",
"self",
".",
"base_url",
"+",
"\"sparql.json\"",
"try",
":",
"res",
"=",
"self",
".",
"session",
".",
"get",
"(",
"request",
",",
"params",
"=",
"{",
"\"query\"",
":",
"query",
"}",
")",
"except",
"ConnectionError",
"as",
"e",
":",
"raise",
"ProviderUnavailableException",
"(",
"\"Request could not be executed - Request: %s - Params: %s\"",
"%",
"(",
"request",
",",
"query",
")",
")",
"if",
"res",
".",
"status_code",
"==",
"404",
":",
"raise",
"ProviderUnavailableException",
"(",
"\"Service not found (status_code 404) - Request: %s - Params: %s\"",
"%",
"(",
"request",
",",
"query",
")",
")",
"if",
"not",
"res",
".",
"encoding",
":",
"res",
".",
"encoding",
"=",
"'utf-8'",
"r",
"=",
"res",
".",
"json",
"(",
")",
"d",
"=",
"{",
"}",
"for",
"result",
"in",
"r",
"[",
"\"results\"",
"]",
"[",
"\"bindings\"",
"]",
":",
"uri",
"=",
"result",
"[",
"\"Subject\"",
"]",
"[",
"\"value\"",
"]",
"if",
"\"Term\"",
"in",
"result",
":",
"label",
"=",
"result",
"[",
"\"Term\"",
"]",
"[",
"\"value\"",
"]",
"else",
":",
"label",
"=",
"\"<not available>\"",
"item",
"=",
"{",
"'id'",
":",
"result",
"[",
"\"Id\"",
"]",
"[",
"\"value\"",
"]",
",",
"'uri'",
":",
"uri",
",",
"'type'",
":",
"result",
"[",
"\"Type\"",
"]",
"[",
"\"value\"",
"]",
".",
"rsplit",
"(",
"'#'",
",",
"1",
")",
"[",
"1",
"]",
",",
"'label'",
":",
"label",
",",
"'lang'",
":",
"result",
"[",
"\"Lang\"",
"]",
"[",
"\"value\"",
"]",
"}",
"if",
"uri",
"not",
"in",
"d",
":",
"d",
"[",
"uri",
"]",
"=",
"item",
"if",
"tags",
".",
"tag",
"(",
"d",
"[",
"uri",
"]",
"[",
"'lang'",
"]",
")",
".",
"format",
"==",
"tags",
".",
"tag",
"(",
"self",
".",
"_get_language",
"(",
"*",
"*",
"kwargs",
")",
")",
".",
"format",
":",
"pass",
"elif",
"tags",
".",
"tag",
"(",
"item",
"[",
"'lang'",
"]",
")",
".",
"format",
"==",
"tags",
".",
"tag",
"(",
"self",
".",
"_get_language",
"(",
"*",
"*",
"kwargs",
")",
")",
".",
"format",
":",
"d",
"[",
"uri",
"]",
"=",
"item",
"elif",
"tags",
".",
"tag",
"(",
"item",
"[",
"'lang'",
"]",
")",
".",
"language",
"and",
"(",
"tags",
".",
"tag",
"(",
"item",
"[",
"'lang'",
"]",
")",
".",
"language",
".",
"format",
"==",
"tags",
".",
"tag",
"(",
"self",
".",
"_get_language",
"(",
")",
")",
".",
"language",
".",
"format",
")",
":",
"d",
"[",
"uri",
"]",
"=",
"item",
"elif",
"tags",
".",
"tag",
"(",
"item",
"[",
"'lang'",
"]",
")",
".",
"format",
"==",
"tags",
".",
"tag",
"(",
"'en'",
")",
".",
"format",
":",
"d",
"[",
"uri",
"]",
"=",
"item",
"return",
"list",
"(",
"d",
".",
"values",
"(",
")",
")"
] | Returns the results of the SPARQL query as a :class:`list` of concepts and collections.
The returned :class:`list` can be empty.
:param query (str): SPARQL query
:returns: A :class:`list` of concepts and collections. Each of these
is a dict with the following keys:
* id: id within the conceptscheme
* uri: :term:`uri` of the concept or collection
* type: concept or collection
* label: A label to represent the concept or collection. | [
"Returns",
"the",
"results",
"of",
"the",
"Sparql",
"query",
"to",
"a",
":",
"class",
":",
"lst",
"of",
"concepts",
"and",
"collections",
".",
"The",
"return",
":",
"class",
":",
"lst",
"can",
"be",
"empty",
"."
] | python | train |
glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/visuals/transforms/chain.py | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/visuals/transforms/chain.py#L251-L295 | def source_changed(self, event):
"""Generate a simplified chain by joining adjacent transforms.
"""
# bail out early if the chain is empty
transforms = self._chain.transforms[:]
if len(transforms) == 0:
self.transforms = []
return
# If the change signal comes from a transform that already appears in
# our simplified transform list, then there is no need to re-simplify.
if event is not None:
for source in event.sources[::-1]:
if source in self.transforms:
self.update(event)
return
# First flatten the chain by expanding all nested chains
new_chain = []
while len(transforms) > 0:
tr = transforms.pop(0)
if isinstance(tr, ChainTransform) and not tr.dynamic:
transforms = tr.transforms[:] + transforms
else:
new_chain.append(tr)
# Now combine together all compatible adjacent transforms
cont = True
tr = new_chain
while cont:
new_tr = [tr[0]]
cont = False
for t2 in tr[1:]:
t1 = new_tr[-1]
pr = t1 * t2
if (not t1.dynamic and not t2.dynamic and not
isinstance(pr, ChainTransform)):
cont = True
new_tr.pop()
new_tr.append(pr)
else:
new_tr.append(t2)
tr = new_tr
self.transforms = tr | [
"def",
"source_changed",
"(",
"self",
",",
"event",
")",
":",
"# bail out early if the chain is empty",
"transforms",
"=",
"self",
".",
"_chain",
".",
"transforms",
"[",
":",
"]",
"if",
"len",
"(",
"transforms",
")",
"==",
"0",
":",
"self",
".",
"transforms",
"=",
"[",
"]",
"return",
"# If the change signal comes from a transform that already appears in",
"# our simplified transform list, then there is no need to re-simplify.",
"if",
"event",
"is",
"not",
"None",
":",
"for",
"source",
"in",
"event",
".",
"sources",
"[",
":",
":",
"-",
"1",
"]",
":",
"if",
"source",
"in",
"self",
".",
"transforms",
":",
"self",
".",
"update",
"(",
"event",
")",
"return",
"# First flatten the chain by expanding all nested chains",
"new_chain",
"=",
"[",
"]",
"while",
"len",
"(",
"transforms",
")",
">",
"0",
":",
"tr",
"=",
"transforms",
".",
"pop",
"(",
"0",
")",
"if",
"isinstance",
"(",
"tr",
",",
"ChainTransform",
")",
"and",
"not",
"tr",
".",
"dynamic",
":",
"transforms",
"=",
"tr",
".",
"transforms",
"[",
":",
"]",
"+",
"transforms",
"else",
":",
"new_chain",
".",
"append",
"(",
"tr",
")",
"# Now combine together all compatible adjacent transforms",
"cont",
"=",
"True",
"tr",
"=",
"new_chain",
"while",
"cont",
":",
"new_tr",
"=",
"[",
"tr",
"[",
"0",
"]",
"]",
"cont",
"=",
"False",
"for",
"t2",
"in",
"tr",
"[",
"1",
":",
"]",
":",
"t1",
"=",
"new_tr",
"[",
"-",
"1",
"]",
"pr",
"=",
"t1",
"*",
"t2",
"if",
"(",
"not",
"t1",
".",
"dynamic",
"and",
"not",
"t2",
".",
"dynamic",
"and",
"not",
"isinstance",
"(",
"pr",
",",
"ChainTransform",
")",
")",
":",
"cont",
"=",
"True",
"new_tr",
".",
"pop",
"(",
")",
"new_tr",
".",
"append",
"(",
"pr",
")",
"else",
":",
"new_tr",
".",
"append",
"(",
"t2",
")",
"tr",
"=",
"new_tr",
"self",
".",
"transforms",
"=",
"tr"
] | Generate a simplified chain by joining adjacent transforms. | [
"Generate",
"a",
"simplified",
"chain",
"by",
"joining",
"adjacent",
"transforms",
"."
] | python | train |
TissueMAPS/TmDeploy | elasticluster/elasticluster/cluster.py | https://github.com/TissueMAPS/TmDeploy/blob/f891b4ffb21431988bc4a063ae871da3bf284a45/elasticluster/elasticluster/cluster.py#L477-L499 | def _start_node(node):
"""
Start the given node VM.
:return: bool -- True on success, False otherwise
"""
log.debug("_start_node: working on node `%s`", node.name)
# FIXME: the following check is not optimal yet. When a node is still
# in a starting state, it will start another node here, since the
# `is_alive` method will only check for running nodes (see issue #13)
if node.is_alive():
log.info("Not starting node `%s` which is "
"already up&running.", node.name)
return True
else:
try:
node.start()
log.info("Node `%s` has been started.", node.name)
return True
except Exception as err:
log.exception("Could not start node `%s`: %s -- %s",
node.name, err, err.__class__)
return False | [
"def",
"_start_node",
"(",
"node",
")",
":",
"log",
".",
"debug",
"(",
"\"_start_node: working on node `%s`\"",
",",
"node",
".",
"name",
")",
"# FIXME: the following check is not optimal yet. When a node is still",
"# in a starting state, it will start another node here, since the",
"# `is_alive` method will only check for running nodes (see issue #13)",
"if",
"node",
".",
"is_alive",
"(",
")",
":",
"log",
".",
"info",
"(",
"\"Not starting node `%s` which is \"",
"\"already up&running.\"",
",",
"node",
".",
"name",
")",
"return",
"True",
"else",
":",
"try",
":",
"node",
".",
"start",
"(",
")",
"log",
".",
"info",
"(",
"\"Node `%s` has been started.\"",
",",
"node",
".",
"name",
")",
"return",
"True",
"except",
"Exception",
"as",
"err",
":",
"log",
".",
"exception",
"(",
"\"Could not start node `%s`: %s -- %s\"",
",",
"node",
".",
"name",
",",
"err",
",",
"err",
".",
"__class__",
")",
"return",
"False"
] | Start the given node VM.
:return: bool -- True on success, False otherwise | [
"Start",
"the",
"given",
"node",
"VM",
"."
] | python | train |
allenai/allennlp | allennlp/semparse/worlds/wikitables_world.py | https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/worlds/wikitables_world.py#L124-L134 | def _remove_action_from_type(valid_actions: Dict[str, List[str]],
type_: str,
filter_function: Callable[[str], bool]) -> None:
"""
Finds the production rule matching the filter function in the given type's valid action
list, and removes it. If there is more than one matching function, we crash.
"""
action_list = valid_actions[type_]
matching_action_index = [i for i, action in enumerate(action_list) if filter_function(action)]
assert len(matching_action_index) == 1, "Filter function didn't find one action"
action_list.pop(matching_action_index[0]) | [
"def",
"_remove_action_from_type",
"(",
"valid_actions",
":",
"Dict",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
",",
"type_",
":",
"str",
",",
"filter_function",
":",
"Callable",
"[",
"[",
"str",
"]",
",",
"bool",
"]",
")",
"->",
"None",
":",
"action_list",
"=",
"valid_actions",
"[",
"type_",
"]",
"matching_action_index",
"=",
"[",
"i",
"for",
"i",
",",
"action",
"in",
"enumerate",
"(",
"action_list",
")",
"if",
"filter_function",
"(",
"action",
")",
"]",
"assert",
"len",
"(",
"matching_action_index",
")",
"==",
"1",
",",
"\"Filter function didn't find one action\"",
"action_list",
".",
"pop",
"(",
"matching_action_index",
"[",
"0",
"]",
")"
] | Finds the production rule matching the filter function in the given type's valid action
list, and removes it. If there is more than one matching function, we crash. | [
"Finds",
"the",
"production",
"rule",
"matching",
"the",
"filter",
"function",
"in",
"the",
"given",
"type",
"s",
"valid",
"action",
"list",
"and",
"removes",
"it",
".",
"If",
"there",
"is",
"more",
"than",
"one",
"matching",
"function",
"we",
"crash",
"."
] | python | train |
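
Note: a toy call showing both the removal and the single-match contract, assuming the function above is in scope (the grammar strings are hypothetical):

```python
valid_actions = {'e': ['e -> x', 'e -> y', 'e -> z']}
_remove_action_from_type(valid_actions, 'e',
                         lambda action: action.endswith('x'))
print(valid_actions)  # {'e': ['e -> y', 'e -> z']}
# A filter matching zero or several actions would trip the assertion.
```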
hughsie/python-appstream | appstream/component.py | https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/component.py#L78-L108 | def _parse_tree(self, node):
""" Parse a <review> object """
if 'date' in node.attrib:
dt = dateutil.parser.parse(node.attrib['date'])
self.date = int(dt.strftime("%s"))
if 'id' in node.attrib:
self.id = node.attrib['id']
if 'karma' in node.attrib:
self.karma = int(node.attrib['karma'])
if 'score' in node.attrib:
self.score = int(node.attrib['score'])
if 'rating' in node.attrib:
self.rating = int(node.attrib['rating'])
for c3 in node:
if c3.tag == 'lang':
self.locale = c3.text
if c3.tag == 'version':
self.version = c3.text
if c3.tag == 'reviewer_id':
self.reviewer_id = c3.text
if c3.tag == 'reviewer_name':
self.reviewer_name = c3.text
if c3.tag == 'summary':
self.summary = c3.text
if c3.tag == 'description':
self.description = _parse_desc(c3)
if c3.tag == 'metadata':
for c4 in c3:
if c4.tag == 'value':
if 'key' in c4.attrib:
self.metadata[c4.attrib['key']] = c4.text | [
"def",
"_parse_tree",
"(",
"self",
",",
"node",
")",
":",
"if",
"'date'",
"in",
"node",
".",
"attrib",
":",
"dt",
"=",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"node",
".",
"attrib",
"[",
"'date'",
"]",
")",
"self",
".",
"date",
"=",
"int",
"(",
"dt",
".",
"strftime",
"(",
"\"%s\"",
")",
")",
"if",
"'id'",
"in",
"node",
".",
"attrib",
":",
"self",
".",
"id",
"=",
"node",
".",
"attrib",
"[",
"'id'",
"]",
"if",
"'karma'",
"in",
"node",
".",
"attrib",
":",
"self",
".",
"karma",
"=",
"int",
"(",
"node",
".",
"attrib",
"[",
"'karma'",
"]",
")",
"if",
"'score'",
"in",
"node",
".",
"attrib",
":",
"self",
".",
"score",
"=",
"int",
"(",
"node",
".",
"attrib",
"[",
"'score'",
"]",
")",
"if",
"'rating'",
"in",
"node",
".",
"attrib",
":",
"self",
".",
"rating",
"=",
"int",
"(",
"node",
".",
"attrib",
"[",
"'rating'",
"]",
")",
"for",
"c3",
"in",
"node",
":",
"if",
"c3",
".",
"tag",
"==",
"'lang'",
":",
"self",
".",
"locale",
"=",
"c3",
".",
"text",
"if",
"c3",
".",
"tag",
"==",
"'version'",
":",
"self",
".",
"version",
"=",
"c3",
".",
"text",
"if",
"c3",
".",
"tag",
"==",
"'reviewer_id'",
":",
"self",
".",
"reviewer_id",
"=",
"c3",
".",
"text",
"if",
"c3",
".",
"tag",
"==",
"'reviewer_name'",
":",
"self",
".",
"reviewer_name",
"=",
"c3",
".",
"text",
"if",
"c3",
".",
"tag",
"==",
"'summary'",
":",
"self",
".",
"summary",
"=",
"c3",
".",
"text",
"if",
"c3",
".",
"tag",
"==",
"'description'",
":",
"self",
".",
"description",
"=",
"_parse_desc",
"(",
"c3",
")",
"if",
"c3",
".",
"tag",
"==",
"'metadata'",
":",
"for",
"c4",
"in",
"c3",
":",
"if",
"c4",
".",
"tag",
"==",
"'value'",
":",
"if",
"'key'",
"in",
"c4",
".",
"attrib",
":",
"self",
".",
"metadata",
"[",
"c4",
".",
"attrib",
"[",
"'key'",
"]",
"]",
"=",
"c4",
".",
"text"
] | Parse a <review> object | [
"Parse",
"a",
"<review",
">",
"object"
] | python | train |
atztogo/phonopy | phonopy/api_phonopy.py | https://github.com/atztogo/phonopy/blob/869cc2ba9e7d495d5f4cf6942415ab3fc9e2a10f/phonopy/api_phonopy.py#L1695-L1717 | def get_partial_DOS(self):
"""Return frequency points and partial DOS as a tuple.
Projection is done onto atoms and may also be done along directions
depending on the parameters at run_partial_dos.
Returns
-------
A tuple with (frequency_points, partial_dos).
frequency_points: ndarray
shape=(frequency_sampling_points, ), dtype='double'
partial_dos:
shape=(frequency_sampling_points, projections), dtype='double'
"""
warnings.warn("Phonopy.get_partial_DOS is deprecated. "
"Use Phonopy.get_projected_dos_dict.",
DeprecationWarning)
pdos = self.get_projected_dos_dict()
return pdos['frequency_points'], pdos['projected_dos'] | [
"def",
"get_partial_DOS",
"(",
"self",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Phonopy.get_partial_DOS is deprecated. \"",
"\"Use Phonopy.get_projected_dos_dict.\"",
",",
"DeprecationWarning",
")",
"pdos",
"=",
"self",
".",
"get_projected_dos_dict",
"(",
")",
"return",
"pdos",
"[",
"'frequency_points'",
"]",
",",
"pdos",
"[",
"'projected_dos'",
"]"
] | Return frequency points and partial DOS as a tuple.
Projection is done onto atoms and may also be done along directions
depending on the parameters at run_partial_dos.
Returns
-------
A tuple with (frequency_points, partial_dos).
frequency_points: ndarray
shape=(frequency_sampling_points, ), dtype='double'
partial_dos:
shape=(frequency_sampling_points, projections), dtype='double' | [
"Return",
"frequency",
"points",
"and",
"partial",
"DOS",
"as",
"a",
"tuple",
"."
] | python | train |
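
Note: the deprecation warning implies this migration (`phonon` is a hypothetical, already-initialised `Phonopy` object; the dict keys come from the function body above):

```python
# old, deprecated:
frequency_points, partial_dos = phonon.get_partial_DOS()

# new:
pdos = phonon.get_projected_dos_dict()
frequency_points = pdos['frequency_points']
partial_dos = pdos['projected_dos']
```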
adamheins/r12 | r12/shell.py | https://github.com/adamheins/r12/blob/ff78178332140930bf46a94a0b15ee082bb92491/r12/shell.py#L180-L188 | def do_status(self, arg):
''' Print information about the arm. '''
info = self.arm.get_info()
max_len = len(max(info.keys(), key=len))
print(self.style.theme('\nArm Status'))
for key, value in info.items():
print(self.style.help(key.ljust(max_len + 2), str(value)))
print() | [
"def",
"do_status",
"(",
"self",
",",
"arg",
")",
":",
"info",
"=",
"self",
".",
"arm",
".",
"get_info",
"(",
")",
"max_len",
"=",
"len",
"(",
"max",
"(",
"info",
".",
"keys",
"(",
")",
",",
"key",
"=",
"len",
")",
")",
"print",
"(",
"self",
".",
"style",
".",
"theme",
"(",
"'\\nArm Status'",
")",
")",
"for",
"key",
",",
"value",
"in",
"info",
".",
"items",
"(",
")",
":",
"print",
"(",
"self",
".",
"style",
".",
"help",
"(",
"key",
".",
"ljust",
"(",
"max_len",
"+",
"2",
")",
",",
"str",
"(",
"value",
")",
")",
")",
"print",
"(",
")"
] | Print information about the arm. | [
"Print",
"information",
"about",
"the",
"arm",
"."
] | python | train |
freelancer/freelancer-sdk-python | freelancersdk/resources/users/users.py | https://github.com/freelancer/freelancer-sdk-python/blob/e09034936d6f13b3909a9464ee329c81c1834941/freelancersdk/resources/users/users.py#L62-L77 | def add_user_jobs(session, job_ids):
"""
Add a list of jobs to the currently authenticated user
"""
jobs_data = {
'jobs[]': job_ids
}
response = make_post_request(session, 'self/jobs', json_data=jobs_data)
json_data = response.json()
if response.status_code == 200:
return json_data['status']
else:
raise UserJobsNotAddedException(
message=json_data['message'],
error_code=json_data['error_code'],
request_id=json_data['request_id']) | [
"def",
"add_user_jobs",
"(",
"session",
",",
"job_ids",
")",
":",
"jobs_data",
"=",
"{",
"'jobs[]'",
":",
"job_ids",
"}",
"response",
"=",
"make_post_request",
"(",
"session",
",",
"'self/jobs'",
",",
"json_data",
"=",
"jobs_data",
")",
"json_data",
"=",
"response",
".",
"json",
"(",
")",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"return",
"json_data",
"[",
"'status'",
"]",
"else",
":",
"raise",
"UserJobsNotAddedException",
"(",
"message",
"=",
"json_data",
"[",
"'message'",
"]",
",",
"error_code",
"=",
"json_data",
"[",
"'error_code'",
"]",
",",
"request_id",
"=",
"json_data",
"[",
"'request_id'",
"]",
")"
] | Add a list of jobs to the currently authenticated user | [
"Add",
"a",
"list",
"of",
"jobs",
"to",
"the",
"currently",
"authenticated",
"user"
] | python | valid |
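
Note: a hedged calling sketch. The `Session` import path and constructor arguments are assumptions based on the package layout shown in the row, and the token and job IDs are placeholders:

```python
from freelancersdk.session import Session
from freelancersdk.resources.users.users import add_user_jobs
from freelancersdk.resources.users.exceptions import UserJobsNotAddedException

session = Session(oauth_token='<OAUTH_TOKEN>')
try:
    status = add_user_jobs(session, job_ids=[1, 2, 3])
except UserJobsNotAddedException as e:
    print('Could not add jobs:', e)
else:
    print('Jobs added:', status)
```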
brocade/pynos | pynos/versions/base/lldp.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/lldp.py#L92-L110 | def get_lldp_neighbors_request(last_ifindex, rbridge_id):
""" Creates a new Netconf request based on the last received or if
rbridge_id is specified
ifindex when the hasMore flag is true
"""
request_lldp = ET.Element(
'get-lldp-neighbor-detail',
xmlns="urn:brocade.com:mgmt:brocade-lldp-ext"
)
if rbridge_id is not None:
rbridge_el = ET.SubElement(request_lldp, "rbridge-id")
rbridge_el.text = rbridge_id
elif last_ifindex != '':
last_received_int = ET.SubElement(request_lldp,
"last-rcvd-ifindex")
last_received_int.text = last_ifindex
return request_lldp | [
"def",
"get_lldp_neighbors_request",
"(",
"last_ifindex",
",",
"rbridge_id",
")",
":",
"request_lldp",
"=",
"ET",
".",
"Element",
"(",
"'get-lldp-neighbor-detail'",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-lldp-ext\"",
")",
"if",
"rbridge_id",
"is",
"not",
"None",
":",
"rbridge_el",
"=",
"ET",
".",
"SubElement",
"(",
"request_lldp",
",",
"\"rbridge-id\"",
")",
"rbridge_el",
".",
"text",
"=",
"rbridge_id",
"elif",
"last_ifindex",
"!=",
"''",
":",
"last_received_int",
"=",
"ET",
".",
"SubElement",
"(",
"request_lldp",
",",
"\"last-rcvd-ifindex\"",
")",
"last_received_int",
".",
"text",
"=",
"last_ifindex",
"return",
"request_lldp"
] | Creates a new Netconf request based on the last received or if
rbridge_id is specified
ifindex when the hasMore flag is true | [
"Creates",
"a",
"new",
"Netconf",
"request",
"based",
"on",
"the",
"last",
"received",
"or",
"if",
"rbridge_id",
"is",
"specifed",
"ifindex",
"when",
"the",
"hasMore",
"flag",
"is",
"true"
] | python | train |
hasgeek/coaster | coaster/sqlalchemy/mixins.py | https://github.com/hasgeek/coaster/blob/07f7eb5d5f516e22fa14fdf4dc70e0ae13ee398d/coaster/sqlalchemy/mixins.py#L305-L311 | def _set_fields(self, fields):
"""Helper method for :meth:`upsert` in the various subclasses"""
for f in fields:
if hasattr(self, f):
setattr(self, f, fields[f])
else:
raise TypeError("'{arg}' is an invalid argument for {instance_type}".format(arg=f, instance_type=self.__class__.__name__)) | [
"def",
"_set_fields",
"(",
"self",
",",
"fields",
")",
":",
"for",
"f",
"in",
"fields",
":",
"if",
"hasattr",
"(",
"self",
",",
"f",
")",
":",
"setattr",
"(",
"self",
",",
"f",
",",
"fields",
"[",
"f",
"]",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"'{arg}' is an invalid argument for {instance_type}\"",
".",
"format",
"(",
"arg",
"=",
"f",
",",
"instance_type",
"=",
"self",
".",
"__class__",
".",
"__name__",
")",
")"
] | Helper method for :meth:`upsert` in the various subclasses | [
"Helper",
"method",
"for",
":",
"meth",
":",
"upsert",
"in",
"the",
"various",
"subclasses"
] | python | train |
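
Note: a toy model showing the guard — known attribute names are set, unknown ones raise `TypeError`. The `Post` class is illustrative and inlines the helper above:

```python
class Post:
    title = None

    def _set_fields(self, fields):
        for f in fields:
            if hasattr(self, f):
                setattr(self, f, fields[f])
            else:
                raise TypeError("'%s' is an invalid argument for %s"
                                % (f, type(self).__name__))

p = Post()
p._set_fields({'title': 'Hello'})
print(p.title)                 # Hello
p._set_fields({'slug': 'x'})   # raises TypeError
```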
acorg/dark-matter | dark/html.py | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/html.py#L274-L293 | def _writeFASTA(self, i, image):
"""
Write a FASTA file containing the set of reads that hit a sequence.
@param i: The number of the image in self._images.
@param image: A member of self._images.
@return: A C{str}, either 'fasta' or 'fastq' indicating the format
of the reads in C{self._titlesAlignments}.
"""
if isinstance(self._titlesAlignments.readsAlignments.reads,
FastqReads):
format_ = 'fastq'
else:
format_ = 'fasta'
filename = '%s/%d.%s' % (self._outputDir, i, format_)
titleAlignments = self._titlesAlignments[image['title']]
with open(filename, 'w') as fp:
for titleAlignment in titleAlignments:
fp.write(titleAlignment.read.toString(format_))
return format_ | [
"def",
"_writeFASTA",
"(",
"self",
",",
"i",
",",
"image",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"_titlesAlignments",
".",
"readsAlignments",
".",
"reads",
",",
"FastqReads",
")",
":",
"format_",
"=",
"'fastq'",
"else",
":",
"format_",
"=",
"'fasta'",
"filename",
"=",
"'%s/%d.%s'",
"%",
"(",
"self",
".",
"_outputDir",
",",
"i",
",",
"format_",
")",
"titleAlignments",
"=",
"self",
".",
"_titlesAlignments",
"[",
"image",
"[",
"'title'",
"]",
"]",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"fp",
":",
"for",
"titleAlignment",
"in",
"titleAlignments",
":",
"fp",
".",
"write",
"(",
"titleAlignment",
".",
"read",
".",
"toString",
"(",
"format_",
")",
")",
"return",
"format_"
] | Write a FASTA file containing the set of reads that hit a sequence.
@param i: The number of the image in self._images.
@param image: A member of self._images.
@return: A C{str}, either 'fasta' or 'fastq' indicating the format
of the reads in C{self._titlesAlignments}. | [
"Write",
"a",
"FASTA",
"file",
"containing",
"the",
"set",
"of",
"reads",
"that",
"hit",
"a",
"sequence",
"."
] | python | train |
hvac/hvac | hvac/api/auth_methods/mfa.py | https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/auth_methods/mfa.py#L28-L65 | def configure(self, mount_point, mfa_type='duo', force=False):
"""Configure MFA for a supported method.
This endpoint allows you to turn on multi-factor authentication with a given backend.
Currently only Duo is supported.
Supported methods:
POST: /auth/{mount_point}/mfa_config. Produces: 204 (empty body)
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:param mfa_type: Enables MFA with given backend (available: duo)
:type mfa_type: str | unicode
:param force: If True, make the "mfa_config" request regardless of circumstance. If False (the default), verify
the provided mount_point is available and one of the types of methods supported by this feature.
:type force: bool
:return: The response of the configure MFA request.
:rtype: requests.Response
"""
if mfa_type != 'duo' and not force:
# The situation described via this exception is not likely to change in the future.
# However we provided that flexibility here just in case.
error_msg = 'Unsupported mfa_type argument provided "{arg}", supported types: "{mfa_types}"'
raise exceptions.ParamValidationError(error_msg.format(
mfa_types=','.join(SUPPORTED_MFA_TYPES),
arg=mfa_type,
))
params = {
'type': mfa_type,
}
api_path = '/v1/auth/{mount_point}/mfa_config'.format(
mount_point=mount_point
)
return self._adapter.post(
url=api_path,
json=params,
) | [
"def",
"configure",
"(",
"self",
",",
"mount_point",
",",
"mfa_type",
"=",
"'duo'",
",",
"force",
"=",
"False",
")",
":",
"if",
"mfa_type",
"!=",
"'duo'",
"and",
"not",
"force",
":",
"# The situation described via this exception is not likely to change in the future.",
"# However we provided that flexibility here just in case.",
"error_msg",
"=",
"'Unsupported mfa_type argument provided \"{arg}\", supported types: \"{mfa_types}\"'",
"raise",
"exceptions",
".",
"ParamValidationError",
"(",
"error_msg",
".",
"format",
"(",
"mfa_types",
"=",
"','",
".",
"join",
"(",
"SUPPORTED_MFA_TYPES",
")",
",",
"arg",
"=",
"mfa_type",
",",
")",
")",
"params",
"=",
"{",
"'type'",
":",
"mfa_type",
",",
"}",
"api_path",
"=",
"'/v1/auth/{mount_point}/mfa_config'",
".",
"format",
"(",
"mount_point",
"=",
"mount_point",
")",
"return",
"self",
".",
"_adapter",
".",
"post",
"(",
"url",
"=",
"api_path",
",",
"json",
"=",
"params",
",",
")"
] | Configure MFA for a supported method.
This endpoint allows you to turn on multi-factor authentication with a given backend.
Currently only Duo is supported.
Supported methods:
POST: /auth/{mount_point}/mfa_config. Produces: 204 (empty body)
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:param mfa_type: Enables MFA with given backend (available: duo)
:type mfa_type: str | unicode
:param force: If True, make the "mfa_config" request regardless of circumstance. If False (the default), verify
the provided mount_point is available and one of the types of methods supported by this feature.
:type force: bool
:return: The response of the configure MFA request.
:rtype: requests.Response | [
"Configure",
"MFA",
"for",
"a",
"supported",
"method",
"."
] | python | train |
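
Note: a hedged usage sketch. The `client.auth.mfa` accessor path is an assumption based on the module location (`hvac/api/auth_methods/mfa.py`), and the Vault URL and token are placeholders:

```python
import hvac

client = hvac.Client(url='https://vault.example.com:8200', token='<TOKEN>')
client.auth.mfa.configure(mount_point='userpass')  # defaults to Duo
# Any other mfa_type is rejected client-side unless force=True:
client.auth.mfa.configure(mount_point='userpass', mfa_type='okta', force=True)
```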
StackStorm/pybind | pybind/slxos/v17s_1_02/brocade_dot1ag_rpc/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/brocade_dot1ag_rpc/__init__.py#L95-L116 | def _set_get_show_cfm(self, v, load=False):
"""
Setter method for get_show_cfm, mapped from YANG variable /brocade_dot1ag_rpc/get_show_cfm (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_show_cfm is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_show_cfm() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=get_show_cfm.get_show_cfm, is_leaf=True, yang_name="get-show-cfm", rest_name="get-show-cfm", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'dot1agSummaryShowCfm'}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """get_show_cfm must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=get_show_cfm.get_show_cfm, is_leaf=True, yang_name="get-show-cfm", rest_name="get-show-cfm", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'dot1agSummaryShowCfm'}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='rpc', is_config=True)""",
})
self.__get_show_cfm = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_get_show_cfm",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"get_show_cfm",
".",
"get_show_cfm",
",",
"is_leaf",
"=",
"True",
",",
"yang_name",
"=",
"\"get-show-cfm\"",
",",
"rest_name",
"=",
"\"get-show-cfm\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"False",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'hidden'",
":",
"u'rpccmd'",
",",
"u'actionpoint'",
":",
"u'dot1agSummaryShowCfm'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-dot1ag'",
",",
"defining_module",
"=",
"'brocade-dot1ag'",
",",
"yang_type",
"=",
"'rpc'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"get_show_cfm must be of a type compatible with rpc\"\"\"",
",",
"'defined-type'",
":",
"\"rpc\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=get_show_cfm.get_show_cfm, is_leaf=True, yang_name=\"get-show-cfm\", rest_name=\"get-show-cfm\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'dot1agSummaryShowCfm'}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='rpc', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__get_show_cfm",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | Setter method for get_show_cfm, mapped from YANG variable /brocade_dot1ag_rpc/get_show_cfm (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_show_cfm is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_show_cfm() directly. | [
"Setter",
"method",
"for",
"get_show_cfm",
"mapped",
"from",
"YANG",
"variable",
"/",
"brocade_dot1ag_rpc",
"/",
"get_show_cfm",
"(",
"rpc",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_get_show_cfm",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_get_show_cfm",
"()",
"directly",
"."
] | python | train |
dougalsutherland/skl-groups | skl_groups/summaries/bag_of_words.py | https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/summaries/bag_of_words.py#L115-L134 | def fit_transform(self, X):
'''
Compute clustering and transform a list of bag features into its
bag-of-words representation. Like calling fit(X) and then transform(X),
but more efficient.
Parameters
----------
X : :class:`skl_groups.features.Features` or list of bag feature arrays
New data to transform.
Returns
-------
X_new : integer array, shape [len(X), kmeans.n_clusters]
X transformed into the new space.
'''
X = as_features(X, stack=True)
self.kmeans_fit_ = copy(self.kmeans)
assignments = self.kmeans_fit_.fit_predict(X.stacked_features)
return self._group_assignments(X, assignments) | [
"def",
"fit_transform",
"(",
"self",
",",
"X",
")",
":",
"X",
"=",
"as_features",
"(",
"X",
",",
"stack",
"=",
"True",
")",
"self",
".",
"kmeans_fit_",
"=",
"copy",
"(",
"self",
".",
"kmeans",
")",
"assignments",
"=",
"self",
".",
"kmeans_fit_",
".",
"fit_predict",
"(",
"X",
".",
"stacked_features",
")",
"return",
"self",
".",
"_group_assignments",
"(",
"X",
",",
"assignments",
")"
] | Compute clustering and transform a list of bag features into its
bag-of-words representation. Like calling fit(X) and then transform(X),
but more efficient.
Parameters
----------
X : :class:`skl_groups.features.Features` or list of bag feature arrays
New data to transform.
Returns
-------
X_new : integer array, shape [len(X), kmeans.n_clusters]
X transformed into the new space. | [
"Compute",
"clustering",
"and",
"transform",
"a",
"list",
"of",
"bag",
"features",
"into",
"its",
"bag",
"-",
"of",
"-",
"words",
"representation",
".",
"Like",
"calling",
"fit",
"(",
"X",
")",
"and",
"then",
"transform",
"(",
"X",
")",
"but",
"more",
"efficient",
"."
] | python | valid |
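The entry above stacks all bags, clusters once, then counts assignments per bag; the private `_group_assignments` helper is not shown, so the counting below is a reconstruction under that assumption, with invented bag shapes.

import numpy as np
from sklearn.cluster import KMeans

bags = [np.random.rand(20, 5) for _ in range(3)]   # 3 bags of 5-d points
stacked = np.vstack(bags)
assignments = KMeans(n_clusters=4, n_init=10).fit_predict(stacked)

counts = np.zeros((len(bags), 4), dtype=int)
start = 0
for i, bag in enumerate(bags):
    stop = start + len(bag)
    counts[i] = np.bincount(assignments[start:stop], minlength=4)
    start = stop
print(counts)  # one cluster histogram per bag; each row sums to 20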
StagPython/StagPy | stagpy/rprof.py | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/rprof.py#L165-L196 | def plot_every_step(sdat, lovs):
"""Plot profiles at each time step.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
lovs (nested list of str): nested list of profile names such as
the one produced by :func:`stagpy.misc.list_of_vars`.
Other Parameters:
conf.core.snapshots: the slice of snapshots.
conf.conf.timesteps: the slice of timesteps.
"""
sovs = misc.set_of_vars(lovs)
for step in sdat.walk.filter(rprof=True):
rprofs = {}
rads = {}
metas = {}
for rvar in sovs:
rprof, rad, meta = get_rprof(step, rvar)
rprofs[rvar] = rprof
metas[rvar] = meta
if rad is not None:
rads[rvar] = rad
rprofs['bounds'] = misc.get_rbounds(step)
rcmb, rsurf = misc.get_rbounds(step)
rprofs['bounds'] = (step.sdat.scale(rcmb, 'm')[0],
step.sdat.scale(rsurf, 'm')[0])
rprofs['r'] = get_rprof(step, 'r')[0] + rprofs['bounds'][0]
stepstr = str(step.istep)
_plot_rprof_list(sdat, lovs, rprofs, metas, stepstr, rads) | [
"def",
"plot_every_step",
"(",
"sdat",
",",
"lovs",
")",
":",
"sovs",
"=",
"misc",
".",
"set_of_vars",
"(",
"lovs",
")",
"for",
"step",
"in",
"sdat",
".",
"walk",
".",
"filter",
"(",
"rprof",
"=",
"True",
")",
":",
"rprofs",
"=",
"{",
"}",
"rads",
"=",
"{",
"}",
"metas",
"=",
"{",
"}",
"for",
"rvar",
"in",
"sovs",
":",
"rprof",
",",
"rad",
",",
"meta",
"=",
"get_rprof",
"(",
"step",
",",
"rvar",
")",
"rprofs",
"[",
"rvar",
"]",
"=",
"rprof",
"metas",
"[",
"rvar",
"]",
"=",
"meta",
"if",
"rad",
"is",
"not",
"None",
":",
"rads",
"[",
"rvar",
"]",
"=",
"rad",
"rprofs",
"[",
"'bounds'",
"]",
"=",
"misc",
".",
"get_rbounds",
"(",
"step",
")",
"rcmb",
",",
"rsurf",
"=",
"misc",
".",
"get_rbounds",
"(",
"step",
")",
"rprofs",
"[",
"'bounds'",
"]",
"=",
"(",
"step",
".",
"sdat",
".",
"scale",
"(",
"rcmb",
",",
"'m'",
")",
"[",
"0",
"]",
",",
"step",
".",
"sdat",
".",
"scale",
"(",
"rsurf",
",",
"'m'",
")",
"[",
"0",
"]",
")",
"rprofs",
"[",
"'r'",
"]",
"=",
"get_rprof",
"(",
"step",
",",
"'r'",
")",
"[",
"0",
"]",
"+",
"rprofs",
"[",
"'bounds'",
"]",
"[",
"0",
"]",
"stepstr",
"=",
"str",
"(",
"step",
".",
"istep",
")",
"_plot_rprof_list",
"(",
"sdat",
",",
"lovs",
",",
"rprofs",
",",
"metas",
",",
"stepstr",
",",
"rads",
")"
] | Plot profiles at each time step.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
lovs (nested list of str): nested list of profile names such as
the one produced by :func:`stagpy.misc.list_of_vars`.
Other Parameters:
conf.core.snapshots: the slice of snapshots.
conf.conf.timesteps: the slice of timesteps. | [
"Plot",
"profiles",
"at",
"each",
"time",
"step",
"."
] | python | train |
tensorflow/tensor2tensor | tensor2tensor/utils/metrics.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L652-L667 | def create_eager_metrics(metric_names, weights_fn=common_layers.weights_all):
"""Create metrics accumulators and averager for Eager mode.
Args:
metric_names: list<str> from Metrics enum
weights_fn: function that takes labels and returns a weights mask. Defaults
to weights of all 1, i.e. common_layers.weights_all. Use
common_layers.weights_nonzero if labels have 0-padding.
Returns:
(accum_fn(predictions, targets) => None,
result_fn() => dict<str metric_name, float avg_val>)
"""
metric_fns = dict(
[(name, METRICS_FNS[name]) for name in metric_names])
return create_eager_metrics_internal(metric_fns, weights_fn) | [
"def",
"create_eager_metrics",
"(",
"metric_names",
",",
"weights_fn",
"=",
"common_layers",
".",
"weights_all",
")",
":",
"metric_fns",
"=",
"dict",
"(",
"[",
"(",
"name",
",",
"METRICS_FNS",
"[",
"name",
"]",
")",
"for",
"name",
"in",
"metric_names",
"]",
")",
"return",
"create_eager_metrics_internal",
"(",
"metric_fns",
",",
"weights_fn",
")"
] | Create metrics accumulators and averager for Eager mode.
Args:
metric_names: list<str> from Metrics enum
weights_fn: function that takes labels and returns a weights mask. Defaults
to weights of all 1, i.e. common_layers.weights_all. Use
common_layers.weights_nonzero if labels have 0-padding.
Returns:
(accum_fn(predictions, targets) => None,
result_fn() => dict<str metric_name, float avg_val>) | [
"Create",
"metrics",
"accumulators",
"and",
"averager",
"for",
"Eager",
"mode",
"."
] | python | train |
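The returned pair is two closures over shared accumulator state; a self-contained plain-Python analogue of that contract (not tensor2tensor code):

def make_running_average():
    state = {'total': 0.0, 'count': 0}
    def accum(value):
        # accum_fn analogue: fold one observation into the shared state.
        state['total'] += value
        state['count'] += 1
    def result():
        # result_fn analogue: report the average accumulated so far.
        return state['total'] / max(state['count'], 1)
    return accum, result

accum, result = make_running_average()
for v in (0.5, 0.7, 0.9):
    accum(v)
print(result())  # ~0.7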
facetoe/zenpy | zenpy/lib/api_objects/__init__.py | https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/api_objects/__init__.py#L4228-L4233 | def sharing_agreements(self):
"""
| Comment: The ids of the sharing agreements used for this ticket
"""
if self.api and self.sharing_agreement_ids:
return self.api._get_sharing_agreements(self.sharing_agreement_ids) | [
"def",
"sharing_agreements",
"(",
"self",
")",
":",
"if",
"self",
".",
"api",
"and",
"self",
".",
"sharing_agreement_ids",
":",
"return",
"self",
".",
"api",
".",
"_get_sharing_agreements",
"(",
"self",
".",
"sharing_agreement_ids",
")"
] | | Comment: The ids of the sharing agreements used for this ticket | [
"|",
"Comment",
":",
"The",
"ids",
"of",
"the",
"sharing",
"agreements",
"used",
"for",
"this",
"ticket"
] | python | train |
keepkey/python-keepkey | keepkeylib/transport.py | https://github.com/keepkey/python-keepkey/blob/8318e3a8c4025d499342130ce4305881a325c013/keepkeylib/transport.py#L71-L83 | def read(self):
"""
If there is data available to be read from the transport, reads the data and tries to parse it as a protobuf message. If the parsing succeeds, returns a protobuf object.
Otherwise, returns None.
"""
if not self.ready_to_read():
return None
data = self._read()
if data is None:
return None
return self._parse_message(data) | [
"def",
"read",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"ready_to_read",
"(",
")",
":",
"return",
"None",
"data",
"=",
"self",
".",
"_read",
"(",
")",
"if",
"data",
"is",
"None",
":",
"return",
"None",
"return",
"self",
".",
"_parse_message",
"(",
"data",
")"
] | If there is data available to be read from the transport, reads the data and tries to parse it as a protobuf message. If the parsing succeeds, returns a protobuf object.
Otherwise, returns None. | [
"If",
"there",
"is",
"data",
"available",
"to",
"be",
"read",
"from",
"the",
"transport",
"reads",
"the",
"data",
"and",
"tries",
"to",
"parse",
"it",
"as",
"a",
"protobuf",
"message",
".",
"If",
"the",
"parsing",
"succeeds",
"return",
"a",
"protobuf",
"object",
".",
"Otherwise",
"returns",
"None",
"."
] | python | train |
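The guard-then-parse shape of `read()` can be exercised with a toy transport; the queue-backed class below is an invented stand-in, not part of keepkeylib.

class FakeTransport(object):
    def __init__(self, frames):
        self.frames = list(frames)
    def ready_to_read(self):
        return bool(self.frames)
    def _read(self):
        return self.frames.pop(0)
    def _parse_message(self, data):
        return data.decode('ascii')  # stand-in for protobuf parsing
    def read(self):
        # Same control flow as the entry above.
        if not self.ready_to_read():
            return None
        data = self._read()
        if data is None:
            return None
        return self._parse_message(data)

t = FakeTransport([b'ping'])
print(t.read())  # 'ping'
print(t.read())  # None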
ajenhl/tacl | tacl/text.py | https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/text.py#L50-L72 | def get_ngrams(self, minimum, maximum, skip_sizes=None):
"""Returns a generator supplying the n-grams (`minimum` <= n
<= `maximum`) for this text.
Each iteration of the generator supplies a tuple consisting of
the size of the n-grams and a `collections.Counter` of the
n-grams.
:param minimum: minimum n-gram size
:type minimum: `int`
:param maximum: maximum n-gram size
:type maximum: `int`
:param skip_sizes: sizes to not generate n-grams for
:type skip_sizes: `list` of `int`
:rtype: `generator`
"""
skip_sizes = skip_sizes or []
tokens = self.get_tokens()
for size in range(minimum, maximum + 1):
if size not in skip_sizes:
ngrams = collections.Counter(self._ngrams(tokens, size))
yield (size, ngrams) | [
"def",
"get_ngrams",
"(",
"self",
",",
"minimum",
",",
"maximum",
",",
"skip_sizes",
"=",
"None",
")",
":",
"skip_sizes",
"=",
"skip_sizes",
"or",
"[",
"]",
"tokens",
"=",
"self",
".",
"get_tokens",
"(",
")",
"for",
"size",
"in",
"range",
"(",
"minimum",
",",
"maximum",
"+",
"1",
")",
":",
"if",
"size",
"not",
"in",
"skip_sizes",
":",
"ngrams",
"=",
"collections",
".",
"Counter",
"(",
"self",
".",
"_ngrams",
"(",
"tokens",
",",
"size",
")",
")",
"yield",
"(",
"size",
",",
"ngrams",
")"
] | Returns a generator supplying the n-grams (`minimum` <= n
<= `maximum`) for this text.
Each iteration of the generator supplies a tuple consisting of
the size of the n-grams and a `collections.Counter` of the
n-grams.
:param minimum: minimum n-gram size
:type minimum: `int`
:param maximum: maximum n-gram size
:type maximum: `int`
:param skip_sizes: sizes to not generate n-grams for
:type skip_sizes: `list` of `int`
:rtype: `generator` | [
"Returns",
"a",
"generator",
"supplying",
"the",
"n",
"-",
"grams",
"(",
"minimum",
"<",
"=",
"n",
"<",
"=",
"maximum",
")",
"for",
"this",
"text",
"."
] | python | train |
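A self-contained illustration of the size-by-size counting the generator performs; the `zip` trick stands in for the class's private `_ngrams` helper, whose implementation is not shown in this entry.

import collections

def ngrams(tokens, size):
    # Sliding windows of length `size` over the token list.
    return zip(*(tokens[i:] for i in range(size)))

tokens = ['a', 'b', 'a', 'b', 'c']
for size in range(2, 4):
    print(size, collections.Counter(ngrams(tokens, size)))
# 2 Counter({('a', 'b'): 2, ('b', 'a'): 1, ('b', 'c'): 1})
# 3 Counter({('a', 'b', 'a'): 1, ('b', 'a', 'b'): 1, ('a', 'b', 'c'): 1})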
alex-kostirin/pyatomac | atomac/ldtpd/text.py | https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/ldtpd/text.py#L272-L292 | def appendtext(self, window_name, object_name, data):
"""
Append string sequence.
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param data: data to type.
@type data: string
@return: 1 on success.
@rtype: integer
"""
object_handle = self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
object_handle.AXValue += data
return 1 | [
"def",
"appendtext",
"(",
"self",
",",
"window_name",
",",
"object_name",
",",
"data",
")",
":",
"object_handle",
"=",
"self",
".",
"_get_object_handle",
"(",
"window_name",
",",
"object_name",
")",
"if",
"not",
"object_handle",
".",
"AXEnabled",
":",
"raise",
"LdtpServerException",
"(",
"u\"Object %s state disabled\"",
"%",
"object_name",
")",
"object_handle",
".",
"AXValue",
"+=",
"data",
"return",
"1"
] | Append string sequence.
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param data: data to type.
@type data: string
@return: 1 on success.
@rtype: integer | [
"Append",
"string",
"sequence",
".",
"@param",
"window_name",
":",
"Window",
"name",
"to",
"type",
"in",
"either",
"full",
"name",
"LDTP",
"s",
"name",
"convention",
"or",
"a",
"Unix",
"glob",
".",
"@type",
"window_name",
":",
"string",
"@param",
"object_name",
":",
"Object",
"name",
"to",
"type",
"in",
"either",
"full",
"name",
"LDTP",
"s",
"name",
"convention",
"or",
"a",
"Unix",
"glob",
".",
"@type",
"object_name",
":",
"string",
"@param",
"data",
":",
"data",
"to",
"type",
".",
"@type",
"data",
":",
"string"
] | python | valid |
hayd/pep8radius | setup.py | https://github.com/hayd/pep8radius/blob/0c1d14835d390f7feeb602f35a768e52ce306a0a/setup.py#L13-L20 | def version():
"""Return version string."""
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)),
'pep8radius',
'main.py')) as input_file:
for line in input_file:
if line.startswith('__version__'):
return parse(line).body[0].value.s | [
"def",
"version",
"(",
")",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
",",
"'pep8radius'",
",",
"'main.py'",
")",
")",
"as",
"input_file",
":",
"for",
"line",
"in",
"input_file",
":",
"if",
"line",
".",
"startswith",
"(",
"'__version__'",
")",
":",
"return",
"parse",
"(",
"line",
")",
".",
"body",
"[",
"0",
"]",
".",
"value",
".",
"s"
] | Return version string. | [
"Return",
"version",
"string",
"."
] | python | train |
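The same AST trick in isolation: read `__version__` from source text without importing the module. The sample source string is invented; the original's `.s` attribute is the pre-3.8 spelling, replaced by `.value` on modern Python.

from ast import parse

source = '__version__ = "1.2.3"\n'
line = next(l for l in source.splitlines() if l.startswith('__version__'))
node = parse(line).body[0].value
print(node.value)  # '1.2.3' (use node.s on Python < 3.8)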
Duke-GCB/DukeDSClient | ddsc/ddsclient.py | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/ddsclient.py#L358-L369 | def run(self, args):
"""
Lists project names.
:param args: Namespace arguments parsed from the command line
"""
long_format = args.long_format
# project_name and auth_role args are mutually exclusive
if args.project_name or args.project_id:
project = self.fetch_project(args, must_exist=True, include_children=True)
self.print_project_details(project, long_format)
else:
self.print_project_list_details(args.auth_role, long_format) | [
"def",
"run",
"(",
"self",
",",
"args",
")",
":",
"long_format",
"=",
"args",
".",
"long_format",
"# project_name and auth_role args are mutually exclusive",
"if",
"args",
".",
"project_name",
"or",
"args",
".",
"project_id",
":",
"project",
"=",
"self",
".",
"fetch_project",
"(",
"args",
",",
"must_exist",
"=",
"True",
",",
"include_children",
"=",
"True",
")",
"self",
".",
"print_project_details",
"(",
"project",
",",
"long_format",
")",
"else",
":",
"self",
".",
"print_project_list_details",
"(",
"args",
".",
"auth_role",
",",
"long_format",
")"
] | Lists project names.
:param args: Namespace arguments parsed from the command line | [
"Lists",
"project",
"names",
".",
":",
"param",
"args",
"Namespace",
"arguments",
"parsed",
"from",
"the",
"command",
"line"
] | python | train |
ECRL/ecabc | ecabc/abc.py | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L620-L636 | def save_settings(self, filename):
'''Save settings to a JSON file
Args:
filename (string): name of the file to save to
'''
data = dict()
data['valueRanges'] = self._value_ranges
data['best_values'] = [str(value) for value in self._best_values]
data['minimize'] = self._minimize
data['num_employers'] = self._num_employers
data['best_score'] = str(self._best_score)
data['limit'] = self._limit
data['best_error'] = self._best_error
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True) | [
"def",
"save_settings",
"(",
"self",
",",
"filename",
")",
":",
"data",
"=",
"dict",
"(",
")",
"data",
"[",
"'valueRanges'",
"]",
"=",
"self",
".",
"_value_ranges",
"data",
"[",
"'best_values'",
"]",
"=",
"[",
"str",
"(",
"value",
")",
"for",
"value",
"in",
"self",
".",
"_best_values",
"]",
"data",
"[",
"'minimize'",
"]",
"=",
"self",
".",
"_minimize",
"data",
"[",
"'num_employers'",
"]",
"=",
"self",
".",
"_num_employers",
"data",
"[",
"'best_score'",
"]",
"=",
"str",
"(",
"self",
".",
"_best_score",
")",
"data",
"[",
"'limit'",
"]",
"=",
"self",
".",
"_limit",
"data",
"[",
"'best_error'",
"]",
"=",
"self",
".",
"_best_error",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"outfile",
":",
"json",
".",
"dump",
"(",
"data",
",",
"outfile",
",",
"indent",
"=",
"4",
",",
"sort_keys",
"=",
"True",
")"
] | Save settings to a JSON file
Args:
filename (string): name of the file to save to | [
"Save",
"settings",
"to",
"a",
"JSON",
"file"
] | python | train |
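The serialization step on its own, with invented placeholder settings:

import json

data = {
    'valueRanges': [['int', [0, 10]]],
    'best_values': ['3'],
    'minimize': True,
    'num_employers': 50,
    'best_score': str(0.42),
    'limit': 20,
    'best_error': None,
}
with open('abc_settings.json', 'w') as outfile:
    json.dump(data, outfile, indent=4, sort_keys=True)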
CxAalto/gtfspy | gtfspy/gtfs.py | https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/gtfs.py#L1519-L1549 | def get_straight_line_transfer_distances(self, stop_I=None):
"""
Get (straight line) distances to stations that can be transferred to.
Parameters
----------
stop_I : int, optional
If not specified return all possible transfer distances
Returns
-------
distances: pandas.DataFrame
each row has the following items
from_stop_I: int
to_stop_I: int
d: float or int #distance in meters
"""
if stop_I is not None:
query = u""" SELECT from_stop_I, to_stop_I, d
FROM stop_distances
WHERE
from_stop_I=?
"""
params = (u"{stop_I}".format(stop_I=stop_I),)
else:
query = """ SELECT from_stop_I, to_stop_I, d
FROM stop_distances
"""
params = None
stop_data_df = pd.read_sql_query(query, self.conn, params=params)
return stop_data_df | [
"def",
"get_straight_line_transfer_distances",
"(",
"self",
",",
"stop_I",
"=",
"None",
")",
":",
"if",
"stop_I",
"is",
"not",
"None",
":",
"query",
"=",
"u\"\"\" SELECT from_stop_I, to_stop_I, d\n FROM stop_distances\n WHERE\n from_stop_I=?\n \"\"\"",
"params",
"=",
"(",
"u\"{stop_I}\"",
".",
"format",
"(",
"stop_I",
"=",
"stop_I",
")",
",",
")",
"else",
":",
"query",
"=",
"\"\"\" SELECT from_stop_I, to_stop_I, d\n FROM stop_distances\n \"\"\"",
"params",
"=",
"None",
"stop_data_df",
"=",
"pd",
".",
"read_sql_query",
"(",
"query",
",",
"self",
".",
"conn",
",",
"params",
"=",
"params",
")",
"return",
"stop_data_df"
] | Get (straight line) distances to stations that can be transferred to.
Parameters
----------
stop_I : int, optional
If not specified return all possible transfer distances
Returns
-------
distances: pandas.DataFrame
each row has the following items
from_stop_I: int
to_stop_I: int
d: float or int #distance in meters | [
"Get",
"(",
"straight",
"line",
")",
"distances",
"to",
"stations",
"that",
"can",
"be",
"transferred",
"to",
"."
] | python | valid |
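A standalone demonstration of the parameterized `read_sql_query` pattern above, against an invented in-memory table:

import sqlite3
import pandas as pd

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE stop_distances (from_stop_I INT, to_stop_I INT, d REAL)')
conn.executemany('INSERT INTO stop_distances VALUES (?, ?, ?)',
                 [(1, 2, 120.0), (1, 3, 340.5), (2, 3, 80.0)])

df = pd.read_sql_query('SELECT from_stop_I, to_stop_I, d FROM stop_distances'
                       ' WHERE from_stop_I=?', conn, params=(1,))
print(df)  # only the two rows departing from stop 1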
StyXman/ayrton | ayrton/parser/error.py | https://github.com/StyXman/ayrton/blob/e1eed5c7ef230e3c2340a1f0bf44c72bbdc0debb/ayrton/parser/error.py#L112-L117 | def print_application_traceback(self, space, file=None):
"NOT_RPYTHON: Dump a standard application-level traceback."
if file is None:
file = sys.stderr
self.print_app_tb_only(file)
print >> file, self.errorstr(space) | [
"def",
"print_application_traceback",
"(",
"self",
",",
"space",
",",
"file",
"=",
"None",
")",
":",
"if",
"file",
"is",
"None",
":",
"file",
"=",
"sys",
".",
"stderr",
"self",
".",
"print_app_tb_only",
"(",
"file",
")",
"print",
">>",
"file",
",",
"self",
".",
"errorstr",
"(",
"space",
")"
] | NOT_RPYTHON: Dump a standard application-level traceback. | [
"NOT_RPYTHON",
":",
"Dump",
"a",
"standard",
"application",
"-",
"level",
"traceback",
"."
] | python | train |
openego/eDisGo | edisgo/grid/components.py | https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/grid/components.py#L552-L596 | def timeseries_reactive(self):
"""
Reactive power time series in kvar.
Parameters
-----------
timeseries_reactive : :pandas:`pandas.Series<series>`
Series containing reactive power in kvar.
Returns
-------
:pandas:`pandas.Series<series>` or None
Series containing reactive power time series in kvar. If it is not
set it is tried to be retrieved from `generation_reactive_power`
attribute of global TimeSeries object. If that is not possible
None is returned.
"""
if self._timeseries_reactive is None:
if self.grid.network.timeseries.generation_reactive_power \
is not None:
try:
timeseries = \
self.grid.network.timeseries.generation_reactive_power[
self.type].to_frame('q')
except (KeyError, TypeError):
try:
timeseries = \
self.grid.network.timeseries.generation_reactive_power[
'other'].to_frame('q')
except:
logger.warning(
"No reactive power time series for type {} given. "
"Reactive power time series will be calculated from "
"assumptions in config files and active power "
"timeseries.".format(self.type))
return None
self.power_factor = 'not_applicable'
self.reactive_power_mode = 'not_applicable'
return timeseries * self.nominal_capacity
else:
return None
else:
return self._timeseries_reactive.loc[
self.grid.network.timeseries.timeindex, :] | [
"def",
"timeseries_reactive",
"(",
"self",
")",
":",
"if",
"self",
".",
"_timeseries_reactive",
"is",
"None",
":",
"if",
"self",
".",
"grid",
".",
"network",
".",
"timeseries",
".",
"generation_reactive_power",
"is",
"not",
"None",
":",
"try",
":",
"timeseries",
"=",
"self",
".",
"grid",
".",
"network",
".",
"timeseries",
".",
"generation_reactive_power",
"[",
"self",
".",
"type",
"]",
".",
"to_frame",
"(",
"'q'",
")",
"except",
"(",
"KeyError",
",",
"TypeError",
")",
":",
"try",
":",
"timeseries",
"=",
"self",
".",
"grid",
".",
"network",
".",
"timeseries",
".",
"generation_reactive_power",
"[",
"'other'",
"]",
".",
"to_frame",
"(",
"'q'",
")",
"except",
":",
"logger",
".",
"warning",
"(",
"\"No reactive power time series for type {} given. \"",
"\"Reactive power time series will be calculated from \"",
"\"assumptions in config files and active power \"",
"\"timeseries.\"",
".",
"format",
"(",
"self",
".",
"type",
")",
")",
"return",
"None",
"self",
".",
"power_factor",
"=",
"'not_applicable'",
"self",
".",
"reactive_power_mode",
"=",
"'not_applicable'",
"return",
"timeseries",
"*",
"self",
".",
"nominal_capacity",
"else",
":",
"return",
"None",
"else",
":",
"return",
"self",
".",
"_timeseries_reactive",
".",
"loc",
"[",
"self",
".",
"grid",
".",
"network",
".",
"timeseries",
".",
"timeindex",
",",
":",
"]"
] | Reactive power time series in kvar.
Parameters
-----------
timeseries_reactive : :pandas:`pandas.Seriese<series>`
Series containing reactive power in kvar.
Returns
-------
:pandas:`pandas.Series<series>` or None
Series containing reactive power time series in kvar. If it is not
set it is tried to be retrieved from `generation_reactive_power`
attribute of global TimeSeries object. If that is not possible
None is returned. | [
"Reactive",
"power",
"time",
"series",
"in",
"kvar",
"."
] | python | train |
saltstack/salt | salt/modules/boto_vpc.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_vpc.py#L2471-L2529 | def describe_route_table(route_table_id=None, route_table_name=None,
tags=None, region=None, key=None, keyid=None,
profile=None):
'''
Given route table properties, return route table details if matching table(s) exist.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion boto_vpc.describe_route_table route_table_id='rtb-1f382e7d'
'''
salt.utils.versions.warn_until(
'Neon',
'The \'describe_route_table\' method has been deprecated and '
'replaced by \'describe_route_tables\'.'
)
if not any((route_table_id, route_table_name, tags)):
raise SaltInvocationError('At least one of the following must be specified: '
'route table id, route table name, or tags.')
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
filter_parameters = {'filters': {}}
if route_table_id:
filter_parameters['route_table_ids'] = route_table_id
if route_table_name:
filter_parameters['filters']['tag:Name'] = route_table_name
if tags:
for tag_name, tag_value in six.iteritems(tags):
filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
route_tables = conn.get_all_route_tables(**filter_parameters)
if not route_tables:
return {}
route_table = {}
keys = ['id', 'vpc_id', 'tags', 'routes', 'associations']
route_keys = ['destination_cidr_block', 'gateway_id', 'instance_id', 'interface_id', 'vpc_peering_connection_id']
assoc_keys = ['id', 'main', 'route_table_id', 'subnet_id']
for item in route_tables:
for key in keys:
if hasattr(item, key):
route_table[key] = getattr(item, key)
if key == 'routes':
route_table[key] = _key_iter(key, route_keys, item)
if key == 'associations':
route_table[key] = _key_iter(key, assoc_keys, item)
return route_table
except BotoServerError as e:
return {'error': __utils__['boto.get_error'](e)} | [
"def",
"describe_route_table",
"(",
"route_table_id",
"=",
"None",
",",
"route_table_name",
"=",
"None",
",",
"tags",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"salt",
".",
"utils",
".",
"versions",
".",
"warn_until",
"(",
"'Neon'",
",",
"'The \\'describe_route_table\\' method has been deprecated and '",
"'replaced by \\'describe_route_tables\\'.'",
")",
"if",
"not",
"any",
"(",
"(",
"route_table_id",
",",
"route_table_name",
",",
"tags",
")",
")",
":",
"raise",
"SaltInvocationError",
"(",
"'At least one of the following must be specified: '",
"'route table id, route table name, or tags.'",
")",
"try",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"filter_parameters",
"=",
"{",
"'filters'",
":",
"{",
"}",
"}",
"if",
"route_table_id",
":",
"filter_parameters",
"[",
"'route_table_ids'",
"]",
"=",
"route_table_id",
"if",
"route_table_name",
":",
"filter_parameters",
"[",
"'filters'",
"]",
"[",
"'tag:Name'",
"]",
"=",
"route_table_name",
"if",
"tags",
":",
"for",
"tag_name",
",",
"tag_value",
"in",
"six",
".",
"iteritems",
"(",
"tags",
")",
":",
"filter_parameters",
"[",
"'filters'",
"]",
"[",
"'tag:{0}'",
".",
"format",
"(",
"tag_name",
")",
"]",
"=",
"tag_value",
"route_tables",
"=",
"conn",
".",
"get_all_route_tables",
"(",
"*",
"*",
"filter_parameters",
")",
"if",
"not",
"route_tables",
":",
"return",
"{",
"}",
"route_table",
"=",
"{",
"}",
"keys",
"=",
"[",
"'id'",
",",
"'vpc_id'",
",",
"'tags'",
",",
"'routes'",
",",
"'associations'",
"]",
"route_keys",
"=",
"[",
"'destination_cidr_block'",
",",
"'gateway_id'",
",",
"'instance_id'",
",",
"'interface_id'",
",",
"'vpc_peering_connection_id'",
"]",
"assoc_keys",
"=",
"[",
"'id'",
",",
"'main'",
",",
"'route_table_id'",
",",
"'subnet_id'",
"]",
"for",
"item",
"in",
"route_tables",
":",
"for",
"key",
"in",
"keys",
":",
"if",
"hasattr",
"(",
"item",
",",
"key",
")",
":",
"route_table",
"[",
"key",
"]",
"=",
"getattr",
"(",
"item",
",",
"key",
")",
"if",
"key",
"==",
"'routes'",
":",
"route_table",
"[",
"key",
"]",
"=",
"_key_iter",
"(",
"key",
",",
"route_keys",
",",
"item",
")",
"if",
"key",
"==",
"'associations'",
":",
"route_table",
"[",
"key",
"]",
"=",
"_key_iter",
"(",
"key",
",",
"assoc_keys",
",",
"item",
")",
"return",
"route_table",
"except",
"BotoServerError",
"as",
"e",
":",
"return",
"{",
"'error'",
":",
"__utils__",
"[",
"'boto.get_error'",
"]",
"(",
"e",
")",
"}"
] | Given route table properties, return route table details if matching table(s) exist.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion boto_vpc.describe_route_table route_table_id='rtb-1f382e7d' | [
"Given",
"route",
"table",
"properties",
"return",
"route",
"table",
"details",
"if",
"matching",
"table",
"(",
"s",
")",
"exist",
"."
] | python | train |
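The tag-filter construction from the function above, shown in isolation with invented values:

tags = {'Name': 'prod-rtb', 'Env': 'prod'}
filter_parameters = {'filters': {}}
for tag_name, tag_value in tags.items():
    filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
print(filter_parameters)
# {'filters': {'tag:Name': 'prod-rtb', 'tag:Env': 'prod'}}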
andreikop/qutepart | qutepart/syntax/parser.py | https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/syntax/parser.py#L742-L756 | def _tryMatch(self, textToMatchObject):
"""Try to find themselves in the text.
Returns (count, matchedRule) or (None, None) if doesn't match
"""
for rule in self.context.rules:
ruleTryMatchResult = rule.tryMatch(textToMatchObject)
if ruleTryMatchResult is not None:
_logger.debug('\tmatched rule %s at %d in included context %s/%s',
rule.shortId(),
textToMatchObject.currentColumnIndex,
self.context.parser.syntax.name,
self.context.name)
return ruleTryMatchResult
else:
return None | [
"def",
"_tryMatch",
"(",
"self",
",",
"textToMatchObject",
")",
":",
"for",
"rule",
"in",
"self",
".",
"context",
".",
"rules",
":",
"ruleTryMatchResult",
"=",
"rule",
".",
"tryMatch",
"(",
"textToMatchObject",
")",
"if",
"ruleTryMatchResult",
"is",
"not",
"None",
":",
"_logger",
".",
"debug",
"(",
"'\\tmatched rule %s at %d in included context %s/%s'",
",",
"rule",
".",
"shortId",
"(",
")",
",",
"textToMatchObject",
".",
"currentColumnIndex",
",",
"self",
".",
"context",
".",
"parser",
".",
"syntax",
".",
"name",
",",
"self",
".",
"context",
".",
"name",
")",
"return",
"ruleTryMatchResult",
"else",
":",
"return",
"None"
] | Try to find a match in the text.
Returns (count, matchedRule) or (None, None) if doesn't match | [
"Try",
"to",
"find",
"themselves",
"in",
"the",
"text",
".",
"Returns",
"(",
"count",
"matchedRule",
")",
"or",
"(",
"None",
"None",
")",
"if",
"doesn",
"t",
"match"
] | python | train |
bids-standard/pybids | bids/variables/kollekshuns.py | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/kollekshuns.py#L69-L86 | def merge_variables(variables, **kwargs):
''' Concatenates Variables along row axis.
Args:
variables (list): List of Variables to merge. Variables can have
different names (and all Variables that share a name will be
concatenated together).
Returns:
A list of Variables.
'''
var_dict = OrderedDict()
for v in variables:
if v.name not in var_dict:
var_dict[v.name] = []
var_dict[v.name].append(v)
return [merge_variables(vars_, **kwargs)
for vars_ in list(var_dict.values())] | [
"def",
"merge_variables",
"(",
"variables",
",",
"*",
"*",
"kwargs",
")",
":",
"var_dict",
"=",
"OrderedDict",
"(",
")",
"for",
"v",
"in",
"variables",
":",
"if",
"v",
".",
"name",
"not",
"in",
"var_dict",
":",
"var_dict",
"[",
"v",
".",
"name",
"]",
"=",
"[",
"]",
"var_dict",
"[",
"v",
".",
"name",
"]",
".",
"append",
"(",
"v",
")",
"return",
"[",
"merge_variables",
"(",
"vars_",
",",
"*",
"*",
"kwargs",
")",
"for",
"vars_",
"in",
"list",
"(",
"var_dict",
".",
"values",
"(",
")",
")",
"]"
] | Concatenates Variables along row axis.
Args:
variables (list): List of Variables to merge. Variables can have
different names (and all Variables that share a name will be
concatenated together).
Returns:
A list of Variables. | [
"Concatenates",
"Variables",
"along",
"row",
"axis",
"."
] | python | train |
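The name-grouping step in isolation, with a namedtuple standing in for the real Variable class:

from collections import OrderedDict, namedtuple

Variable = namedtuple('Variable', 'name data')  # invented stand-in
variables = [Variable('rt', 1), Variable('acc', 2), Variable('rt', 3)]

var_dict = OrderedDict()
for v in variables:
    var_dict.setdefault(v.name, []).append(v)
print({name: len(vs) for name, vs in var_dict.items()})  # {'rt': 2, 'acc': 1}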
emory-libraries/eulfedora | eulfedora/syncutil.py | https://github.com/emory-libraries/eulfedora/blob/161826f3fdcdab4007f6fa7dfd9f1ecabc4bcbe4/eulfedora/syncutil.py#L469-L501 | def binarycontent_sections(chunk):
'''Split a chunk of data into sections by start and end binary
content tags.'''
# using string split because it is significantly faster than regex.
# use common text of start and end tags to split the text
# (i.e. without < or </ tag beginning)
binary_content_tag = BINARY_CONTENT_START[1:]
if binary_content_tag not in chunk:
# if no tags are present, don't do any extra work
yield chunk
else:
# split on common portion of foxml:binaryContent
sections = chunk.split(binary_content_tag)
for sec in sections:
extra = b''
# check the end of the section to determine start/end tag
if sec.endswith(b'</'):
extra = sec[-2:]
yield sec[:-2]
elif sec.endswith(b'<'):
extra = sec[-1:]
yield sec[:-1]
else:
yield sec
if extra:
# yield the actual binary content tag
# (delimiter removed by split, but needed for processing)
yield b''.join([extra, binary_content_tag]) | [
"def",
"binarycontent_sections",
"(",
"chunk",
")",
":",
"# using string split because it is significantly faster than regex.",
"# use common text of start and end tags to split the text",
"# (i.e. without < or </ tag beginning)",
"binary_content_tag",
"=",
"BINARY_CONTENT_START",
"[",
"1",
":",
"]",
"if",
"binary_content_tag",
"not",
"in",
"chunk",
":",
"# if no tags are present, don't do any extra work",
"yield",
"chunk",
"else",
":",
"# split on common portion of foxml:binaryContent",
"sections",
"=",
"chunk",
".",
"split",
"(",
"binary_content_tag",
")",
"for",
"sec",
"in",
"sections",
":",
"extra",
"=",
"b''",
"# check the end of the section to determine start/end tag",
"if",
"sec",
".",
"endswith",
"(",
"b'</'",
")",
":",
"extra",
"=",
"sec",
"[",
"-",
"2",
":",
"]",
"yield",
"sec",
"[",
":",
"-",
"2",
"]",
"elif",
"sec",
".",
"endswith",
"(",
"b'<'",
")",
":",
"extra",
"=",
"sec",
"[",
"-",
"1",
":",
"]",
"yield",
"sec",
"[",
":",
"-",
"1",
"]",
"else",
":",
"yield",
"sec",
"if",
"extra",
":",
"# yield the actual binary content tag",
"# (delimiter removed by split, but needed for processing)",
"yield",
"b''",
".",
"join",
"(",
"[",
"extra",
",",
"binary_content_tag",
"]",
")"
] | Split a chunk of data into sections by start and end binary
content tags. | [
"Split",
"a",
"chunk",
"of",
"data",
"into",
"sections",
"by",
"start",
"and",
"end",
"binary",
"content",
"tags",
"."
] | python | train |
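The split-and-reattach-delimiter idea behind `binarycontent_sections`, reduced to a runnable sketch; the tag and payload are simplified stand-ins for the FOXML markup.

TAG = b'tag>'  # shared tail of b'<tag>' and b'</tag>', mirroring the trick above

def sections(chunk, tag=TAG):
    if tag not in chunk:
        yield chunk
        return
    for sec in chunk.split(tag):
        if sec.endswith(b'</'):        # an end tag was split here
            yield sec[:-2]
            yield b'</' + tag
        elif sec.endswith(b'<'):       # a start tag was split here
            yield sec[:-1]
            yield b'<' + tag
        else:
            yield sec

print(list(sections(b'aaa<tag>BIN</tag>bbb')))
# [b'aaa', b'<tag>', b'BIN', b'</tag>', b'bbb']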
portfors-lab/sparkle | sparkle/gui/dialogs/calibration_dlg.py | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/dialogs/calibration_dlg.py#L34-L46 | def maxRange(self):
"""Sets the maximum range for the currently selection calibration,
determined from its range of values store on file
"""
try:
x, freqs = self.datafile.get_calibration(str(self.ui.calChoiceCmbbx.currentText()), self.calf)
self.ui.frangeLowSpnbx.setValue(freqs[0])
self.ui.frangeHighSpnbx.setValue(freqs[-1])
print 'set freq range', freqs[0], freqs[-1], freqs[0], freqs[-1]
except IOError:
QtGui.QMessageBox.warning(self, "File Read Error", "Unable to read calibration file")
except KeyError:
QtGui.QMessageBox.warning(self, "File Data Error", "Unable to find data in file") | [
"def",
"maxRange",
"(",
"self",
")",
":",
"try",
":",
"x",
",",
"freqs",
"=",
"self",
".",
"datafile",
".",
"get_calibration",
"(",
"str",
"(",
"self",
".",
"ui",
".",
"calChoiceCmbbx",
".",
"currentText",
"(",
")",
")",
",",
"self",
".",
"calf",
")",
"self",
".",
"ui",
".",
"frangeLowSpnbx",
".",
"setValue",
"(",
"freqs",
"[",
"0",
"]",
")",
"self",
".",
"ui",
".",
"frangeHighSpnbx",
".",
"setValue",
"(",
"freqs",
"[",
"-",
"1",
"]",
")",
"print",
"'set freq range'",
",",
"freqs",
"[",
"0",
"]",
",",
"freqs",
"[",
"-",
"1",
"]",
",",
"freqs",
"[",
"0",
"]",
",",
"freqs",
"[",
"-",
"1",
"]",
"except",
"IOError",
":",
"QtGui",
".",
"QMessageBox",
".",
"warning",
"(",
"self",
",",
"\"File Read Error\"",
",",
"\"Unable to read calibration file\"",
")",
"except",
"KeyError",
":",
"QtGui",
".",
"QMessageBox",
".",
"warning",
"(",
"self",
",",
"\"File Data Error\"",
",",
"\"Unable to find data in file\"",
")"
] | Sets the maximum range for the currently selected calibration,
determined from its range of values stored on file | [
"Sets",
"the",
"maximum",
"range",
"for",
"the",
"currently",
"selection",
"calibration",
"determined",
"from",
"its",
"range",
"of",
"values",
"store",
"on",
"file"
] | python | train |
codenerix/django-codenerix | codenerix/forms.py | https://github.com/codenerix/django-codenerix/blob/1f5527b352141caaee902b37b2648791a06bd57d/codenerix/forms.py#L95-L502 | def get_groups(self, gs=None, processed=[], initial=True):
'''
<--------------------------------------- 12 columns ------------------------------------>
<--- 6 columns ---> <--- 6 columns --->
------------------------------------------ ------------------------------------------
| Info | | Personal |
|==========================================| |==========================================|
| ----------------- ------------------ | | |
| | Passport | | Name | | | Phone Zipcode |
| |=================| | [.....] [.....] | | | [...........................] [.......] |
| | CID Country | | <- 6 -> <- 6 -> | | | <--- 8 columns ---> <-4 col-> |
| | [.....] [.....] | | | | | |
| | <- 6 -> <- 6 -> | ----------------- | | Address |
| ----------------- | | [.....................................] |
------------------------------------------ | <--- 12 columns ---> |
| [..] number |
| <--- 12 columns ---> |
| |
------------------------------------------
group = [
(_('Info'),(6,'#8a6d3b','#fcf8e3','center'),
(_('Identification'),6,
["cid",6],
["country",6],
),
(None,6,
["name",None,6],
["surname",None,6,False],
),
),
(_('Personal'),6,
["phone",None,8],
["zipcode",None,4],
["address",None,12],
["number",None,12, True],
),
]
Group: it is defined as tuple with 3 or more elements:
Grammar: (<Name>, <Attributes>, <Element1>, <Element2>, ..., <ElementN>)
If <Name> is None: no name will be given to the group and no panel decoration will be shown
If <Size in columns> is None: default of 6 will be used
<Attributes>:
it can be an integer that represent the size in columns
it can be a tuple with several attributes where each element represents:
(<Size in columns>,'#<Font color>','#<Background color>','<Alignment>')
<Element>:
it can be a Group
it can be a Field
Examples:
('Info', 6, ["name",6], ["surname",6]) -> Info panel using 6 columns with 2 boxes 6 columns for each with name and surname inputs
('Info', (6,None,'#fcf8e3','center'), ["name",6], ["surname",6]) -> Info panel using 6 columns with a yellow brackground in centered title, 2 boxes, 6 columns for each with name and surname inputs
('Info', 12, ('Name', 6, ["name",12]), ('Surname',6, ["surname",12])) -> Info panel using 12 columns with 2 panels inside
of 6 columns each named "Name" and "Surname" and inside each of them an input "name" and "surname" where it belongs.
Field: must be a list with at least 1 element in it:
Grammar: [<Name of field>, <Size in columns>, <Label>]
<Name of field>:
This must be filled always
It is the input's name inside the form
Must exists as a form element or as a grouped form element
<Size in columns>:
Size of the input in columns
If it is not defined or if it is defined as None: default of 6 will be used
<Label>:
If it is defined as False: the label for this field will not be shown
If it is not defined or if it is defined as None: default of True will be used (default input's label will be shown)
If it is a string: this string will be shown as a label
Examples:
['age'] Input 'age' will be shown with 6 columns and its default label
['age',8] Input 'age' will be shown with 8 columns and its default label
['age', None, False] Input 'age' will be shown with 6 columns and NO LABEL
['age',8,False] Input 'age' will be shown with 8 columns and NO LABEL
['age',8,_("Age in days")] Input 'age' will be shown with 8 columns and translated label text "Age in days" to user's language
['age',8,_("Age in days"), True] Input 'age' will be shown with 8 columns and translated label text "Age in days" to user's language, and input inline with label
['age',6, None, None, None, None, None, ["ng-click=functionjs('param1')", "ng-change=functionjs2()"]] Input 'age' with extras functions
['age',None,None,None,None, 'filter'] Input 'age' with extras filter ONLY DETAILS
['age',6, {'color': 'red'}] Input 'age' will be shown with a red title
'''
# Check if language is set
if not self.__language:
raise IOError("ERROR: No language suplied!")
# Initialize the list
if initial:
processed = []
# Where to look for fields
if 'list_fields' in dir(self):
list_fields = self.list_fields
check_system = "html_name"
else:
list_fields = self
check_system = "name"
# Default attributes for fields
attributes = [
('columns', 6),
('color', None),
('bgcolor', None),
('textalign', None),
('inline', False), # input in line with label
('label', True),
('extra', None),
('extra_div', None),
('foreign_info', {}),
]
labels = [x[0] for x in attributes]
# Get groups if none was given
if gs is None:
gs = self.__groups__()
# Prepare the answer
groups = []
# Prepare focus control
focus_first = None
focus_must = None
# html helper for groups and fields
html_helper = self.html_helper()
# Start processing
for g in gs:
token = {}
token['name'] = g[0]
if token['name'] in html_helper:
if 'pre' in html_helper[token['name']]:
token["html_helper_pre"] = html_helper[token['name']]['pre']
if 'post' in html_helper[token['name']]:
token["html_helper_post"] = html_helper[token['name']]['post']
styles = g[1]
if type(styles) is tuple:
if len(styles) >= 1:
token['columns'] = g[1][0]
if len(styles) >= 2:
token['color'] = g[1][1]
if len(styles) >= 3:
token['bgcolor'] = g[1][2]
if len(styles) >= 4:
token['textalign'] = g[1][3]
if len(styles) >= 5:
token['inline'] = g[1][4]
if len(styles) >= 7:
token['extra'] = g[1][5]
if len(styles) >= 8:
token['extra_div'] = g[1][6]
else:
token['columns'] = g[1]
fs = g[2:]
fields = []
for f in fs:
# Field
atr = {}
# Decide whether this is a Group or not
if type(f) == tuple:
# Recursive
fields += self.get_groups([list(f)], processed, False)
else:
try:
list_type = [str, unicode, ]
except NameError:
list_type = [str, ]
# Check if it is a list
if type(f) == list:
# This is a field with attributes, get the name
field = f[0]
if html_helper and token['name'] in html_helper and 'items' in html_helper[token['name']] and field in html_helper[token['name']]['items']:
if 'pre' in html_helper[token['name']]['items'][field]:
atr["html_helper_pre"] = html_helper[token['name']]['items'][field]['pre']
if 'post' in html_helper[token['name']]['items'][field]:
atr["html_helper_post"] = html_helper[token['name']]['items'][field]['post']
# Process each attribute (if any)
dictionary = False
for idx, element in enumerate(f[1:]):
if type(element) == dict:
dictionary = True
for key in element.keys():
if key in labels:
atr[key] = element[key]
else:
raise IOError("Unknown attribute '{0}' as field '{1}' in list of fields".format(key, field))
else:
if not dictionary:
if element is not None:
atr[attributes[idx][0]] = element
else:
raise IOError("We already processed a dicionary element in this list of fields, you can not add anoother type of elements to it, you must keep going with dictionaries")
elif type(f) in list_type:
field = f
else:
raise IOError("Uknown element type '{0}' inside group '{1}'".format(type(f), token['name']))
# Get the Django Field object
found = None
for infield in list_fields:
if infield.__dict__[check_system] == field:
found = infield
break
if found:
# Get attributes (required and original attributes)
wrequired = found.field.widget.is_required
wattrs = found.field.widget.attrs
# Fill base attributes
atr['name'] = found.html_name
atr['input'] = found
atr['focus'] = False
# Set focus
if focus_must is None:
if focus_first is None:
focus_first = atr
if wrequired:
focus_must = atr
# Autocomplete
if 'autofill' in dir(self.Meta):
autofill = self.Meta.autofill.get(found.html_name, None)
atr['autofill'] = autofill
if autofill:
# Check format of the request
autokind = autofill[0]
if type(autokind) == str:
# Using new format
if autokind == 'select':
# If autofill is True for this field set the DynamicSelect widget
found.field.widget = DynamicSelect(wattrs)
elif autokind == 'multiselect':
# If autofill is True for this field set the DynamicSelect widget
found.field.widget = MultiDynamicSelect(wattrs)
elif autokind == 'input':
# If autofill is True for this field set the DynamicSelect widget
found.field.widget = DynamicInput(wattrs)
else:
raise IOError("Autofill filled using new format but autokind is '{}' and I only know 'input' or 'select'".format(autokind))
# Configure the field
found.field.widget.is_required = wrequired
found.field.widget.form_name = self.form_name
found.field.widget.field_name = infield.html_name
found.field.widget.autofill_deepness = autofill[1]
found.field.widget.autofill_url = autofill[2]
found.field.widget.autofill = autofill[3:]
else:
# Get old information [COMPATIBILITY WITH OLD VERSION]
# If autofill is True for this field set the DynamicSelect widget
found.field.widget = DynamicSelect(wattrs)
found.field.widget.is_required = wrequired
found.field.widget.form_name = self.form_name
found.field.widget.field_name = infield.html_name
found.field.widget.autofill_deepness = autofill[0]
found.field.widget.autofill_url = autofill[1]
found.field.widget.autofill = autofill[2:]
else:
# Set we don't have autofill for this field
atr['autofill'] = None
# Check if we have to replace the widget with a newer one
if isinstance(found.field.widget, Select) and not isinstance(found.field.widget, DynamicSelect):
if not isinstance(found.field.widget, MultiStaticSelect):
found.field.widget = StaticSelect(wattrs)
found.field.widget.choices = found.field.choices
found.field.widget.is_required = wrequired
found.field.widget.form_name = self.form_name
found.field.widget.field_name = infield.html_name
# Fill all attributes
for (attribute, default) in attributes:
if attribute not in atr.keys():
atr[attribute] = default
# Fill label
if atr['label'] is True:
atr['label'] = found.label
# Set language
flang = getattr(found.field, "set_language", None)
if flang:
flang(self.__language)
flang = getattr(found.field.widget, "set_language", None)
if flang:
flang(self.__language)
# Attach the element
fields.append(atr)
# Remember we have processed it
processed.append(found.__dict__[check_system])
else:
raise IOError("Unknown field '{0}' specified in group '{1}'".format(f, token['name']))
token['fields'] = fields
groups.append(token)
# Add the rest of attributes we didn't use yet
if initial:
fields = []
for infield in list_fields:
if infield.__dict__[check_system] not in processed:
# Get attributes (required and original attributes)
wattrs = infield.field.widget.attrs
wrequired = infield.field.widget.is_required
# Prepare attr
atr = {}
# Fill base attributes
atr['name'] = infield.html_name
atr['input'] = infield
atr['focus'] = False
# Set focus
if focus_must is None:
if focus_first is None:
focus_first = atr
if wrequired:
focus_must = atr
# Autocomplete
if 'autofill' in dir(self.Meta):
autofill = self.Meta.autofill.get(infield.html_name, None)
atr['autofill'] = autofill
if autofill:
# Check format of the request
autokind = autofill[0]
if type(autokind) == str:
# Get old information
# Using new format
if autokind == 'select':
# If autofill is True for this field set the DynamicSelect widget
infield.field.widget = DynamicSelect(wattrs)
elif autokind == 'multiselect':
# If autofill is True for this field set the DynamicSelect widget
infield.field.widget = MultiDynamicSelect(wattrs)
elif autokind == 'input':
# If autofill is True for this field set the DynamicSelect widget
infield.field.widget = DynamicInput(wattrs)
else:
raise IOError("Autofill filled using new format but autokind is '{}' and I only know 'input' or 'select'".format(autokind))
# Configure the field
infield.field.widget.is_required = wrequired
infield.field.widget.form_name = self.form_name
infield.field.widget.field_name = infield.html_name
infield.field.widget.autofill_deepness = autofill[1]
infield.field.widget.autofill_url = autofill[2]
infield.field.widget.autofill = autofill[3:]
else:
# Get old information [COMPATIBILITY WITH OLD VERSION]
# If autofill is True for this field set the DynamicSelect widget
infield.field.widget = DynamicSelect(wattrs)
infield.field.widget.is_required = wrequired
infield.field.widget.form_name = self.form_name
infield.field.widget.field_name = infield.html_name
infield.field.widget.autofill_deepness = autofill[0]
infield.field.widget.autofill_url = autofill[1]
infield.field.widget.autofill = autofill[2:]
else:
# Set we don't have autofill for this field
atr['autofill'] = None
# Check if we have to replace the widget with a newer one
if isinstance(infield.field.widget, Select) and not isinstance(infield.field.widget, DynamicSelect):
if isinstance(infield.field, NullBooleanField):
infield.field.widget = CheckboxInput(wattrs)
elif not isinstance(infield.field.widget, MultiStaticSelect):
infield.field.widget = StaticSelect(wattrs)
if hasattr(infield.field.widget, 'choices') and hasattr(infield.field, 'choices'):
infield.field.widget.choices = infield.field.choices
infield.field.widget.is_required = wrequired
infield.field.widget.form_name = self.form_name
infield.field.widget.field_name = infield.html_name
# Fill all attributes
for (attribute, default) in attributes:
if attribute not in atr.keys():
atr[attribute] = default
# Fill label
if atr['label'] is True:
atr['label'] = infield.label
# Set language
flang = getattr(infield.field, "set_language", None)
if flang:
flang(self.__language)
flang = getattr(infield.field.widget, "set_language", None)
if flang:
flang(self.__language)
# Attach the attribute
fields.append(atr)
# Save the new elements
if fields:
groups.append({'name': None, 'columns': 12, 'fields': fields})
# Set focus
if focus_must:
focus_must['focus'] = True
elif focus_first is not None:
focus_first['focus'] = True
# Return the resulting groups
return groups | [
"def",
"get_groups",
"(",
"self",
",",
"gs",
"=",
"None",
",",
"processed",
"=",
"[",
"]",
",",
"initial",
"=",
"True",
")",
":",
"# Check if language is set",
"if",
"not",
"self",
".",
"__language",
":",
"raise",
"IOError",
"(",
"\"ERROR: No language suplied!\"",
")",
"# Initialize the list",
"if",
"initial",
":",
"processed",
"=",
"[",
"]",
"# Where to look for fields",
"if",
"'list_fields'",
"in",
"dir",
"(",
"self",
")",
":",
"list_fields",
"=",
"self",
".",
"list_fields",
"check_system",
"=",
"\"html_name\"",
"else",
":",
"list_fields",
"=",
"self",
"check_system",
"=",
"\"name\"",
"# Default attributes for fields",
"attributes",
"=",
"[",
"(",
"'columns'",
",",
"6",
")",
",",
"(",
"'color'",
",",
"None",
")",
",",
"(",
"'bgcolor'",
",",
"None",
")",
",",
"(",
"'textalign'",
",",
"None",
")",
",",
"(",
"'inline'",
",",
"False",
")",
",",
"# input in line with label",
"(",
"'label'",
",",
"True",
")",
",",
"(",
"'extra'",
",",
"None",
")",
",",
"(",
"'extra_div'",
",",
"None",
")",
",",
"(",
"'foreign_info'",
",",
"{",
"}",
")",
",",
"]",
"labels",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"attributes",
"]",
"# Get groups if none was given",
"if",
"gs",
"is",
"None",
":",
"gs",
"=",
"self",
".",
"__groups__",
"(",
")",
"# Prepare the answer",
"groups",
"=",
"[",
"]",
"# Prepare focus control",
"focus_first",
"=",
"None",
"focus_must",
"=",
"None",
"# html helper for groups and fields",
"html_helper",
"=",
"self",
".",
"html_helper",
"(",
")",
"# Start processing",
"for",
"g",
"in",
"gs",
":",
"token",
"=",
"{",
"}",
"token",
"[",
"'name'",
"]",
"=",
"g",
"[",
"0",
"]",
"if",
"token",
"[",
"'name'",
"]",
"in",
"html_helper",
":",
"if",
"'pre'",
"in",
"html_helper",
"[",
"token",
"[",
"'name'",
"]",
"]",
":",
"token",
"[",
"\"html_helper_pre\"",
"]",
"=",
"html_helper",
"[",
"token",
"[",
"'name'",
"]",
"]",
"[",
"'pre'",
"]",
"if",
"'post'",
"in",
"html_helper",
"[",
"token",
"[",
"'name'",
"]",
"]",
":",
"token",
"[",
"\"html_helper_post\"",
"]",
"=",
"html_helper",
"[",
"token",
"[",
"'name'",
"]",
"]",
"[",
"'post'",
"]",
"styles",
"=",
"g",
"[",
"1",
"]",
"if",
"type",
"(",
"styles",
")",
"is",
"tuple",
":",
"if",
"len",
"(",
"styles",
")",
">=",
"1",
":",
"token",
"[",
"'columns'",
"]",
"=",
"g",
"[",
"1",
"]",
"[",
"0",
"]",
"if",
"len",
"(",
"styles",
")",
">=",
"2",
":",
"token",
"[",
"'color'",
"]",
"=",
"g",
"[",
"1",
"]",
"[",
"1",
"]",
"if",
"len",
"(",
"styles",
")",
">=",
"3",
":",
"token",
"[",
"'bgcolor'",
"]",
"=",
"g",
"[",
"1",
"]",
"[",
"2",
"]",
"if",
"len",
"(",
"styles",
")",
">=",
"4",
":",
"token",
"[",
"'textalign'",
"]",
"=",
"g",
"[",
"1",
"]",
"[",
"3",
"]",
"if",
"len",
"(",
"styles",
")",
">=",
"5",
":",
"token",
"[",
"'inline'",
"]",
"=",
"g",
"[",
"1",
"]",
"[",
"4",
"]",
"if",
"len",
"(",
"styles",
")",
">=",
"7",
":",
"token",
"[",
"'extra'",
"]",
"=",
"g",
"[",
"1",
"]",
"[",
"5",
"]",
"if",
"len",
"(",
"styles",
")",
">=",
"8",
":",
"token",
"[",
"'extra_div'",
"]",
"=",
"g",
"[",
"1",
"]",
"[",
"6",
"]",
"else",
":",
"token",
"[",
"'columns'",
"]",
"=",
"g",
"[",
"1",
"]",
"fs",
"=",
"g",
"[",
"2",
":",
"]",
"fields",
"=",
"[",
"]",
"for",
"f",
"in",
"fs",
":",
"# Field",
"atr",
"=",
"{",
"}",
"# Decide weather this is a Group or not",
"if",
"type",
"(",
"f",
")",
"==",
"tuple",
":",
"# Recursive",
"fields",
"+=",
"self",
".",
"get_groups",
"(",
"[",
"list",
"(",
"f",
")",
"]",
",",
"processed",
",",
"False",
")",
"else",
":",
"try",
":",
"list_type",
"=",
"[",
"str",
",",
"unicode",
",",
"]",
"except",
"NameError",
":",
"list_type",
"=",
"[",
"str",
",",
"]",
"# Check if it is a list",
"if",
"type",
"(",
"f",
")",
"==",
"list",
":",
"# This is a field with attributes, get the name",
"field",
"=",
"f",
"[",
"0",
"]",
"if",
"html_helper",
"and",
"token",
"[",
"'name'",
"]",
"in",
"html_helper",
"and",
"'items'",
"in",
"html_helper",
"[",
"token",
"[",
"'name'",
"]",
"]",
"and",
"field",
"in",
"html_helper",
"[",
"token",
"[",
"'name'",
"]",
"]",
"[",
"'items'",
"]",
":",
"if",
"'pre'",
"in",
"html_helper",
"[",
"token",
"[",
"'name'",
"]",
"]",
"[",
"'items'",
"]",
"[",
"field",
"]",
":",
"atr",
"[",
"\"html_helper_pre\"",
"]",
"=",
"html_helper",
"[",
"token",
"[",
"'name'",
"]",
"]",
"[",
"'items'",
"]",
"[",
"field",
"]",
"[",
"'pre'",
"]",
"if",
"'post'",
"in",
"html_helper",
"[",
"token",
"[",
"'name'",
"]",
"]",
"[",
"'items'",
"]",
"[",
"field",
"]",
":",
"atr",
"[",
"\"html_helper_post\"",
"]",
"=",
"html_helper",
"[",
"token",
"[",
"'name'",
"]",
"]",
"[",
"'items'",
"]",
"[",
"field",
"]",
"[",
"'post'",
"]",
"# Process each attribute (if any)",
"dictionary",
"=",
"False",
"for",
"idx",
",",
"element",
"in",
"enumerate",
"(",
"f",
"[",
"1",
":",
"]",
")",
":",
"if",
"type",
"(",
"element",
")",
"==",
"dict",
":",
"dictionary",
"=",
"True",
"for",
"key",
"in",
"element",
".",
"keys",
"(",
")",
":",
"if",
"key",
"in",
"labels",
":",
"atr",
"[",
"key",
"]",
"=",
"element",
"[",
"key",
"]",
"else",
":",
"raise",
"IOError",
"(",
"\"Unknown attribute '{0}' as field '{1}' in list of fields\"",
".",
"format",
"(",
"key",
",",
"field",
")",
")",
"else",
":",
"if",
"not",
"dictionary",
":",
"if",
"element",
"is",
"not",
"None",
":",
"atr",
"[",
"attributes",
"[",
"idx",
"]",
"[",
"0",
"]",
"]",
"=",
"element",
"else",
":",
"raise",
"IOError",
"(",
"\"We already processed a dicionary element in this list of fields, you can not add anoother type of elements to it, you must keep going with dictionaries\"",
")",
"elif",
"type",
"(",
"f",
")",
"in",
"list_type",
":",
"field",
"=",
"f",
"else",
":",
"raise",
"IOError",
"(",
"\"Uknown element type '{0}' inside group '{1}'\"",
".",
"format",
"(",
"type",
"(",
"f",
")",
",",
"token",
"[",
"'name'",
"]",
")",
")",
"# Get the Django Field object",
"found",
"=",
"None",
"for",
"infield",
"in",
"list_fields",
":",
"if",
"infield",
".",
"__dict__",
"[",
"check_system",
"]",
"==",
"field",
":",
"found",
"=",
"infield",
"break",
"if",
"found",
":",
"# Get attributes (required and original attributes)",
"wrequired",
"=",
"found",
".",
"field",
".",
"widget",
".",
"is_required",
"wattrs",
"=",
"found",
".",
"field",
".",
"widget",
".",
"attrs",
"# Fill base attributes",
"atr",
"[",
"'name'",
"]",
"=",
"found",
".",
"html_name",
"atr",
"[",
"'input'",
"]",
"=",
"found",
"atr",
"[",
"'focus'",
"]",
"=",
"False",
"# Set focus",
"if",
"focus_must",
"is",
"None",
":",
"if",
"focus_first",
"is",
"None",
":",
"focus_first",
"=",
"atr",
"if",
"wrequired",
":",
"focus_must",
"=",
"atr",
"# Autocomplete",
"if",
"'autofill'",
"in",
"dir",
"(",
"self",
".",
"Meta",
")",
":",
"autofill",
"=",
"self",
".",
"Meta",
".",
"autofill",
".",
"get",
"(",
"found",
".",
"html_name",
",",
"None",
")",
"atr",
"[",
"'autofill'",
"]",
"=",
"autofill",
"if",
"autofill",
":",
"# Check format of the request",
"autokind",
"=",
"autofill",
"[",
"0",
"]",
"if",
"type",
"(",
"autokind",
")",
"==",
"str",
":",
"# Using new format",
"if",
"autokind",
"==",
"'select'",
":",
"# If autofill is True for this field set the DynamicSelect widget",
"found",
".",
"field",
".",
"widget",
"=",
"DynamicSelect",
"(",
"wattrs",
")",
"elif",
"autokind",
"==",
"'multiselect'",
":",
"# If autofill is True for this field set the DynamicSelect widget",
"found",
".",
"field",
".",
"widget",
"=",
"MultiDynamicSelect",
"(",
"wattrs",
")",
"elif",
"autokind",
"==",
"'input'",
":",
"# If autofill is True for this field set the DynamicSelect widget",
"found",
".",
"field",
".",
"widget",
"=",
"DynamicInput",
"(",
"wattrs",
")",
"else",
":",
"raise",
"IOError",
"(",
"\"Autofill filled using new format but autokind is '{}' and I only know 'input' or 'select'\"",
".",
"format",
"(",
"autokind",
")",
")",
"# Configure the field",
"found",
".",
"field",
".",
"widget",
".",
"is_required",
"=",
"wrequired",
"found",
".",
"field",
".",
"widget",
".",
"form_name",
"=",
"self",
".",
"form_name",
"found",
".",
"field",
".",
"widget",
".",
"field_name",
"=",
"infield",
".",
"html_name",
"found",
".",
"field",
".",
"widget",
".",
"autofill_deepness",
"=",
"autofill",
"[",
"1",
"]",
"found",
".",
"field",
".",
"widget",
".",
"autofill_url",
"=",
"autofill",
"[",
"2",
"]",
"found",
".",
"field",
".",
"widget",
".",
"autofill",
"=",
"autofill",
"[",
"3",
":",
"]",
"else",
":",
"# Get old information [COMPATIBILITY WITH OLD VERSION]",
"# If autofill is True for this field set the DynamicSelect widget",
"found",
".",
"field",
".",
"widget",
"=",
"DynamicSelect",
"(",
"wattrs",
")",
"found",
".",
"field",
".",
"widget",
".",
"is_required",
"=",
"wrequired",
"found",
".",
"field",
".",
"widget",
".",
"form_name",
"=",
"self",
".",
"form_name",
"found",
".",
"field",
".",
"widget",
".",
"field_name",
"=",
"infield",
".",
"html_name",
"found",
".",
"field",
".",
"widget",
".",
"autofill_deepness",
"=",
"autofill",
"[",
"0",
"]",
"found",
".",
"field",
".",
"widget",
".",
"autofill_url",
"=",
"autofill",
"[",
"1",
"]",
"found",
".",
"field",
".",
"widget",
".",
"autofill",
"=",
"autofill",
"[",
"2",
":",
"]",
"else",
":",
"# Set we don't have autofill for this field",
"atr",
"[",
"'autofill'",
"]",
"=",
"None",
"# Check if we have to replace the widget with a newer one",
"if",
"isinstance",
"(",
"found",
".",
"field",
".",
"widget",
",",
"Select",
")",
"and",
"not",
"isinstance",
"(",
"found",
".",
"field",
".",
"widget",
",",
"DynamicSelect",
")",
":",
"if",
"not",
"isinstance",
"(",
"found",
".",
"field",
".",
"widget",
",",
"MultiStaticSelect",
")",
":",
"found",
".",
"field",
".",
"widget",
"=",
"StaticSelect",
"(",
"wattrs",
")",
"found",
".",
"field",
".",
"widget",
".",
"choices",
"=",
"found",
".",
"field",
".",
"choices",
"found",
".",
"field",
".",
"widget",
".",
"is_required",
"=",
"wrequired",
"found",
".",
"field",
".",
"widget",
".",
"form_name",
"=",
"self",
".",
"form_name",
"found",
".",
"field",
".",
"widget",
".",
"field_name",
"=",
"infield",
".",
"html_name",
"# Fill all attributes",
"for",
"(",
"attribute",
",",
"default",
")",
"in",
"attributes",
":",
"if",
"attribute",
"not",
"in",
"atr",
".",
"keys",
"(",
")",
":",
"atr",
"[",
"attribute",
"]",
"=",
"default",
"# Fill label",
"if",
"atr",
"[",
"'label'",
"]",
"is",
"True",
":",
"atr",
"[",
"'label'",
"]",
"=",
"found",
".",
"label",
"# Set language",
"flang",
"=",
"getattr",
"(",
"found",
".",
"field",
",",
"\"set_language\"",
",",
"None",
")",
"if",
"flang",
":",
"flang",
"(",
"self",
".",
"__language",
")",
"flang",
"=",
"getattr",
"(",
"found",
".",
"field",
".",
"widget",
",",
"\"set_language\"",
",",
"None",
")",
"if",
"flang",
":",
"flang",
"(",
"self",
".",
"__language",
")",
"# Attach the element",
"fields",
".",
"append",
"(",
"atr",
")",
"# Remember we have processed it",
"processed",
".",
"append",
"(",
"found",
".",
"__dict__",
"[",
"check_system",
"]",
")",
"else",
":",
"raise",
"IOError",
"(",
"\"Unknown field '{0}' specified in group '{1}'\"",
".",
"format",
"(",
"f",
",",
"token",
"[",
"'name'",
"]",
")",
")",
"token",
"[",
"'fields'",
"]",
"=",
"fields",
"groups",
".",
"append",
"(",
"token",
")",
"# Add the rest of attributes we didn't use yet",
"if",
"initial",
":",
"fields",
"=",
"[",
"]",
"for",
"infield",
"in",
"list_fields",
":",
"if",
"infield",
".",
"__dict__",
"[",
"check_system",
"]",
"not",
"in",
"processed",
":",
"# Get attributes (required and original attributes)",
"wattrs",
"=",
"infield",
".",
"field",
".",
"widget",
".",
"attrs",
"wrequired",
"=",
"infield",
".",
"field",
".",
"widget",
".",
"is_required",
"# Prepare attr",
"atr",
"=",
"{",
"}",
"# Fill base attributes",
"atr",
"[",
"'name'",
"]",
"=",
"infield",
".",
"html_name",
"atr",
"[",
"'input'",
"]",
"=",
"infield",
"atr",
"[",
"'focus'",
"]",
"=",
"False",
"# Set focus",
"if",
"focus_must",
"is",
"None",
":",
"if",
"focus_first",
"is",
"None",
":",
"focus_first",
"=",
"atr",
"if",
"wrequired",
":",
"focus_must",
"=",
"atr",
"# Autocomplete",
"if",
"'autofill'",
"in",
"dir",
"(",
"self",
".",
"Meta",
")",
":",
"autofill",
"=",
"self",
".",
"Meta",
".",
"autofill",
".",
"get",
"(",
"infield",
".",
"html_name",
",",
"None",
")",
"atr",
"[",
"'autofill'",
"]",
"=",
"autofill",
"if",
"autofill",
":",
"# Check format of the request",
"autokind",
"=",
"autofill",
"[",
"0",
"]",
"if",
"type",
"(",
"autokind",
")",
"==",
"str",
":",
"# Get old information",
"# Using new format",
"if",
"autokind",
"==",
"'select'",
":",
"# If autofill is True for this field set the DynamicSelect widget",
"infield",
".",
"field",
".",
"widget",
"=",
"DynamicSelect",
"(",
"wattrs",
")",
"elif",
"autokind",
"==",
"'multiselect'",
":",
"# If autofill is True for this field set the DynamicSelect widget",
"infield",
".",
"field",
".",
"widget",
"=",
"MultiDynamicSelect",
"(",
"wattrs",
")",
"elif",
"autokind",
"==",
"'input'",
":",
"# If autofill is True for this field set the DynamicSelect widget",
"infield",
".",
"field",
".",
"widget",
"=",
"DynamicInput",
"(",
"wattrs",
")",
"else",
":",
"raise",
"IOError",
"(",
"\"Autofill filled using new format but autokind is '{}' and I only know 'input' or 'select'\"",
".",
"format",
"(",
"autokind",
")",
")",
"# Configure the field",
"infield",
".",
"field",
".",
"widget",
".",
"is_required",
"=",
"wrequired",
"infield",
".",
"field",
".",
"widget",
".",
"form_name",
"=",
"self",
".",
"form_name",
"infield",
".",
"field",
".",
"widget",
".",
"field_name",
"=",
"infield",
".",
"html_name",
"infield",
".",
"field",
".",
"widget",
".",
"autofill_deepness",
"=",
"autofill",
"[",
"1",
"]",
"infield",
".",
"field",
".",
"widget",
".",
"autofill_url",
"=",
"autofill",
"[",
"2",
"]",
"infield",
".",
"field",
".",
"widget",
".",
"autofill",
"=",
"autofill",
"[",
"3",
":",
"]",
"else",
":",
"# Get old information [COMPATIBILITY WITH OLD VERSION]",
"# If autofill is True for this field set the DynamicSelect widget",
"infield",
".",
"field",
".",
"widget",
"=",
"DynamicSelect",
"(",
"wattrs",
")",
"infield",
".",
"field",
".",
"widget",
".",
"is_required",
"=",
"wrequired",
"infield",
".",
"field",
".",
"widget",
".",
"form_name",
"=",
"self",
".",
"form_name",
"infield",
".",
"field",
".",
"widget",
".",
"field_name",
"=",
"infield",
".",
"html_name",
"infield",
".",
"field",
".",
"widget",
".",
"autofill_deepness",
"=",
"autofill",
"[",
"0",
"]",
"infield",
".",
"field",
".",
"widget",
".",
"autofill_url",
"=",
"autofill",
"[",
"1",
"]",
"infield",
".",
"field",
".",
"widget",
".",
"autofill",
"=",
"autofill",
"[",
"2",
":",
"]",
"else",
":",
"# Set we don't have autofill for this field",
"atr",
"[",
"'autofill'",
"]",
"=",
"None",
"# Check if we have to replace the widget with a newer one",
"if",
"isinstance",
"(",
"infield",
".",
"field",
".",
"widget",
",",
"Select",
")",
"and",
"not",
"isinstance",
"(",
"infield",
".",
"field",
".",
"widget",
",",
"DynamicSelect",
")",
":",
"if",
"isinstance",
"(",
"infield",
".",
"field",
",",
"NullBooleanField",
")",
":",
"infield",
".",
"field",
".",
"widget",
"=",
"CheckboxInput",
"(",
"wattrs",
")",
"elif",
"not",
"isinstance",
"(",
"infield",
".",
"field",
".",
"widget",
",",
"MultiStaticSelect",
")",
":",
"infield",
".",
"field",
".",
"widget",
"=",
"StaticSelect",
"(",
"wattrs",
")",
"if",
"hasattr",
"(",
"infield",
".",
"field",
".",
"widget",
",",
"'choices'",
")",
"and",
"hasattr",
"(",
"infield",
".",
"field",
",",
"'choices'",
")",
":",
"infield",
".",
"field",
".",
"widget",
".",
"choices",
"=",
"infield",
".",
"field",
".",
"choices",
"infield",
".",
"field",
".",
"widget",
".",
"is_required",
"=",
"wrequired",
"infield",
".",
"field",
".",
"widget",
".",
"form_name",
"=",
"self",
".",
"form_name",
"infield",
".",
"field",
".",
"widget",
".",
"field_name",
"=",
"infield",
".",
"html_name",
"# Fill all attributes",
"for",
"(",
"attribute",
",",
"default",
")",
"in",
"attributes",
":",
"if",
"attribute",
"not",
"in",
"atr",
".",
"keys",
"(",
")",
":",
"atr",
"[",
"attribute",
"]",
"=",
"default",
"# Fill label",
"if",
"atr",
"[",
"'label'",
"]",
"is",
"True",
":",
"atr",
"[",
"'label'",
"]",
"=",
"infield",
".",
"label",
"# Set language",
"flang",
"=",
"getattr",
"(",
"infield",
".",
"field",
",",
"\"set_language\"",
",",
"None",
")",
"if",
"flang",
":",
"flang",
"(",
"self",
".",
"__language",
")",
"flang",
"=",
"getattr",
"(",
"infield",
".",
"field",
".",
"widget",
",",
"\"set_language\"",
",",
"None",
")",
"if",
"flang",
":",
"flang",
"(",
"self",
".",
"__language",
")",
"# Attach the attribute",
"fields",
".",
"append",
"(",
"atr",
")",
"# Save the new elements",
"if",
"fields",
":",
"groups",
".",
"append",
"(",
"{",
"'name'",
":",
"None",
",",
"'columns'",
":",
"12",
",",
"'fields'",
":",
"fields",
"}",
")",
"# Set focus",
"if",
"focus_must",
":",
"focus_must",
"[",
"'focus'",
"]",
"=",
"True",
"elif",
"focus_first",
"is",
"not",
"None",
":",
"focus_first",
"[",
"'focus'",
"]",
"=",
"True",
"# Return the resulting groups",
"return",
"groups"
] | <--------------------------------------- 12 columns ------------------------------------>
<--- 6 columns ---> <--- 6 columns --->
------------------------------------------ ------------------------------------------
| Info | | Personal |
|==========================================| |==========================================|
| ----------------- ------------------ | | |
| | Passport | | Name | | | Phone Zipcode |
| |=================| | [.....] [.....] | | | [...........................] [.......] |
| | CID Country | | <- 6 -> <- 6 -> | | | <--- 8 columns ---> <-4 col-> |
| | [.....] [.....] | | | | | |
| | <- 6 -> <- 6 -> | ----------------- | | Address |
| ----------------- | | [.....................................] |
------------------------------------------ | <--- 12 columns ---> |
| [..] number |
| <--- 12 columns ---> |
| |
------------------------------------------
group = [
(_('Info'),(6,'#8a6d3b','#fcf8e3','center'),
(_('Identification'),6,
["cid",6],
["country",6],
),
(None,6,
["name",None,6],
["surname",None,6,False],
),
),
(_('Personal'),6,
["phone",None,8],
["zipcode",None,4],
["address",None,12],
["number",None,12, True],
),
]
Group: it is defined as tuple with 3 or more elements:
Grammar: (<Name>, <Attributes>, <Element1>, <Element2>, ..., <ElementN>)
If <Name> is None: no name will be given to the group and no panel decoration will be shown
If <Size in columns> is None: default of 6 will be used
<Attributes>:
it can be an integer that represent the size in columns
it can be a tuple with several attributes where each element represents:
(<Size in columns>,'#<Font color>','#<Background color>','<Alignment>')
<Element>:
it can be a Group
it can be a Field
Examples:
('Info', 6, ["name",6], ["surname",6]) -> Info panel using 6 columns with 2 boxes of 6 columns each with name and surname inputs
('Info', (6,None,'#fcf8e3','center'), ["name",6], ["surname",6]) -> Info panel using 6 columns with a yellow background and a centered title, 2 boxes of 6 columns each with name and surname inputs
('Info', 12, ('Name', 6, ["name",12]), ('Surname',6, ["surname",12])) -> Info panel using 12 columns with 2 panels inside
of 6 columns each named "Name" and "Surname" and inside each of them an input "name" and "surname" where it belongs.
Field: must be a list with at least 1 element in it:
Grammar: [<Name of field>, <Size in columns>, <Label>]
<Name of field>:
This must be filled always
It is the input's name inside the form
Must exists as a form element or as a grouped form element
<Size in columns>:
Size of the input in columns
If it is not defined or if it is defined as None: default of 6 will be used
<Label>:
If it is defined as False: the label for this field will not be shown
If it is not defined or if it is defined as None: default of True will be used (default input's label will be shown)
If it is a string: this string will be shown as a label
Examples:
['age'] Input 'age' will be shown with 6 columns and its default label
['age',8] Input 'age' will be shown with 8 columns and its default label
['age', None, False] Input 'age' will be shown with 6 columns and NO LABEL
['age',8,False] Input 'age' will be shown with 8 columns and NO LABEL
['age',8,_("Age in days")] Input 'age' will be shown with 8 columns and translated label text "Age in days" to user's language
['age',8,_("Age in days"), True] Input 'age' will be shown with 8 columns and translated label text "Age in days" to user's language, and input inline with label
['age',6, None, None, None, None, None, ["ng-click=functionjs('param1')", "ng-change=functionjs2()"]] Input 'age' with extra functions attached
['age',None,None,None,None, 'filter'] Input 'age' with an extra filter (ONLY DETAILS)
['age',6, {'color': 'red'}] Input 'age' will be shown with a red title | [
"<",
"---------------------------------------",
"12",
"columns",
"------------------------------------",
">",
"<",
"---",
"6",
"columns",
"---",
">",
"<",
"---",
"6",
"columns",
"---",
">",
"------------------------------------------",
"------------------------------------------",
"|",
"Info",
"|",
"|",
"Personal",
"|",
"|",
"==========================================",
"|",
"|",
"==========================================",
"|",
"|",
"-----------------",
"------------------",
"|",
"|",
"|",
"|",
"|",
"Passport",
"|",
"|",
"Name",
"|",
"|",
"|",
"Phone",
"Zipcode",
"|",
"|",
"|",
"=================",
"|",
"|",
"[",
".....",
"]",
"[",
".....",
"]",
"|",
"|",
"|",
"[",
"...........................",
"]",
"[",
".......",
"]",
"|",
"|",
"|",
"CID",
"Country",
"|",
"|",
"<",
"-",
"6",
"-",
">",
"<",
"-",
"6",
"-",
">",
"|",
"|",
"|",
"<",
"---",
"8",
"columns",
"---",
">",
"<",
"-",
"4",
"col",
"-",
">",
"|",
"|",
"|",
"[",
".....",
"]",
"[",
".....",
"]",
"|",
"|",
"|",
"|",
"|",
"|",
"|",
"|",
"<",
"-",
"6",
"-",
">",
"<",
"-",
"6",
"-",
">",
"|",
"-----------------",
"|",
"|",
"Address",
"|",
"|",
"-----------------",
"|",
"|",
"[",
".....................................",
"]",
"|",
"------------------------------------------",
"|",
"<",
"---",
"12",
"columns",
"---",
">",
"|",
"|",
"[",
"..",
"]",
"number",
"|",
"|",
"<",
"---",
"12",
"columns",
"---",
">",
"|",
"|",
"|",
"------------------------------------------",
"group",
"=",
"[",
"(",
"_",
"(",
"Info",
")",
"(",
"6",
"#8a6d3b",
"#fcf8e3",
"center",
")",
"(",
"_",
"(",
"Identification",
")",
"6",
"[",
"cid",
"6",
"]",
"[",
"country",
"6",
"]",
")",
"(",
"None",
"6",
"[",
"name",
"None",
"6",
"]",
"[",
"surname",
"None",
"6",
"False",
"]",
")",
")",
"(",
"_",
"(",
"Personal",
")",
"6",
"[",
"phone",
"None",
"8",
"]",
"[",
"zipcode",
"None",
"4",
"]",
"[",
"address",
"None",
"12",
"]",
"[",
"number",
"None",
"12",
"True",
"]",
")",
"]"
] | python | train |
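Example (hedged sketch): the Meta.autofill handling in the record above accepts two tuple layouts. The field name and URL below are invented placeholders; only the tuple layout mirrors the parsing logic shown in the tokens.

autofill = ('select', 3, '/api/autofill/', 'country')

autokind = autofill[0]
if isinstance(autokind, str):
    # new format: (kind, deepness, url, *extra)
    kind, deepness, url, extra = autokind, autofill[1], autofill[2], autofill[3:]
else:
    # old, kind-less format: (deepness, url, *extra) always means a DynamicSelect
    kind, deepness, url, extra = 'select', autofill[0], autofill[1], autofill[2:]

print(kind, deepness, url, extra)  # -> select 3 /api/autofill/ ('country',)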
sphinx-gallery/sphinx-gallery | sphinx_gallery/py_source_parser.py | https://github.com/sphinx-gallery/sphinx-gallery/blob/b0c1f6701bf3f4cef238757e1105cf3686b5e674/sphinx_gallery/py_source_parser.py#L64-L121 | def get_docstring_and_rest(filename):
"""Separate ``filename`` content between docstring and the rest
Strongly inspired from ast.get_docstring.
Returns
-------
docstring : str
docstring of ``filename``
rest : str
``filename`` content without the docstring
"""
node, content = parse_source_file(filename)
if node is None:
return SYNTAX_ERROR_DOCSTRING, content, 1
if not isinstance(node, ast.Module):
raise TypeError("This function only supports modules. "
"You provided {0}".format(node.__class__.__name__))
if not (node.body and isinstance(node.body[0], ast.Expr) and
isinstance(node.body[0].value, ast.Str)):
raise ValueError(('Could not find docstring in file "{0}". '
'A docstring is required by sphinx-gallery '
'unless the file is ignored by "ignore_pattern"')
.format(filename))
if LooseVersion(sys.version) >= LooseVersion('3.7'):
docstring = ast.get_docstring(node)
assert docstring is not None # should be guaranteed above
# This is just for backward compat
if len(node.body[0].value.s) and node.body[0].value.s[0] == '\n':
# just for strict backward compat here
docstring = '\n' + docstring
ts = tokenize.tokenize(BytesIO(content.encode()).readline)
# find the first string according to the tokenizer and get its end row
for tk in ts:
if tk.exact_type == 3:
lineno, _ = tk.end
break
else:
lineno = 0
else:
# this block can be removed when python 3.6 support is dropped
docstring_node = node.body[0]
docstring = docstring_node.value.s
# python2.7: Code was read in bytes and needs decoding to utf-8
# unless future unicode_literals is imported in source which
# makes ast output unicode strings
if hasattr(docstring, 'decode') and not isinstance(docstring, unicode):
docstring = docstring.decode('utf-8')
lineno = docstring_node.lineno # The last line of the string.
# This gets the content of the file after the docstring's last line
# Note: 'maxsplit' argument is not a keyword argument in python2
rest = '\n'.join(content.split('\n')[lineno:])
lineno += 1
return docstring, rest, lineno | [
"def",
"get_docstring_and_rest",
"(",
"filename",
")",
":",
"node",
",",
"content",
"=",
"parse_source_file",
"(",
"filename",
")",
"if",
"node",
"is",
"None",
":",
"return",
"SYNTAX_ERROR_DOCSTRING",
",",
"content",
",",
"1",
"if",
"not",
"isinstance",
"(",
"node",
",",
"ast",
".",
"Module",
")",
":",
"raise",
"TypeError",
"(",
"\"This function only supports modules. \"",
"\"You provided {0}\"",
".",
"format",
"(",
"node",
".",
"__class__",
".",
"__name__",
")",
")",
"if",
"not",
"(",
"node",
".",
"body",
"and",
"isinstance",
"(",
"node",
".",
"body",
"[",
"0",
"]",
",",
"ast",
".",
"Expr",
")",
"and",
"isinstance",
"(",
"node",
".",
"body",
"[",
"0",
"]",
".",
"value",
",",
"ast",
".",
"Str",
")",
")",
":",
"raise",
"ValueError",
"(",
"(",
"'Could not find docstring in file \"{0}\". '",
"'A docstring is required by sphinx-gallery '",
"'unless the file is ignored by \"ignore_pattern\"'",
")",
".",
"format",
"(",
"filename",
")",
")",
"if",
"LooseVersion",
"(",
"sys",
".",
"version",
")",
">=",
"LooseVersion",
"(",
"'3.7'",
")",
":",
"docstring",
"=",
"ast",
".",
"get_docstring",
"(",
"node",
")",
"assert",
"docstring",
"is",
"not",
"None",
"# should be guaranteed above",
"# This is just for backward compat",
"if",
"len",
"(",
"node",
".",
"body",
"[",
"0",
"]",
".",
"value",
".",
"s",
")",
"and",
"node",
".",
"body",
"[",
"0",
"]",
".",
"value",
".",
"s",
"[",
"0",
"]",
"==",
"'\\n'",
":",
"# just for strict backward compat here",
"docstring",
"=",
"'\\n'",
"+",
"docstring",
"ts",
"=",
"tokenize",
".",
"tokenize",
"(",
"BytesIO",
"(",
"content",
".",
"encode",
"(",
")",
")",
".",
"readline",
")",
"# find the first string according to the tokenizer and get its end row",
"for",
"tk",
"in",
"ts",
":",
"if",
"tk",
".",
"exact_type",
"==",
"3",
":",
"lineno",
",",
"_",
"=",
"tk",
".",
"end",
"break",
"else",
":",
"lineno",
"=",
"0",
"else",
":",
"# this block can be removed when python 3.6 support is dropped",
"docstring_node",
"=",
"node",
".",
"body",
"[",
"0",
"]",
"docstring",
"=",
"docstring_node",
".",
"value",
".",
"s",
"# python2.7: Code was read in bytes needs decoding to utf-8",
"# unless future unicode_literals is imported in source which",
"# make ast output unicode strings",
"if",
"hasattr",
"(",
"docstring",
",",
"'decode'",
")",
"and",
"not",
"isinstance",
"(",
"docstring",
",",
"unicode",
")",
":",
"docstring",
"=",
"docstring",
".",
"decode",
"(",
"'utf-8'",
")",
"lineno",
"=",
"docstring_node",
".",
"lineno",
"# The last line of the string.",
"# This get the content of the file after the docstring last line",
"# Note: 'maxsplit' argument is not a keyword argument in python2",
"rest",
"=",
"'\\n'",
".",
"join",
"(",
"content",
".",
"split",
"(",
"'\\n'",
")",
"[",
"lineno",
":",
"]",
")",
"lineno",
"+=",
"1",
"return",
"docstring",
",",
"rest",
",",
"lineno"
] | Separate ``filename`` content between docstring and the rest
Strongly inspired from ast.get_docstring.
Returns
-------
docstring : str
docstring of ``filename``
rest : str
``filename`` content without the docstring | [
"Separate",
"filename",
"content",
"between",
"docstring",
"and",
"the",
"rest"
] | python | train |
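Example (hedged sketch): a usage run for the record above, assuming get_docstring_and_rest is importable from sphinx_gallery.py_source_parser; the temporary file is only scaffolding for the demo.

import os
import tempfile

src = '"""Example title.\n\nLonger description."""\nx = 1\n'
with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as fh:
    fh.write(src)
try:
    docstring, rest, lineno = get_docstring_and_rest(fh.name)
    print(repr(docstring))  # the docstring text
    print(rest)             # the remaining source, here 'x = 1'
    print(lineno)           # first line number after the docstring
finally:
    os.unlink(fh.name)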
thebigmunch/google-music | src/google_music/clients/mobileclient.py | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L1566-L1588 | def songs_add(self, songs):
"""Add store songs to your library.
Parameters:
songs (list): A list of store song dicts.
Returns:
list: Songs' library IDs.
"""
mutations = [mc_calls.TrackBatch.add(song) for song in songs]
response = self._call(
mc_calls.TrackBatch,
mutations
)
success_ids = [
res['id']
for res in response.body['mutate_response']
if res['response_code'] == 'OK'
]
return success_ids | [
"def",
"songs_add",
"(",
"self",
",",
"songs",
")",
":",
"mutations",
"=",
"[",
"mc_calls",
".",
"TrackBatch",
".",
"add",
"(",
"song",
")",
"for",
"song",
"in",
"songs",
"]",
"response",
"=",
"self",
".",
"_call",
"(",
"mc_calls",
".",
"TrackBatch",
",",
"mutations",
")",
"success_ids",
"=",
"[",
"res",
"[",
"'id'",
"]",
"for",
"res",
"in",
"response",
".",
"body",
"[",
"'mutate_response'",
"]",
"if",
"res",
"[",
"'response_code'",
"]",
"==",
"'OK'",
"]",
"return",
"success_ids"
] | Add store songs to your library.
Parameters:
songs (list): A list of store song dicts.
Returns:
list: Songs' library IDs. | [
"Add",
"store",
"songs",
"to",
"your",
"library",
"."
] | python | train |
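Example (hedged sketch): 'mc' is assumed to be an authenticated mobile client (the google_music.mobileclient() entry point and its arguments are assumptions here), and a real store song dict would come from a search or lookup call.

import google_music

mc = google_music.mobileclient()  # assumed entry point; may prompt for OAuth
store_song = {'storeId': 'Tabcdefghijklmnopqrstuvwxy'}  # placeholder store song dict
library_ids = mc.songs_add([store_song])
print(library_ids)  # one library ID per song whose mutation returned 'OK'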
ncclient/ncclient | ncclient/devices/nexus.py | https://github.com/ncclient/ncclient/blob/2b75f2c6a06bd2a5d1be67b01bb65c5ffd2e2d7a/ncclient/devices/nexus.py#L79-L95 | def get_ssh_subsystem_names(self):
"""
Return a list of possible SSH subsystem names.
Different NXOS versions use different SSH subsystem names for netconf.
Therefore, we return a list so that several can be tried, if necessary.
The Nexus device handler also accepts a preferred subsystem name via the 'ssh_subsystem_name' key in device_params.
"""
preferred_ssh_subsystem = self.device_params.get("ssh_subsystem_name")
name_list = [ "netconf", "xmlagent" ]
if preferred_ssh_subsystem:
return [ preferred_ssh_subsystem ] + \
[ n for n in name_list if n != preferred_ssh_subsystem ]
else:
return name_list | [
"def",
"get_ssh_subsystem_names",
"(",
"self",
")",
":",
"preferred_ssh_subsystem",
"=",
"self",
".",
"device_params",
".",
"get",
"(",
"\"ssh_subsystem_name\"",
")",
"name_list",
"=",
"[",
"\"netconf\"",
",",
"\"xmlagent\"",
"]",
"if",
"preferred_ssh_subsystem",
":",
"return",
"[",
"preferred_ssh_subsystem",
"]",
"+",
"[",
"n",
"for",
"n",
"in",
"name_list",
"if",
"n",
"!=",
"preferred_ssh_subsystem",
"]",
"else",
":",
"return",
"name_list"
] | Return a list of possible SSH subsystem names.
Different NXOS versions use different SSH subsystem names for netconf.
Therefore, we return a list so that several can be tried, if necessary.
The Nexus device handler also accepts a preferred subsystem name via the 'ssh_subsystem_name' key in device_params. | [
"Return",
"a",
"list",
"of",
"possible",
"SSH",
"subsystem",
"names",
"."
] | python | train |
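Example (hedged sketch): forcing the preferred subsystem through device_params when connecting with ncclient; host and credentials are placeholders.

from ncclient import manager

conn = manager.connect(
    host='192.0.2.1',
    username='admin',
    password='secret',
    hostkey_verify=False,
    device_params={'name': 'nexus', 'ssh_subsystem_name': 'xmlagent'},
)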
alex-kostirin/pyatomac | atomac/ldtpd/__init__.py | https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/ldtpd/__init__.py#L65-L94 | def main(port=4118, parentpid=None):
"""Main entry point. Parse command line options and start up a server."""
if "LDTP_DEBUG" in os.environ:
_ldtp_debug = True
else:
_ldtp_debug = False
_ldtp_debug_file = os.environ.get('LDTP_DEBUG_FILE', None)
if _ldtp_debug:
print("Parent PID: {}".format(int(parentpid)))
if _ldtp_debug_file:
with open(unicode(_ldtp_debug_file), "a") as fp:
fp.write("Parent PID: {}".format(int(parentpid)))
server = LDTPServer(('', port), allow_none=True, logRequests=_ldtp_debug,
requestHandler=RequestHandler)
server.register_introspection_functions()
server.register_multicall_functions()
ldtp_inst = core.Core()
server.register_instance(ldtp_inst)
if parentpid:
thread.start_new_thread(notifyclient, (parentpid,))
try:
server.serve_forever()
except KeyboardInterrupt:
pass
except:
if _ldtp_debug:
print(traceback.format_exc())
if _ldtp_debug_file:
with open(_ldtp_debug_file, "a") as fp:
fp.write(traceback.format_exc()) | [
"def",
"main",
"(",
"port",
"=",
"4118",
",",
"parentpid",
"=",
"None",
")",
":",
"if",
"\"LDTP_DEBUG\"",
"in",
"os",
".",
"environ",
":",
"_ldtp_debug",
"=",
"True",
"else",
":",
"_ldtp_debug",
"=",
"False",
"_ldtp_debug_file",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'LDTP_DEBUG_FILE'",
",",
"None",
")",
"if",
"_ldtp_debug",
":",
"print",
"(",
"\"Parent PID: {}\"",
".",
"format",
"(",
"int",
"(",
"parentpid",
")",
")",
")",
"if",
"_ldtp_debug_file",
":",
"with",
"open",
"(",
"unicode",
"(",
"_ldtp_debug_file",
")",
",",
"\"a\"",
")",
"as",
"fp",
":",
"fp",
".",
"write",
"(",
"\"Parent PID: {}\"",
".",
"format",
"(",
"int",
"(",
"parentpid",
")",
")",
")",
"server",
"=",
"LDTPServer",
"(",
"(",
"''",
",",
"port",
")",
",",
"allow_none",
"=",
"True",
",",
"logRequests",
"=",
"_ldtp_debug",
",",
"requestHandler",
"=",
"RequestHandler",
")",
"server",
".",
"register_introspection_functions",
"(",
")",
"server",
".",
"register_multicall_functions",
"(",
")",
"ldtp_inst",
"=",
"core",
".",
"Core",
"(",
")",
"server",
".",
"register_instance",
"(",
"ldtp_inst",
")",
"if",
"parentpid",
":",
"thread",
".",
"start_new_thread",
"(",
"notifyclient",
",",
"(",
"parentpid",
",",
")",
")",
"try",
":",
"server",
".",
"serve_forever",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"pass",
"except",
":",
"if",
"_ldtp_debug",
":",
"print",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"if",
"_ldtp_debug_file",
":",
"with",
"open",
"(",
"_ldtp_debug_file",
",",
"\"a\"",
")",
"as",
"fp",
":",
"fp",
".",
"write",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")"
] | Main entry point. Parse command line options and start up a server. | [
"Main",
"entry",
"point",
".",
"Parse",
"command",
"line",
"options",
"and",
"start",
"up",
"a",
"server",
"."
] | python | valid |
Clinical-Genomics/scout | scout/adapter/mongo/panel.py | https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/panel.py#L155-L173 | def add_gene_panel(self, panel_obj):
"""Add a gene panel to the database
Args:
panel_obj(dict)
"""
panel_name = panel_obj['panel_name']
panel_version = panel_obj['version']
display_name = panel_obj.get('display_name', panel_name)
if self.gene_panel(panel_name, panel_version):
raise IntegrityError("Panel {0} with version {1} already"
" exist in database".format(panel_name, panel_version))
LOG.info("loading panel {0}, version {1} to database".format(
display_name, panel_version
))
result = self.panel_collection.insert_one(panel_obj)
LOG.debug("Panel saved")
return result.inserted_id | [
"def",
"add_gene_panel",
"(",
"self",
",",
"panel_obj",
")",
":",
"panel_name",
"=",
"panel_obj",
"[",
"'panel_name'",
"]",
"panel_version",
"=",
"panel_obj",
"[",
"'version'",
"]",
"display_name",
"=",
"panel_obj",
".",
"get",
"(",
"'display_name'",
",",
"panel_name",
")",
"if",
"self",
".",
"gene_panel",
"(",
"panel_name",
",",
"panel_version",
")",
":",
"raise",
"IntegrityError",
"(",
"\"Panel {0} with version {1} already\"",
"\" exist in database\"",
".",
"format",
"(",
"panel_name",
",",
"panel_version",
")",
")",
"LOG",
".",
"info",
"(",
"\"loading panel {0}, version {1} to database\"",
".",
"format",
"(",
"display_name",
",",
"panel_version",
")",
")",
"result",
"=",
"self",
".",
"panel_collection",
".",
"insert_one",
"(",
"panel_obj",
")",
"LOG",
".",
"debug",
"(",
"\"Panel saved\"",
")",
"return",
"result",
".",
"inserted_id"
] | Add a gene panel to the database
Args:
panel_obj(dict) | [
"Add",
"a",
"gene",
"panel",
"to",
"the",
"database"
] | python | test |
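Example (hedged sketch): 'adapter' is assumed to be a connected Scout MongoAdapter; a real panel document carries more keys (genes, institute, date, ...), but these are the ones the method itself reads.

panel_obj = {
    'panel_name': 'cardio',
    'version': 1.0,
    'display_name': 'Cardiology panel',
}
panel_id = adapter.add_gene_panel(panel_obj)
# A second call with the same ('cardio', 1.0) pair raises IntegrityError.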
jtwhite79/pyemu | pyemu/prototypes/da.py | https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/prototypes/da.py#L445-L459 | def model_temporal_evolotion(self, time_index, cycle_files):
"""
- The function prepares the model for this time cycle
- Any time-dependent forcing should be handled here. This includes temporal stresses, boundary conditions, and
initial conditions.
- Two options are available (1) template files to update input parameters
(2) use results from previous cycle to update input files using instruction
files.
(3) the user must prepare a pair of files: .tpl (or .ins) and corresponding file to change
:param time_index:
:return:
"""
for file in cycle_files:
# generate log here about files being updated
self.update_inputfile(file[0], file[1]) | [
"def",
"model_temporal_evolotion",
"(",
"self",
",",
"time_index",
",",
"cycle_files",
")",
":",
"for",
"file",
"in",
"cycle_files",
":",
"# generate log here about files being updated",
"self",
".",
"update_inputfile",
"(",
"file",
"[",
"0",
"]",
",",
"file",
"[",
"1",
"]",
")"
] | - The function prepares the model for this time cycle
- Any time-dependent forcing should be handled here. This includes temporal stresses, boundary conditions, and
initial conditions.
- Two options are available (1) template files to update input parameters
(2) use results from previous cycle to update input files using instruction
files.
(3) the user must prepare a pair of files: .tpl (or .ins) and corresponding file to change
:param time_index:
:return: | [
"-",
"The",
"function",
"prepares",
"the",
"model",
"for",
"this",
"time",
"cycle",
"-",
"Any",
"time",
"-",
"dependant",
"forcing",
"should",
"be",
"handled",
"here",
".",
"This",
"includes",
"temporal",
"stresses",
"boundary",
"conditions",
"and",
"initial",
"conditions",
".",
"-",
"Two",
"options",
"are",
"available",
"(",
"1",
")",
"template",
"files",
"to",
"update",
"input",
"parameters",
"(",
"2",
")",
"use",
"results",
"from",
"previous",
"cycle",
"to",
"update",
"input",
"files",
"using",
"instruction",
"files",
".",
"(",
"3",
")",
"the",
"user",
"must",
"prepare",
"a",
"pair",
"of",
"files",
":",
".",
"tpl",
"(",
"or",
".",
"ins",
")",
"and",
"corresponding",
"file",
"to",
"change",
":",
"param",
"time_index",
":",
":",
"return",
":"
] | python | train |
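Example (hedged sketch): the loop above expects cycle_files to be a sequence of (template-or-instruction file, model input file) pairs; the file names are invented, and 'da' is an assumed assimilator instance exposing the method.

cycle_files = [
    ('wel_stress.tpl', 'model.wel'),   # time-varying stresses for this cycle
    ('init_heads.tpl', 'model.strt'),  # initial conditions for this cycle
]
da.model_temporal_evolotion(time_index=0, cycle_files=cycle_files)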
pawel-kow/domainconnect_python | domainconnect/domainconnect.py | https://github.com/pawel-kow/domainconnect_python/blob/2467093cc4e997234e0fb5c55e71f76b856c1ab1/domainconnect/domainconnect.py#L297-L342 | def get_domain_connect_template_sync_url(self, domain, provider_id, service_id, redirect_uri=None, params=None,
state=None, group_ids=None):
"""Makes full Domain Connect discovery of a domain and returns full url to request sync consent.
:param domain: str
:param provider_id: str
:param service_id: str
:param redirect_uri: str
:param params: dict
:param state: str
:param group_ids: list(str)
:return: str
full URL which shall be used to redirect the browser to
:raises: NoDomainConnectRecordException
when no _domainconnect record found
:raises: NoDomainConnectSettingsException
when settings are not found
:raises: InvalidDomainConnectSettingsException
when settings contain missing fields
"""
# TODO: support for signatures
# TODO: support for provider_name (for shared templates)
if params is None:
params = {}
config = self.get_domain_config(domain)
self.check_template_supported(config, provider_id, service_id)
if config.urlSyncUX is None:
raise InvalidDomainConnectSettingsException("No sync URL in config")
sync_url_format = '{}/v2/domainTemplates/providers/{}/services/{}/' \
'apply?domain={}&host={}&{}'
if redirect_uri is not None:
params["redirect_uri"] = redirect_uri
if state is not None:
params["state"] = state
if group_ids is not None:
params["groupId"] = ",".join(group_ids)
return sync_url_format.format(config.urlSyncUX, provider_id, service_id, config.domain_root, config.host,
urllib.parse.urlencode(sorted(params.items(), key=lambda val: val[0]))) | [
"def",
"get_domain_connect_template_sync_url",
"(",
"self",
",",
"domain",
",",
"provider_id",
",",
"service_id",
",",
"redirect_uri",
"=",
"None",
",",
"params",
"=",
"None",
",",
"state",
"=",
"None",
",",
"group_ids",
"=",
"None",
")",
":",
"# TODO: support for signatures",
"# TODO: support for provider_name (for shared templates)",
"if",
"params",
"is",
"None",
":",
"params",
"=",
"{",
"}",
"config",
"=",
"self",
".",
"get_domain_config",
"(",
"domain",
")",
"self",
".",
"check_template_supported",
"(",
"config",
",",
"provider_id",
",",
"service_id",
")",
"if",
"config",
".",
"urlSyncUX",
"is",
"None",
":",
"raise",
"InvalidDomainConnectSettingsException",
"(",
"\"No sync URL in config\"",
")",
"sync_url_format",
"=",
"'{}/v2/domainTemplates/providers/{}/services/{}/'",
"'apply?domain={}&host={}&{}'",
"if",
"redirect_uri",
"is",
"not",
"None",
":",
"params",
"[",
"\"redirect_uri\"",
"]",
"=",
"redirect_uri",
"if",
"state",
"is",
"not",
"None",
":",
"params",
"[",
"\"state\"",
"]",
"=",
"state",
"if",
"group_ids",
"is",
"not",
"None",
":",
"params",
"[",
"\"groupId\"",
"]",
"=",
"\",\"",
".",
"join",
"(",
"group_ids",
")",
"return",
"sync_url_format",
".",
"format",
"(",
"config",
".",
"urlSyncUX",
",",
"provider_id",
",",
"service_id",
",",
"config",
".",
"domain_root",
",",
"config",
".",
"host",
",",
"urllib",
".",
"parse",
".",
"urlencode",
"(",
"sorted",
"(",
"params",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"val",
":",
"val",
"[",
"0",
"]",
")",
")",
")"
] | Makes full Domain Connect discovery of a domain and returns full url to request sync consent.
:param domain: str
:param provider_id: str
:param service_id: str
:param redirect_uri: str
:param params: dict
:param state: str
:param group_ids: list(str)
:return: str
full URL which shall be used to redirect the browser to
:raises: NoDomainConnectRecordException
when no _domainconnect record found
:raises: NoDomainConnectSettingsException
when settings are not found
:raises: InvalidDomainConnectSettingsException
when settings contain missing fields | [
"Makes",
"full",
"Domain",
"Connect",
"discovery",
"of",
"a",
"domain",
"and",
"returns",
"full",
"url",
"to",
"request",
"sync",
"consent",
"."
] | python | train |
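Example (hedged sketch): the provider/service ids below follow the public Domain Connect example template; the domain, parameter values and redirect URI are placeholders.

from domainconnect import DomainConnect

dc = DomainConnect()
url = dc.get_domain_connect_template_sync_url(
    domain='foo.connect.domains',
    provider_id='exampleservice.domainconnect.org',
    service_id='template1',
    params={'IP': '192.0.2.7', 'RANDOMTEXT': 'shm:hello'},
    redirect_uri='https://example.com/done',
)
print(url)  # send the user's browser here to request consent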
Esri/ArcREST | src/arcrest/manageorg/_community.py | https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageorg/_community.py#L119-L143 | def getGroupIDs(self, groupNames,communityInfo=None):
"""
This function retrieves the group IDs
Inputs:
groupNames - a list of group names (or a single name)
Output:
list - group IDs
"""
group_ids=[]
if communityInfo is None:
communityInfo = self.communitySelf
if isinstance(groupNames,list):
groupNames = map(str.upper, groupNames)
else:
groupNames = groupNames.upper()
if 'groups' in communityInfo:
for gp in communityInfo['groups']:
if str(gp['title']).upper() in groupNames:
group_ids.append(gp['id'])
del communityInfo
return group_ids | [
"def",
"getGroupIDs",
"(",
"self",
",",
"groupNames",
",",
"communityInfo",
"=",
"None",
")",
":",
"group_ids",
"=",
"[",
"]",
"if",
"communityInfo",
"is",
"None",
":",
"communityInfo",
"=",
"self",
".",
"communitySelf",
"if",
"isinstance",
"(",
"groupNames",
",",
"list",
")",
":",
"groupNames",
"=",
"map",
"(",
"str",
".",
"upper",
",",
"groupNames",
")",
"else",
":",
"groupNames",
"=",
"groupNames",
".",
"upper",
"(",
")",
"if",
"'groups'",
"in",
"communityInfo",
":",
"for",
"gp",
"in",
"communityInfo",
"[",
"'groups'",
"]",
":",
"if",
"str",
"(",
"gp",
"[",
"'title'",
"]",
")",
".",
"upper",
"(",
")",
"in",
"groupNames",
":",
"group_ids",
".",
"append",
"(",
"gp",
"[",
"'id'",
"]",
")",
"del",
"communityInfo",
"return",
"group_ids"
] | This function retrieves the group IDs
Inputs:
groupNames - a list of group names (or a single name)
Output:
list - group IDs | [
"This",
"function",
"retrieves",
"the",
"group",
"IDs"
] | python | train |
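Example (hedged sketch): 'community' is assumed to be an authenticated Community object from arcrest.manageorg; titles are matched case-insensitively against the cached community info.

ids = community.getGroupIDs(groupNames=['Demo Group', 'Editors'])
print(ids)  # e.g. ['a1b2c3d4...', 'e5f6a7b8...']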
iotile/coretools | iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Node/__init__.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Node/__init__.py#L1232-L1242 | def add_dependency(self, depend):
"""Adds dependencies."""
try:
self._add_child(self.depends, self.depends_set, depend)
except TypeError as e:
e = e.args[0]
if SCons.Util.is_List(e):
s = list(map(str, e))
else:
s = str(e)
raise SCons.Errors.UserError("attempted to add a non-Node dependency to %s:\n\t%s is a %s, not a Node" % (str(self), s, type(e))) | [
"def",
"add_dependency",
"(",
"self",
",",
"depend",
")",
":",
"try",
":",
"self",
".",
"_add_child",
"(",
"self",
".",
"depends",
",",
"self",
".",
"depends_set",
",",
"depend",
")",
"except",
"TypeError",
"as",
"e",
":",
"e",
"=",
"e",
".",
"args",
"[",
"0",
"]",
"if",
"SCons",
".",
"Util",
".",
"is_List",
"(",
"e",
")",
":",
"s",
"=",
"list",
"(",
"map",
"(",
"str",
",",
"e",
")",
")",
"else",
":",
"s",
"=",
"str",
"(",
"e",
")",
"raise",
"SCons",
".",
"Errors",
".",
"UserError",
"(",
"\"attempted to add a non-Node dependency to %s:\\n\\t%s is a %s, not a Node\"",
"%",
"(",
"str",
"(",
"self",
")",
",",
"s",
",",
"type",
"(",
"e",
")",
")",
")"
] | Adds dependencies. | [
"Adds",
"dependencies",
"."
] | python | train |
lsst-sqre/sqre-codekit | codekit/pygithub.py | https://github.com/lsst-sqre/sqre-codekit/blob/98122404cd9065d4d1d570867fe518042669126c/codekit/pygithub.py#L292-L302 | def debug_ratelimit(g):
"""Log debug of github ratelimit information from last API call
Parameters
----------
g: github.MainClass.Github
github object
"""
assert isinstance(g, github.MainClass.Github), type(g)
debug("github ratelimit: {rl}".format(rl=g.rate_limiting)) | [
"def",
"debug_ratelimit",
"(",
"g",
")",
":",
"assert",
"isinstance",
"(",
"g",
",",
"github",
".",
"MainClass",
".",
"Github",
")",
",",
"type",
"(",
"g",
")",
"debug",
"(",
"\"github ratelimit: {rl}\"",
".",
"format",
"(",
"rl",
"=",
"g",
".",
"rate_limiting",
")",
")"
] | Log debug of github ratelimit information from last API call
Parameters
----------
g: github.MainClass.Github
github object | [
"Log",
"debug",
"of",
"github",
"ratelimit",
"information",
"from",
"last",
"API",
"call"
] | python | train |
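Example (hedged sketch): any PyGithub client works; the token is a placeholder, rate_limiting reflects the most recent API call, and the output only appears if debug logging is configured.

import github

g = github.Github('ghp_placeholder_token')
g.get_user().login   # any API call refreshes the cached ratelimit
debug_ratelimit(g)   # logs e.g. "github ratelimit: (4998, 5000)"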
pyusb/pyusb | usb/control.py | https://github.com/pyusb/pyusb/blob/ffe6faf42c6ad273880b0b464b9bbf44c1d4b2e9/usb/control.py#L188-L206 | def get_configuration(dev):
r"""Get the current active configuration of the device.
dev is the Device object to which the request will be
sent.
This function differs from the Device.get_active_configuration
method because the latter may use cached data, while this
function always does a device request.
"""
bmRequestType = util.build_request_type(
util.CTRL_IN,
util.CTRL_TYPE_STANDARD,
util.CTRL_RECIPIENT_DEVICE)
return dev.ctrl_transfer(
bmRequestType,
bRequest = 0x08,
data_or_wLength = 1)[0] | [
"def",
"get_configuration",
"(",
"dev",
")",
":",
"bmRequestType",
"=",
"util",
".",
"build_request_type",
"(",
"util",
".",
"CTRL_IN",
",",
"util",
".",
"CTRL_TYPE_STANDARD",
",",
"util",
".",
"CTRL_RECIPIENT_DEVICE",
")",
"return",
"dev",
".",
"ctrl_transfer",
"(",
"bmRequestType",
",",
"bRequest",
"=",
"0x08",
",",
"data_or_wLength",
"=",
"1",
")",
"[",
"0",
"]"
] | r"""Get the current active configuration of the device.
dev is the Device object to which the request will be
sent.
This function differs from the Device.get_active_configuration
method because the latter may use cached data, while this
function always does a device request. | [
"r",
"Get",
"the",
"current",
"active",
"configuration",
"of",
"the",
"device",
"."
] | python | train |
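Example (hedged sketch): the vendor/product ids are placeholders; usb.core.find() returns None when no matching device is attached.

import usb.core
import usb.control

dev = usb.core.find(idVendor=0x046d, idProduct=0xc077)
if dev is not None:
    print(usb.control.get_configuration(dev))  # current bConfigurationValue, e.g. 1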
idlesign/django-siteprefs | siteprefs/utils.py | https://github.com/idlesign/django-siteprefs/blob/3d6bf5e64220fe921468a36fce68e15d7947cf92/siteprefs/utils.py#L392-L408 | def import_prefs():
"""Imports preferences modules from packages (apps) and project root."""
# settings.py locals if autodiscover_siteprefs() is in urls.py
settings_locals = get_frame_locals(3)
if 'self' not in settings_locals: # If not SiteprefsConfig.ready()
# Try to import project-wide prefs.
project_package = settings_locals['__package__'] # Expected project layout introduced in Django 1.4
if not project_package:
# Fallback to old layout.
project_package = os.path.split(os.path.dirname(settings_locals['__file__']))[-1]
import_module(project_package, PREFS_MODULE_NAME)
import_project_modules(PREFS_MODULE_NAME) | [
"def",
"import_prefs",
"(",
")",
":",
"# settings.py locals if autodiscover_siteprefs() is in urls.py",
"settings_locals",
"=",
"get_frame_locals",
"(",
"3",
")",
"if",
"'self'",
"not",
"in",
"settings_locals",
":",
"# If not SiteprefsConfig.ready()",
"# Try to import project-wide prefs.",
"project_package",
"=",
"settings_locals",
"[",
"'__package__'",
"]",
"# Expected project layout introduced in Django 1.4",
"if",
"not",
"project_package",
":",
"# Fallback to old layout.",
"project_package",
"=",
"os",
".",
"path",
".",
"split",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"settings_locals",
"[",
"'__file__'",
"]",
")",
")",
"[",
"-",
"1",
"]",
"import_module",
"(",
"project_package",
",",
"PREFS_MODULE_NAME",
")",
"import_project_modules",
"(",
"PREFS_MODULE_NAME",
")"
] | Imports preferences modules from packages (apps) and project root. | [
"Imports",
"preferences",
"modules",
"from",
"packages",
"(",
"apps",
")",
"and",
"project",
"root",
"."
] | python | valid |
GNS3/gns3-server | gns3server/compute/virtualbox/virtualbox_vm.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/virtualbox/virtualbox_vm.py#L890-L933 | def _create_linked_clone(self):
"""
Creates a new linked clone.
"""
gns3_snapshot_exists = False
vm_info = yield from self._get_vm_info()
for entry, value in vm_info.items():
if entry.startswith("SnapshotName") and value == "GNS3 Linked Base for clones":
gns3_snapshot_exists = True
if not gns3_snapshot_exists:
result = yield from self.manager.execute("snapshot", [self._vmname, "take", "GNS3 Linked Base for clones"])
log.debug("GNS3 snapshot created: {}".format(result))
args = [self._vmname,
"--snapshot",
"GNS3 Linked Base for clones",
"--options",
"link",
"--name",
self.name,
"--basefolder",
self.working_dir,
"--register"]
result = yield from self.manager.execute("clonevm", args)
log.debug("VirtualBox VM: {} cloned".format(result))
self._vmname = self._name
yield from self.manager.execute("setextradata", [self._vmname, "GNS3/Clone", "yes"])
# We create a reset snapshot in order to simplify the life of users who want to roll back their VM
# Warning: do not document this; it seems buggy, but we keep it because Raizo students use it.
try:
args = [self._vmname, "take", "reset"]
result = yield from self.manager.execute("snapshot", args)
log.debug("Snapshot 'reset' created: {}".format(result))
# It seems this sometimes fails due to an internal race condition in VBox;
# we have no real explanation for this.
except VirtualBoxError:
log.warn("Snapshot 'reset' not created")
os.makedirs(os.path.join(self.working_dir, self._vmname), exist_ok=True) | [
"def",
"_create_linked_clone",
"(",
"self",
")",
":",
"gns3_snapshot_exists",
"=",
"False",
"vm_info",
"=",
"yield",
"from",
"self",
".",
"_get_vm_info",
"(",
")",
"for",
"entry",
",",
"value",
"in",
"vm_info",
".",
"items",
"(",
")",
":",
"if",
"entry",
".",
"startswith",
"(",
"\"SnapshotName\"",
")",
"and",
"value",
"==",
"\"GNS3 Linked Base for clones\"",
":",
"gns3_snapshot_exists",
"=",
"True",
"if",
"not",
"gns3_snapshot_exists",
":",
"result",
"=",
"yield",
"from",
"self",
".",
"manager",
".",
"execute",
"(",
"\"snapshot\"",
",",
"[",
"self",
".",
"_vmname",
",",
"\"take\"",
",",
"\"GNS3 Linked Base for clones\"",
"]",
")",
"log",
".",
"debug",
"(",
"\"GNS3 snapshot created: {}\"",
".",
"format",
"(",
"result",
")",
")",
"args",
"=",
"[",
"self",
".",
"_vmname",
",",
"\"--snapshot\"",
",",
"\"GNS3 Linked Base for clones\"",
",",
"\"--options\"",
",",
"\"link\"",
",",
"\"--name\"",
",",
"self",
".",
"name",
",",
"\"--basefolder\"",
",",
"self",
".",
"working_dir",
",",
"\"--register\"",
"]",
"result",
"=",
"yield",
"from",
"self",
".",
"manager",
".",
"execute",
"(",
"\"clonevm\"",
",",
"args",
")",
"log",
".",
"debug",
"(",
"\"VirtualBox VM: {} cloned\"",
".",
"format",
"(",
"result",
")",
")",
"self",
".",
"_vmname",
"=",
"self",
".",
"_name",
"yield",
"from",
"self",
".",
"manager",
".",
"execute",
"(",
"\"setextradata\"",
",",
"[",
"self",
".",
"_vmname",
",",
"\"GNS3/Clone\"",
",",
"\"yes\"",
"]",
")",
"# We create a reset snapshot in order to simplify life of user who want to rollback their VM",
"# Warning: Do not document this it's seem buggy we keep it because Raizo students use it.",
"try",
":",
"args",
"=",
"[",
"self",
".",
"_vmname",
",",
"\"take\"",
",",
"\"reset\"",
"]",
"result",
"=",
"yield",
"from",
"self",
".",
"manager",
".",
"execute",
"(",
"\"snapshot\"",
",",
"args",
")",
"log",
".",
"debug",
"(",
"\"Snapshot 'reset' created: {}\"",
".",
"format",
"(",
"result",
")",
")",
"# It seem sometimes this failed due to internal race condition of Vbox",
"# we have no real explanation of this.",
"except",
"VirtualBoxError",
":",
"log",
".",
"warn",
"(",
"\"Snapshot 'reset' not created\"",
")",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"working_dir",
",",
"self",
".",
"_vmname",
")",
",",
"exist_ok",
"=",
"True",
")"
] | Creates a new linked clone. | [
"Creates",
"a",
"new",
"linked",
"clone",
"."
] | python | train |
rackerlabs/simpl | simpl/incubator/dicts.py | https://github.com/rackerlabs/simpl/blob/60ed3336a931cd6a7a7246e60f26165d9dc7c99c/simpl/incubator/dicts.py#L233-L264 | def merge_lists(dest, source, extend_lists=False):
"""Recursively merge two lists.
:keyword extend_lists: if true, just extends lists instead of merging them.
This applies merge_dictionary if any of the entries are dicts.
Note: This updates dest and returns it.
"""
if not source:
return dest
if not extend_lists:
# Make them the same size
left = dest
right = source[:]
if len(dest) > len(source):
right.extend([None] * (len(dest) - len(source)))
elif len(dest) < len(source):
left.extend([None] * (len(source) - len(dest)))
# Merge lists
for index, value in enumerate(left):
if value is None and right[index] is not None:
dest[index] = right[index]
elif isinstance(value, dict) and isinstance(right[index], dict):
merge_dictionary(dest[index], source[index],
extend_lists=extend_lists)
elif isinstance(value, list):
merge_lists(value, right[index])
elif right[index] is not None:
dest[index] = right[index]
else:
dest.extend([src for src in source if src not in dest])
return dest | [
"def",
"merge_lists",
"(",
"dest",
",",
"source",
",",
"extend_lists",
"=",
"False",
")",
":",
"if",
"not",
"source",
":",
"return",
"if",
"not",
"extend_lists",
":",
"# Make them the same size",
"left",
"=",
"dest",
"right",
"=",
"source",
"[",
":",
"]",
"if",
"len",
"(",
"dest",
")",
">",
"len",
"(",
"source",
")",
":",
"right",
".",
"extend",
"(",
"[",
"None",
"]",
"*",
"(",
"len",
"(",
"dest",
")",
"-",
"len",
"(",
"source",
")",
")",
")",
"elif",
"len",
"(",
"dest",
")",
"<",
"len",
"(",
"source",
")",
":",
"left",
".",
"extend",
"(",
"[",
"None",
"]",
"*",
"(",
"len",
"(",
"source",
")",
"-",
"len",
"(",
"dest",
")",
")",
")",
"# Merge lists",
"for",
"index",
",",
"value",
"in",
"enumerate",
"(",
"left",
")",
":",
"if",
"value",
"is",
"None",
"and",
"right",
"[",
"index",
"]",
"is",
"not",
"None",
":",
"dest",
"[",
"index",
"]",
"=",
"right",
"[",
"index",
"]",
"elif",
"isinstance",
"(",
"value",
",",
"dict",
")",
"and",
"isinstance",
"(",
"right",
"[",
"index",
"]",
",",
"dict",
")",
":",
"merge_dictionary",
"(",
"dest",
"[",
"index",
"]",
",",
"source",
"[",
"index",
"]",
",",
"extend_lists",
"=",
"extend_lists",
")",
"elif",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"merge_lists",
"(",
"value",
",",
"right",
"[",
"index",
"]",
")",
"elif",
"right",
"[",
"index",
"]",
"is",
"not",
"None",
":",
"dest",
"[",
"index",
"]",
"=",
"right",
"[",
"index",
"]",
"else",
":",
"dest",
".",
"extend",
"(",
"[",
"src",
"for",
"src",
"in",
"source",
"if",
"src",
"not",
"in",
"dest",
"]",
")",
"return",
"dest"
] | Recursively merge two lists.
:keyword extend_lists: if true, just extends lists instead of merging them.
This applies merge_dictionary if any of the entries are dicts.
Note: This updates dest and returns it. | [
"Recursively",
"merge",
"two",
"lists",
"."
] | python | train |
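Example: both merge modes of merge_lists, illustrated with plain data (element-wise merging delegates dict entries to merge_dictionary from the same module, which is assumed to be importable alongside it).

a = [1, {'x': 1}, None]
b = [None, {'y': 2}, 3]
print(merge_lists(a, b))                               # [1, {'x': 1, 'y': 2}, 3]
print(merge_lists([1, 2], [2, 3], extend_lists=True))  # [1, 2, 3]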
phaethon/kamene | kamene/contrib/gsm_um.py | https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/gsm_um.py#L1860-L1874 | def modifyReject(LowLayerCompatibility_presence=0,
HighLayerCompatibility_presence=0):
"""MODIFY REJECT Section 9.3.15"""
a = TpPd(pd=0x3)
b = MessageType(mesType=0x13) # 00010011
c = BearerCapability()
d = Cause()
packet = a / b / c / d
if LowLayerCompatibility_presence == 1:
e = LowLayerCompatibilityHdr(ieiLLC=0x7C, eightBitLLC=0x0)
packet = packet / e
if HighLayerCompatibility_presence == 1:
f = HighLayerCompatibilityHdr(ieiHLC=0x7D, eightBitHLC=0x0)
packet = packet / f
return packet | [
"def",
"modifyReject",
"(",
"LowLayerCompatibility_presence",
"=",
"0",
",",
"HighLayerCompatibility_presence",
"=",
"0",
")",
":",
"a",
"=",
"TpPd",
"(",
"pd",
"=",
"0x3",
")",
"b",
"=",
"MessageType",
"(",
"mesType",
"=",
"0x13",
")",
"# 00010011",
"c",
"=",
"BearerCapability",
"(",
")",
"d",
"=",
"Cause",
"(",
")",
"packet",
"=",
"a",
"/",
"b",
"/",
"c",
"/",
"d",
"if",
"LowLayerCompatibility_presence",
"is",
"1",
":",
"e",
"=",
"LowLayerCompatibilityHdr",
"(",
"ieiLLC",
"=",
"0x7C",
",",
"eightBitLLC",
"=",
"0x0",
")",
"packet",
"=",
"packet",
"/",
"e",
"if",
"HighLayerCompatibility_presence",
"is",
"1",
":",
"f",
"=",
"HighLayerCompatibilityHdr",
"(",
"ieiHLC",
"=",
"0x7D",
",",
"eightBitHLC",
"=",
"0x0",
")",
"packet",
"=",
"packet",
"/",
"f",
"return",
"packet"
] | MODIFY REJECT Section 9.3.15 | [
"MODIFY",
"REJECT",
"Section",
"9",
".",
"3",
".",
"15"
] | python | train |
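Example (hedged sketch): building the message with both optional IEs present and dumping the resulting layer stack.

pkt = modifyReject(LowLayerCompatibility_presence=1,
                   HighLayerCompatibility_presence=1)
pkt.show()  # TpPd / MessageType / BearerCapability / Cause / LLC / HLC headers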
nephila/python-taiga | taiga/models/base.py | https://github.com/nephila/python-taiga/blob/5b471d6b8b59e5d410162a6f1c2f0d4188445a56/taiga/models/base.py#L221-L232 | def parse(cls, requester, entry):
"""
Turns a JSON object into a model instance.
"""
if not isinstance(entry, dict):
return entry
for key_to_parse, cls_to_parse in six.iteritems(cls.parser):
if key_to_parse in entry:
entry[key_to_parse] = cls_to_parse.parse(
requester, entry[key_to_parse]
)
return cls(requester, **entry) | [
"def",
"parse",
"(",
"cls",
",",
"requester",
",",
"entry",
")",
":",
"if",
"not",
"type",
"(",
"entry",
")",
"is",
"dict",
":",
"return",
"entry",
"for",
"key_to_parse",
",",
"cls_to_parse",
"in",
"six",
".",
"iteritems",
"(",
"cls",
".",
"parser",
")",
":",
"if",
"key_to_parse",
"in",
"entry",
":",
"entry",
"[",
"key_to_parse",
"]",
"=",
"cls_to_parse",
".",
"parse",
"(",
"requester",
",",
"entry",
"[",
"key_to_parse",
"]",
")",
"return",
"cls",
"(",
"requester",
",",
"*",
"*",
"entry",
")"
] | Turns a JSON object into a model instance. | [
"Turns",
"a",
"JSON",
"object",
"into",
"a",
"model",
"instance",
"."
] | python | train |
numberoverzero/bloop | bloop/search.py | https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/search.py#L245-L257 | def prepare(
self, engine=None, mode=None, model=None, index=None, key=None,
filter=None, projection=None, consistent=None, forward=None, parallel=None):
"""Validates the search parameters and builds the base request dict for each Query/Scan call."""
self.prepare_iterator_cls(engine, mode)
self.prepare_model(model, index, consistent)
self.prepare_key(key)
self.prepare_projection(projection)
self.prepare_filter(filter)
self.prepare_constraints(forward, parallel)
self.prepare_request() | [
"def",
"prepare",
"(",
"self",
",",
"engine",
"=",
"None",
",",
"mode",
"=",
"None",
",",
"model",
"=",
"None",
",",
"index",
"=",
"None",
",",
"key",
"=",
"None",
",",
"filter",
"=",
"None",
",",
"projection",
"=",
"None",
",",
"consistent",
"=",
"None",
",",
"forward",
"=",
"None",
",",
"parallel",
"=",
"None",
")",
":",
"self",
".",
"prepare_iterator_cls",
"(",
"engine",
",",
"mode",
")",
"self",
".",
"prepare_model",
"(",
"model",
",",
"index",
",",
"consistent",
")",
"self",
".",
"prepare_key",
"(",
"key",
")",
"self",
".",
"prepare_projection",
"(",
"projection",
")",
"self",
".",
"prepare_filter",
"(",
"filter",
")",
"self",
".",
"prepare_constraints",
"(",
"forward",
",",
"parallel",
")",
"self",
".",
"prepare_request",
"(",
")"
] | Validates the search parameters and builds the base request dict for each Query/Scan call. | [
"Validates",
"the",
"search",
"parameters",
"and",
"builds",
"the",
"base",
"request",
"dict",
"for",
"each",
"Query",
"/",
"Scan",
"call",
"."
] | python | train |
Azure/azure-sdk-for-python | azure-servicebus/azure/servicebus/receive_handler.py | https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicebus/azure/servicebus/receive_handler.py#L665-L702 | def list_sessions(self, updated_since=None, max_results=100, skip=0):
"""List session IDs.
List the Session IDs with pending messages in the queue where the state of the session
has been updated since the timestamp provided. If no timestamp is provided, all will be returned.
If the state of a Session has never been set, it will not be returned regardless of whether
there are messages pending.
:param updated_since: The UTC datetime from which to return updated pending Session IDs.
:type updated_since: datetime.datetime
:param max_results: The maximum number of Session IDs to return. Default value is 100.
:type max_results: int
:param skip: The page value to jump to. Default value is 0.
:type skip: int
:rtype: list[str]
Example:
.. literalinclude:: ../examples/test_examples.py
:start-after: [START list_sessions]
:end-before: [END list_sessions]
:language: python
:dedent: 4
:caption: List the ids of sessions with pending messages
"""
if int(max_results) < 1:
raise ValueError("max_results must be 1 or greater.")
self._can_run()
message = {
'last-updated-time': updated_since or datetime.datetime.utcfromtimestamp(0),
'skip': skip,
'top': max_results,
}
return self._mgmt_request_response(
REQUEST_RESPONSE_GET_MESSAGE_SESSIONS_OPERATION,
message,
mgmt_handlers.list_sessions_op) | [
"def",
"list_sessions",
"(",
"self",
",",
"updated_since",
"=",
"None",
",",
"max_results",
"=",
"100",
",",
"skip",
"=",
"0",
")",
":",
"if",
"int",
"(",
"max_results",
")",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"max_results must be 1 or greater.\"",
")",
"self",
".",
"_can_run",
"(",
")",
"message",
"=",
"{",
"'last-updated-time'",
":",
"updated_since",
"or",
"datetime",
".",
"datetime",
".",
"utcfromtimestamp",
"(",
"0",
")",
",",
"'skip'",
":",
"skip",
",",
"'top'",
":",
"max_results",
",",
"}",
"return",
"self",
".",
"_mgmt_request_response",
"(",
"REQUEST_RESPONSE_GET_MESSAGE_SESSIONS_OPERATION",
",",
"message",
",",
"mgmt_handlers",
".",
"list_sessions_op",
")"
] | List session IDs.
List the Session IDs with pending messages in the queue where the state of the session
has been updated since the timestamp provided. If no timestamp is provided, all will be returned.
If the state of a Session has never been set, it will not be returned regardless of whether
there are messages pending.
:param updated_since: The UTC datetime from which to return updated pending Session IDs.
:type updated_since: datetime.datetime
:param max_results: The maximum number of Session IDs to return. Default value is 100.
:type max_results: int
:param skip: The page value to jump to. Default value is 0.
:type skip: int
:rtype: list[str]
Example:
.. literalinclude:: ../examples/test_examples.py
:start-after: [START list_sessions]
:end-before: [END list_sessions]
:language: python
:dedent: 4
:caption: List the ids of sessions with pending messages | [
"List",
"session",
"IDs",
"."
] | python | test |
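The `list_sessions` record above documents a time-filtered lookup; a minimal usage sketch follows, assuming `receiver` is an already-constructed session receive handler exposing this method (how the receiver is built depends on the SDK version, so that part is omitted):

```python
import datetime

# Sessions whose state changed in the last hour; `receiver` is assumed above.
cutoff = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
session_ids = receiver.list_sessions(updated_since=cutoff, max_results=50)
for session_id in session_ids:
    print(session_id)
```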
python-cmd2/cmd2 | cmd2/rl_utils.py | https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/rl_utils.py#L151-L162 | def rl_get_point() -> int: # pragma: no cover
"""
Returns the offset of the current cursor position in rl_line_buffer
"""
if rl_type == RlType.GNU:
return ctypes.c_int.in_dll(readline_lib, "rl_point").value
elif rl_type == RlType.PYREADLINE:
return readline.rl.mode.l_buffer.point
else:
return 0 | [
"def",
"rl_get_point",
"(",
")",
"->",
"int",
":",
"# pragma: no cover",
"if",
"rl_type",
"==",
"RlType",
".",
"GNU",
":",
"return",
"ctypes",
".",
"c_int",
".",
"in_dll",
"(",
"readline_lib",
",",
"\"rl_point\"",
")",
".",
"value",
"elif",
"rl_type",
"==",
"RlType",
".",
"PYREADLINE",
":",
"return",
"readline",
".",
"rl",
".",
"mode",
".",
"l_buffer",
".",
"point",
"else",
":",
"return",
"0"
] | Returns the offset of the current cursor position in rl_line_buffer | [
"Returns",
"the",
"offset",
"of",
"the",
"current",
"cursor",
"position",
"in",
"rl_line_buffer"
] | python | train |
lambdamusic/Ontospy | ontospy/extras/hacks/sketch.py | https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/hacks/sketch.py#L134-L146 | def __serializedDot(self):
"""
DOT format:
digraph graphname {
a -> b [label=instanceOf];
b -> d [label=isA];
}
"""
temp = ""
for x,y,z in self.rdflib_graph.triples((None, None, None)):
temp += """"%s" -> "%s" [label="%s"];\n""" % (self.namespace_manager.normalizeUri(x), self.namespace_manager.normalizeUri(z), self.namespace_manager.normalizeUri(y))
temp = "digraph graphname {\n%s}" % temp
return temp | [
"def",
"__serializedDot",
"(",
"self",
")",
":",
"temp",
"=",
"\"\"",
"for",
"x",
",",
"y",
",",
"z",
"in",
"self",
".",
"rdflib_graph",
".",
"triples",
"(",
"(",
"None",
",",
"None",
",",
"None",
")",
")",
":",
"temp",
"+=",
"\"\"\"\"%s\" -> \"%s\" [label=\"%s\"];\\n\"\"\"",
"%",
"(",
"self",
".",
"namespace_manager",
".",
"normalizeUri",
"(",
"x",
")",
",",
"self",
".",
"namespace_manager",
".",
"normalizeUri",
"(",
"z",
")",
",",
"self",
".",
"namespace_manager",
".",
"normalizeUri",
"(",
"y",
")",
")",
"temp",
"=",
"\"digraph graphname {\\n%s}\"",
"%",
"temp",
"return",
"temp"
] | DOT format:
digraph graphname {
a -> b [label=instanceOf];
b -> d [label=isA];
} | [
"DOT",
"format",
":",
"digraph",
"graphname",
"{",
"a",
"-",
">",
"b",
"[",
"label",
"=",
"instanceOf",
"]",
";",
"b",
"-",
">",
"d",
"[",
"label",
"=",
"isA",
"]",
";",
"}"
] | python | train |
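The `__serializedDot` record emits one DOT edge per RDF triple; here is a self-contained sketch of the same triples-to-DOT idea, with plain tuples standing in for an rdflib graph and qnames standing in for `normalizeUri()` output:

```python
# Placeholder triples; in the record above these come from graph.triples().
triples = [
    ("ex:Dog", "rdfs:subClassOf", "ex:Animal"),
    ("ex:fido", "rdf:type", "ex:Dog"),
]
edges = "".join('"%s" -> "%s" [label="%s"];\n' % (s, o, p) for s, p, o in triples)
print("digraph graphname {\n%s}" % edges)
```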
pyca/pyopenssl | src/OpenSSL/crypto.py | https://github.com/pyca/pyopenssl/blob/1fbe064c50fd030948141d7d630673761525b0d0/src/OpenSSL/crypto.py#L1156-L1182 | def sign(self, pkey, digest):
"""
Sign the certificate with this key and digest type.
:param pkey: The key to sign with.
:type pkey: :py:class:`PKey`
:param digest: The name of the message digest to use.
:type digest: :py:class:`bytes`
:return: :py:data:`None`
"""
if not isinstance(pkey, PKey):
raise TypeError("pkey must be a PKey instance")
if pkey._only_public:
raise ValueError("Key only has public part")
if not pkey._initialized:
raise ValueError("Key is uninitialized")
evp_md = _lib.EVP_get_digestbyname(_byte_string(digest))
if evp_md == _ffi.NULL:
raise ValueError("No such digest method")
sign_result = _lib.X509_sign(self._x509, pkey._pkey, evp_md)
_openssl_assert(sign_result > 0) | [
"def",
"sign",
"(",
"self",
",",
"pkey",
",",
"digest",
")",
":",
"if",
"not",
"isinstance",
"(",
"pkey",
",",
"PKey",
")",
":",
"raise",
"TypeError",
"(",
"\"pkey must be a PKey instance\"",
")",
"if",
"pkey",
".",
"_only_public",
":",
"raise",
"ValueError",
"(",
"\"Key only has public part\"",
")",
"if",
"not",
"pkey",
".",
"_initialized",
":",
"raise",
"ValueError",
"(",
"\"Key is uninitialized\"",
")",
"evp_md",
"=",
"_lib",
".",
"EVP_get_digestbyname",
"(",
"_byte_string",
"(",
"digest",
")",
")",
"if",
"evp_md",
"==",
"_ffi",
".",
"NULL",
":",
"raise",
"ValueError",
"(",
"\"No such digest method\"",
")",
"sign_result",
"=",
"_lib",
".",
"X509_sign",
"(",
"self",
".",
"_x509",
",",
"pkey",
".",
"_pkey",
",",
"evp_md",
")",
"_openssl_assert",
"(",
"sign_result",
">",
"0",
")"
] | Sign the certificate with this key and digest type.
:param pkey: The key to sign with.
:type pkey: :py:class:`PKey`
:param digest: The name of the message digest to use.
:type digest: :py:class:`bytes`
:return: :py:data:`None` | [
"Sign",
"the",
"certificate",
"with",
"this",
"key",
"and",
"digest",
"type",
"."
] | python | test |
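A hedged end-to-end sketch of the `sign` method above: generate a key, fill a minimal certificate, and sign it with SHA-256. Subject fields and validity are placeholders, and older pyOpenSSL releases expect the digest name as bytes rather than str:

```python
from OpenSSL import crypto

key = crypto.PKey()
key.generate_key(crypto.TYPE_RSA, 2048)

cert = crypto.X509()
cert.get_subject().CN = "example.com"  # placeholder subject
cert.set_pubkey(key)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(60 * 60 * 24 * 365)  # valid for one year
cert.sign(key, "sha256")  # raises ValueError for an unknown digest name
```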
alefnula/tea | tea/utils/__init__.py | https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/utils/__init__.py#L177-L189 | def get_exception():
"""Return full formatted traceback as a string."""
trace = ""
exception = ""
exc_list = traceback.format_exception_only(
sys.exc_info()[0], sys.exc_info()[1]
)
for entry in exc_list:
exception += entry
tb_list = traceback.format_tb(sys.exc_info()[2])
for entry in tb_list:
trace += entry
return "%s\n%s" % (exception, trace) | [
"def",
"get_exception",
"(",
")",
":",
"trace",
"=",
"\"\"",
"exception",
"=",
"\"\"",
"exc_list",
"=",
"traceback",
".",
"format_exception_only",
"(",
"sys",
".",
"exc_info",
"(",
")",
"[",
"0",
"]",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
")",
"for",
"entry",
"in",
"exc_list",
":",
"exception",
"+=",
"entry",
"tb_list",
"=",
"traceback",
".",
"format_tb",
"(",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
")",
"for",
"entry",
"in",
"tb_list",
":",
"trace",
"+=",
"entry",
"return",
"\"%s\\n%s\"",
"%",
"(",
"exception",
",",
"trace",
")"
] | Return full formatted traceback as a string. | [
"Return",
"full",
"formatted",
"traceback",
"as",
"a",
"string",
"."
] | python | train |
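`get_exception` reads `sys.exc_info()`, so it only returns something useful when called from inside an active `except` block; a minimal sketch, assuming the function is imported from this module:

```python
try:
    1 / 0
except ZeroDivisionError:
    # Prints the formatted exception line first, then the traceback.
    print(get_exception())
```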
stevearc/dql | dql/expressions/constraint.py | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/expressions/constraint.py#L160-L170 | def _get_fields(self, attr):
""" Get the hash/range fields of all joined constraints """
ret = set()
if "OR" in self.pieces:
return ret
for i in range(0, len(self.pieces), 2):
const = self.pieces[i]
field = getattr(const, attr)
if field is not None:
ret.add(field)
return ret | [
"def",
"_get_fields",
"(",
"self",
",",
"attr",
")",
":",
"ret",
"=",
"set",
"(",
")",
"if",
"\"OR\"",
"in",
"self",
".",
"pieces",
":",
"return",
"ret",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"self",
".",
"pieces",
")",
",",
"2",
")",
":",
"const",
"=",
"self",
".",
"pieces",
"[",
"i",
"]",
"field",
"=",
"getattr",
"(",
"const",
",",
"attr",
")",
"if",
"field",
"is",
"not",
"None",
":",
"ret",
".",
"add",
"(",
"field",
")",
"return",
"ret"
] | Get the hash/range fields of all joined constraints | [
"Get",
"the",
"hash",
"/",
"range",
"fields",
"of",
"all",
"joined",
"constraints"
] | python | train |
mottosso/be | be/cli.py | https://github.com/mottosso/be/blob/0f3d4f3597c71223f616d78c6d9b2c8dffcd8a71/be/cli.py#L614-L620 | def mkdir(dir, enter):
"""Create directory with template for topic of the current environment
"""
if not os.path.exists(dir):
os.makedirs(dir) | [
"def",
"mkdir",
"(",
"dir",
",",
"enter",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dir",
")",
":",
"os",
".",
"makedirs",
"(",
"dir",
")"
] | Create directory with template for topic of the current environment | [
"Create",
"directory",
"with",
"template",
"for",
"topic",
"of",
"the",
"current",
"environment"
] | python | train |
annoviko/pyclustering | pyclustering/nnet/hhn.py | https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/nnet/hhn.py#L465-L549 | def hnn_state(self, inputs, t, argv):
"""!
@brief Returns new values of excitatory and inhibitory parts of oscillator and potential of oscillator.
@param[in] inputs (list): States of oscillator for integration [v, m, h, n] (see description below).
@param[in] t (double): Current time of simulation.
@param[in] argv (tuple): Extra arguments that are not used for integration - index of oscillator.
@return (list) new values of oscillator [v, m, h, n], where:
v - membrane potantial of oscillator,
m - activation conductance of the sodium channel,
h - inactication conductance of the sodium channel,
n - activation conductance of the potassium channel.
"""
index = argv;
v = inputs[0]; # membrane potential (v).
m = inputs[1]; # activation conductance of the sodium channel (m).
h = inputs[2]; # inactivation conductance of the sodium channel (h).
n = inputs[3]; # activation conductance of the potassium channel (n).
# Calculate ion current
# gNa * m[i]^3 * h * (v[i] - vNa) + gK * n[i]^4 * (v[i] - vK) + gL (v[i] - vL)
active_sodium_part = self._params.gNa * (m ** 3) * h * (v - self._params.vNa);
inactive_sodium_part = self._params.gK * (n ** 4) * (v - self._params.vK);
active_potassium_part = self._params.gL * (v - self._params.vL);
Iion = active_sodium_part + inactive_sodium_part + active_potassium_part;
Iext = 0.0;
Isyn = 0.0;
if (index < self._num_osc):
# PN - peripheral neuron - calculation of external current and synaptic current.
Iext = self._stimulus[index] * self._noise[index]; # probably noise can be pre-defined for reducing complexity
memory_impact1 = 0.0;
for i in range(0, len(self._central_element[0].pulse_generation_time)):
memory_impact1 += self.__alfa_function(t - self._central_element[0].pulse_generation_time[i], self._params.alfa_inhibitory, self._params.betta_inhibitory);
memory_impact2 = 0.0;
for i in range(0, len(self._central_element[1].pulse_generation_time)):
memory_impact2 += self.__alfa_function(t - self._central_element[1].pulse_generation_time[i], self._params.alfa_inhibitory, self._params.betta_inhibitory);
Isyn = self._params.w2 * (v - self._params.Vsyninh) * memory_impact1 + self._link_weight3[index] * (v - self._params.Vsyninh) * memory_impact2;
else:
# CN - central element.
central_index = index - self._num_osc;
if (central_index == 0):
Iext = self._params.Icn1; # CN1
memory_impact = 0.0;
for index_oscillator in range(0, self._num_osc):
for index_generation in range(0, len(self._pulse_generation_time[index_oscillator])):
memory_impact += self.__alfa_function(t - self._pulse_generation_time[index_oscillator][index_generation], self._params.alfa_excitatory, self._params.betta_excitatory);
Isyn = self._params.w1 * (v - self._params.Vsynexc) * memory_impact;
elif (central_index == 1):
Iext = self._params.Icn2; # CN2
Isyn = 0.0;
else:
assert 0;
# Membrane potential
dv = -Iion + Iext - Isyn;
# Calculate variables
potential = v - self._params.vRest;
am = (2.5 - 0.1 * potential) / (math.exp(2.5 - 0.1 * potential) - 1.0);
ah = 0.07 * math.exp(-potential / 20.0);
an = (0.1 - 0.01 * potential) / (math.exp(1.0 - 0.1 * potential) - 1.0);
bm = 4.0 * math.exp(-potential / 18.0);
bh = 1.0 / (math.exp(3.0 - 0.1 * potential) + 1.0);
bn = 0.125 * math.exp(-potential / 80.0);
dm = am * (1.0 - m) - bm * m;
dh = ah * (1.0 - h) - bh * h;
dn = an * (1.0 - n) - bn * n;
return [dv, dm, dh, dn]; | [
"def",
"hnn_state",
"(",
"self",
",",
"inputs",
",",
"t",
",",
"argv",
")",
":",
"index",
"=",
"argv",
"v",
"=",
"inputs",
"[",
"0",
"]",
"# membrane potential (v).\r",
"m",
"=",
"inputs",
"[",
"1",
"]",
"# activation conductance of the sodium channel (m).\r",
"h",
"=",
"inputs",
"[",
"2",
"]",
"# inactivaton conductance of the sodium channel (h).\r",
"n",
"=",
"inputs",
"[",
"3",
"]",
"# activation conductance of the potassium channel (n).\r",
"# Calculate ion current\r",
"# gNa * m[i]^3 * h * (v[i] - vNa) + gK * n[i]^4 * (v[i] - vK) + gL (v[i] - vL)\r",
"active_sodium_part",
"=",
"self",
".",
"_params",
".",
"gNa",
"*",
"(",
"m",
"**",
"3",
")",
"*",
"h",
"*",
"(",
"v",
"-",
"self",
".",
"_params",
".",
"vNa",
")",
"inactive_sodium_part",
"=",
"self",
".",
"_params",
".",
"gK",
"*",
"(",
"n",
"**",
"4",
")",
"*",
"(",
"v",
"-",
"self",
".",
"_params",
".",
"vK",
")",
"active_potassium_part",
"=",
"self",
".",
"_params",
".",
"gL",
"*",
"(",
"v",
"-",
"self",
".",
"_params",
".",
"vL",
")",
"Iion",
"=",
"active_sodium_part",
"+",
"inactive_sodium_part",
"+",
"active_potassium_part",
"Iext",
"=",
"0.0",
"Isyn",
"=",
"0.0",
"if",
"(",
"index",
"<",
"self",
".",
"_num_osc",
")",
":",
"# PN - peripheral neuron - calculation of external current and synaptic current.\r",
"Iext",
"=",
"self",
".",
"_stimulus",
"[",
"index",
"]",
"*",
"self",
".",
"_noise",
"[",
"index",
"]",
"# probably noise can be pre-defined for reducting compexity\r",
"memory_impact1",
"=",
"0.0",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"self",
".",
"_central_element",
"[",
"0",
"]",
".",
"pulse_generation_time",
")",
")",
":",
"memory_impact1",
"+=",
"self",
".",
"__alfa_function",
"(",
"t",
"-",
"self",
".",
"_central_element",
"[",
"0",
"]",
".",
"pulse_generation_time",
"[",
"i",
"]",
",",
"self",
".",
"_params",
".",
"alfa_inhibitory",
",",
"self",
".",
"_params",
".",
"betta_inhibitory",
")",
"memory_impact2",
"=",
"0.0",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"self",
".",
"_central_element",
"[",
"1",
"]",
".",
"pulse_generation_time",
")",
")",
":",
"memory_impact2",
"+=",
"self",
".",
"__alfa_function",
"(",
"t",
"-",
"self",
".",
"_central_element",
"[",
"1",
"]",
".",
"pulse_generation_time",
"[",
"i",
"]",
",",
"self",
".",
"_params",
".",
"alfa_inhibitory",
",",
"self",
".",
"_params",
".",
"betta_inhibitory",
")",
"Isyn",
"=",
"self",
".",
"_params",
".",
"w2",
"*",
"(",
"v",
"-",
"self",
".",
"_params",
".",
"Vsyninh",
")",
"*",
"memory_impact1",
"+",
"self",
".",
"_link_weight3",
"[",
"index",
"]",
"*",
"(",
"v",
"-",
"self",
".",
"_params",
".",
"Vsyninh",
")",
"*",
"memory_impact2",
"else",
":",
"# CN - central element.\r",
"central_index",
"=",
"index",
"-",
"self",
".",
"_num_osc",
"if",
"(",
"central_index",
"==",
"0",
")",
":",
"Iext",
"=",
"self",
".",
"_params",
".",
"Icn1",
"# CN1\r",
"memory_impact",
"=",
"0.0",
"for",
"index_oscillator",
"in",
"range",
"(",
"0",
",",
"self",
".",
"_num_osc",
")",
":",
"for",
"index_generation",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"self",
".",
"_pulse_generation_time",
"[",
"index_oscillator",
"]",
")",
")",
":",
"memory_impact",
"+=",
"self",
".",
"__alfa_function",
"(",
"t",
"-",
"self",
".",
"_pulse_generation_time",
"[",
"index_oscillator",
"]",
"[",
"index_generation",
"]",
",",
"self",
".",
"_params",
".",
"alfa_excitatory",
",",
"self",
".",
"_params",
".",
"betta_excitatory",
")",
"Isyn",
"=",
"self",
".",
"_params",
".",
"w1",
"*",
"(",
"v",
"-",
"self",
".",
"_params",
".",
"Vsynexc",
")",
"*",
"memory_impact",
"elif",
"(",
"central_index",
"==",
"1",
")",
":",
"Iext",
"=",
"self",
".",
"_params",
".",
"Icn2",
"# CN2\r",
"Isyn",
"=",
"0.0",
"else",
":",
"assert",
"0",
"# Membrane potential\r",
"dv",
"=",
"-",
"Iion",
"+",
"Iext",
"-",
"Isyn",
"# Calculate variables\r",
"potential",
"=",
"v",
"-",
"self",
".",
"_params",
".",
"vRest",
"am",
"=",
"(",
"2.5",
"-",
"0.1",
"*",
"potential",
")",
"/",
"(",
"math",
".",
"exp",
"(",
"2.5",
"-",
"0.1",
"*",
"potential",
")",
"-",
"1.0",
")",
"ah",
"=",
"0.07",
"*",
"math",
".",
"exp",
"(",
"-",
"potential",
"/",
"20.0",
")",
"an",
"=",
"(",
"0.1",
"-",
"0.01",
"*",
"potential",
")",
"/",
"(",
"math",
".",
"exp",
"(",
"1.0",
"-",
"0.1",
"*",
"potential",
")",
"-",
"1.0",
")",
"bm",
"=",
"4.0",
"*",
"math",
".",
"exp",
"(",
"-",
"potential",
"/",
"18.0",
")",
"bh",
"=",
"1.0",
"/",
"(",
"math",
".",
"exp",
"(",
"3.0",
"-",
"0.1",
"*",
"potential",
")",
"+",
"1.0",
")",
"bn",
"=",
"0.125",
"*",
"math",
".",
"exp",
"(",
"-",
"potential",
"/",
"80.0",
")",
"dm",
"=",
"am",
"*",
"(",
"1.0",
"-",
"m",
")",
"-",
"bm",
"*",
"m",
"dh",
"=",
"ah",
"*",
"(",
"1.0",
"-",
"h",
")",
"-",
"bh",
"*",
"h",
"dn",
"=",
"an",
"*",
"(",
"1.0",
"-",
"n",
")",
"-",
"bn",
"*",
"n",
"return",
"[",
"dv",
",",
"dm",
",",
"dh",
",",
"dn",
"]"
] | !
@brief Returns new values of excitatory and inhibitory parts of oscillator and potential of oscillator.
@param[in] inputs (list): States of oscillator for integration [v, m, h, n] (see description below).
@param[in] t (double): Current time of simulation.
@param[in] argv (tuple): Extra arguments that are not used for integration - index of oscillator.
@return (list) new values of oscillator [v, m, h, n], where:
v - membrane potantial of oscillator,
m - activation conductance of the sodium channel,
h - inactication conductance of the sodium channel,
n - activation conductance of the potassium channel. | [
"!"
] | python | valid |
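For reference, the ODE system this `hnn_state` record integrates is the standard Hodgkin-Huxley form (with the membrane capacitance normalized to 1, as in the code above):

```latex
\begin{aligned}
\frac{dv}{dt} &= -I_{\mathrm{ion}} + I_{\mathrm{ext}} - I_{\mathrm{syn}},\\
I_{\mathrm{ion}} &= g_{\mathrm{Na}}\, m^{3} h\,(v - v_{\mathrm{Na}})
  + g_{\mathrm{K}}\, n^{4}\,(v - v_{\mathrm{K}})
  + g_{\mathrm{L}}\,(v - v_{\mathrm{L}}),\\
\frac{dx}{dt} &= \alpha_x(v)\,(1 - x) - \beta_x(v)\, x,
  \qquad x \in \{m, h, n\}.
\end{aligned}
```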
Telefonica/toolium | toolium/selenoid.py | https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/selenoid.py#L75-L109 | def __download_file(self, url, path_file, timeout):
"""
download a file from the server using a request with retries policy
:param url: server url to request
:param path_file: path and file where to download
:param timeout: threshold until the video file is downloaded
:return boolean
"""
status_code = 0
init_time = time.time()
self.driver_wrapper.logger.info('Downloading file from Selenoid node: %s' % url)
# retries policy
while status_code != STATUS_OK and time.time() - init_time < float(timeout):
body = requests.get(url)
status_code = body.status_code
if status_code != STATUS_OK:
time.sleep(1)
took = time.time() - init_time # time used to download the file
# create the folders and store the file downloaded
if status_code == STATUS_OK:
path, name = os.path.split(path_file)
if not os.path.exists(path):
os.makedirs(path)
try:
fp = open(path_file, 'wb')
fp.write(body.content)
fp.close()
self.driver_wrapper.logger.info('File has been downloaded successfully to "%s" and took %d '
'seconds' % (path_file, took))
return True
except IOError as e:
self.driver_wrapper.logger.warn('Error writing downloaded file in "%s":\n %s' % (path_file, e))
else:
self.driver_wrapper.logger.warn('File "%s" does not exist in the server after %s seconds' % (url, timeout))
return False | [
"def",
"__download_file",
"(",
"self",
",",
"url",
",",
"path_file",
",",
"timeout",
")",
":",
"status_code",
"=",
"0",
"init_time",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"driver_wrapper",
".",
"logger",
".",
"info",
"(",
"'Downloading file from Selenoid node: %s'",
"%",
"url",
")",
"# retries policy",
"while",
"status_code",
"!=",
"STATUS_OK",
"and",
"time",
".",
"time",
"(",
")",
"-",
"init_time",
"<",
"float",
"(",
"timeout",
")",
":",
"body",
"=",
"requests",
".",
"get",
"(",
"url",
")",
"status_code",
"=",
"body",
".",
"status_code",
"if",
"status_code",
"!=",
"STATUS_OK",
":",
"time",
".",
"sleep",
"(",
"1",
")",
"took",
"=",
"time",
".",
"time",
"(",
")",
"-",
"init_time",
"# time used to download the file",
"# create the folders and store the file downloaded",
"if",
"status_code",
"==",
"STATUS_OK",
":",
"path",
",",
"name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"path_file",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"try",
":",
"fp",
"=",
"open",
"(",
"path_file",
",",
"'wb'",
")",
"fp",
".",
"write",
"(",
"body",
".",
"content",
")",
"fp",
".",
"close",
"(",
")",
"self",
".",
"driver_wrapper",
".",
"logger",
".",
"info",
"(",
"'File has been downloaded successfully to \"%s\" and took %d '",
"'seconds'",
"%",
"(",
"path_file",
",",
"took",
")",
")",
"return",
"True",
"except",
"IOError",
"as",
"e",
":",
"self",
".",
"driver_wrapper",
".",
"logger",
".",
"warn",
"(",
"'Error writing downloaded file in \"%s\":\\n %s'",
"%",
"(",
"path_file",
",",
"e",
")",
")",
"else",
":",
"self",
".",
"driver_wrapper",
".",
"logger",
".",
"warn",
"(",
"'File \"%s\" does not exist in the server after %s seconds'",
"%",
"(",
"url",
",",
"timeout",
")",
")",
"return",
"False"
] | download a file from the server using a request with retries policy
:param url: server url to request
:param path_file: path and file where to download
:param timeout: threshold until the video file is downloaded
:return boolean | [
"download",
"a",
"file",
"from",
"the",
"server",
"using",
"a",
"request",
"with",
"retries",
"policy",
":",
"param",
"url",
":",
"server",
"url",
"to",
"request",
":",
"param",
"path_file",
":",
"path",
"and",
"file",
"where",
"to",
"download",
":",
"param",
"timeout",
":",
"threshold",
"until",
"the",
"video",
"file",
"is",
"downloaded",
":",
"return",
"boolean"
] | python | train |
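The `__download_file` record combines polling retries with a wall-clock deadline; a stripped-down, self-contained sketch of the same pattern (URL and destination are placeholders):

```python
import os
import time

import requests

def download_with_retries(url, path_file, timeout=30.0):
    deadline = time.time() + float(timeout)
    while time.time() < deadline:
        response = requests.get(url)
        if response.status_code == 200:
            os.makedirs(os.path.dirname(path_file) or ".", exist_ok=True)
            with open(path_file, "wb") as fp:
                fp.write(response.content)
            return True
        time.sleep(1)  # back off before asking the server again
    return False
```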
mgedmin/findimports | findimports.py | https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L483-L511 | def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
"""Given a fully qualified name, find what module contains it."""
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name | [
"def",
"findModuleOfName",
"(",
"self",
",",
"dotted_name",
",",
"level",
",",
"filename",
",",
"extrapath",
"=",
"None",
")",
":",
"if",
"dotted_name",
".",
"endswith",
"(",
"'.*'",
")",
":",
"return",
"dotted_name",
"[",
":",
"-",
"2",
"]",
"name",
"=",
"dotted_name",
"# extrapath is None only in a couple of test cases; in real life it's",
"# always present",
"if",
"level",
"and",
"level",
">",
"1",
"and",
"extrapath",
":",
"# strip trailing path bits for each extra level to account for",
"# relative imports",
"# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)",
"# from .. import X has level == 2 and one trailing path component must go",
"# from ... import X has level == 3 and two trailing path components must go",
"extrapath",
"=",
"extrapath",
".",
"split",
"(",
"os",
".",
"path",
".",
"sep",
")",
"level",
"-=",
"1",
"extrapath",
"=",
"extrapath",
"[",
"0",
":",
"-",
"level",
"]",
"extrapath",
"=",
"os",
".",
"path",
".",
"sep",
".",
"join",
"(",
"extrapath",
")",
"while",
"name",
":",
"candidate",
"=",
"self",
".",
"isModule",
"(",
"name",
",",
"extrapath",
")",
"if",
"candidate",
":",
"return",
"candidate",
"candidate",
"=",
"self",
".",
"isPackage",
"(",
"name",
",",
"extrapath",
")",
"if",
"candidate",
":",
"return",
"candidate",
"name",
"=",
"name",
"[",
":",
"name",
".",
"rfind",
"(",
"'.'",
")",
"]",
"self",
".",
"warn",
"(",
"dotted_name",
",",
"'%s: could not find %s'",
",",
"filename",
",",
"dotted_name",
")",
"return",
"dotted_name"
] | Given a fully qualified name, find what module contains it. | [
"Given",
"a",
"fully",
"qualified",
"name",
"find",
"what",
"module",
"contains",
"it",
"."
] | python | train |
david-cortes/costsensitive | costsensitive/__init__.py | https://github.com/david-cortes/costsensitive/blob/355fbf20397ce673ce9e22048b6c52dbeeb354cc/costsensitive/__init__.py#L722-L741 | def fit(self, X, C):
"""
Fit one weighted classifier per class
Parameters
----------
X : array (n_samples, n_features)
The data on which to fit a cost-sensitive classifier.
C : array (n_samples, n_classes)
The cost of predicting each label for each observation (more means worse).
"""
X, C = _check_fit_input(X, C)
C = np.asfortranarray(C)
self.nclasses = C.shape[1]
self.classifiers = [deepcopy(self.base_classifier) for i in range(self.nclasses)]
if not self.weight_simple_diff:
C = WeightedAllPairs._calculate_v(self, C)
Parallel(n_jobs=self.njobs, verbose=0, require="sharedmem")(delayed(self._fit)(c, X, C) for c in range(self.nclasses))
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"C",
")",
":",
"X",
",",
"C",
"=",
"_check_fit_input",
"(",
"X",
",",
"C",
")",
"C",
"=",
"np",
".",
"asfortranarray",
"(",
"C",
")",
"self",
".",
"nclasses",
"=",
"C",
".",
"shape",
"[",
"1",
"]",
"self",
".",
"classifiers",
"=",
"[",
"deepcopy",
"(",
"self",
".",
"base_classifier",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"nclasses",
")",
"]",
"if",
"not",
"self",
".",
"weight_simple_diff",
":",
"C",
"=",
"WeightedAllPairs",
".",
"_calculate_v",
"(",
"self",
",",
"C",
")",
"Parallel",
"(",
"n_jobs",
"=",
"self",
".",
"njobs",
",",
"verbose",
"=",
"0",
",",
"require",
"=",
"\"sharedmem\"",
")",
"(",
"delayed",
"(",
"self",
".",
"_fit",
")",
"(",
"c",
",",
"X",
",",
"C",
")",
"for",
"c",
"in",
"range",
"(",
"self",
".",
"nclasses",
")",
")",
"return",
"self"
] | Fit one weighted classifier per class
Parameters
----------
X : array (n_samples, n_features)
The data on which to fit a cost-sensitive classifier.
C : array (n_samples, n_classes)
The cost of predicting each label for each observation (more means worse). | [
"Fit",
"one",
"weighted",
"classifier",
"per",
"class",
"Parameters",
"----------",
"X",
":",
"array",
"(",
"n_samples",
"n_features",
")",
"The",
"data",
"on",
"which",
"to",
"fit",
"a",
"cost",
"-",
"sensitive",
"classifier",
".",
"C",
":",
"array",
"(",
"n_samples",
"n_classes",
")",
"The",
"cost",
"of",
"predicting",
"each",
"label",
"for",
"each",
"observation",
"(",
"more",
"means",
"worse",
")",
"."
] | python | train |
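A hedged usage sketch for the `fit` record above: one weighted classifier per class, driven by a per-label cost matrix. The wrapper class name is an assumption (the package exposes several reductions), and the data is random placeholder input:

```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from costsensitive import WeightedOneVsRest  # assumed wrapper for this fit()

X = np.random.rand(100, 5)   # 100 samples, 5 features
C = np.random.rand(100, 3)   # cost of predicting each of 3 labels per sample
model = WeightedOneVsRest(LogisticRegression())
model.fit(X, C)
```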
kensho-technologies/graphql-compiler | graphql_compiler/schema_generation/graphql_schema.py | https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/graphql_schema.py#L212-L225 | def _create_union_types_specification(schema_graph, graphql_types, hidden_classes, base_name):
"""Return a function that gives the types in the union type rooted at base_name."""
# When edges point to vertices of type base_name, and base_name is both non-abstract and
# has subclasses, we need to represent the edge endpoint type with a union type based on
# base_name and its subclasses. This function calculates what types that union should include.
def types_spec():
"""Return a list of GraphQL types that this class' corresponding union type includes."""
return [
graphql_types[x]
for x in sorted(list(schema_graph.get_subclass_set(base_name)))
if x not in hidden_classes
]
return types_spec | [
"def",
"_create_union_types_specification",
"(",
"schema_graph",
",",
"graphql_types",
",",
"hidden_classes",
",",
"base_name",
")",
":",
"# When edges point to vertices of type base_name, and base_name is both non-abstract and",
"# has subclasses, we need to represent the edge endpoint type with a union type based on",
"# base_name and its subclasses. This function calculates what types that union should include.",
"def",
"types_spec",
"(",
")",
":",
"\"\"\"Return a list of GraphQL types that this class' corresponding union type includes.\"\"\"",
"return",
"[",
"graphql_types",
"[",
"x",
"]",
"for",
"x",
"in",
"sorted",
"(",
"list",
"(",
"schema_graph",
".",
"get_subclass_set",
"(",
"base_name",
")",
")",
")",
"if",
"x",
"not",
"in",
"hidden_classes",
"]",
"return",
"types_spec"
] | Return a function that gives the types in the union type rooted at base_name. | [
"Return",
"a",
"function",
"that",
"gives",
"the",
"types",
"in",
"the",
"union",
"type",
"rooted",
"at",
"base_name",
"."
] | python | train |
abingham/docopt-subcommands | docopt_subcommands/subcommands.py | https://github.com/abingham/docopt-subcommands/blob/4b5cd75bb8eed01f9405345446ca58e9a29d67ad/docopt_subcommands/subcommands.py#L90-L96 | def command(self, name=None):
"""A decorator to add subcommands.
"""
def decorator(f):
self.add_command(f, name)
return f
return decorator | [
"def",
"command",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"self",
".",
"add_command",
"(",
"f",
",",
"name",
")",
"return",
"f",
"return",
"decorator"
] | A decorator to add subcommands. | [
"A",
"decorator",
"to",
"add",
"subcommands",
"."
] | python | train |
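A minimal sketch of the `command` decorator in use; `sc` stands in for an instance of the dispatcher above, and the handler signature and docstring-as-usage convention are assumptions about this library:

```python
@sc.command(name="greet")
def greet(args):
    """usage: {program} greet <name>"""
    print("Hello, %s!" % args["<name>"])
```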
obriencj/python-javatools | javatools/__init__.py | https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/__init__.py#L222-L248 | def deref_const(self, index):
"""
returns the dereferenced value from the const pool. For simple
types, this will be a single value indicating the constant.
For more complex types, such as fieldref, methodref, etc, this
will return a tuple.
"""
if not index:
raise IndexError("Requested const 0")
t, v = self.consts[index]
if t in (CONST_Utf8, CONST_Integer, CONST_Float,
CONST_Long, CONST_Double):
return v
elif t in (CONST_Class, CONST_String, CONST_MethodType):
return self.deref_const(v)
elif t in (CONST_Fieldref, CONST_Methodref,
CONST_InterfaceMethodref, CONST_NameAndType,
CONST_ModuleId):
return tuple(self.deref_const(i) for i in v)
else:
raise Unimplemented("Unknown constant pool type %r" % t) | [
"def",
"deref_const",
"(",
"self",
",",
"index",
")",
":",
"if",
"not",
"index",
":",
"raise",
"IndexError",
"(",
"\"Requested const 0\"",
")",
"t",
",",
"v",
"=",
"self",
".",
"consts",
"[",
"index",
"]",
"if",
"t",
"in",
"(",
"CONST_Utf8",
",",
"CONST_Integer",
",",
"CONST_Float",
",",
"CONST_Long",
",",
"CONST_Double",
")",
":",
"return",
"v",
"elif",
"t",
"in",
"(",
"CONST_Class",
",",
"CONST_String",
",",
"CONST_MethodType",
")",
":",
"return",
"self",
".",
"deref_const",
"(",
"v",
")",
"elif",
"t",
"in",
"(",
"CONST_Fieldref",
",",
"CONST_Methodref",
",",
"CONST_InterfaceMethodref",
",",
"CONST_NameAndType",
",",
"CONST_ModuleId",
")",
":",
"return",
"tuple",
"(",
"self",
".",
"deref_const",
"(",
"i",
")",
"for",
"i",
"in",
"v",
")",
"else",
":",
"raise",
"Unimplemented",
"(",
"\"Unknown constant pool type %r\"",
"%",
"t",
")"
] | returns the dereferenced value from the const pool. For simple
types, this will be a single value indicating the constant.
For more complex types, such as fieldref, methodref, etc, this
will return a tuple. | [
"returns",
"the",
"dereferenced",
"value",
"from",
"the",
"const",
"pool",
".",
"For",
"simple",
"types",
"this",
"will",
"be",
"a",
"single",
"value",
"indicating",
"the",
"constant",
".",
"For",
"more",
"complex",
"types",
"such",
"as",
"fieldref",
"methodref",
"etc",
"this",
"will",
"return",
"a",
"tuple",
"."
] | python | train |
ilevkivskyi/typing_inspect | typing_inspect.py | https://github.com/ilevkivskyi/typing_inspect/blob/fd81278cc440b6003f8298bcb22d5bc0f82ee3cd/typing_inspect.py#L66-L90 | def is_callable_type(tp):
"""Test if the type is a generic callable type, including subclasses
excluding non-generic types and callables.
Examples::
is_callable_type(int) == False
is_callable_type(type) == False
is_callable_type(Callable) == True
is_callable_type(Callable[..., int]) == True
is_callable_type(Callable[[int, int], Iterable[str]]) == True
class MyClass(Callable[[int], int]):
...
is_callable_type(MyClass) == True
For more general tests use callable(), for more precise test
(excluding subclasses) use::
get_origin(tp) is collections.abc.Callable # Callable prior to Python 3.7
"""
if NEW_TYPING:
return (tp is Callable or isinstance(tp, _GenericAlias) and
tp.__origin__ is collections.abc.Callable or
isinstance(tp, type) and issubclass(tp, Generic) and
issubclass(tp, collections.abc.Callable))
return type(tp) is CallableMeta | [
"def",
"is_callable_type",
"(",
"tp",
")",
":",
"if",
"NEW_TYPING",
":",
"return",
"(",
"tp",
"is",
"Callable",
"or",
"isinstance",
"(",
"tp",
",",
"_GenericAlias",
")",
"and",
"tp",
".",
"__origin__",
"is",
"collections",
".",
"abc",
".",
"Callable",
"or",
"isinstance",
"(",
"tp",
",",
"type",
")",
"and",
"issubclass",
"(",
"tp",
",",
"Generic",
")",
"and",
"issubclass",
"(",
"tp",
",",
"collections",
".",
"abc",
".",
"Callable",
")",
")",
"return",
"type",
"(",
"tp",
")",
"is",
"CallableMeta"
] | Test if the type is a generic callable type, including subclasses
excluding non-generic types and callables.
Examples::
is_callable_type(int) == False
is_callable_type(type) == False
is_callable_type(Callable) == True
is_callable_type(Callable[..., int]) == True
is_callable_type(Callable[[int, int], Iterable[str]]) == True
class MyClass(Callable[[int], int]):
...
is_callable_type(MyClass) == True
For more general tests use callable(), for more precise test
(excluding subclasses) use::
get_origin(tp) is collections.abc.Callable # Callable prior to Python 3.7 | [
"Test",
"if",
"the",
"type",
"is",
"a",
"generic",
"callable",
"type",
"including",
"subclasses",
"excluding",
"non",
"-",
"generic",
"types",
"and",
"callables",
".",
"Examples",
"::"
] | python | train |
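Quick checks mirroring the docstring examples above, runnable wherever `typing_inspect` is installed:

```python
from typing import Callable

from typing_inspect import is_callable_type

assert is_callable_type(Callable)
assert is_callable_type(Callable[[int], str])
assert not is_callable_type(int)
assert not is_callable_type(type)
```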
watson-developer-cloud/python-sdk | ibm_watson/compare_comply_v1.py | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/compare_comply_v1.py#L3444-L3451 | def _from_dict(cls, _dict):
"""Initialize a KeyValuePair object from a json dictionary."""
args = {}
if 'key' in _dict:
args['key'] = Key._from_dict(_dict.get('key'))
if 'value' in _dict:
args['value'] = Value._from_dict(_dict.get('value'))
return cls(**args) | [
"def",
"_from_dict",
"(",
"cls",
",",
"_dict",
")",
":",
"args",
"=",
"{",
"}",
"if",
"'key'",
"in",
"_dict",
":",
"args",
"[",
"'key'",
"]",
"=",
"Key",
".",
"_from_dict",
"(",
"_dict",
".",
"get",
"(",
"'key'",
")",
")",
"if",
"'value'",
"in",
"_dict",
":",
"args",
"[",
"'value'",
"]",
"=",
"Value",
".",
"_from_dict",
"(",
"_dict",
".",
"get",
"(",
"'value'",
")",
")",
"return",
"cls",
"(",
"*",
"*",
"args",
")"
] | Initialize a KeyValuePair object from a json dictionary. | [
"Initialize",
"a",
"KeyValuePair",
"object",
"from",
"a",
"json",
"dictionary",
"."
] | python | train |
angr/angr | angr/state_plugins/gdb.py | https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/gdb.py#L33-L49 | def set_stack(self, stack_dump, stack_top):
"""
Stack dump is a dump of the stack from gdb, i.e. the result of the following gdb command :
``dump binary memory [stack_dump] [begin_addr] [end_addr]``
We set the stack to the same addresses as the gdb session to avoid pointers corruption.
:param stack_dump: The dump file.
:param stack_top: The address of the top of the stack in the gdb session.
"""
data = self._read_data(stack_dump)
self.real_stack_top = stack_top
addr = stack_top - len(data) # Address of the bottom of the stack
l.info("Setting stack from 0x%x up to %#x", addr, stack_top)
#FIXME: we should probably make sure we don't overwrite other stuff loaded there
self._write(addr, data) | [
"def",
"set_stack",
"(",
"self",
",",
"stack_dump",
",",
"stack_top",
")",
":",
"data",
"=",
"self",
".",
"_read_data",
"(",
"stack_dump",
")",
"self",
".",
"real_stack_top",
"=",
"stack_top",
"addr",
"=",
"stack_top",
"-",
"len",
"(",
"data",
")",
"# Address of the bottom of the stack",
"l",
".",
"info",
"(",
"\"Setting stack from 0x%x up to %#x\"",
",",
"addr",
",",
"stack_top",
")",
"#FIXME: we should probably make we don't overwrite other stuff loaded there",
"self",
".",
"_write",
"(",
"addr",
",",
"data",
")"
] | Stack dump is a dump of the stack from gdb, i.e. the result of the following gdb command :
``dump binary memory [stack_dump] [begin_addr] [end_addr]``
We set the stack to the same addresses as the gdb session to avoid pointers corruption.
:param stack_dump: The dump file.
:param stack_top: The address of the top of the stack in the gdb session. | [
"Stack",
"dump",
"is",
"a",
"dump",
"of",
"the",
"stack",
"from",
"gdb",
"i",
".",
"e",
".",
"the",
"result",
"of",
"the",
"following",
"gdb",
"command",
":"
] | python | train |
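The address arithmetic in `set_stack` in isolation: the dump is written upward from `stack_top - len(data)`, so the last byte lands at `stack_top - 1` (values below are placeholders):

```python
stack_top = 0x7FFF0000
data = b"\x00" * 0x1000            # a 4 KiB stack dump, as a placeholder
bottom = stack_top - len(data)     # 0x7FFEF000, bottom of the mapped range
assert bottom + len(data) == stack_top
```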
sbg/sevenbridges-python | sevenbridges/models/project.py | https://github.com/sbg/sevenbridges-python/blob/f62640d1018d959f0b686f2dbe5e183085336607/sevenbridges/models/project.py#L251-L276 | def add_member_email(self, email, permissions=None):
"""
Add a member to the project using member email.
:param email: Member email.
:param permissions: Permissions dictionary.
:return: Member object.
"""
data = {'email': email}
if isinstance(permissions, dict):
data.update({
'permissions': permissions
})
extra = {
'resource': self.__class__.__name__,
'query': {
'id': self.id,
'data': data,
}
}
logger.info('Adding member using email', extra=extra)
response = self._api.post(
url=self._URL['members_query'].format(id=self.id), data=data)
member_data = response.json()
return Member(api=self._api, **member_data) | [
"def",
"add_member_email",
"(",
"self",
",",
"email",
",",
"permissions",
"=",
"None",
")",
":",
"data",
"=",
"{",
"'email'",
":",
"email",
"}",
"if",
"isinstance",
"(",
"permissions",
",",
"dict",
")",
":",
"data",
".",
"update",
"(",
"{",
"'permissions'",
":",
"permissions",
"}",
")",
"extra",
"=",
"{",
"'resource'",
":",
"self",
".",
"__class__",
".",
"__name__",
",",
"'query'",
":",
"{",
"'id'",
":",
"self",
".",
"id",
",",
"'data'",
":",
"data",
",",
"}",
"}",
"logger",
".",
"info",
"(",
"'Adding member using email'",
",",
"extra",
"=",
"extra",
")",
"response",
"=",
"self",
".",
"_api",
".",
"post",
"(",
"url",
"=",
"self",
".",
"_URL",
"[",
"'members_query'",
"]",
".",
"format",
"(",
"id",
"=",
"self",
".",
"id",
")",
",",
"data",
"=",
"data",
")",
"member_data",
"=",
"response",
".",
"json",
"(",
")",
"return",
"Member",
"(",
"api",
"=",
"self",
".",
"_api",
",",
"*",
"*",
"member_data",
")"
] | Add a member to the project using member email.
:param email: Member email.
:param permissions: Permissions dictionary.
:return: Member object. | [
"Add",
"a",
"member",
"to",
"the",
"project",
"using",
"member",
"email",
".",
":",
"param",
"email",
":",
"Member",
"email",
".",
":",
"param",
"permissions",
":",
"Permissions",
"dictionary",
".",
":",
"return",
":",
"Member",
"object",
"."
] | python | train |
FlaskGuys/Flask-Imagine | flask_imagine/filters/rotate.py | https://github.com/FlaskGuys/Flask-Imagine/blob/f79c6517ecb5480b63a2b3b8554edb6e2ac8be8c/flask_imagine/filters/rotate.py#L23-L36 | def apply(self, resource):
"""
Apply filter to resource
:param resource: Image
:return: Image
"""
if not isinstance(resource, Image.Image):
raise ValueError('Unknown resource format')
resource_format = resource.format
resource = resource.rotate(self.angle, expand=True)
resource.format = resource_format
return resource | [
"def",
"apply",
"(",
"self",
",",
"resource",
")",
":",
"if",
"not",
"isinstance",
"(",
"resource",
",",
"Image",
".",
"Image",
")",
":",
"raise",
"ValueError",
"(",
"'Unknown resource format'",
")",
"resource_format",
"=",
"resource",
".",
"format",
"resource",
"=",
"resource",
".",
"rotate",
"(",
"self",
".",
"angle",
",",
"expand",
"=",
"True",
")",
"resource",
".",
"format",
"=",
"resource_format",
"return",
"resource"
] | Apply filter to resource
:param resource: Image
:return: Image | [
"Apply",
"filter",
"to",
"resource",
":",
"param",
"resource",
":",
"Image",
":",
"return",
":",
"Image"
] | python | train |
saltstack/salt | salt/modules/namecheap_domains.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/namecheap_domains.py#L304-L376 | def get_list(list_type=None,
search_term=None,
page=None,
page_size=None,
sort_by=None):
'''
Returns a list of domains for the particular user as a list of objects
offset by ``page`` length of ``page_size``
list_type : ALL
One of ``ALL``, ``EXPIRING``, ``EXPIRED``
search_term
Keyword to look for on the domain list
page : 1
Number of result page to return
page_size : 20
Number of domains to be listed per page (minimum: ``10``, maximum:
``100``)
sort_by
One of ``NAME``, ``NAME_DESC``, ``EXPIREDATE``, ``EXPIREDATE_DESC``,
``CREATEDATE``, or ``CREATEDATE_DESC``
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains.get_list
'''
opts = salt.utils.namecheap.get_opts('namecheap.domains.getList')
if list_type is not None:
if list_type not in ['ALL', 'EXPIRING', 'EXPIRED']:
log.error('Invalid option for list_type')
raise Exception('Invalid option for list_type')
opts['ListType'] = list_type
if search_term is not None:
if len(search_term) > 70:
log.warning('search_term trimmed to first 70 characters')
search_term = search_term[0:70]
opts['SearchTerm'] = search_term
if page is not None:
opts['Page'] = page
if page_size is not None:
if page_size > 100 or page_size < 10:
log.error('Invalid option for page_size')
raise Exception('Invalid option for page_size')
opts['PageSize'] = page_size
if sort_by is not None:
if sort_by not in ['NAME', 'NAME_DESC', 'EXPIREDATE', 'EXPIREDATE_DESC', 'CREATEDATE', 'CREATEDATE_DESC']:
log.error('Invalid option for sort_by')
raise Exception('Invalid option for sort_by')
opts['SortBy'] = sort_by
response_xml = salt.utils.namecheap.get_request(opts)
if response_xml is None:
return []
domainresult = response_xml.getElementsByTagName("DomainGetListResult")[0]
domains = []
for d in domainresult.getElementsByTagName("Domain"):
domains.append(salt.utils.namecheap.atts_to_dict(d))
return domains | [
"def",
"get_list",
"(",
"list_type",
"=",
"None",
",",
"search_term",
"=",
"None",
",",
"page",
"=",
"None",
",",
"page_size",
"=",
"None",
",",
"sort_by",
"=",
"None",
")",
":",
"opts",
"=",
"salt",
".",
"utils",
".",
"namecheap",
".",
"get_opts",
"(",
"'namecheap.domains.getList'",
")",
"if",
"list_type",
"is",
"not",
"None",
":",
"if",
"list_type",
"not",
"in",
"[",
"'ALL'",
",",
"'EXPIRING'",
",",
"'EXPIRED'",
"]",
":",
"log",
".",
"error",
"(",
"'Invalid option for list_type'",
")",
"raise",
"Exception",
"(",
"'Invalid option for list_type'",
")",
"opts",
"[",
"'ListType'",
"]",
"=",
"list_type",
"if",
"search_term",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"search_term",
")",
">",
"70",
":",
"log",
".",
"warning",
"(",
"'search_term trimmed to first 70 characters'",
")",
"search_term",
"=",
"search_term",
"[",
"0",
":",
"70",
"]",
"opts",
"[",
"'SearchTerm'",
"]",
"=",
"search_term",
"if",
"page",
"is",
"not",
"None",
":",
"opts",
"[",
"'Page'",
"]",
"=",
"page",
"if",
"page_size",
"is",
"not",
"None",
":",
"if",
"page_size",
">",
"100",
"or",
"page_size",
"<",
"10",
":",
"log",
".",
"error",
"(",
"'Invalid option for page'",
")",
"raise",
"Exception",
"(",
"'Invalid option for page'",
")",
"opts",
"[",
"'PageSize'",
"]",
"=",
"page_size",
"if",
"sort_by",
"is",
"not",
"None",
":",
"if",
"sort_by",
"not",
"in",
"[",
"'NAME'",
",",
"'NAME_DESC'",
",",
"'EXPIREDATE'",
",",
"'EXPIREDATE_DESC'",
",",
"'CREATEDATE'",
",",
"'CREATEDATE_DESC'",
"]",
":",
"log",
".",
"error",
"(",
"'Invalid option for sort_by'",
")",
"raise",
"Exception",
"(",
"'Invalid option for sort_by'",
")",
"opts",
"[",
"'SortBy'",
"]",
"=",
"sort_by",
"response_xml",
"=",
"salt",
".",
"utils",
".",
"namecheap",
".",
"get_request",
"(",
"opts",
")",
"if",
"response_xml",
"is",
"None",
":",
"return",
"[",
"]",
"domainresult",
"=",
"response_xml",
".",
"getElementsByTagName",
"(",
"\"DomainGetListResult\"",
")",
"[",
"0",
"]",
"domains",
"=",
"[",
"]",
"for",
"d",
"in",
"domainresult",
".",
"getElementsByTagName",
"(",
"\"Domain\"",
")",
":",
"domains",
".",
"append",
"(",
"salt",
".",
"utils",
".",
"namecheap",
".",
"atts_to_dict",
"(",
"d",
")",
")",
"return",
"domains"
] | Returns a list of domains for the particular user as a list of objects
offset by ``page`` length of ``page_size``
list_type : ALL
One of ``ALL``, ``EXPIRING``, ``EXPIRED``
search_term
Keyword to look for on the domain list
page : 1
Number of result page to return
page_size : 20
Number of domains to be listed per page (minimum: ``10``, maximum:
``100``)
sort_by
One of ``NAME``, ``NAME_DESC``, ``EXPIREDATE``, ``EXPIREDATE_DESC``,
``CREATEDATE``, or ``CREATEDATE_DESC``
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains.get_list | [
"Returns",
"a",
"list",
"of",
"domains",
"for",
"the",
"particular",
"user",
"as",
"a",
"list",
"of",
"objects",
"offset",
"by",
"page",
"length",
"of",
"page_size"
] | python | train |
f3at/feat | src/feat/models/response.py | https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/models/response.py#L42-L48 | def deleted(message):
"""Create a Deleted response builder with specified message."""
def deleted(value, _context, **_params):
return Deleted(value, message)
return deleted | [
"def",
"deleted",
"(",
"message",
")",
":",
"def",
"deleted",
"(",
"value",
",",
"_context",
",",
"*",
"*",
"_params",
")",
":",
"return",
"Deleted",
"(",
"value",
",",
"message",
")",
"return",
"deleted"
] | Create a Deleted response builder with specified message. | [
"Create",
"a",
"Deleted",
"response",
"builder",
"with",
"specified",
"message",
"."
] | python | train |
tensorflow/mesh | mesh_tensorflow/auto_mtf/graph_interface.py | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/graph_interface.py#L227-L240 | def is_tensor_on_canonical_device(self, tensor_name):
"""Whether the tensor is on the first (canonical) device.
Tensors not assigned to a device are assumed to be on all devices, including
the canonical device.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a boolean indicating whether the tensor is on the first device.
"""
device = self.get_tensor_device(tensor_name)
return not device or device == self.canonical_device | [
"def",
"is_tensor_on_canonical_device",
"(",
"self",
",",
"tensor_name",
")",
":",
"device",
"=",
"self",
".",
"get_tensor_device",
"(",
"tensor_name",
")",
"return",
"not",
"device",
"or",
"device",
"==",
"self",
".",
"canonical_device"
] | Whether the tensor is on the first (canonical) device.
Tensors not assigned to a device are assumed to be on all devices, including
the canonical device.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a boolean indicating whether the tensor is on the first device. | [
"Whether",
"the",
"tensor",
"is",
"on",
"the",
"first",
"(",
"canonical",
")",
"device",
"."
] | python | train |
davebridges/mousedb | mousedb/animal/views.py | https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/animal/views.py#L415-L446 | def multiple_pups(request):
"""This view is used to enter multiple animals at the same time.
It will generate a form containing animal information and a number of mice. It is intended to create several identical animals with the same attributes.
"""
if request.method == "POST":
form = MultipleAnimalForm(request.POST)
if form.is_valid():
count = form.cleaned_data['count']
for i in range(count):
animal = Animal(
Strain = form.cleaned_data['Strain'],
Background = form.cleaned_data['Background'],
Breeding = form.cleaned_data['Breeding'],
Cage = form.cleaned_data['Cage'],
Rack = form.cleaned_data['Rack'],
Rack_Position = form.cleaned_data['Rack_Position'],
Genotype = form.cleaned_data['Genotype'],
Gender = form.cleaned_data['Gender'],
Born = form.cleaned_data['Born'],
Weaned = form.cleaned_data['Weaned'],
Backcross = form.cleaned_data['Backcross'],
Generation = form.cleaned_data['Generation'],
Father = form.cleaned_data['Father'],
Mother = form.cleaned_data['Mother'],
Markings = form.cleaned_data['Markings'],
Notes = form.cleaned_data['Notes'])
animal.save()
return HttpResponseRedirect( reverse('strain-list') )
else:
form = MultipleAnimalForm()
return render(request, "animal_multiple_form.html", {"form":form,}) | [
"def",
"multiple_pups",
"(",
"request",
")",
":",
"if",
"request",
".",
"method",
"==",
"\"POST\"",
":",
"form",
"=",
"MultipleAnimalForm",
"(",
"request",
".",
"POST",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"count",
"=",
"form",
".",
"cleaned_data",
"[",
"'count'",
"]",
"for",
"i",
"in",
"range",
"(",
"count",
")",
":",
"animal",
"=",
"Animal",
"(",
"Strain",
"=",
"form",
".",
"cleaned_data",
"[",
"'Strain'",
"]",
",",
"Background",
"=",
"form",
".",
"cleaned_data",
"[",
"'Background'",
"]",
",",
"Breeding",
"=",
"form",
".",
"cleaned_data",
"[",
"'Breeding'",
"]",
",",
"Cage",
"=",
"form",
".",
"cleaned_data",
"[",
"'Cage'",
"]",
",",
"Rack",
"=",
"form",
".",
"cleaned_data",
"[",
"'Rack'",
"]",
",",
"Rack_Position",
"=",
"form",
".",
"cleaned_data",
"[",
"'Rack_Position'",
"]",
",",
"Genotype",
"=",
"form",
".",
"cleaned_data",
"[",
"'Genotype'",
"]",
",",
"Gender",
"=",
"form",
".",
"cleaned_data",
"[",
"'Gender'",
"]",
",",
"Born",
"=",
"form",
".",
"cleaned_data",
"[",
"'Born'",
"]",
",",
"Weaned",
"=",
"form",
".",
"cleaned_data",
"[",
"'Weaned'",
"]",
",",
"Backcross",
"=",
"form",
".",
"cleaned_data",
"[",
"'Backcross'",
"]",
",",
"Generation",
"=",
"form",
".",
"cleaned_data",
"[",
"'Generation'",
"]",
",",
"Father",
"=",
"form",
".",
"cleaned_data",
"[",
"'Father'",
"]",
",",
"Mother",
"=",
"form",
".",
"cleaned_data",
"[",
"'Mother'",
"]",
",",
"Markings",
"=",
"form",
".",
"cleaned_data",
"[",
"'Markings'",
"]",
",",
"Notes",
"=",
"form",
".",
"cleaned_data",
"[",
"'Notes'",
"]",
")",
"animal",
".",
"save",
"(",
")",
"return",
"HttpResponseRedirect",
"(",
"reverse",
"(",
"'strain-list'",
")",
")",
"else",
":",
"form",
"=",
"MultipleAnimalForm",
"(",
")",
"return",
"render",
"(",
"request",
",",
"\"animal_multiple_form.html\"",
",",
"{",
"\"form\"",
":",
"form",
",",
"}",
")"
] | This view is used to enter multiple animals at the same time.
It will generate a form containing animal information and a number of mice. It is intended to create several identical animals with the same attributes. | [
"This",
"view",
"is",
"used",
"to",
"enter",
"multiple",
"animals",
"at",
"the",
"same",
"time",
".",
"It",
"will",
"generate",
"a",
"form",
"containing",
"animal",
"information",
"and",
"a",
"number",
"of",
"mice",
".",
"It",
"is",
"intended",
"to",
"create",
"several",
"identical",
"animals",
"with",
"the",
"same",
"attributes",
"."
] | python | train |
dankelley/nota | nota/notaclass.py | https://github.com/dankelley/nota/blob/245cd575db60daaea6eebd5edc1d048c5fe23c9b/nota/notaclass.py#L756-L797 | def find_by_hash(self, hash=None, book=-1):
'''Search notes for a given (possibly abbreviated) hash'''
if hash:
self.fyi("nota.find_by_hash() with abbreviated hash %s; book=%s" % (hash, book))
try:
if book < 0:
rows = self.cur.execute("SELECT noteId, hash FROM note WHERE book > 0;").fetchall()
else:
rows = self.cur.execute("SELECT noteId, hash FROM note WHERE book=?;", [book]).fetchall()
except:
self.error("nota.find_by_hash() cannot look up note list")
# Possibly save time by finding IDs first.
noteIds = []
if hash:
l = len(hash)
for r in rows:
if hash == r[1][0:l]:
noteIds.append((r[0],))
else:
for r in rows:
noteIds.append((r[0],))
self.fyi("noteIds: %s" % noteIds)
rval = []
for n in noteIds:
try:
note = self.cur.execute("SELECT noteId, authorId, date, title, content, due, privacy, modified, hash, book FROM note WHERE noteId=?;", n).fetchone()
except:
self.warning("Problem extracting note %s from database" % n)
continue
if note:
date = note[2]
due = note[5]
privacy = note[6]
keywordIds = []
keywordIds.extend(self.con.execute("SELECT keywordid FROM notekeyword WHERE notekeyword.noteid=?;", n))
keywords = []
for k in keywordIds:
keywords.append(self.cur.execute("SELECT keyword FROM keyword WHERE keywordId=?;", k).fetchone()[0])
rval.append({"noteId":note[0], "title":note[3], "keywords":keywords,
"content":note[4], "due":note[5], "privacy":note[6],
"date":note[2], "modified":note[7], "hash":note[8], "book":note[9]})
return rval | [
"def",
"find_by_hash",
"(",
"self",
",",
"hash",
"=",
"None",
",",
"book",
"=",
"-",
"1",
")",
":",
"if",
"hash",
":",
"self",
".",
"fyi",
"(",
"\"nota.find_by_hash() with abbreviated hash %s; book=%s\"",
"%",
"(",
"hash",
",",
"book",
")",
")",
"try",
":",
"if",
"book",
"<",
"0",
":",
"rows",
"=",
"self",
".",
"cur",
".",
"execute",
"(",
"\"SELECT noteId, hash FROM note WHERE book > 0;\"",
")",
".",
"fetchall",
"(",
")",
"else",
":",
"rows",
"=",
"self",
".",
"cur",
".",
"execute",
"(",
"\"SELECT noteId, hash FROM note WHERE book=?;\"",
",",
"[",
"book",
"]",
")",
".",
"fetchall",
"(",
")",
"except",
":",
"self",
".",
"error",
"(",
"\"nota.find_by_hash() cannot look up note list\"",
")",
"# Possibly save time by finding IDs first.",
"noteIds",
"=",
"[",
"]",
"if",
"hash",
":",
"l",
"=",
"len",
"(",
"hash",
")",
"for",
"r",
"in",
"rows",
":",
"if",
"hash",
"==",
"r",
"[",
"1",
"]",
"[",
"0",
":",
"l",
"]",
":",
"noteIds",
".",
"append",
"(",
"(",
"r",
"[",
"0",
"]",
",",
")",
")",
"else",
":",
"for",
"r",
"in",
"rows",
":",
"noteIds",
".",
"append",
"(",
"(",
"r",
"[",
"0",
"]",
",",
")",
")",
"self",
".",
"fyi",
"(",
"\"noteIds: %s\"",
"%",
"noteIds",
")",
"rval",
"=",
"[",
"]",
"for",
"n",
"in",
"noteIds",
":",
"try",
":",
"note",
"=",
"self",
".",
"cur",
".",
"execute",
"(",
"\"SELECT noteId, authorId, date, title, content, due, privacy, modified, hash, book FROM note WHERE noteId=?;\"",
",",
"n",
")",
".",
"fetchone",
"(",
")",
"except",
":",
"self",
".",
"warning",
"(",
"\"Problem extracting note %s from database\"",
"%",
"n",
")",
"next",
"if",
"note",
":",
"date",
"=",
"note",
"[",
"2",
"]",
"due",
"=",
"note",
"[",
"5",
"]",
"privacy",
"=",
"note",
"[",
"6",
"]",
"keywordIds",
"=",
"[",
"]",
"keywordIds",
".",
"extend",
"(",
"self",
".",
"con",
".",
"execute",
"(",
"\"SELECT keywordid FROM notekeyword WHERE notekeyword.noteid=?;\"",
",",
"n",
")",
")",
"keywords",
"=",
"[",
"]",
"for",
"k",
"in",
"keywordIds",
":",
"keywords",
".",
"append",
"(",
"self",
".",
"cur",
".",
"execute",
"(",
"\"SELECT keyword FROM keyword WHERE keywordId=?;\"",
",",
"k",
")",
".",
"fetchone",
"(",
")",
"[",
"0",
"]",
")",
"rval",
".",
"append",
"(",
"{",
"\"noteId\"",
":",
"note",
"[",
"0",
"]",
",",
"\"title\"",
":",
"note",
"[",
"3",
"]",
",",
"\"keywords\"",
":",
"keywords",
",",
"\"content\"",
":",
"note",
"[",
"4",
"]",
",",
"\"due\"",
":",
"note",
"[",
"5",
"]",
",",
"\"privacy\"",
":",
"note",
"[",
"6",
"]",
",",
"\"date\"",
":",
"note",
"[",
"2",
"]",
",",
"\"modified\"",
":",
"note",
"[",
"7",
"]",
",",
"\"hash\"",
":",
"note",
"[",
"8",
"]",
",",
"\"book\"",
":",
"note",
"[",
"9",
"]",
"}",
")",
"return",
"rval"
] | Search notes for a given (possibly abbreviated) hash | [
"Search",
"notes",
"for",
"a",
"given",
"(",
"possibly",
"abbreviated",
")",
"hash"
] | python | train |
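The abbreviated-hash match in `find_by_hash` in isolation: a hash argument of length `l` matches any stored hash that shares its first `l` characters, and every matching noteId is collected:

```python
rows = [(1, "a1b2c3d4"), (2, "a1ff0000"), (3, "beef1234")]  # (noteId, hash)
prefix = "a1b"
matches = [note_id for note_id, full_hash in rows
           if full_hash[:len(prefix)] == prefix]
assert matches == [1]
```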
google/openhtf | openhtf/plugs/usb/adb_protocol.py | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_protocol.py#L596-L642 | def _handle_message_for_stream(self, stream_transport, message, timeout):
"""Handle an incoming message, check if it's for the given stream.
If the message is not for the stream, then add it to the appropriate
message queue.
Args:
stream_transport: AdbStreamTransport currently waiting on a message.
message: Message to check and handle.
timeout: Timeout to use for the operation, should be an instance of
timeouts.PolledTimeout.
Returns:
The message read if it was for this stream, None otherwise.
Raises:
AdbProtocolError: If we receive an unexpected message type.
"""
if message.command not in ('OKAY', 'CLSE', 'WRTE'):
raise usb_exceptions.AdbProtocolError(
'%s received unexpected message: %s', self, message)
if message.arg1 == stream_transport.local_id:
# Ack writes immediately.
if message.command == 'WRTE':
# Make sure we don't get a WRTE before an OKAY/CLSE message.
if not stream_transport.remote_id:
raise usb_exceptions.AdbProtocolError(
'%s received WRTE before OKAY/CLSE: %s',
stream_transport, message)
self.transport.write_message(adb_message.AdbMessage(
'OKAY', stream_transport.local_id, stream_transport.remote_id),
timeout)
elif message.command == 'CLSE':
self.close_stream_transport(stream_transport, timeout)
return message
else:
# Message was not for this stream, add it to the right stream's queue.
with self._stream_transport_map_lock:
dest_transport = self._stream_transport_map.get(message.arg1)
if dest_transport:
if message.command == 'CLSE':
self.close_stream_transport(dest_transport, timeout)
dest_transport.enqueue_message(message, timeout)
else:
_LOG.warning('Received message for unknown local-id: %s', message) | [
"def",
"_handle_message_for_stream",
"(",
"self",
",",
"stream_transport",
",",
"message",
",",
"timeout",
")",
":",
"if",
"message",
".",
"command",
"not",
"in",
"(",
"'OKAY'",
",",
"'CLSE'",
",",
"'WRTE'",
")",
":",
"raise",
"usb_exceptions",
".",
"AdbProtocolError",
"(",
"'%s received unexpected message: %s'",
",",
"self",
",",
"message",
")",
"if",
"message",
".",
"arg1",
"==",
"stream_transport",
".",
"local_id",
":",
"# Ack writes immediately.",
"if",
"message",
".",
"command",
"==",
"'WRTE'",
":",
"# Make sure we don't get a WRTE before an OKAY/CLSE message.",
"if",
"not",
"stream_transport",
".",
"remote_id",
":",
"raise",
"usb_exceptions",
".",
"AdbProtocolError",
"(",
"'%s received WRTE before OKAY/CLSE: %s'",
",",
"stream_transport",
",",
"message",
")",
"self",
".",
"transport",
".",
"write_message",
"(",
"adb_message",
".",
"AdbMessage",
"(",
"'OKAY'",
",",
"stream_transport",
".",
"local_id",
",",
"stream_transport",
".",
"remote_id",
")",
",",
"timeout",
")",
"elif",
"message",
".",
"command",
"==",
"'CLSE'",
":",
"self",
".",
"close_stream_transport",
"(",
"stream_transport",
",",
"timeout",
")",
"return",
"message",
"else",
":",
"# Message was not for this stream, add it to the right stream's queue.",
"with",
"self",
".",
"_stream_transport_map_lock",
":",
"dest_transport",
"=",
"self",
".",
"_stream_transport_map",
".",
"get",
"(",
"message",
".",
"arg1",
")",
"if",
"dest_transport",
":",
"if",
"message",
".",
"command",
"==",
"'CLSE'",
":",
"self",
".",
"close_stream_transport",
"(",
"dest_transport",
",",
"timeout",
")",
"dest_transport",
".",
"enqueue_message",
"(",
"message",
",",
"timeout",
")",
"else",
":",
"_LOG",
".",
"warning",
"(",
"'Received message for unknown local-id: %s'",
",",
"message",
")"
] | Handle an incoming message, check if it's for the given stream.
If the message is not for the stream, then add it to the appropriate
message queue.
Args:
stream_transport: AdbStreamTransport currently waiting on a message.
message: Message to check and handle.
timeout: Timeout to use for the operation, should be an instance of
timeouts.PolledTimeout.
Returns:
The message read if it was for this stream, None otherwise.
Raises:
AdbProtocolError: If we receive an unexpected message type. | [
"Handle",
"an",
"incoming",
"message",
"check",
"if",
"it",
"s",
"for",
"the",
"given",
"stream",
"."
] | python | train |
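The openhtf record above routes each incoming ADB message either to the stream currently waiting on it or into the queue of whichever stream its arg1 names. Stripped of the protocol details, the dispatch has roughly this shape; all names below are illustrative stand-ins, not the openhtf API:

    def route_message(waiting, transports, message):
        """Return the message if it is for `waiting`, otherwise enqueue it."""
        if message.arg1 == waiting.local_id:
            return message                   # caller acks WRTE / closes on CLSE
        dest = transports.get(message.arg1)  # traffic for some other stream
        if dest is not None:
            dest.queue.append(message)
        return None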
Erotemic/utool | utool/util_inspect.py | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_inspect.py#L1227-L1247 | def inherit_kwargs(inherit_func):
"""
TODO move to util_decor
inherit_func = inspect_pdfs
func = encoder.visualize.im_func
"""
import utool as ut
keys, is_arbitrary = ut.get_kwargs(inherit_func)
if is_arbitrary:
keys += ['**kwargs']
kwargs_append = '\n'.join(keys)
#from six.moves import builtins
#builtins.print(kwargs_block)
def _wrp(func):
if func.__doc__ is None:
func.__doc__ = ''
# TODO append to kwargs block if it exists
kwargs_block = 'Kwargs:\n' + ut.indent(kwargs_append)
func.__doc__ += kwargs_block
return func
return _wrp | [
"def",
"inherit_kwargs",
"(",
"inherit_func",
")",
":",
"import",
"utool",
"as",
"ut",
"keys",
",",
"is_arbitrary",
"=",
"ut",
".",
"get_kwargs",
"(",
"inherit_func",
")",
"if",
"is_arbitrary",
":",
"keys",
"+=",
"[",
"'**kwargs'",
"]",
"kwargs_append",
"=",
"'\\n'",
".",
"join",
"(",
"keys",
")",
"#from six.moves import builtins",
"#builtins.print(kwargs_block)",
"def",
"_wrp",
"(",
"func",
")",
":",
"if",
"func",
".",
"__doc__",
"is",
"None",
":",
"func",
".",
"__doc__",
"=",
"''",
"# TODO append to kwargs block if it exists",
"kwargs_block",
"=",
"'Kwargs:\\n'",
"+",
"ut",
".",
"indent",
"(",
"kwargs_append",
")",
"func",
".",
"__doc__",
"+=",
"kwargs_block",
"return",
"func",
"return",
"_wrp"
] | TODO move to util_decor
inherit_func = inspect_pdfs
func = encoder.visualize.im_func | [
"TODO",
"move",
"to",
"util_decor",
"inherit_func",
"=",
"inspect_pdfs",
"func",
"=",
"encoder",
".",
"visualize",
".",
"im_func"
] | python | train |
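`inherit_kwargs` above is a decorator factory: it reads the donor function's keyword names via `ut.get_kwargs` and appends them to the decorated function's docstring as a `Kwargs:` block. A hedged usage sketch, assuming `utool` is importable:

    import utool as ut

    def donor(a=0, b=1, c=2, **kwargs):
        pass

    @ut.inherit_kwargs(donor)
    def consumer(**kwargs):
        """Existing docs."""
        return donor(**kwargs)

    # consumer.__doc__ now ends with a Kwargs: block derived from donor's signature
    print(consumer.__doc__)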
Guake/guake | guake/guake_app.py | https://github.com/Guake/guake/blob/4153ef38f9044cbed6494075fce80acd5809df2b/guake/guake_app.py#L1125-L1142 | def execute_hook(self, event_name):
"""Execute shell commands related to current event_name"""
hook = self.settings.hooks.get_string('{!s}'.format(event_name))
if hook is not None and hook != "":
hook = hook.split()
try:
subprocess.Popen(hook)
except OSError as oserr:
if oserr.errno == 8:
log.error("Hook execution failed! Check shebang at first line of %s!", hook)
log.debug(traceback.format_exc())
else:
log.error(str(oserr))
except Exception as e:
log.error("hook execution failed! %s", e)
log.debug(traceback.format_exc())
else:
log.debug("hook on event %s has been executed", event_name) | [
"def",
"execute_hook",
"(",
"self",
",",
"event_name",
")",
":",
"hook",
"=",
"self",
".",
"settings",
".",
"hooks",
".",
"get_string",
"(",
"'{!s}'",
".",
"format",
"(",
"event_name",
")",
")",
"if",
"hook",
"is",
"not",
"None",
"and",
"hook",
"!=",
"\"\"",
":",
"hook",
"=",
"hook",
".",
"split",
"(",
")",
"try",
":",
"subprocess",
".",
"Popen",
"(",
"hook",
")",
"except",
"OSError",
"as",
"oserr",
":",
"if",
"oserr",
".",
"errno",
"==",
"8",
":",
"log",
".",
"error",
"(",
"\"Hook execution failed! Check shebang at first line of %s!\"",
",",
"hook",
")",
"log",
".",
"debug",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"else",
":",
"log",
".",
"error",
"(",
"str",
"(",
"oserr",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"error",
"(",
"\"hook execution failed! %s\"",
",",
"e",
")",
"log",
".",
"debug",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"else",
":",
"log",
".",
"debug",
"(",
"\"hook on event %s has been executed\"",
",",
"event_name",
")"
] | Execute shell commands related to current event_name | [
"Execute",
"shell",
"commands",
"related",
"to",
"current",
"event_name"
] | python | train |
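The Guake hook runner above splits a settings string into argv and launches it, treating errno 8 (Exec format error) as a missing-shebang hint. The same pattern works standalone, without the GSettings dependency:

    import subprocess

    def run_hook(command_line):
        if not command_line:
            return
        argv = command_line.split()
        try:
            subprocess.Popen(argv)
        except OSError as err:
            if err.errno == 8:  # Exec format error: the script lacks a shebang
                print("Check the shebang at the first line of %s" % argv[0])
            else:
                print(err)
        except Exception as exc:
            print("hook execution failed! %s" % exc)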
Unidata/siphon | siphon/http_util.py | https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/http_util.py#L277-L297 | def time(self, time):
"""Add a request for a specific time to the query.
This modifies the query in-place, but returns `self` so that multiple queries
can be chained together on one line.
This replaces any existing temporal queries that have been set.
Parameters
----------
time : datetime.datetime
The time to request
Returns
-------
self : DataQuery
Returns self for chaining calls
"""
self._set_query(self.time_query, time=self._format_time(time))
return self | [
"def",
"time",
"(",
"self",
",",
"time",
")",
":",
"self",
".",
"_set_query",
"(",
"self",
".",
"time_query",
",",
"time",
"=",
"self",
".",
"_format_time",
"(",
"time",
")",
")",
"return",
"self"
] | Add a request for a specific time to the query.
This modifies the query in-place, but returns `self` so that multiple queries
can be chained together on one line.
This replaces any existing temporal queries that have been set.
Parameters
----------
time : datetime.datetime
The time to request
Returns
-------
self : DataQuery
Returns self for chaining calls | [
"Add",
"a",
"request",
"for",
"a",
"specific",
"time",
"to",
"the",
"query",
"."
] | python | train |
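Because `time` stores its value and returns `self`, temporal constraints chain with other query builders on one line. A usage sketch; `variables` is another DataQuery method assumed here, not part of the record above:

    from datetime import datetime
    from siphon.http_util import DataQuery

    query = DataQuery().variables('temperature').time(datetime(2024, 1, 1, 12))
    print(str(query))  # url-encoded parameters, including the formatted time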
pypa/pipenv | pipenv/vendor/click/decorators.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click/decorators.py#L12-L18 | def pass_context(f):
"""Marks a callback as wanting to receive the current context
object as first argument.
"""
def new_func(*args, **kwargs):
return f(get_current_context(), *args, **kwargs)
return update_wrapper(new_func, f) | [
"def",
"pass_context",
"(",
"f",
")",
":",
"def",
"new_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"f",
"(",
"get_current_context",
"(",
")",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"update_wrapper",
"(",
"new_func",
",",
"f",
")"
] | Marks a callback as wanting to receive the current context
object as first argument. | [
"Marks",
"a",
"callback",
"as",
"wanting",
"to",
"receive",
"the",
"current",
"context",
"object",
"as",
"first",
"argument",
"."
] | python | train |
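`pass_context` only wraps the callback so the active `Context` is injected as the first positional argument; its canonical use pairs it with a command declaration:

    import click

    @click.command()
    @click.pass_context
    def cli(ctx):
        # ctx is the current click.Context supplied by the wrapper
        click.echo(ctx.info_name)

    if __name__ == '__main__':
        cli()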
diamondman/proteusisc | proteusisc/command_queue.py | https://github.com/diamondman/proteusisc/blob/7622b7b04e63f9dc0f5a04429ff78d9a490c9c5c/proteusisc/command_queue.py#L77-L244 | def _compile_device_specific_prims(self, debug=False,
stages=None, stagenames=None):
"""Using the data stored in the CommandQueue, Extract and align compatible sequences of Primitives and compile/optimize the Primitives down into a stream of Level 2 device agnostic primitives.
BACKGROUND:
Device Specific primitives present a special opportunity for
optimization. Many JTAG systems program one device on the
chain at a time. But because all devices on a JTAG chain are
sent information at once, NO-OP instructions are sent to these
other devices.
When programming multiple devices, sending these NO-OPS is a
missed opportunity for optimization. Instead of configuring
one device at a time, it is more efficient to collect
instructions for all devices, and align them so multiple
devices can be configured at the same time.
WHAT THIS METHOD DOES:
This method takes in a list of Primitives, groups the device
specific primitives by target device, aligns the sequences of
device instructions, and expands the aligned sequences into a
flat list of device agnostic primitives.
Args:
debug: A boolean for whether debug information should be generated.
stages: A list to be edited by this method to store snapshots of the compilation state. Used if debug is True.
stagenames: A list of strings describing each debug snapshot of the compilation process. Used if debug is True.
"""
############### GROUPING BY EXEC BOUNDARIES!################
fences = []
fence = [self[0]]
for p in self[1:]:
if type(fence[0])._layer == type(p)._layer and\
isinstance(fence[0], DeviceTarget) == \
isinstance(p, DeviceTarget):
fence.append(p)
else:
fences.append(fence)
fence = [p]
fences.append(fence)
if debug: #pragma: no cover
formatted_fences = []
for fence in fences:
formatted_fence = [p.snapshot() for p in fence]
formatted_fences.append(formatted_fence)
formatted_fences.append([])
stages.append(formatted_fences[:-1]) #Ignore trailing []
stagenames.append("Fencing off execution boundaries")
############## SPLIT GROUPS BY DEVICE TARGET! ##############
split_fences = []
for fence in fences:
tmp_chains = {}
for p in fence:
k = p._device_index \
if isinstance(p, DeviceTarget) else "chain"
subchain = tmp_chains.setdefault(k, []).append(p)
split_fences.append(list(tmp_chains.values()))
if debug:#pragma: no cover
formatted_split_fences = []
for fence in split_fences:
for group in fence:
formatted_split_fences.append([p.snapshot()
for p in group])
formatted_split_fences.append([])
stages.append(formatted_split_fences[:-1])
stagenames.append("Grouping prims of each boundary by "
"target device")
############## ALIGN SEQUENCES AND PAD FRAMES ##############
#FIRST DEV REQUIRED LINE
grouped_fences = [
FrameSequence(self._chain, *fence).finalize()
for f_i, fence in enumerate(split_fences)
]
if debug:#pragma: no cover
formatted_grouped_fences = []
for fence in grouped_fences:
formatted_grouped_fences += fence.snapshot() + [[]]
stages.append(formatted_grouped_fences[:-1])
stagenames.append("Aligning and combining each group dev "
"prim stream")
################## RECOMBINE FRAME GROUPS ##################
ingested_chain = grouped_fences[0]
for fence in grouped_fences[1:]:
ingested_chain += fence
if debug:#pragma: no cover
stages.append(ingested_chain.snapshot())
stagenames.append("Recombining sanitized exec boundaries")
###################### POST INGESTION ######################
################ Flatten out LV3 Primitives ################
while(any((f._layer == 3 for f in ingested_chain))):
################# COMBINE COMPATIBLE PRIMS #################
ingested_chain = _merge_prims(ingested_chain)
if debug:#pragma: no cover
stages.append(ingested_chain.snapshot())
stagenames.append("Combining compatible lv3 prims.")
################ TRANSLATION TO LOWER LAYER ################
sm = JTAGStateMachine(self._chain._sm.state)
expanded_prims = FrameSequence(self._chain)
for f in ingested_chain:
if f._layer == 3:
expanded_prims += f.expand_macro(sm)
else:
expanded_prims.append(f)
expanded_prims.finalize()
ingested_chain = expanded_prims
if self._fsm is None:
self._fsm = sm
assert self._fsm == sm, "Target %s != Actual %s"%\
(self._fsm.state, sm.state)
if debug:#pragma: no cover
stages.append(ingested_chain.snapshot())
stagenames.append("Expanding lv3 prims")
############## Flatten out Dev LV2 Primitives ##############
while(any((isinstance(f._valid_prim, DeviceTarget)
for f in ingested_chain))):
################# COMBINE COMPATIBLE PRIMS #################
ingested_chain = _merge_prims(ingested_chain)
if debug:#pragma: no cover
stages.append(ingested_chain.snapshot())
stagenames.append("Merging Device Specific Prims")
################ TRANSLATION TO LOWER LAYER ################
sm = JTAGStateMachine(self._chain._sm.state)
expanded_prims = FrameSequence(self._chain)
for f in ingested_chain:
if issubclass(f._prim_type, DeviceTarget):
expanded_prims += f.expand_macro(sm)
else:
f[0].apply_tap_effect(sm)
expanded_prims.append(f)
expanded_prims.finalize()
ingested_chain = expanded_prims
if self._fsm is None:
self._fsm = sm
assert self._fsm == sm, "Target %s != Actual %s"%\
(self._fsm.state, sm.state)
if debug:#pragma: no cover
stages.append(ingested_chain.snapshot())
stagenames.append("Expanding Device Specific Prims")
############ Convert FrameSequence to flat array ###########
flattened_prims = [f._valid_prim for f in ingested_chain]
if debug:#pragma: no cover
stages.append([[p.snapshot() for p in flattened_prims]])
stagenames.append("Converting format to single stream.")
return flattened_prims | [
"def",
"_compile_device_specific_prims",
"(",
"self",
",",
"debug",
"=",
"False",
",",
"stages",
"=",
"None",
",",
"stagenames",
"=",
"None",
")",
":",
"############### GROUPING BY EXEC BOUNDARIES!################",
"fences",
"=",
"[",
"]",
"fence",
"=",
"[",
"self",
"[",
"0",
"]",
"]",
"for",
"p",
"in",
"self",
"[",
"1",
":",
"]",
":",
"if",
"type",
"(",
"fence",
"[",
"0",
"]",
")",
".",
"_layer",
"==",
"type",
"(",
"p",
")",
".",
"_layer",
"and",
"isinstance",
"(",
"fence",
"[",
"0",
"]",
",",
"DeviceTarget",
")",
"==",
"isinstance",
"(",
"p",
",",
"DeviceTarget",
")",
":",
"fence",
".",
"append",
"(",
"p",
")",
"else",
":",
"fences",
".",
"append",
"(",
"fence",
")",
"fence",
"=",
"[",
"p",
"]",
"fences",
".",
"append",
"(",
"fence",
")",
"if",
"debug",
":",
"#pragma: no cover",
"formatted_fences",
"=",
"[",
"]",
"for",
"fence",
"in",
"fences",
":",
"formatted_fence",
"=",
"[",
"p",
".",
"snapshot",
"(",
")",
"for",
"p",
"in",
"fence",
"]",
"formatted_fences",
".",
"append",
"(",
"formatted_fence",
")",
"formatted_fences",
".",
"append",
"(",
"[",
"]",
")",
"stages",
".",
"append",
"(",
"formatted_fences",
"[",
":",
"-",
"1",
"]",
")",
"#Ignore trailing []",
"stagenames",
".",
"append",
"(",
"\"Fencing off execution boundaries\"",
")",
"############## SPLIT GROUPS BY DEVICE TARGET! ##############",
"split_fences",
"=",
"[",
"]",
"for",
"fence",
"in",
"fences",
":",
"tmp_chains",
"=",
"{",
"}",
"for",
"p",
"in",
"fence",
":",
"k",
"=",
"p",
".",
"_device_index",
"if",
"isinstance",
"(",
"p",
",",
"DeviceTarget",
")",
"else",
"\"chain\"",
"subchain",
"=",
"tmp_chains",
".",
"setdefault",
"(",
"k",
",",
"[",
"]",
")",
".",
"append",
"(",
"p",
")",
"split_fences",
".",
"append",
"(",
"list",
"(",
"tmp_chains",
".",
"values",
"(",
")",
")",
")",
"if",
"debug",
":",
"#pragma: no cover",
"formatted_split_fences",
"=",
"[",
"]",
"for",
"fence",
"in",
"split_fences",
":",
"for",
"group",
"in",
"fence",
":",
"formatted_split_fences",
".",
"append",
"(",
"[",
"p",
".",
"snapshot",
"(",
")",
"for",
"p",
"in",
"group",
"]",
")",
"formatted_split_fences",
".",
"append",
"(",
"[",
"]",
")",
"stages",
".",
"append",
"(",
"formatted_split_fences",
"[",
":",
"-",
"1",
"]",
")",
"stagenames",
".",
"append",
"(",
"\"Grouping prims of each boundary by \"",
"\"target device\"",
")",
"############## ALIGN SEQUENCES AND PAD FRAMES ##############",
"#FIRST DEV REQUIRED LINE",
"grouped_fences",
"=",
"[",
"FrameSequence",
"(",
"self",
".",
"_chain",
",",
"*",
"fence",
")",
".",
"finalize",
"(",
")",
"for",
"f_i",
",",
"fence",
"in",
"enumerate",
"(",
"split_fences",
")",
"]",
"if",
"debug",
":",
"#pragma: no cover",
"formatted_grouped_fences",
"=",
"[",
"]",
"for",
"fence",
"in",
"grouped_fences",
":",
"formatted_grouped_fences",
"+=",
"fence",
".",
"snapshot",
"(",
")",
"+",
"[",
"[",
"]",
"]",
"stages",
".",
"append",
"(",
"formatted_grouped_fences",
"[",
":",
"-",
"1",
"]",
")",
"stagenames",
".",
"append",
"(",
"\"Aligning and combining each group dev \"",
"\"prim stream\"",
")",
"################## RECOMBINE FRAME GROUPS ##################",
"ingested_chain",
"=",
"grouped_fences",
"[",
"0",
"]",
"for",
"fence",
"in",
"grouped_fences",
"[",
"1",
":",
"]",
":",
"ingested_chain",
"+=",
"fence",
"if",
"debug",
":",
"#pragma: no cover",
"stages",
".",
"append",
"(",
"ingested_chain",
".",
"snapshot",
"(",
")",
")",
"stagenames",
".",
"append",
"(",
"\"Recombining sanitized exec boundaries\"",
")",
"###################### POST INGESTION ######################",
"################ Flatten out LV3 Primitives ################",
"while",
"(",
"any",
"(",
"(",
"f",
".",
"_layer",
"==",
"3",
"for",
"f",
"in",
"ingested_chain",
")",
")",
")",
":",
"################# COMBINE COMPATIBLE PRIMS #################",
"ingested_chain",
"=",
"_merge_prims",
"(",
"ingested_chain",
")",
"if",
"debug",
":",
"#pragma: no cover",
"stages",
".",
"append",
"(",
"ingested_chain",
".",
"snapshot",
"(",
")",
")",
"stagenames",
".",
"append",
"(",
"\"Combining compatible lv3 prims.\"",
")",
"################ TRANSLATION TO LOWER LAYER ################",
"sm",
"=",
"JTAGStateMachine",
"(",
"self",
".",
"_chain",
".",
"_sm",
".",
"state",
")",
"expanded_prims",
"=",
"FrameSequence",
"(",
"self",
".",
"_chain",
")",
"for",
"f",
"in",
"ingested_chain",
":",
"if",
"f",
".",
"_layer",
"==",
"3",
":",
"expanded_prims",
"+=",
"f",
".",
"expand_macro",
"(",
"sm",
")",
"else",
":",
"expanded_prims",
".",
"append",
"(",
"f",
")",
"expanded_prims",
".",
"finalize",
"(",
")",
"ingested_chain",
"=",
"expanded_prims",
"if",
"self",
".",
"_fsm",
"is",
"None",
":",
"self",
".",
"_fsm",
"=",
"sm",
"assert",
"self",
".",
"_fsm",
"==",
"sm",
",",
"\"Target %s != Actual %s\"",
"%",
"(",
"self",
".",
"_fsm",
".",
"state",
",",
"sm",
".",
"state",
")",
"if",
"debug",
":",
"#pragma: no cover",
"stages",
".",
"append",
"(",
"ingested_chain",
".",
"snapshot",
"(",
")",
")",
"stagenames",
".",
"append",
"(",
"\"Expanding lv3 prims\"",
")",
"############## Flatten out Dev LV2 Primitives ##############",
"while",
"(",
"any",
"(",
"(",
"isinstance",
"(",
"f",
".",
"_valid_prim",
",",
"DeviceTarget",
")",
"for",
"f",
"in",
"ingested_chain",
")",
")",
")",
":",
"################# COMBINE COMPATIBLE PRIMS #################",
"ingested_chain",
"=",
"_merge_prims",
"(",
"ingested_chain",
")",
"if",
"debug",
":",
"#pragma: no cover",
"stages",
".",
"append",
"(",
"ingested_chain",
".",
"snapshot",
"(",
")",
")",
"stagenames",
".",
"append",
"(",
"\"Merging Device Specific Prims\"",
")",
"################ TRANSLATION TO LOWER LAYER ################",
"sm",
"=",
"JTAGStateMachine",
"(",
"self",
".",
"_chain",
".",
"_sm",
".",
"state",
")",
"expanded_prims",
"=",
"FrameSequence",
"(",
"self",
".",
"_chain",
")",
"for",
"f",
"in",
"ingested_chain",
":",
"if",
"issubclass",
"(",
"f",
".",
"_prim_type",
",",
"DeviceTarget",
")",
":",
"expanded_prims",
"+=",
"f",
".",
"expand_macro",
"(",
"sm",
")",
"else",
":",
"f",
"[",
"0",
"]",
".",
"apply_tap_effect",
"(",
"sm",
")",
"expanded_prims",
".",
"append",
"(",
"f",
")",
"expanded_prims",
".",
"finalize",
"(",
")",
"ingested_chain",
"=",
"expanded_prims",
"if",
"self",
".",
"_fsm",
"is",
"None",
":",
"self",
".",
"_fsm",
"=",
"sm",
"assert",
"self",
".",
"_fsm",
"==",
"sm",
",",
"\"Target %s != Actual %s\"",
"%",
"(",
"self",
".",
"_fsm",
".",
"state",
",",
"sm",
".",
"state",
")",
"if",
"debug",
":",
"#pragma: no cover",
"stages",
".",
"append",
"(",
"ingested_chain",
".",
"snapshot",
"(",
")",
")",
"stagenames",
".",
"append",
"(",
"\"Expanding Device Specific Prims\"",
")",
"############ Convert FrameSequence to flat array ###########",
"flattened_prims",
"=",
"[",
"f",
".",
"_valid_prim",
"for",
"f",
"in",
"ingested_chain",
"]",
"if",
"debug",
":",
"#pragma: no cover",
"stages",
".",
"append",
"(",
"[",
"[",
"p",
".",
"snapshot",
"(",
")",
"for",
"p",
"in",
"flattened_prims",
"]",
"]",
")",
"stagenames",
".",
"append",
"(",
"\"Converting format to single stream.\"",
")",
"return",
"flattened_prims"
] | Using the data stored in the CommandQueue, extract and align compatible sequences of Primitives and compile/optimize the Primitives down into a stream of Level 2 device agnostic primitives.
BACKGROUND:
Device Specific primitives present a special opportunity for
optimization. Many JTAG systems program one device on the
chain at a time. But because all devices on a JTAG chain are
sent information at once, NO-OP instructions are sent to these
other devices.
When programming multiple devices, sending these NO-OPS is a
missed opportunity for optimization. Instead of configuring
one device at a time, it is more efficient to collect
instructions for all devices, and align them so multiple
devices can be configured at the same time.
WHAT THIS METHOD DOES:
This method takes in a list of Primitives, groups the device
specific primitives by target device, aligns the sequences of
device instructions, and expands the aligned sequences into a
flat list of device agnostic primitives.
Args:
debug: A boolean for whether debug information should be generated.
stages: A list to be edited by this method to store snapshots of the compilation state. Used if debug is True.
stagenames: A list of strings describing each debug snapshot of the compilation process. Used if debug is True. | [
"Using",
"the",
"data",
"stored",
"in",
"the",
"CommandQueue",
"Extract",
"and",
"align",
"compatible",
"sequences",
"of",
"Primitives",
"and",
"compile",
"/",
"optimize",
"the",
"Primitives",
"down",
"into",
"a",
"stream",
"of",
"Level",
"2",
"device",
"agnostic",
"primitives",
"."
] | python | train |
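The compiler's first pass above "fences off" the primitive stream wherever the layer or device-targeting of adjacent primitives changes; that step is just a split on a boundary predicate. A generic sketch of the same grouping:

    def fence(items, same_group):
        """Split a non-empty `items` list into maximal runs satisfying same_group(first, cur)."""
        fences, current = [], [items[0]]
        for item in items[1:]:
            if same_group(current[0], item):
                current.append(item)
            else:
                fences.append(current)
                current = [item]
        fences.append(current)
        return fences

    # e.g. fence(prims, lambda a, b: type(a)._layer == type(b)._layer)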
django-danceschool/django-danceschool | danceschool/guestlist/models.py | https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/guestlist/models.py#L48-L58 | def currentEvent(self):
'''
Return the first event that hasn't ended yet, or if there are no
future events, the last one to end.
'''
currentEvent = self.recentEvents.filter(endTime__gte=timezone.now()).order_by('startTime').first()
if not currentEvent:
currentEvent = self.recentEvents.filter(
endTime__lte=timezone.now()
).order_by('-endTime').first()
return currentEvent | [
"def",
"currentEvent",
"(",
"self",
")",
":",
"currentEvent",
"=",
"self",
".",
"recentEvents",
".",
"filter",
"(",
"endTime__gte",
"=",
"timezone",
".",
"now",
"(",
")",
")",
".",
"order_by",
"(",
"'startTime'",
")",
".",
"first",
"(",
")",
"if",
"not",
"currentEvent",
":",
"currentEvent",
"=",
"self",
".",
"recentEvents",
".",
"filter",
"(",
"endTime__lte",
"=",
"timezone",
".",
"now",
"(",
")",
")",
".",
"order_by",
"(",
"'-endTime'",
")",
".",
"first",
"(",
")",
"return",
"currentEvent"
] | Return the first event that hasn't ended yet, or if there are no
future events, the last one to end. | [
"Return",
"the",
"first",
"event",
"that",
"hasn",
"t",
"ended",
"yet",
"or",
"if",
"there",
"are",
"no",
"future",
"events",
"the",
"last",
"one",
"to",
"end",
"."
] | python | train |
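The property above prefers the earliest event still running or upcoming and only falls back to the most recently ended one. The same two-step choice, sketched without Django over plain (start, end) pairs:

    from datetime import datetime

    def current_event(events, now=None):
        """events: iterable of (start_time, end_time) tuples."""
        now = now or datetime.now()
        ongoing = sorted((e for e in events if e[1] >= now), key=lambda e: e[0])
        if ongoing:
            return ongoing[0]                 # first event that has not ended
        past = sorted(events, key=lambda e: e[1])
        return past[-1] if past else None     # otherwise the last one to end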
aws/sagemaker-containers | src/sagemaker_containers/_encoders.py | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_encoders.py#L104-L118 | def array_to_csv(array_like): # type: (np.array or Iterable or int or float) -> str
"""Convert an array like object to CSV.
To understand better what an array like object is see:
https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays
Args:
array_like (np.array or Iterable or int or float): array like object to be converted to CSV.
Returns:
(str): object serialized to CSV
"""
stream = StringIO()
np.savetxt(stream, array_like, delimiter=',', fmt='%s')
return stream.getvalue() | [
"def",
"array_to_csv",
"(",
"array_like",
")",
":",
"# type: (np.array or Iterable or int or float) -> str",
"stream",
"=",
"StringIO",
"(",
")",
"np",
".",
"savetxt",
"(",
"stream",
",",
"array_like",
",",
"delimiter",
"=",
"','",
",",
"fmt",
"=",
"'%s'",
")",
"return",
"stream",
".",
"getvalue",
"(",
")"
] | Convert an array like object to CSV.
To understand better what an array like object is see:
https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays
Args:
array_like (np.array or Iterable or int or float): array like object to be converted to CSV.
Returns:
(str): object serialized to CSV | [
"Convert",
"an",
"array",
"like",
"object",
"to",
"CSV",
"."
] | python | train |
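With `fmt='%s'` and a comma delimiter, `np.savetxt` emits one newline-terminated CSV row per array row, so the encoder behaves as below (assuming the package is installed):

    import numpy as np
    from sagemaker_containers._encoders import array_to_csv

    print(array_to_csv(np.array([[1, 2], [3, 4]])))  # "1,2\n3,4\n"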
calmjs/calmjs.parse | src/calmjs/parse/parsers/es5.py | https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/parsers/es5.py#L400-L416 | def p_property_assignment(self, p):
"""property_assignment \
: property_name COLON assignment_expr
| GETPROP property_name LPAREN RPAREN LBRACE function_body RBRACE
| SETPROP property_name LPAREN property_set_parameter_list RPAREN\
LBRACE function_body RBRACE
"""
if len(p) == 4:
p[0] = self.asttypes.Assign(left=p[1], op=p[2], right=p[3])
p[0].setpos(p, 2)
elif len(p) == 8:
p[0] = self.asttypes.GetPropAssign(prop_name=p[2], elements=p[6])
p[0].setpos(p)
else:
p[0] = self.asttypes.SetPropAssign(
prop_name=p[2], parameter=p[4], elements=p[7])
p[0].setpos(p) | [
"def",
"p_property_assignment",
"(",
"self",
",",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
"==",
"4",
":",
"p",
"[",
"0",
"]",
"=",
"self",
".",
"asttypes",
".",
"Assign",
"(",
"left",
"=",
"p",
"[",
"1",
"]",
",",
"op",
"=",
"p",
"[",
"2",
"]",
",",
"right",
"=",
"p",
"[",
"3",
"]",
")",
"p",
"[",
"0",
"]",
".",
"setpos",
"(",
"p",
",",
"2",
")",
"elif",
"len",
"(",
"p",
")",
"==",
"8",
":",
"p",
"[",
"0",
"]",
"=",
"self",
".",
"asttypes",
".",
"GetPropAssign",
"(",
"prop_name",
"=",
"p",
"[",
"2",
"]",
",",
"elements",
"=",
"p",
"[",
"6",
"]",
")",
"p",
"[",
"0",
"]",
".",
"setpos",
"(",
"p",
")",
"else",
":",
"p",
"[",
"0",
"]",
"=",
"self",
".",
"asttypes",
".",
"SetPropAssign",
"(",
"prop_name",
"=",
"p",
"[",
"2",
"]",
",",
"parameter",
"=",
"p",
"[",
"4",
"]",
",",
"elements",
"=",
"p",
"[",
"7",
"]",
")",
"p",
"[",
"0",
"]",
".",
"setpos",
"(",
"p",
")"
] | property_assignment \
: property_name COLON assignment_expr
| GETPROP property_name LPAREN RPAREN LBRACE function_body RBRACE
| SETPROP property_name LPAREN property_set_parameter_list RPAREN\
LBRACE function_body RBRACE | [
"property_assignment",
"\\",
":",
"property_name",
"COLON",
"assignment_expr",
"|",
"GETPROP",
"property_name",
"LPAREN",
"RPAREN",
"LBRACE",
"function_body",
"RBRACE",
"|",
"SETPROP",
"property_name",
"LPAREN",
"property_set_parameter_list",
"RPAREN",
"\\",
"LBRACE",
"function_body",
"RBRACE"
] | python | train |
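The three alternatives in this production cover plain `key: value` pairs plus ES5 getter and setter definitions, built into Assign, GetPropAssign, and SetPropAssign nodes respectively. A hedged round-trip check through the package's usual entry point (`calmjs.parse.es5` is assumed here):

    from calmjs.parse import es5

    tree = es5("var o = {a: 1, get b() { return 2; }, set c(v) { this._c = v; }};")
    print(tree)  # regenerates the source, exercising all three rule alternatives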
androguard/androguard | androguard/core/bytecodes/dvm.py | https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/dvm.py#L97-L110 | def read_null_terminated_string(f):
"""
Read a null terminated string from a file-like object.
:param f: file-like object
:rtype: bytearray
"""
x = bytearray()
while True:
z = f.read(1)
if ord(z) == 0:
return x
else:
x.append(ord(z)) | [
"def",
"read_null_terminated_string",
"(",
"f",
")",
":",
"x",
"=",
"bytearray",
"(",
")",
"while",
"True",
":",
"z",
"=",
"f",
".",
"read",
"(",
"1",
")",
"if",
"ord",
"(",
"z",
")",
"==",
"0",
":",
"return",
"x",
"else",
":",
"x",
".",
"append",
"(",
"ord",
"(",
"z",
")",
")"
] | Read a null terminated string from a file-like object.
:param f: file-like object
:rtype: bytearray | [
"Read",
"a",
"null",
"terminated",
"string",
"from",
"a",
"file",
"-",
"like",
"object",
"."
] | python | train |
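Any file-like object satisfies the reader's contract, so an in-memory buffer shows it plainly: bytes up to (and consuming) the first NUL come back as a bytearray:

    import io
    from androguard.core.bytecodes.dvm import read_null_terminated_string

    buf = io.BytesIO(b"classes.dex\x00trailing")
    print(read_null_terminated_string(buf))  # bytearray(b'classes.dex')
    print(buf.read())                        # b'trailing' -- the NUL was consumed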
UCBerkeleySETI/blimpy | blimpy/sigproc.py | https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/sigproc.py#L160-L196 | def read_header(filename, return_idxs=False):
""" Read blimpy header and return a Python dictionary of key:value pairs
Args:
filename (str): name of file to open
Optional args:
return_idxs (bool): Default False. If true, returns the file offset indexes
for values
Returns: dict of key:value header pairs (or their file offset indexes when return_idxs is True)
"""
with open(filename, 'rb') as fh:
header_dict = {}
header_idxs = {}
# Check this is a blimpy file
keyword, value, idx = read_next_header_keyword(fh)
try:
assert keyword == b'HEADER_START'
except AssertionError:
raise RuntimeError("Not a valid blimpy file.")
while True:
keyword, value, idx = read_next_header_keyword(fh)
if keyword == b'HEADER_END':
break
else:
header_dict[keyword] = value
header_idxs[keyword] = idx
if return_idxs:
return header_idxs
else:
return header_dict | [
"def",
"read_header",
"(",
"filename",
",",
"return_idxs",
"=",
"False",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"fh",
":",
"header_dict",
"=",
"{",
"}",
"header_idxs",
"=",
"{",
"}",
"# Check this is a blimpy file",
"keyword",
",",
"value",
",",
"idx",
"=",
"read_next_header_keyword",
"(",
"fh",
")",
"try",
":",
"assert",
"keyword",
"==",
"b'HEADER_START'",
"except",
"AssertionError",
":",
"raise",
"RuntimeError",
"(",
"\"Not a valid blimpy file.\"",
")",
"while",
"True",
":",
"keyword",
",",
"value",
",",
"idx",
"=",
"read_next_header_keyword",
"(",
"fh",
")",
"if",
"keyword",
"==",
"b'HEADER_END'",
":",
"break",
"else",
":",
"header_dict",
"[",
"keyword",
"]",
"=",
"value",
"header_idxs",
"[",
"keyword",
"]",
"=",
"idx",
"if",
"return_idxs",
":",
"return",
"header_idxs",
"else",
":",
"return",
"header_dict"
] | Read blimpy header and return a Python dictionary of key:value pairs
Args:
filename (str): name of file to open
Optional args:
return_idxs (bool): Default False. If true, returns the file offset indexes
for values
Returns: dict of key:value header pairs (or their file offset indexes when return_idxs is True) | [
"Read",
"blimpy",
"header",
"and",
"return",
"a",
"Python",
"dictionary",
"of",
"key",
":",
"value",
"pairs"
] | python | test |
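The reader walks keyword/value pairs between HEADER_START and HEADER_END, so the returned dict is keyed by the raw bytes keywords. A usage sketch; the filename is a placeholder:

    from blimpy.sigproc import read_header

    header = read_header('observation.fil')                      # {b'tsamp': ..., ...}
    offsets = read_header('observation.fil', return_idxs=True)   # file offsets instead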
ecell/ecell4 | ecell4/util/viz.py | https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L1774-L1906 | def plot_movie_with_attractive_mpl(
worlds, marker_size=6, figsize=6, grid=True,
wireframe=False, species_list=None, max_count=None, angle=None, noaxis=False,
interval=0.16, repeat_delay=3000, stride=1, rotate=None,
legend=True, whratio=1.33, scale=1, output=None, crf=10, bitrate='1M', **kwargs):
"""
Generate a movie from the received list of instances of World,
and show it on IPython notebook. This function may require ffmpeg.
Parameters
----------
worlds : list or FixedIntervalHDF5Observer
A list of Worlds to render.
marker_size : float, default 3
Marker size for all species. Size is passed to scatter function
as argument, s=(2 ** marker_size).
figsize : float, default 6
Size of the plotting area. Given in inch.
species_list : array of string, default None
If set, plot_world will not search the list of species.
max_count : Integer, default None
The maximum number of particles to show for each species.
None means no limitation.
angle : tuple, default None
A tuple of view angle which is given as (azim, elev, dist).
If None, use default assumed to be (-60, 30, 10).
interval : Integer, default 0.16
Parameters for matplotlib.animation.ArtistAnimation.
stride : Integer, default 1
Stride per frame.
rotate : tuple, default None
A pair of rotation angles, elev and azim, for animation.
None means no rotation, same as (0, 0).
legend : bool, default True
whratio : float, default 1.33
A ratio between figure width and height.
Customize this to keep a legend within the figure.
scale : float, default 1
A length-scaling factor
crf : int, default 10
The CRF value can be from 4-63. Lower values mean better quality.
bitrate : str, default '1M'
Target bitrate
output : str, default None
An output filename. '.webm' or '.mp4' is only accepted.
If None, display a movie on IPython Notebook.
"""
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from ecell4_base.core import Species, FixedIntervalHDF5Observer
from .simulation import load_world
import os.path
# print("Start generating species_list ...")
if isinstance(worlds, FixedIntervalHDF5Observer):
obs = worlds
worlds = []
for i in range(0, obs.num_steps(), stride):
filename = obs.filename(i)
if os.path.isfile(filename):
worlds.append(load_world(filename))
elif len(worlds) >0:
worlds.append(worlds[-1])
else:
worlds = worlds[:: stride]
if species_list is None:
species_list = []
for world in worlds:
species_list.extend(
[p.species().serial() for pid, p in world.list_particles()])
species_list = sorted(
set(species_list), key=species_list.index) # XXX: pick unique ones
# print("Start preparing mplot3d ...")
fig, ax = __prepare_mplot3d_with_attractive_mpl(
__get_range_of_world(worlds[0], scale), figsize, grid, wireframe, angle,
noaxis, whratio)
from mpl_toolkits.mplot3d.art3d import juggle_axes
def _update_plot(i, scatters, worlds, species_list):
world = worlds[i]
for i, name in enumerate(species_list):
xs, ys, zs = [], [], []
particles = world.list_particles_exact(Species(name))
if max_count is not None and len(particles) > max_count:
particles = random.sample(particles, max_count)
for pid, p in particles:
pos = p.position() * scale
xs.append(pos[0])
ys.append(pos[1])
zs.append(pos[2])
scatters[i]._offsets3d = juggle_axes(xs, ys, zs, 'z')
if rotate is not None:
ax.elev += rotate[0]
ax.azim += rotate[1]
fig.canvas.draw()
# print("Start making animation ...")
color_scale = attractive_mpl_color_scale({})
scatters = []
for i, name in enumerate(species_list):
opts = dict(marker='o', s=(2 ** marker_size), edgecolors='white', alpha=0.7)
opts.update(kwargs)
scatters.append(
ax.scatter(
[], [], [], facecolor=color_scale.get_color(name), label=name, **opts))
# if legend:
# ax.legend(loc='best', shadow=True)
if legend is not None and legend is not False:
legend_opts = {'loc': 'center left', 'bbox_to_anchor': (1.0, 0.5),
'shadow': False, 'frameon': False, 'fontsize': 'x-large',
'scatterpoints': 1}
if isinstance(legend, dict):
legend_opts.update(legend)
ax.legend(**legend_opts)
ani = animation.FuncAnimation(
fig, _update_plot, fargs=(scatters, worlds, species_list),
frames=len(worlds), interval=interval, blit=False)
plt.close(ani._fig)
# print("Start generating a movie ...")
display_anim(ani, output, fps=1.0 / interval, crf=crf, bitrate=bitrate) | [
"def",
"plot_movie_with_attractive_mpl",
"(",
"worlds",
",",
"marker_size",
"=",
"6",
",",
"figsize",
"=",
"6",
",",
"grid",
"=",
"True",
",",
"wireframe",
"=",
"False",
",",
"species_list",
"=",
"None",
",",
"max_count",
"=",
"None",
",",
"angle",
"=",
"None",
",",
"noaxis",
"=",
"False",
",",
"interval",
"=",
"0.16",
",",
"repeat_delay",
"=",
"3000",
",",
"stride",
"=",
"1",
",",
"rotate",
"=",
"None",
",",
"legend",
"=",
"True",
",",
"whratio",
"=",
"1.33",
",",
"scale",
"=",
"1",
",",
"output",
"=",
"None",
",",
"crf",
"=",
"10",
",",
"bitrate",
"=",
"'1M'",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"import",
"matplotlib",
".",
"animation",
"as",
"animation",
"from",
"ecell4_base",
".",
"core",
"import",
"Species",
",",
"FixedIntervalHDF5Observer",
"from",
".",
"simulation",
"import",
"load_world",
"import",
"os",
".",
"path",
"# print(\"Start generating species_list ...\")",
"if",
"isinstance",
"(",
"worlds",
",",
"FixedIntervalHDF5Observer",
")",
":",
"obs",
"=",
"worlds",
"worlds",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"obs",
".",
"num_steps",
"(",
")",
",",
"stride",
")",
":",
"filename",
"=",
"obs",
".",
"filename",
"(",
"i",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"worlds",
".",
"append",
"(",
"load_world",
"(",
"filename",
")",
")",
"elif",
"len",
"(",
"worlds",
")",
">",
"0",
":",
"worlds",
".",
"append",
"(",
"worlds",
"[",
"-",
"1",
"]",
")",
"else",
":",
"worlds",
"=",
"worlds",
"[",
":",
":",
"stride",
"]",
"if",
"species_list",
"is",
"None",
":",
"species_list",
"=",
"[",
"]",
"for",
"world",
"in",
"worlds",
":",
"species_list",
".",
"extend",
"(",
"[",
"p",
".",
"species",
"(",
")",
".",
"serial",
"(",
")",
"for",
"pid",
",",
"p",
"in",
"world",
".",
"list_particles",
"(",
")",
"]",
")",
"species_list",
"=",
"sorted",
"(",
"set",
"(",
"species_list",
")",
",",
"key",
"=",
"species_list",
".",
"index",
")",
"# XXX: pick unique ones",
"# print(\"Start preparing mplot3d ...\")",
"fig",
",",
"ax",
"=",
"__prepare_mplot3d_with_attractive_mpl",
"(",
"__get_range_of_world",
"(",
"worlds",
"[",
"0",
"]",
",",
"scale",
")",
",",
"figsize",
",",
"grid",
",",
"wireframe",
",",
"angle",
",",
"noaxis",
",",
"whratio",
")",
"from",
"mpl_toolkits",
".",
"mplot3d",
".",
"art3d",
"import",
"juggle_axes",
"def",
"_update_plot",
"(",
"i",
",",
"scatters",
",",
"worlds",
",",
"species_list",
")",
":",
"world",
"=",
"worlds",
"[",
"i",
"]",
"for",
"i",
",",
"name",
"in",
"enumerate",
"(",
"species_list",
")",
":",
"xs",
",",
"ys",
",",
"zs",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"particles",
"=",
"world",
".",
"list_particles_exact",
"(",
"Species",
"(",
"name",
")",
")",
"if",
"max_count",
"is",
"not",
"None",
"and",
"len",
"(",
"particles",
")",
">",
"max_count",
":",
"particles",
"=",
"random",
".",
"sample",
"(",
"particles",
",",
"max_count",
")",
"for",
"pid",
",",
"p",
"in",
"particles",
":",
"pos",
"=",
"p",
".",
"position",
"(",
")",
"*",
"scale",
"xs",
".",
"append",
"(",
"pos",
"[",
"0",
"]",
")",
"ys",
".",
"append",
"(",
"pos",
"[",
"1",
"]",
")",
"zs",
".",
"append",
"(",
"pos",
"[",
"2",
"]",
")",
"scatters",
"[",
"i",
"]",
".",
"_offsets3d",
"=",
"juggle_axes",
"(",
"xs",
",",
"ys",
",",
"zs",
",",
"'z'",
")",
"if",
"rotate",
"is",
"not",
"None",
":",
"ax",
".",
"elev",
"+=",
"rotate",
"[",
"0",
"]",
"ax",
".",
"azim",
"+=",
"rotate",
"[",
"1",
"]",
"fig",
".",
"canvas",
".",
"draw",
"(",
")",
"# print(\"Start making animation ...\")",
"color_scale",
"=",
"attractive_mpl_color_scale",
"(",
"{",
"}",
")",
"scatters",
"=",
"[",
"]",
"for",
"i",
",",
"name",
"in",
"enumerate",
"(",
"species_list",
")",
":",
"opts",
"=",
"dict",
"(",
"marker",
"=",
"'o'",
",",
"s",
"=",
"(",
"2",
"**",
"marker_size",
")",
",",
"edgecolors",
"=",
"'white'",
",",
"alpha",
"=",
"0.7",
")",
"opts",
".",
"update",
"(",
"kwargs",
")",
"scatters",
".",
"append",
"(",
"ax",
".",
"scatter",
"(",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"facecolor",
"=",
"color_scale",
".",
"get_color",
"(",
"name",
")",
",",
"label",
"=",
"name",
",",
"*",
"*",
"opts",
")",
")",
"# if legend:",
"# ax.legend(loc='best', shadow=True)",
"if",
"legend",
"is",
"not",
"None",
"and",
"legend",
"is",
"not",
"False",
":",
"legend_opts",
"=",
"{",
"'loc'",
":",
"'center left'",
",",
"'bbox_to_anchor'",
":",
"(",
"1.0",
",",
"0.5",
")",
",",
"'shadow'",
":",
"False",
",",
"'frameon'",
":",
"False",
",",
"'fontsize'",
":",
"'x-large'",
",",
"'scatterpoints'",
":",
"1",
"}",
"if",
"isinstance",
"(",
"legend",
",",
"dict",
")",
":",
"legend_opts",
".",
"update",
"(",
"legend",
")",
"ax",
".",
"legend",
"(",
"*",
"*",
"legend_opts",
")",
"ani",
"=",
"animation",
".",
"FuncAnimation",
"(",
"fig",
",",
"_update_plot",
",",
"fargs",
"=",
"(",
"scatters",
",",
"worlds",
",",
"species_list",
")",
",",
"frames",
"=",
"len",
"(",
"worlds",
")",
",",
"interval",
"=",
"interval",
",",
"blit",
"=",
"False",
")",
"plt",
".",
"close",
"(",
"ani",
".",
"_fig",
")",
"# print(\"Start generating a movie ...\")",
"display_anim",
"(",
"ani",
",",
"output",
",",
"fps",
"=",
"1.0",
"/",
"interval",
",",
"crf",
"=",
"crf",
",",
"bitrate",
"=",
"bitrate",
")"
] | Generate a movie from the received list of instances of World,
and show it on IPython notebook. This function may require ffmpeg.
Parameters
----------
worlds : list or FixedIntervalHDF5Observer
A list of Worlds to render.
marker_size : float, default 3
Marker size for all species. Size is passed to scatter function
as argument, s=(2 ** marker_size).
figsize : float, default 6
Size of the plotting area. Given in inch.
species_list : array of string, default None
If set, plot_world will not search the list of species.
max_count : Integer, default None
The maximum number of particles to show for each species.
None means no limitation.
angle : tuple, default None
A tuple of view angle which is given as (azim, elev, dist).
If None, use default assumed to be (-60, 30, 10).
interval : Integer, default 0.16
Parameters for matplotlib.animation.ArtistAnimation.
stride : Integer, default 1
Stride per frame.
rotate : tuple, default None
A pair of rotation angles, elev and azim, for animation.
None means no rotation, same as (0, 0).
legend : bool, default True
whratio : float, default 1.33
A ratio between figure width and height.
Customize this to keep a legend within the figure.
scale : float, default 1
A length-scaling factor
crf : int, default 10
The CRF value can be from 4-63. Lower values mean better quality.
bitrate : str, default '1M'
Target bitrate
output : str, default None
An output filename. '.webm' or '.mp4' is only accepted.
If None, display a movie on IPython Notebook. | [
"Generate",
"a",
"move",
"from",
"the",
"received",
"list",
"of",
"instances",
"of",
"World",
"and",
"show",
"it",
"on",
"IPython",
"notebook",
".",
"This",
"function",
"may",
"require",
"ffmpeg",
"."
] | python | train |
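A hedged call sketch for the movie helper above, assuming `obs` is a FixedIntervalHDF5Observer left over from a finished simulation (observer setup is outside this record):

    from ecell4.util.viz import plot_movie_with_attractive_mpl

    # obs: a FixedIntervalHDF5Observer from a prior run (placeholder here)
    plot_movie_with_attractive_mpl(
        obs, stride=2, rotate=(0, 1),    # every 2nd snapshot, slow azimuthal spin
        output='movie.webm', crf=10)     # '.webm' or '.mp4' per the docstring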