repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (sequence) | docstring (string, 1-46.9k chars) | docstring_tokens (sequence) | language (stringclasses, 1 value) | partition (stringclasses, 3 values) |
---|---|---|---|---|---|---|---|---|
thiagopbueno/rddl2tf | rddl2tf/compiler.py | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L441-L461 | def reward_scope(self,
state: Sequence[tf.Tensor],
action: Sequence[tf.Tensor],
next_state: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]:
'''Returns the complete reward fluent scope for the
current `state`, `action` fluents, and `next_state` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
next_state (Sequence[tf.Tensor]): The next state fluents.
Returns:
A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
'''
scope = {}
scope.update(self.non_fluents_scope())
scope.update(self.state_scope(state))
scope.update(self.action_scope(action))
scope.update(self.next_state_scope(next_state))
return scope | [
"def",
"reward_scope",
"(",
"self",
",",
"state",
":",
"Sequence",
"[",
"tf",
".",
"Tensor",
"]",
",",
"action",
":",
"Sequence",
"[",
"tf",
".",
"Tensor",
"]",
",",
"next_state",
":",
"Sequence",
"[",
"tf",
".",
"Tensor",
"]",
")",
"->",
"Dict",
"[",
"str",
",",
"TensorFluent",
"]",
":",
"scope",
"=",
"{",
"}",
"scope",
".",
"update",
"(",
"self",
".",
"non_fluents_scope",
"(",
")",
")",
"scope",
".",
"update",
"(",
"self",
".",
"state_scope",
"(",
"state",
")",
")",
"scope",
".",
"update",
"(",
"self",
".",
"action_scope",
"(",
"action",
")",
")",
"scope",
".",
"update",
"(",
"self",
".",
"next_state_scope",
"(",
"next_state",
")",
")",
"return",
"scope"
] | Returns the complete reward fluent scope for the
current `state`, `action` fluents, and `next_state` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
next_state (Sequence[tf.Tensor]): The next state fluents.
Returns:
A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`. | [
"Returns",
"the",
"complete",
"reward",
"fluent",
"scope",
"for",
"the",
"current",
"state",
"action",
"fluents",
"and",
"next_state",
"fluents",
"."
] | python | train |
Azure/blobxfer | blobxfer/operations/upload.py | https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/operations/upload.py#L260-L281 | def _check_for_uploads_from_md5(self):
# type: (Uploader) -> None
"""Check queue for a file to upload
:param Uploader self: this
"""
cv = self._md5_offload.done_cv
while not self.termination_check_md5:
result = None
cv.acquire()
while True:
result = self._md5_offload.pop_done_queue()
if result is None:
# use cv timeout due to possible non-wake while running
cv.wait(1)
# check for terminating conditions
if self.termination_check_md5:
break
else:
break
cv.release()
if result is not None:
self._post_md5_skip_on_check(result[0], result[3]) | [
"def",
"_check_for_uploads_from_md5",
"(",
"self",
")",
":",
"# type: (Uploader) -> None",
"cv",
"=",
"self",
".",
"_md5_offload",
".",
"done_cv",
"while",
"not",
"self",
".",
"termination_check_md5",
":",
"result",
"=",
"None",
"cv",
".",
"acquire",
"(",
")",
"while",
"True",
":",
"result",
"=",
"self",
".",
"_md5_offload",
".",
"pop_done_queue",
"(",
")",
"if",
"result",
"is",
"None",
":",
"# use cv timeout due to possible non-wake while running",
"cv",
".",
"wait",
"(",
"1",
")",
"# check for terminating conditions",
"if",
"self",
".",
"termination_check_md5",
":",
"break",
"else",
":",
"break",
"cv",
".",
"release",
"(",
")",
"if",
"result",
"is",
"not",
"None",
":",
"self",
".",
"_post_md5_skip_on_check",
"(",
"result",
"[",
"0",
"]",
",",
"result",
"[",
"3",
"]",
")"
] | Check queue for a file to upload
:param Uploader self: this | [
"Check",
"queue",
"for",
"a",
"file",
"to",
"upload",
":",
"param",
"Uploader",
"self",
":",
"this"
] | python | train |
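The loop above is a standard condition-variable polling pattern: pop under the lock, `wait(1)` when the queue is empty, and re-check the termination flag on every timeout so a missed notification can never hang the worker. A minimal, self-contained sketch of the same pattern (every name below is hypothetical, not a blobxfer API):

```python
import threading

done_cv = threading.Condition()
done_queue = []          # results pushed by a producer, guarded by done_cv
terminate = False        # flipped by the main thread to stop the worker

def md5_consumer():
    # Same shape as _check_for_uploads_from_md5: pop under the lock,
    # wait(1) when empty, and re-check the termination flag each timeout.
    while not terminate:
        result = None
        with done_cv:
            while True:
                result = done_queue.pop() if done_queue else None
                if result is None:
                    done_cv.wait(1)      # timeout, so we never wait forever
                    if terminate:
                        break
                else:
                    break
        if result is not None:
            print("post-MD5 handling of", result)

worker = threading.Thread(target=md5_consumer)
worker.start()
with done_cv:
    done_queue.append(("file.bin", True))
    done_cv.notify()
terminate = True
worker.join()
```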
fermiPy/fermipy | fermipy/hpx_utils.py | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/hpx_utils.py#L559-L571 | def create_from_hdu(cls, hdu, ebins=None):
""" Creates an HPX object from a FITS header.
hdu : The FITS hdu
ebins : Energy bin edges [optional]
"""
convname = HPX.identify_HPX_convention(hdu.header)
conv = HPX_FITS_CONVENTIONS[convname]
try:
pixels = hdu.data[conv.idxstring]
except KeyError:
pixels = None
return cls.create_from_header(hdu.header, ebins, pixels) | [
"def",
"create_from_hdu",
"(",
"cls",
",",
"hdu",
",",
"ebins",
"=",
"None",
")",
":",
"convname",
"=",
"HPX",
".",
"identify_HPX_convention",
"(",
"hdu",
".",
"header",
")",
"conv",
"=",
"HPX_FITS_CONVENTIONS",
"[",
"convname",
"]",
"try",
":",
"pixels",
"=",
"hdu",
".",
"data",
"[",
"conv",
".",
"idxstring",
"]",
"except",
"KeyError",
":",
"pixels",
"=",
"None",
"return",
"cls",
".",
"create_from_header",
"(",
"hdu",
".",
"header",
",",
"ebins",
",",
"pixels",
")"
] | Creates an HPX object from a FITS header.
hdu : The FITS hdu
ebins : Energy bin edges [optional] | [
"Creates",
"an",
"HPX",
"object",
"from",
"a",
"FITS",
"header",
"."
] | python | train |
fbcotter/py3nvml | py3nvml/py3nvml.py | https://github.com/fbcotter/py3nvml/blob/47f0f2c0eee56dec4e4beebec26b734e01d357b7/py3nvml/py3nvml.py#L2128-L2154 | def nvmlDeviceGetMultiGpuBoard(handle):
r"""
/**
* Retrieves whether the device is on a Multi-GPU Board
* Devices that are on multi-GPU boards will set \a multiGpuBool to a non-zero value.
*
* For Fermi &tm; or newer fully supported devices.
*
* @param device The identifier of the target device
* @param multiGpuBool Reference in which to return a zero or non-zero value
* to indicate whether the device is on a multi GPU board
*
* @return
* - \ref NVML_SUCCESS if \a multiGpuBool has been set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a multiGpuBool is NULL
* - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlDeviceGetMultiGpuBoard
"""
c_multiGpu = c_uint();
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMultiGpuBoard")
ret = fn(handle, byref(c_multiGpu))
_nvmlCheckReturn(ret)
return bytes_to_str(c_multiGpu.value) | [
"def",
"nvmlDeviceGetMultiGpuBoard",
"(",
"handle",
")",
":",
"c_multiGpu",
"=",
"c_uint",
"(",
")",
"fn",
"=",
"_nvmlGetFunctionPointer",
"(",
"\"nvmlDeviceGetMultiGpuBoard\"",
")",
"ret",
"=",
"fn",
"(",
"handle",
",",
"byref",
"(",
"c_multiGpu",
")",
")",
"_nvmlCheckReturn",
"(",
"ret",
")",
"return",
"bytes_to_str",
"(",
"c_multiGpu",
".",
"value",
")"
] | r"""
/**
* Retrieves whether the device is on a Multi-GPU Board
* Devices that are on multi-GPU boards will set \a multiGpuBool to a non-zero value.
*
* For Fermi &tm; or newer fully supported devices.
*
* @param device The identifier of the target device
* @param multiGpuBool Reference in which to return a zero or non-zero value
* to indicate whether the device is on a multi GPU board
*
* @return
* - \ref NVML_SUCCESS if \a multiGpuBool has been set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a multiGpuBool is NULL
* - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlDeviceGetMultiGpuBoard | [
"r",
"/",
"**",
"*",
"Retrieves",
"whether",
"the",
"device",
"is",
"on",
"a",
"Multi",
"-",
"GPU",
"Board",
"*",
"Devices",
"that",
"are",
"on",
"multi",
"-",
"GPU",
"boards",
"will",
"set",
"\\",
"a",
"multiGpuBool",
"to",
"a",
"non",
"-",
"zero",
"value",
".",
"*",
"*",
"For",
"Fermi",
"&tm",
";",
"or",
"newer",
"fully",
"supported",
"devices",
".",
"*",
"*"
] | python | train |
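A hedged usage sketch for the query above: it assumes the usual NVML session helpers exposed by the same module (`nvmlInit`, `nvmlDeviceGetHandleByIndex`, `nvmlShutdown`) and an NVIDIA driver being present; only `nvmlDeviceGetMultiGpuBoard` is taken directly from the code shown.

```python
from py3nvml import py3nvml as nvml

nvml.nvmlInit()                              # must run before any query
try:
    handle = nvml.nvmlDeviceGetHandleByIndex(0)
    # Non-zero means GPU 0 sits on a multi-GPU board (e.g. a dual-GPU card)
    print("multi-GPU board:", nvml.nvmlDeviceGetMultiGpuBoard(handle))
finally:
    nvml.nvmlShutdown()
```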
dwavesystems/dimod | dimod/higherorder/polynomial.py | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/higherorder/polynomial.py#L461-L491 | def to_binary(self, copy=False):
"""Return a binary polynomial over `{0, 1}` variables.
Args:
copy (optional, default=False):
If True, the returned polynomial is always a copy. Otherwise,
if the polynomial is binary-valued already it returns itself.
Returns:
:obj:`.BinaryPolynomial`
"""
if self.vartype is Vartype.BINARY:
if copy:
return self.copy()
else:
return self
new = BinaryPolynomial({}, Vartype.BINARY)
# s = 2x - 1
for term, bias in self.items():
for t in map(frozenset, powerset(term)):
newbias = bias * 2**len(t) * (-1)**(len(term) - len(t))
if t in new:
new[t] += newbias
else:
new[t] = newbias
return new | [
"def",
"to_binary",
"(",
"self",
",",
"copy",
"=",
"False",
")",
":",
"if",
"self",
".",
"vartype",
"is",
"Vartype",
".",
"BINARY",
":",
"if",
"copy",
":",
"return",
"self",
".",
"copy",
"(",
")",
"else",
":",
"return",
"self",
"new",
"=",
"BinaryPolynomial",
"(",
"{",
"}",
",",
"Vartype",
".",
"BINARY",
")",
"# s = 2x - 1",
"for",
"term",
",",
"bias",
"in",
"self",
".",
"items",
"(",
")",
":",
"for",
"t",
"in",
"map",
"(",
"frozenset",
",",
"powerset",
"(",
"term",
")",
")",
":",
"newbias",
"=",
"bias",
"*",
"2",
"**",
"len",
"(",
"t",
")",
"*",
"(",
"-",
"1",
")",
"**",
"(",
"len",
"(",
"term",
")",
"-",
"len",
"(",
"t",
")",
")",
"if",
"t",
"in",
"new",
":",
"new",
"[",
"t",
"]",
"+=",
"newbias",
"else",
":",
"new",
"[",
"t",
"]",
"=",
"newbias",
"return",
"new"
] | Return a binary polynomial over `{0, 1}` variables.
Args:
copy (optional, default=False):
If True, the returned polynomial is always a copy. Otherwise,
if the polynomial is binary-valued already it returns itself.
Returns:
:obj:`.BinaryPolynomial` | [
"Return",
"a",
"binary",
"polynomial",
"over",
"{",
"0",
"1",
"}",
"variables",
"."
] | python | train |
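The `# s = 2x - 1` substitution is the whole trick: each spin term is expanded over the powerset of its variables with weight `bias * 2**len(t) * (-1)**(len(term) - len(t))`. A standalone reproduction of that expansion for a single term (plain Python, no dimod import needed):

```python
from itertools import combinations

def spin_term_to_binary(term, bias):
    """Expand bias * prod(s_v) with s_v = 2*x_v - 1 into {0, 1} terms."""
    term = frozenset(term)
    out = {}
    for r in range(len(term) + 1):
        for t in map(frozenset, combinations(sorted(term), r)):
            out[t] = out.get(t, 0) + bias * 2**len(t) * (-1)**(len(term) - len(t))
    return out

# s_u * s_v == (2*x_u - 1)*(2*x_v - 1) == 4*x_u*x_v - 2*x_u - 2*x_v + 1
print(spin_term_to_binary({'u', 'v'}, 1))
# -> {frozenset(): 1, frozenset({'u'}): -2, frozenset({'v'}): -2, frozenset({'u', 'v'}): 4}
```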
apple/turicreate | src/unity/python/turicreate/data_structures/sframe.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sframe.py#L1845-L1942 | def to_sql(self, conn, table_name, dbapi_module=None,
use_python_type_specifiers=False, use_exact_column_names=True):
"""
Convert an SFrame to a single table in a SQL database.
This function does not attempt to create the table or check if a table
named `table_name` exists in the database. It simply assumes that
`table_name` exists in the database and appends to it.
`to_sql` can be thought of as a convenience wrapper around
parameterized SQL insert statements.
Parameters
----------
conn : dbapi2.Connection
A DBAPI2 connection object. Any connection object originating from
the 'connect' method of a DBAPI2-compliant package can be used.
table_name : str
The name of the table to append the data in this SFrame.
dbapi_module : module | package, optional
The top-level DBAPI2 module/package that constructed the given
connection object. By default, a best guess of which module the
connection came from is made. In the event that this guess is wrong,
this will need to be specified.
use_python_type_specifiers : bool, optional
If the DBAPI2 module's parameter marker style is 'format' or
'pyformat', attempt to use accurate type specifiers for each value
('s' for string, 'd' for integer, etc.). Many DBAPI2 modules simply
use 's' for all types if they use these parameter markers, so this is
False by default.
use_exact_column_names : bool, optional
Specify the column names of the SFrame when inserting its contents
into the DB. If the specified table does not have the exact same
column names as the SFrame, inserting the data will fail. If False,
the columns in the SFrame are inserted in order without care of the
schema of the DB table. True by default.
"""
mod_info = _get_global_dbapi_info(dbapi_module, conn)
c = conn.cursor()
col_info = list(zip(self.column_names(), self.column_types()))
if not use_python_type_specifiers:
_pytype_to_printf = lambda x: 's'
# DBAPI2 standard allows for five different ways to specify parameters
sql_param = {
'qmark' : lambda name,col_num,col_type: '?',
'numeric' : lambda name,col_num,col_type:':'+str(col_num+1),
'named' : lambda name,col_num,col_type:':'+str(name),
'format' : lambda name,col_num,col_type:'%'+_pytype_to_printf(col_type),
'pyformat': lambda name,col_num,col_type:'%('+str(name)+')'+_pytype_to_printf(col_type),
}
get_sql_param = sql_param[mod_info['paramstyle']]
# form insert string
ins_str = "INSERT INTO " + str(table_name)
value_str = " VALUES ("
col_str = " ("
count = 0
for i in col_info:
col_str += i[0]
value_str += get_sql_param(i[0],count,i[1])
if count < len(col_info)-1:
col_str += ","
value_str += ","
count += 1
col_str += ")"
value_str += ")"
if use_exact_column_names:
ins_str += col_str
ins_str += value_str
# Some formats require values in an iterable, some a dictionary
if (mod_info['paramstyle'] == 'named' or\
mod_info['paramstyle'] == 'pyformat'):
prepare_sf_row = lambda x:x
else:
col_names = self.column_names()
prepare_sf_row = lambda x: [x[i] for i in col_names]
for i in self:
try:
c.execute(ins_str, prepare_sf_row(i))
except mod_info['Error'] as e:
if hasattr(conn, 'rollback'):
conn.rollback()
raise e
conn.commit()
c.close() | [
"def",
"to_sql",
"(",
"self",
",",
"conn",
",",
"table_name",
",",
"dbapi_module",
"=",
"None",
",",
"use_python_type_specifiers",
"=",
"False",
",",
"use_exact_column_names",
"=",
"True",
")",
":",
"mod_info",
"=",
"_get_global_dbapi_info",
"(",
"dbapi_module",
",",
"conn",
")",
"c",
"=",
"conn",
".",
"cursor",
"(",
")",
"col_info",
"=",
"list",
"(",
"zip",
"(",
"self",
".",
"column_names",
"(",
")",
",",
"self",
".",
"column_types",
"(",
")",
")",
")",
"if",
"not",
"use_python_type_specifiers",
":",
"_pytype_to_printf",
"=",
"lambda",
"x",
":",
"'s'",
"# DBAPI2 standard allows for five different ways to specify parameters",
"sql_param",
"=",
"{",
"'qmark'",
":",
"lambda",
"name",
",",
"col_num",
",",
"col_type",
":",
"'?'",
",",
"'numeric'",
":",
"lambda",
"name",
",",
"col_num",
",",
"col_type",
":",
"':'",
"+",
"str",
"(",
"col_num",
"+",
"1",
")",
",",
"'named'",
":",
"lambda",
"name",
",",
"col_num",
",",
"col_type",
":",
"':'",
"+",
"str",
"(",
"name",
")",
",",
"'format'",
":",
"lambda",
"name",
",",
"col_num",
",",
"col_type",
":",
"'%'",
"+",
"_pytype_to_printf",
"(",
"col_type",
")",
",",
"'pyformat'",
":",
"lambda",
"name",
",",
"col_num",
",",
"col_type",
":",
"'%('",
"+",
"str",
"(",
"name",
")",
"+",
"')'",
"+",
"_pytype_to_printf",
"(",
"col_type",
")",
",",
"}",
"get_sql_param",
"=",
"sql_param",
"[",
"mod_info",
"[",
"'paramstyle'",
"]",
"]",
"# form insert string",
"ins_str",
"=",
"\"INSERT INTO \"",
"+",
"str",
"(",
"table_name",
")",
"value_str",
"=",
"\" VALUES (\"",
"col_str",
"=",
"\" (\"",
"count",
"=",
"0",
"for",
"i",
"in",
"col_info",
":",
"col_str",
"+=",
"i",
"[",
"0",
"]",
"value_str",
"+=",
"get_sql_param",
"(",
"i",
"[",
"0",
"]",
",",
"count",
",",
"i",
"[",
"1",
"]",
")",
"if",
"count",
"<",
"len",
"(",
"col_info",
")",
"-",
"1",
":",
"col_str",
"+=",
"\",\"",
"value_str",
"+=",
"\",\"",
"count",
"+=",
"1",
"col_str",
"+=",
"\")\"",
"value_str",
"+=",
"\")\"",
"if",
"use_exact_column_names",
":",
"ins_str",
"+=",
"col_str",
"ins_str",
"+=",
"value_str",
"# Some formats require values in an iterable, some a dictionary",
"if",
"(",
"mod_info",
"[",
"'paramstyle'",
"]",
"==",
"'named'",
"or",
"mod_info",
"[",
"'paramstyle'",
"]",
"==",
"'pyformat'",
")",
":",
"prepare_sf_row",
"=",
"lambda",
"x",
":",
"x",
"else",
":",
"col_names",
"=",
"self",
".",
"column_names",
"(",
")",
"prepare_sf_row",
"=",
"lambda",
"x",
":",
"[",
"x",
"[",
"i",
"]",
"for",
"i",
"in",
"col_names",
"]",
"for",
"i",
"in",
"self",
":",
"try",
":",
"c",
".",
"execute",
"(",
"ins_str",
",",
"prepare_sf_row",
"(",
"i",
")",
")",
"except",
"mod_info",
"[",
"'Error'",
"]",
"as",
"e",
":",
"if",
"hasattr",
"(",
"conn",
",",
"'rollback'",
")",
":",
"conn",
".",
"rollback",
"(",
")",
"raise",
"e",
"conn",
".",
"commit",
"(",
")",
"c",
".",
"close",
"(",
")"
] | Convert an SFrame to a single table in a SQL database.
This function does not attempt to create the table or check if a table
named `table_name` exists in the database. It simply assumes that
`table_name` exists in the database and appends to it.
`to_sql` can be thought of as a convenience wrapper around
parameterized SQL insert statements.
Parameters
----------
conn : dbapi2.Connection
A DBAPI2 connection object. Any connection object originating from
the 'connect' method of a DBAPI2-compliant package can be used.
table_name : str
The name of the table to append the data in this SFrame.
dbapi_module : module | package, optional
The top-level DBAPI2 module/package that constructed the given
connection object. By default, a best guess of which module the
connection came from is made. In the event that this guess is wrong,
this will need to be specified.
use_python_type_specifiers : bool, optional
If the DBAPI2 module's parameter marker style is 'format' or
'pyformat', attempt to use accurate type specifiers for each value
('s' for string, 'd' for integer, etc.). Many DBAPI2 modules simply
use 's' for all types if they use these parameter markers, so this is
False by default.
use_exact_column_names : bool, optional
Specify the column names of the SFrame when inserting its contents
into the DB. If the specified table does not have the exact same
column names as the SFrame, inserting the data will fail. If False,
the columns in the SFrame are inserted in order without care of the
schema of the DB table. True by default. | [
"Convert",
"an",
"SFrame",
"to",
"a",
"single",
"table",
"in",
"a",
"SQL",
"database",
"."
] | python | train |
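A hedged usage sketch with `sqlite3`, a DBAPI2 module using the 'qmark' paramstyle. Because `to_sql` only appends, the target table has to exist first; the table and column names below are made up for illustration.

```python
import sqlite3
import turicreate as tc

sf = tc.SFrame({'id': [1, 2, 3], 'name': ['a', 'b', 'c']})

conn = sqlite3.connect('example.db')
# to_sql never creates the table, so create it with matching column names
conn.execute('CREATE TABLE IF NOT EXISTS people (id INTEGER, name TEXT)')

sf.to_sql(conn, 'people')      # one parameterized INSERT per SFrame row
print(conn.execute('SELECT * FROM people').fetchall())
conn.close()
```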
not-na/peng3d | peng3d/gui/__init__.py | https://github.com/not-na/peng3d/blob/1151be665b26cc8a479f6307086ba919e4d32d85/peng3d/gui/__init__.py#L68-L84 | def changeSubMenu(self,submenu):
"""
Changes the submenu that is displayed.
:raises ValueError: if the name was not previously registered
"""
if submenu not in self.submenus:
raise ValueError("Submenu %s does not exist!"%submenu)
elif submenu == self.activeSubMenu:
return # Ignore double submenu activation to prevent bugs in submenu initializer
old = self.activeSubMenu
self.activeSubMenu = submenu
if old is not None:
self.submenus[old].on_exit(submenu)
self.submenus[old].doAction("exit")
self.submenu.on_enter(old)
self.submenu.doAction("enter") | [
"def",
"changeSubMenu",
"(",
"self",
",",
"submenu",
")",
":",
"if",
"submenu",
"not",
"in",
"self",
".",
"submenus",
":",
"raise",
"ValueError",
"(",
"\"Submenu %s does not exist!\"",
"%",
"submenu",
")",
"elif",
"submenu",
"==",
"self",
".",
"activeSubMenu",
":",
"return",
"# Ignore double submenu activation to prevent bugs in submenu initializer",
"old",
"=",
"self",
".",
"activeSubMenu",
"self",
".",
"activeSubMenu",
"=",
"submenu",
"if",
"old",
"is",
"not",
"None",
":",
"self",
".",
"submenus",
"[",
"old",
"]",
".",
"on_exit",
"(",
"submenu",
")",
"self",
".",
"submenus",
"[",
"old",
"]",
".",
"doAction",
"(",
"\"exit\"",
")",
"self",
".",
"submenu",
".",
"on_enter",
"(",
"old",
")",
"self",
".",
"submenu",
".",
"doAction",
"(",
"\"enter\"",
")"
] | Changes the submenu that is displayed.
:raises ValueError: if the name was not previously registered | [
"Changes",
"the",
"submenu",
"that",
"is",
"displayed",
".",
":",
"raises",
"ValueError",
":",
"if",
"the",
"name",
"was",
"not",
"previously",
"registered"
] | python | test |
elastic/elasticsearch-py | elasticsearch/transport.py | https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/transport.py#L258-L350 | def perform_request(self, method, url, headers=None, params=None, body=None):
"""
Perform the actual request. Retrieve a connection from the connection
pool, pass all the information to its perform_request method and
return the data.
If an exception was raised, mark the connection as failed and retry (up
to `max_retries` times).
If the operation was successful and the connection used was previously
marked as dead, mark it as live, resetting its failure count.
:arg method: HTTP method to use
:arg url: absolute url (without host) to target
:arg headers: dictionary of headers, will be handed over to the
underlying :class:`~elasticsearch.Connection` class
:arg params: dictionary of query parameters, will be handed over to the
underlying :class:`~elasticsearch.Connection` class for serialization
:arg body: body of the request, will be serialized using the serializer and
passed to the connection
"""
if body is not None:
body = self.serializer.dumps(body)
# some clients or environments don't support sending GET with body
if method in ('HEAD', 'GET') and self.send_get_body_as != 'GET':
# send it as post instead
if self.send_get_body_as == 'POST':
method = 'POST'
# or as source parameter
elif self.send_get_body_as == 'source':
if params is None:
params = {}
params['source'] = body
body = None
if body is not None:
try:
body = body.encode('utf-8', 'surrogatepass')
except (UnicodeDecodeError, AttributeError):
# bytes/str - no need to re-encode
pass
ignore = ()
timeout = None
if params:
timeout = params.pop('request_timeout', None)
ignore = params.pop('ignore', ())
if isinstance(ignore, int):
ignore = (ignore, )
for attempt in range(self.max_retries + 1):
connection = self.get_connection()
try:
# add a delay before attempting the next retry
# 0, 1, 3, 7, etc...
delay = 2**attempt - 1
time.sleep(delay)
status, headers_response, data = connection.perform_request(method, url, params, body, headers=headers, ignore=ignore, timeout=timeout)
except TransportError as e:
if method == 'HEAD' and e.status_code == 404:
return False
retry = False
if isinstance(e, ConnectionTimeout):
retry = self.retry_on_timeout
elif isinstance(e, ConnectionError):
retry = True
elif e.status_code in self.retry_on_status:
retry = True
if retry:
# only mark as dead if we are retrying
self.mark_dead(connection)
# raise exception on last retry
if attempt == self.max_retries:
raise
else:
raise
else:
# connection didn't fail, confirm it's live status
self.connection_pool.mark_live(connection)
if method == 'HEAD':
return 200 <= status < 300
if data:
data = self.deserializer.loads(data, headers_response.get('content-type'))
return data | [
"def",
"perform_request",
"(",
"self",
",",
"method",
",",
"url",
",",
"headers",
"=",
"None",
",",
"params",
"=",
"None",
",",
"body",
"=",
"None",
")",
":",
"if",
"body",
"is",
"not",
"None",
":",
"body",
"=",
"self",
".",
"serializer",
".",
"dumps",
"(",
"body",
")",
"# some clients or environments don't support sending GET with body",
"if",
"method",
"in",
"(",
"'HEAD'",
",",
"'GET'",
")",
"and",
"self",
".",
"send_get_body_as",
"!=",
"'GET'",
":",
"# send it as post instead",
"if",
"self",
".",
"send_get_body_as",
"==",
"'POST'",
":",
"method",
"=",
"'POST'",
"# or as source parameter",
"elif",
"self",
".",
"send_get_body_as",
"==",
"'source'",
":",
"if",
"params",
"is",
"None",
":",
"params",
"=",
"{",
"}",
"params",
"[",
"'source'",
"]",
"=",
"body",
"body",
"=",
"None",
"if",
"body",
"is",
"not",
"None",
":",
"try",
":",
"body",
"=",
"body",
".",
"encode",
"(",
"'utf-8'",
",",
"'surrogatepass'",
")",
"except",
"(",
"UnicodeDecodeError",
",",
"AttributeError",
")",
":",
"# bytes/str - no need to re-encode",
"pass",
"ignore",
"=",
"(",
")",
"timeout",
"=",
"None",
"if",
"params",
":",
"timeout",
"=",
"params",
".",
"pop",
"(",
"'request_timeout'",
",",
"None",
")",
"ignore",
"=",
"params",
".",
"pop",
"(",
"'ignore'",
",",
"(",
")",
")",
"if",
"isinstance",
"(",
"ignore",
",",
"int",
")",
":",
"ignore",
"=",
"(",
"ignore",
",",
")",
"for",
"attempt",
"in",
"range",
"(",
"self",
".",
"max_retries",
"+",
"1",
")",
":",
"connection",
"=",
"self",
".",
"get_connection",
"(",
")",
"try",
":",
"# add a delay before attempting the next retry",
"# 0, 1, 3, 7, etc...",
"delay",
"=",
"2",
"**",
"attempt",
"-",
"1",
"time",
".",
"sleep",
"(",
"delay",
")",
"status",
",",
"headers_response",
",",
"data",
"=",
"connection",
".",
"perform_request",
"(",
"method",
",",
"url",
",",
"params",
",",
"body",
",",
"headers",
"=",
"headers",
",",
"ignore",
"=",
"ignore",
",",
"timeout",
"=",
"timeout",
")",
"except",
"TransportError",
"as",
"e",
":",
"if",
"method",
"==",
"'HEAD'",
"and",
"e",
".",
"status_code",
"==",
"404",
":",
"return",
"False",
"retry",
"=",
"False",
"if",
"isinstance",
"(",
"e",
",",
"ConnectionTimeout",
")",
":",
"retry",
"=",
"self",
".",
"retry_on_timeout",
"elif",
"isinstance",
"(",
"e",
",",
"ConnectionError",
")",
":",
"retry",
"=",
"True",
"elif",
"e",
".",
"status_code",
"in",
"self",
".",
"retry_on_status",
":",
"retry",
"=",
"True",
"if",
"retry",
":",
"# only mark as dead if we are retrying",
"self",
".",
"mark_dead",
"(",
"connection",
")",
"# raise exception on last retry",
"if",
"attempt",
"==",
"self",
".",
"max_retries",
":",
"raise",
"else",
":",
"raise",
"else",
":",
"# connection didn't fail, confirm it's live status",
"self",
".",
"connection_pool",
".",
"mark_live",
"(",
"connection",
")",
"if",
"method",
"==",
"'HEAD'",
":",
"return",
"200",
"<=",
"status",
"<",
"300",
"if",
"data",
":",
"data",
"=",
"self",
".",
"deserializer",
".",
"loads",
"(",
"data",
",",
"headers_response",
".",
"get",
"(",
"'content-type'",
")",
")",
"return",
"data"
] | Perform the actual request. Retrieve a connection from the connection
pool, pass all the information to its perform_request method and
return the data.
If an exception was raised, mark the connection as failed and retry (up
to `max_retries` times).
If the operation was successful and the connection used was previously
marked as dead, mark it as live, resetting its failure count.
:arg method: HTTP method to use
:arg url: absolute url (without host) to target
:arg headers: dictionary of headers, will be handed over to the
underlying :class:`~elasticsearch.Connection` class
:arg params: dictionary of query parameters, will be handed over to the
underlying :class:`~elasticsearch.Connection` class for serialization
:arg body: body of the request, will be serialized using the serializer and
passed to the connection | [
"Perform",
"the",
"actual",
"request",
".",
"Retrieve",
"a",
"connection",
"from",
"the",
"connection",
"pool",
"pass",
"all",
"the",
"information",
"to",
"it",
"s",
"perform_request",
"method",
"and",
"return",
"the",
"data",
"."
] | python | train |
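Callers usually reach this method through a client's `transport` attribute. Per the code above, `ignore` and `request_timeout` travel inside `params` and are popped before the request goes out. A hedged sketch (host and index name are placeholders):

```python
from elasticsearch import Elasticsearch

es = Elasticsearch()   # defaults to localhost:9200

# ignore=404 keeps a missing index from raising; request_timeout is per-request
body = es.transport.perform_request(
    'GET', '/my-index/_count',
    params={'ignore': 404, 'request_timeout': 10},
)
print(body)
```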
threeML/astromodels | astromodels/core/parameter.py | https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/core/parameter.py#L325-L346 | def in_unit_of(self, unit, as_quantity=False):
"""
Return the current value transformed to the new units
:param unit: either an astropy.Unit instance, or a string which can be converted to an astropy.Unit
instance, like "1 / (erg cm**2 s)"
:param as_quantity: if True, the method returns an astropy.Quantity, if False just a floating point number.
Default is False
:return: either a floating point or an astropy.Quantity depending on the value of "as_quantity"
"""
new_unit = u.Unit(unit)
new_quantity = self.as_quantity.to(new_unit)
if as_quantity:
return new_quantity
else:
return new_quantity.value | [
"def",
"in_unit_of",
"(",
"self",
",",
"unit",
",",
"as_quantity",
"=",
"False",
")",
":",
"new_unit",
"=",
"u",
".",
"Unit",
"(",
"unit",
")",
"new_quantity",
"=",
"self",
".",
"as_quantity",
".",
"to",
"(",
"new_unit",
")",
"if",
"as_quantity",
":",
"return",
"new_quantity",
"else",
":",
"return",
"new_quantity",
".",
"value"
] | Return the current value transformed to the new units
:param unit: either an astropy.Unit instance, or a string which can be converted to an astropy.Unit
instance, like "1 / (erg cm**2 s)"
:param as_quantity: if True, the method returns an astropy.Quantity, if False just a floating point number.
Default is False
:return: either a floating point or an astropy.Quantity depending on the value of "as_quantity" | [
"Return",
"the",
"current",
"value",
"transformed",
"to",
"the",
"new",
"units"
] | python | train |
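Under the hood this is plain `astropy.units` conversion. A sketch of the equivalent operation; with astromodels itself the call would read something like `p.in_unit_of('MeV')` for a parameter `p` constructed with a unit (hedged, since that setup is not shown here).

```python
import astropy.units as u

q = 3.2 * u.keV
print(q.to(u.erg))          # Quantity, like in_unit_of(..., as_quantity=True)
print(q.to(u.erg).value)    # bare float, the default return value
```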
cloud9ers/gurumate | environment/lib/python2.7/site-packages/psutil/_common.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/psutil/_common.py#L106-L119 | def isfile_strict(path):
"""Same as os.path.isfile() but does not swallow EACCES / EPERM
exceptions, see:
http://mail.python.org/pipermail/python-dev/2012-June/120787.html
"""
try:
st = os.stat(path)
except OSError:
err = sys.exc_info()[1]
if err.errno in (errno.EPERM, errno.EACCES):
raise
return False
else:
return stat.S_ISREG(st.st_mode) | [
"def",
"isfile_strict",
"(",
"path",
")",
":",
"try",
":",
"st",
"=",
"os",
".",
"stat",
"(",
"path",
")",
"except",
"OSError",
":",
"err",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
"if",
"err",
".",
"errno",
"in",
"(",
"errno",
".",
"EPERM",
",",
"errno",
".",
"EACCES",
")",
":",
"raise",
"return",
"False",
"else",
":",
"return",
"stat",
".",
"S_ISREG",
"(",
"st",
".",
"st_mode",
")"
] | Same as os.path.isfile() but does not swallow EACCES / EPERM
exceptions, see:
http://mail.python.org/pipermail/python-dev/2012-June/120787.html | [
"Same",
"as",
"os",
".",
"path",
".",
"isfile",
"()",
"but",
"does",
"not",
"swallow",
"EACCES",
"/",
"EPERM",
"exceptions",
"see",
":",
"http",
":",
"//",
"mail",
".",
"python",
".",
"org",
"/",
"pipermail",
"/",
"python",
"-",
"dev",
"/",
"2012",
"-",
"June",
"/",
"120787",
".",
"html"
] | python | test |
pjmark/NIMPA | niftypet/nimpa/prc/imio.py | https://github.com/pjmark/NIMPA/blob/3f4231fed2934a1d92e4cd8e9e153b0118e29d86/niftypet/nimpa/prc/imio.py#L130-L187 | def array2nii(im, A, fnii, descrip='', trnsp=(), flip=(), storage_as=[]):
'''Store the numpy array 'im' to a NIfTI file 'fnii'.
----
Arguments:
'im': image to be stored in NIfTI
'A': affine transformation
'fnii': output NIfTI file name.
'descrip': the description given to the file
'trnsp': transpose/permute the dimensions.
In NIfTI it has to be in this order: [x,y,z,t,...])
'flip': flip tuple for flipping the direction of x,y,z axes.
(1: no flip, -1: flip)
'storage_as': uses the flip and displacement as given by the following
NifTI dictionary, obtained using
nimpa.getnii(filepath, output='all').
'''
if not len(trnsp) in [0,3,4] and not len(flip) in [0,3]:
raise ValueError('e> number of flip and/or transpose elements is incorrect.')
#---------------------------------------------------------------------------
#> TRANSLATIONS and FLIPS
#> get the same geometry as the input NIfTI file in the form of dictionary,
#>>as obtained from getnii(..., output='all')
#> permute the axis order in the image array
if isinstance(storage_as, dict) and 'transpose' in storage_as \
and 'flip' in storage_as:
trnsp = (storage_as['transpose'].index(0),
storage_as['transpose'].index(1),
storage_as['transpose'].index(2))
flip = storage_as['flip']
if trnsp==():
im = im.transpose()
#> check if the image is 4D (dynamic) and modify as needed
elif len(trnsp)==3 and im.ndim==4:
trnsp = tuple([t+1 for t in trnsp] + [0])
im = im.transpose(trnsp)
else:
im = im.transpose(trnsp)
#> perform flip of x,y,z axes after transposition into proper NIfTI order
if flip!=() and len(flip)==3:
im = im[::-flip[0], ::-flip[1], ::-flip[2], ...]
#---------------------------------------------------------------------------
nii = nib.Nifti1Image(im, A)
hdr = nii.header
hdr.set_sform(None, code='scanner')
hdr['cal_max'] = np.max(im) #np.percentile(im, 90) #
hdr['cal_min'] = np.min(im)
hdr['descrip'] = descrip
nib.save(nii, fnii) | [
"def",
"array2nii",
"(",
"im",
",",
"A",
",",
"fnii",
",",
"descrip",
"=",
"''",
",",
"trnsp",
"=",
"(",
")",
",",
"flip",
"=",
"(",
")",
",",
"storage_as",
"=",
"[",
"]",
")",
":",
"if",
"not",
"len",
"(",
"trnsp",
")",
"in",
"[",
"0",
",",
"3",
",",
"4",
"]",
"and",
"not",
"len",
"(",
"flip",
")",
"in",
"[",
"0",
",",
"3",
"]",
":",
"raise",
"ValueError",
"(",
"'e> number of flip and/or transpose elements is incorrect.'",
")",
"#---------------------------------------------------------------------------",
"#> TRANSLATIONS and FLIPS",
"#> get the same geometry as the input NIfTI file in the form of dictionary,",
"#>>as obtained from getnii(..., output='all')",
"#> permute the axis order in the image array",
"if",
"isinstance",
"(",
"storage_as",
",",
"dict",
")",
"and",
"'transpose'",
"in",
"storage_as",
"and",
"'flip'",
"in",
"storage_as",
":",
"trnsp",
"=",
"(",
"storage_as",
"[",
"'transpose'",
"]",
".",
"index",
"(",
"0",
")",
",",
"storage_as",
"[",
"'transpose'",
"]",
".",
"index",
"(",
"1",
")",
",",
"storage_as",
"[",
"'transpose'",
"]",
".",
"index",
"(",
"2",
")",
")",
"flip",
"=",
"storage_as",
"[",
"'flip'",
"]",
"if",
"trnsp",
"==",
"(",
")",
":",
"im",
"=",
"im",
".",
"transpose",
"(",
")",
"#> check if the image is 4D (dynamic) and modify as needed",
"elif",
"len",
"(",
"trnsp",
")",
"==",
"3",
"and",
"im",
".",
"ndim",
"==",
"4",
":",
"trnsp",
"=",
"tuple",
"(",
"[",
"t",
"+",
"1",
"for",
"t",
"in",
"trnsp",
"]",
"+",
"[",
"0",
"]",
")",
"im",
"=",
"im",
".",
"transpose",
"(",
"trnsp",
")",
"else",
":",
"im",
"=",
"im",
".",
"transpose",
"(",
"trnsp",
")",
"#> perform flip of x,y,z axes after transposition into proper NIfTI order",
"if",
"flip",
"!=",
"(",
")",
"and",
"len",
"(",
"flip",
")",
"==",
"3",
":",
"im",
"=",
"im",
"[",
":",
":",
"-",
"flip",
"[",
"0",
"]",
",",
":",
":",
"-",
"flip",
"[",
"1",
"]",
",",
":",
":",
"-",
"flip",
"[",
"2",
"]",
",",
"...",
"]",
"#---------------------------------------------------------------------------",
"nii",
"=",
"nib",
".",
"Nifti1Image",
"(",
"im",
",",
"A",
")",
"hdr",
"=",
"nii",
".",
"header",
"hdr",
".",
"set_sform",
"(",
"None",
",",
"code",
"=",
"'scanner'",
")",
"hdr",
"[",
"'cal_max'",
"]",
"=",
"np",
".",
"max",
"(",
"im",
")",
"#np.percentile(im, 90) #",
"hdr",
"[",
"'cal_min'",
"]",
"=",
"np",
".",
"min",
"(",
"im",
")",
"hdr",
"[",
"'descrip'",
"]",
"=",
"descrip",
"nib",
".",
"save",
"(",
"nii",
",",
"fnii",
")"
] | Store the numpy array 'im' to a NIfTI file 'fnii'.
----
Arguments:
'im': image to be stored in NIfTI
'A': affine transformation
'fnii': output NIfTI file name.
'descrip': the description given to the file
'trnsp': transpose/permute the dimensions.
In NIfTI it has to be in this order: [x,y,z,t,...])
'flip': flip tuple for flipping the direction of x,y,z axes.
(1: no flip, -1: flip)
'storage_as': uses the flip and displacement as given by the following
NifTI dictionary, obtained using
nimpa.getnii(filepath, output='all'). | [
"Store",
"the",
"numpy",
"array",
"im",
"to",
"a",
"NIfTI",
"file",
"fnii",
".",
"----",
"Arguments",
":",
"im",
":",
"image",
"to",
"be",
"stored",
"in",
"NIfTI",
"A",
":",
"affine",
"transformation",
"fnii",
":",
"output",
"NIfTI",
"file",
"name",
".",
"descrip",
":",
"the",
"description",
"given",
"to",
"the",
"file",
"trsnp",
":",
"transpose",
"/",
"permute",
"the",
"dimensions",
".",
"In",
"NIfTI",
"it",
"has",
"to",
"be",
"in",
"this",
"order",
":",
"[",
"x",
"y",
"z",
"t",
"...",
"]",
")",
"flip",
":",
"flip",
"tupple",
"for",
"flipping",
"the",
"direction",
"of",
"x",
"y",
"z",
"axes",
".",
"(",
"1",
":",
"no",
"flip",
"-",
"1",
":",
"flip",
")",
"storage_as",
":",
"uses",
"the",
"flip",
"and",
"displacement",
"as",
"given",
"by",
"the",
"following",
"NifTI",
"dictionary",
"obtained",
"using",
"nimpa",
".",
"getnii",
"(",
"filepath",
"output",
"=",
"all",
")",
"."
] | python | train |
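A hedged usage sketch: a random volume written out with a 1 mm isotropic affine. The output path is arbitrary, and `array2nii` is the function above, which expects `nibabel` (as `nib`) and `numpy` (as `np`) to be importable in its module.

```python
import numpy as np

im = np.random.rand(64, 64, 32).astype(np.float32)   # (x, y, z) volume
A = np.diag([1.0, 1.0, 1.0, 1.0])                     # 1 mm voxels, no offset

# with the default trnsp=() the function applies a plain .transpose()
# (reversing axis order) before saving; flip=() leaves the axes alone
array2nii(im, A, 'example.nii.gz', descrip='random test volume')
```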
mschwager/cohesion | lib/cohesion/parser.py | https://github.com/mschwager/cohesion/blob/b242ad59770940f3a0904931f27755ede009f491/lib/cohesion/parser.py#L32-L36 | def get_attribute_name_id(attr):
"""
Return the attribute name identifier
"""
return attr.value.id if isinstance(attr.value, ast.Name) else None | [
"def",
"get_attribute_name_id",
"(",
"attr",
")",
":",
"return",
"attr",
".",
"value",
".",
"id",
"if",
"isinstance",
"(",
"attr",
".",
"value",
",",
"ast",
".",
"Name",
")",
"else",
"None"
] | Return the attribute name identifier | [
"Return",
"the",
"attribute",
"name",
"identifier"
] | python | train |
bwesterb/mirte | src/mirteFile.py | https://github.com/bwesterb/mirte/blob/c58db8c993cd15ffdc64b52703cd466213913200/src/mirteFile.py#L95-L109 | def load_mirteFile(path, m, logger=None):
""" Loads the mirte-file at <path> into the manager <m>. """
l = logging.getLogger('load_mirteFile') if logger is None else logger
had = set()
for name, path, d in walk_mirteFiles(path, logger):
if os.path.realpath(path) in m.loaded_mirteFiles:
continue
identifier = name
if name in had:
identifier = path
else:
had.add(name)
l.info('loading %s' % identifier)
m.loaded_mirteFiles.add(os.path.realpath(path))
_load_mirteFile(d, m) | [
"def",
"load_mirteFile",
"(",
"path",
",",
"m",
",",
"logger",
"=",
"None",
")",
":",
"l",
"=",
"logging",
".",
"getLogger",
"(",
"'load_mirteFile'",
")",
"if",
"logger",
"is",
"None",
"else",
"logger",
"had",
"=",
"set",
"(",
")",
"for",
"name",
",",
"path",
",",
"d",
"in",
"walk_mirteFiles",
"(",
"path",
",",
"logger",
")",
":",
"if",
"os",
".",
"path",
".",
"realpath",
"(",
"path",
")",
"in",
"m",
".",
"loaded_mirteFiles",
":",
"continue",
"identifier",
"=",
"name",
"if",
"name",
"in",
"had",
":",
"identifier",
"=",
"path",
"else",
":",
"had",
".",
"add",
"(",
"name",
")",
"l",
".",
"info",
"(",
"'loading %s'",
"%",
"identifier",
")",
"m",
".",
"loaded_mirteFiles",
".",
"add",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"path",
")",
")",
"_load_mirteFile",
"(",
"d",
",",
"m",
")"
] | Loads the mirte-file at <path> into the manager <m>. | [
"Loads",
"the",
"mirte",
"-",
"file",
"at",
"<path",
">",
"into",
"the",
"manager",
"<m",
">",
"."
] | python | train |
OCR-D/core | ocrd_validators/ocrd_validators/page_validator.py | https://github.com/OCR-D/core/blob/57e68c578526cb955fd2e368207f5386c459d91d/ocrd_validators/ocrd_validators/page_validator.py#L100-L114 | def get_text(node, strategy):
"""
Get the most confident text results, either those with @index = 1 or the first text results or empty string.
"""
textEquivs = node.get_TextEquiv()
if not textEquivs:
log.debug("No text results on %s %s", node, node.id)
return ''
# elif strategy == 'index1':
else:
if len(textEquivs) > 1:
index1 = [x for x in textEquivs if x.index == 1]
if index1:
return index1[0].get_Unicode().strip()
return textEquivs[0].get_Unicode().strip() | [
"def",
"get_text",
"(",
"node",
",",
"strategy",
")",
":",
"textEquivs",
"=",
"node",
".",
"get_TextEquiv",
"(",
")",
"if",
"not",
"textEquivs",
":",
"log",
".",
"debug",
"(",
"\"No text results on %s %s\"",
",",
"node",
",",
"node",
".",
"id",
")",
"return",
"''",
"# elif strategy == 'index1':",
"else",
":",
"if",
"len",
"(",
"textEquivs",
")",
">",
"1",
":",
"index1",
"=",
"[",
"x",
"for",
"x",
"in",
"textEquivs",
"if",
"x",
".",
"index",
"==",
"1",
"]",
"if",
"index1",
":",
"return",
"index1",
"[",
"0",
"]",
".",
"get_Unicode",
"(",
")",
".",
"strip",
"(",
")",
"return",
"textEquivs",
"[",
"0",
"]",
".",
"get_Unicode",
"(",
")",
".",
"strip",
"(",
")"
] | Get the most confident text results, either those with @index = 1 or the first text results or empty string. | [
"Get",
"the",
"most",
"confident",
"text",
"results",
"either",
"those",
"with"
] | python | train |
HttpRunner/HttpRunner | httprunner/logger.py | https://github.com/HttpRunner/HttpRunner/blob/f259551bf9c8ba905eae5c1afcf2efea20ae0871/httprunner/logger.py#L21-L46 | def setup_logger(log_level, log_file=None):
"""setup root logger with ColoredFormatter."""
level = getattr(logging, log_level.upper(), None)
if not level:
color_print("Invalid log level: %s" % log_level, "RED")
sys.exit(1)
# hide traceback when log level is INFO/WARNING/ERROR/CRITICAL
if level >= logging.INFO:
sys.tracebacklimit = 0
formatter = ColoredFormatter(
u"%(log_color)s%(bg_white)s%(levelname)-8s%(reset)s %(message)s",
datefmt=None,
reset=True,
log_colors=log_colors_config
)
if log_file:
handler = logging.FileHandler(log_file, encoding="utf-8")
else:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(level) | [
"def",
"setup_logger",
"(",
"log_level",
",",
"log_file",
"=",
"None",
")",
":",
"level",
"=",
"getattr",
"(",
"logging",
",",
"log_level",
".",
"upper",
"(",
")",
",",
"None",
")",
"if",
"not",
"level",
":",
"color_print",
"(",
"\"Invalid log level: %s\"",
"%",
"log_level",
",",
"\"RED\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# hide traceback when log level is INFO/WARNING/ERROR/CRITICAL",
"if",
"level",
">=",
"logging",
".",
"INFO",
":",
"sys",
".",
"tracebacklimit",
"=",
"0",
"formatter",
"=",
"ColoredFormatter",
"(",
"u\"%(log_color)s%(bg_white)s%(levelname)-8s%(reset)s %(message)s\"",
",",
"datefmt",
"=",
"None",
",",
"reset",
"=",
"True",
",",
"log_colors",
"=",
"log_colors_config",
")",
"if",
"log_file",
":",
"handler",
"=",
"logging",
".",
"FileHandler",
"(",
"log_file",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"else",
":",
"handler",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"handler",
".",
"setFormatter",
"(",
"formatter",
")",
"logger",
".",
"addHandler",
"(",
"handler",
")",
"logger",
".",
"setLevel",
"(",
"level",
")"
] | setup root logger with ColoredFormatter. | [
"setup",
"root",
"logger",
"with",
"ColoredFormatter",
"."
] | python | train |
saltstack/salt | salt/modules/debian_ip.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/debian_ip.py#L1453-L1463 | def _read_temp(data):
'''
Return what would be written to disk
'''
tout = StringIO()
tout.write(data)
tout.seek(0)
output = tout.readlines()
tout.close()
return output | [
"def",
"_read_temp",
"(",
"data",
")",
":",
"tout",
"=",
"StringIO",
"(",
")",
"tout",
".",
"write",
"(",
"data",
")",
"tout",
".",
"seek",
"(",
"0",
")",
"output",
"=",
"tout",
".",
"readlines",
"(",
")",
"tout",
".",
"close",
"(",
")",
"return",
"output"
] | Return what would be written to disk | [
"Return",
"what",
"would",
"be",
"written",
"to",
"disk"
] | python | train |
weld-project/weld | python/numpy/weldnumpy/weldnumpy.py | https://github.com/weld-project/weld/blob/8ddd6db6b28878bef0892da44b1d2002b564389c/python/numpy/weldnumpy/weldnumpy.py#L44-L53 | def get_supported_binary_ops():
'''
Returns a dictionary of the Weld supported binary ops, with values being their Weld symbol.
'''
binary_ops = {}
binary_ops[np.add.__name__] = '+'
binary_ops[np.subtract.__name__] = '-'
binary_ops[np.multiply.__name__] = '*'
binary_ops[np.divide.__name__] = '/'
return binary_ops | [
"def",
"get_supported_binary_ops",
"(",
")",
":",
"binary_ops",
"=",
"{",
"}",
"binary_ops",
"[",
"np",
".",
"add",
".",
"__name__",
"]",
"=",
"'+'",
"binary_ops",
"[",
"np",
".",
"subtract",
".",
"__name__",
"]",
"=",
"'-'",
"binary_ops",
"[",
"np",
".",
"multiply",
".",
"__name__",
"]",
"=",
"'*'",
"binary_ops",
"[",
"np",
".",
"divide",
".",
"__name__",
"]",
"=",
"'/'",
"return",
"binary_ops"
] | Returns a dictionary of the Weld supported binary ops, with values being their Weld symbol. | [
"Returns",
"a",
"dictionary",
"of",
"the",
"Weld",
"supported",
"binary",
"ops",
"with",
"values",
"being",
"their",
"Weld",
"symbol",
"."
] | python | train |
ecederstrand/exchangelib | exchangelib/folders.py | https://github.com/ecederstrand/exchangelib/blob/736347b337c239fcd6d592db5b29e819f753c1ba/exchangelib/folders.py#L1594-L1602 | def folder_cls_from_folder_name(cls, folder_name, locale):
"""Returns the folder class that matches a localized folder name.
locale is a string, e.g. 'da_DK'
"""
for folder_cls in cls.WELLKNOWN_FOLDERS + NON_DELETEABLE_FOLDERS:
if folder_name.lower() in folder_cls.localized_names(locale):
return folder_cls
raise KeyError() | [
"def",
"folder_cls_from_folder_name",
"(",
"cls",
",",
"folder_name",
",",
"locale",
")",
":",
"for",
"folder_cls",
"in",
"cls",
".",
"WELLKNOWN_FOLDERS",
"+",
"NON_DELETEABLE_FOLDERS",
":",
"if",
"folder_name",
".",
"lower",
"(",
")",
"in",
"folder_cls",
".",
"localized_names",
"(",
"locale",
")",
":",
"return",
"folder_cls",
"raise",
"KeyError",
"(",
")"
] | Returns the folder class that matches a localized folder name.
locale is a string, e.g. 'da_DK' | [
"Returns",
"the",
"folder",
"class",
"that",
"matches",
"a",
"localized",
"folder",
"name",
"."
] | python | train |
noxdafox/pebble | pebble/common.py | https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/common.py#L177-L183 | def send_result(pipe, data):
"""Send result handling pickling and communication errors."""
try:
pipe.send(data)
except (pickle.PicklingError, TypeError) as error:
error.traceback = format_exc()
pipe.send(RemoteException(error, error.traceback)) | [
"def",
"send_result",
"(",
"pipe",
",",
"data",
")",
":",
"try",
":",
"pipe",
".",
"send",
"(",
"data",
")",
"except",
"(",
"pickle",
".",
"PicklingError",
",",
"TypeError",
")",
"as",
"error",
":",
"error",
".",
"traceback",
"=",
"format_exc",
"(",
")",
"pipe",
".",
"send",
"(",
"RemoteException",
"(",
"error",
",",
"error",
".",
"traceback",
")",
")"
] | Send result handling pickling and communication errors. | [
"Send",
"result",
"handling",
"pickling",
"and",
"communication",
"errors",
"."
] | python | train |
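A minimal sketch of what this guards against: a picklable result goes through untouched, while an unpicklable one (here a module-level lambda) triggers `PicklingError`, so the error itself is shipped back wrapped in `RemoteException` instead of killing the worker; this assumes the wrapper pickles cleanly, which is its purpose.

```python
from multiprocessing import Pipe

parent_end, child_end = Pipe()

send_result(child_end, {'status': 'ok'})   # picklable: arrives unchanged
print(parent_end.recv())

send_result(child_end, lambda: None)       # unpicklable: the pickling error
print(repr(parent_end.recv()))             # ...comes back as the "result"
```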
codenerix/django-codenerix | codenerix/templatetags/codenerix_common.py | https://github.com/codenerix/django-codenerix/blob/1f5527b352141caaee902b37b2648791a06bd57d/codenerix/templatetags/codenerix_common.py#L241-L262 | def objectatrib(instance, atrib):
'''
this filter is going to be useful to execute an object method or get an
object attribute dynamically. this method is going to take into account
the atrib param can contain underscores
'''
atrib = atrib.replace("__", ".")
atribs = []
atribs = atrib.split(".")
obj = instance
for atrib in atribs:
if type(obj) == dict:
result = obj[atrib]
else:
try:
result = getattr(obj, atrib)()
except Exception:
result = getattr(obj, atrib)
obj = result
return result | [
"def",
"objectatrib",
"(",
"instance",
",",
"atrib",
")",
":",
"atrib",
"=",
"atrib",
".",
"replace",
"(",
"\"__\"",
",",
"\".\"",
")",
"atribs",
"=",
"[",
"]",
"atribs",
"=",
"atrib",
".",
"split",
"(",
"\".\"",
")",
"obj",
"=",
"instance",
"for",
"atrib",
"in",
"atribs",
":",
"if",
"type",
"(",
"obj",
")",
"==",
"dict",
":",
"result",
"=",
"obj",
"[",
"atrib",
"]",
"else",
":",
"try",
":",
"result",
"=",
"getattr",
"(",
"obj",
",",
"atrib",
")",
"(",
")",
"except",
"Exception",
":",
"result",
"=",
"getattr",
"(",
"obj",
",",
"atrib",
")",
"obj",
"=",
"result",
"return",
"result"
] | this filter is going to be useful to execute an object method or get an
object attribute dynamically. this method is going to take into account
the atrib param can contain underscores | [
"this",
"filter",
"is",
"going",
"to",
"be",
"useful",
"to",
"execute",
"an",
"object",
"method",
"or",
"get",
"an",
"object",
"attribute",
"dynamically",
".",
"this",
"method",
"is",
"going",
"to",
"take",
"into",
"account",
"the",
"atrib",
"param",
"can",
"contains",
"underscores"
] | python | train |
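A quick illustration of the traversal: `__` (or `.`) separated segments are resolved one at a time, calling the attribute when it is callable and plain-getting it otherwise, while dicts are indexed by key. The toy classes below are made up.

```python
class Profile:
    nickname = 'neo'
    def display(self):
        return self.nickname.upper()

class User:
    profile = Profile()
    extra = {'lang': 'en'}

u = User()
print(objectatrib(u, 'profile__nickname'))   # 'neo'  (plain attribute)
print(objectatrib(u, 'profile__display'))    # 'NEO'  (method gets called)
print(objectatrib(u, 'extra__lang'))         # 'en'   (dict lookup)
```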
projectshift/shift-schema | shiftschema/validators/email.py | https://github.com/projectshift/shift-schema/blob/07787b540d3369bb37217ffbfbe629118edaf0eb/shiftschema/validators/email.py#L26-L43 | def validate(self, value, model=None, context=None):
"""
Validate
Perform value validation and return result
:param value: value to check
:param model: parent model being validated
:param context: object or None, validation context
:return: shiftschema.results.SimpleResult
"""
regex = self.regex()
match = regex.match(value)
if not match:
return Error(self.not_email)
# success otherwise
return Error() | [
"def",
"validate",
"(",
"self",
",",
"value",
",",
"model",
"=",
"None",
",",
"context",
"=",
"None",
")",
":",
"regex",
"=",
"self",
".",
"regex",
"(",
")",
"match",
"=",
"regex",
".",
"match",
"(",
"value",
")",
"if",
"not",
"match",
":",
"return",
"Error",
"(",
"self",
".",
"not_email",
")",
"# success otherwise",
"return",
"Error",
"(",
")"
] | Validate
Perform value validation and return result
:param value: value to check
:param model: parent model being validated
:param context: object or None, validation context
:return: shiftschema.results.SimpleResult | [
"Validate",
"Perform",
"value",
"validation",
"and",
"return",
"result"
] | python | train |
cggh/scikit-allel | allel/model/ndarray.py | https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L2510-L2520 | def distinct_counts(self):
"""Return counts for each distinct haplotype."""
# hash the haplotypes
k = [hash(self.values[:, i].tobytes()) for i in range(self.shape[1])]
# count and sort
# noinspection PyArgumentList
counts = sorted(collections.Counter(k).values(), reverse=True)
return np.asarray(counts) | [
"def",
"distinct_counts",
"(",
"self",
")",
":",
"# hash the haplotypes",
"k",
"=",
"[",
"hash",
"(",
"self",
".",
"values",
"[",
":",
",",
"i",
"]",
".",
"tobytes",
"(",
")",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"# count and sort",
"# noinspection PyArgumentList",
"counts",
"=",
"sorted",
"(",
"collections",
".",
"Counter",
"(",
"k",
")",
".",
"values",
"(",
")",
",",
"reverse",
"=",
"True",
")",
"return",
"np",
".",
"asarray",
"(",
"counts",
")"
] | Return counts for each distinct haplotype. | [
"Return",
"counts",
"for",
"each",
"distinct",
"haplotype",
"."
] | python | train |
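The method hashes each column's raw bytes and counts how often each hash occurs. A standalone reproduction with a plain numpy array, just to show the idea (no scikit-allel import):

```python
import collections
import numpy as np

# 4 variants x 6 haplotypes; columns 0, 2 and 5 are identical, as are 1 and 4
h = np.array([[0, 1, 0, 1, 1, 0],
              [0, 1, 0, 0, 1, 0],
              [1, 0, 1, 1, 0, 1],
              [0, 0, 0, 1, 0, 0]], dtype='i1')

keys = [hash(h[:, i].tobytes()) for i in range(h.shape[1])]
counts = sorted(collections.Counter(keys).values(), reverse=True)
print(np.asarray(counts))   # [3 2 1]
```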
gwpy/gwpy | gwpy/timeseries/timeseries.py | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L968-L1003 | def zpk(self, zeros, poles, gain, analog=True, **kwargs):
"""Filter this `TimeSeries` by applying a zero-pole-gain filter
Parameters
----------
zeros : `array-like`
list of zero frequencies (in Hertz)
poles : `array-like`
list of pole frequencies (in Hertz)
gain : `float`
DC gain of filter
analog : `bool`, optional
type of ZPK being applied, if `analog=True` all parameters
will be converted in the Z-domain for digital filtering
Returns
-------
timeseries : `TimeSeries`
the filtered version of the input data
See Also
--------
TimeSeries.filter
for details on how a digital ZPK-format filter is applied
Examples
--------
To apply a zpk filter with five poles at 100 Hz, and five zeros at
1 Hz (giving an overall DC gain of 1e-10)::
>>> data2 = data.zpk([100]*5, [1]*5, 1e-10)
"""
return self.filter(zeros, poles, gain, analog=analog, **kwargs) | [
"def",
"zpk",
"(",
"self",
",",
"zeros",
",",
"poles",
",",
"gain",
",",
"analog",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"filter",
"(",
"zeros",
",",
"poles",
",",
"gain",
",",
"analog",
"=",
"analog",
",",
"*",
"*",
"kwargs",
")"
] | Filter this `TimeSeries` by applying a zero-pole-gain filter
Parameters
----------
zeros : `array-like`
list of zero frequencies (in Hertz)
poles : `array-like`
list of pole frequencies (in Hertz)
gain : `float`
DC gain of filter
analog : `bool`, optional
type of ZPK being applied, if `analog=True` all parameters
will be converted in the Z-domain for digital filtering
Returns
-------
timeseries : `TimeSeries`
the filtered version of the input data
See Also
--------
TimeSeries.filter
for details on how a digital ZPK-format filter is applied
Examples
--------
To apply a zpk filter with five poles at 100 Hz, and five zeros at
1 Hz (giving an overall DC gain of 1e-10)::
>>> data2 = data.zpk([100]*5, [1]*5, 1e-10) | [
"Filter",
"this",
"TimeSeries",
"by",
"applying",
"a",
"zero",
"-",
"pole",
"-",
"gain",
"filter"
] | python | train |
portfors-lab/sparkle | sparkle/gui/stim/stim_detail.py | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/stim_detail.py#L24-L36 | def setDoc(self, doc):
"""Presents the documentation
:param doc: documentation for StimulusModel. i.e. returned from
:meth:`componentDoc<sparkle.stim.stimulus_model.StimulusModel.componentDoc>`
or :meth:`templateDoc<sparkle.stim.stimulus_model.StimulusModel.templateDoc>`
"""
self.ui.overAtten.setNum(doc['overloaded_attenuation'])
# also set composite stim type
# self.ui.traceType.setText(doc['testtype'])
self.ui.componentDetails.clearDoc()
self.ui.componentDetails.setDoc(doc['components']) | [
"def",
"setDoc",
"(",
"self",
",",
"doc",
")",
":",
"self",
".",
"ui",
".",
"overAtten",
".",
"setNum",
"(",
"doc",
"[",
"'overloaded_attenuation'",
"]",
")",
"# also set composite stim type",
"# self.ui.traceType.setText(doc['testtype'])",
"self",
".",
"ui",
".",
"componentDetails",
".",
"clearDoc",
"(",
")",
"self",
".",
"ui",
".",
"componentDetails",
".",
"setDoc",
"(",
"doc",
"[",
"'components'",
"]",
")"
] | Presents the documentation
:param doc: documentation for StimulusModel. i.e. returned from
:meth:`componentDoc<sparkle.stim.stimulus_model.StimulusModel.componentDoc>`
or :meth:`templateDoc<sparkle.stim.stimulus_model.StimulusModel.templateDoc>` | [
"Presents",
"the",
"documentation"
] | python | train |
nvdv/vprof | vprof/flame_graph.py | https://github.com/nvdv/vprof/blob/4c3ff78f8920ab10cb9c00b14143452aa09ff6bb/vprof/flame_graph.py#L75-L79 | def _fill_sample_count(self, node):
"""Counts and fills sample counts inside call tree."""
node['sampleCount'] += sum(
self._fill_sample_count(child) for child in node['children'])
return node['sampleCount'] | [
"def",
"_fill_sample_count",
"(",
"self",
",",
"node",
")",
":",
"node",
"[",
"'sampleCount'",
"]",
"+=",
"sum",
"(",
"self",
".",
"_fill_sample_count",
"(",
"child",
")",
"for",
"child",
"in",
"node",
"[",
"'children'",
"]",
")",
"return",
"node",
"[",
"'sampleCount'",
"]"
] | Counts and fills sample counts inside call tree. | [
"Counts",
"and",
"fills",
"sample",
"counts",
"inside",
"call",
"tree",
"."
] | python | test |
glottobank/python-newick | src/newick.py | https://github.com/glottobank/python-newick/blob/e8d4d1e4610f271d0f0e5cb86c0e0360b43bd702/src/newick.py#L360-L371 | def loads(s, strip_comments=False, **kw):
"""
Load a list of trees from a Newick formatted string.
:param s: Newick formatted string.
:param strip_comments: Flag signaling whether to strip comments enclosed in square \
brackets.
:param kw: Keyword arguments are passed through to `Node.create`.
:return: List of Node objects.
"""
kw['strip_comments'] = strip_comments
return [parse_node(ss.strip(), **kw) for ss in s.split(';') if ss.strip()] | [
"def",
"loads",
"(",
"s",
",",
"strip_comments",
"=",
"False",
",",
"*",
"*",
"kw",
")",
":",
"kw",
"[",
"'strip_comments'",
"]",
"=",
"strip_comments",
"return",
"[",
"parse_node",
"(",
"ss",
".",
"strip",
"(",
")",
",",
"*",
"*",
"kw",
")",
"for",
"ss",
"in",
"s",
".",
"split",
"(",
"';'",
")",
"if",
"ss",
".",
"strip",
"(",
")",
"]"
] | Load a list of trees from a Newick formatted string.
:param s: Newick formatted string.
:param strip_comments: Flag signaling whether to strip comments enclosed in square \
brackets.
:param kw: Keyword arguments are passed through to `Node.create`.
:return: List of Node objects. | [
"Load",
"a",
"list",
"of",
"trees",
"from",
"a",
"Newick",
"formatted",
"string",
"."
] | python | test |
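A hedged usage sketch; the `Node` attributes used below (`name`, `descendants`) are the ones `parse_node` builds, but nothing beyond `loads` itself is shown in this snippet.

```python
from newick import loads

trees = loads('(A:1,(B:1,C:1)E:2)D:0.5; (x,y)z;')
print(len(trees))                            # 2 -- one Node per ';'-terminated tree
root = trees[0]
print(root.name)                             # 'D'
print([n.name for n in root.descendants])    # ['A', 'E']
```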
swistakm/graceful | src/graceful/authentication.py | https://github.com/swistakm/graceful/blob/d4678cb6349a5c843a5e58002fc80140821609e4/src/graceful/authentication.py#L288-L314 | def process_resource(self, req, resp, resource, uri_kwargs=None):
"""Process resource after routing to it.
This is a basic falcon middleware handler.
Args:
req (falcon.Request): request object
resp (falcon.Response): response object
resource (object): resource object matched by falcon router
uri_kwargs (dict): additional keyword argument from uri template.
For ``falcon<1.0.0`` this is always ``None``
"""
if 'user' in req.context:
return
identifier = self.identify(req, resp, resource, uri_kwargs)
user = self.try_storage(identifier, req, resp, resource, uri_kwargs)
if user is not None:
req.context['user'] = user
# if did not succeed then we need to add this to list of available
# challenges.
elif self.challenge is not None:
req.context.setdefault(
'challenges', list()
).append(self.challenge) | [
"def",
"process_resource",
"(",
"self",
",",
"req",
",",
"resp",
",",
"resource",
",",
"uri_kwargs",
"=",
"None",
")",
":",
"if",
"'user'",
"in",
"req",
".",
"context",
":",
"return",
"identifier",
"=",
"self",
".",
"identify",
"(",
"req",
",",
"resp",
",",
"resource",
",",
"uri_kwargs",
")",
"user",
"=",
"self",
".",
"try_storage",
"(",
"identifier",
",",
"req",
",",
"resp",
",",
"resource",
",",
"uri_kwargs",
")",
"if",
"user",
"is",
"not",
"None",
":",
"req",
".",
"context",
"[",
"'user'",
"]",
"=",
"user",
"# if did not succeed then we need to add this to list of available",
"# challenges.",
"elif",
"self",
".",
"challenge",
"is",
"not",
"None",
":",
"req",
".",
"context",
".",
"setdefault",
"(",
"'challenges'",
",",
"list",
"(",
")",
")",
".",
"append",
"(",
"self",
".",
"challenge",
")"
] | Process resource after routing to it.
This is a basic falcon middleware handler.
Args:
req (falcon.Request): request object
resp (falcon.Response): response object
resource (object): resource object matched by falcon router
uri_kwargs (dict): additional keyword argument from uri template.
For ``falcon<1.0.0`` this is always ``None`` | [
"Process",
"resource",
"after",
"routing",
"to",
"it",
"."
] | python | train |
qiniu/python-sdk | qiniu/services/compute/qcos_api.py | https://github.com/qiniu/python-sdk/blob/a69fbef4e3e6ea1ebe09f4610a5b18bb2c17de59/qiniu/services/compute/qcos_api.py#L128-L142 | def start_stack(self, stack):
"""启动服务组
启动服务组中的所有停止状态的服务。
Args:
- stack: 服务所属的服务组名称
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回空dict{},失败返回{"error": "<errMsg string>"}
- ResponseInfo 请求的Response信息
"""
url = '{0}/v3/stacks/{1}/start'.format(self.host, stack)
return self.__post(url) | [
"def",
"start_stack",
"(",
"self",
",",
"stack",
")",
":",
"url",
"=",
"'{0}/v3/stacks/{1}/start'",
".",
"format",
"(",
"self",
".",
"host",
",",
"stack",
")",
"return",
"self",
".",
"__post",
"(",
"url",
")"
] | Start the stack
Start all stopped services in the stack.
Args:
- stack: name of the stack that the services belong to
Returns:
a tuple object whose format is (<result>, <ResponseInfo>)
- result: an empty dict {} on success, or {"error": "<errMsg string>"} on failure
- ResponseInfo: the Response information of the request | [
"Start",
"the",
"stack"
] | python | train |
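A hedged usage sketch for `start_stack` above. Constructing the client is outside this record, so `client` is assumed to be an already-initialised QcosClient; 'my-stack' is a made-up stack name:

    # assumption: `client` is an existing QcosClient instance
    ret, info = client.start_stack('my-stack')
    if ret == {}:
        print('stack started')                    # success returns an empty dict
    else:
        print('failed:', ret.get('error'), info)  # info is the ResponseInfo object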
mrstephenneal/pdfconduit | pdf/gui/config/images.py | https://github.com/mrstephenneal/pdfconduit/blob/993421cc087eefefe01ff09afabd893bcc2718ec/pdf/gui/config/images.py#L25-L29 | def remove(image):
"""Remove an image to the GUI img library."""
path = os.path.join(IMG_DIR, image)
if os.path.isfile(path):
os.remove(path) | [
"def",
"remove",
"(",
"image",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"IMG_DIR",
",",
"image",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"os",
".",
"remove",
"(",
"path",
")"
] | Remove an image from the GUI img library. | [
"Remove",
"an",
"image",
"to",
"the",
"GUI",
"img",
"library",
"."
] | python | train |
sveetch/boussole | boussole/watcher.py | https://github.com/sveetch/boussole/blob/22cc644e9d633f41ebfc167d427a71c1726cee21/boussole/watcher.py#L236-L252 | def on_deleted(self, event):
"""
Called when a file or directory is deleted.
Todo:
May be bugged with inspector and sass compiler since the file does not
exist anymore.
Args:
event: Watchdog event, ``watchdog.events.DirDeletedEvent`` or
``watchdog.events.FileDeletedEvent``.
"""
if not self._event_error:
self.logger.info(u"Change detected from deletion of: %s",
event.src_path)
# Never try to compile the deleted source
self.compile_dependencies(event.src_path, include_self=False) | [
"def",
"on_deleted",
"(",
"self",
",",
"event",
")",
":",
"if",
"not",
"self",
".",
"_event_error",
":",
"self",
".",
"logger",
".",
"info",
"(",
"u\"Change detected from deletion of: %s\"",
",",
"event",
".",
"src_path",
")",
"# Never try to compile the deleted source",
"self",
".",
"compile_dependencies",
"(",
"event",
".",
"src_path",
",",
"include_self",
"=",
"False",
")"
] | Called when a file or directory is deleted.
Todo:
May be bugged with inspector and sass compiler since the file does not
exist anymore.
Args:
event: Watchdog event, ``watchdog.events.DirDeletedEvent`` or
``watchdog.events.FileDeletedEvent``. | [
"Called",
"when",
"a",
"file",
"or",
"directory",
"is",
"deleted",
"."
] | python | train |
silver-castle/mach9 | mach9/static.py | https://github.com/silver-castle/mach9/blob/7a623aab3c70d89d36ade6901b6307e115400c5e/mach9/static.py#L19-L104 | def register(app, uri, file_or_directory, pattern,
use_modified_since, use_content_range):
# TODO: Though mach9 is not a file server, I feel like we should at least
# make a good effort here. Modified-since is nice, but we could
# also look into etags, expires, and caching
"""
Register a static directory handler with Mach9 by adding a route to the
router and registering a handler.
:param app: Mach9
:param file_or_directory: File or directory path to serve from
:param uri: URL to serve from
:param pattern: regular expression used to match files in the URL
:param use_modified_since: If true, send file modified time, and return
not modified if the browser's matches the
server's
:param use_content_range: If true, process header for range requests
and sends the file part that is requested
"""
# If we're not trying to match a file directly,
# serve from the folder
if not path.isfile(file_or_directory):
uri += '<file_uri:' + pattern + '>'
async def _handler(request, file_uri=None):
# Using this to determine if the URL is trying to break out of the path
# served. os.path.realpath seems to be very slow
if file_uri and '../' in file_uri:
raise InvalidUsage("Invalid URL")
# Merge served directory and requested file if provided
# Strip all / that in the beginning of the URL to help prevent python
# from herping a derp and treating the uri as an absolute path
root_path = file_path = file_or_directory
if file_uri:
file_path = path.join(
file_or_directory, sub('^[/]*', '', file_uri))
# URL decode the path sent by the browser otherwise we won't be able to
# match filenames which got encoded (filenames with spaces etc)
file_path = path.abspath(unquote(file_path))
if not file_path.startswith(path.abspath(unquote(root_path))):
raise FileNotFound('File not found',
path=file_or_directory,
relative_url=file_uri)
try:
headers = {}
# Check if the client has been sent this file before
# and it has not been modified since
stats = None
if use_modified_since:
stats = await stat(file_path)
modified_since = strftime(
'%a, %d %b %Y %H:%M:%S GMT', gmtime(stats.st_mtime))
if request.headers.get('If-Modified-Since') == modified_since:
return HTTPResponse(status=304)
headers['Last-Modified'] = modified_since
_range = None
if use_content_range:
_range = None
if not stats:
stats = await stat(file_path)
headers['Accept-Ranges'] = 'bytes'
headers['Content-Length'] = str(stats.st_size)
if request.method != 'HEAD':
try:
_range = ContentRangeHandler(request, stats)
except HeaderNotFound:
pass
else:
del headers['Content-Length']
for key, value in _range.headers.items():
headers[key] = value
if request.method == 'HEAD':
return HTTPResponse(
headers=headers,
content_type=guess_type(file_path)[0] or 'text/plain')
else:
return await file(file_path, headers=headers, _range=_range)
except ContentRangeError:
raise
except Exception:
raise FileNotFound('File not found',
path=file_or_directory,
relative_url=file_uri)
app.route(uri, methods=['GET', 'HEAD'])(_handler) | [
"def",
"register",
"(",
"app",
",",
"uri",
",",
"file_or_directory",
",",
"pattern",
",",
"use_modified_since",
",",
"use_content_range",
")",
":",
"# TODO: Though mach9 is not a file server, I feel like we should at least",
"# make a good effort here. Modified-since is nice, but we could",
"# also look into etags, expires, and caching",
"# If we're not trying to match a file directly,",
"# serve from the folder",
"if",
"not",
"path",
".",
"isfile",
"(",
"file_or_directory",
")",
":",
"uri",
"+=",
"'<file_uri:'",
"+",
"pattern",
"+",
"'>'",
"async",
"def",
"_handler",
"(",
"request",
",",
"file_uri",
"=",
"None",
")",
":",
"# Using this to determine if the URL is trying to break out of the path",
"# served. os.path.realpath seems to be very slow",
"if",
"file_uri",
"and",
"'../'",
"in",
"file_uri",
":",
"raise",
"InvalidUsage",
"(",
"\"Invalid URL\"",
")",
"# Merge served directory and requested file if provided",
"# Strip all / that in the beginning of the URL to help prevent python",
"# from herping a derp and treating the uri as an absolute path",
"root_path",
"=",
"file_path",
"=",
"file_or_directory",
"if",
"file_uri",
":",
"file_path",
"=",
"path",
".",
"join",
"(",
"file_or_directory",
",",
"sub",
"(",
"'^[/]*'",
",",
"''",
",",
"file_uri",
")",
")",
"# URL decode the path sent by the browser otherwise we won't be able to",
"# match filenames which got encoded (filenames with spaces etc)",
"file_path",
"=",
"path",
".",
"abspath",
"(",
"unquote",
"(",
"file_path",
")",
")",
"if",
"not",
"file_path",
".",
"startswith",
"(",
"path",
".",
"abspath",
"(",
"unquote",
"(",
"root_path",
")",
")",
")",
":",
"raise",
"FileNotFound",
"(",
"'File not found'",
",",
"path",
"=",
"file_or_directory",
",",
"relative_url",
"=",
"file_uri",
")",
"try",
":",
"headers",
"=",
"{",
"}",
"# Check if the client has been sent this file before",
"# and it has not been modified since",
"stats",
"=",
"None",
"if",
"use_modified_since",
":",
"stats",
"=",
"await",
"stat",
"(",
"file_path",
")",
"modified_since",
"=",
"strftime",
"(",
"'%a, %d %b %Y %H:%M:%S GMT'",
",",
"gmtime",
"(",
"stats",
".",
"st_mtime",
")",
")",
"if",
"request",
".",
"headers",
".",
"get",
"(",
"'If-Modified-Since'",
")",
"==",
"modified_since",
":",
"return",
"HTTPResponse",
"(",
"status",
"=",
"304",
")",
"headers",
"[",
"'Last-Modified'",
"]",
"=",
"modified_since",
"_range",
"=",
"None",
"if",
"use_content_range",
":",
"_range",
"=",
"None",
"if",
"not",
"stats",
":",
"stats",
"=",
"await",
"stat",
"(",
"file_path",
")",
"headers",
"[",
"'Accept-Ranges'",
"]",
"=",
"'bytes'",
"headers",
"[",
"'Content-Length'",
"]",
"=",
"str",
"(",
"stats",
".",
"st_size",
")",
"if",
"request",
".",
"method",
"!=",
"'HEAD'",
":",
"try",
":",
"_range",
"=",
"ContentRangeHandler",
"(",
"request",
",",
"stats",
")",
"except",
"HeaderNotFound",
":",
"pass",
"else",
":",
"del",
"headers",
"[",
"'Content-Length'",
"]",
"for",
"key",
",",
"value",
"in",
"_range",
".",
"headers",
".",
"items",
"(",
")",
":",
"headers",
"[",
"key",
"]",
"=",
"value",
"if",
"request",
".",
"method",
"==",
"'HEAD'",
":",
"return",
"HTTPResponse",
"(",
"headers",
"=",
"headers",
",",
"content_type",
"=",
"guess_type",
"(",
"file_path",
")",
"[",
"0",
"]",
"or",
"'text/plain'",
")",
"else",
":",
"return",
"await",
"file",
"(",
"file_path",
",",
"headers",
"=",
"headers",
",",
"_range",
"=",
"_range",
")",
"except",
"ContentRangeError",
":",
"raise",
"except",
"Exception",
":",
"raise",
"FileNotFound",
"(",
"'File not found'",
",",
"path",
"=",
"file_or_directory",
",",
"relative_url",
"=",
"file_uri",
")",
"app",
".",
"route",
"(",
"uri",
",",
"methods",
"=",
"[",
"'GET'",
",",
"'HEAD'",
"]",
")",
"(",
"_handler",
")"
] | Register a static directory handler with Mach9 by adding a route to the
router and registering a handler.
:param app: Mach9
:param file_or_directory: File or directory path to serve from
:param uri: URL to serve from
:param pattern: regular expression used to match files in the URL
:param use_modified_since: If true, send file modified time, and return
not modified if the browser's matches the
server's
:param use_content_range: If true, process header for range requests
and sends the file part that is requested | [
"Register",
"a",
"static",
"directory",
"handler",
"with",
"Mach9",
"by",
"adding",
"a",
"route",
"to",
"the",
"router",
"and",
"registering",
"a",
"handler",
"."
] | python | train |
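A sketch of wiring the `register` helper above into an application. The `Mach9` import path and the file-matching regex are assumptions made for illustration; in practice the framework's own static-file helper would normally call this for you:

    from mach9 import Mach9   # assumed import path

    app = Mach9(__name__)
    # serve ./public under /static/, with Last-Modified handling on and Range support off
    register(app, '/static/', './public', r'/?.+',
             use_modified_since=True, use_content_range=False)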
radical-cybertools/radical.entk | setup.py | https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/setup.py#L155-L174 | def visit((prefix, strip, found), dirname, names):
""" Visit directory, create distutil tuple
Add distutil tuple for each directory using this format:
(destination, [dirname/file1, dirname/file2, ...])
distutil will copy later file1, file2, ... into destination.
"""
files = []
# Iterate over a copy of names, modify names
for name in names[:]:
path = os.path.join(dirname, name)
# Ignore directories - we will visit later
if os.path.isdir(path):
# Remove directories we don't want to visit later
if isbad(name):
names.remove(name)
continue
elif isgood(name):
files.append(path)
destination = os.path.join(prefix, dirname[strip:])
found.append((destination, files)) | [
"def",
"visit",
"(",
"(",
"prefix",
",",
"strip",
",",
"found",
")",
",",
"dirname",
",",
"names",
")",
":",
"files",
"=",
"[",
"]",
"# Iterate over a copy of names, modify names",
"for",
"name",
"in",
"names",
"[",
":",
"]",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"name",
")",
"# Ignore directories - we will visit later",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"# Remove directories we don't want to visit later",
"if",
"isbad",
"(",
"name",
")",
":",
"names",
".",
"remove",
"(",
"name",
")",
"continue",
"elif",
"isgood",
"(",
"name",
")",
":",
"files",
".",
"append",
"(",
"path",
")",
"destination",
"=",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"dirname",
"[",
"strip",
":",
"]",
")",
"found",
".",
"append",
"(",
"(",
"destination",
",",
"files",
")",
")"
] | Visit directory, create distutil tuple
Add distutil tuple for each directory using this format:
(destination, [dirname/file1, dirname/file2, ...])
distutil will copy later file1, file2, ... into destination. | [
"Visit",
"directory",
"create",
"distutil",
"tuple",
"Add",
"distutil",
"tuple",
"for",
"each",
"directory",
"using",
"this",
"format",
":",
"(",
"destination",
"[",
"dirname",
"/",
"file1",
"dirname",
"/",
"file2",
"...",
"]",
")",
"distutil",
"will",
"copy",
"later",
"file1",
"file2",
"...",
"info",
"destination",
"."
] | python | train |
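The `visit` callback above follows the Python 2 `os.path.walk` protocol, where the walker passes the user state tuple as the first argument. A driving sketch with made-up paths:

    import os

    found = []                     # accumulates (destination, [files...]) tuples
    prefix = 'share/radical.entk'  # made-up install prefix
    strip = len('examples/')       # number of leading characters to drop from each dirname
    os.path.walk('examples', visit, (prefix, strip, found))
    # `found` can then be handed to setup(data_files=found)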
scott-griffiths/bitstring | bitstring.py | https://github.com/scott-griffiths/bitstring/blob/ab40ae7f0b43fe223a39b63cbc0529b09f3ef653/bitstring.py#L1784-L1802 | def _readsie(self, pos):
"""Return interpretation of next bits as a signed interleaved exponential-Golomb code.
Advances position to after the read code.
Raises ReadError if the end of the bitstring is encountered while
reading the code.
"""
codenum, pos = self._readuie(pos)
if not codenum:
return 0, pos
try:
if self[pos]:
return -codenum, pos + 1
else:
return codenum, pos + 1
except IndexError:
raise ReadError("Read off end of bitstring trying to read code.") | [
"def",
"_readsie",
"(",
"self",
",",
"pos",
")",
":",
"codenum",
",",
"pos",
"=",
"self",
".",
"_readuie",
"(",
"pos",
")",
"if",
"not",
"codenum",
":",
"return",
"0",
",",
"pos",
"try",
":",
"if",
"self",
"[",
"pos",
"]",
":",
"return",
"-",
"codenum",
",",
"pos",
"+",
"1",
"else",
":",
"return",
"codenum",
",",
"pos",
"+",
"1",
"except",
"IndexError",
":",
"raise",
"ReadError",
"(",
"\"Read off end of bitstring trying to read code.\"",
")"
] | Return interpretation of next bits as a signed interleaved exponential-Golomb code.
Advances position to after the read code.
Raises ReadError if the end of the bitstring is encountered while
reading the code. | [
"Return",
"interpretation",
"of",
"next",
"bits",
"as",
"a",
"signed",
"interleaved",
"exponential",
"-",
"Golomb",
"code",
"."
] | python | train |
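`_readsie` backs the public 'sie' (signed interleaved exponential-Golomb) token. A small round-trip sketch through the public API, assuming the installed bitstring version accepts `sie` both as an initialiser keyword and as a read token:

    import bitstring

    s = bitstring.BitStream(sie=-5)   # encode -5 as a signed interleaved exp-Golomb code
    print(s.bin)                      # raw code bits
    s.pos = 0                         # read back from the start
    print(s.read('sie'))              # -5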
blazelibs/blazeutils | blazeutils/rst.py | https://github.com/blazelibs/blazeutils/blob/c94476325146007553cbddeeb9ef83394756babf/blazeutils/rst.py#L182-L190 | def rst2html(rst_src, **kwargs):
"""
Convert a reStructuredText string into a unicode HTML fragment.
For `kwargs`, see `default_rst_opts` and
http://docutils.sourceforge.net/docs/user/config.html
"""
pub = rst2pub(rst_src, settings_overrides=kwargs, writer_name='html')
return pub.writer.parts['body'] | [
"def",
"rst2html",
"(",
"rst_src",
",",
"*",
"*",
"kwargs",
")",
":",
"pub",
"=",
"rst2pub",
"(",
"rst_src",
",",
"settings_overrides",
"=",
"kwargs",
",",
"writer_name",
"=",
"'html'",
")",
"return",
"pub",
".",
"writer",
".",
"parts",
"[",
"'body'",
"]"
] | Convert a reStructuredText string into a unicode HTML fragment.
For `kwargs`, see `default_rst_opts` and
http://docutils.sourceforge.net/docs/user/config.html | [
"Convert",
"a",
"reStructuredText",
"string",
"into",
"a",
"unicode",
"HTML",
"fragment",
"."
] | python | train |
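A short usage sketch for `rst2html` above. The `initial_header_level` keyword is just one example of a docutils setting passed through via the kwargs (see the docutils config page referenced in the docstring):

    fragment = rst2html('Title\n=====\n\nSome *emphasised* text.', initial_header_level=2)
    print(fragment)   # unicode HTML body fragment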
mlperf/training | object_detection/pytorch/maskrcnn_benchmark/data/datasets/evaluation/coco/coco_eval.py | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/object_detection/pytorch/maskrcnn_benchmark/data/datasets/evaluation/coco/coco_eval.py#L189-L302 | def evaluate_box_proposals(
predictions, dataset, thresholds=None, area="all", limit=None
):
"""Evaluate detection proposal recall metrics. This function is a much
faster alternative to the official COCO API recall evaluation code. However,
it produces slightly different results.
"""
# Record max overlap value for each gt box
# Return vector of overlap values
areas = {
"all": 0,
"small": 1,
"medium": 2,
"large": 3,
"96-128": 4,
"128-256": 5,
"256-512": 6,
"512-inf": 7,
}
area_ranges = [
[0 ** 2, 1e5 ** 2], # all
[0 ** 2, 32 ** 2], # small
[32 ** 2, 96 ** 2], # medium
[96 ** 2, 1e5 ** 2], # large
[96 ** 2, 128 ** 2], # 96-128
[128 ** 2, 256 ** 2], # 128-256
[256 ** 2, 512 ** 2], # 256-512
[512 ** 2, 1e5 ** 2],
] # 512-inf
assert area in areas, "Unknown area range: {}".format(area)
area_range = area_ranges[areas[area]]
gt_overlaps = []
num_pos = 0
for image_id, prediction in enumerate(predictions):
original_id = dataset.id_to_img_map[image_id]
img_info = dataset.get_img_info(image_id)
image_width = img_info["width"]
image_height = img_info["height"]
prediction = prediction.resize((image_width, image_height))
# sort predictions in descending order
# TODO maybe remove this and make it explicit in the documentation
inds = prediction.get_field("objectness").sort(descending=True)[1]
prediction = prediction[inds]
ann_ids = dataset.coco.getAnnIds(imgIds=original_id)
anno = dataset.coco.loadAnns(ann_ids)
gt_boxes = [obj["bbox"] for obj in anno if obj["iscrowd"] == 0]
gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
gt_boxes = BoxList(gt_boxes, (image_width, image_height), mode="xywh").convert(
"xyxy"
)
gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])
if len(gt_boxes) == 0:
continue
valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
gt_boxes = gt_boxes[valid_gt_inds]
num_pos += len(gt_boxes)
if len(gt_boxes) == 0:
continue
if len(prediction) == 0:
continue
if limit is not None and len(prediction) > limit:
prediction = prediction[:limit]
overlaps = boxlist_iou(prediction, gt_boxes)
_gt_overlaps = torch.zeros(len(gt_boxes))
for j in range(min(len(prediction), len(gt_boxes))):
# find which proposal box maximally covers each gt box
# and get the iou amount of coverage for each gt box
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
# find which gt box is 'best' covered (i.e. 'best' = most iou)
gt_ovr, gt_ind = max_overlaps.max(dim=0)
assert gt_ovr >= 0
# find the proposal box that covers the best covered gt box
box_ind = argmax_overlaps[gt_ind]
# record the iou coverage of this gt box
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert _gt_overlaps[j] == gt_ovr
# mark the proposal box and the gt box as used
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
# append recorded iou coverage level
gt_overlaps.append(_gt_overlaps)
gt_overlaps = torch.cat(gt_overlaps, dim=0)
gt_overlaps, _ = torch.sort(gt_overlaps)
if thresholds is None:
step = 0.05
thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
recalls = torch.zeros_like(thresholds)
# compute recall for each iou threshold
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
# ar = 2 * np.trapz(recalls, thresholds)
ar = recalls.mean()
return {
"ar": ar,
"recalls": recalls,
"thresholds": thresholds,
"gt_overlaps": gt_overlaps,
"num_pos": num_pos,
} | [
"def",
"evaluate_box_proposals",
"(",
"predictions",
",",
"dataset",
",",
"thresholds",
"=",
"None",
",",
"area",
"=",
"\"all\"",
",",
"limit",
"=",
"None",
")",
":",
"# Record max overlap value for each gt box",
"# Return vector of overlap values",
"areas",
"=",
"{",
"\"all\"",
":",
"0",
",",
"\"small\"",
":",
"1",
",",
"\"medium\"",
":",
"2",
",",
"\"large\"",
":",
"3",
",",
"\"96-128\"",
":",
"4",
",",
"\"128-256\"",
":",
"5",
",",
"\"256-512\"",
":",
"6",
",",
"\"512-inf\"",
":",
"7",
",",
"}",
"area_ranges",
"=",
"[",
"[",
"0",
"**",
"2",
",",
"1e5",
"**",
"2",
"]",
",",
"# all",
"[",
"0",
"**",
"2",
",",
"32",
"**",
"2",
"]",
",",
"# small",
"[",
"32",
"**",
"2",
",",
"96",
"**",
"2",
"]",
",",
"# medium",
"[",
"96",
"**",
"2",
",",
"1e5",
"**",
"2",
"]",
",",
"# large",
"[",
"96",
"**",
"2",
",",
"128",
"**",
"2",
"]",
",",
"# 96-128",
"[",
"128",
"**",
"2",
",",
"256",
"**",
"2",
"]",
",",
"# 128-256",
"[",
"256",
"**",
"2",
",",
"512",
"**",
"2",
"]",
",",
"# 256-512",
"[",
"512",
"**",
"2",
",",
"1e5",
"**",
"2",
"]",
",",
"]",
"# 512-inf",
"assert",
"area",
"in",
"areas",
",",
"\"Unknown area range: {}\"",
".",
"format",
"(",
"area",
")",
"area_range",
"=",
"area_ranges",
"[",
"areas",
"[",
"area",
"]",
"]",
"gt_overlaps",
"=",
"[",
"]",
"num_pos",
"=",
"0",
"for",
"image_id",
",",
"prediction",
"in",
"enumerate",
"(",
"predictions",
")",
":",
"original_id",
"=",
"dataset",
".",
"id_to_img_map",
"[",
"image_id",
"]",
"img_info",
"=",
"dataset",
".",
"get_img_info",
"(",
"image_id",
")",
"image_width",
"=",
"img_info",
"[",
"\"width\"",
"]",
"image_height",
"=",
"img_info",
"[",
"\"height\"",
"]",
"prediction",
"=",
"prediction",
".",
"resize",
"(",
"(",
"image_width",
",",
"image_height",
")",
")",
"# sort predictions in descending order",
"# TODO maybe remove this and make it explicit in the documentation",
"inds",
"=",
"prediction",
".",
"get_field",
"(",
"\"objectness\"",
")",
".",
"sort",
"(",
"descending",
"=",
"True",
")",
"[",
"1",
"]",
"prediction",
"=",
"prediction",
"[",
"inds",
"]",
"ann_ids",
"=",
"dataset",
".",
"coco",
".",
"getAnnIds",
"(",
"imgIds",
"=",
"original_id",
")",
"anno",
"=",
"dataset",
".",
"coco",
".",
"loadAnns",
"(",
"ann_ids",
")",
"gt_boxes",
"=",
"[",
"obj",
"[",
"\"bbox\"",
"]",
"for",
"obj",
"in",
"anno",
"if",
"obj",
"[",
"\"iscrowd\"",
"]",
"==",
"0",
"]",
"gt_boxes",
"=",
"torch",
".",
"as_tensor",
"(",
"gt_boxes",
")",
".",
"reshape",
"(",
"-",
"1",
",",
"4",
")",
"# guard against no boxes",
"gt_boxes",
"=",
"BoxList",
"(",
"gt_boxes",
",",
"(",
"image_width",
",",
"image_height",
")",
",",
"mode",
"=",
"\"xywh\"",
")",
".",
"convert",
"(",
"\"xyxy\"",
")",
"gt_areas",
"=",
"torch",
".",
"as_tensor",
"(",
"[",
"obj",
"[",
"\"area\"",
"]",
"for",
"obj",
"in",
"anno",
"if",
"obj",
"[",
"\"iscrowd\"",
"]",
"==",
"0",
"]",
")",
"if",
"len",
"(",
"gt_boxes",
")",
"==",
"0",
":",
"continue",
"valid_gt_inds",
"=",
"(",
"gt_areas",
">=",
"area_range",
"[",
"0",
"]",
")",
"&",
"(",
"gt_areas",
"<=",
"area_range",
"[",
"1",
"]",
")",
"gt_boxes",
"=",
"gt_boxes",
"[",
"valid_gt_inds",
"]",
"num_pos",
"+=",
"len",
"(",
"gt_boxes",
")",
"if",
"len",
"(",
"gt_boxes",
")",
"==",
"0",
":",
"continue",
"if",
"len",
"(",
"prediction",
")",
"==",
"0",
":",
"continue",
"if",
"limit",
"is",
"not",
"None",
"and",
"len",
"(",
"prediction",
")",
">",
"limit",
":",
"prediction",
"=",
"prediction",
"[",
":",
"limit",
"]",
"overlaps",
"=",
"boxlist_iou",
"(",
"prediction",
",",
"gt_boxes",
")",
"_gt_overlaps",
"=",
"torch",
".",
"zeros",
"(",
"len",
"(",
"gt_boxes",
")",
")",
"for",
"j",
"in",
"range",
"(",
"min",
"(",
"len",
"(",
"prediction",
")",
",",
"len",
"(",
"gt_boxes",
")",
")",
")",
":",
"# find which proposal box maximally covers each gt box",
"# and get the iou amount of coverage for each gt box",
"max_overlaps",
",",
"argmax_overlaps",
"=",
"overlaps",
".",
"max",
"(",
"dim",
"=",
"0",
")",
"# find which gt box is 'best' covered (i.e. 'best' = most iou)",
"gt_ovr",
",",
"gt_ind",
"=",
"max_overlaps",
".",
"max",
"(",
"dim",
"=",
"0",
")",
"assert",
"gt_ovr",
">=",
"0",
"# find the proposal box that covers the best covered gt box",
"box_ind",
"=",
"argmax_overlaps",
"[",
"gt_ind",
"]",
"# record the iou coverage of this gt box",
"_gt_overlaps",
"[",
"j",
"]",
"=",
"overlaps",
"[",
"box_ind",
",",
"gt_ind",
"]",
"assert",
"_gt_overlaps",
"[",
"j",
"]",
"==",
"gt_ovr",
"# mark the proposal box and the gt box as used",
"overlaps",
"[",
"box_ind",
",",
":",
"]",
"=",
"-",
"1",
"overlaps",
"[",
":",
",",
"gt_ind",
"]",
"=",
"-",
"1",
"# append recorded iou coverage level",
"gt_overlaps",
".",
"append",
"(",
"_gt_overlaps",
")",
"gt_overlaps",
"=",
"torch",
".",
"cat",
"(",
"gt_overlaps",
",",
"dim",
"=",
"0",
")",
"gt_overlaps",
",",
"_",
"=",
"torch",
".",
"sort",
"(",
"gt_overlaps",
")",
"if",
"thresholds",
"is",
"None",
":",
"step",
"=",
"0.05",
"thresholds",
"=",
"torch",
".",
"arange",
"(",
"0.5",
",",
"0.95",
"+",
"1e-5",
",",
"step",
",",
"dtype",
"=",
"torch",
".",
"float32",
")",
"recalls",
"=",
"torch",
".",
"zeros_like",
"(",
"thresholds",
")",
"# compute recall for each iou threshold",
"for",
"i",
",",
"t",
"in",
"enumerate",
"(",
"thresholds",
")",
":",
"recalls",
"[",
"i",
"]",
"=",
"(",
"gt_overlaps",
">=",
"t",
")",
".",
"float",
"(",
")",
".",
"sum",
"(",
")",
"/",
"float",
"(",
"num_pos",
")",
"# ar = 2 * np.trapz(recalls, thresholds)",
"ar",
"=",
"recalls",
".",
"mean",
"(",
")",
"return",
"{",
"\"ar\"",
":",
"ar",
",",
"\"recalls\"",
":",
"recalls",
",",
"\"thresholds\"",
":",
"thresholds",
",",
"\"gt_overlaps\"",
":",
"gt_overlaps",
",",
"\"num_pos\"",
":",
"num_pos",
",",
"}"
] | Evaluate detection proposal recall metrics. This function is a much
faster alternative to the official COCO API recall evaluation code. However,
it produces slightly different results. | [
"Evaluate",
"detection",
"proposal",
"recall",
"metrics",
".",
"This",
"function",
"is",
"a",
"much",
"faster",
"alternative",
"to",
"the",
"official",
"COCO",
"API",
"recall",
"evaluation",
"code",
".",
"However",
"it",
"produces",
"slightly",
"different",
"results",
"."
] | python | train |
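A hedged call sketch for `evaluate_box_proposals` above. Building `predictions` (a list of BoxList objects carrying an 'objectness' field) and `dataset` (a COCO-style dataset wrapper) is part of the surrounding maskrcnn_benchmark pipeline and is not reproduced here:

    stats = evaluate_box_proposals(predictions, dataset, area="small", limit=1000)
    print("AR@1000 (small boxes):", stats["ar"].item())   # mean recall over IoU thresholds
    print("num positives:", stats["num_pos"])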
totalgood/nlpia | src/nlpia/loaders.py | https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L820-L858 | def download_unzip(names=None, normalize_filenames=False, verbose=True):
r""" Download CSV or HTML tables listed in `names`, unzip and to DATA_PATH/`names`.csv .txt etc
TODO: move to web or data_utils or futils
Also normalizes file name extensions (.bin.gz -> .w2v.bin.gz).
Uses table in data_info.csv (internal DATA_INFO) to determine URL or file path from dataset name.
Also looks
If names or [names] is a valid URL then download it and create a name
from the url in BIG_URLS (not yet pushed to data_info.csv)
"""
names = [names] if isinstance(names, (str, basestring)) else names
# names = names or list(BIG_URLS.keys()) # download them all, if none specified!
file_paths = {}
for name in names:
created = create_big_url(name)
name = (created or name).lower().strip()
if name in BIG_URLS:
filepath = download_name(name, verbose=verbose)
if not filepath:
continue
file_paths[name] = normalize_ext_rename(filepath)
logger.debug('downloaded name={} to filepath={}'.format(name, file_paths[name]))
fplower = file_paths[name].lower()
if fplower.endswith('.tar.gz'):
logger.info('Extracting {}'.format(file_paths[name]))
file_paths[name] = untar(file_paths[name], verbose=verbose)
logger.debug('download_untar.filepaths=' + str(file_paths))
elif file_paths[name].lower().endswith('.zip'):
file_paths[name] = unzip(file_paths[name], verbose=verbose)
logger.debug('download_unzip.filepaths=' + str(file_paths))
else:
df = pd.read_html(DATA_INFO['url'][name], **DATA_INFO['downloader_kwargs'][name])[-1]
df.columns = clean_columns(df.columns)
file_paths[name] = os.path.join(DATA_PATH, name + '.csv')
df.to_csv(file_paths[name])
file_paths[name] = normalize_ext_rename(file_paths[name])
return file_paths | [
"def",
"download_unzip",
"(",
"names",
"=",
"None",
",",
"normalize_filenames",
"=",
"False",
",",
"verbose",
"=",
"True",
")",
":",
"names",
"=",
"[",
"names",
"]",
"if",
"isinstance",
"(",
"names",
",",
"(",
"str",
",",
"basestring",
")",
")",
"else",
"names",
"# names = names or list(BIG_URLS.keys()) # download them all, if none specified!",
"file_paths",
"=",
"{",
"}",
"for",
"name",
"in",
"names",
":",
"created",
"=",
"create_big_url",
"(",
"name",
")",
"name",
"=",
"(",
"created",
"or",
"name",
")",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
"if",
"name",
"in",
"BIG_URLS",
":",
"filepath",
"=",
"download_name",
"(",
"name",
",",
"verbose",
"=",
"verbose",
")",
"if",
"not",
"filepath",
":",
"continue",
"file_paths",
"[",
"name",
"]",
"=",
"normalize_ext_rename",
"(",
"filepath",
")",
"logger",
".",
"debug",
"(",
"'downloaded name={} to filepath={}'",
".",
"format",
"(",
"name",
",",
"file_paths",
"[",
"name",
"]",
")",
")",
"fplower",
"=",
"file_paths",
"[",
"name",
"]",
".",
"lower",
"(",
")",
"if",
"fplower",
".",
"endswith",
"(",
"'.tar.gz'",
")",
":",
"logger",
".",
"info",
"(",
"'Extracting {}'",
".",
"format",
"(",
"file_paths",
"[",
"name",
"]",
")",
")",
"file_paths",
"[",
"name",
"]",
"=",
"untar",
"(",
"file_paths",
"[",
"name",
"]",
",",
"verbose",
"=",
"verbose",
")",
"logger",
".",
"debug",
"(",
"'download_untar.filepaths='",
"+",
"str",
"(",
"file_paths",
")",
")",
"elif",
"file_paths",
"[",
"name",
"]",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"'.zip'",
")",
":",
"file_paths",
"[",
"name",
"]",
"=",
"unzip",
"(",
"file_paths",
"[",
"name",
"]",
",",
"verbose",
"=",
"verbose",
")",
"logger",
".",
"debug",
"(",
"'download_unzip.filepaths='",
"+",
"str",
"(",
"file_paths",
")",
")",
"else",
":",
"df",
"=",
"pd",
".",
"read_html",
"(",
"DATA_INFO",
"[",
"'url'",
"]",
"[",
"name",
"]",
",",
"*",
"*",
"DATA_INFO",
"[",
"'downloader_kwargs'",
"]",
"[",
"name",
"]",
")",
"[",
"-",
"1",
"]",
"df",
".",
"columns",
"=",
"clean_columns",
"(",
"df",
".",
"columns",
")",
"file_paths",
"[",
"name",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"DATA_PATH",
",",
"name",
"+",
"'.csv'",
")",
"df",
".",
"to_csv",
"(",
"file_paths",
"[",
"name",
"]",
")",
"file_paths",
"[",
"name",
"]",
"=",
"normalize_ext_rename",
"(",
"file_paths",
"[",
"name",
"]",
")",
"return",
"file_paths"
] | r""" Download CSV or HTML tables listed in `names`, unzip and to DATA_PATH/`names`.csv .txt etc
TODO: move to web or data_utils or futils
Also normalizes file name extensions (.bin.gz -> .w2v.bin.gz).
Uses table in data_info.csv (internal DATA_INFO) to determine URL or file path from dataset name.
Also looks
If names or [names] is a valid URL then download it and create a name
from the url in BIG_URLS (not yet pushed to data_info.csv) | [
"r",
"Download",
"CSV",
"or",
"HTML",
"tables",
"listed",
"in",
"names",
"unzip",
"and",
"to",
"DATA_PATH",
"/",
"names",
".",
"csv",
".",
"txt",
"etc"
] | python | train |
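A usage sketch for `download_unzip` above. The dataset name is a placeholder; the call only succeeds if it matches a key in BIG_URLS / DATA_INFO or is itself a downloadable URL:

    # 'w2v' is a hypothetical dataset key, not a guaranteed one
    paths = download_unzip(['w2v'])
    print(paths)   # e.g. {'w2v': '<DATA_PATH>/...'} on success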
cloudendpoints/endpoints-python | endpoints/users_id_token.py | https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/users_id_token.py#L750-L794 | def get_verified_jwt(
providers, audiences,
check_authorization_header=True, check_query_arg=True,
request=None, cache=memcache):
"""
This function will extract, verify, and parse a JWT token from the
Authorization header or access_token query argument.
The JWT is assumed to contain an issuer and audience claim, as well
as issued-at and expiration timestamps. The signature will be
cryptographically verified, the claims and timestamps will be
checked, and the resulting parsed JWT body is returned.
If at any point the JWT is missing or found to be invalid, the
return result will be None.
Arguments:
providers - An iterable of dicts each containing 'issuer' and 'cert_uri' keys
audiences - An iterable of valid audiences
check_authorization_header - Boolean; check 'Authorization: Bearer' header
check_query_arg - Boolean; check 'access_token' query arg
request - Must be the request object if check_query_arg is true; otherwise ignored.
cache - In testing, override the certificate cache
"""
if not (check_authorization_header or check_query_arg):
raise ValueError(
'Either check_authorization_header or check_query_arg must be True.')
if check_query_arg and request is None:
raise ValueError(
'Cannot check query arg without request object.')
schemes = ('Bearer',) if check_authorization_header else ()
keys = ('access_token',) if check_query_arg else ()
token = _get_token(
request=request, allowed_auth_schemes=schemes, allowed_query_keys=keys)
if token is None:
return None
time_now = long(time.time())
for provider in providers:
parsed_token = _parse_and_verify_jwt(
token, time_now, (provider['issuer'],), audiences, provider['cert_uri'], cache)
if parsed_token is not None:
return parsed_token
return None | [
"def",
"get_verified_jwt",
"(",
"providers",
",",
"audiences",
",",
"check_authorization_header",
"=",
"True",
",",
"check_query_arg",
"=",
"True",
",",
"request",
"=",
"None",
",",
"cache",
"=",
"memcache",
")",
":",
"if",
"not",
"(",
"check_authorization_header",
"or",
"check_query_arg",
")",
":",
"raise",
"ValueError",
"(",
"'Either check_authorization_header or check_query_arg must be True.'",
")",
"if",
"check_query_arg",
"and",
"request",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Cannot check query arg without request object.'",
")",
"schemes",
"=",
"(",
"'Bearer'",
",",
")",
"if",
"check_authorization_header",
"else",
"(",
")",
"keys",
"=",
"(",
"'access_token'",
",",
")",
"if",
"check_query_arg",
"else",
"(",
")",
"token",
"=",
"_get_token",
"(",
"request",
"=",
"request",
",",
"allowed_auth_schemes",
"=",
"schemes",
",",
"allowed_query_keys",
"=",
"keys",
")",
"if",
"token",
"is",
"None",
":",
"return",
"None",
"time_now",
"=",
"long",
"(",
"time",
".",
"time",
"(",
")",
")",
"for",
"provider",
"in",
"providers",
":",
"parsed_token",
"=",
"_parse_and_verify_jwt",
"(",
"token",
",",
"time_now",
",",
"(",
"provider",
"[",
"'issuer'",
"]",
",",
")",
",",
"audiences",
",",
"provider",
"[",
"'cert_uri'",
"]",
",",
"cache",
")",
"if",
"parsed_token",
"is",
"not",
"None",
":",
"return",
"parsed_token",
"return",
"None"
] | This function will extract, verify, and parse a JWT token from the
Authorization header or access_token query argument.
The JWT is assumed to contain an issuer and audience claim, as well
as issued-at and expiration timestamps. The signature will be
cryptographically verified, the claims and timestamps will be
checked, and the resulting parsed JWT body is returned.
If at any point the JWT is missing or found to be invalid, the
return result will be None.
Arguments:
providers - An iterable of dicts each containing 'issuer' and 'cert_uri' keys
audiences - An iterable of valid audiences
check_authorization_header - Boolean; check 'Authorization: Bearer' header
check_query_arg - Boolean; check 'access_token' query arg
request - Must be the request object if check_query_arg is true; otherwise ignored.
cache - In testing, override the certificate cache | [
"This",
"function",
"will",
"extract",
"verify",
"and",
"parse",
"a",
"JWT",
"token",
"from",
"the",
"Authorization",
"header",
"or",
"access_token",
"query",
"argument",
"."
] | python | train |
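A call sketch for `get_verified_jwt` above, with made-up issuer, cert URI and audience values; the provider dict keys follow the docstring:

    providers = [{
        'issuer': 'https://accounts.example.com',            # made-up issuer
        'cert_uri': 'https://example.com/oauth2/v1/certs',   # made-up cert URL
    }]
    audiences = ['my-backend-client-id']                     # made-up audience
    # `request` is the current request object, required because check_query_arg defaults to True
    parsed = get_verified_jwt(providers, audiences, request=request)
    if parsed is not None:
        print(parsed.get('sub'))   # subject claim of the verified token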
dwavesystems/dwave_networkx | dwave_networkx/drawing/chimera_layout.py | https://github.com/dwavesystems/dwave_networkx/blob/9ea1223ddbc7e86db2f90b8b23e250e6642c3d68/dwave_networkx/drawing/chimera_layout.py#L122-L203 | def chimera_node_placer_2d(m, n, t, scale=1., center=None, dim=2):
"""Generates a function that converts Chimera indices to x, y
coordinates for a plot.
Parameters
----------
m : int
Number of rows in the Chimera lattice.
n : int
Number of columns in the Chimera lattice.
t : int
Size of the shore within each Chimera tile.
scale : float (default 1.)
Scale factor. When scale = 1, all positions fit within [0, 1]
on the x-axis and [-1, 0] on the y-axis.
center : None or array (default None)
Coordinates of the top left corner.
dim : int (default 2)
Number of dimensions. When dim > 2, all extra dimensions are
set to 0.
Returns
-------
xy_coords : function
A function that maps a Chimera index (i, j, u, k) in an
(m, n, t) Chimera lattice to x,y coordinates such as
used by a plot.
"""
import numpy as np
tile_center = t // 2
tile_length = t + 3 # 1 for middle of cross, 2 for spacing between tiles
# want the entire plot to fill in [0, 1] when scale=1
scale /= max(m, n) * tile_length - 3
grid_offsets = {}
if center is None:
center = np.zeros(dim)
else:
center = np.asarray(center)
paddims = dim - 2
if paddims < 0:
raise ValueError("layout must have at least two dimensions")
if len(center) != dim:
raise ValueError("length of center coordinates must match dimension of layout")
def _xy_coords(i, j, u, k):
# row, col, shore, shore index
# first get the coordinates within the tile
if k < tile_center:
p = k
else:
p = k + 1
if u:
xy = np.array([tile_center, -1 * p])
else:
xy = np.array([p, -1 * tile_center])
# next offset the coordinates based on which tile
if i > 0 or j > 0:
if (i, j) in grid_offsets:
xy += grid_offsets[(i, j)]
else:
off = np.array([j * tile_length, -1 * i * tile_length])
xy += off
grid_offsets[(i, j)] = off
# convention for Chimera-lattice pictures is to invert the y-axis
return np.hstack((xy * scale, np.zeros(paddims))) + center
return _xy_coords | [
"def",
"chimera_node_placer_2d",
"(",
"m",
",",
"n",
",",
"t",
",",
"scale",
"=",
"1.",
",",
"center",
"=",
"None",
",",
"dim",
"=",
"2",
")",
":",
"import",
"numpy",
"as",
"np",
"tile_center",
"=",
"t",
"//",
"2",
"tile_length",
"=",
"t",
"+",
"3",
"# 1 for middle of cross, 2 for spacing between tiles",
"# want the enter plot to fill in [0, 1] when scale=1",
"scale",
"/=",
"max",
"(",
"m",
",",
"n",
")",
"*",
"tile_length",
"-",
"3",
"grid_offsets",
"=",
"{",
"}",
"if",
"center",
"is",
"None",
":",
"center",
"=",
"np",
".",
"zeros",
"(",
"dim",
")",
"else",
":",
"center",
"=",
"np",
".",
"asarray",
"(",
"center",
")",
"paddims",
"=",
"dim",
"-",
"2",
"if",
"paddims",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"layout must have at least two dimensions\"",
")",
"if",
"len",
"(",
"center",
")",
"!=",
"dim",
":",
"raise",
"ValueError",
"(",
"\"length of center coordinates must match dimension of layout\"",
")",
"def",
"_xy_coords",
"(",
"i",
",",
"j",
",",
"u",
",",
"k",
")",
":",
"# row, col, shore, shore index",
"# first get the coordinatiates within the tile",
"if",
"k",
"<",
"tile_center",
":",
"p",
"=",
"k",
"else",
":",
"p",
"=",
"k",
"+",
"1",
"if",
"u",
":",
"xy",
"=",
"np",
".",
"array",
"(",
"[",
"tile_center",
",",
"-",
"1",
"*",
"p",
"]",
")",
"else",
":",
"xy",
"=",
"np",
".",
"array",
"(",
"[",
"p",
",",
"-",
"1",
"*",
"tile_center",
"]",
")",
"# next offset the corrdinates based on the which tile",
"if",
"i",
">",
"0",
"or",
"j",
">",
"0",
":",
"if",
"(",
"i",
",",
"j",
")",
"in",
"grid_offsets",
":",
"xy",
"+=",
"grid_offsets",
"[",
"(",
"i",
",",
"j",
")",
"]",
"else",
":",
"off",
"=",
"np",
".",
"array",
"(",
"[",
"j",
"*",
"tile_length",
",",
"-",
"1",
"*",
"i",
"*",
"tile_length",
"]",
")",
"xy",
"+=",
"off",
"grid_offsets",
"[",
"(",
"i",
",",
"j",
")",
"]",
"=",
"off",
"# convention for Chimera-lattice pictures is to invert the y-axis",
"return",
"np",
".",
"hstack",
"(",
"(",
"xy",
"*",
"scale",
",",
"np",
".",
"zeros",
"(",
"paddims",
")",
")",
")",
"+",
"center",
"return",
"_xy_coords"
] | Generates a function that converts Chimera indices to x, y
coordinates for a plot.
Parameters
----------
m : int
Number of rows in the Chimera lattice.
n : int
Number of columns in the Chimera lattice.
t : int
Size of the shore within each Chimera tile.
scale : float (default 1.)
Scale factor. When scale = 1, all positions fit within [0, 1]
on the x-axis and [-1, 0] on the y-axis.
center : None or array (default None)
Coordinates of the top left corner.
dim : int (default 2)
Number of dimensions. When dim > 2, all extra dimensions are
set to 0.
Returns
-------
xy_coords : function
A function that maps a Chimera index (i, j, u, k) in an
(m, n, t) Chimera lattice to x,y coordinates such as
used by a plot. | [
"Generates",
"a",
"function",
"that",
"converts",
"Chimera",
"indices",
"to",
"x",
"y",
"coordinates",
"for",
"a",
"plot",
"."
] | python | train |
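A small sketch exercising the coordinate function returned above, for a 2x2 Chimera lattice with shore size 4; the indices are arbitrary valid (row, column, shore, shore-index) tuples:

    xy_coords = chimera_node_placer_2d(2, 2, 4, scale=1.0)

    print(xy_coords(0, 0, 0, 0))   # position of the node at row 0, column 0, shore 0, index 0
    print(xy_coords(1, 1, 1, 3))   # a node in the bottom-right tile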
oasis-open/cti-stix-validator | stix2validator/validator.py | https://github.com/oasis-open/cti-stix-validator/blob/a607014e3fa500a7678f8b61b278456ca581f9d0/stix2validator/validator.py#L309-L339 | def get_json_files(files, recursive=False):
"""Return a list of files to validate from `files`. If a member of `files`
is a directory, its children with a ``.json`` extension will be added to
the return value.
Args:
files: A list of file paths and/or directory paths.
recursive: If ``true``, this will descend into any subdirectories
of input directories.
Returns:
A list of file paths to validate.
"""
json_files = []
if not files:
return json_files
for fn in files:
if os.path.isdir(fn):
children = list_json_files(fn, recursive)
json_files.extend(children)
elif is_json(fn):
json_files.append(fn)
else:
continue
if not json_files:
raise NoJSONFileFoundError("No JSON files found!")
return json_files | [
"def",
"get_json_files",
"(",
"files",
",",
"recursive",
"=",
"False",
")",
":",
"json_files",
"=",
"[",
"]",
"if",
"not",
"files",
":",
"return",
"json_files",
"for",
"fn",
"in",
"files",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"fn",
")",
":",
"children",
"=",
"list_json_files",
"(",
"fn",
",",
"recursive",
")",
"json_files",
".",
"extend",
"(",
"children",
")",
"elif",
"is_json",
"(",
"fn",
")",
":",
"json_files",
".",
"append",
"(",
"fn",
")",
"else",
":",
"continue",
"if",
"not",
"json_files",
":",
"raise",
"NoJSONFileFoundError",
"(",
"\"No JSON files found!\"",
")",
"return",
"json_files"
] | Return a list of files to validate from `files`. If a member of `files`
is a directory, its children with a ``.json`` extension will be added to
the return value.
Args:
files: A list of file paths and/or directory paths.
recursive: If ``true``, this will descend into any subdirectories
of input directories.
Returns:
A list of file paths to validate. | [
"Return",
"a",
"list",
"of",
"files",
"to",
"validate",
"from",
"files",
".",
"If",
"a",
"member",
"of",
"files",
"is",
"a",
"directory",
"its",
"children",
"with",
"a",
".",
"json",
"extension",
"will",
"be",
"added",
"to",
"the",
"return",
"value",
"."
] | python | train |
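A short sketch for `get_json_files` above; the paths are placeholders, and NoJSONFileFoundError is assumed to be importable from the same module that defines the function:

    try:
        json_files = get_json_files(['bundle.json', './examples'], recursive=True)
    except NoJSONFileFoundError:
        json_files = []
    print(json_files)   # list of .json file paths to validate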
KnowledgeLinks/rdfframework | rdfframework/rdfclass/rdffactories.py | https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/rdfclass/rdffactories.py#L178-L265 | def make(self):
""" reads through the definitions and generates an python class for each
definition """
log.setLevel(self.log_level)
created = []
self.set_class_dict()
start = datetime.datetime.now()
log.info(" # of classes to create: %s" % len(self.class_dict))
log.debug(" creating classes that are not subclassed")
for name, cls_defs in self.class_dict.items():
# if name in ['bf_Organization', 'bf_Agent']:
# pdb.set_trace()
if not self.class_dict[name].get('rdfs_subClassOf'):
created.append(name)
setattr(MODULE.rdfclass,
name,
types.new_class(name,
(RdfClassBase,),
{#'metaclass': RdfClassMeta,
'cls_defs': cls_defs}))
log.debug(" created %s classes in: %s",
len(created),
(datetime.datetime.now() - start))
for name in created:
del self.class_dict[name]
left = len(self.class_dict)
classes = []
while left > 0:
new = []
for name, cls_defs in self.class_dict.items():
# if name in ['bf_Organization', 'bf_Agent']:
# pdb.set_trace()
parents = self.class_dict[name].get('rdfs_subClassOf')
if not parents:
bases += (RdfClassBase, )
else:
for parent in make_list(parents):
bases = tuple()
if parent in created or parent in classes:
if parent in classes:
bases += (RdfClassBase, )
else:
base = getattr(MODULE.rdfclass, parent)
bases += (base,) + base.__bases__
if len(bases) > 0:
created.append(name)
setattr(MODULE.rdfclass,
name,
types.new_class(name,
bases,
{#'metaclass': RdfClassMeta,
'cls_defs': cls_defs}))
for name in created:
try:
del self.class_dict[name]
except KeyError:
pass
if left == len(self.class_dict):
# c_list = [self.class_dict[name].get('rdfs_subClassOf') \
# for name in self.class_dict]
missing_parents = []
for name in self.class_dict:
missing_parents += \
self.class_dict[name].get('rdfs_subClassOf', [])
missing_parents = set(missing_parents)
still_valid = set([name for name in self.class_dict
if name not in missing_parents])
classes = list(missing_parents.difference(\
set(self.class_dict.keys())))
# classess = []
# for cl in c_list:
# for item in cl:
# classes.append(item)
for name in self.class_dict:
if name in classes:
classes.remove(name)
for p_name in self.class_dict[name].get('rdfs_subClassOf',
[]).copy():
if p_name in classes:
self.class_dict[name]['rdfs_subClassOf'].remove(\
p_name)
# pdb.set_trace()
left = len(self.class_dict)
# self.tie_properties(created)
log.info(" created all classes in %s",
(datetime.datetime.now() - start)) | [
"def",
"make",
"(",
"self",
")",
":",
"log",
".",
"setLevel",
"(",
"self",
".",
"log_level",
")",
"created",
"=",
"[",
"]",
"self",
".",
"set_class_dict",
"(",
")",
"start",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"log",
".",
"info",
"(",
"\" # of classes to create: %s\"",
"%",
"len",
"(",
"self",
".",
"class_dict",
")",
")",
"log",
".",
"debug",
"(",
"\" creating classes that are not subclassed\"",
")",
"for",
"name",
",",
"cls_defs",
"in",
"self",
".",
"class_dict",
".",
"items",
"(",
")",
":",
"# if name in ['bf_Organization', 'bf_Agent']:",
"# pdb.set_trace()",
"if",
"not",
"self",
".",
"class_dict",
"[",
"name",
"]",
".",
"get",
"(",
"'rdfs_subClassOf'",
")",
":",
"created",
".",
"append",
"(",
"name",
")",
"setattr",
"(",
"MODULE",
".",
"rdfclass",
",",
"name",
",",
"types",
".",
"new_class",
"(",
"name",
",",
"(",
"RdfClassBase",
",",
")",
",",
"{",
"#'metaclass': RdfClassMeta,",
"'cls_defs'",
":",
"cls_defs",
"}",
")",
")",
"log",
".",
"debug",
"(",
"\" created %s classes in: %s\"",
",",
"len",
"(",
"created",
")",
",",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"-",
"start",
")",
")",
"for",
"name",
"in",
"created",
":",
"del",
"self",
".",
"class_dict",
"[",
"name",
"]",
"left",
"=",
"len",
"(",
"self",
".",
"class_dict",
")",
"classes",
"=",
"[",
"]",
"while",
"left",
">",
"0",
":",
"new",
"=",
"[",
"]",
"for",
"name",
",",
"cls_defs",
"in",
"self",
".",
"class_dict",
".",
"items",
"(",
")",
":",
"# if name in ['bf_Organization', 'bf_Agent']:",
"# pdb.set_trace()",
"parents",
"=",
"self",
".",
"class_dict",
"[",
"name",
"]",
".",
"get",
"(",
"'rdfs_subClassOf'",
")",
"if",
"not",
"parents",
":",
"bases",
"+=",
"(",
"RdfClassBase",
",",
")",
"else",
":",
"for",
"parent",
"in",
"make_list",
"(",
"parents",
")",
":",
"bases",
"=",
"tuple",
"(",
")",
"if",
"parent",
"in",
"created",
"or",
"parent",
"in",
"classes",
":",
"if",
"parent",
"in",
"classes",
":",
"bases",
"+=",
"(",
"RdfClassBase",
",",
")",
"else",
":",
"base",
"=",
"getattr",
"(",
"MODULE",
".",
"rdfclass",
",",
"parent",
")",
"bases",
"+=",
"(",
"base",
",",
")",
"+",
"base",
".",
"__bases__",
"if",
"len",
"(",
"bases",
")",
">",
"0",
":",
"created",
".",
"append",
"(",
"name",
")",
"setattr",
"(",
"MODULE",
".",
"rdfclass",
",",
"name",
",",
"types",
".",
"new_class",
"(",
"name",
",",
"bases",
",",
"{",
"#'metaclass': RdfClassMeta,",
"'cls_defs'",
":",
"cls_defs",
"}",
")",
")",
"for",
"name",
"in",
"created",
":",
"try",
":",
"del",
"self",
".",
"class_dict",
"[",
"name",
"]",
"except",
"KeyError",
":",
"pass",
"if",
"left",
"==",
"len",
"(",
"self",
".",
"class_dict",
")",
":",
"# c_list = [self.class_dict[name].get('rdfs_subClassOf') \\",
"# for name in self.class_dict]",
"missing_parents",
"=",
"[",
"]",
"for",
"name",
"in",
"self",
".",
"class_dict",
":",
"missing_parents",
"+=",
"self",
".",
"class_dict",
"[",
"name",
"]",
".",
"get",
"(",
"'rdfs_subClassOf'",
",",
"[",
"]",
")",
"missing_parents",
"=",
"set",
"(",
"missing_parents",
")",
"still_valid",
"=",
"set",
"(",
"[",
"name",
"for",
"name",
"in",
"self",
".",
"class_dict",
"if",
"name",
"not",
"in",
"missing_parents",
"]",
")",
"classes",
"=",
"list",
"(",
"missing_parents",
".",
"difference",
"(",
"set",
"(",
"self",
".",
"class_dict",
".",
"keys",
"(",
")",
")",
")",
")",
"# classess = []",
"# for cl in c_list:",
"# for item in cl:",
"# classes.append(item)",
"for",
"name",
"in",
"self",
".",
"class_dict",
":",
"if",
"name",
"in",
"classes",
":",
"classes",
".",
"remove",
"(",
"name",
")",
"for",
"p_name",
"in",
"self",
".",
"class_dict",
"[",
"name",
"]",
".",
"get",
"(",
"'rdfs_subClassOf'",
",",
"[",
"]",
")",
".",
"copy",
"(",
")",
":",
"if",
"p_name",
"in",
"classes",
":",
"self",
".",
"class_dict",
"[",
"name",
"]",
"[",
"'rdfs_subClassOf'",
"]",
".",
"remove",
"(",
"p_name",
")",
"# pdb.set_trace()",
"left",
"=",
"len",
"(",
"self",
".",
"class_dict",
")",
"# self.tie_properties(created)",
"log",
".",
"info",
"(",
"\" created all classes in %s\"",
",",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"-",
"start",
")",
")"
] | reads through the definitions and generates a python class for each
definition | [
"reads",
"through",
"the",
"definitions",
"and",
"generates",
"an",
"python",
"class",
"for",
"each",
"definition"
] | python | train |
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxanalysis.py | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxanalysis.py#L148-L165 | def get_output_ref(self, field, index=None, metadata=None):
'''
:param field: Output field name of this analysis
:type field: string
:param index: If the referenced field is an array, optionally specify an index (starting from 0) to indicate a particular member of the array
:type index: int
:param metadata: If the referenced field is of a data object class, a string indicating the metadata that should be read, e.g. "name", "properties.propkey", "details.refgenome"
:type metadata: string
Returns a dict containing a valid reference to an output of this analysis.
'''
link = {"$dnanexus_link": {"analysis": self._dxid, "field": field}}
if index is not None:
link["$dnanexus_link"]["index"] = index
if metadata is not None:
link["$dnanexus_link"]["metadata"] = metadata
return link | [
"def",
"get_output_ref",
"(",
"self",
",",
"field",
",",
"index",
"=",
"None",
",",
"metadata",
"=",
"None",
")",
":",
"link",
"=",
"{",
"\"$dnanexus_link\"",
":",
"{",
"\"analysis\"",
":",
"self",
".",
"_dxid",
",",
"\"field\"",
":",
"field",
"}",
"}",
"if",
"index",
"is",
"not",
"None",
":",
"link",
"[",
"\"$dnanexus_link\"",
"]",
"[",
"\"index\"",
"]",
"=",
"index",
"if",
"metadata",
"is",
"not",
"None",
":",
"link",
"[",
"\"$dnanexus_link\"",
"]",
"[",
"\"metadata\"",
"]",
"=",
"metadata",
"return",
"link"
] | :param field: Output field name of this analysis
:type field: string
:param index: If the referenced field is an array, optionally specify an index (starting from 0) to indicate a particular member of the array
:type index: int
:param metadata: If the referenced field is of a data object class, a string indicating the metadata that should be read, e.g. "name", "properties.propkey", "details.refgenome"
:type metadata: string
Returns a dict containing a valid reference to an output of this analysis. | [
":",
"param",
"field",
":",
"Output",
"field",
"name",
"of",
"this",
"analysis",
":",
"type",
"field",
":",
"string",
":",
"param",
"index",
":",
"If",
"the",
"referenced",
"field",
"is",
"an",
"array",
"optionally",
"specify",
"an",
"index",
"(",
"starting",
"from",
"0",
")",
"to",
"indicate",
"a",
"particular",
"member",
"of",
"the",
"array",
":",
"type",
"index",
":",
"int",
":",
"param",
"metadata",
":",
"If",
"the",
"referenced",
"field",
"is",
"of",
"a",
"data",
"object",
"class",
"a",
"string",
"indicating",
"the",
"metadata",
"that",
"should",
"be",
"read",
"e",
".",
"g",
".",
"name",
"properties",
".",
"propkey",
"details",
".",
"refgenome",
":",
"type",
"metadata",
":",
"string"
] | python | train |
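A sketch of the kind of reference `get_output_ref` builds; the field name, index and analysis ID are made up, and `analysis` is assumed to be a dxpy.DXAnalysis handle:

    ref = analysis.get_output_ref('mappings', index=0, metadata='name')
    # ref == {'$dnanexus_link': {'analysis': 'analysis-xxxx', 'field': 'mappings',
    #                            'index': 0, 'metadata': 'name'}}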
JarryShaw/PyPCAPKit | src/protocols/internet/hip.py | https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/internet/hip.py#L1523-L1555 | def _read_para_echo_response_signed(self, code, cbit, clen, *, desc, length, version):
"""Read HIP ECHO_RESPONSE_SIGNED parameter.
Structure of HIP ECHO_RESPONSE_SIGNED parameter [RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Opaque data (variable length) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 echo_response_signed.type Parameter Type
1 15 echo_response_signed.critical Critical Bit
2 16 echo_response_signed.length Length of Contents
4 32 echo_response_signed.data Opaque Data
"""
_data = self._read_fileng(clen)
echo_response_signed = dict(
type=desc,
critical=cbit,
length=clen,
data=_data,
)
_plen = length - clen
if _plen:
self._read_fileng(_plen)
return echo_response_signed | [
"def",
"_read_para_echo_response_signed",
"(",
"self",
",",
"code",
",",
"cbit",
",",
"clen",
",",
"*",
",",
"desc",
",",
"length",
",",
"version",
")",
":",
"_data",
"=",
"self",
".",
"_read_fileng",
"(",
"clen",
")",
"echo_response_signed",
"=",
"dict",
"(",
"type",
"=",
"desc",
",",
"critical",
"=",
"cbit",
",",
"length",
"=",
"clen",
",",
"data",
"=",
"_data",
",",
")",
"_plen",
"=",
"length",
"-",
"clen",
"if",
"_plen",
":",
"self",
".",
"_read_fileng",
"(",
"_plen",
")",
"return",
"echo_response_signed"
] | Read HIP ECHO_RESPONSE_SIGNED parameter.
Structure of HIP ECHO_RESPONSE_SIGNED parameter [RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Opaque data (variable length) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 echo_response_signed.type Parameter Type
1 15 echo_response_signed.critical Critical Bit
2 16 echo_response_signed.length Length of Contents
4 32 echo_response_signed.data Opaque Data | [
"Read",
"HIP",
"ECHO_RESPONSE_SIGNED",
"parameter",
"."
] | python | train |
diux-dev/ncluster | ncluster/aws_util.py | https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_util.py#L369-L398 | def ssh_to_task(task) -> paramiko.SSHClient:
"""Create ssh connection to task's machine
returns Paramiko SSH client connected to host.
"""
username = task.ssh_username
hostname = task.public_ip
ssh_key_fn = get_keypair_fn()
print(f"ssh -i {ssh_key_fn} {username}@{hostname}")
pkey = paramiko.RSAKey.from_private_key_file(ssh_key_fn)
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
assert ssh_client
counter = 1
while True:
try:
ssh_client.connect(hostname=hostname, username=username, pkey=pkey)
if counter % 11 == 0: # occasionally re-obtain public ip, machine could've gotten restarted
hostname = task.public_ip
break
except Exception as e:
print(
f'{task.name}: Exception connecting to {hostname} via ssh (could be a timeout): {e}')
time.sleep(RETRY_INTERVAL_SEC)
return ssh_client | [
"def",
"ssh_to_task",
"(",
"task",
")",
"->",
"paramiko",
".",
"SSHClient",
":",
"username",
"=",
"task",
".",
"ssh_username",
"hostname",
"=",
"task",
".",
"public_ip",
"ssh_key_fn",
"=",
"get_keypair_fn",
"(",
")",
"print",
"(",
"f\"ssh -i {ssh_key_fn} {username}@{hostname}\"",
")",
"pkey",
"=",
"paramiko",
".",
"RSAKey",
".",
"from_private_key_file",
"(",
"ssh_key_fn",
")",
"ssh_client",
"=",
"paramiko",
".",
"SSHClient",
"(",
")",
"ssh_client",
".",
"set_missing_host_key_policy",
"(",
"paramiko",
".",
"AutoAddPolicy",
"(",
")",
")",
"assert",
"ssh_client",
"counter",
"=",
"1",
"while",
"True",
":",
"try",
":",
"ssh_client",
".",
"connect",
"(",
"hostname",
"=",
"hostname",
",",
"username",
"=",
"username",
",",
"pkey",
"=",
"pkey",
")",
"if",
"counter",
"%",
"11",
"==",
"0",
":",
"# occasionally re-obtain public ip, machine could've gotten restarted",
"hostname",
"=",
"task",
".",
"public_ip",
"break",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"f'{task.name}: Exception connecting to {hostname} via ssh (could be a timeout): {e}'",
")",
"time",
".",
"sleep",
"(",
"RETRY_INTERVAL_SEC",
")",
"return",
"ssh_client"
] | Create ssh connection to task's machine
returns Paramiko SSH client connected to host. | [
"Create",
"ssh",
"connection",
"to",
"task",
"s",
"machine"
] | python | train |
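A usage sketch for `ssh_to_task` above; `task` is assumed to be an ncluster task object exposing the `ssh_username`, `public_ip` and `name` attributes the function reads:

    client = ssh_to_task(task)
    stdin, stdout, stderr = client.exec_command('uname -a')   # standard paramiko call
    print(stdout.read().decode())
    client.close()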
LogicalDash/LiSE | ELiDE/ELiDE/board/arrow.py | https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/board/arrow.py#L366-L392 | def add_widget(self, wid, index=0, canvas=None):
"""Put the :class:`Pawn` at a point along my length proportionate to
how close it is to finishing its travel through me.
Only :class:`Pawn` should ever be added as a child of :class:`Arrow`.
"""
super().add_widget(wid, index, canvas)
if not hasattr(wid, 'group'):
return
wid._no_use_canvas = True
mycanvas = (
self.canvas.before if canvas == 'before' else
self.canvas.after if canvas == 'after' else
self.canvas
)
mycanvas.remove(wid.canvas)
pawncanvas = (
self.board.spotlayout.canvas.before if canvas == 'before' else
self.board.spotlayout.canvas.after if canvas == 'after' else
self.board.spotlayout.canvas
)
for child in self.children:
if hasattr(child, 'group') and child.group in pawncanvas.children:
pawncanvas.remove(child.group)
pawncanvas.add(child.group)
self.pospawn(wid) | [
"def",
"add_widget",
"(",
"self",
",",
"wid",
",",
"index",
"=",
"0",
",",
"canvas",
"=",
"None",
")",
":",
"super",
"(",
")",
".",
"add_widget",
"(",
"wid",
",",
"index",
",",
"canvas",
")",
"if",
"not",
"hasattr",
"(",
"wid",
",",
"'group'",
")",
":",
"return",
"wid",
".",
"_no_use_canvas",
"=",
"True",
"mycanvas",
"=",
"(",
"self",
".",
"canvas",
".",
"before",
"if",
"canvas",
"==",
"'before'",
"else",
"self",
".",
"canvas",
".",
"after",
"if",
"canvas",
"==",
"'after'",
"else",
"self",
".",
"canvas",
")",
"mycanvas",
".",
"remove",
"(",
"wid",
".",
"canvas",
")",
"pawncanvas",
"=",
"(",
"self",
".",
"board",
".",
"spotlayout",
".",
"canvas",
".",
"before",
"if",
"canvas",
"==",
"'before'",
"else",
"self",
".",
"board",
".",
"spotlayout",
".",
"canvas",
".",
"after",
"if",
"canvas",
"==",
"'after'",
"else",
"self",
".",
"board",
".",
"spotlayout",
".",
"canvas",
")",
"for",
"child",
"in",
"self",
".",
"children",
":",
"if",
"hasattr",
"(",
"child",
",",
"'group'",
")",
"and",
"child",
".",
"group",
"in",
"pawncanvas",
".",
"children",
":",
"pawncanvas",
".",
"remove",
"(",
"child",
".",
"group",
")",
"pawncanvas",
".",
"add",
"(",
"child",
".",
"group",
")",
"self",
".",
"pospawn",
"(",
"wid",
")"
] | Put the :class:`Pawn` at a point along my length proportionate to
how close it is to finishing its travel through me.
Only :class:`Pawn` should ever be added as a child of :class:`Arrow`. | [
"Put",
"the",
":",
"class",
":",
"Pawn",
"at",
"a",
"point",
"along",
"my",
"length",
"proportionate",
"to",
"how",
"close",
"it",
"is",
"to",
"finishing",
"its",
"travel",
"through",
"me",
"."
] | python | train |
honzajavorek/redis-collections | redis_collections/sortedsets.py | https://github.com/honzajavorek/redis-collections/blob/07ca8efe88fb128f7dc7319dfa6a26cd39b3776b/redis_collections/sortedsets.py#L538-L587 | def update(self, other):
"""
Update the collection with items from *other*. Accepts other
:class:`GeoDB` instances, dictionaries mapping places to
``{'latitude': latitude, 'longitude': longitude}`` dicts,
or sequences of ``(place, latitude, longitude)`` tuples.
"""
# other is another Sorted Set
def update_sortedset_trans(pipe):
items = other._data(pipe=pipe) if use_redis else other._data()
pipe.multi()
for member, score in items:
pipe.zadd(self.key, {self._pickle(member): float(score)})
# other is dict-like
def update_mapping_trans(pipe):
items = other.items(pipe=pipe) if use_redis else other.items()
pipe.multi()
for place, value in items:
self.set_location(
place, value['latitude'], value['longitude'], pipe=pipe
)
# other is a list of tuples
def update_tuples_trans(pipe):
items = (
other.__iter__(pipe=pipe) if use_redis else other.__iter__()
)
pipe.multi()
for place, latitude, longitude in items:
self.set_location(place, latitude, longitude, pipe=pipe)
watches = []
if self._same_redis(other, RedisCollection):
use_redis = True
watches.append(other.key)
else:
use_redis = False
if isinstance(other, SortedSetBase):
func = update_sortedset_trans
elif hasattr(other, 'items'):
func = update_mapping_trans
elif hasattr(other, '__iter__'):
func = update_tuples_trans
self._transaction(func, *watches) | [
"def",
"update",
"(",
"self",
",",
"other",
")",
":",
"# other is another Sorted Set",
"def",
"update_sortedset_trans",
"(",
"pipe",
")",
":",
"items",
"=",
"other",
".",
"_data",
"(",
"pipe",
"=",
"pipe",
")",
"if",
"use_redis",
"else",
"other",
".",
"_data",
"(",
")",
"pipe",
".",
"multi",
"(",
")",
"for",
"member",
",",
"score",
"in",
"items",
":",
"pipe",
".",
"zadd",
"(",
"self",
".",
"key",
",",
"{",
"self",
".",
"_pickle",
"(",
"member",
")",
":",
"float",
"(",
"score",
")",
"}",
")",
"# other is dict-like",
"def",
"update_mapping_trans",
"(",
"pipe",
")",
":",
"items",
"=",
"other",
".",
"items",
"(",
"pipe",
"=",
"pipe",
")",
"if",
"use_redis",
"else",
"other",
".",
"items",
"(",
")",
"pipe",
".",
"multi",
"(",
")",
"for",
"place",
",",
"value",
"in",
"items",
":",
"self",
".",
"set_location",
"(",
"place",
",",
"value",
"[",
"'latitude'",
"]",
",",
"value",
"[",
"'longitude'",
"]",
",",
"pipe",
"=",
"pipe",
")",
"# other is a list of tuples",
"def",
"update_tuples_trans",
"(",
"pipe",
")",
":",
"items",
"=",
"(",
"other",
".",
"__iter__",
"(",
"pipe",
"=",
"pipe",
")",
"if",
"use_redis",
"else",
"other",
".",
"__iter__",
"(",
")",
")",
"pipe",
".",
"multi",
"(",
")",
"for",
"place",
",",
"latitude",
",",
"longitude",
"in",
"items",
":",
"self",
".",
"set_location",
"(",
"place",
",",
"latitude",
",",
"longitude",
",",
"pipe",
"=",
"pipe",
")",
"watches",
"=",
"[",
"]",
"if",
"self",
".",
"_same_redis",
"(",
"other",
",",
"RedisCollection",
")",
":",
"use_redis",
"=",
"True",
"watches",
".",
"append",
"(",
"other",
".",
"key",
")",
"else",
":",
"use_redis",
"=",
"False",
"if",
"isinstance",
"(",
"other",
",",
"SortedSetBase",
")",
":",
"func",
"=",
"update_sortedset_trans",
"elif",
"hasattr",
"(",
"other",
",",
"'items'",
")",
":",
"func",
"=",
"update_mapping_trans",
"elif",
"hasattr",
"(",
"other",
",",
"'__iter__'",
")",
":",
"func",
"=",
"update_tuples_trans",
"self",
".",
"_transaction",
"(",
"func",
",",
"*",
"watches",
")"
] | Update the collection with items from *other*. Accepts other
:class:`GeoDB` instances, dictionaries mapping places to
``{'latitude': latitude, 'longitude': longitude}`` dicts,
or sequences of ``(place, latitude, longitude)`` tuples. | [
"Update",
"the",
"collection",
"with",
"items",
"from",
"*",
"other",
"*",
".",
"Accepts",
"other",
":",
"class",
":",
"GeoDB",
"instances",
"dictionaries",
"mapping",
"places",
"to",
"{",
"latitude",
":",
"latitude",
"longitude",
":",
"longitude",
"}",
"dicts",
"or",
"sequences",
"of",
"(",
"place",
"latitude",
"longitude",
")",
"tuples",
"."
] | python | train |
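A hedged usage sketch for the update method above. It assumes a Redis server on localhost, that GeoDB is importable from redis_collections, and that the no-argument constructor creates its own connection; the place names and coordinates are made up:

from redis_collections import GeoDB  # assumes a redis-collections build that ships GeoDB

cities = GeoDB()  # assumes a local Redis on the default port
cities.update([
    ('Prague', 50.0755, 14.4378),      # (place, latitude, longitude) tuples
    ('Brno', 49.1951, 16.6068),
])
cities.update({'Ostrava': {'latitude': 49.8209, 'longitude': 18.2625}})  # dict-like form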
trendmicro/flask-ini | flask_ini.py | https://github.com/trendmicro/flask-ini/blob/a1e4baa598c9a01021a1333d9c15e4d99c8334dd/flask_ini.py#L12-L16 | def read(self, *args, **kwargs):
'''Overridden read() method to call parse_flask_section() at the end'''
ret = configparser.SafeConfigParser.read(self, *args, **kwargs)
self.parse_flask_section()
return ret | [
"def",
"read",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"ret",
"=",
"configparser",
".",
"SafeConfigParser",
".",
"read",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"parse_flask_section",
"(",
")",
"return",
"ret"
] | Overridden read() method to call parse_flask_section() at the end | [
"Overridden",
"read",
"()",
"method",
"to",
"call",
"parse_flask_section",
"()",
"at",
"the",
"end"
] | python | train |
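A usage sketch for the read() override above, following the flask-ini conventions as I understand them (a FlaskIni instance attached to the app, read inside an application context so the [flask] section can be copied into app.config); the settings file name is a placeholder:

from flask import Flask
from flask_ini import FlaskIni

app = Flask(__name__)
with app.app_context():
    app.iniconfig = FlaskIni()
    app.iniconfig.read('settings.ini')  # also applies the [flask] section to app.config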
chrisrink10/basilisp | src/basilisp/lang/runtime.py | https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/runtime.py#L881-L893 | def update(m, k, f, *args):
"""Updates the value for key k in associative data structure m with the return value from
calling f(old_v, *args). If m is None, use an empty map. If k is not in m, old_v will be
None."""
if m is None:
return lmap.Map.empty().assoc(k, f(None, *args))
if isinstance(m, IAssociative):
old_v = m.entry(k)
new_v = f(old_v, *args)
return m.assoc(k, new_v)
raise TypeError(
f"Object of type {type(m)} does not implement Associative interface"
) | [
"def",
"update",
"(",
"m",
",",
"k",
",",
"f",
",",
"*",
"args",
")",
":",
"if",
"m",
"is",
"None",
":",
"return",
"lmap",
".",
"Map",
".",
"empty",
"(",
")",
".",
"assoc",
"(",
"k",
",",
"f",
"(",
"None",
",",
"*",
"args",
")",
")",
"if",
"isinstance",
"(",
"m",
",",
"IAssociative",
")",
":",
"old_v",
"=",
"m",
".",
"entry",
"(",
"k",
")",
"new_v",
"=",
"f",
"(",
"old_v",
",",
"*",
"args",
")",
"return",
"m",
".",
"assoc",
"(",
"k",
",",
"new_v",
")",
"raise",
"TypeError",
"(",
"f\"Object of type {type(m)} does not implement Associative interface\"",
")"
] | Updates the value for key k in associative data structure m with the return value from
calling f(old_v, *args). If m is None, use an empty map. If k is not in m, old_v will be
None. | [
"Updates",
"the",
"value",
"for",
"key",
"k",
"in",
"associative",
"data",
"structure",
"m",
"with",
"the",
"return",
"value",
"from",
"calling",
"f",
"(",
"old_v",
"*",
"args",
")",
".",
"If",
"m",
"is",
"None",
"use",
"an",
"empty",
"map",
".",
"If",
"k",
"is",
"not",
"in",
"m",
"old_v",
"will",
"be",
"None",
"."
] | python | test |
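A plain-Python analogue of the update semantics above, using an ordinary dict in place of Basilisp's persistent map — this is an illustration of the pattern, not basilisp API:

def dict_update(m, k, f, *args):
    """Return a copy of m with m[k] replaced by f(old_value, *args); None means empty."""
    m = {} if m is None else dict(m)
    m[k] = f(m.get(k), *args)
    return m

counts = dict_update(None, "hits", lambda old, n: (old or 0) + n, 5)
counts = dict_update(counts, "hits", lambda old, n: (old or 0) + n, 2)
print(counts)  # {'hits': 7}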
dask/dask-ml | dask_ml/model_selection/_incremental.py | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/_incremental.py#L569-L578 | def fit(self, X, y, **fit_params):
"""Find the best parameters for a particular model.
Parameters
----------
X, y : array-like
**fit_params
Additional partial fit keyword arguments for the estimator.
"""
return default_client().sync(self._fit, X, y, **fit_params) | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
",",
"*",
"*",
"fit_params",
")",
":",
"return",
"default_client",
"(",
")",
".",
"sync",
"(",
"self",
".",
"_fit",
",",
"X",
",",
"y",
",",
"*",
"*",
"fit_params",
")"
] | Find the best parameters for a particular model.
Parameters
----------
X, y : array-like
**fit_params
Additional partial fit keyword arguments for the estimator. | [
"Find",
"the",
"best",
"parameters",
"for",
"a",
"particular",
"model",
"."
] | python | train |
rwl/pylon | pylon/opf.py | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/opf.py#L294-L309 | def _power_mismatch_dc(self, buses, generators, B, Pbusinj, base_mva):
""" Returns the power mismatch constraint (B*Va + Pg = Pd).
"""
nb, ng = len(buses), len(generators)
# Negative bus-generator incidence matrix.
gen_bus = array([g.bus._i for g in generators])
neg_Cg = csr_matrix((-ones(ng), (gen_bus, range(ng))), (nb, ng))
Amis = hstack([B, neg_Cg], format="csr")
Pd = array([bus.p_demand for bus in buses])
Gs = array([bus.g_shunt for bus in buses])
bmis = -(Pd - Gs) / base_mva - Pbusinj
return LinearConstraint("Pmis", Amis, bmis, bmis, ["Va", "Pg"]) | [
"def",
"_power_mismatch_dc",
"(",
"self",
",",
"buses",
",",
"generators",
",",
"B",
",",
"Pbusinj",
",",
"base_mva",
")",
":",
"nb",
",",
"ng",
"=",
"len",
"(",
"buses",
")",
",",
"len",
"(",
"generators",
")",
"# Negative bus-generator incidence matrix.",
"gen_bus",
"=",
"array",
"(",
"[",
"g",
".",
"bus",
".",
"_i",
"for",
"g",
"in",
"generators",
"]",
")",
"neg_Cg",
"=",
"csr_matrix",
"(",
"(",
"-",
"ones",
"(",
"ng",
")",
",",
"(",
"gen_bus",
",",
"range",
"(",
"ng",
")",
")",
")",
",",
"(",
"nb",
",",
"ng",
")",
")",
"Amis",
"=",
"hstack",
"(",
"[",
"B",
",",
"neg_Cg",
"]",
",",
"format",
"=",
"\"csr\"",
")",
"Pd",
"=",
"array",
"(",
"[",
"bus",
".",
"p_demand",
"for",
"bus",
"in",
"buses",
"]",
")",
"Gs",
"=",
"array",
"(",
"[",
"bus",
".",
"g_shunt",
"for",
"bus",
"in",
"buses",
"]",
")",
"bmis",
"=",
"-",
"(",
"Pd",
"-",
"Gs",
")",
"/",
"base_mva",
"-",
"Pbusinj",
"return",
"LinearConstraint",
"(",
"\"Pmis\"",
",",
"Amis",
",",
"bmis",
",",
"bmis",
",",
"[",
"\"Va\"",
",",
"\"Pg\"",
"]",
")"
] | Returns the power mismatch constraint (B*Va + Pg = Pd). | [
"Returns",
"the",
"power",
"mismatch",
"constraint",
"(",
"B",
"*",
"Va",
"+",
"Pg",
"=",
"Pd",
")",
"."
] | python | train |
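The constraint assembled above is the DC power balance B*Va + Pg = Pd in per unit. A self-contained toy construction of the same [B, -Cg] block matrix for a two-bus system with one generator — the numbers are invented and this is plain scipy, not the Pylon API:

import numpy as np
from scipy.sparse import csr_matrix, hstack

base_mva = 100.0
nb, ng = 2, 1                                   # two buses, one generator at bus 0
B = csr_matrix(np.array([[10.0, -10.0],
                         [-10.0, 10.0]]))       # DC susceptance matrix
neg_Cg = csr_matrix((-np.ones(ng), ([0], range(ng))), shape=(nb, ng))
Amis = hstack([B, neg_Cg], format="csr")        # acts on the stacked vector [Va, Pg]
Pd = np.array([0.0, 50.0])                      # MW demand per bus
bmis = -Pd / base_mva                           # no shunts or phase-shift injections here
print(Amis.toarray())                           # the [B, -Cg] equality-constraint matrix
print(bmis)                                     # per-unit demand, sign flipped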
Yelp/kafka-utils | kafka_utils/util/protocol.py | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/protocol.py#L27-L46 | def encode_offset_commit_request_kafka(cls, group, payloads):
"""
Encode an OffsetCommitRequest struct
Arguments:
group: string, the consumer group you are committing offsets for
payloads: list of OffsetCommitRequestPayload
"""
return kafka.protocol.commit.OffsetCommitRequest[2](
consumer_group=group,
consumer_group_generation_id=kafka.protocol.commit.OffsetCommitRequest[2].DEFAULT_GENERATION_ID,
consumer_id='',
retention_time=kafka.protocol.commit.OffsetCommitRequest[2].DEFAULT_RETENTION_TIME,
topics=[(
topic,
[(
partition,
payload.offset,
payload.metadata)
for partition, payload in six.iteritems(topic_payloads)])
for topic, topic_payloads in six.iteritems(group_by_topic_and_partition(payloads))]) | [
"def",
"encode_offset_commit_request_kafka",
"(",
"cls",
",",
"group",
",",
"payloads",
")",
":",
"return",
"kafka",
".",
"protocol",
".",
"commit",
".",
"OffsetCommitRequest",
"[",
"2",
"]",
"(",
"consumer_group",
"=",
"group",
",",
"consumer_group_generation_id",
"=",
"kafka",
".",
"protocol",
".",
"commit",
".",
"OffsetCommitRequest",
"[",
"2",
"]",
".",
"DEFAULT_GENERATION_ID",
",",
"consumer_id",
"=",
"''",
",",
"retention_time",
"=",
"kafka",
".",
"protocol",
".",
"commit",
".",
"OffsetCommitRequest",
"[",
"2",
"]",
".",
"DEFAULT_RETENTION_TIME",
",",
"topics",
"=",
"[",
"(",
"topic",
",",
"[",
"(",
"partition",
",",
"payload",
".",
"offset",
",",
"payload",
".",
"metadata",
")",
"for",
"partition",
",",
"payload",
"in",
"six",
".",
"iteritems",
"(",
"topic_payloads",
")",
"]",
")",
"for",
"topic",
",",
"topic_payloads",
"in",
"six",
".",
"iteritems",
"(",
"group_by_topic_and_partition",
"(",
"payloads",
")",
")",
"]",
")"
] | Encode an OffsetCommitRequest struct
Arguments:
group: string, the consumer group you are committing offsets for
payloads: list of OffsetCommitRequestPayload | [
"Encode",
"an",
"OffsetCommitRequest",
"struct",
"Arguments",
":",
"group",
":",
"string",
"the",
"consumer",
"group",
"you",
"are",
"committing",
"offsets",
"for",
"payloads",
":",
"list",
"of",
"OffsetCommitRequestPayload"
] | python | train |
yt-project/unyt | unyt/array.py | https://github.com/yt-project/unyt/blob/7a4eafc229f83784f4c63d639aee554f9a6b1ca0/unyt/array.py#L1738-L1772 | def copy(self, order="C"):
"""
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :func:`numpy.copy`
are very similar, but have different default values for their
order= arguments.)
See also
--------
numpy.copy
numpy.copyto
Examples
--------
>>> from unyt import km
>>> x = [[1,2,3],[4,5,6]] * km
>>> y = x.copy()
>>> x.fill(0)
>>> print(x)
[[0 0 0]
[0 0 0]] km
>>> print(y)
[[1 2 3]
[4 5 6]] km
"""
return type(self)(np.copy(np.asarray(self)), self.units) | [
"def",
"copy",
"(",
"self",
",",
"order",
"=",
"\"C\"",
")",
":",
"return",
"type",
"(",
"self",
")",
"(",
"np",
".",
"copy",
"(",
"np",
".",
"asarray",
"(",
"self",
")",
")",
",",
"self",
".",
"units",
")"
] | Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :func:`numpy.copy`
are very similar, but have different default values for their
order= arguments.)
See also
--------
numpy.copy
numpy.copyto
Examples
--------
>>> from unyt import km
>>> x = [[1,2,3],[4,5,6]] * km
>>> y = x.copy()
>>> x.fill(0)
>>> print(x)
[[0 0 0]
[0 0 0]] km
>>> print(y)
[[1 2 3]
[4 5 6]] km | [
"Return",
"a",
"copy",
"of",
"the",
"array",
"."
] | python | train |
edeposit/edeposit.amqp.ftp | src/edeposit/amqp/ftp/settings.py | https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/settings.py#L121-L133 | def conf_merger(user_dict, variable):
"""
Merge global configuration with user's personal configuration.
Global configuration has always higher priority.
"""
if variable not in globals().keys():
raise NameError("Unknown variable '%s'." % variable)
if variable not in user_dict:
return globals()[variable]
return globals()[variable] and user_dict[variable] | [
"def",
"conf_merger",
"(",
"user_dict",
",",
"variable",
")",
":",
"if",
"variable",
"not",
"in",
"globals",
"(",
")",
".",
"keys",
"(",
")",
":",
"raise",
"NameError",
"(",
"\"Unknown variable '%s'.\"",
"%",
"variable",
")",
"if",
"variable",
"not",
"in",
"user_dict",
":",
"return",
"globals",
"(",
")",
"[",
"variable",
"]",
"return",
"globals",
"(",
")",
"[",
"variable",
"]",
"and",
"user_dict",
"[",
"variable",
"]"
] | Merge global configuration with user's personal configuration.
Global configuration has always higher priority. | [
"Merge",
"global",
"configuration",
"with",
"user",
"s",
"personal",
"configuration",
"."
] | python | train |
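Because the merge above uses the boolean `and`, a falsy global value always wins over the user's setting. A tiny stand-alone illustration of that behaviour with invented setting names (not the real edeposit configuration):

def merge(global_value, user_dict, name):
    # mirrors conf_merger: the user value is only honoured when the global value is truthy
    if name not in user_dict:
        return global_value
    return global_value and user_dict[name]

print(merge(True, {}, "ALLOW_UPLOAD"))                        # True  (global default)
print(merge(True, {"ALLOW_UPLOAD": False}, "ALLOW_UPLOAD"))   # False (user disabled it)
print(merge(False, {"ALLOW_UPLOAD": True}, "ALLOW_UPLOAD"))   # False (falsy global wins)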
jobovy/galpy | galpy/orbit/OrbitTop.py | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/OrbitTop.py#L372-L394 | def y(self,*args,**kwargs):
"""
NAME:
y
PURPOSE:
return y
INPUT:
t - (optional) time at which to get y
ro= (Object-wide default) physical scale for distances to use to convert
use_physical= use to override Object-wide default for using a physical scale for output
OUTPUT:
y(t)
HISTORY:
2010-09-21 - Written - Bovy (NYU)
"""
thiso= self(*args,**kwargs)
if not len(thiso.shape) == 2: thiso= thiso.reshape((thiso.shape[0],1))
if len(thiso[:,0]) != 4 and len(thiso[:,0]) != 6:
raise AttributeError("orbit must track azimuth to use x()")
elif len(thiso[:,0]) == 4:
return thiso[0,:]*nu.sin(thiso[3,:])
else:
return thiso[0,:]*nu.sin(thiso[5,:]) | [
"def",
"y",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"thiso",
"=",
"self",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"len",
"(",
"thiso",
".",
"shape",
")",
"==",
"2",
":",
"thiso",
"=",
"thiso",
".",
"reshape",
"(",
"(",
"thiso",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
")",
"if",
"len",
"(",
"thiso",
"[",
":",
",",
"0",
"]",
")",
"!=",
"4",
"and",
"len",
"(",
"thiso",
"[",
":",
",",
"0",
"]",
")",
"!=",
"6",
":",
"raise",
"AttributeError",
"(",
"\"orbit must track azimuth to use x()\"",
")",
"elif",
"len",
"(",
"thiso",
"[",
":",
",",
"0",
"]",
")",
"==",
"4",
":",
"return",
"thiso",
"[",
"0",
",",
":",
"]",
"*",
"nu",
".",
"sin",
"(",
"thiso",
"[",
"3",
",",
":",
"]",
")",
"else",
":",
"return",
"thiso",
"[",
"0",
",",
":",
"]",
"*",
"nu",
".",
"sin",
"(",
"thiso",
"[",
"5",
",",
":",
"]",
")"
] | NAME:
y
PURPOSE:
return y
INPUT:
t - (optional) time at which to get y
ro= (Object-wide default) physical scale for distances to use to convert
use_physical= use to override Object-wide default for using a physical scale for output
OUTPUT:
y(t)
HISTORY:
2010-09-21 - Written - Bovy (NYU) | [
"NAME",
":",
"y",
"PURPOSE",
":",
"return",
"y",
"INPUT",
":",
"t",
"-",
"(",
"optional",
")",
"time",
"at",
"which",
"to",
"get",
"y",
"ro",
"=",
"(",
"Object",
"-",
"wide",
"default",
")",
"physical",
"scale",
"for",
"distances",
"to",
"use",
"to",
"convert",
"use_physical",
"=",
"use",
"to",
"override",
"Object",
"-",
"wide",
"default",
"for",
"using",
"a",
"physical",
"scale",
"for",
"output",
"OUTPUT",
":",
"y",
"(",
"t",
")",
"HISTORY",
":",
"2010",
"-",
"09",
"-",
"21",
"-",
"Written",
"-",
"Bovy",
"(",
"NYU",
")"
] | python | train |
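A usage sketch for y() above, assuming galpy with its bundled MWPotential2014; the initial conditions are arbitrary. The orbit must include the azimuth phi, otherwise the method raises the AttributeError shown in the code:

import numpy as np
from galpy.orbit import Orbit
from galpy.potential import MWPotential2014

ts = np.linspace(0., 10., 1001)            # times in galpy's internal units
o = Orbit([1., 0.1, 1.1, 0., 0.05, 0.])    # [R, vR, vT, z, vz, phi] -- phi is tracked
o.integrate(ts, MWPotential2014)
print(o.y(5.))                             # rectangular y coordinate at t = 5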
mfitzp/padua | padua/normalization.py | https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/normalization.py#L4-L22 | def subtract_column_median(df, prefix='Intensity '):
"""
Apply column-wise normalisation to expression columns.
Default is median transform to expression columns beginning with Intensity
:param df:
:param prefix: The column prefix for expression columns
:return:
"""
df = df.copy()
df.replace([np.inf, -np.inf], np.nan, inplace=True)
mask = [l.startswith(prefix) for l in df.columns.values]
df.iloc[:, mask] = df.iloc[:, mask] - df.iloc[:, mask].median(axis=0)
return df | [
"def",
"subtract_column_median",
"(",
"df",
",",
"prefix",
"=",
"'Intensity '",
")",
":",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"df",
".",
"replace",
"(",
"[",
"np",
".",
"inf",
",",
"-",
"np",
".",
"inf",
"]",
",",
"np",
".",
"nan",
",",
"inplace",
"=",
"True",
")",
"mask",
"=",
"[",
"l",
".",
"startswith",
"(",
"prefix",
")",
"for",
"l",
"in",
"df",
".",
"columns",
".",
"values",
"]",
"df",
".",
"iloc",
"[",
":",
",",
"mask",
"]",
"=",
"df",
".",
"iloc",
"[",
":",
",",
"mask",
"]",
"-",
"df",
".",
"iloc",
"[",
":",
",",
"mask",
"]",
".",
"median",
"(",
"axis",
"=",
"0",
")",
"return",
"df"
] | Apply column-wise normalisation to expression columns.
Default is median transform to expression columns beginning with Intensity
:param df:
:param prefix: The column prefix for expression columns
:return: | [
"Apply",
"column",
"-",
"wise",
"normalisation",
"to",
"expression",
"columns",
"."
] | python | train |
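A small demonstration of the median normalisation above on a toy DataFrame; it assumes padua is installed and the function is importable from padua.normalization (the module shown in the record), and the column names simply follow the 'Intensity ' prefix convention:

import numpy as np
import pandas as pd
from padua.normalization import subtract_column_median  # assumes padua is installed

df = pd.DataFrame({
    'Intensity A': [1.0, 2.0, 3.0],
    'Intensity B': [10.0, 20.0, np.inf],   # inf is replaced by NaN before the median
    'Gene names': ['x', 'y', 'z'],         # non-expression column is left untouched
})
out = subtract_column_median(df)
print(out[['Intensity A', 'Intensity B']])  # each expression column centred on its median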
Jajcus/pyxmpp2 | pyxmpp2/ext/disco.py | https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/disco.py#L501-L517 | def add_item(self,jid,node=None,name=None,action=None):
"""Add a new item to the `DiscoItems` object.
:Parameters:
- `jid`: item JID.
- `node`: item node name.
- `name`: item name.
- `action`: action for a "disco push".
:Types:
- `jid`: `pyxmpp.JID`
- `node`: `unicode`
- `name`: `unicode`
- `action`: `unicode`
:returns: the item created.
:returntype: `DiscoItem`."""
return DiscoItem(self,jid,node,name,action) | [
"def",
"add_item",
"(",
"self",
",",
"jid",
",",
"node",
"=",
"None",
",",
"name",
"=",
"None",
",",
"action",
"=",
"None",
")",
":",
"return",
"DiscoItem",
"(",
"self",
",",
"jid",
",",
"node",
",",
"name",
",",
"action",
")"
] | Add a new item to the `DiscoItems` object.
:Parameters:
- `jid`: item JID.
- `node`: item node name.
- `name`: item name.
- `action`: action for a "disco push".
:Types:
- `jid`: `pyxmpp.JID`
- `node`: `unicode`
- `name`: `unicode`
- `action`: `unicode`
:returns: the item created.
:returntype: `DiscoItem`. | [
"Add",
"a",
"new",
"item",
"to",
"the",
"DiscoItems",
"object",
"."
] | python | valid |
wavycloud/pyboto3 | pyboto3/elasticbeanstalk.py | https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/elasticbeanstalk.py#L585-L755 | def create_environment(ApplicationName=None, EnvironmentName=None, GroupName=None, Description=None, CNAMEPrefix=None, Tier=None, Tags=None, VersionLabel=None, TemplateName=None, SolutionStackName=None, PlatformArn=None, OptionSettings=None, OptionsToRemove=None):
"""
Launches an environment for the specified application using the specified configuration.
See also: AWS API Documentation
Examples
The following operation creates a new environment for version v1 of a java application named my-app:
Expected Output:
:example: response = client.create_environment(
ApplicationName='string',
EnvironmentName='string',
GroupName='string',
Description='string',
CNAMEPrefix='string',
Tier={
'Name': 'string',
'Type': 'string',
'Version': 'string'
},
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
VersionLabel='string',
TemplateName='string',
SolutionStackName='string',
PlatformArn='string',
OptionSettings=[
{
'ResourceName': 'string',
'Namespace': 'string',
'OptionName': 'string',
'Value': 'string'
},
],
OptionsToRemove=[
{
'ResourceName': 'string',
'Namespace': 'string',
'OptionName': 'string'
},
]
)
:type ApplicationName: string
:param ApplicationName: [REQUIRED]
The name of the application that contains the version to be deployed.
If no application is found with this name, CreateEnvironment returns an InvalidParameterValue error.
:type EnvironmentName: string
:param EnvironmentName: A unique name for the deployment environment. Used in the application URL.
Constraint: Must be from 4 to 40 characters in length. The name can contain only letters, numbers, and hyphens. It cannot start or end with a hyphen. This name must be unique in your account. If the specified name already exists, AWS Elastic Beanstalk returns an InvalidParameterValue error.
Default: If the CNAME parameter is not specified, the environment name becomes part of the CNAME, and therefore part of the visible URL for your application.
:type GroupName: string
:param GroupName: The name of the group to which the target environment belongs. Specify a group name only if the environment's name is specified in an environment manifest and not with the environment name parameter. See Environment Manifest (env.yaml) for details.
:type Description: string
:param Description: Describes this environment.
:type CNAMEPrefix: string
:param CNAMEPrefix: If specified, the environment attempts to use this value as the prefix for the CNAME. If not specified, the CNAME is generated automatically by appending a random alphanumeric string to the environment name.
:type Tier: dict
:param Tier: This specifies the tier to use for creating this environment.
Name (string) --The name of this environment tier.
Type (string) --The type of this environment tier.
Version (string) --The version of this environment tier.
:type Tags: list
:param Tags: This specifies the tags applied to resources in the environment.
(dict) --Describes a tag applied to a resource in an environment.
Key (string) --The key of the tag.
Value (string) --The value of the tag.
:type VersionLabel: string
:param VersionLabel: The name of the application version to deploy.
If the specified application has no associated application versions, AWS Elastic Beanstalk UpdateEnvironment returns an InvalidParameterValue error.
Default: If not specified, AWS Elastic Beanstalk attempts to launch the sample application in the container.
:type TemplateName: string
:param TemplateName: The name of the configuration template to use in deployment. If no configuration template is found with this name, AWS Elastic Beanstalk returns an InvalidParameterValue error.
:type SolutionStackName: string
:param SolutionStackName: This is an alternative to specifying a template name. If specified, AWS Elastic Beanstalk sets the configuration values to the default values associated with the specified solution stack.
:type PlatformArn: string
:param PlatformArn: The ARN of the custom platform.
:type OptionSettings: list
:param OptionSettings: If specified, AWS Elastic Beanstalk sets the specified configuration options to the requested value in the configuration set for the new environment. These override the values obtained from the solution stack or the configuration template.
(dict) --A specification identifying an individual configuration option along with its current value. For a list of possible option values, go to Option Values in the AWS Elastic Beanstalk Developer Guide .
ResourceName (string) --A unique resource name for a time-based scaling configuration option.
Namespace (string) --A unique namespace identifying the option's associated AWS resource.
OptionName (string) --The name of the configuration option.
Value (string) --The current value for the configuration option.
:type OptionsToRemove: list
:param OptionsToRemove: A list of custom user-defined configuration options to remove from the configuration set for this new environment.
(dict) --A specification identifying an individual configuration option.
ResourceName (string) --A unique resource name for a time-based scaling configuration option.
Namespace (string) --A unique namespace identifying the option's associated AWS resource.
OptionName (string) --The name of the configuration option.
:rtype: dict
:return: {
'EnvironmentName': 'string',
'EnvironmentId': 'string',
'ApplicationName': 'string',
'VersionLabel': 'string',
'SolutionStackName': 'string',
'PlatformArn': 'string',
'TemplateName': 'string',
'Description': 'string',
'EndpointURL': 'string',
'CNAME': 'string',
'DateCreated': datetime(2015, 1, 1),
'DateUpdated': datetime(2015, 1, 1),
'Status': 'Launching'|'Updating'|'Ready'|'Terminating'|'Terminated',
'AbortableOperationInProgress': True|False,
'Health': 'Green'|'Yellow'|'Red'|'Grey',
'HealthStatus': 'NoData'|'Unknown'|'Pending'|'Ok'|'Info'|'Warning'|'Degraded'|'Severe',
'Resources': {
'LoadBalancer': {
'LoadBalancerName': 'string',
'Domain': 'string',
'Listeners': [
{
'Protocol': 'string',
'Port': 123
},
]
}
},
'Tier': {
'Name': 'string',
'Type': 'string',
'Version': 'string'
},
'EnvironmentLinks': [
{
'LinkName': 'string',
'EnvironmentName': 'string'
},
]
}
:returns:
Launching : Environment is in the process of initial deployment.
Updating : Environment is in the process of updating its configuration settings or application version.
Ready : Environment is available to have an action performed on it, such as update or terminate.
Terminating : Environment is in the shut-down process.
Terminated : Environment is not running.
"""
pass | [
"def",
"create_environment",
"(",
"ApplicationName",
"=",
"None",
",",
"EnvironmentName",
"=",
"None",
",",
"GroupName",
"=",
"None",
",",
"Description",
"=",
"None",
",",
"CNAMEPrefix",
"=",
"None",
",",
"Tier",
"=",
"None",
",",
"Tags",
"=",
"None",
",",
"VersionLabel",
"=",
"None",
",",
"TemplateName",
"=",
"None",
",",
"SolutionStackName",
"=",
"None",
",",
"PlatformArn",
"=",
"None",
",",
"OptionSettings",
"=",
"None",
",",
"OptionsToRemove",
"=",
"None",
")",
":",
"pass"
] | Launches an environment for the specified application using the specified configuration.
See also: AWS API Documentation
Examples
The following operation creates a new environment for version v1 of a java application named my-app:
Expected Output:
:example: response = client.create_environment(
ApplicationName='string',
EnvironmentName='string',
GroupName='string',
Description='string',
CNAMEPrefix='string',
Tier={
'Name': 'string',
'Type': 'string',
'Version': 'string'
},
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
VersionLabel='string',
TemplateName='string',
SolutionStackName='string',
PlatformArn='string',
OptionSettings=[
{
'ResourceName': 'string',
'Namespace': 'string',
'OptionName': 'string',
'Value': 'string'
},
],
OptionsToRemove=[
{
'ResourceName': 'string',
'Namespace': 'string',
'OptionName': 'string'
},
]
)
:type ApplicationName: string
:param ApplicationName: [REQUIRED]
The name of the application that contains the version to be deployed.
If no application is found with this name, CreateEnvironment returns an InvalidParameterValue error.
:type EnvironmentName: string
:param EnvironmentName: A unique name for the deployment environment. Used in the application URL.
Constraint: Must be from 4 to 40 characters in length. The name can contain only letters, numbers, and hyphens. It cannot start or end with a hyphen. This name must be unique in your account. If the specified name already exists, AWS Elastic Beanstalk returns an InvalidParameterValue error.
Default: If the CNAME parameter is not specified, the environment name becomes part of the CNAME, and therefore part of the visible URL for your application.
:type GroupName: string
:param GroupName: The name of the group to which the target environment belongs. Specify a group name only if the environment's name is specified in an environment manifest and not with the environment name parameter. See Environment Manifest (env.yaml) for details.
:type Description: string
:param Description: Describes this environment.
:type CNAMEPrefix: string
:param CNAMEPrefix: If specified, the environment attempts to use this value as the prefix for the CNAME. If not specified, the CNAME is generated automatically by appending a random alphanumeric string to the environment name.
:type Tier: dict
:param Tier: This specifies the tier to use for creating this environment.
Name (string) --The name of this environment tier.
Type (string) --The type of this environment tier.
Version (string) --The version of this environment tier.
:type Tags: list
:param Tags: This specifies the tags applied to resources in the environment.
(dict) --Describes a tag applied to a resource in an environment.
Key (string) --The key of the tag.
Value (string) --The value of the tag.
:type VersionLabel: string
:param VersionLabel: The name of the application version to deploy.
If the specified application has no associated application versions, AWS Elastic Beanstalk UpdateEnvironment returns an InvalidParameterValue error.
Default: If not specified, AWS Elastic Beanstalk attempts to launch the sample application in the container.
:type TemplateName: string
:param TemplateName: The name of the configuration template to use in deployment. If no configuration template is found with this name, AWS Elastic Beanstalk returns an InvalidParameterValue error.
:type SolutionStackName: string
:param SolutionStackName: This is an alternative to specifying a template name. If specified, AWS Elastic Beanstalk sets the configuration values to the default values associated with the specified solution stack.
:type PlatformArn: string
:param PlatformArn: The ARN of the custom platform.
:type OptionSettings: list
:param OptionSettings: If specified, AWS Elastic Beanstalk sets the specified configuration options to the requested value in the configuration set for the new environment. These override the values obtained from the solution stack or the configuration template.
(dict) --A specification identifying an individual configuration option along with its current value. For a list of possible option values, go to Option Values in the AWS Elastic Beanstalk Developer Guide .
ResourceName (string) --A unique resource name for a time-based scaling configuration option.
Namespace (string) --A unique namespace identifying the option's associated AWS resource.
OptionName (string) --The name of the configuration option.
Value (string) --The current value for the configuration option.
:type OptionsToRemove: list
:param OptionsToRemove: A list of custom user-defined configuration options to remove from the configuration set for this new environment.
(dict) --A specification identifying an individual configuration option.
ResourceName (string) --A unique resource name for a time-based scaling configuration option.
Namespace (string) --A unique namespace identifying the option's associated AWS resource.
OptionName (string) --The name of the configuration option.
:rtype: dict
:return: {
'EnvironmentName': 'string',
'EnvironmentId': 'string',
'ApplicationName': 'string',
'VersionLabel': 'string',
'SolutionStackName': 'string',
'PlatformArn': 'string',
'TemplateName': 'string',
'Description': 'string',
'EndpointURL': 'string',
'CNAME': 'string',
'DateCreated': datetime(2015, 1, 1),
'DateUpdated': datetime(2015, 1, 1),
'Status': 'Launching'|'Updating'|'Ready'|'Terminating'|'Terminated',
'AbortableOperationInProgress': True|False,
'Health': 'Green'|'Yellow'|'Red'|'Grey',
'HealthStatus': 'NoData'|'Unknown'|'Pending'|'Ok'|'Info'|'Warning'|'Degraded'|'Severe',
'Resources': {
'LoadBalancer': {
'LoadBalancerName': 'string',
'Domain': 'string',
'Listeners': [
{
'Protocol': 'string',
'Port': 123
},
]
}
},
'Tier': {
'Name': 'string',
'Type': 'string',
'Version': 'string'
},
'EnvironmentLinks': [
{
'LinkName': 'string',
'EnvironmentName': 'string'
},
]
}
:returns:
Launching : Environment is in the process of initial deployment.
Updating : Environment is in the process of updating its configuration settings or application version.
Ready : Environment is available to have an action performed on it, such as update or terminate.
Terminating : Environment is in the shut-down process.
Terminated : Environment is not running. | [
"Launches",
"an",
"environment",
"for",
"the",
"specified",
"application",
"using",
"the",
"specified",
"configuration",
".",
"See",
"also",
":",
"AWS",
"API",
"Documentation",
"Examples",
"The",
"following",
"operation",
"creates",
"a",
"new",
"environment",
"for",
"version",
"v1",
"of",
"a",
"java",
"application",
"named",
"my",
"-",
"app",
":",
"Expected",
"Output",
":",
":",
"example",
":",
"response",
"=",
"client",
".",
"create_environment",
"(",
"ApplicationName",
"=",
"string",
"EnvironmentName",
"=",
"string",
"GroupName",
"=",
"string",
"Description",
"=",
"string",
"CNAMEPrefix",
"=",
"string",
"Tier",
"=",
"{",
"Name",
":",
"string",
"Type",
":",
"string",
"Version",
":",
"string",
"}",
"Tags",
"=",
"[",
"{",
"Key",
":",
"string",
"Value",
":",
"string",
"}",
"]",
"VersionLabel",
"=",
"string",
"TemplateName",
"=",
"string",
"SolutionStackName",
"=",
"string",
"PlatformArn",
"=",
"string",
"OptionSettings",
"=",
"[",
"{",
"ResourceName",
":",
"string",
"Namespace",
":",
"string",
"OptionName",
":",
"string",
"Value",
":",
"string",
"}",
"]",
"OptionsToRemove",
"=",
"[",
"{",
"ResourceName",
":",
"string",
"Namespace",
":",
"string",
"OptionName",
":",
"string",
"}",
"]",
")",
":",
"type",
"ApplicationName",
":",
"string",
":",
"param",
"ApplicationName",
":",
"[",
"REQUIRED",
"]",
"The",
"name",
"of",
"the",
"application",
"that",
"contains",
"the",
"version",
"to",
"be",
"deployed",
".",
"If",
"no",
"application",
"is",
"found",
"with",
"this",
"name",
"CreateEnvironment",
"returns",
"an",
"InvalidParameterValue",
"error",
"."
] | python | train |
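pyboto3 only provides documented stubs; the actual call goes through boto3. A sketch of the same create_environment request — it needs valid AWS credentials and region configuration, and the application, environment and solution-stack names below are placeholders taken from the example in the docstring:

import boto3

eb = boto3.client('elasticbeanstalk')      # requires configured AWS credentials/region
response = eb.create_environment(
    ApplicationName='my-app',
    EnvironmentName='my-env',
    CNAMEPrefix='my-app',
    VersionLabel='v1',
    SolutionStackName='64bit Amazon Linux 2015.03 v2.0.0 running Tomcat 8 Java 8',
)
print(response['Status'])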
juju/juju-bundlelib | jujubundlelib/validation.py | https://github.com/juju/juju-bundlelib/blob/c2efa614f53675ed9526027776448bfbb0454ca6/jujubundlelib/validation.py#L37-L62 | def validate(bundle):
"""Validate a bundle object and all of its components.
The bundle must be passed as a YAML decoded object.
Return a list of bundle errors, or an empty list if the bundle is valid.
"""
errors = []
add_error = errors.append
# Check that the bundle sections are well formed.
series, services, machines, relations = _validate_sections(
bundle, add_error)
# If there are errors already, there is no point in proceeding with the
# validation process.
if errors:
return errors
# Validate each individual section.
_validate_series(series, 'bundle', add_error)
_validate_services(services, machines, add_error)
_validate_machines(machines, add_error)
_validate_relations(relations, services, add_error)
# Return all the collected errors.
return errors | [
"def",
"validate",
"(",
"bundle",
")",
":",
"errors",
"=",
"[",
"]",
"add_error",
"=",
"errors",
".",
"append",
"# Check that the bundle sections are well formed.",
"series",
",",
"services",
",",
"machines",
",",
"relations",
"=",
"_validate_sections",
"(",
"bundle",
",",
"add_error",
")",
"# If there are errors already, there is no point in proceeding with the",
"# validation process.",
"if",
"errors",
":",
"return",
"errors",
"# Validate each individual section.",
"_validate_series",
"(",
"series",
",",
"'bundle'",
",",
"add_error",
")",
"_validate_services",
"(",
"services",
",",
"machines",
",",
"add_error",
")",
"_validate_machines",
"(",
"machines",
",",
"add_error",
")",
"_validate_relations",
"(",
"relations",
",",
"services",
",",
"add_error",
")",
"# Return all the collected errors.",
"return",
"errors"
] | Validate a bundle object and all of its components.
The bundle must be passed as a YAML decoded object.
Return a list of bundle errors, or an empty list if the bundle is valid. | [
"Validate",
"a",
"bundle",
"object",
"and",
"all",
"of",
"its",
"components",
"."
] | python | train |
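A usage sketch for validate() above on a minimal, hand-written bundle; the charm URLs and service names are made up, so the exact error list depends on the library's checks:

from jujubundlelib import validation

bundle = {
    'services': {
        'wordpress': {'charm': 'cs:trusty/wordpress-2', 'num_units': 1},
        'mysql': {'charm': 'cs:trusty/mysql-42', 'num_units': 1},
    },
    'relations': [['wordpress:db', 'mysql:db']],
}
errors = validation.validate(bundle)
print(errors or 'bundle looks valid')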
stevearc/dql | dql/cli.py | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/cli.py#L254-L260 | def save_config(self):
""" Save the conf file """
if not os.path.exists(self._conf_dir):
os.makedirs(self._conf_dir)
conf_file = os.path.join(self._conf_dir, "dql.json")
with open(conf_file, "w") as ofile:
json.dump(self.conf, ofile, indent=2) | [
"def",
"save_config",
"(",
"self",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"_conf_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"self",
".",
"_conf_dir",
")",
"conf_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_conf_dir",
",",
"\"dql.json\"",
")",
"with",
"open",
"(",
"conf_file",
",",
"\"w\"",
")",
"as",
"ofile",
":",
"json",
".",
"dump",
"(",
"self",
".",
"conf",
",",
"ofile",
",",
"indent",
"=",
"2",
")"
] | Save the conf file | [
"Save",
"the",
"conf",
"file"
] | python | train |
mitsei/dlkit | dlkit/json_/hierarchy/sessions.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/hierarchy/sessions.py#L978-L1001 | def can_create_hierarchy_with_record_types(self, hierarchy_record_types):
"""Tests if this user can create a single ``Hierarchy`` using the desired record types.
While ``HierarchyManager.getHierarchyRecordTypes()`` can be used
to examine which records are supported, this method tests which
record(s) are required for creating a specific ``Hierarchy``.
Providing an empty array tests if a ``Hierarchy`` can be created
with no records.
arg: hierarchy_record_types (osid.type.Type[]): array of
hierarchy record types
return: (boolean) - ``true`` if ``Hierarchy`` creation using the
specified ``Types`` is supported, ``false`` otherwise
raise: NullArgument - ``hierarchy_record_types`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinAdminSession.can_create_bin_with_record_types
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
if self._catalog_session is not None:
return self._catalog_session.can_create_catalog_with_record_types(catalog_record_types=hierarchy_record_types)
return True | [
"def",
"can_create_hierarchy_with_record_types",
"(",
"self",
",",
"hierarchy_record_types",
")",
":",
"# Implemented from template for",
"# osid.resource.BinAdminSession.can_create_bin_with_record_types",
"# NOTE: It is expected that real authentication hints will be",
"# handled in a service adapter above the pay grade of this impl.",
"if",
"self",
".",
"_catalog_session",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_catalog_session",
".",
"can_create_catalog_with_record_types",
"(",
"catalog_record_types",
"=",
"hierarchy_record_types",
")",
"return",
"True"
] | Tests if this user can create a single ``Hierarchy`` using the desired record types.
While ``HierarchyManager.getHierarchyRecordTypes()`` can be used
to examine which records are supported, this method tests which
record(s) are required for creating a specific ``Hierarchy``.
Providing an empty array tests if a ``Hierarchy`` can be created
with no records.
arg: hierarchy_record_types (osid.type.Type[]): array of
hierarchy record types
return: (boolean) - ``true`` if ``Hierarchy`` creation using the
specified ``Types`` is supported, ``false`` otherwise
raise: NullArgument - ``hierarchy_record_types`` is ``null``
*compliance: mandatory -- This method must be implemented.* | [
"Tests",
"if",
"this",
"user",
"can",
"create",
"a",
"single",
"Hierarchy",
"using",
"the",
"desired",
"record",
"types",
"."
] | python | train |
bitesofcode/projexui | projexui/widgets/xwalkthroughwidget/xwalkthrough.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xwalkthroughwidget/xwalkthrough.py#L133-L145 | def fromXml(xml):
"""
Creates a new slide from XML.
:return <XWalkthroughSlide>
"""
slide = XWalkthroughSlide(**xml.attrib)
# create the items
for xgraphic in xml:
slide.addItem(XWalkthroughItem.fromXml(xgraphic))
return slide | [
"def",
"fromXml",
"(",
"xml",
")",
":",
"slide",
"=",
"XWalkthroughSlide",
"(",
"*",
"*",
"xml",
".",
"attrib",
")",
"# create the items\r",
"for",
"xgraphic",
"in",
"xml",
":",
"slide",
".",
"addItem",
"(",
"XWalkthroughItem",
".",
"fromXml",
"(",
"xgraphic",
")",
")",
"return",
"slide"
] | Creates a new slide from XML.
:return <XWalkthroughSlide> | [
"Creates",
"a",
"new",
"slide",
"from",
"XML",
".",
":",
"return",
"<XWalkthroughSlide",
">"
] | python | train |
zhmcclient/python-zhmcclient | zhmcclient/_hba.py | https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_hba.py#L302-L331 | def reassign_port(self, port):
"""
Reassign this HBA to a new underlying :term:`FCP port`.
This method performs the HMC operation "Reassign Storage Adapter Port".
Authorization requirements:
* Object-access permission to the Partition containing this HBA.
* Object-access permission to the Adapter with the new Port.
* Task permission to the "Partition Details" task.
Parameters:
port (:class:`~zhmcclient.Port`): :term:`FCP port` to be used.
Raises:
:exc:`~zhmcclient.HTTPError`: See the HTTP status and reason codes of
operation "Reassign Storage Adapter Port" in the :term:`HMC API`
book.
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'adapter-port-uri': port.uri}
self.manager.session.post(
self._uri + '/operations/reassign-storage-adapter-port',
body=body)
self.properties.update(body) | [
"def",
"reassign_port",
"(",
"self",
",",
"port",
")",
":",
"body",
"=",
"{",
"'adapter-port-uri'",
":",
"port",
".",
"uri",
"}",
"self",
".",
"manager",
".",
"session",
".",
"post",
"(",
"self",
".",
"_uri",
"+",
"'/operations/reassign-storage-adapter-port'",
",",
"body",
"=",
"body",
")",
"self",
".",
"properties",
".",
"update",
"(",
"body",
")"
] | Reassign this HBA to a new underlying :term:`FCP port`.
This method performs the HMC operation "Reassign Storage Adapter Port".
Authorization requirements:
* Object-access permission to the Partition containing this HBA.
* Object-access permission to the Adapter with the new Port.
* Task permission to the "Partition Details" task.
Parameters:
port (:class:`~zhmcclient.Port`): :term:`FCP port` to be used.
Raises:
:exc:`~zhmcclient.HTTPError`: See the HTTP status and reason codes of
operation "Reassign Storage Adapter Port" in the :term:`HMC API`
book.
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError` | [
"Reassign",
"this",
"HBA",
"to",
"a",
"new",
"underlying",
":",
"term",
":",
"FCP",
"port",
"."
] | python | train |
reiinakano/xcessiv | xcessiv/functions.py | https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/functions.py#L16-L28 | def hash_file(path, block_size=65536):
"""Returns SHA256 checksum of a file
Args:
path (string): Absolute file path of file to hash
block_size (int, optional): Number of bytes to read per block
"""
sha256 = hashlib.sha256()
with open(path, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
sha256.update(block)
return sha256.hexdigest() | [
"def",
"hash_file",
"(",
"path",
",",
"block_size",
"=",
"65536",
")",
":",
"sha256",
"=",
"hashlib",
".",
"sha256",
"(",
")",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"f",
":",
"for",
"block",
"in",
"iter",
"(",
"lambda",
":",
"f",
".",
"read",
"(",
"block_size",
")",
",",
"b''",
")",
":",
"sha256",
".",
"update",
"(",
"block",
")",
"return",
"sha256",
".",
"hexdigest",
"(",
")"
] | Returns SHA256 checksum of a file
Args:
path (string): Absolute file path of file to hash
block_size (int, optional): Number of bytes to read per block | [
"Returns",
"SHA256",
"checksum",
"of",
"a",
"file"
] | python | train |
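A fully self-contained demonstration of hash_file above; the function body is repeated verbatim so the snippet runs without installing xcessiv:

import hashlib
import os
import tempfile

def hash_file(path, block_size=65536):
    sha256 = hashlib.sha256()
    with open(path, 'rb') as f:
        for block in iter(lambda: f.read(block_size), b''):
            sha256.update(block)
    return sha256.hexdigest()

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b'hello world\n')
    path = f.name

print(hash_file(path))   # 64-character hex SHA256 digest
os.remove(path)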
saulpw/visidata | visidata/vdtui.py | https://github.com/saulpw/visidata/blob/32771e0cea6c24fc7902683d14558391395c591f/visidata/vdtui.py#L2258-L2263 | def getMaxWidth(self, rows):
'Return the maximum length of any cell in column or its header.'
w = 0
if len(rows) > 0:
w = max(max(len(self.getDisplayValue(r)) for r in rows), len(self.name))+2
return max(w, len(self.name)) | [
"def",
"getMaxWidth",
"(",
"self",
",",
"rows",
")",
":",
"w",
"=",
"0",
"if",
"len",
"(",
"rows",
")",
">",
"0",
":",
"w",
"=",
"max",
"(",
"max",
"(",
"len",
"(",
"self",
".",
"getDisplayValue",
"(",
"r",
")",
")",
"for",
"r",
"in",
"rows",
")",
",",
"len",
"(",
"self",
".",
"name",
")",
")",
"+",
"2",
"return",
"max",
"(",
"w",
",",
"len",
"(",
"self",
".",
"name",
")",
")"
] | Return the maximum length of any cell in column or its header. | [
"Return",
"the",
"maximum",
"length",
"of",
"any",
"cell",
"in",
"column",
"or",
"its",
"header",
"."
] | python | train |
django-userena-ce/django-userena-ce | userena/decorators.py | https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/decorators.py#L9-L29 | def secure_required(view_func):
"""
Decorator to switch an url from http to https.
If a view is accessed through http and this decorator is applied to that
view, than it will return a permanent redirect to the secure (https)
version of the same view.
The decorator also must check that ``USERENA_USE_HTTPS`` is enabled. If
disabled, it should not redirect to https because the project doesn't
support it.
"""
def _wrapped_view(request, *args, **kwargs):
if not request.is_secure():
if getattr(settings, 'USERENA_USE_HTTPS', userena_settings.DEFAULT_USERENA_USE_HTTPS):
request_url = request.build_absolute_uri(request.get_full_path())
secure_url = request_url.replace('http://', 'https://')
return HttpResponsePermanentRedirect(secure_url)
return view_func(request, *args, **kwargs)
return wraps(view_func, assigned=available_attrs(view_func))(_wrapped_view) | [
"def",
"secure_required",
"(",
"view_func",
")",
":",
"def",
"_wrapped_view",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"request",
".",
"is_secure",
"(",
")",
":",
"if",
"getattr",
"(",
"settings",
",",
"'USERENA_USE_HTTPS'",
",",
"userena_settings",
".",
"DEFAULT_USERENA_USE_HTTPS",
")",
":",
"request_url",
"=",
"request",
".",
"build_absolute_uri",
"(",
"request",
".",
"get_full_path",
"(",
")",
")",
"secure_url",
"=",
"request_url",
".",
"replace",
"(",
"'http://'",
",",
"'https://'",
")",
"return",
"HttpResponsePermanentRedirect",
"(",
"secure_url",
")",
"return",
"view_func",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wraps",
"(",
"view_func",
",",
"assigned",
"=",
"available_attrs",
"(",
"view_func",
")",
")",
"(",
"_wrapped_view",
")"
] | Decorator to switch an url from http to https.
If a view is accessed through http and this decorator is applied to that
view, than it will return a permanent redirect to the secure (https)
version of the same view.
The decorator also must check that ``USERENA_USE_HTTPS`` is enabled. If
disabled, it should not redirect to https because the project doesn't
support it. | [
"Decorator",
"to",
"switch",
"an",
"url",
"from",
"http",
"to",
"https",
"."
] | python | train |
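A usage sketch for the decorator above inside a Django project's views module; it assumes a configured Django settings module with USERENA_USE_HTTPS enabled, otherwise plain-HTTP requests are passed straight through:

# views.py -- sketch only; needs a configured Django project with userena installed
from django.http import HttpResponse
from userena.decorators import secure_required

@secure_required
def signin(request):
    # an http:// request is answered with a permanent redirect to the https:// URL
    return HttpResponse("secure sign-in page")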
StackStorm/pybind | pybind/slxos/v17s_1_02/mpls_state/rsvp/sessions/psbs/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/mpls_state/rsvp/sessions/psbs/__init__.py#L254-L277 | def _set_session_type(self, v, load=False):
"""
Setter method for session_type, mapped from YANG variable /mpls_state/rsvp/sessions/psbs/session_type (session-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_session_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_session_type() directly.
YANG Description: Session type
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'session-type-none': {'value': 0}, u'merged-backup': {'value': 6}, u'ingress-detour': {'value': 1}, u'egress-backup': {'value': 7}, u'repaired-session': {'value': 8}, u'bypass-ingress': {'value': 9}, u'transit-detour': {'value': 2}, u'egress-detour': {'value': 4}, u'ingress-backup': {'value': 5}, u'merged-detour': {'value': 3}},), is_leaf=True, yang_name="session-type", rest_name="session-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='session-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """session_type must be of a type compatible with session-type""",
'defined-type': "brocade-mpls-operational:session-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'session-type-none': {'value': 0}, u'merged-backup': {'value': 6}, u'ingress-detour': {'value': 1}, u'egress-backup': {'value': 7}, u'repaired-session': {'value': 8}, u'bypass-ingress': {'value': 9}, u'transit-detour': {'value': 2}, u'egress-detour': {'value': 4}, u'ingress-backup': {'value': 5}, u'merged-detour': {'value': 3}},), is_leaf=True, yang_name="session-type", rest_name="session-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='session-type', is_config=False)""",
})
self.__session_type = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_session_type",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"RestrictedClassType",
"(",
"base_type",
"=",
"unicode",
",",
"restriction_type",
"=",
"\"dict_key\"",
",",
"restriction_arg",
"=",
"{",
"u'session-type-none'",
":",
"{",
"'value'",
":",
"0",
"}",
",",
"u'merged-backup'",
":",
"{",
"'value'",
":",
"6",
"}",
",",
"u'ingress-detour'",
":",
"{",
"'value'",
":",
"1",
"}",
",",
"u'egress-backup'",
":",
"{",
"'value'",
":",
"7",
"}",
",",
"u'repaired-session'",
":",
"{",
"'value'",
":",
"8",
"}",
",",
"u'bypass-ingress'",
":",
"{",
"'value'",
":",
"9",
"}",
",",
"u'transit-detour'",
":",
"{",
"'value'",
":",
"2",
"}",
",",
"u'egress-detour'",
":",
"{",
"'value'",
":",
"4",
"}",
",",
"u'ingress-backup'",
":",
"{",
"'value'",
":",
"5",
"}",
",",
"u'merged-detour'",
":",
"{",
"'value'",
":",
"3",
"}",
"}",
",",
")",
",",
"is_leaf",
"=",
"True",
",",
"yang_name",
"=",
"\"session-type\"",
",",
"rest_name",
"=",
"\"session-type\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-mpls-operational'",
",",
"defining_module",
"=",
"'brocade-mpls-operational'",
",",
"yang_type",
"=",
"'session-type'",
",",
"is_config",
"=",
"False",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"session_type must be of a type compatible with session-type\"\"\"",
",",
"'defined-type'",
":",
"\"brocade-mpls-operational:session-type\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'session-type-none': {'value': 0}, u'merged-backup': {'value': 6}, u'ingress-detour': {'value': 1}, u'egress-backup': {'value': 7}, u'repaired-session': {'value': 8}, u'bypass-ingress': {'value': 9}, u'transit-detour': {'value': 2}, u'egress-detour': {'value': 4}, u'ingress-backup': {'value': 5}, u'merged-detour': {'value': 3}},), is_leaf=True, yang_name=\"session-type\", rest_name=\"session-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='session-type', is_config=False)\"\"\"",
",",
"}",
")",
"self",
".",
"__session_type",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | Setter method for session_type, mapped from YANG variable /mpls_state/rsvp/sessions/psbs/session_type (session-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_session_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_session_type() directly.
YANG Description: Session type | [
"Setter",
"method",
"for",
"session_type",
"mapped",
"from",
"YANG",
"variable",
"/",
"mpls_state",
"/",
"rsvp",
"/",
"sessions",
"/",
"psbs",
"/",
"session_type",
"(",
"session",
"-",
"type",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_session_type",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_session_type",
"()",
"directly",
"."
] | python | train |
MrYsLab/PyMata | PyMata/pymata_command_handler.py | https://github.com/MrYsLab/PyMata/blob/7e0ec34670b5a0d3d6b74bcbe4f3808c845cc429/PyMata/pymata_command_handler.py#L704-L730 | def i2c_reply(self, data):
"""
This method receives replies to i2c_read requests. It stores the data for each i2c device
address in a dictionary called i2c_map. The data is retrieved via a call to i2c_get_read_data()
in pymata.py
It a callback was specified in pymata.i2c_read, the raw data is sent through the callback
:param data: raw data returned from i2c device
"""
reply_data = []
address = (data[0] & 0x7f) + (data[1] << 7)
register = data[2] & 0x7f + data[3] << 7
reply_data.append(register)
for i in range(4, len(data), 2):
data_item = (data[i] & 0x7f) + (data[i + 1] << 7)
reply_data.append(data_item)
# retrieve the data entry for this address from the i2c map
if address in self.i2c_map:
i2c_data = self.i2c_map.get(address, None)
i2c_data[1] = reply_data
self.i2c_map[address] = i2c_data
# is there a call back for this entry?
# if yes, return a list of bytes through the callback
if i2c_data[0] is not None:
i2c_data[0]([self.pymata.I2C, address, reply_data]) | [
"def",
"i2c_reply",
"(",
"self",
",",
"data",
")",
":",
"reply_data",
"=",
"[",
"]",
"address",
"=",
"(",
"data",
"[",
"0",
"]",
"&",
"0x7f",
")",
"+",
"(",
"data",
"[",
"1",
"]",
"<<",
"7",
")",
"register",
"=",
"data",
"[",
"2",
"]",
"&",
"0x7f",
"+",
"data",
"[",
"3",
"]",
"<<",
"7",
"reply_data",
".",
"append",
"(",
"register",
")",
"for",
"i",
"in",
"range",
"(",
"4",
",",
"len",
"(",
"data",
")",
",",
"2",
")",
":",
"data_item",
"=",
"(",
"data",
"[",
"i",
"]",
"&",
"0x7f",
")",
"+",
"(",
"data",
"[",
"i",
"+",
"1",
"]",
"<<",
"7",
")",
"reply_data",
".",
"append",
"(",
"data_item",
")",
"# retrieve the data entry for this address from the i2c map",
"if",
"address",
"in",
"self",
".",
"i2c_map",
":",
"i2c_data",
"=",
"self",
".",
"i2c_map",
".",
"get",
"(",
"address",
",",
"None",
")",
"i2c_data",
"[",
"1",
"]",
"=",
"reply_data",
"self",
".",
"i2c_map",
"[",
"address",
"]",
"=",
"i2c_data",
"# is there a call back for this entry?",
"# if yes, return a list of bytes through the callback",
"if",
"i2c_data",
"[",
"0",
"]",
"is",
"not",
"None",
":",
"i2c_data",
"[",
"0",
"]",
"(",
"[",
"self",
".",
"pymata",
".",
"I2C",
",",
"address",
",",
"reply_data",
"]",
")"
] | This method receives replies to i2c_read requests. It stores the data for each i2c device
address in a dictionary called i2c_map. The data is retrieved via a call to i2c_get_read_data()
in pymata.py
        If a callback was specified in pymata.i2c_read, the raw data is sent through the callback
:param data: raw data returned from i2c device | [
"This",
"method",
"receives",
"replies",
"to",
"i2c_read",
"requests",
".",
"It",
"stores",
"the",
"data",
"for",
"each",
"i2c",
"device",
"address",
"in",
"a",
"dictionary",
"called",
"i2c_map",
".",
"The",
"data",
"is",
"retrieved",
"via",
"a",
"call",
"to",
"i2c_get_read_data",
"()",
"in",
"pymata",
".",
"py",
"It",
"a",
"callback",
"was",
"specified",
"in",
"pymata",
".",
"i2c_read",
"the",
"raw",
"data",
"is",
"sent",
"through",
"the",
"callback"
] | python | valid |
SatelliteQE/nailgun | nailgun/entities.py | https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entities.py#L7346-L7351 | def update_payload(self, fields=None):
"""Convert ``sync_date`` to a string if datetime object provided."""
data = super(SyncPlan, self).update_payload(fields)
if isinstance(data.get('sync_date'), datetime):
data['sync_date'] = data['sync_date'].strftime('%Y-%m-%d %H:%M:%S')
return data | [
"def",
"update_payload",
"(",
"self",
",",
"fields",
"=",
"None",
")",
":",
"data",
"=",
"super",
"(",
"SyncPlan",
",",
"self",
")",
".",
"update_payload",
"(",
"fields",
")",
"if",
"isinstance",
"(",
"data",
".",
"get",
"(",
"'sync_date'",
")",
",",
"datetime",
")",
":",
"data",
"[",
"'sync_date'",
"]",
"=",
"data",
"[",
"'sync_date'",
"]",
".",
"strftime",
"(",
"'%Y-%m-%d %H:%M:%S'",
")",
"return",
"data"
] | Convert ``sync_date`` to a string if datetime object provided. | [
"Convert",
"sync_date",
"to",
"a",
"string",
"if",
"datetime",
"object",
"provided",
"."
] | python | train |
cuihantao/andes | andes/models/base.py | https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/models/base.py#L417-L455 | def get_field(self, field, idx=None, astype=None):
"""
Return `self.field` for the elements labeled by `idx`
:param astype: type cast of the return value
:param field: field name of this model
:param idx: element indices, will be the whole list if not specified
:return: field values
"""
assert astype in (None, list, matrix)
ret = None
if idx is None:
idx = self.idx
# ===================disable warning ==============================
# if field in self._service:
# logger.warning(
# 'Reading service variable <{model}.{field}> could be unsafe.'
# .format(field=field, model=self._name)
# )
# =================================================================
uid = self.get_uid(idx)
field_data = self.__dict__[field]
if isinstance(field_data, matrix):
ret = field_data[uid]
elif isinstance(field_data, list):
if isinstance(idx, (float, int, str)):
ret = field_data[uid]
else:
ret = [field_data[x] for x in uid]
if astype is not None:
ret = astype(ret)
return ret | [
"def",
"get_field",
"(",
"self",
",",
"field",
",",
"idx",
"=",
"None",
",",
"astype",
"=",
"None",
")",
":",
"assert",
"astype",
"in",
"(",
"None",
",",
"list",
",",
"matrix",
")",
"ret",
"=",
"None",
"if",
"idx",
"is",
"None",
":",
"idx",
"=",
"self",
".",
"idx",
"# ===================disable warning ==============================",
"# if field in self._service:",
"# logger.warning(",
"# 'Reading service variable <{model}.{field}> could be unsafe.'",
"# .format(field=field, model=self._name)",
"# )",
"# =================================================================",
"uid",
"=",
"self",
".",
"get_uid",
"(",
"idx",
")",
"field_data",
"=",
"self",
".",
"__dict__",
"[",
"field",
"]",
"if",
"isinstance",
"(",
"field_data",
",",
"matrix",
")",
":",
"ret",
"=",
"field_data",
"[",
"uid",
"]",
"elif",
"isinstance",
"(",
"field_data",
",",
"list",
")",
":",
"if",
"isinstance",
"(",
"idx",
",",
"(",
"float",
",",
"int",
",",
"str",
")",
")",
":",
"ret",
"=",
"field_data",
"[",
"uid",
"]",
"else",
":",
"ret",
"=",
"[",
"field_data",
"[",
"x",
"]",
"for",
"x",
"in",
"uid",
"]",
"if",
"astype",
"is",
"not",
"None",
":",
"ret",
"=",
"astype",
"(",
"ret",
")",
"return",
"ret"
] | Return `self.field` for the elements labeled by `idx`
:param astype: type cast of the return value
:param field: field name of this model
:param idx: element indices, will be the whole list if not specified
:return: field values | [
"Return",
"self",
".",
"field",
"for",
"the",
"elements",
"labeled",
"by",
"idx"
] | python | train |
sods/paramz | paramz/core/index_operations.py | https://github.com/sods/paramz/blob/ae6fc6274b70fb723d91e48fc5026a9bc5a06508/paramz/core/index_operations.py#L145-L159 | def properties_dict_for(self, index):
"""
Return a dictionary, containing properties as keys and indices as index
Thus, the indices for each constraint, which is contained will be collected as
one dictionary
Example:
let properties: 'one':[1,2,3,4], 'two':[3,5,6]
>>> properties_dict_for([2,3,5])
{'one':[2,3], 'two':[3,5]}
"""
props = self.properties_for(index)
prop_index = extract_properties_to_index(index, props)
return prop_index | [
"def",
"properties_dict_for",
"(",
"self",
",",
"index",
")",
":",
"props",
"=",
"self",
".",
"properties_for",
"(",
"index",
")",
"prop_index",
"=",
"extract_properties_to_index",
"(",
"index",
",",
"props",
")",
"return",
"prop_index"
] | Return a dictionary, containing properties as keys and indices as index
Thus, the indices for each constraint, which is contained will be collected as
one dictionary
Example:
let properties: 'one':[1,2,3,4], 'two':[3,5,6]
>>> properties_dict_for([2,3,5])
{'one':[2,3], 'two':[3,5]} | [
"Return",
"a",
"dictionary",
"containing",
"properties",
"as",
"keys",
"and",
"indices",
"as",
"index",
"Thus",
"the",
"indices",
"for",
"each",
"constraint",
"which",
"is",
"contained",
"will",
"be",
"collected",
"as",
"one",
"dictionary"
] | python | train |
airspeed-velocity/asv | asv/plugins/virtualenv.py | https://github.com/airspeed-velocity/asv/blob/d23bb8b74e8adacbfa3cf5724bda55fb39d56ba6/asv/plugins/virtualenv.py#L124-L140 | def _setup(self):
"""
Setup the environment on disk using virtualenv.
Then, all of the requirements are installed into
it using `pip install`.
"""
log.info("Creating virtualenv for {0}".format(self.name))
util.check_call([
sys.executable,
"-mvirtualenv",
'--no-site-packages',
"-p",
self._executable,
self._path])
log.info("Installing requirements for {0}".format(self.name))
self._install_requirements() | [
"def",
"_setup",
"(",
"self",
")",
":",
"log",
".",
"info",
"(",
"\"Creating virtualenv for {0}\"",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"util",
".",
"check_call",
"(",
"[",
"sys",
".",
"executable",
",",
"\"-mvirtualenv\"",
",",
"'--no-site-packages'",
",",
"\"-p\"",
",",
"self",
".",
"_executable",
",",
"self",
".",
"_path",
"]",
")",
"log",
".",
"info",
"(",
"\"Installing requirements for {0}\"",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"self",
".",
"_install_requirements",
"(",
")"
] | Setup the environment on disk using virtualenv.
Then, all of the requirements are installed into
it using `pip install`. | [
"Setup",
"the",
"environment",
"on",
"disk",
"using",
"virtualenv",
".",
"Then",
"all",
"of",
"the",
"requirements",
"are",
"installed",
"into",
"it",
"using",
"pip",
"install",
"."
] | python | train |
RedHatInsights/insights-core | insights/client/mount.py | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L112-L118 | def _get_fs(thin_pathname):
"""
Returns the file system type (xfs, ext4) of a given device
"""
cmd = ['lsblk', '-o', 'FSTYPE', '-n', thin_pathname]
fs_return = util.subp(cmd)
return fs_return.stdout.strip() | [
"def",
"_get_fs",
"(",
"thin_pathname",
")",
":",
"cmd",
"=",
"[",
"'lsblk'",
",",
"'-o'",
",",
"'FSTYPE'",
",",
"'-n'",
",",
"thin_pathname",
"]",
"fs_return",
"=",
"util",
".",
"subp",
"(",
"cmd",
")",
"return",
"fs_return",
".",
"stdout",
".",
"strip",
"(",
")"
] | Returns the file system type (xfs, ext4) of a given device | [
"Returns",
"the",
"file",
"system",
"type",
"(",
"xfs",
"ext4",
")",
"of",
"a",
"given",
"device"
] | python | train |
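The `_get_fs` helper in the insights-core record above shells out to `lsblk` to read a device's filesystem type. Below is a minimal standalone sketch of the same idea using only the standard library; the device path is a placeholder, and it assumes a Linux host with `lsblk` installed:

```python
import subprocess

def get_fs_type(device):
    """Return the filesystem type (e.g. xfs, ext4) that lsblk reports for a device."""
    # -o FSTYPE restricts output to the FSTYPE column, -n drops the header line
    out = subprocess.check_output(['lsblk', '-o', 'FSTYPE', '-n', device])
    return out.decode().strip()

print(get_fs_type('/dev/sda1'))  # '/dev/sda1' is only an example device name
```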
jreese/aiosqlite | aiosqlite/core.py | https://github.com/jreese/aiosqlite/blob/3f548b568b8db9a57022b6e2c9627f5cdefb983f/aiosqlite/core.py#L169-L173 | async def _connect(self) -> "Connection":
"""Connect to the actual sqlite database."""
if self._connection is None:
self._connection = await self._execute(self._connector)
return self | [
"async",
"def",
"_connect",
"(",
"self",
")",
"->",
"\"Connection\"",
":",
"if",
"self",
".",
"_connection",
"is",
"None",
":",
"self",
".",
"_connection",
"=",
"await",
"self",
".",
"_execute",
"(",
"self",
".",
"_connector",
")",
"return",
"self"
] | Connect to the actual sqlite database. | [
"Connect",
"to",
"the",
"actual",
"sqlite",
"database",
"."
] | python | train |
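The aiosqlite record above shows `_connect`, which opens the underlying sqlite handle lazily the first time the connection is awaited. A hedged usage sketch of the public API that relies on this behaviour (the database filename is an assumption):

```python
import asyncio
import aiosqlite

async def main():
    # Awaiting/entering the connection triggers _connect(), which opens the
    # real sqlite handle on first use.
    async with aiosqlite.connect('example.db') as db:   # filename is an assumption
        await db.execute('CREATE TABLE IF NOT EXISTS t (x INTEGER)')
        await db.execute('INSERT INTO t VALUES (1)')
        await db.commit()
        async with db.execute('SELECT COUNT(*) FROM t') as cursor:
            print(await cursor.fetchone())

asyncio.run(main())
```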
schapman1974/tinymongo | tinymongo/tinymongo.py | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L441-L469 | def find(self, filter=None, sort=None, skip=None, limit=None,
*args, **kwargs):
"""
Finds all matching results
:param query: dictionary representing the mongo query
:return: cursor containing the search results
"""
if self.table is None:
self.build_table()
if filter is None:
result = self.table.all()
else:
allcond = self.parse_query(filter)
try:
result = self.table.search(allcond)
except (AttributeError, TypeError):
result = []
result = TinyMongoCursor(
result,
sort=sort,
skip=skip,
limit=limit
)
return result | [
"def",
"find",
"(",
"self",
",",
"filter",
"=",
"None",
",",
"sort",
"=",
"None",
",",
"skip",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"table",
"is",
"None",
":",
"self",
".",
"build_table",
"(",
")",
"if",
"filter",
"is",
"None",
":",
"result",
"=",
"self",
".",
"table",
".",
"all",
"(",
")",
"else",
":",
"allcond",
"=",
"self",
".",
"parse_query",
"(",
"filter",
")",
"try",
":",
"result",
"=",
"self",
".",
"table",
".",
"search",
"(",
"allcond",
")",
"except",
"(",
"AttributeError",
",",
"TypeError",
")",
":",
"result",
"=",
"[",
"]",
"result",
"=",
"TinyMongoCursor",
"(",
"result",
",",
"sort",
"=",
"sort",
",",
"skip",
"=",
"skip",
",",
"limit",
"=",
"limit",
")",
"return",
"result"
] | Finds all matching results
:param query: dictionary representing the mongo query
:return: cursor containing the search results | [
"Finds",
"all",
"matching",
"results"
] | python | train |
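The tinymongo record above parses a Mongo-style filter into TinyDB conditions and wraps the matches in a cursor. A usage sketch, assuming the usual `TinyMongoClient` entry point, a local folder for the backing JSON files, and pymongo-style cursor iteration:

```python
from tinymongo import TinyMongoClient

client = TinyMongoClient('./tinydb_folder')      # folder path is an assumption
db = client.my_database
db.users.insert_one({'name': 'ada', 'age': 36})
db.users.insert_one({'name': 'bob', 'age': 29})

# A dict filter is parsed by parse_query(); calling find() with no filter returns everything.
cursor = db.users.find({'age': 36})
for doc in cursor:            # assumed to iterate like a pymongo cursor
    print(doc)
```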
tanghaibao/jcvi | jcvi/apps/base.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/base.py#L921-L931 | def need_update(a, b):
"""
Check if file a is newer than file b and decide whether or not to update
file b. Can generalize to two lists.
"""
a = listify(a)
b = listify(b)
return any((not op.exists(x)) for x in b) or \
all((os.stat(x).st_size == 0 for x in b)) or \
any(is_newer_file(x, y) for x in a for y in b) | [
"def",
"need_update",
"(",
"a",
",",
"b",
")",
":",
"a",
"=",
"listify",
"(",
"a",
")",
"b",
"=",
"listify",
"(",
"b",
")",
"return",
"any",
"(",
"(",
"not",
"op",
".",
"exists",
"(",
"x",
")",
")",
"for",
"x",
"in",
"b",
")",
"or",
"all",
"(",
"(",
"os",
".",
"stat",
"(",
"x",
")",
".",
"st_size",
"==",
"0",
"for",
"x",
"in",
"b",
")",
")",
"or",
"any",
"(",
"is_newer_file",
"(",
"x",
",",
"y",
")",
"for",
"x",
"in",
"a",
"for",
"y",
"in",
"b",
")"
] | Check if file a is newer than file b and decide whether or not to update
file b. Can generalize to two lists. | [
"Check",
"if",
"file",
"a",
"is",
"newer",
"than",
"file",
"b",
"and",
"decide",
"whether",
"or",
"not",
"to",
"update",
"file",
"b",
".",
"Can",
"generalize",
"to",
"two",
"lists",
"."
] | python | train |
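`need_update` in the jcvi record above rebuilds outputs when any output file is missing, all outputs are empty, or any input is newer than any output. A standalone sketch of that decision rule using only the standard library; the jcvi helpers `listify` and `is_newer_file` are replaced with inline equivalents, which is an assumption about their behaviour:

```python
import os

def need_update(inputs, outputs):
    """Return True when `outputs` should be regenerated from `inputs`."""
    inputs = [inputs] if isinstance(inputs, str) else list(inputs)
    outputs = [outputs] if isinstance(outputs, str) else list(outputs)

    def newer(a, b):
        # True when `a` exists and was modified more recently than `b`
        return (os.path.exists(a) and os.path.exists(b)
                and os.path.getmtime(a) > os.path.getmtime(b))

    return (any(not os.path.exists(x) for x in outputs)
            or all(os.path.exists(x) and os.path.getsize(x) == 0 for x in outputs)
            or any(newer(x, y) for x in inputs for y in outputs))

print(need_update('input.fasta', 'result.bed'))  # file names are placeholders
```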
saltstack/salt | salt/states/libcloud_storage.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/libcloud_storage.py#L175-L195 | def file_present(container, name, path, profile, overwrite_existing=False):
'''
Ensures a object is downloaded locally.
:param container: Container name
:type container: ``str``
:param name: Object name in cloud
:type name: ``str``
:param path: Local path to file
:type path: ``str``
:param profile: The profile key
:type profile: ``str``
:param overwrite_existing: Replace if already exists
:type overwrite_existing: ``bool``
'''
result = __salt__['libcloud_storage.download_object'](path, container, name, profile, overwrite_existing)
return state_result(result, "Downloaded object", name, {}) | [
"def",
"file_present",
"(",
"container",
",",
"name",
",",
"path",
",",
"profile",
",",
"overwrite_existing",
"=",
"False",
")",
":",
"result",
"=",
"__salt__",
"[",
"'libcloud_storage.download_object'",
"]",
"(",
"path",
",",
"container",
",",
"name",
",",
"profile",
",",
"overwrite_existing",
")",
"return",
"state_result",
"(",
"result",
",",
"\"Downloaded object\"",
",",
"name",
",",
"{",
"}",
")"
] | Ensures a object is downloaded locally.
:param container: Container name
:type container: ``str``
:param name: Object name in cloud
:type name: ``str``
:param path: Local path to file
:type path: ``str``
:param profile: The profile key
:type profile: ``str``
:param overwrite_existing: Replace if already exists
:type overwrite_existing: ``bool`` | [
"Ensures",
"a",
"object",
"is",
"downloaded",
"locally",
"."
] | python | train |
base4sistemas/satcfe | satcfe/base.py | https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/base.py#L326-L347 | def cancelar_ultima_venda(self, chave_cfe, dados_cancelamento):
"""Função ``CancelarUltimaVenda`` conforme ER SAT, item 6.1.4. Envia o
CF-e de cancelamento para o equipamento SAT, que o enviará para
autorização e cancelamento do CF-e pela SEFAZ.
:param chave_cfe: String contendo a chave do CF-e a ser cancelado,
prefixada com o literal ``CFe``.
:param dados_cancelamento: Uma instância
de :class:`~satcfe.entidades.CFeCancelamento` ou uma string
contendo o XML do CF-e de cancelamento.
:return: Retorna *verbatim* a resposta da função SAT.
:rtype: string
"""
cfe_canc = dados_cancelamento \
if isinstance(dados_cancelamento, basestring) \
else dados_cancelamento.documento()
return self.invocar__CancelarUltimaVenda(
self.gerar_numero_sessao(), self._codigo_ativacao,
chave_cfe, cfe_canc) | [
"def",
"cancelar_ultima_venda",
"(",
"self",
",",
"chave_cfe",
",",
"dados_cancelamento",
")",
":",
"cfe_canc",
"=",
"dados_cancelamento",
"if",
"isinstance",
"(",
"dados_cancelamento",
",",
"basestring",
")",
"else",
"dados_cancelamento",
".",
"documento",
"(",
")",
"return",
"self",
".",
"invocar__CancelarUltimaVenda",
"(",
"self",
".",
"gerar_numero_sessao",
"(",
")",
",",
"self",
".",
"_codigo_ativacao",
",",
"chave_cfe",
",",
"cfe_canc",
")"
] | Função ``CancelarUltimaVenda`` conforme ER SAT, item 6.1.4. Envia o
CF-e de cancelamento para o equipamento SAT, que o enviará para
autorização e cancelamento do CF-e pela SEFAZ.
:param chave_cfe: String contendo a chave do CF-e a ser cancelado,
prefixada com o literal ``CFe``.
:param dados_cancelamento: Uma instância
de :class:`~satcfe.entidades.CFeCancelamento` ou uma string
contendo o XML do CF-e de cancelamento.
:return: Retorna *verbatim* a resposta da função SAT.
:rtype: string | [
"Função",
"CancelarUltimaVenda",
"conforme",
"ER",
"SAT",
"item",
"6",
".",
"1",
".",
"4",
".",
"Envia",
"o",
"CF",
"-",
"e",
"de",
"cancelamento",
"para",
"o",
"equipamento",
"SAT",
"que",
"o",
"enviará",
"para",
"autorização",
"e",
"cancelamento",
"do",
"CF",
"-",
"e",
"pela",
"SEFAZ",
"."
] | python | train |
ejeschke/ginga | ginga/rv/Control.py | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/Control.py#L2609-L2614 | def motion_cb(self, viewer, button, data_x, data_y):
"""Motion event in the channel viewer window. Show the pointing
information under the cursor.
"""
self.showxy(viewer, data_x, data_y)
return True | [
"def",
"motion_cb",
"(",
"self",
",",
"viewer",
",",
"button",
",",
"data_x",
",",
"data_y",
")",
":",
"self",
".",
"showxy",
"(",
"viewer",
",",
"data_x",
",",
"data_y",
")",
"return",
"True"
] | Motion event in the channel viewer window. Show the pointing
information under the cursor. | [
"Motion",
"event",
"in",
"the",
"channel",
"viewer",
"window",
".",
"Show",
"the",
"pointing",
"information",
"under",
"the",
"cursor",
"."
] | python | train |
boriel/zxbasic | arch/zx48k/backend/__array.py | https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__array.py#L189-L243 | def _astore16(ins):
''' Stores 2º operand content into address of 1st operand.
store16 a, x => *(&a) = x
Use '*' for indirect store on 1st operand.
'''
output = _addr(ins.quad[1])
op = ins.quad[2]
indirect = op[0] == '*'
if indirect:
op = op[1:]
immediate = op[0] == '#'
if immediate:
op = op[1:]
if is_int(op):
op = str(int(op) & 0xFFFF) # Truncate to 16bit pointer
if indirect:
if immediate:
output.append('ld de, (%s)' % op)
else:
output.append('ld de, (%s)' % op)
output.append('call __LOAD_DE_DE')
REQUIRES.add('lddede.asm')
else:
H = int(op) >> 8
L = int(op) & 0xFF
output.append('ld (hl), %i' % L)
output.append('inc hl')
output.append('ld (hl), %i' % H)
return output
elif op[0] == '_':
if indirect:
if immediate:
output.append('ld de, (%s)' % op) # redundant: *#_id == _id
else:
output.append('ld de, (%s)' % op) # *_id
output.append('call __LOAD_DE_DE')
REQUIRES.add('lddede.asm')
else:
if immediate:
output.append('ld de, %s' % op)
else:
output.append('ld de, (%s)' % op)
else:
output.append('pop de')
output.append('ld (hl), e')
output.append('inc hl')
output.append('ld (hl), d')
return output | [
"def",
"_astore16",
"(",
"ins",
")",
":",
"output",
"=",
"_addr",
"(",
"ins",
".",
"quad",
"[",
"1",
"]",
")",
"op",
"=",
"ins",
".",
"quad",
"[",
"2",
"]",
"indirect",
"=",
"op",
"[",
"0",
"]",
"==",
"'*'",
"if",
"indirect",
":",
"op",
"=",
"op",
"[",
"1",
":",
"]",
"immediate",
"=",
"op",
"[",
"0",
"]",
"==",
"'#'",
"if",
"immediate",
":",
"op",
"=",
"op",
"[",
"1",
":",
"]",
"if",
"is_int",
"(",
"op",
")",
":",
"op",
"=",
"str",
"(",
"int",
"(",
"op",
")",
"&",
"0xFFFF",
")",
"# Truncate to 16bit pointer",
"if",
"indirect",
":",
"if",
"immediate",
":",
"output",
".",
"append",
"(",
"'ld de, (%s)'",
"%",
"op",
")",
"else",
":",
"output",
".",
"append",
"(",
"'ld de, (%s)'",
"%",
"op",
")",
"output",
".",
"append",
"(",
"'call __LOAD_DE_DE'",
")",
"REQUIRES",
".",
"add",
"(",
"'lddede.asm'",
")",
"else",
":",
"H",
"=",
"int",
"(",
"op",
")",
">>",
"8",
"L",
"=",
"int",
"(",
"op",
")",
"&",
"0xFF",
"output",
".",
"append",
"(",
"'ld (hl), %i'",
"%",
"L",
")",
"output",
".",
"append",
"(",
"'inc hl'",
")",
"output",
".",
"append",
"(",
"'ld (hl), %i'",
"%",
"H",
")",
"return",
"output",
"elif",
"op",
"[",
"0",
"]",
"==",
"'_'",
":",
"if",
"indirect",
":",
"if",
"immediate",
":",
"output",
".",
"append",
"(",
"'ld de, (%s)'",
"%",
"op",
")",
"# redundant: *#_id == _id",
"else",
":",
"output",
".",
"append",
"(",
"'ld de, (%s)'",
"%",
"op",
")",
"# *_id",
"output",
".",
"append",
"(",
"'call __LOAD_DE_DE'",
")",
"REQUIRES",
".",
"add",
"(",
"'lddede.asm'",
")",
"else",
":",
"if",
"immediate",
":",
"output",
".",
"append",
"(",
"'ld de, %s'",
"%",
"op",
")",
"else",
":",
"output",
".",
"append",
"(",
"'ld de, (%s)'",
"%",
"op",
")",
"else",
":",
"output",
".",
"append",
"(",
"'pop de'",
")",
"output",
".",
"append",
"(",
"'ld (hl), e'",
")",
"output",
".",
"append",
"(",
"'inc hl'",
")",
"output",
".",
"append",
"(",
"'ld (hl), d'",
")",
"return",
"output"
] | Stores 2º operand content into address of 1st operand.
store16 a, x => *(&a) = x
Use '*' for indirect store on 1st operand. | [
"Stores",
"2º",
"operand",
"content",
"into",
"address",
"of",
"1st",
"operand",
".",
"store16",
"a",
"x",
"=",
">",
"*",
"(",
"&a",
")",
"=",
"x",
"Use",
"*",
"for",
"indirect",
"store",
"on",
"1st",
"operand",
"."
] | python | train |
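For an immediate integer operand, `_astore16` in the zxbasic record above splits the 16-bit value into low and high bytes and emits `ld (hl), n` / `inc hl` pairs. A standalone sketch of just that branch (the HL-loading prologue produced by `_addr` is omitted):

```python
def store16_immediate(value):
    """Z80 lines that store a 16-bit literal at the address already held in HL."""
    value &= 0xFFFF                      # truncate to 16 bits, as the backend does
    low, high = value & 0xFF, value >> 8
    return ['ld (hl), %i' % low, 'inc hl', 'ld (hl), %i' % high]

print(store16_immediate(0x1234))  # ['ld (hl), 52', 'inc hl', 'ld (hl), 18']
print(store16_immediate(70000))   # larger values wrap modulo 0x10000 first
```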
awslabs/serverless-application-model | examples/apps/lex-make-appointment-python/lambda_function.py | https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/examples/apps/lex-make-appointment-python/lambda_function.py#L192-L207 | def get_availabilities_for_duration(duration, availabilities):
"""
Helper function to return the windows of availability of the given duration, when provided a set of 30 minute windows.
"""
duration_availabilities = []
start_time = '10:00'
while start_time != '17:00':
if start_time in availabilities:
if duration == 30:
duration_availabilities.append(start_time)
elif increment_time_by_thirty_mins(start_time) in availabilities:
duration_availabilities.append(start_time)
start_time = increment_time_by_thirty_mins(start_time)
return duration_availabilities | [
"def",
"get_availabilities_for_duration",
"(",
"duration",
",",
"availabilities",
")",
":",
"duration_availabilities",
"=",
"[",
"]",
"start_time",
"=",
"'10:00'",
"while",
"start_time",
"!=",
"'17:00'",
":",
"if",
"start_time",
"in",
"availabilities",
":",
"if",
"duration",
"==",
"30",
":",
"duration_availabilities",
".",
"append",
"(",
"start_time",
")",
"elif",
"increment_time_by_thirty_mins",
"(",
"start_time",
")",
"in",
"availabilities",
":",
"duration_availabilities",
".",
"append",
"(",
"start_time",
")",
"start_time",
"=",
"increment_time_by_thirty_mins",
"(",
"start_time",
")",
"return",
"duration_availabilities"
] | Helper function to return the windows of availability of the given duration, when provided a set of 30 minute windows. | [
"Helper",
"function",
"to",
"return",
"the",
"windows",
"of",
"availability",
"of",
"the",
"given",
"duration",
"when",
"provided",
"a",
"set",
"of",
"30",
"minute",
"windows",
"."
] | python | train |
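`get_availabilities_for_duration` in the record above walks 30-minute slots from 10:00 to 17:00 and keeps a start time when the slot — and, for 60-minute bookings, the following slot — is free. A standalone sketch; the stand-in for `increment_time_by_thirty_mins` is an assumption, since its real implementation is not shown here:

```python
def increment_time_by_thirty_mins(hhmm):
    # stand-in for the helper used by the original module
    hour, minute = map(int, hhmm.split(':'))
    hour, minute = (hour + 1, 0) if minute == 30 else (hour, 30)
    return '{}:{:02d}'.format(hour, minute)

def get_availabilities_for_duration(duration, availabilities):
    result, start_time = [], '10:00'
    while start_time != '17:00':
        if start_time in availabilities:
            if duration == 30 or increment_time_by_thirty_mins(start_time) in availabilities:
                result.append(start_time)
        start_time = increment_time_by_thirty_mins(start_time)
    return result

print(get_availabilities_for_duration(60, ['10:00', '10:30', '16:00', '16:30']))
# ['10:00', '16:00'] under these assumptions
```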
spotify/luigi | luigi/contrib/hadoop.py | https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hadoop.py#L989-L1004 | def _map_input(self, input_stream):
"""
Iterate over input and call the mapper for each item.
If the job has a parser defined, the return values from the parser will
be passed as arguments to the mapper.
If the input is coded output from a previous run,
        the arguments will be split into key and value.
"""
for record in self.reader(input_stream):
for output in self.mapper(*record):
yield output
if self.final_mapper != NotImplemented:
for output in self.final_mapper():
yield output
self._flush_batch_incr_counter() | [
"def",
"_map_input",
"(",
"self",
",",
"input_stream",
")",
":",
"for",
"record",
"in",
"self",
".",
"reader",
"(",
"input_stream",
")",
":",
"for",
"output",
"in",
"self",
".",
"mapper",
"(",
"*",
"record",
")",
":",
"yield",
"output",
"if",
"self",
".",
"final_mapper",
"!=",
"NotImplemented",
":",
"for",
"output",
"in",
"self",
".",
"final_mapper",
"(",
")",
":",
"yield",
"output",
"self",
".",
"_flush_batch_incr_counter",
"(",
")"
] | Iterate over input and call the mapper for each item.
If the job has a parser defined, the return values from the parser will
be passed as arguments to the mapper.
If the input is coded output from a previous run,
        the arguments will be split into key and value. | [
"Iterate",
"over",
"input",
"and",
"call",
"the",
"mapper",
"for",
"each",
"item",
".",
"If",
"the",
"job",
"has",
"a",
"parser",
"defined",
"the",
"return",
"values",
"from",
"the",
"parser",
"will",
"be",
"passed",
"as",
"arguments",
"to",
"the",
"mapper",
"."
] | python | train |
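`_map_input` in the luigi record above streams every parsed record into the mapper and yields the mapper's outputs. A standalone sketch of the same generator pattern, with toy reader/mapper functions standing in for the Hadoop job's own (their shapes are assumptions):

```python
def reader(lines):
    # parse each raw line into a (key, value) record
    for line in lines:
        yield line.rstrip('\n').split('\t', 1)

def mapper(key, value):
    # word-count style mapper: one (word, 1) pair per token
    for word in value.split():
        yield word, 1

def map_input(lines):
    for record in reader(lines):
        for output in mapper(*record):
            yield output

print(list(map_input(['doc1\thello world', 'doc2\thello again'])))
# [('hello', 1), ('world', 1), ('hello', 1), ('again', 1)]
```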
juju/charm-helpers | charmhelpers/core/hookenv.py | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/core/hookenv.py#L676-L688 | def relation_to_role_and_interface(relation_name):
"""
Given the name of a relation, return the role and the name of the interface
that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
:returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
"""
_metadata = metadata()
for role in ('provides', 'requires', 'peers'):
interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
if interface:
return role, interface
return None, None | [
"def",
"relation_to_role_and_interface",
"(",
"relation_name",
")",
":",
"_metadata",
"=",
"metadata",
"(",
")",
"for",
"role",
"in",
"(",
"'provides'",
",",
"'requires'",
",",
"'peers'",
")",
":",
"interface",
"=",
"_metadata",
".",
"get",
"(",
"role",
",",
"{",
"}",
")",
".",
"get",
"(",
"relation_name",
",",
"{",
"}",
")",
".",
"get",
"(",
"'interface'",
")",
"if",
"interface",
":",
"return",
"role",
",",
"interface",
"return",
"None",
",",
"None"
] | Given the name of a relation, return the role and the name of the interface
that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
:returns: A tuple containing ``(role, interface)``, or ``(None, None)``. | [
"Given",
"the",
"name",
"of",
"a",
"relation",
"return",
"the",
"role",
"and",
"the",
"name",
"of",
"the",
"interface",
"that",
"relation",
"uses",
"(",
"where",
"role",
"is",
"one",
"of",
"provides",
"requires",
"or",
"peers",
")",
"."
] | python | train |
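`relation_to_role_and_interface` in the charm-helpers record above looks a relation name up under each role section of `metadata.yaml`. A standalone sketch of the same lookup over an in-memory metadata dict; the sample metadata content is invented for illustration:

```python
def relation_to_role_and_interface(metadata, relation_name):
    for role in ('provides', 'requires', 'peers'):
        interface = metadata.get(role, {}).get(relation_name, {}).get('interface')
        if interface:
            return role, interface
    return None, None

metadata = {
    'provides': {'website': {'interface': 'http'}},
    'requires': {'database': {'interface': 'pgsql'}},
}
print(relation_to_role_and_interface(metadata, 'database'))  # ('requires', 'pgsql')
print(relation_to_role_and_interface(metadata, 'missing'))   # (None, None)
```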
chemlab/chemlab | chemlab/libs/cirpy.py | https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/libs/cirpy.py#L33-L39 | def resolve(input, representation, resolvers=None, **kwargs):
""" Resolve input to the specified output representation """
resultdict = query(input, representation, resolvers, **kwargs)
result = resultdict[0]['value'] if resultdict else None
if result and len(result) == 1:
result = result[0]
return result | [
"def",
"resolve",
"(",
"input",
",",
"representation",
",",
"resolvers",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"resultdict",
"=",
"query",
"(",
"input",
",",
"representation",
",",
"resolvers",
",",
"*",
"*",
"kwargs",
")",
"result",
"=",
"resultdict",
"[",
"0",
"]",
"[",
"'value'",
"]",
"if",
"resultdict",
"else",
"None",
"if",
"result",
"and",
"len",
"(",
"result",
")",
"==",
"1",
":",
"result",
"=",
"result",
"[",
"0",
"]",
"return",
"result"
] | Resolve input to the specified output representation | [
"Resolve",
"input",
"to",
"the",
"specified",
"output",
"representation"
] | python | train |
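`resolve` in the cirpy record above queries the NIH Chemical Identifier Resolver and unwraps single-value results. A hedged usage sketch of the standalone `cirpy` module (chemlab vendors the same code under `chemlab.libs.cirpy`); it needs network access to the CIR service, and the exact return values depend on that service:

```python
import cirpy

smiles = cirpy.resolve('aspirin', 'smiles')  # a SMILES string, or None if unresolved
print(smiles)

cas = cirpy.resolve('aspirin', 'cas')        # may be a list when several values match
print(cas)
```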
tchellomello/python-arlo | pyarlo/base_station.py | https://github.com/tchellomello/python-arlo/blob/db70aeb81705309c56ad32bbab1094f6cd146524/pyarlo/base_station.py#L408-L418 | def get_cameras_signal_strength(self):
"""Return a list of signal strength of all cameras."""
signal_strength = {}
if not self.camera_properties:
return None
for camera in self.camera_properties:
serialnum = camera.get('serialNumber')
cam_strength = camera.get('signalStrength')
signal_strength[serialnum] = cam_strength
return signal_strength | [
"def",
"get_cameras_signal_strength",
"(",
"self",
")",
":",
"signal_strength",
"=",
"{",
"}",
"if",
"not",
"self",
".",
"camera_properties",
":",
"return",
"None",
"for",
"camera",
"in",
"self",
".",
"camera_properties",
":",
"serialnum",
"=",
"camera",
".",
"get",
"(",
"'serialNumber'",
")",
"cam_strength",
"=",
"camera",
".",
"get",
"(",
"'signalStrength'",
")",
"signal_strength",
"[",
"serialnum",
"]",
"=",
"cam_strength",
"return",
"signal_strength"
] | Return a list of signal strength of all cameras. | [
"Return",
"a",
"list",
"of",
"signal",
"strength",
"of",
"all",
"cameras",
"."
] | python | train |
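`get_cameras_signal_strength` in the pyarlo record above builds a serial-number → signal-strength mapping from the cached camera properties. A standalone sketch of the same dictionary construction over sample data; the payload shape is inferred from the keys used in the code:

```python
def cameras_signal_strength(camera_properties):
    if not camera_properties:
        return None
    return {cam.get('serialNumber'): cam.get('signalStrength')
            for cam in camera_properties}

sample = [
    {'serialNumber': 'ABC123', 'signalStrength': 4},
    {'serialNumber': 'DEF456', 'signalStrength': 2},
]
print(cameras_signal_strength(sample))  # {'ABC123': 4, 'DEF456': 2}
```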
rsmuc/health_monitoring_plugins | health_monitoring_plugins/newtecmodem.py | https://github.com/rsmuc/health_monitoring_plugins/blob/7ac29dfb9fe46c055b018cb72ad0d7d8065589b9/health_monitoring_plugins/newtecmodem.py#L74-L79 | def process_alarms(self, snmp_data):
"Build list with active alarms"
self.active_alarms = []
for i in range(0, len(self.models[self.modem_type]['alarms'])):
if bool(int(snmp_data[i])) == True:
self.active_alarms.append(self.models[self.modem_type]['alarms'][i]) | [
"def",
"process_alarms",
"(",
"self",
",",
"snmp_data",
")",
":",
"self",
".",
"active_alarms",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"self",
".",
"models",
"[",
"self",
".",
"modem_type",
"]",
"[",
"'alarms'",
"]",
")",
")",
":",
"if",
"bool",
"(",
"int",
"(",
"snmp_data",
"[",
"i",
"]",
")",
")",
"==",
"True",
":",
"self",
".",
"active_alarms",
".",
"append",
"(",
"self",
".",
"models",
"[",
"self",
".",
"modem_type",
"]",
"[",
"'alarms'",
"]",
"[",
"i",
"]",
")"
] | Build list with active alarms | [
"Build",
"list",
"with",
"active",
"alarms"
] | python | train |
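`process_alarms` in the record above turns a list of 0/1 SNMP flags into the names of the alarms that are currently raised, using the per-model alarm table. A standalone sketch of that filtering step; the alarm names are invented for illustration:

```python
def active_alarms(alarm_names, snmp_flags):
    """Keep the alarm names whose corresponding SNMP flag is set."""
    return [name for name, flag in zip(alarm_names, snmp_flags) if bool(int(flag))]

names = ['power_supply', 'temperature', 'carrier_lost', 'reference_clock']
flags = ['0', '1', '0', '1']          # SNMP values typically arrive as strings
print(active_alarms(names, flags))    # ['temperature', 'reference_clock']
```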
jazzband/django-axes | axes/attempts.py | https://github.com/jazzband/django-axes/blob/3e215a174030e43e7ab8c2a79c395eb0eeddc667/axes/attempts.py#L76-L86 | def reset_user_attempts(request: AxesHttpRequest, credentials: dict = None) -> int:
"""
Reset all user attempts that match the given request and credentials.
"""
attempts = filter_user_attempts(request, credentials)
count, _ = attempts.delete()
log.info('AXES: Reset %s access attempts from database.', count)
return count | [
"def",
"reset_user_attempts",
"(",
"request",
":",
"AxesHttpRequest",
",",
"credentials",
":",
"dict",
"=",
"None",
")",
"->",
"int",
":",
"attempts",
"=",
"filter_user_attempts",
"(",
"request",
",",
"credentials",
")",
"count",
",",
"_",
"=",
"attempts",
".",
"delete",
"(",
")",
"log",
".",
"info",
"(",
"'AXES: Reset %s access attempts from database.'",
",",
"count",
")",
"return",
"count"
] | Reset all user attempts that match the given request and credentials. | [
"Reset",
"all",
"user",
"attempts",
"that",
"match",
"the",
"given",
"request",
"and",
"credentials",
"."
] | python | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L1994-L1999 | def schemaNewMemParserCtxt(buffer, size):
"""Create an XML Schemas parse context for that memory buffer
expected to contain an XML Schemas file. """
ret = libxml2mod.xmlSchemaNewMemParserCtxt(buffer, size)
if ret is None:raise parserError('xmlSchemaNewMemParserCtxt() failed')
return SchemaParserCtxt(_obj=ret) | [
"def",
"schemaNewMemParserCtxt",
"(",
"buffer",
",",
"size",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlSchemaNewMemParserCtxt",
"(",
"buffer",
",",
"size",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"parserError",
"(",
"'xmlSchemaNewMemParserCtxt() failed'",
")",
"return",
"SchemaParserCtxt",
"(",
"_obj",
"=",
"ret",
")"
] | Create an XML Schemas parse context for that memory buffer
expected to contain an XML Schemas file. | [
"Create",
"an",
"XML",
"Schemas",
"parse",
"context",
"for",
"that",
"memory",
"buffer",
"expected",
"to",
"contain",
"an",
"XML",
"Schemas",
"file",
"."
] | python | train |
opencobra/cobrapy | cobra/core/reaction.py | https://github.com/opencobra/cobrapy/blob/9d1987cdb3a395cf4125a3439c3b002ff2be2009/cobra/core/reaction.py#L107-L111 | def reverse_id(self):
"""Generate the id of reverse_variable from the reaction's id."""
return '_'.join((self.id, 'reverse',
hashlib.md5(
self.id.encode('utf-8')).hexdigest()[0:5])) | [
"def",
"reverse_id",
"(",
"self",
")",
":",
"return",
"'_'",
".",
"join",
"(",
"(",
"self",
".",
"id",
",",
"'reverse'",
",",
"hashlib",
".",
"md5",
"(",
"self",
".",
"id",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")",
"[",
"0",
":",
"5",
"]",
")",
")"
] | Generate the id of reverse_variable from the reaction's id. | [
"Generate",
"the",
"id",
"of",
"reverse_variable",
"from",
"the",
"reaction",
"s",
"id",
"."
] | python | valid |
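`reverse_id` in the cobrapy record above names a reaction's reverse solver variable by appending `reverse` plus the first five hex digits of the MD5 of the reaction id. A standalone reproduction of that naming rule:

```python
import hashlib

def reverse_id(reaction_id):
    return '_'.join((reaction_id, 'reverse',
                     hashlib.md5(reaction_id.encode('utf-8')).hexdigest()[0:5]))

print(reverse_id('PGI'))  # 'PGI_reverse_' followed by five hex characters
```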
googleapis/google-cloud-python | bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py#L413-L485 | def list_instances(
self,
parent,
page_token=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists information about instances in a project.
Example:
>>> from google.cloud import bigtable_admin_v2
>>>
>>> client = bigtable_admin_v2.BigtableInstanceAdminClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> response = client.list_instances(parent)
Args:
parent (str): The unique name of the project for which a list of instances is
requested. Values are of the form ``projects/<project>``.
page_token (str): DEPRECATED: This field is unused and ignored.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.bigtable_admin_v2.types.ListInstancesResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_instances" not in self._inner_api_calls:
self._inner_api_calls[
"list_instances"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_instances,
default_retry=self._method_configs["ListInstances"].retry,
default_timeout=self._method_configs["ListInstances"].timeout,
client_info=self._client_info,
)
request = bigtable_instance_admin_pb2.ListInstancesRequest(
parent=parent, page_token=page_token
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["list_instances"](
request, retry=retry, timeout=timeout, metadata=metadata
) | [
"def",
"list_instances",
"(",
"self",
",",
"parent",
",",
"page_token",
"=",
"None",
",",
"retry",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"timeout",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"metadata",
"=",
"None",
",",
")",
":",
"# Wrap the transport method to add retry and timeout logic.",
"if",
"\"list_instances\"",
"not",
"in",
"self",
".",
"_inner_api_calls",
":",
"self",
".",
"_inner_api_calls",
"[",
"\"list_instances\"",
"]",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"wrap_method",
"(",
"self",
".",
"transport",
".",
"list_instances",
",",
"default_retry",
"=",
"self",
".",
"_method_configs",
"[",
"\"ListInstances\"",
"]",
".",
"retry",
",",
"default_timeout",
"=",
"self",
".",
"_method_configs",
"[",
"\"ListInstances\"",
"]",
".",
"timeout",
",",
"client_info",
"=",
"self",
".",
"_client_info",
",",
")",
"request",
"=",
"bigtable_instance_admin_pb2",
".",
"ListInstancesRequest",
"(",
"parent",
"=",
"parent",
",",
"page_token",
"=",
"page_token",
")",
"if",
"metadata",
"is",
"None",
":",
"metadata",
"=",
"[",
"]",
"metadata",
"=",
"list",
"(",
"metadata",
")",
"try",
":",
"routing_header",
"=",
"[",
"(",
"\"parent\"",
",",
"parent",
")",
"]",
"except",
"AttributeError",
":",
"pass",
"else",
":",
"routing_metadata",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"routing_header",
".",
"to_grpc_metadata",
"(",
"routing_header",
")",
"metadata",
".",
"append",
"(",
"routing_metadata",
")",
"return",
"self",
".",
"_inner_api_calls",
"[",
"\"list_instances\"",
"]",
"(",
"request",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
")"
] | Lists information about instances in a project.
Example:
>>> from google.cloud import bigtable_admin_v2
>>>
>>> client = bigtable_admin_v2.BigtableInstanceAdminClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> response = client.list_instances(parent)
Args:
parent (str): The unique name of the project for which a list of instances is
requested. Values are of the form ``projects/<project>``.
page_token (str): DEPRECATED: This field is unused and ignored.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.bigtable_admin_v2.types.ListInstancesResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | [
"Lists",
"information",
"about",
"instances",
"in",
"a",
"project",
"."
] | python | train |
DataDog/integrations-core | haproxy/datadog_checks/haproxy/haproxy.py | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/haproxy/datadog_checks/haproxy/haproxy.py#L193-L217 | def _fetch_socket_data(self, parsed_url):
''' Hit a given stats socket and return the stats lines '''
self.log.debug("Fetching haproxy stats from socket: %s" % parsed_url.geturl())
if parsed_url.scheme == 'tcp':
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
splitted_loc = parsed_url.netloc.split(':')
host = splitted_loc[0]
port = int(splitted_loc[1])
sock.connect((host, port))
else:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(parsed_url.path)
sock.send(b"show stat\r\n")
response = ""
output = sock.recv(BUFSIZE)
while output:
response += output.decode("ASCII")
output = sock.recv(BUFSIZE)
sock.close()
return response.splitlines() | [
"def",
"_fetch_socket_data",
"(",
"self",
",",
"parsed_url",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Fetching haproxy stats from socket: %s\"",
"%",
"parsed_url",
".",
"geturl",
"(",
")",
")",
"if",
"parsed_url",
".",
"scheme",
"==",
"'tcp'",
":",
"sock",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"splitted_loc",
"=",
"parsed_url",
".",
"netloc",
".",
"split",
"(",
"':'",
")",
"host",
"=",
"splitted_loc",
"[",
"0",
"]",
"port",
"=",
"int",
"(",
"splitted_loc",
"[",
"1",
"]",
")",
"sock",
".",
"connect",
"(",
"(",
"host",
",",
"port",
")",
")",
"else",
":",
"sock",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_UNIX",
",",
"socket",
".",
"SOCK_STREAM",
")",
"sock",
".",
"connect",
"(",
"parsed_url",
".",
"path",
")",
"sock",
".",
"send",
"(",
"b\"show stat\\r\\n\"",
")",
"response",
"=",
"\"\"",
"output",
"=",
"sock",
".",
"recv",
"(",
"BUFSIZE",
")",
"while",
"output",
":",
"response",
"+=",
"output",
".",
"decode",
"(",
"\"ASCII\"",
")",
"output",
"=",
"sock",
".",
"recv",
"(",
"BUFSIZE",
")",
"sock",
".",
"close",
"(",
")",
"return",
"response",
".",
"splitlines",
"(",
")"
] | Hit a given stats socket and return the stats lines | [
"Hit",
"a",
"given",
"stats",
"socket",
"and",
"return",
"the",
"stats",
"lines"
] | python | train |
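`_fetch_socket_data` in the record above connects to the HAProxy stats socket and sends `show stat` to pull the CSV statistics. A minimal standalone sketch of the UNIX-socket case; the socket path is an assumption and depends on the local HAProxy configuration:

```python
import socket

def fetch_haproxy_stats(socket_path='/var/run/haproxy.sock'):
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(socket_path)
    sock.send(b'show stat\r\n')
    chunks = []
    data = sock.recv(8192)
    while data:
        chunks.append(data.decode('ascii'))
        data = sock.recv(8192)
    sock.close()
    return ''.join(chunks).splitlines()

# for line in fetch_haproxy_stats():   # requires a reachable stats socket
#     print(line)
```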
benley/butcher | butcher/gitrepo.py | https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/gitrepo.py#L147-L158 | def sethead(self, ref):
"""Set head to a git ref."""
log.debug('[%s] Setting to ref %s', self.name, ref)
try:
ref = self.repo.rev_parse(ref)
except gitdb.exc.BadObject:
# Probably means we don't have it cached yet.
# So maybe we can fetch it.
ref = self.fetchref(ref)
log.debug('[%s] Setting head to %s', self.name, ref)
self.repo.head.reset(ref, working_tree=True)
log.debug('[%s] Head object: %s', self.name, self.currenthead) | [
"def",
"sethead",
"(",
"self",
",",
"ref",
")",
":",
"log",
".",
"debug",
"(",
"'[%s] Setting to ref %s'",
",",
"self",
".",
"name",
",",
"ref",
")",
"try",
":",
"ref",
"=",
"self",
".",
"repo",
".",
"rev_parse",
"(",
"ref",
")",
"except",
"gitdb",
".",
"exc",
".",
"BadObject",
":",
"# Probably means we don't have it cached yet.",
"# So maybe we can fetch it.",
"ref",
"=",
"self",
".",
"fetchref",
"(",
"ref",
")",
"log",
".",
"debug",
"(",
"'[%s] Setting head to %s'",
",",
"self",
".",
"name",
",",
"ref",
")",
"self",
".",
"repo",
".",
"head",
".",
"reset",
"(",
"ref",
",",
"working_tree",
"=",
"True",
")",
"log",
".",
"debug",
"(",
"'[%s] Head object: %s'",
",",
"self",
".",
"name",
",",
"self",
".",
"currenthead",
")"
] | Set head to a git ref. | [
"Set",
"head",
"to",
"a",
"git",
"ref",
"."
] | python | train |
hasgeek/coaster | coaster/sqlalchemy/statemanager.py | https://github.com/hasgeek/coaster/blob/07f7eb5d5f516e22fa14fdf4dc70e0ae13ee398d/coaster/sqlalchemy/statemanager.py#L870-L878 | def transitions_for(self, roles=None, actor=None, anchors=[]):
"""
For use on :class:`~coaster.sqlalchemy.mixins.RoleMixin` classes:
returns currently available transitions for the specified
roles or actor as a dictionary of name: :class:`StateTransitionWrapper`.
"""
proxy = self.obj.access_for(roles, actor, anchors)
return {name: transition for name, transition in self.transitions(current=False).items()
if name in proxy} | [
"def",
"transitions_for",
"(",
"self",
",",
"roles",
"=",
"None",
",",
"actor",
"=",
"None",
",",
"anchors",
"=",
"[",
"]",
")",
":",
"proxy",
"=",
"self",
".",
"obj",
".",
"access_for",
"(",
"roles",
",",
"actor",
",",
"anchors",
")",
"return",
"{",
"name",
":",
"transition",
"for",
"name",
",",
"transition",
"in",
"self",
".",
"transitions",
"(",
"current",
"=",
"False",
")",
".",
"items",
"(",
")",
"if",
"name",
"in",
"proxy",
"}"
] | For use on :class:`~coaster.sqlalchemy.mixins.RoleMixin` classes:
returns currently available transitions for the specified
roles or actor as a dictionary of name: :class:`StateTransitionWrapper`. | [
"For",
"use",
"on",
":",
"class",
":",
"~coaster",
".",
"sqlalchemy",
".",
"mixins",
".",
"RoleMixin",
"classes",
":",
"returns",
"currently",
"available",
"transitions",
"for",
"the",
"specified",
"roles",
"or",
"actor",
"as",
"a",
"dictionary",
"of",
"name",
":",
":",
"class",
":",
"StateTransitionWrapper",
"."
] | python | train |
cisco-sas/kitty | kitty/model/low_level/container.py | https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/model/low_level/container.py#L709-L717 | def get_rendered_fields(self, ctx=None):
'''
:param ctx: rendering context in which the method was called
:return: ordered list of the fields that will be rendered
'''
times = self._min_times
if self._mutating() and self._in_repeat_stage():
times += (self._current_index) * self._step
return super(Repeat, self).get_rendered_fields(ctx) * times | [
"def",
"get_rendered_fields",
"(",
"self",
",",
"ctx",
"=",
"None",
")",
":",
"times",
"=",
"self",
".",
"_min_times",
"if",
"self",
".",
"_mutating",
"(",
")",
"and",
"self",
".",
"_in_repeat_stage",
"(",
")",
":",
"times",
"+=",
"(",
"self",
".",
"_current_index",
")",
"*",
"self",
".",
"_step",
"return",
"super",
"(",
"Repeat",
",",
"self",
")",
".",
"get_rendered_fields",
"(",
"ctx",
")",
"*",
"times"
] | :param ctx: rendering context in which the method was called
:return: ordered list of the fields that will be rendered | [
":",
"param",
"ctx",
":",
"rendering",
"context",
"in",
"which",
"the",
"method",
"was",
"called",
":",
"return",
":",
"ordered",
"list",
"of",
"the",
"fields",
"that",
"will",
"be",
"rendered"
] | python | train |
mozilla/treeherder | treeherder/client/thclient/perfherder.py | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/client/thclient/perfherder.py#L16-L26 | def all_valid_time_intervals():
'''
Helper method to return all possible valid time intervals for data
stored by Perfherder
'''
return [PerformanceTimeInterval.DAY,
PerformanceTimeInterval.WEEK,
PerformanceTimeInterval.TWO_WEEKS,
PerformanceTimeInterval.SIXTY_DAYS,
PerformanceTimeInterval.NINETY_DAYS,
PerformanceTimeInterval.ONE_YEAR] | [
"def",
"all_valid_time_intervals",
"(",
")",
":",
"return",
"[",
"PerformanceTimeInterval",
".",
"DAY",
",",
"PerformanceTimeInterval",
".",
"WEEK",
",",
"PerformanceTimeInterval",
".",
"TWO_WEEKS",
",",
"PerformanceTimeInterval",
".",
"SIXTY_DAYS",
",",
"PerformanceTimeInterval",
".",
"NINETY_DAYS",
",",
"PerformanceTimeInterval",
".",
"ONE_YEAR",
"]"
] | Helper method to return all possible valid time intervals for data
stored by Perfherder | [
"Helper",
"method",
"to",
"return",
"all",
"possible",
"valid",
"time",
"intervals",
"for",
"data",
"stored",
"by",
"Perfherder"
] | python | train |
googleapis/google-cloud-python | datastore/google/cloud/datastore/batch.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/datastore/google/cloud/datastore/batch.py#L159-L200 | def put(self, entity):
"""Remember an entity's state to be saved during :meth:`commit`.
.. note::
Any existing properties for the entity will be replaced by those
currently set on this instance. Already-stored properties which do
not correspond to keys set on this instance will be removed from
the datastore.
.. note::
Property values which are "text" ('unicode' in Python2, 'str' in
Python3) map to 'string_value' in the datastore; values which are
"bytes" ('str' in Python2, 'bytes' in Python3) map to 'blob_value'.
When an entity has a partial key, calling :meth:`commit` sends it as
an ``insert`` mutation and the key is completed. On return,
the key for the ``entity`` passed in is updated to match the key ID
assigned by the server.
:type entity: :class:`google.cloud.datastore.entity.Entity`
:param entity: the entity to be saved.
:raises: :class:`~exceptions.ValueError` if the batch is not in
progress, if entity has no key assigned, or if the key's
``project`` does not match ours.
"""
if self._status != self._IN_PROGRESS:
raise ValueError("Batch must be in progress to put()")
if entity.key is None:
raise ValueError("Entity must have a key")
if self.project != entity.key.project:
raise ValueError("Key must be from same project as batch")
if entity.key.is_partial:
entity_pb = self._add_partial_key_entity_pb()
self._partial_key_entities.append(entity)
else:
entity_pb = self._add_complete_key_entity_pb()
_assign_entity_to_pb(entity_pb, entity) | [
"def",
"put",
"(",
"self",
",",
"entity",
")",
":",
"if",
"self",
".",
"_status",
"!=",
"self",
".",
"_IN_PROGRESS",
":",
"raise",
"ValueError",
"(",
"\"Batch must be in progress to put()\"",
")",
"if",
"entity",
".",
"key",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Entity must have a key\"",
")",
"if",
"self",
".",
"project",
"!=",
"entity",
".",
"key",
".",
"project",
":",
"raise",
"ValueError",
"(",
"\"Key must be from same project as batch\"",
")",
"if",
"entity",
".",
"key",
".",
"is_partial",
":",
"entity_pb",
"=",
"self",
".",
"_add_partial_key_entity_pb",
"(",
")",
"self",
".",
"_partial_key_entities",
".",
"append",
"(",
"entity",
")",
"else",
":",
"entity_pb",
"=",
"self",
".",
"_add_complete_key_entity_pb",
"(",
")",
"_assign_entity_to_pb",
"(",
"entity_pb",
",",
"entity",
")"
] | Remember an entity's state to be saved during :meth:`commit`.
.. note::
Any existing properties for the entity will be replaced by those
currently set on this instance. Already-stored properties which do
not correspond to keys set on this instance will be removed from
the datastore.
.. note::
Property values which are "text" ('unicode' in Python2, 'str' in
Python3) map to 'string_value' in the datastore; values which are
"bytes" ('str' in Python2, 'bytes' in Python3) map to 'blob_value'.
When an entity has a partial key, calling :meth:`commit` sends it as
an ``insert`` mutation and the key is completed. On return,
the key for the ``entity`` passed in is updated to match the key ID
assigned by the server.
:type entity: :class:`google.cloud.datastore.entity.Entity`
:param entity: the entity to be saved.
:raises: :class:`~exceptions.ValueError` if the batch is not in
progress, if entity has no key assigned, or if the key's
``project`` does not match ours. | [
"Remember",
"an",
"entity",
"s",
"state",
"to",
"be",
"saved",
"during",
":",
"meth",
":",
"commit",
"."
] | python | train |
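`Batch.put` in the google-cloud-datastore record above queues an entity mutation and defers the write until commit, completing partial keys on return. A hedged usage sketch of the public batch API built on it; project and credentials are assumed to come from the environment:

```python
from google.cloud import datastore

client = datastore.Client()              # project/credentials come from the environment
with client.batch() as batch:            # commit() runs automatically on clean exit
    task = datastore.Entity(client.key('Task'))   # partial key: id assigned at commit
    task['description'] = 'buy milk'
    batch.put(task)
print(task.key)                          # completed key after the batch commits
```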
XuShaohua/bcloud | bcloud/IconWindow.py | https://github.com/XuShaohua/bcloud/blob/4b54e0fdccf2b3013285fef05c97354cfa31697b/bcloud/IconWindow.py#L758-L782 | def on_drag_data_received(self, widget, context, x, y, data, info, time):
'''拖放结束'''
if not data:
return
bx, by = self.iconview.convert_widget_to_bin_window_coords(x, y)
selected = Gtk.TreeView.get_path_at_pos(self.iconview, bx, by)
if not selected:
return
tree_path = selected[0]
if tree_path is None:
return
target_path = self.liststore[tree_path][PATH_COL]
is_dir = self.liststore[tree_path][ISDIR_COL]
if not is_dir or info != TargetInfo.PLAIN_TEXT:
return
filelist_str = data.get_text()
filelist = json.loads(filelist_str)
for file_item in filelist:
if file_item['path'] == target_path:
self.app.toast(_('Error: Move folder to itself!'))
return
for file_item in filelist:
file_item['dest'] = target_path
gutil.async_call(pcs.move, self.app.cookie, self.app.tokens, filelist,
callback=self.parent.reload) | [
"def",
"on_drag_data_received",
"(",
"self",
",",
"widget",
",",
"context",
",",
"x",
",",
"y",
",",
"data",
",",
"info",
",",
"time",
")",
":",
"if",
"not",
"data",
":",
"return",
"bx",
",",
"by",
"=",
"self",
".",
"iconview",
".",
"convert_widget_to_bin_window_coords",
"(",
"x",
",",
"y",
")",
"selected",
"=",
"Gtk",
".",
"TreeView",
".",
"get_path_at_pos",
"(",
"self",
".",
"iconview",
",",
"bx",
",",
"by",
")",
"if",
"not",
"selected",
":",
"return",
"tree_path",
"=",
"selected",
"[",
"0",
"]",
"if",
"tree_path",
"is",
"None",
":",
"return",
"target_path",
"=",
"self",
".",
"liststore",
"[",
"tree_path",
"]",
"[",
"PATH_COL",
"]",
"is_dir",
"=",
"self",
".",
"liststore",
"[",
"tree_path",
"]",
"[",
"ISDIR_COL",
"]",
"if",
"not",
"is_dir",
"or",
"info",
"!=",
"TargetInfo",
".",
"PLAIN_TEXT",
":",
"return",
"filelist_str",
"=",
"data",
".",
"get_text",
"(",
")",
"filelist",
"=",
"json",
".",
"loads",
"(",
"filelist_str",
")",
"for",
"file_item",
"in",
"filelist",
":",
"if",
"file_item",
"[",
"'path'",
"]",
"==",
"target_path",
":",
"self",
".",
"app",
".",
"toast",
"(",
"_",
"(",
"'Error: Move folder to itself!'",
")",
")",
"return",
"for",
"file_item",
"in",
"filelist",
":",
"file_item",
"[",
"'dest'",
"]",
"=",
"target_path",
"gutil",
".",
"async_call",
"(",
"pcs",
".",
"move",
",",
"self",
".",
"app",
".",
"cookie",
",",
"self",
".",
"app",
".",
"tokens",
",",
"filelist",
",",
"callback",
"=",
"self",
".",
"parent",
".",
"reload",
")"
] | 拖放结束 | [
"拖放结束"
] | python | train |
obulpathi/cdn-fastly-python | fastly/__init__.py | https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L436-L439 | def check_domain(self, service_id, version_number, name):
"""Checks the status of a domain's DNS record. Returns an array of 3 items. The first is the details for the domain. The second is the current CNAME of the domain. The third is a boolean indicating whether or not it has been properly setup to use Fastly."""
content = self._fetch("/service/%s/version/%d/domain/%s/check" % (service_id, version_number, name))
return FastlyDomainCheck(self, content) | [
"def",
"check_domain",
"(",
"self",
",",
"service_id",
",",
"version_number",
",",
"name",
")",
":",
"content",
"=",
"self",
".",
"_fetch",
"(",
"\"/service/%s/version/%d/domain/%s/check\"",
"%",
"(",
"service_id",
",",
"version_number",
",",
"name",
")",
")",
"return",
"FastlyDomainCheck",
"(",
"self",
",",
"content",
")"
] | Checks the status of a domain's DNS record. Returns an array of 3 items. The first is the details for the domain. The second is the current CNAME of the domain. The third is a boolean indicating whether or not it has been properly setup to use Fastly. | [
"Checks",
"the",
"status",
"of",
"a",
"domain",
"s",
"DNS",
"record",
".",
"Returns",
"an",
"array",
"of",
"3",
"items",
".",
"The",
"first",
"is",
"the",
"details",
"for",
"the",
"domain",
".",
"The",
"second",
"is",
"the",
"current",
"CNAME",
"of",
"the",
"domain",
".",
"The",
"third",
"is",
"a",
"boolean",
"indicating",
"whether",
"or",
"not",
"it",
"has",
"been",
"properly",
"setup",
"to",
"use",
"Fastly",
"."
] | python | train |
timster/peewee-validates | peewee_validates.py | https://github.com/timster/peewee-validates/blob/417f0fafb87fe9209439d65bc279d86a3d9e8028/peewee_validates.py#L886-L935 | def convert_field(self, name, field):
"""
Convert a single field from a Peewee model field to a validator field.
:param name: Name of the field as defined on this validator.
        :param field: Peewee field instance.
:return: Validator field.
"""
if PEEWEE3:
field_type = field.field_type.lower()
else:
field_type = field.db_field
pwv_field = ModelValidator.FIELD_MAP.get(field_type, StringField)
print('pwv_field', field_type, pwv_field)
validators = []
required = not bool(getattr(field, 'null', True))
choices = getattr(field, 'choices', ())
default = getattr(field, 'default', None)
max_length = getattr(field, 'max_length', None)
unique = getattr(field, 'unique', False)
if required:
validators.append(validate_required())
if choices:
print('CHOICES', choices)
validators.append(validate_one_of([c[0] for c in choices]))
if max_length:
validators.append(validate_length(high=max_length))
if unique:
validators.append(validate_model_unique(field, self.instance.select(), self.pk_field, self.pk_value))
if isinstance(field, peewee.ForeignKeyField):
if PEEWEE3:
rel_field = field.rel_field
else:
rel_field = field.to_field
return ModelChoiceField(field.rel_model, rel_field, default=default, validators=validators)
if isinstance(field, ManyToManyField):
return ManyModelChoiceField(
field.rel_model, field.rel_model._meta.primary_key,
default=default, validators=validators)
return pwv_field(default=default, validators=validators) | [
"def",
"convert_field",
"(",
"self",
",",
"name",
",",
"field",
")",
":",
"if",
"PEEWEE3",
":",
"field_type",
"=",
"field",
".",
"field_type",
".",
"lower",
"(",
")",
"else",
":",
"field_type",
"=",
"field",
".",
"db_field",
"pwv_field",
"=",
"ModelValidator",
".",
"FIELD_MAP",
".",
"get",
"(",
"field_type",
",",
"StringField",
")",
"print",
"(",
"'pwv_field'",
",",
"field_type",
",",
"pwv_field",
")",
"validators",
"=",
"[",
"]",
"required",
"=",
"not",
"bool",
"(",
"getattr",
"(",
"field",
",",
"'null'",
",",
"True",
")",
")",
"choices",
"=",
"getattr",
"(",
"field",
",",
"'choices'",
",",
"(",
")",
")",
"default",
"=",
"getattr",
"(",
"field",
",",
"'default'",
",",
"None",
")",
"max_length",
"=",
"getattr",
"(",
"field",
",",
"'max_length'",
",",
"None",
")",
"unique",
"=",
"getattr",
"(",
"field",
",",
"'unique'",
",",
"False",
")",
"if",
"required",
":",
"validators",
".",
"append",
"(",
"validate_required",
"(",
")",
")",
"if",
"choices",
":",
"print",
"(",
"'CHOICES'",
",",
"choices",
")",
"validators",
".",
"append",
"(",
"validate_one_of",
"(",
"[",
"c",
"[",
"0",
"]",
"for",
"c",
"in",
"choices",
"]",
")",
")",
"if",
"max_length",
":",
"validators",
".",
"append",
"(",
"validate_length",
"(",
"high",
"=",
"max_length",
")",
")",
"if",
"unique",
":",
"validators",
".",
"append",
"(",
"validate_model_unique",
"(",
"field",
",",
"self",
".",
"instance",
".",
"select",
"(",
")",
",",
"self",
".",
"pk_field",
",",
"self",
".",
"pk_value",
")",
")",
"if",
"isinstance",
"(",
"field",
",",
"peewee",
".",
"ForeignKeyField",
")",
":",
"if",
"PEEWEE3",
":",
"rel_field",
"=",
"field",
".",
"rel_field",
"else",
":",
"rel_field",
"=",
"field",
".",
"to_field",
"return",
"ModelChoiceField",
"(",
"field",
".",
"rel_model",
",",
"rel_field",
",",
"default",
"=",
"default",
",",
"validators",
"=",
"validators",
")",
"if",
"isinstance",
"(",
"field",
",",
"ManyToManyField",
")",
":",
"return",
"ManyModelChoiceField",
"(",
"field",
".",
"rel_model",
",",
"field",
".",
"rel_model",
".",
"_meta",
".",
"primary_key",
",",
"default",
"=",
"default",
",",
"validators",
"=",
"validators",
")",
"return",
"pwv_field",
"(",
"default",
"=",
"default",
",",
"validators",
"=",
"validators",
")"
] | Convert a single field from a Peewee model field to a validator field.
:param name: Name of the field as defined on this validator.
:param field: Peewee field instance.
:return: Validator field. | [
"Convert",
"a",
"single",
"field",
"from",
"a",
"Peewee",
"model",
"field",
"to",
"a",
"validator",
"field",
"."
] | python | train |
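A usage sketch for the validator fields built by convert_field(): validating data against a peewee model instance. The Person model and the exact error message are assumptions; the ModelValidator/validate/errors calls follow the documented peewee-validates API.

import peewee
from peewee_validates import ModelValidator

class Person(peewee.Model):
    # null=False makes convert_field() attach a required validator,
    # and max_length adds a length check.
    name = peewee.CharField(null=False, max_length=10)

validator = ModelValidator(Person())
if not validator.validate({}):
    # `name` was not supplied, so the required validator fails.
    print(validator.errors)   # e.g. {'name': 'This field is required.'}
print(validator.validate({'name': 'Ada'}))   # True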
saltstack/salt | salt/modules/opkg.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/opkg.py#L1329-L1346 | def get_repo(repo, **kwargs): # pylint: disable=unused-argument
'''
Display a repo from the ``/etc/opkg/*.conf``
CLI Examples:
.. code-block:: bash
salt '*' pkg.get_repo repo
'''
repos = list_repos()
if repos:
for source in six.itervalues(repos):
for sub in source:
if sub['name'] == repo:
return sub
return {} | [
"def",
"get_repo",
"(",
"repo",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=unused-argument",
"repos",
"=",
"list_repos",
"(",
")",
"if",
"repos",
":",
"for",
"source",
"in",
"six",
".",
"itervalues",
"(",
"repos",
")",
":",
"for",
"sub",
"in",
"source",
":",
"if",
"sub",
"[",
"'name'",
"]",
"==",
"repo",
":",
"return",
"sub",
"return",
"{",
"}"
] | Display a repo from the ``/etc/opkg/*.conf``
CLI Examples:
.. code-block:: bash
salt '*' pkg.get_repo repo | [
"Display",
"a",
"repo",
"from",
"the",
"/",
"etc",
"/",
"opkg",
"/",
"*",
".",
"conf"
] | python | train |
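A hedged, test-style sketch of how get_repo() behaves, with list_repos() mocked out so no real /etc/opkg/*.conf files are read. The shape of the fake repo entries is an assumption about what list_repos() returns; it assumes Salt is importable.

from unittest import mock
import salt.modules.opkg as opkg

fake_repos = {
    'base': [{'name': 'base', 'uri': 'http://downloads.example.org/base', 'enabled': True}],
}

with mock.patch.object(opkg, 'list_repos', return_value=fake_repos):
    assert opkg.get_repo('base')['uri'] == 'http://downloads.example.org/base'
    assert opkg.get_repo('missing') == {}   # unknown repo names fall through to {}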
apache/airflow | airflow/contrib/hooks/sagemaker_hook.py | https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L519-L572 | def describe_training_job_with_log(self, job_name, positions, stream_names,
instance_count, state, last_description,
last_describe_job_call):
"""
Return the training job info associated with job_name and print CloudWatch logs
"""
log_group = '/aws/sagemaker/TrainingJobs'
if len(stream_names) < instance_count:
# Log streams are created whenever a container starts writing to stdout/err, so this list
# may be dynamic until we have a stream for every instance.
logs_conn = self.get_log_conn()
try:
streams = logs_conn.describe_log_streams(
logGroupName=log_group,
logStreamNamePrefix=job_name + '/',
orderBy='LogStreamName',
limit=instance_count
)
stream_names = [s['logStreamName'] for s in streams['logStreams']]
positions.update([(s, Position(timestamp=0, skip=0))
for s in stream_names if s not in positions])
except logs_conn.exceptions.ResourceNotFoundException:
# On the very first training job run on an account, there's no log group until
# the container starts logging, so ignore any errors thrown about that
pass
if len(stream_names) > 0:
for idx, event in self.multi_stream_iter(log_group, stream_names, positions):
self.log.info(event['message'])
ts, count = positions[stream_names[idx]]
if event['timestamp'] == ts:
positions[stream_names[idx]] = Position(timestamp=ts, skip=count + 1)
else:
positions[stream_names[idx]] = Position(timestamp=event['timestamp'], skip=1)
if state == LogState.COMPLETE:
return state, last_description, last_describe_job_call
if state == LogState.JOB_COMPLETE:
state = LogState.COMPLETE
elif time.time() - last_describe_job_call >= 30:
description = self.describe_training_job(job_name)
last_describe_job_call = time.time()
if secondary_training_status_changed(description, last_description):
self.log.info(secondary_training_status_message(description, last_description))
last_description = description
status = description['TrainingJobStatus']
if status not in self.non_terminal_states:
state = LogState.JOB_COMPLETE
return state, last_description, last_describe_job_call | [
"def",
"describe_training_job_with_log",
"(",
"self",
",",
"job_name",
",",
"positions",
",",
"stream_names",
",",
"instance_count",
",",
"state",
",",
"last_description",
",",
"last_describe_job_call",
")",
":",
"log_group",
"=",
"'/aws/sagemaker/TrainingJobs'",
"if",
"len",
"(",
"stream_names",
")",
"<",
"instance_count",
":",
"# Log streams are created whenever a container starts writing to stdout/err, so this list",
"# may be dynamic until we have a stream for every instance.",
"logs_conn",
"=",
"self",
".",
"get_log_conn",
"(",
")",
"try",
":",
"streams",
"=",
"logs_conn",
".",
"describe_log_streams",
"(",
"logGroupName",
"=",
"log_group",
",",
"logStreamNamePrefix",
"=",
"job_name",
"+",
"'/'",
",",
"orderBy",
"=",
"'LogStreamName'",
",",
"limit",
"=",
"instance_count",
")",
"stream_names",
"=",
"[",
"s",
"[",
"'logStreamName'",
"]",
"for",
"s",
"in",
"streams",
"[",
"'logStreams'",
"]",
"]",
"positions",
".",
"update",
"(",
"[",
"(",
"s",
",",
"Position",
"(",
"timestamp",
"=",
"0",
",",
"skip",
"=",
"0",
")",
")",
"for",
"s",
"in",
"stream_names",
"if",
"s",
"not",
"in",
"positions",
"]",
")",
"except",
"logs_conn",
".",
"exceptions",
".",
"ResourceNotFoundException",
":",
"# On the very first training job run on an account, there's no log group until",
"# the container starts logging, so ignore any errors thrown about that",
"pass",
"if",
"len",
"(",
"stream_names",
")",
">",
"0",
":",
"for",
"idx",
",",
"event",
"in",
"self",
".",
"multi_stream_iter",
"(",
"log_group",
",",
"stream_names",
",",
"positions",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"event",
"[",
"'message'",
"]",
")",
"ts",
",",
"count",
"=",
"positions",
"[",
"stream_names",
"[",
"idx",
"]",
"]",
"if",
"event",
"[",
"'timestamp'",
"]",
"==",
"ts",
":",
"positions",
"[",
"stream_names",
"[",
"idx",
"]",
"]",
"=",
"Position",
"(",
"timestamp",
"=",
"ts",
",",
"skip",
"=",
"count",
"+",
"1",
")",
"else",
":",
"positions",
"[",
"stream_names",
"[",
"idx",
"]",
"]",
"=",
"Position",
"(",
"timestamp",
"=",
"event",
"[",
"'timestamp'",
"]",
",",
"skip",
"=",
"1",
")",
"if",
"state",
"==",
"LogState",
".",
"COMPLETE",
":",
"return",
"state",
",",
"last_description",
",",
"last_describe_job_call",
"if",
"state",
"==",
"LogState",
".",
"JOB_COMPLETE",
":",
"state",
"=",
"LogState",
".",
"COMPLETE",
"elif",
"time",
".",
"time",
"(",
")",
"-",
"last_describe_job_call",
">=",
"30",
":",
"description",
"=",
"self",
".",
"describe_training_job",
"(",
"job_name",
")",
"last_describe_job_call",
"=",
"time",
".",
"time",
"(",
")",
"if",
"secondary_training_status_changed",
"(",
"description",
",",
"last_description",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"secondary_training_status_message",
"(",
"description",
",",
"last_description",
")",
")",
"last_description",
"=",
"description",
"status",
"=",
"description",
"[",
"'TrainingJobStatus'",
"]",
"if",
"status",
"not",
"in",
"self",
".",
"non_terminal_states",
":",
"state",
"=",
"LogState",
".",
"JOB_COMPLETE",
"return",
"state",
",",
"last_description",
",",
"last_describe_job_call"
] | Return the training job info associated with job_name and print CloudWatch logs | [
"Return",
"the",
"training",
"job",
"info",
"associated",
"with",
"job_name",
"and",
"print",
"CloudWatch",
"logs"
] | python | test |
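A hedged sketch of the polling loop a caller would wrap around describe_training_job_with_log(), assuming SageMakerHook and LogState from this module; the connection id, job name, and 30-second cadence are illustrative.

import time
from airflow.contrib.hooks.sagemaker_hook import SageMakerHook, LogState

hook = SageMakerHook(aws_conn_id='aws_default')   # assumed Airflow connection
job_name = 'my-training-job'                      # hypothetical job name

description = hook.describe_training_job(job_name)
instance_count = description['ResourceConfig']['InstanceCount']
state, positions, stream_names = LogState.TAILING, {}, []
last_describe_job_call = time.time()

# Keep tailing CloudWatch logs until the job reaches a terminal state.
while state != LogState.COMPLETE:
    time.sleep(30)
    state, description, last_describe_job_call = hook.describe_training_job_with_log(
        job_name, positions, stream_names, instance_count,
        state, description, last_describe_job_call)

print(description['TrainingJobStatus'])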
thunlp/THULAC-Python | thulac/__init__.py | https://github.com/thunlp/THULAC-Python/blob/3f1f126cd92c3d2aebdf4ab4850de3c9428a3b66/thulac/__init__.py#L208-L227 | def __cutRaw(self, oiraw, maxLength):
'''现将句子按句子完结符号切分,如果切分完后一个句子长度超过限定值
,再对该句子进行切分'''
vec = []
m = re.findall(u".*?[。?!;;!?]", oiraw)
num, l, last = 0, 0, 0
for i in range(len(m)):
if(num + len(m[i]) >= maxLength):
vec.append("".join(m[last:i]))
last = i
num = len(m[i])
else:
num += len(m[i])
l += len(m[i])
if(len(oiraw)-l + num >= maxLength):
vec.append("".join(m[last:len(m)]))
vec.append(oiraw[l:])
else:
vec.append(oiraw[l-num:])
return vec | [
"def",
"__cutRaw",
"(",
"self",
",",
"oiraw",
",",
"maxLength",
")",
":",
"vec",
"=",
"[",
"]",
"m",
"=",
"re",
".",
"findall",
"(",
"u\".*?[。?!;;!?]\", oiraw)",
"",
"",
"",
"num",
",",
"l",
",",
"last",
"=",
"0",
",",
"0",
",",
"0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"m",
")",
")",
":",
"if",
"(",
"num",
"+",
"len",
"(",
"m",
"[",
"i",
"]",
")",
">=",
"maxLength",
")",
":",
"vec",
".",
"append",
"(",
"\"\"",
".",
"join",
"(",
"m",
"[",
"last",
":",
"i",
"]",
")",
")",
"last",
"=",
"i",
"num",
"=",
"len",
"(",
"m",
"[",
"i",
"]",
")",
"else",
":",
"num",
"+=",
"len",
"(",
"m",
"[",
"i",
"]",
")",
"l",
"+=",
"len",
"(",
"m",
"[",
"i",
"]",
")",
"if",
"(",
"len",
"(",
"oiraw",
")",
"-",
"l",
"+",
"num",
">=",
"maxLength",
")",
":",
"vec",
".",
"append",
"(",
"\"\"",
".",
"join",
"(",
"m",
"[",
"last",
":",
"len",
"(",
"m",
")",
"]",
")",
")",
"vec",
".",
"append",
"(",
"oiraw",
"[",
"l",
":",
"]",
")",
"else",
":",
"vec",
".",
"append",
"(",
"oiraw",
"[",
"l",
"-",
"num",
":",
"]",
")",
"return",
"vec"
] | 现将句子按句子完结符号切分,如果切分完后一个句子长度超过限定值
,再对该句子进行切分 | [
"现将句子按句子完结符号切分,如果切分完后一个句子长度超过限定值",
",再对该句子进行切分"
] | python | train |
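A standalone sketch of the same chunking idea as __cutRaw(): split raw text at sentence-final punctuation, then regroup the sentences into chunks no longer than max_length. The names are illustrative; this is not the THULAC implementation itself.

# -*- coding: utf-8 -*-
import re

def cut_raw(text, max_length):
    # Sentences ending with one of the terminators used above.
    sentences = re.findall(u".*?[。?!;;!?]", text)
    tail = text[sum(len(s) for s in sentences):]   # trailing text without a terminator
    if tail:
        sentences.append(tail)
    chunks, current = [], ""
    for sentence in sentences:
        if current and len(current) + len(sentence) > max_length:
            chunks.append(current)
            current = sentence
        else:
            current += sentence
    if current:
        chunks.append(current)
    return chunks

print(cut_raw(u"今天天气很好。我们去公园散步吧!回家之后再写代码。", 12))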