repo (string, 7–54 chars) | path (string, 4–192 chars) | url (string, 87–284 chars) | code (string, 78–104k chars) | code_tokens (list) | docstring (string, 1–46.9k chars) | docstring_tokens (list) | language (1 class) | partition (3 classes) |
---|---|---|---|---|---|---|---|---|
Yubico/python-pyhsm
|
pyhsm/base.py
|
https://github.com/Yubico/python-pyhsm/blob/b6e2744d1ea15c352a0fc1d6ebc5950026b71311/pyhsm/base.py#L505-L522
|
def aes_ecb_encrypt(self, key_handle, plaintext):
"""
AES ECB encrypt using a key handle.
@warning: Please be aware of the known limitations of AES ECB mode before using it!
@param key_handle: Key handle to use for AES ECB encryption
@param plaintext: Data to encrypt
@type key_handle: integer or string
@type plaintext: string
@returns: Ciphertext
@rtype: string
@see: L{pyhsm.aes_ecb_cmd.YHSM_Cmd_AES_ECB_Encrypt}
"""
return pyhsm.aes_ecb_cmd.YHSM_Cmd_AES_ECB_Encrypt( \
self.stick, key_handle, plaintext).execute()
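# Illustrative usage sketch (not part of the original sample; the device
# path and key handle below are hypothetical):
#   hsm = pyhsm.base.YHSM(device='/dev/ttyACM0')
#   ciphertext = hsm.aes_ecb_encrypt(0x1234, 'A' * 16)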
|
(code_tokens column omitted: it is a tokenization of the code above) |
AES ECB encrypt using a key handle.
@warning: Please be aware of the known limitations of AES ECB mode before using it!
@param key_handle: Key handle to use for AES ECB encryption
@param plaintext: Data to encrypt
@type key_handle: integer or string
@type plaintext: string
@returns: Ciphertext
@rtype: string
@see: L{pyhsm.aes_ecb_cmd.YHSM_Cmd_AES_ECB_Encrypt}
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
train
|
bwohlberg/sporco
|
sporco/admm/ccmod.py
|
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/ccmod.py#L806-L819
|
def xistep(self, i):
r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`
component :math:`\mathbf{x}_i`.
"""
self.YU[:] = self.Y - self.U[..., i]
b = np.take(self.ZSf, [i], axis=self.cri.axisK) + \
self.rho*sl.rfftn(self.YU, None, self.cri.axisN)
self.Xf[..., i] = sl.solvedbi_sm(np.take(
self.Zf, [i], axis=self.cri.axisK),
self.rho, b, axis=self.cri.axisM)
self.X[..., i] = sl.irfftn(self.Xf[..., i], self.cri.Nv,
self.cri.axisN)
|
(code_tokens column omitted: it is a tokenization of the code above) |
r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`
component :math:`\mathbf{x}_i`.
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
train
|
celliern/triflow
|
triflow/core/fields.py
|
https://github.com/celliern/triflow/blob/9522de077c43c8af7d4bf08fbd4ce4b7b480dccb/triflow/core/fields.py#L80-L105
|
def factory1D(dependent_variables,
helper_functions):
"""Fields factory generating specialized container build around a
triflow Model and xarray.
Wrapper for 1D data.
Parameters
----------
dependent_variables : iterable of str
name of the dependent variables
helper_functions : iterable of str
name of the helper functions
Returns
-------
triflow.BaseFields
Specialized container which exposes the data as a structured
numpy array
"""
return BaseFields.factory(("x", ),
[(name, ("x", ))
for name
in dependent_variables],
[(name, ("x", ))
for name
in helper_functions],)
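# Illustrative usage sketch (hypothetical field names, not part of the
# original sample): a 1D container with two dependent variables, one helper.
#   fields = factory1D(['U', 'V'], ['k'])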
|
(code_tokens column omitted: it is a tokenization of the code above) |
Fields factory generating a specialized container built around a
triflow Model and xarray.
Wrapper for 1D data.
Parameters
----------
dependent_variables : iterable of str
name of the dependent variables
helper_functions : iterable of str
name of the helper functions
Returns
-------
triflow.BaseFields
Specialized container which exposes the data as a structured
numpy array
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
train
|
sernst/cauldron
|
cauldron/cli/threads.py
|
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/cli/threads.py#L51-L83
|
def run(self):
"""
Executes the Cauldron command in a thread to prevent long-running
computations from locking the main Cauldron thread, which is needed
to serve and print status information.
"""
async def run_command():
try:
self.result = self.command(
context=self.context,
**self.kwargs
)
except Exception as error:
self.exception = error
print(error)
import traceback
traceback.print_exc()
import sys
self.context.response.fail(
code='COMMAND_EXECUTION_ERROR',
message='Failed to execute command due to internal error',
error=error
).console(
whitespace=1
)
self._has_started = True
self._loop = asyncio.new_event_loop()
self._loop.run_until_complete(run_command())
self._loop.close()
self._loop = None
self.completed_at = datetime.utcnow()
|
(code_tokens column omitted: it is a tokenization of the code above) |
Executes the Cauldron command in a thread to prevent long-running
computations from locking the main Cauldron thread, which is needed
to serve and print status information.
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
train
|
nilp0inter/cpe
|
cpe/cpe2_3_uri.py
|
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpe2_3_uri.py#L254-L343
|
def _parse(self):
"""
Checks if the CPE Name is valid.
:returns: None
:exception: ValueError - bad-formed CPE Name
"""
# CPE Name must not have whitespaces
if (self._str.find(" ") != -1):
msg = "Bad-formed CPE Name: it must not have whitespaces"
raise ValueError(msg)
# Partitioning of CPE Name
parts_match = CPE2_3_URI._parts_rxc.match(self._str)
# Validation of CPE Name parts
if (parts_match is None):
msg = "Bad-formed CPE Name: validation of parts failed"
raise ValueError(msg)
components = dict()
edition_parts = dict()
for ck in CPEComponent.CPE_COMP_KEYS:
value = parts_match.group(ck)
try:
if (ck == CPEComponent.ATT_EDITION and value is not None):
if value[0] == CPEComponent2_3_URI.SEPARATOR_PACKED_EDITION:
# Unpack the edition part
edition_parts = CPE2_3_URI._unpack_edition(value)
else:
comp = CPE2_3_URI._create_component(ck, value)
else:
comp = CPE2_3_URI._create_component(ck, value)
except ValueError:
errmsg = "Bad-formed CPE Name: not correct value '{0}'".format(
value)
raise ValueError(errmsg)
else:
components[ck] = comp
components = dict(components, **edition_parts)
# Adds the components of version 2.3 of CPE not defined in version 2.2
for ck2 in CPEComponent.CPE_COMP_KEYS_EXTENDED:
if ck2 not in components.keys():
components[ck2] = CPEComponentUndefined()
# Exchange the undefined values in middle attributes of CPE Name for
# logical value ANY
check_change = True
# Start in the last attribute specified in CPE Name
for ck in CPEComponent.CPE_COMP_KEYS[::-1]:
if ck in components:
comp = components[ck]
if check_change:
check_change = ((ck != CPEComponent.ATT_EDITION) and
(comp == CPEComponentUndefined()) or
(ck == CPEComponent.ATT_EDITION and
(len(edition_parts) == 0)))
elif comp == CPEComponentUndefined():
comp = CPEComponentAnyValue()
components[ck] = comp
# Storage of CPE Name
part_comp = components[CPEComponent.ATT_PART]
if isinstance(part_comp, CPEComponentLogical):
elements = []
elements.append(components)
self[CPE.KEY_UNDEFINED] = elements
else:
# Create internal structure of CPE Name in parts:
# one of them is filled with identified components,
# the rest are empty
system = parts_match.group(CPEComponent.ATT_PART)
if system in CPEComponent.SYSTEM_VALUES:
self._create_cpe_parts(system, components)
else:
self._create_cpe_parts(CPEComponent.VALUE_PART_UNDEFINED,
components)
# Fills the empty parts of internal structure of CPE Name
for pk in CPE.CPE_PART_KEYS:
if pk not in self.keys():
# Empty part
self[pk] = []
|
(code_tokens column omitted: it is a tokenization of the code above) |
Checks if the CPE Name is valid.
:returns: None
:exception: ValueError - bad-formed CPE Name
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
train
|
Crunch-io/crunch-cube
|
src/cr/cube/crunch_cube.py
|
https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/crunch_cube.py#L1084-L1114
|
def _is_axis_allowed(self, axis):
"""Check if axis are allowed.
In case the calculation is requested over CA items dimension, it is not
valid. It's valid in all other cases.
"""
if axis is None:
# If table direction was requested, we must ensure that each slice
# doesn't have the CA items dimension (thus the [-2:] part). It's
# OK for the 0th dimension to be items, since no calculation is
# performed over it.
if DT.CA_SUBVAR in self.dim_types[-2:]:
return False
return True
if isinstance(axis, int):
if self.ndim == 1 and axis == 1:
# Special allowed case of a 1D cube, where "row"
# directions is requested.
return True
axis = [axis]
# ---axis is a tuple---
for dim_idx in axis:
if self.dim_types[dim_idx] == DT.CA_SUBVAR:
# If any of the directions explicitly asked for directly
# corresponds to the CA items dimension, the requested
# calculation is not valid.
return False
return True
|
(code_tokens column omitted: it is a tokenization of the code above) |
Check whether the requested axis is allowed.
In case the calculation is requested over the CA items dimension, it is
not valid. It's valid in all other cases.
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
train
|
albertz/py_better_exchook
|
better_exchook.py
|
https://github.com/albertz/py_better_exchook/blob/3d524a027d7fc4e83e47e39a1978849561da69b3/better_exchook.py#L1405-L1431
|
def _StackSummary_extract(frame_gen, limit=None, lookup_lines=True, capture_locals=False):
"""
Replacement for :func:`StackSummary.extract`.
Create a StackSummary from a traceback or stack object.
Very simplified copy of the original StackSummary.extract().
We always want to capture locals; that is why we overwrite it.
Additionally, we also capture the frame.
This is a bit hacky and not how it was originally intended (the original intent was to not keep refs).
:param frame_gen: A generator that yields (frame, lineno) tuples to
include in the stack.
:param limit: None to include all frames or the number of frames to
include.
:param lookup_lines: If True, lookup lines for each frame immediately,
otherwise lookup is deferred until the frame is rendered.
:param capture_locals: If True, the local variables from each frame will
be captured as object representations into the FrameSummary.
"""
result = StackSummary()
for f, lineno in frame_gen:
co = f.f_code
filename = co.co_filename
name = co.co_name
result.append(ExtendedFrameSummary(
frame=f, filename=filename, lineno=lineno, name=name, lookup_line=False))
return result
|
(code_tokens column omitted: it is a tokenization of the code above) |
Replacement for :func:`StackSummary.extract`.
Create a StackSummary from a traceback or stack object.
Very simplified copy of the original StackSummary.extract().
We always want to capture locals; that is why we overwrite it.
Additionally, we also capture the frame.
This is a bit hacky and not how it was originally intended (the original intent was to not keep refs).
:param frame_gen: A generator that yields (frame, lineno) tuples to
include in the stack.
:param limit: None to include all frames or the number of frames to
include.
:param lookup_lines: If True, lookup lines for each frame immediately,
otherwise lookup is deferred until the frame is rendered.
:param capture_locals: If True, the local variables from each frame will
be captured as object representations into the FrameSummary.
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
train
|
contentful/contentful-management.py
|
contentful_management/content_type_field_validation.py
|
https://github.com/contentful/contentful-management.py/blob/707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0/contentful_management/content_type_field_validation.py#L28-L36
|
def to_json(self):
"""
Returns the JSON Representation of the content type field validation.
"""
result = {}
for k, v in self._data.items():
result[camel_case(k)] = v
return result
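# Illustrative behaviour (hypothetical data, not part of the original
# sample): with self._data == {'link_content_type': ['blogPost']},
# to_json() returns {'linkContentType': ['blogPost']} after camel_case()
# renames the key.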
|
(code_tokens column omitted: it is a tokenization of the code above) |
Returns the JSON Representation of the content type field validation.
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
train
|
helixyte/everest
|
everest/traversal.py
|
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/traversal.py#L125-L139
|
def get_matching(self, source_id):
"""
Returns a matching target object for the given source ID.
"""
value = self._accessor.get_by_id(source_id)
if value is not None:
reg = get_current_registry()
prx_fac = reg.getUtility(IDataTraversalProxyFactory)
prx = prx_fac.make_proxy(value,
self._accessor,
self.relationship_direction,
self.relation_operation)
else:
prx = None
return prx
|
(code_tokens column omitted: it is a tokenization of the code above) |
Returns a matching target object for the given source ID.
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
train
|
jkwill87/mapi
|
mapi/endpoints.py
|
https://github.com/jkwill87/mapi/blob/730bf57c12aecaf49e18c15bf2b35af7f554b3cc/mapi/endpoints.py#L69-L78
|
def _clean_dict(target_dict, whitelist=None):
""" Convenience function that removes a dicts keys that have falsy values
"""
assert isinstance(target_dict, dict)
return {
ustr(k).strip(): ustr(v).strip()
for k, v in target_dict.items()
if v not in (None, Ellipsis, [], (), "")
and (not whitelist or k in whitelist)
}
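# Illustrative call (hypothetical values, not part of the original sample;
# result follows directly from the comprehension above):
#   _clean_dict({'a': ' x ', 'b': None, 'c': ''}, whitelist={'a', 'b'})
#   # -> {'a': 'x'}  (falsy values dropped, keys and values stripped)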
|
(code_tokens column omitted: it is a tokenization of the code above) |
Convenience function that removes a dict's keys that have falsy values
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
train
|
echinopsii/net.echinopsii.ariane.community.cli.python3
|
ariane_clip3/injector.py
|
https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3/blob/0a7feddebf66fee4bef38d64f456d93a7e9fcd68/ariane_clip3/injector.py#L706-L732
|
def remove(self):
"""
Remove this component from the Ariane server cache, and stop the on-demand refresh and the actor linked to this component.
:return:
"""
LOGGER.debug("InjectorCachedComponent.remove")
ret = True
args = {'properties': {'OPERATION': 'DEL_COMPONENT_FROM_CACHE',
'REMOTE_COMPONENT':
str(self.injector_component_2_json(properties_only=True)).replace("'", '"'),
'CACHE_ID': InjectorCachedComponentService.cache_id}}
result = InjectorCachedComponentService.requester.call(args).get()
if result.rc != 0:
err_msg = 'InjectorCachedComponent.remove - Problem while saving component ( id : ' + self.id + \
          ' ) Reason: ' + str(result.response_content) + '-' + str(result.error_message) + \
" (" + str(result.rc) + ")"
LOGGER.warning(err_msg)
ret = False
if self.service is not None and self.service.is_started:
self.service.stop()
if self.actor_ref is not None:
self.stop()
return ret
|
(code_tokens column omitted: it is a tokenization of the code above) |
Remove this component from the Ariane server cache, and stop the on-demand refresh and the actor linked to this component.
:return:
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
train
|
noirbizarre/django-eztables
|
eztables/views.py
|
https://github.com/noirbizarre/django-eztables/blob/347e74dcc08121d20f4cf942181d873dbe33b995/eztables/views.py#L126-L144
|
def global_search(self, queryset):
'''Filter a queryset with global search'''
search = self.dt_data['sSearch']
if search:
if self.dt_data['bRegex']:
criterions = [
Q(**{'%s__iregex' % field: search})
for field in self.get_db_fields()
if self.can_regex(field)
]
if len(criterions) > 0:
search = reduce(or_, criterions)
queryset = queryset.filter(search)
else:
for term in search.split():
criterions = (Q(**{'%s__icontains' % field: term}) for field in self.get_db_fields())
search = reduce(or_, criterions)
queryset = queryset.filter(search)
return queryset
|
(code_tokens column omitted: it is a tokenization of the code above) |
Filter a queryset with global search
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
train
|
ejeschke/ginga
|
ginga/rv/plugins/SaveImage.py
|
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/SaveImage.py#L329-L370
|
def _write_history(self, pfx, hdu, linechar=60, indentchar=2):
"""Write change history to given HDU header.
Limit each HISTORY line to given number of characters.
Subsequent lines of the same history will be indented.
"""
channel = self.fv.get_channel(self.chname)
if channel is None:
return
history_plgname = 'ChangeHistory'
try:
history_obj = self.fv.gpmon.getPlugin(history_plgname)
except Exception:
self.logger.error(
'{0} plugin is not loaded. No HISTORY will be written to '
'{1}.'.format(history_plgname, pfx))
return
if channel.name not in history_obj.name_dict:
self.logger.error(
'{0} channel not found in {1}. No HISTORY will be written to '
'{2}.'.format(channel.name, history_plgname, pfx))
return
file_dict = history_obj.name_dict[channel.name]
chistory = []
ind = ' ' * indentchar
# NOTE: List comprehension too slow!
for key in file_dict:
if not key.startswith(pfx):
continue
for bnch in file_dict[key].values():
chistory.append('{0} {1}'.format(bnch.MODIFIED, bnch.DESCRIP))
# Add each HISTORY prettily into header, sorted by timestamp
for s in sorted(chistory):
for i in range(0, len(s), linechar):
subs = s[i:i + linechar]
if i > 0:
subs = ind + subs.lstrip()
hdu.header.add_history(subs)
|
(code_tokens column omitted: it is a tokenization of the code above) |
Write change history to given HDU header.
Limit each HISTORY line to given number of characters.
Subsequent lines of the same history will be indented.
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
train
|
matthewdeanmartin/find_known_secrets
|
build.py
|
https://github.com/matthewdeanmartin/find_known_secrets/blob/f25735c1ab4512bad85ade33af7021f6fac1d13b/build.py#L332-L351
|
def dead_code():
"""
This also finds code you are working on today!
"""
with safe_cd(SRC):
if IS_TRAVIS:
command = "{0} vulture {1}".format(PYTHON, PROJECT_NAME).strip().split()
else:
command = "{0} vulture {1}".format(PIPENV, PROJECT_NAME).strip().split()
output_file_name = "dead_code.txt"
with open(output_file_name, "w") as outfile:
env = config_pythonpath()
subprocess.call(command, stdout=outfile, env=env)
cutoff = 20
num_lines = sum(1 for line in open(output_file_name) if line)
if num_lines > cutoff:
print("Too many lines of dead code : {0}, max {1}".format(num_lines, cutoff))
exit(-1)
|
(code_tokens column omitted: it is a tokenization of the code above) |
This also finds code you are working on today!
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
test
|
fprimex/zdesk
|
zdesk/zdesk_api.py
|
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L1295-L1299
|
def group_memberships_assignable(self, group_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/group_memberships#list-assignable-memberships"
api_path = "/api/v2/groups/{group_id}/memberships/assignable.json"
api_path = api_path.format(group_id=group_id)
return self.call(api_path, **kwargs)
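# Illustrative usage sketch (hypothetical credentials and group id, not
# part of the original sample):
#   zd = Zendesk('https://example.zendesk.com', 'me@example.com', 'tok', True)
#   memberships = zd.group_memberships_assignable(360000000001)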
|
(code_tokens column omitted: it is a tokenization of the code above) |
https://developer.zendesk.com/rest_api/docs/core/group_memberships#list-assignable-memberships
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
train
|
jwodder/doapi
|
doapi/doapi.py
|
https://github.com/jwodder/doapi/blob/b1306de86a01d8ae7b9c1fe2699765bb82e4f310/doapi/doapi.py#L493-L522
|
def wait_actions(self, actions, wait_interval=None, wait_time=None):
r"""
Poll the server periodically until all actions in ``actions`` have
either completed or errored out, yielding each `Action`'s final value
as it ends.
If ``wait_time`` is exceeded, a `WaitTimeoutError` (containing any
remaining in-progress actions) is raised.
If a `KeyboardInterrupt` is caught, any remaining actions are returned
immediately without waiting for completion.
.. versionchanged:: 0.2.0
Raises `WaitTimeoutError` on timeout
:param iterable actions: an iterable of `Action`\ s and/or other values
that are acceptable arguments to :meth:`fetch_action`
:param number wait_interval: how many seconds to sleep between
requests; defaults to :attr:`wait_interval` if not specified or
`None`
:param number wait_time: the total number of seconds after which the
method will raise an error if any actions have not yet completed,
or a negative number to wait indefinitely; defaults to
:attr:`wait_time` if not specified or `None`
:rtype: generator of `Action`\ s
:raises DOAPIError: if the API endpoint replies with an error
:raises WaitTimeoutError: if ``wait_time`` is exceeded
"""
return self._wait(map(self._action, actions), "done", True,
wait_interval, wait_time)
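# Illustrative usage sketch (hypothetical `client` doapi instance and
# `pending` action list, not part of the original sample):
#   for act in client.wait_actions(pending, wait_interval=5, wait_time=300):
#       print(act.id, act.status)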
|
(code_tokens column omitted: it is a tokenization of the code above) |
r"""
Poll the server periodically until all actions in ``actions`` have
either completed or errored out, yielding each `Action`'s final value
as it ends.
If ``wait_time`` is exceeded, a `WaitTimeoutError` (containing any
remaining in-progress actions) is raised.
If a `KeyboardInterrupt` is caught, any remaining actions are returned
immediately without waiting for completion.
.. versionchanged:: 0.2.0
Raises `WaitTimeoutError` on timeout
:param iterable actions: an iterable of `Action`\ s and/or other values
that are acceptable arguments to :meth:`fetch_action`
:param number wait_interval: how many seconds to sleep between
requests; defaults to :attr:`wait_interval` if not specified or
`None`
:param number wait_time: the total number of seconds after which the
method will raise an error if any actions have not yet completed,
or a negative number to wait indefinitely; defaults to
:attr:`wait_time` if not specified or `None`
:rtype: generator of `Action`\ s
:raises DOAPIError: if the API endpoint replies with an error
:raises WaitTimeoutError: if ``wait_time`` is exceeded
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
train
|
metagriffin/globre
|
globre/__init__.py
|
https://github.com/metagriffin/globre/blob/d4b0ffb352b0b7d5e221d2357d4094e390d4fbeb/globre/__init__.py#L120-L216
|
def compile(pattern, flags=0, sep=None, split_prefix=False):
'''
Converts a glob-matching pattern (using Apache Cocoon style rules)
to a regular expression, which basically means that the following
characters have special meanings:
* ``?``: matches any single character excluding the separator character
* ``*``: matches zero or more characters excluding the separator character
* ``**``: matches zero or more characters including the separator character
* ``\``: escape character used to precede any of the others for a literal
* ``[...]``: matches any character in the specified regex-style range
* ``{...}``: inlines a regex expression
:Parameters:
sep : str; default: "/"
The `sep` parameter specifies the hierarchical path component
separator to use. By default, it uses the unix-style forward-slash
separator (``"/"``), but can be overridden to be a sequence of
alternative valid hierarchical path component separator characters.
Note that although `sep` *could* be set to both forward- and back-
slashes (i.e. ``"/\\"``) to, theoretically, support both unix- and
windows-style path components, this has the significant flaw that
then *both* characters can be used within the same path as
separators.
flags : int; default: 0
The `flags` bit mask can contain all the standard `re` flags, in
addition to the ``globre.EXACT`` flag. If EXACT is set, then the
returned regex will include a leading '^' and trailing '$', meaning
that the regex must match the entire string, from beginning to end.
split_prefix : bool; default: false
If `split_prefix` is truthy, the return value becomes a tuple with
the first element set to any initial non-wildcarded string found in
the pattern. The second element remains the regex object as before.
For example, the pattern ``foo/**.ini`` would result in a tuple
equivalent to ``('foo/', re.compile('foo/.*\\.ini'))``.
'''
prefix = None
expr = ''
if sep is None:
sep = '/'
if not sep:
raise TypeError('invalid parameter "sep" value: %r' % (sep,))
if set(sep) & set(SPECIAL_CHARS):
TypeError('parameter "sep" cannot contain any of %r' % (SPECIAL_CHARS,))
if len(sep) == 1:
literal = re.escape
else:
def make_literal(sep):
sep = '[' + re.escape(sep) + ']'
sepcre = re.compile(sep)
def _literal(text):
return sep.join(sepcre.split(text))
return _literal
literal = make_literal(sep)
if sep != '/':
sep = re.escape(sep)
for token in Tokenizer(pattern).tokens():
if split_prefix and expr == '':
prefix = token[1] if token[0] == Tokenizer.LITERAL else ''
if token[0] == Tokenizer.LITERAL:
expr += literal(token[1])
elif token[0] == Tokenizer.SINGLE:
expr += '[^' + sep + ']'
elif token[0] == Tokenizer.MULTIPLE:
expr += '[^' + sep + ']*?'
elif token[0] == Tokenizer.ANY:
expr += '.*?'
elif token[0] == Tokenizer.RANGE:
expr += '[' + token[1] + ']'
elif token[0] == Tokenizer.REGEX:
expr += token[1]
else:
raise ValueError('unexpected token %r from globre.Tokenizer for glob: %s'
                 % (token, pattern))
if flags & EXACT:
if not expr.startswith('^'):
expr = '^' + expr
# todo: technically, the last "$" *could* be escaped and therefore
# an extra "$" would need to be added... but that is very unlikely.
if not expr.endswith('$'):
expr += '$'
expr = re.compile(expr, flags=flags & ~ EXACT)
if prefix is not None:
return (prefix, expr)
return expr
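# Illustrative usage, based on the docstring's own ``foo/**.ini`` example
# (the EXACT flag and split_prefix are documented above):
#   prefix, rex = compile('foo/**.ini', flags=EXACT, split_prefix=True)
#   # prefix == 'foo/'; rex.match('foo/bar/app.ini') succeeds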
|
(code_tokens column omitted: it is a tokenization of the code above) |
Converts a glob-matching pattern (using Apache Cocoon style rules)
to a regular expression, which basically means that the following
characters have special meanings:
* ``?``: matches any single character excluding the separator character
* ``*``: matches zero or more characters excluding the separator character
* ``**``: matches zero or more characters including the separator character
* ``\``: escape character used to precede any of the others for a literal
* ``[...]``: matches any character in the specified regex-style range
* ``{...}``: inlines a regex expression
:Parameters:
sep : str; default: "/"
The `sep` parameter specifies the hierarchical path component
separator to use. By default, it uses the unix-style forward-slash
separator (``"/"``), but can be overridden to be a sequence of
alternative valid hierarchical path component separator characters.
Note that although `sep` *could* be set to both forward- and back-
slashes (i.e. ``"/\\"``) to, theoretically, support both unix- and
windows-style path components, this has the significant flaw that
then *both* characters can be used within the same path as
separators.
flags : int; default: 0
The `flags` bit mask can contain all the standard `re` flags, in
addition to the ``globre.EXACT`` flag. If EXACT is set, then the
returned regex will include a leading '^' and trailing '$', meaning
that the regex must match the entire string, from beginning to end.
split_prefix : bool; default: false
If `split_prefix` is truthy, the return value becomes a tuple with
the first element set to any initial non-wildcarded string found in
the pattern. The second element remains the regex object as before.
For example, the pattern ``foo/**.ini`` would result in a tuple
equivalent to ``('foo/', re.compile('foo/.*\\.ini'))``.
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
train
|
divio/aldryn-apphooks-config
|
aldryn_apphooks_config/utils.py
|
https://github.com/divio/aldryn-apphooks-config/blob/5b8dfc7516982a8746fc08cf919c6ab116335d62/aldryn_apphooks_config/utils.py#L88-L96
|
def get_apphook_configs(obj):
"""
Get apphook configs for an object obj
:param obj: any model instance
:return: list of apphook configs for given obj
"""
keys = get_apphook_field_names(obj)
return [getattr(obj, key) for key in keys] if keys else []
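# Illustrative usage sketch (hypothetical model instance, not part of the
# original sample):
#   configs = get_apphook_configs(article)  # [] if no apphook fields exist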
|
(code_tokens column omitted: it is a tokenization of the code above) |
Get apphook configs for an object obj
:param obj: any model instance
:return: list of apphook configs for given obj
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
train
|
square/connect-python-sdk
|
squareconnect/models/catalog_query_range.py
|
https://github.com/square/connect-python-sdk/blob/adc1d09e817986cdc607391580f71d6b48ed4066/squareconnect/models/catalog_query_range.py#L67-L81
|
def attribute_name(self, attribute_name):
"""
Sets the attribute_name of this CatalogQueryRange.
The name of the attribute to be searched.
:param attribute_name: The attribute_name of this CatalogQueryRange.
:type: str
"""
if attribute_name is None:
raise ValueError("Invalid value for `attribute_name`, must not be `None`")
if len(attribute_name) < 1:
raise ValueError("Invalid value for `attribute_name`, length must be greater than or equal to `1`")
self._attribute_name = attribute_name
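# Illustrative behaviour (hypothetical instance `q` of CatalogQueryRange,
# not part of the original sample):
#   q.attribute_name = ''       # raises ValueError (length must be >= 1)
#   q.attribute_name = 'price'  # accepted; stored on self._attribute_name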
|
(code_tokens column omitted: it is a tokenization of the code above) |
Sets the attribute_name of this CatalogQueryRange.
The name of the attribute to be searched.
:param attribute_name: The attribute_name of this CatalogQueryRange.
:type: str
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
train
|
Opentrons/opentrons
|
api/src/opentrons/server/endpoints/control.py
|
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/server/endpoints/control.py#L385-L416
|
def _move_mount(robot, mount, point):
"""
The carriage moves the mount in the Z axis, and the gantry moves in X and Y
Mount movements do not have the same protections calculated into an
existing `move` command like Pipette does, so the safest thing is to home
the Z axis, then move in X and Y, then move down to the specified Z height
"""
carriage = robot._actuators[mount]['carriage']
# Home both carriages, to prevent collisions and to ensure that the other
# mount doesn't block the one being moved (mount moves are primarily for
# changing pipettes, so we don't want the other pipette blocking access)
robot.poses = carriage.home(robot.poses)
other_mount = 'left' if mount == 'right' else 'right'
robot.poses = robot._actuators[other_mount]['carriage'].home(robot.poses)
robot.gantry.move(
robot.poses, x=point[0], y=point[1])
robot.poses = carriage.move(
robot.poses, z=point[2])
# These x and y values are hard to interpret because of some internals of
# pose tracker. It's mostly z that matters for this operation anyway
x, y, _ = tuple(
pose_tracker.absolute(
robot.poses, robot._actuators[mount]['carriage']))
_, _, z = tuple(
pose_tracker.absolute(
robot.poses, robot.gantry))
new_position = (x, y, z)
return "Move complete. New position: {}".format(new_position)
|
(code_tokens column omitted: it is a tokenization of the code above) |
The carriage moves the mount in the Z axis, and the gantry moves in X and Y
Mount movements do not have the same protections calculated into an
existing `move` command like Pipette does, so the safest thing is to home
the Z axis, then move in X and Y, then move down to the specified Z height
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
train
|
gitpython-developers/GitPython
|
git/remote.py
|
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/remote.py#L813-L843
|
def push(self, refspec=None, progress=None, **kwargs):
"""Push changes from source branch in refspec to target branch in refspec.
:param refspec: see 'fetch' method
:param progress:
Can take one of many value types:
* None to discard progress information
* A function (callable) that is called with the progress information.
Signature: ``progress(op_code, cur_count, max_count=None, message='')``.
`Click here <http://goo.gl/NPa7st>`_ for a description of all arguments
given to the function.
* An instance of a class derived from ``git.RemoteProgress`` that
overrides the ``update()`` function.
:note: No further progress information is returned after push returns.
:param kwargs: Additional arguments to be passed to git-push
:return:
IterableList(PushInfo, ...) iterable list of PushInfo instances, each
one informing about an individual head which had been updated on the remote
side.
If the push contains rejected heads, these will have the PushInfo.ERROR bit set
in their flags.
If the operation fails completely, the length of the returned IterableList will
be null."""
kwargs = add_progress(kwargs, self.repo.git, progress)
proc = self.repo.git.push(self, refspec, porcelain=True, as_process=True,
universal_newlines=True, **kwargs)
return self._get_push_info(proc, progress)
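# Illustrative usage sketch (hypothetical repo path and refspec, not part
# of the original sample):
#   origin = git.Repo('/path/to/repo').remote('origin')
#   for info in origin.push(refspec='master:master'):
#       print(info.flags, info.summary)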
|
(code_tokens column omitted: it is a tokenization of the code above) |
Push changes from source branch in refspec to target branch in refspec.
:param refspec: see 'fetch' method
:param progress:
Can take one of many value types:
* None to discard progress information
* A function (callable) that is called with the progress information.
Signature: ``progress(op_code, cur_count, max_count=None, message='')``.
`Click here <http://goo.gl/NPa7st>`_ for a description of all arguments
given to the function.
* An instance of a class derived from ``git.RemoteProgress`` that
overrides the ``update()`` function.
:note: No further progress information is returned after push returns.
:param kwargs: Additional arguments to be passed to git-push
:return:
IterableList(PushInfo, ...) iterable list of PushInfo instances, each
one informing about an individual head which had been updated on the remote
side.
If the push contains rejected heads, these will have the PushInfo.ERROR bit set
in their flags.
If the operation fails completely, the length of the returned IterableList will
be null.
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
train
|
xapple/plumbing
|
plumbing/common.py
|
https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/common.py#L472-L492
|
def prepend_to_file(path, data, bufsize=1<<15):
"""TODO:
* Add a random string to the backup file.
* Restore permissions after copy.
"""
# Backup the file #
backupname = path + os.extsep + 'bak'
# Remove previous backup if it exists #
try: os.unlink(backupname)
except OSError: pass
os.rename(path, backupname)
# Open input/output files, note: outputfile's permissions lost #
with open(backupname) as inputfile:
with open(path, 'w') as outputfile:
outputfile.write(data)
buf = inputfile.read(bufsize)
while buf:
outputfile.write(buf)
buf = inputfile.read(bufsize)
# Remove backup on success #
os.remove(backupname)
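# Illustrative usage sketch (hypothetical file name, not part of the
# original sample):
#   prepend_to_file('notes.txt', 'HEADER\n')
#   # 'HEADER' now precedes the old contents; the .bak copy is removed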
|
(code_tokens column omitted: it is a tokenization of the code above) |
TODO:
* Add a random string to the backup file.
* Restore permissions after copy.
|
(docstring_tokens column omitted: it is a tokenization of the docstring above) |
python
|
train
|
inspirehep/inspire-dojson
|
inspire_dojson/hep/rules/bd1xx.py
|
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd1xx.py#L196-L274
|
def authors2marc(self, key, value):
"""Populate the ``100`` MARC field.
Also populates the ``700`` and the ``701`` MARC fields through side effects.
"""
value = force_list(value)
def _get_ids(value):
ids = {
'i': [],
'j': [],
}
if value.get('ids'):
for _id in value.get('ids'):
if _id.get('schema') == 'INSPIRE ID':
ids['i'].append(_id.get('value'))
elif _id.get('schema') == 'ORCID':
ids['j'].append('ORCID:' + _id.get('value'))
elif _id.get('schema') == 'JACOW':
ids['j'].append(_id.get('value'))
elif _id.get('schema') == 'CERN':
ids['j'].append('CCID-' + _id.get('value')[5:])
return ids
def _get_affiliations(value):
return [
aff.get('value') for aff in value.get('affiliations', [])
]
def _get_affiliations_identifiers(value):
return [
u'{}:{}'.format(aff.get('schema'), aff.get('value')) for aff in value.get('affiliations_identifiers', [])
]
def _get_inspire_roles(value):
values = force_list(value.get('inspire_roles'))
return ['ed.' for role in values if role == 'editor']
def _get_raw_affiliations(value):
return [
aff.get('value') for aff in value.get('raw_affiliations', [])
]
def get_value_100_700(value):
ids = _get_ids(value)
return {
'a': value.get('full_name'),
'e': _get_inspire_roles(value),
'q': value.get('alternative_names'),
'i': ids.get('i'),
'j': ids.get('j'),
'm': value.get('emails'),
't': _get_affiliations_identifiers(value),
'u': _get_affiliations(value),
'v': _get_raw_affiliations(value),
}
def get_value_701(value):
ids = _get_ids(value)
return {
'a': value.get('full_name'),
'q': value.get('alternative_names'),
'i': ids.get('i'),
'j': ids.get('j'),
'u': _get_affiliations(value),
'v': _get_raw_affiliations(value),
}
if len(value) > 1:
self["700"] = []
self["701"] = []
for author in value[1:]:
is_supervisor = 'supervisor' in author.get('inspire_roles', [])
if is_supervisor:
self["701"].append(get_value_701(author))
else:
self["700"].append(get_value_100_700(author))
return get_value_100_700(value[0])
|
[
"def",
"authors2marc",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"value",
"=",
"force_list",
"(",
"value",
")",
"def",
"_get_ids",
"(",
"value",
")",
":",
"ids",
"=",
"{",
"'i'",
":",
"[",
"]",
",",
"'j'",
":",
"[",
"]",
",",
"}",
"if",
"value",
".",
"get",
"(",
"'ids'",
")",
":",
"for",
"_id",
"in",
"value",
".",
"get",
"(",
"'ids'",
")",
":",
"if",
"_id",
".",
"get",
"(",
"'schema'",
")",
"==",
"'INSPIRE ID'",
":",
"ids",
"[",
"'i'",
"]",
".",
"append",
"(",
"_id",
".",
"get",
"(",
"'value'",
")",
")",
"elif",
"_id",
".",
"get",
"(",
"'schema'",
")",
"==",
"'ORCID'",
":",
"ids",
"[",
"'j'",
"]",
".",
"append",
"(",
"'ORCID:'",
"+",
"_id",
".",
"get",
"(",
"'value'",
")",
")",
"elif",
"_id",
".",
"get",
"(",
"'schema'",
")",
"==",
"'JACOW'",
":",
"ids",
"[",
"'j'",
"]",
".",
"append",
"(",
"_id",
".",
"get",
"(",
"'value'",
")",
")",
"elif",
"_id",
".",
"get",
"(",
"'schema'",
")",
"==",
"'CERN'",
":",
"ids",
"[",
"'j'",
"]",
".",
"append",
"(",
"'CCID-'",
"+",
"_id",
".",
"get",
"(",
"'value'",
")",
"[",
"5",
":",
"]",
")",
"return",
"ids",
"def",
"_get_affiliations",
"(",
"value",
")",
":",
"return",
"[",
"aff",
".",
"get",
"(",
"'value'",
")",
"for",
"aff",
"in",
"value",
".",
"get",
"(",
"'affiliations'",
",",
"[",
"]",
")",
"]",
"def",
"_get_affiliations_identifiers",
"(",
"value",
")",
":",
"return",
"[",
"u'{}:{}'",
".",
"format",
"(",
"aff",
".",
"get",
"(",
"'schema'",
")",
",",
"aff",
".",
"get",
"(",
"'value'",
")",
")",
"for",
"aff",
"in",
"value",
".",
"get",
"(",
"'affiliations_identifiers'",
",",
"[",
"]",
")",
"]",
"def",
"_get_inspire_roles",
"(",
"value",
")",
":",
"values",
"=",
"force_list",
"(",
"value",
".",
"get",
"(",
"'inspire_roles'",
")",
")",
"return",
"[",
"'ed.'",
"for",
"role",
"in",
"values",
"if",
"role",
"==",
"'editor'",
"]",
"def",
"_get_raw_affiliations",
"(",
"value",
")",
":",
"return",
"[",
"aff",
".",
"get",
"(",
"'value'",
")",
"for",
"aff",
"in",
"value",
".",
"get",
"(",
"'raw_affiliations'",
",",
"[",
"]",
")",
"]",
"def",
"get_value_100_700",
"(",
"value",
")",
":",
"ids",
"=",
"_get_ids",
"(",
"value",
")",
"return",
"{",
"'a'",
":",
"value",
".",
"get",
"(",
"'full_name'",
")",
",",
"'e'",
":",
"_get_inspire_roles",
"(",
"value",
")",
",",
"'q'",
":",
"value",
".",
"get",
"(",
"'alternative_names'",
")",
",",
"'i'",
":",
"ids",
".",
"get",
"(",
"'i'",
")",
",",
"'j'",
":",
"ids",
".",
"get",
"(",
"'j'",
")",
",",
"'m'",
":",
"value",
".",
"get",
"(",
"'emails'",
")",
",",
"'t'",
":",
"_get_affiliations_identifiers",
"(",
"value",
")",
",",
"'u'",
":",
"_get_affiliations",
"(",
"value",
")",
",",
"'v'",
":",
"_get_raw_affiliations",
"(",
"value",
")",
",",
"}",
"def",
"get_value_701",
"(",
"value",
")",
":",
"ids",
"=",
"_get_ids",
"(",
"value",
")",
"return",
"{",
"'a'",
":",
"value",
".",
"get",
"(",
"'full_name'",
")",
",",
"'q'",
":",
"value",
".",
"get",
"(",
"'alternative_names'",
")",
",",
"'i'",
":",
"ids",
".",
"get",
"(",
"'i'",
")",
",",
"'j'",
":",
"ids",
".",
"get",
"(",
"'j'",
")",
",",
"'u'",
":",
"_get_affiliations",
"(",
"value",
")",
",",
"'v'",
":",
"_get_raw_affiliations",
"(",
"value",
")",
",",
"}",
"if",
"len",
"(",
"value",
")",
">",
"1",
":",
"self",
"[",
"\"700\"",
"]",
"=",
"[",
"]",
"self",
"[",
"\"701\"",
"]",
"=",
"[",
"]",
"for",
"author",
"in",
"value",
"[",
"1",
":",
"]",
":",
"is_supervisor",
"=",
"'supervisor'",
"in",
"author",
".",
"get",
"(",
"'inspire_roles'",
",",
"[",
"]",
")",
"if",
"is_supervisor",
":",
"self",
"[",
"\"701\"",
"]",
".",
"append",
"(",
"get_value_701",
"(",
"author",
")",
")",
"else",
":",
"self",
"[",
"\"700\"",
"]",
".",
"append",
"(",
"get_value_100_700",
"(",
"author",
")",
")",
"return",
"get_value_100_700",
"(",
"value",
"[",
"0",
"]",
")"
] |
Populate the ``100`` MARC field.
Also populates the ``700`` and the ``701`` MARC fields through side effects.
|
[
"Populate",
"the",
"100",
"MARC",
"field",
"."
] |
python
|
train
|
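The subfield routing in _get_ids above is the subtle part: INSPIRE IDs go to subfield 'i', while ORCID, JACOW and CERN identifiers all land in 'j' with scheme-specific prefixes. A self-contained sketch of that mapping on a hypothetical author record:

def route_ids(ids):
    # Mirrors the branch logic of _get_ids: 'i' collects INSPIRE IDs,
    # 'j' collects the other schemes with their display prefixes.
    out = {'i': [], 'j': []}
    for _id in ids:
        schema, value = _id['schema'], _id['value']
        if schema == 'INSPIRE ID':
            out['i'].append(value)
        elif schema == 'ORCID':
            out['j'].append('ORCID:' + value)
        elif schema == 'JACOW':
            out['j'].append(value)
        elif schema == 'CERN':
            # 'CERN-412946' -> 'CCID-412946' (drop the 'CERN-' prefix).
            out['j'].append('CCID-' + value[5:])
    return out

sample = [
    {'schema': 'INSPIRE ID', 'value': 'INSPIRE-00068762'},
    {'schema': 'ORCID', 'value': '0000-0002-1825-0097'},
    {'schema': 'CERN', 'value': 'CERN-412946'},
]
print(route_ids(sample))
# {'i': ['INSPIRE-00068762'], 'j': ['ORCID:0000-0002-1825-0097', 'CCID-412946']}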
mikicz/arca
|
arca/_arca.py
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/_arca.py#L334-L344
|
def get_repo(self, repo: str, branch: str, *,
depth: Optional[int]=1,
reference: Optional[Path]=None
) -> Repo:
""" Returns a :class:`Repo <git.repo.base.Repo>` instance for the branch.
See :meth:`run` for arguments descriptions.
"""
git_repo, _ = self.get_files(repo, branch, depth=depth, reference=reference)
return git_repo
|
[
"def",
"get_repo",
"(",
"self",
",",
"repo",
":",
"str",
",",
"branch",
":",
"str",
",",
"*",
",",
"depth",
":",
"Optional",
"[",
"int",
"]",
"=",
"1",
",",
"reference",
":",
"Optional",
"[",
"Path",
"]",
"=",
"None",
")",
"->",
"Repo",
":",
"git_repo",
",",
"_",
"=",
"self",
".",
"get_files",
"(",
"repo",
",",
"branch",
",",
"depth",
"=",
"depth",
",",
"reference",
"=",
"reference",
")",
"return",
"git_repo"
] |
Returns a :class:`Repo <git.repo.base.Repo>` instance for the branch.
See :meth:`run` for arguments descriptions.
|
[
"Returns",
"a",
":",
"class",
":",
"Repo",
"<git",
".",
"repo",
".",
"base",
".",
"Repo",
">",
"instance",
"for",
"the",
"branch",
"."
] |
python
|
train
|
martinkosir/neverbounce-python
|
neverbounce/client.py
|
https://github.com/martinkosir/neverbounce-python/blob/8d8b3f381dbff2a753a8770fac0d2bfab80d5bec/neverbounce/client.py#L125-L148
|
def _handle_response(response):
"""
Handle the response and possible failures.
:param Response response: Response data.
:return: A dictionary or a string with response data.
:raises: NeverBounceAPIError if the API call fails.
"""
if not response.ok:
raise NeverBounceAPIError(response)
if response.headers.get('Content-Type') == 'application/octet-stream':
return response.iter_lines()
try:
resp = response.json()
except ValueError:
raise InvalidResponseError('Failed to handle the response content-type {}.'.format(
response.headers.get('Content-Type'))
)
if 'success' in resp and not resp['success']:
if 'msg' in resp and resp['msg'] == 'Authentication failed':
raise AccessTokenExpired
else:
raise NeverBounceAPIError(response)
return resp
|
[
"def",
"_handle_response",
"(",
"response",
")",
":",
"if",
"not",
"response",
".",
"ok",
":",
"raise",
"NeverBounceAPIError",
"(",
"response",
")",
"if",
"response",
".",
"headers",
".",
"get",
"(",
"'Content-Type'",
")",
"==",
"'application/octet-stream'",
":",
"return",
"response",
".",
"iter_lines",
"(",
")",
"try",
":",
"resp",
"=",
"response",
".",
"json",
"(",
")",
"except",
"ValueError",
":",
"raise",
"InvalidResponseError",
"(",
"'Failed to handle the response content-type {}.'",
".",
"format",
"(",
"response",
".",
"headers",
".",
"get",
"(",
"'Content-Type'",
")",
")",
")",
"if",
"'success'",
"in",
"resp",
"and",
"not",
"resp",
"[",
"'success'",
"]",
":",
"if",
"'msg'",
"in",
"resp",
"and",
"resp",
"[",
"'msg'",
"]",
"==",
"'Authentication failed'",
":",
"raise",
"AccessTokenExpired",
"else",
":",
"raise",
"NeverBounceAPIError",
"(",
"response",
")",
"return",
"resp"
] |
Handle the response and possible failures.
:param Response response: Response data.
:return: A dictionary or a string with response data.
:raises: NeverBounceAPIError if the API call fails.
|
[
"Handle",
"the",
"response",
"and",
"possible",
"failures",
".",
":",
"param",
"Response",
"response",
":",
"Response",
"data",
".",
":",
"return",
":",
"A",
"dictionary",
"or",
"a",
"string",
"with",
"response",
"data",
".",
":",
"raises",
":",
"NeverBounceAPIError",
"if",
"the",
"API",
"call",
"fails",
"."
] |
python
|
train
|
Microsoft/azure-devops-python-api
|
azure-devops/azure/devops/v5_0/feature_management/feature_management_client.py
|
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/feature_management/feature_management_client.py#L43-L56
|
def get_features(self, target_contribution_id=None):
"""GetFeatures.
[Preview API] Get a list of all defined features
:param str target_contribution_id: Optional target contribution. If null/empty, return all features. If specified include the features that target the specified contribution.
:rtype: [ContributedFeature]
"""
query_parameters = {}
if target_contribution_id is not None:
query_parameters['targetContributionId'] = self._serialize.query('target_contribution_id', target_contribution_id, 'str')
response = self._send(http_method='GET',
location_id='c4209f25-7a27-41dd-9f04-06080c7b6afd',
version='5.0-preview.1',
query_parameters=query_parameters)
return self._deserialize('[ContributedFeature]', self._unwrap_collection(response))
|
[
"def",
"get_features",
"(",
"self",
",",
"target_contribution_id",
"=",
"None",
")",
":",
"query_parameters",
"=",
"{",
"}",
"if",
"target_contribution_id",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'targetContributionId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'target_contribution_id'",
",",
"target_contribution_id",
",",
"'str'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'c4209f25-7a27-41dd-9f04-06080c7b6afd'",
",",
"version",
"=",
"'5.0-preview.1'",
",",
"query_parameters",
"=",
"query_parameters",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'[ContributedFeature]'",
",",
"self",
".",
"_unwrap_collection",
"(",
"response",
")",
")"
] |
GetFeatures.
[Preview API] Get a list of all defined features
:param str target_contribution_id: Optional target contribution. If null/empty, return all features. If specified include the features that target the specified contribution.
:rtype: [ContributedFeature]
|
[
"GetFeatures",
".",
"[",
"Preview",
"API",
"]",
"Get",
"a",
"list",
"of",
"all",
"defined",
"features",
":",
"param",
"str",
"target_contribution_id",
":",
"Optional",
"target",
"contribution",
".",
"If",
"null",
"/",
"empty",
"return",
"all",
"features",
".",
"If",
"specified",
"include",
"the",
"features",
"that",
"target",
"the",
"specified",
"contribution",
".",
":",
"rtype",
":",
"[",
"ContributedFeature",
"]"
] |
python
|
train
|
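A sketch of the happy path through _handle_response, assuming the function above is in scope; the stub below is a minimal stand-in for a real requests.Response, not the library's API:

class StubResponse:
    """Minimal stand-in exposing only the attributes _handle_response reads."""
    def __init__(self, ok=True, content_type='application/json', payload=None):
        self.ok = ok
        self.headers = {'Content-Type': content_type}
        self._payload = payload if payload is not None else {}

    def json(self):
        return self._payload

# A JSON body with success=True comes straight back as a dict.
print(_handle_response(StubResponse(payload={'success': True, 'result': 'valid'})))
# {'success': True, 'result': 'valid'}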
dj-stripe/dj-stripe
|
djstripe/checks.py
|
https://github.com/dj-stripe/dj-stripe/blob/a5308a3808cd6e2baba49482f7a699f3a8992518/djstripe/checks.py#L126-L144
|
def check_webhook_secret(app_configs=None, **kwargs):
"""
Check that DJSTRIPE_WEBHOOK_SECRET looks correct
"""
from . import settings as djstripe_settings
messages = []
secret = djstripe_settings.WEBHOOK_SECRET
if secret and not secret.startswith("whsec_"):
messages.append(
checks.Warning(
"DJSTRIPE_WEBHOOK_SECRET does not look valid",
hint="It should start with whsec_...",
id="djstripe.W003",
)
)
return messages
|
[
"def",
"check_webhook_secret",
"(",
"app_configs",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
".",
"import",
"settings",
"as",
"djstripe_settings",
"messages",
"=",
"[",
"]",
"secret",
"=",
"djstripe_settings",
".",
"WEBHOOK_SECRET",
"if",
"secret",
"and",
"not",
"secret",
".",
"startswith",
"(",
"\"whsec_\"",
")",
":",
"messages",
".",
"append",
"(",
"checks",
".",
"Warning",
"(",
"\"DJSTRIPE_WEBHOOK_SECRET does not look valid\"",
",",
"hint",
"=",
"\"It should start with whsec_...\"",
",",
"id",
"=",
"\"djstripe.W003\"",
",",
")",
")",
"return",
"messages"
] |
Check that DJSTRIPE_WEBHOOK_SECRET looks correct
|
[
"Check",
"that",
"DJSTRIPE_WEBHOOK_SECRET",
"looks",
"correct"
] |
python
|
train
|
theolind/pymysensors
|
mysensors/sensor.py
|
https://github.com/theolind/pymysensors/blob/a139ab6e2f6b71ebaf37282f69bfd0f7fe6193b6/mysensors/sensor.py#L157-L165
|
def get_schema(self, protocol_version):
"""Return the child schema for the correct const version."""
const = get_const(protocol_version)
custom_schema = vol.Schema({
typ.value: const.VALID_SETREQ[typ]
for typ in const.VALID_TYPES[const.Presentation.S_CUSTOM]})
return custom_schema.extend({
typ.value: const.VALID_SETREQ[typ]
for typ in const.VALID_TYPES[self.type]})
|
[
"def",
"get_schema",
"(",
"self",
",",
"protocol_version",
")",
":",
"const",
"=",
"get_const",
"(",
"protocol_version",
")",
"custom_schema",
"=",
"vol",
".",
"Schema",
"(",
"{",
"typ",
".",
"value",
":",
"const",
".",
"VALID_SETREQ",
"[",
"typ",
"]",
"for",
"typ",
"in",
"const",
".",
"VALID_TYPES",
"[",
"const",
".",
"Presentation",
".",
"S_CUSTOM",
"]",
"}",
")",
"return",
"custom_schema",
".",
"extend",
"(",
"{",
"typ",
".",
"value",
":",
"const",
".",
"VALID_SETREQ",
"[",
"typ",
"]",
"for",
"typ",
"in",
"const",
".",
"VALID_TYPES",
"[",
"self",
".",
"type",
"]",
"}",
")"
] |
Return the child schema for the correct const version.
|
[
"Return",
"the",
"child",
"schema",
"for",
"the",
"correct",
"const",
"version",
"."
] |
python
|
train
|
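The check itself reduces to a prefix test; a standalone sketch of the same validation outside Django's checks framework:

def webhook_secret_warnings(secret):
    # Stripe endpoint secrets are issued with a 'whsec_' prefix; anything
    # else is almost certainly a copy-paste mistake.
    if secret and not secret.startswith('whsec_'):
        return ['DJSTRIPE_WEBHOOK_SECRET does not look valid']
    return []

print(webhook_secret_warnings('whsec_abc123'))   # []
print(webhook_secret_warnings('sk_test_123'))    # one warning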
xiaocong/uiautomator
|
uiautomator/__init__.py
|
https://github.com/xiaocong/uiautomator/blob/9a0c892ffd056713f91aa2153d1533c5b0553a1c/uiautomator/__init__.py#L912-L929
|
def click(self):
'''
click on the ui object.
Usage:
d(text="Clock").click() # click on the center of the ui object
d(text="OK").click.wait(timeout=3000) # click and wait for the new window update
d(text="John").click.topleft() # click on the topleft of the ui object
d(text="John").click.bottomright() # click on the bottomright of the ui object
'''
@param_to_property(action=["tl", "topleft", "br", "bottomright", "wait"])
def _click(action=None, timeout=3000):
if action is None:
return self.jsonrpc.click(self.selector)
elif action in ["tl", "topleft", "br", "bottomright"]:
return self.jsonrpc.click(self.selector, action)
else:
return self.jsonrpc.clickAndWaitForNewWindow(self.selector, timeout)
return _click
|
[
"def",
"click",
"(",
"self",
")",
":",
"@",
"param_to_property",
"(",
"action",
"=",
"[",
"\"tl\"",
",",
"\"topleft\"",
",",
"\"br\"",
",",
"\"bottomright\"",
",",
"\"wait\"",
"]",
")",
"def",
"_click",
"(",
"action",
"=",
"None",
",",
"timeout",
"=",
"3000",
")",
":",
"if",
"action",
"is",
"None",
":",
"return",
"self",
".",
"jsonrpc",
".",
"click",
"(",
"self",
".",
"selector",
")",
"elif",
"action",
"in",
"[",
"\"tl\"",
",",
"\"topleft\"",
",",
"\"br\"",
",",
"\"bottomright\"",
"]",
":",
"return",
"self",
".",
"jsonrpc",
".",
"click",
"(",
"self",
".",
"selector",
",",
"action",
")",
"else",
":",
"return",
"self",
".",
"jsonrpc",
".",
"clickAndWaitForNewWindow",
"(",
"self",
".",
"selector",
",",
"timeout",
")",
"return",
"_click"
] |
click on the ui object.
Usage:
d(text="Clock").click() # click on the center of the ui object
d(text="OK").click.wait(timeout=3000) # click and wait for the new window update
d(text="John").click.topleft() # click on the topleft of the ui object
d(text="John").click.bottomright() # click on the bottomright of the ui object
|
[
"click",
"on",
"the",
"ui",
"object",
".",
"Usage",
":",
"d",
"(",
"text",
"=",
"Clock",
")",
".",
"click",
"()",
"#",
"click",
"on",
"the",
"center",
"of",
"the",
"ui",
"object",
"d",
"(",
"text",
"=",
"OK",
")",
".",
"click",
".",
"wait",
"(",
"timeout",
"=",
"3000",
")",
"#",
"click",
"and",
"wait",
"for",
"the",
"new",
"window",
"update",
"d",
"(",
"text",
"=",
"John",
")",
".",
"click",
".",
"topleft",
"()",
"#",
"click",
"on",
"the",
"topleft",
"of",
"the",
"ui",
"object",
"d",
"(",
"text",
"=",
"John",
")",
".",
"click",
".",
"bottomright",
"()",
"#",
"click",
"on",
"the",
"bottomright",
"of",
"the",
"ui",
"object"
] |
python
|
train
|
dpkp/kafka-python
|
kafka/coordinator/base.py
|
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/coordinator/base.py#L650-L671
|
def _send_group_coordinator_request(self):
"""Discover the current coordinator for the group.
Returns:
Future: resolves to the node id of the coordinator
"""
node_id = self._client.least_loaded_node()
if node_id is None:
return Future().failure(Errors.NoBrokersAvailable())
elif not self._client.ready(node_id, metadata_priority=False):
e = Errors.NodeNotReadyError(node_id)
return Future().failure(e)
log.debug("Sending group coordinator request for group %s to broker %s",
self.group_id, node_id)
request = GroupCoordinatorRequest[0](self.group_id)
future = Future()
_f = self._client.send(node_id, request)
_f.add_callback(self._handle_group_coordinator_response, future)
_f.add_errback(self._failed_request, node_id, request, future)
return future
|
[
"def",
"_send_group_coordinator_request",
"(",
"self",
")",
":",
"node_id",
"=",
"self",
".",
"_client",
".",
"least_loaded_node",
"(",
")",
"if",
"node_id",
"is",
"None",
":",
"return",
"Future",
"(",
")",
".",
"failure",
"(",
"Errors",
".",
"NoBrokersAvailable",
"(",
")",
")",
"elif",
"not",
"self",
".",
"_client",
".",
"ready",
"(",
"node_id",
",",
"metadata_priority",
"=",
"False",
")",
":",
"e",
"=",
"Errors",
".",
"NodeNotReadyError",
"(",
"node_id",
")",
"return",
"Future",
"(",
")",
".",
"failure",
"(",
"e",
")",
"log",
".",
"debug",
"(",
"\"Sending group coordinator request for group %s to broker %s\"",
",",
"self",
".",
"group_id",
",",
"node_id",
")",
"request",
"=",
"GroupCoordinatorRequest",
"[",
"0",
"]",
"(",
"self",
".",
"group_id",
")",
"future",
"=",
"Future",
"(",
")",
"_f",
"=",
"self",
".",
"_client",
".",
"send",
"(",
"node_id",
",",
"request",
")",
"_f",
".",
"add_callback",
"(",
"self",
".",
"_handle_group_coordinator_response",
",",
"future",
")",
"_f",
".",
"add_errback",
"(",
"self",
".",
"_failed_request",
",",
"node_id",
",",
"request",
",",
"future",
")",
"return",
"future"
] |
Discover the current coordinator for the group.
Returns:
Future: resolves to the node id of the coordinator
|
[
"Discover",
"the",
"current",
"coordinator",
"for",
"the",
"group",
"."
] |
python
|
train
|
glomex/gcdt
|
gcdt/iam.py
|
https://github.com/glomex/gcdt/blob/cd67cf416371337b83cb9ca3f696277125703339/gcdt/iam.py#L138-L188
|
def build_role(self, name, policies=False):
"""
Generate role for IAM cloudformation template
:param name: Name of role
:param policies: List of policies to attach to this role (False = none)
:return: Ref to new role
"""
# Build role template
if policies:
role = self.__template.add_resource(
Role(
self.name_strip(name),
AssumeRolePolicyDocument=Policy(
Version=self.VERSION_IAM,
Statement=[
Statement(
Effect=Allow,
Principal=Principal(
"Service", self.__role_principals
),
Action=[AssumeRole],
)
]
),
Path=self.__role_path,
ManagedPolicyArns=policies,
))
# Add role to list for default policy
self.__roles_list.append(troposphere.Ref(role))
else:
role = self.__template.add_resource(
Role(
self.name_strip(name),
AssumeRolePolicyDocument=Policy(
Version=self.VERSION_IAM,
Statement=[
Statement(
Effect=Allow,
Principal=Principal(
"Service", self.__role_principals
),
Action=[AssumeRole],
)
]
),
Path=self.__role_path,
))
# Add role to list for default policy
self.__roles_list.append(troposphere.Ref(role))
return role
|
[
"def",
"build_role",
"(",
"self",
",",
"name",
",",
"policies",
"=",
"False",
")",
":",
"# Build role template",
"if",
"policies",
":",
"role",
"=",
"self",
".",
"__template",
".",
"add_resource",
"(",
"Role",
"(",
"self",
".",
"name_strip",
"(",
"name",
")",
",",
"AssumeRolePolicyDocument",
"=",
"Policy",
"(",
"Version",
"=",
"self",
".",
"VERSION_IAM",
",",
"Statement",
"=",
"[",
"Statement",
"(",
"Effect",
"=",
"Allow",
",",
"Principal",
"=",
"Principal",
"(",
"\"Service\"",
",",
"self",
".",
"__role_principals",
")",
",",
"Action",
"=",
"[",
"AssumeRole",
"]",
",",
")",
"]",
")",
",",
"Path",
"=",
"self",
".",
"__role_path",
",",
"ManagedPolicyArns",
"=",
"policies",
",",
")",
")",
"# Add role to list for default policy",
"self",
".",
"__roles_list",
".",
"append",
"(",
"troposphere",
".",
"Ref",
"(",
"role",
")",
")",
"else",
":",
"role",
"=",
"self",
".",
"__template",
".",
"add_resource",
"(",
"Role",
"(",
"self",
".",
"name_strip",
"(",
"name",
")",
",",
"AssumeRolePolicyDocument",
"=",
"Policy",
"(",
"Version",
"=",
"self",
".",
"VERSION_IAM",
",",
"Statement",
"=",
"[",
"Statement",
"(",
"Effect",
"=",
"Allow",
",",
"Principal",
"=",
"Principal",
"(",
"\"Service\"",
",",
"self",
".",
"__role_principals",
")",
",",
"Action",
"=",
"[",
"AssumeRole",
"]",
",",
")",
"]",
")",
",",
"Path",
"=",
"self",
".",
"__role_path",
",",
")",
")",
"# Add role to list for default policy",
"self",
".",
"__roles_list",
".",
"append",
"(",
"troposphere",
".",
"Ref",
"(",
"role",
")",
")",
"return",
"role"
] |
Generate role for IAM cloudformation template
:param name: Name of role
:param policies: List of policies to attach to this role (False = none)
:return: Ref to new role
|
[
"Generate",
"role",
"for",
"IAM",
"cloudformation",
"template",
":",
"param",
"name",
":",
"Name",
"of",
"role",
":",
"param",
"policies",
":",
"List",
"of",
"policies",
"to",
"attach",
"to",
"this",
"role",
"(",
"False",
"=",
"none",
")",
":",
"return",
":",
"Ref",
"to",
"new",
"role"
] |
python
|
train
|
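A reduced, self-contained version of what build_role emits on its no-policies branch, assuming troposphere and awacs are installed (the libraries this module's Role/Policy/Statement names come from); the logical name and service principal are illustrative:

from troposphere import Template
from troposphere.iam import Role
from awacs.aws import Allow, Policy, Principal, Statement
from awacs.sts import AssumeRole

template = Template()
role = template.add_resource(Role(
    'LambdaExecutionRole',                  # logical name, as name_strip() would produce
    AssumeRolePolicyDocument=Policy(
        Version='2012-10-17',               # stands in for self.VERSION_IAM
        Statement=[Statement(
            Effect=Allow,
            Principal=Principal('Service', ['lambda.amazonaws.com']),
            Action=[AssumeRole],
        )],
    ),
    Path='/',
))
print(template.to_json())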
PokeAPI/pokebase
|
pokebase/common.py
|
https://github.com/PokeAPI/pokebase/blob/e7d695662037811f3956ed4ea817ffee70b12e33/pokebase/common.py#L72-L80
|
def sprite_filepath_build(sprite_type, sprite_id, **kwargs):
"""returns the filepath of the sprite *relative to SPRITE_CACHE*"""
options = parse_sprite_options(sprite_type, **kwargs)
filename = '.'.join([str(sprite_id), SPRITE_EXT])
filepath = os.path.join(sprite_type, *options, filename)
return filepath
|
[
"def",
"sprite_filepath_build",
"(",
"sprite_type",
",",
"sprite_id",
",",
"*",
"*",
"kwargs",
")",
":",
"options",
"=",
"parse_sprite_options",
"(",
"sprite_type",
",",
"*",
"*",
"kwargs",
")",
"filename",
"=",
"'.'",
".",
"join",
"(",
"[",
"str",
"(",
"sprite_id",
")",
",",
"SPRITE_EXT",
"]",
")",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sprite_type",
",",
"*",
"options",
",",
"filename",
")",
"return",
"filepath"
] |
returns the filepath of the sprite *relative to SPRITE_CACHE*
|
[
"returns",
"the",
"filepath",
"of",
"the",
"sprite",
"*",
"relative",
"to",
"SPRITE_CACHE",
"*"
] |
python
|
train
|
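The path composition above is plain os.path.join; a standalone sketch with hypothetical option values (the real options come from parse_sprite_options):

import os

sprite_type = 'pokemon'
sprite_id = 25
options = ['shiny']            # hypothetical output of parse_sprite_options
SPRITE_EXT = 'png'

filename = '.'.join([str(sprite_id), SPRITE_EXT])
filepath = os.path.join(sprite_type, *options, filename)
print(filepath)                # pokemon/shiny/25.png (backslashes on Windows)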
mazulo/pynews_cli
|
pynews/utils.py
|
https://github.com/mazulo/pynews_cli/blob/88a6d70e18b9d8ea87ab7321a3d7ee78c4fa5ffb/pynews/utils.py#L40-L53
|
def get_story(new):
"""Return a story of the given ID."""
url = URLS['item'].format(new)
try:
data = req.get(url)
except req.ConnectionError:
raise
except req.Timeout:
raise req.Timeout('A timeout problem occurred.')
except req.TooManyRedirects:
raise req.TooManyRedirects('The request exceeds the configured number\
of maximum redirections.')
else:
return data.json()
|
[
"def",
"get_story",
"(",
"new",
")",
":",
"url",
"=",
"URLS",
"[",
"'item'",
"]",
".",
"format",
"(",
"new",
")",
"try",
":",
"data",
"=",
"req",
".",
"get",
"(",
"url",
")",
"except",
"req",
".",
"ConnectionError",
":",
"raise",
"except",
"req",
".",
"Timeout",
":",
"raise",
"req",
".",
"Timeout",
"(",
"'A timeout problem occurred.'",
")",
"except",
"req",
".",
"TooManyRedirects",
":",
"raise",
"req",
".",
"TooManyRedirects",
"(",
"'The request exceeds the configured number\\\n of maximum redirections.'",
")",
"else",
":",
"return",
"data",
".",
"json",
"(",
")"
] |
Return a story of the given ID.
|
[
"Return",
"a",
"story",
"of",
"the",
"given",
"ID",
"."
] |
python
|
train
|
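A hedged usage sketch: assuming URLS['item'] points at the Hacker News item endpoint (the API pynews targets), fetching one story looks like this. The item id is illustrative and the call needs network access:

import requests as req

# Stand-in for URLS['item'] from pynews.
ITEM_URL = 'https://hacker-news.firebaseio.com/v0/item/{}.json'

def fetch_story(item_id):
    # Same shape as get_story above, with the error handling collapsed.
    data = req.get(ITEM_URL.format(item_id), timeout=10)
    return data.json()

story = fetch_story(8863)          # a well-known sample item id
print(story.get('title'))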
aouyar/PyMunin
|
pymunin/__init__.py
|
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/__init__.py#L171-L184
|
def _getGraph(self, graph_name, fail_noexist=False):
"""Private method for returning graph object with name graph_name.
@param graph_name: Graph Name
@param fail_noexist: If true throw exception if there is no graph with
name graph_name.
@return: Graph Object or None
"""
graph = self._graphDict.get(graph_name)
if fail_noexist and graph is None:
raise AttributeError("Invalid graph name: %s" % graph_name)
else:
return graph
|
[
"def",
"_getGraph",
"(",
"self",
",",
"graph_name",
",",
"fail_noexist",
"=",
"False",
")",
":",
"graph",
"=",
"self",
".",
"_graphDict",
".",
"get",
"(",
"graph_name",
")",
"if",
"fail_noexist",
"and",
"graph",
"is",
"None",
":",
"raise",
"AttributeError",
"(",
"\"Invalid graph name: %s\"",
"%",
"graph_name",
")",
"else",
":",
"return",
"graph"
] |
Private method for returning graph object with name graph_name.
@param graph_name: Graph Name
@param fail_noexist: If true throw exception if there is no graph with
name graph_name.
@return: Graph Object or None
|
[
"Private",
"method",
"for",
"returning",
"graph",
"object",
"with",
"name",
"graph_name",
"."
] |
python
|
train
|
AtteqCom/zsl
|
src/zsl/task/task_decorator.py
|
https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/task/task_decorator.py#L342-L356
|
def xml_output(f):
"""
Set content-type for response to WEB-REQUEST to 'text/xml'
"""
@wraps(f)
def xml_output_inner_fn(*args, **kwargs):
ret_val = f(*args, **kwargs)
if isinstance(JobContext.get_current_context(), WebJobContext):
JobContext.get_current_context().add_responder(
MimeSetterWebTaskResponder('text/xml'))
return ret_val
return xml_output_inner_fn
|
[
"def",
"xml_output",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"xml_output_inner_fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"ret_val",
"=",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"isinstance",
"(",
"JobContext",
".",
"get_current_context",
"(",
")",
",",
"WebJobContext",
")",
":",
"JobContext",
".",
"get_current_context",
"(",
")",
".",
"add_responder",
"(",
"MimeSetterWebTaskResponder",
"(",
"'text/xml'",
")",
")",
"return",
"ret_val",
"return",
"xml_output_inner_fn"
] |
Set content-type for response to WEB-REQUEST to 'text/xml'
|
[
"Set",
"content",
"-",
"type",
"for",
"response",
"to",
"WEB",
"-",
"REQUEST",
"to",
"text",
"/",
"xml"
] |
python
|
train
|
kisom/pypcapfile
|
pcapfile/protocols/linklayer/wifi.py
|
https://github.com/kisom/pypcapfile/blob/67520cfbb6c2e9ab3e7c181a8012ddc56ec5cad8/pcapfile/protocols/linklayer/wifi.py#L1569-L1581
|
def strip_ccmp(self, idx):
"""strip(8 byte) wlan.ccmp.extiv
CCMP Extended Initialization Vector
:return: int
number of processed bytes
:return: ctypes.raw
ccmp vector
"""
ccmp_extiv = None
if len(self._packet[idx:]) >= 8:
raw_bytes = self._packet[idx:idx + 8]
ccmp_extiv, = struct.unpack_from('Q', raw_bytes, 0)
return 8, ccmp_extiv
|
[
"def",
"strip_ccmp",
"(",
"self",
",",
"idx",
")",
":",
"ccmp_extiv",
"=",
"None",
"if",
"len",
"(",
"self",
".",
"_packet",
"[",
"idx",
":",
"]",
")",
">=",
"8",
":",
"raw_bytes",
"=",
"self",
".",
"_packet",
"[",
"idx",
":",
"idx",
"+",
"8",
"]",
"ccmp_extiv",
",",
"=",
"struct",
".",
"unpack_from",
"(",
"'Q'",
",",
"raw_bytes",
",",
"0",
")",
"return",
"8",
",",
"ccmp_extiv"
] |
strip(8 byte) wlan.ccmp.extiv
CCMP Extended Initialization Vector
:return: int
number of processed bytes
:return: ctypes.raw
ccmp vector
|
[
"strip",
"(",
"8",
"byte",
")",
"wlan",
".",
"ccmp",
".",
"extiv",
"CCMP",
"Extended",
"Initialization",
"Vector",
":",
"return",
":",
"int",
"number",
"of",
"processed",
"bytes",
":",
"return",
":",
"ctypes",
".",
"raw",
"ccmp",
"vector"
] |
python
|
valid
|
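The extraction is a single 8-byte read; a standalone sketch of the same struct call on synthetic packet bytes:

import struct

packet = bytes(range(16))                   # synthetic packet payload
idx = 4                                     # wherever the CCMP header starts

if len(packet[idx:]) >= 8:
    # 8 bytes interpreted as one integer, native byte order as in the original.
    extiv, = struct.unpack_from('Q', packet[idx:idx + 8], 0)
    print(hex(extiv))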
spyder-ide/spyder
|
spyder/plugins/editor/lsp/transport/main.py
|
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/lsp/transport/main.py#L62-L76
|
def logger_init(level):
"""
Initialize the logger for this thread.
Sets the log level to ERROR (0), WARNING (1), INFO (2), or DEBUG (3),
depending on the argument `level`.
"""
levellist = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
handler = logging.StreamHandler()
fmt = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
'-35s %(lineno) -5d: %(message)s')
handler.setFormatter(logging.Formatter(fmt))
logger = logging.root
logger.addHandler(handler)
logger.setLevel(levellist[level])
|
[
"def",
"logger_init",
"(",
"level",
")",
":",
"levellist",
"=",
"[",
"logging",
".",
"ERROR",
",",
"logging",
".",
"WARNING",
",",
"logging",
".",
"INFO",
",",
"logging",
".",
"DEBUG",
"]",
"handler",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"fmt",
"=",
"(",
"'%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '",
"'-35s %(lineno) -5d: %(message)s'",
")",
"handler",
".",
"setFormatter",
"(",
"logging",
".",
"Formatter",
"(",
"fmt",
")",
")",
"logger",
"=",
"logging",
".",
"root",
"logger",
".",
"addHandler",
"(",
"handler",
")",
"logger",
".",
"setLevel",
"(",
"levellist",
"[",
"level",
"]",
")"
] |
Initialize the logger for this thread.
Sets the log level to ERROR (0), WARNING (1), INFO (2), or DEBUG (3),
depending on the argument `level`.
|
[
"Initialize",
"the",
"logger",
"for",
"this",
"thread",
"."
] |
python
|
train
|
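Usage sketch, assuming logger_init above is in scope: the integer argument indexes into levellist, so 0 is quietest (ERROR) and 3 is most verbose (DEBUG):

import logging

logger_init(2)                              # INFO and above
log = logging.getLogger('lsp.transport')
log.info('handshake complete')              # printed
log.debug('raw payload: ...')               # suppressed at level 2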
bapakode/OmMongo
|
ommongo/session.py
|
https://github.com/bapakode/OmMongo/blob/52b5a5420516dc709f2d2eb065818c7973991ce3/ommongo/session.py#L259-L306
|
def execute_query(self, query, session):
''' Get the results of ``query``. This method does flush in a
transaction, so any objects retrieved which are not in the cache
which would be updated when the transaction finishes will be
stale '''
self.auto_ensure_indexes(query.type)
kwargs = dict()
if query._get_fields():
if PYMONGO_3: # pragma: nocover
kwargs['projection'] = query._fields_expression()
else: # pragma: nocover
kwargs['fields'] = query._fields_expression()
collection = self.db[query.type.get_collection_name()]
if query._search:
index_fields = query._createIndex
if index_fields:
# create new index
if type(index_fields) is list:
index_list = []
for field in index_fields:
index_list.append ((field, pymongo.TEXT))
collection.create_index(index_list, name='search_index', default_language='english')
else:
raise InvalidConfigException()
cursor = collection.find(query.query, {'__index_score': {'$meta': "textScore"}}, **kwargs)
cursor.sort([('__index_score', {'$meta': 'textScore'})])
elif query._rawquery:
if query._query_type=='aggregate':
cursor = collection.aggregate(query.query, cursor={}, **kwargs)
elif query._query_type=='map_reduce':
cursor = collection.map_reduce( query._mapreduce_mapper, query._mapreduce_reducer, query._mapreduce_key, query=query._mapreduce_query)
else:
cursor = collection.find(query.query, **kwargs)
if query._sort:
cursor.sort(query._sort)
elif query.type.config_default_sort:
cursor.sort(query.type.config_default_sort)
if query.hints:
cursor.hint(query.hints)
if query._get_limit() is not None:
cursor.limit(query._get_limit())
if query._get_skip() is not None:
cursor.skip(query._get_skip())
return QueryResult(session, cursor, query.type, raw_output=query._raw_output, fields=query._get_fields())
|
[
"def",
"execute_query",
"(",
"self",
",",
"query",
",",
"session",
")",
":",
"self",
".",
"auto_ensure_indexes",
"(",
"query",
".",
"type",
")",
"kwargs",
"=",
"dict",
"(",
")",
"if",
"query",
".",
"_get_fields",
"(",
")",
":",
"if",
"PYMONGO_3",
":",
"# pragma: nocover",
"kwargs",
"[",
"'projection'",
"]",
"=",
"query",
".",
"_fields_expression",
"(",
")",
"else",
":",
"# pragma: nocover",
"kwargs",
"[",
"'fields'",
"]",
"=",
"query",
".",
"_fields_expression",
"(",
")",
"collection",
"=",
"self",
".",
"db",
"[",
"query",
".",
"type",
".",
"get_collection_name",
"(",
")",
"]",
"if",
"query",
".",
"_search",
":",
"index_fields",
"=",
"query",
".",
"_createIndex",
"if",
"index_fields",
":",
"# create new index",
"if",
"type",
"(",
"index_fields",
")",
"is",
"list",
":",
"index_list",
"=",
"[",
"]",
"for",
"field",
"in",
"index_fields",
":",
"index_list",
".",
"append",
"(",
"(",
"field",
",",
"pymongo",
".",
"TEXT",
")",
")",
"collection",
".",
"create_index",
"(",
"index_list",
",",
"name",
"=",
"'search_index'",
",",
"default_language",
"=",
"'english'",
")",
"else",
":",
"raise",
"InvalidConfigException",
"(",
")",
"cursor",
"=",
"collection",
".",
"find",
"(",
"query",
".",
"query",
",",
"{",
"'__index_score'",
":",
"{",
"'$meta'",
":",
"\"textScore\"",
"}",
"}",
",",
"*",
"*",
"kwargs",
")",
"cursor",
".",
"sort",
"(",
"[",
"(",
"'__index_score'",
",",
"{",
"'$meta'",
":",
"'textScore'",
"}",
")",
"]",
")",
"elif",
"query",
".",
"_rawquery",
":",
"if",
"query",
".",
"_query_type",
"==",
"'aggregate'",
":",
"cursor",
"=",
"collection",
".",
"aggregate",
"(",
"query",
".",
"query",
",",
"cursor",
"=",
"{",
"}",
",",
"*",
"*",
"kwargs",
")",
"elif",
"query",
".",
"_query_type",
"==",
"'map_reduce'",
":",
"cursor",
"=",
"collection",
".",
"map_reduce",
"(",
"query",
".",
"_mapreduce_mapper",
",",
"query",
".",
"_mapreduce_reducer",
",",
"query",
".",
"_mapreduce_key",
",",
"query",
"=",
"query",
".",
"_mapreduce_query",
")",
"else",
":",
"cursor",
"=",
"collection",
".",
"find",
"(",
"query",
".",
"query",
",",
"*",
"*",
"kwargs",
")",
"if",
"query",
".",
"_sort",
":",
"cursor",
".",
"sort",
"(",
"query",
".",
"_sort",
")",
"elif",
"query",
".",
"type",
".",
"config_default_sort",
":",
"cursor",
".",
"sort",
"(",
"query",
".",
"type",
".",
"config_default_sort",
")",
"if",
"query",
".",
"hints",
":",
"cursor",
".",
"hint",
"(",
"query",
".",
"hints",
")",
"if",
"query",
".",
"_get_limit",
"(",
")",
"is",
"not",
"None",
":",
"cursor",
".",
"limit",
"(",
"query",
".",
"_get_limit",
"(",
")",
")",
"if",
"query",
".",
"_get_skip",
"(",
")",
"is",
"not",
"None",
":",
"cursor",
".",
"skip",
"(",
"query",
".",
"_get_skip",
"(",
")",
")",
"return",
"QueryResult",
"(",
"session",
",",
"cursor",
",",
"query",
".",
"type",
",",
"raw_output",
"=",
"query",
".",
"_raw_output",
",",
"fields",
"=",
"query",
".",
"_get_fields",
"(",
")",
")"
] |
Get the results of ``query``. This method does flush in a
transaction, so any objects retrieved which are not in the cache
which would be updated when the transaction finishes will be
stale
|
[
"Get",
"the",
"results",
"of",
"query",
".",
"This",
"method",
"does",
"flush",
"in",
"a",
"transaction",
"so",
"any",
"objects",
"retrieved",
"which",
"are",
"not",
"in",
"the",
"cache",
"which",
"would",
"be",
"updated",
"when",
"the",
"transaction",
"finishes",
"will",
"be",
"stale"
] |
python
|
train
|
luckydonald/pytgbot
|
code_generation/output/teleflask_messages.py
|
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/code_generation/output/teleflask_messages.py#L14-L32
|
def _apply_update_receiver(self, receiver, reply_id):
"""
Updates `self.receiver` and/or `self.reply_id` if they still contain the default value.
:param receiver: The receiver `chat_id` to use.
Either `self.receiver`, if set, e.g. when instancing `TextMessage(receiver=10001231231, ...)`,
or the `chat.id` of the update context, being the id of groups or the user's `from_peer.id` in private messages.
:type receiver: None | str|unicode | int
:param reply_id: Reply to that `message_id` in the chat we send to.
Either `self.reply_id`, if set, e.g. when instancing `TextMessage(reply_id=123123, ...)`,
or the `message_id` of the update which triggered the bot's functions.
:type reply_id: DEFAULT_MESSAGE_ID | int
"""
if self.receiver is None:
self.receiver = receiver
# end if
if self.reply_id is DEFAULT_MESSAGE_ID:
self.reply_id = reply_id
|
[
"def",
"_apply_update_receiver",
"(",
"self",
",",
"receiver",
",",
"reply_id",
")",
":",
"if",
"self",
".",
"receiver",
"is",
"None",
":",
"self",
".",
"receiver",
"=",
"receiver",
"# end if",
"if",
"self",
".",
"reply_id",
"is",
"DEFAULT_MESSAGE_ID",
":",
"self",
".",
"reply_id",
"=",
"reply_id"
] |
Updates `self.receiver` and/or `self.reply_id` if they still contain the default value.
:param receiver: The receiver `chat_id` to use.
Either `self.receiver`, if set, e.g. when instancing `TextMessage(receiver=10001231231, ...)`,
or the `chat.id` of the update context, being the id of groups or the user's `from_peer.id` in private messages.
:type receiver: None | str|unicode | int
:param reply_id: Reply to that `message_id` in the chat we send to.
Either `self.reply_id`, if set, e.g. when instancing `TextMessage(reply_id=123123, ...)`,
or the `message_id` of the update which triggered the bot's functions.
:type reply_id: DEFAULT_MESSAGE_ID | int
|
[
"Updates",
"self",
".",
"receiver",
"and",
"/",
"or",
"self",
".",
"reply_id",
"if",
"they",
"still",
"contain",
"the",
"default",
"value",
".",
":",
"param",
"receiver",
":",
"The",
"receiver",
"chat_id",
"to",
"use",
".",
"Either",
"self",
".",
"receiver",
"if",
"set",
"e",
".",
"g",
".",
"when",
"instancing",
"TextMessage",
"(",
"receiver",
"=",
"10001231231",
"...",
")",
"or",
"the",
"chat",
".",
"id",
"of",
"the",
"update",
"context",
"being",
"the",
"id",
"of",
"groups",
"or",
"the",
"user",
"s",
"from_peer",
".",
"id",
"in",
"private",
"messages",
".",
":",
"type",
"receiver",
":",
"None",
"|",
"str|unicode",
"|",
"int"
] |
python
|
train
|
note35/sinon
|
sinon/lib/matcher.py
|
https://github.com/note35/sinon/blob/f1d551b679b393d64d926a8a279320904c38d0f5/sinon/lib/matcher.py#L46-L61
|
def __value_compare(self, target):
"""
Comparing result based on expectation if arg_type is "VALUE"
Args: Anything
Return: Boolean
"""
if self.expectation == "__ANY__":
return True
elif self.expectation == "__DEFINED__":
return True if target is not None else False
elif self.expectation == "__TYPE__":
return True if type(target) == self.target_type else False #pylint:disable=unidiomatic-typecheck
elif self.expectation == "__INSTANCE__":
return True if isinstance(target, self.target_type.__class__) else False
else:
return True if target == self.expectation else False
|
[
"def",
"__value_compare",
"(",
"self",
",",
"target",
")",
":",
"if",
"self",
".",
"expectation",
"==",
"\"__ANY__\"",
":",
"return",
"True",
"elif",
"self",
".",
"expectation",
"==",
"\"__DEFINED__\"",
":",
"return",
"True",
"if",
"target",
"is",
"not",
"None",
"else",
"False",
"elif",
"self",
".",
"expectation",
"==",
"\"__TYPE__\"",
":",
"return",
"True",
"if",
"type",
"(",
"target",
")",
"==",
"self",
".",
"target_type",
"else",
"False",
"#pylint:disable=unidiomatic-typecheck",
"elif",
"self",
".",
"expectation",
"==",
"\"__INSTANCE__\"",
":",
"return",
"True",
"if",
"isinstance",
"(",
"target",
",",
"self",
".",
"target_type",
".",
"__class__",
")",
"else",
"False",
"else",
":",
"return",
"True",
"if",
"target",
"==",
"self",
".",
"expectation",
"else",
"False"
] |
Comparing result based on expectation if arg_type is "VALUE"
Args: Anything
Return: Boolean
|
[
"Comparing",
"result",
"based",
"on",
"expectation",
"if",
"arg_type",
"is",
"VALUE",
"Args",
":",
"Anything",
"Return",
":",
"Boolean"
] |
python
|
train
|
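A self-contained sketch of the four sentinel expectations the matcher supports; the class below is a stripped-down stand-in for sinon's Matcher, not its real API:

class MiniMatcher:
    # Hypothetical reduction of __value_compare: one expectation, one target.
    def __init__(self, expectation, target_type=None):
        self.expectation = expectation
        self.target_type = target_type

    def compare(self, target):
        if self.expectation == '__ANY__':
            return True
        if self.expectation == '__DEFINED__':
            return target is not None
        if self.expectation == '__TYPE__':
            return type(target) == self.target_type
        if self.expectation == '__INSTANCE__':
            return isinstance(target, self.target_type.__class__)
        return target == self.expectation

assert MiniMatcher('__ANY__').compare(object())
assert MiniMatcher('__DEFINED__').compare(0)          # 0 is defined, just falsy
assert MiniMatcher('__TYPE__', int).compare(42)
assert not MiniMatcher('__DEFINED__').compare(None)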
zhmcclient/python-zhmcclient
|
zhmcclient/_console.py
|
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_console.py#L231-L239
|
def user_patterns(self):
"""
:class:`~zhmcclient.UserPatternManager`: Access to the
:term:`User Patterns <User Pattern>` in this Console.
"""
# We do here some lazy loading.
if not self._user_patterns:
self._user_patterns = UserPatternManager(self)
return self._user_patterns
|
[
"def",
"user_patterns",
"(",
"self",
")",
":",
"# We do here some lazy loading.",
"if",
"not",
"self",
".",
"_user_patterns",
":",
"self",
".",
"_user_patterns",
"=",
"UserPatternManager",
"(",
"self",
")",
"return",
"self",
".",
"_user_patterns"
] |
:class:`~zhmcclient.UserPatternManager`: Access to the
:term:`User Patterns <User Pattern>` in this Console.
|
[
":",
"class",
":",
"~zhmcclient",
".",
"UserPatternManager",
":",
"Access",
"to",
"the",
":",
"term",
":",
"User",
"Patterns",
"<User",
"Pattern",
">",
"in",
"this",
"Console",
"."
] |
python
|
train
|
knipknap/SpiffWorkflow
|
SpiffWorkflow/serializer/xml.py
|
https://github.com/knipknap/SpiffWorkflow/blob/f0af7f59a332e0619e4f3c00a7d4a3d230760e00/SpiffWorkflow/serializer/xml.py#L140-L161
|
def serialize_value_map(self, map_elem, thedict):
"""
Serializes a dictionary of key/value pairs, where the values are
either strings, or Attrib, or PathAttrib objects.
Example::
<variable>
<name>foo</name>
<value>text</value>
</variable>
<variable>
<name>foo2</name>
<value><attribute>foobar</attribute></value>
</variable>
"""
for key, value in sorted((str(k), v) for (k, v) in thedict.items()):
var_elem = SubElement(map_elem, 'variable')
SubElement(var_elem, 'name').text = str(key)
value_elem = SubElement(var_elem, 'value')
self.serialize_value(value_elem, value)
return map_elem
|
[
"def",
"serialize_value_map",
"(",
"self",
",",
"map_elem",
",",
"thedict",
")",
":",
"for",
"key",
",",
"value",
"in",
"sorted",
"(",
"(",
"str",
"(",
"k",
")",
",",
"v",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"thedict",
".",
"items",
"(",
")",
")",
":",
"var_elem",
"=",
"SubElement",
"(",
"map_elem",
",",
"'variable'",
")",
"SubElement",
"(",
"var_elem",
",",
"'name'",
")",
".",
"text",
"=",
"str",
"(",
"key",
")",
"value_elem",
"=",
"SubElement",
"(",
"var_elem",
",",
"'value'",
")",
"self",
".",
"serialize_value",
"(",
"value_elem",
",",
"value",
")",
"return",
"map_elem"
] |
Serializes a dictionary of key/value pairs, where the values are
either strings, or Attrib, or PathAttrib objects.
Example::
<variable>
<name>foo</name>
<value>text</value>
</variable>
<variable>
<name>foo2</name>
<value><attribute>foobar</attribute></value>
</variable>
|
[
"Serializes",
"a",
"dictionary",
"of",
"key",
"/",
"value",
"pairs",
"where",
"the",
"values",
"are",
"either",
"strings",
"or",
"Attrib",
"or",
"PathAttrib",
"objects",
"."
] |
python
|
valid
|
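A standalone ElementTree sketch producing the documented <variable> layout for plain string values (Attrib/PathAttrib handling is delegated to serialize_value and omitted here):

from xml.etree.ElementTree import Element, SubElement, tostring

thedict = {'foo': 'text', 'foo2': 'bar'}
map_elem = Element('data')

for key, value in sorted((str(k), v) for k, v in thedict.items()):
    var_elem = SubElement(map_elem, 'variable')
    SubElement(var_elem, 'name').text = key
    SubElement(var_elem, 'value').text = value      # strings only in this sketch

print(tostring(map_elem).decode())
# <data><variable><name>foo</name><value>text</value></variable>...</data>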
brocade/pynos
|
pynos/versions/ver_6/ver_6_0_1/yang/brocade_port_profile.py
|
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_port_profile.py#L143-L161
|
def port_profile_vlan_profile_switchport_access_mac_group_vlan_classification_access_vlan_access_mac_group(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
vlan_profile = ET.SubElement(port_profile, "vlan-profile")
switchport = ET.SubElement(vlan_profile, "switchport")
access_mac_group_vlan_classification = ET.SubElement(switchport, "access-mac-group-vlan-classification")
access = ET.SubElement(access_mac_group_vlan_classification, "access")
vlan = ET.SubElement(access, "vlan")
access_vlan_id_key = ET.SubElement(vlan, "access-vlan-id")
access_vlan_id_key.text = kwargs.pop('access_vlan_id')
access_mac_group = ET.SubElement(vlan, "access-mac-group")
access_mac_group.text = kwargs.pop('access_mac_group')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
[
"def",
"port_profile_vlan_profile_switchport_access_mac_group_vlan_classification_access_vlan_access_mac_group",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"port_profile",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"port-profile\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-port-profile\"",
")",
"name_key",
"=",
"ET",
".",
"SubElement",
"(",
"port_profile",
",",
"\"name\"",
")",
"name_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
")",
"vlan_profile",
"=",
"ET",
".",
"SubElement",
"(",
"port_profile",
",",
"\"vlan-profile\"",
")",
"switchport",
"=",
"ET",
".",
"SubElement",
"(",
"vlan_profile",
",",
"\"switchport\"",
")",
"access_mac_group_vlan_classification",
"=",
"ET",
".",
"SubElement",
"(",
"switchport",
",",
"\"access-mac-group-vlan-classification\"",
")",
"access",
"=",
"ET",
".",
"SubElement",
"(",
"access_mac_group_vlan_classification",
",",
"\"access\"",
")",
"vlan",
"=",
"ET",
".",
"SubElement",
"(",
"access",
",",
"\"vlan\"",
")",
"access_vlan_id_key",
"=",
"ET",
".",
"SubElement",
"(",
"vlan",
",",
"\"access-vlan-id\"",
")",
"access_vlan_id_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'access_vlan_id'",
")",
"access_mac_group",
"=",
"ET",
".",
"SubElement",
"(",
"vlan",
",",
"\"access-mac-group\"",
")",
"access_mac_group",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'access_mac_group'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] |
Auto Generated Code
|
[
"Auto",
"Generated",
"Code"
] |
python
|
train
|
gatkin/declxml
|
declxml.py
|
https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L579-L608
|
def string(
element_name, # type: Text
attribute=None, # type: Optional[Text]
required=True, # type: bool
alias=None, # type: Optional[Text]
default='', # type: Optional[Text]
omit_empty=False, # type: bool
strip_whitespace=True, # type: bool
hooks=None # type: Optional[Hooks]
):
# type: (...) -> Processor
"""
Create a processor for string values.
:param strip_whitespace: Indicates whether leading and trailing whitespace should be stripped
from parsed string values.
See also :func:`declxml.boolean`
"""
value_parser = _string_parser(strip_whitespace)
return _PrimitiveValue(
element_name,
value_parser,
attribute,
required,
alias,
default,
omit_empty,
hooks
)
|
[
"def",
"string",
"(",
"element_name",
",",
"# type: Text",
"attribute",
"=",
"None",
",",
"# type: Optional[Text]",
"required",
"=",
"True",
",",
"# type: bool",
"alias",
"=",
"None",
",",
"# type: Optional[Text]",
"default",
"=",
"''",
",",
"# type: Optional[Text]",
"omit_empty",
"=",
"False",
",",
"# type: bool",
"strip_whitespace",
"=",
"True",
",",
"# type: bool",
"hooks",
"=",
"None",
"# type: Optional[Hooks]",
")",
":",
"# type: (...) -> Processor",
"value_parser",
"=",
"_string_parser",
"(",
"strip_whitespace",
")",
"return",
"_PrimitiveValue",
"(",
"element_name",
",",
"value_parser",
",",
"attribute",
",",
"required",
",",
"alias",
",",
"default",
",",
"omit_empty",
",",
"hooks",
")"
] |
Create a processor for string values.
:param strip_whitespace: Indicates whether leading and trailing whitespace should be stripped
from parsed string values.
See also :func:`declxml.boolean`
|
[
"Create",
"a",
"processor",
"for",
"string",
"values",
"."
] |
python
|
train
|
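Typical usage: string() processors are composed inside an aggregate such as declxml.dictionary(). A small parsing sketch, assuming declxml is installed; the element names are illustrative:

import declxml as xml

book = xml.dictionary('book', [
    xml.string('title'),                              # required by default
    xml.string('subtitle', required=False, default='n/a'),
])

doc = '<book><title> Dune </title></book>'
print(xml.parse_from_string(book, doc))
# {'title': 'Dune', 'subtitle': 'n/a'}   (leading/trailing whitespace stripped)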
JarryShaw/PyPCAPKit
|
src/protocols/link/link.py
|
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/link/link.py#L79-L121
|
def _import_next_layer(self, proto, length):
"""Import next layer extractor.
Positional arguments:
* proto -- str, next layer protocol name
* length -- int, valid (not padding) length
Returns:
* bool -- flag if extraction of next layer succeeded
* Info -- info of next layer
* ProtoChain -- protocol chain of next layer
* str -- alias of next layer
Protocols:
* ARP -- data link layer
* RARP -- data link layer
* VLAN -- data link layer
* IPv4 -- internet layer
* IPv6 -- internet layer
* IPX -- internet layer
"""
if length == 0:
from pcapkit.protocols.null import NoPayload as Protocol
elif self._sigterm:
from pcapkit.protocols.raw import Raw as Protocol
elif proto == 0x0806:
from pcapkit.protocols.link.arp import ARP as Protocol
elif proto == 0x8035:
from pcapkit.protocols.link.rarp import RARP as Protocol
elif proto == 0x8100:
from pcapkit.protocols.link.vlan import VLAN as Protocol
elif proto == 0x0800:
from pcapkit.protocols.internet.ipv4 import IPv4 as Protocol
elif proto == 0x86DD:
from pcapkit.protocols.internet.ipv6 import IPv6 as Protocol
elif proto == 0x8137:
from pcapkit.protocols.internet.ipx import IPX as Protocol
else:
from pcapkit.protocols.raw import Raw as Protocol
next_ = Protocol(self._file, length, error=self._onerror,
layer=self._exlayer, protocol=self._exproto)
return next_
|
[
"def",
"_import_next_layer",
"(",
"self",
",",
"proto",
",",
"length",
")",
":",
"if",
"length",
"==",
"0",
":",
"from",
"pcapkit",
".",
"protocols",
".",
"null",
"import",
"NoPayload",
"as",
"Protocol",
"elif",
"self",
".",
"_sigterm",
":",
"from",
"pcapkit",
".",
"protocols",
".",
"raw",
"import",
"Raw",
"as",
"Protocol",
"elif",
"proto",
"==",
"0x0806",
":",
"from",
"pcapkit",
".",
"protocols",
".",
"link",
".",
"arp",
"import",
"ARP",
"as",
"Protocol",
"elif",
"proto",
"==",
"0x8035",
":",
"from",
"pcapkit",
".",
"protocols",
".",
"link",
".",
"rarp",
"import",
"RARP",
"as",
"Protocol",
"elif",
"proto",
"==",
"0x8100",
":",
"from",
"pcapkit",
".",
"protocols",
".",
"link",
".",
"vlan",
"import",
"VLAN",
"as",
"Protocol",
"elif",
"proto",
"==",
"0x0800",
":",
"from",
"pcapkit",
".",
"protocols",
".",
"internet",
".",
"ipv4",
"import",
"IPv4",
"as",
"Protocol",
"elif",
"proto",
"==",
"0x86DD",
":",
"from",
"pcapkit",
".",
"protocols",
".",
"internet",
".",
"ipv6",
"import",
"IPv6",
"as",
"Protocol",
"elif",
"proto",
"==",
"0x8137",
":",
"from",
"pcapkit",
".",
"protocols",
".",
"internet",
".",
"ipx",
"import",
"IPX",
"as",
"Protocol",
"else",
":",
"from",
"pcapkit",
".",
"protocols",
".",
"raw",
"import",
"Raw",
"as",
"Protocol",
"next_",
"=",
"Protocol",
"(",
"self",
".",
"_file",
",",
"length",
",",
"error",
"=",
"self",
".",
"_onerror",
",",
"layer",
"=",
"self",
".",
"_exlayer",
",",
"protocol",
"=",
"self",
".",
"_exproto",
")",
"return",
"next_"
] |
Import next layer extractor.
Positional arguments:
* proto -- str, next layer protocol name
* length -- int, valid (not padding) length
Returns:
* bool -- flag if extraction of next layer succeeded
* Info -- info of next layer
* ProtoChain -- protocol chain of next layer
* str -- alias of next layer
Protocols:
* ARP -- data link layer
* RARP -- data link layer
* VLAN -- data link layer
* IPv4 -- internet layer
* IPv6 -- internet layer
* IPX -- internet layer
|
[
"Import",
"next",
"layer",
"extractor",
"."
] |
python
|
train
|
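The chain of elifs above is an EtherType dispatch; a standalone sketch of the same mapping as a lookup table (protocol names only, with imports deferred just as in the original):

# EtherType -> next-layer protocol, mirroring the branches above.
ETHERTYPE_DISPATCH = {
    0x0806: 'ARP',
    0x8035: 'RARP',
    0x8100: 'VLAN',
    0x0800: 'IPv4',
    0x86DD: 'IPv6',
    0x8137: 'IPX',
}

def next_layer_name(proto, length):
    if length == 0:
        return 'NoPayload'
    return ETHERTYPE_DISPATCH.get(proto, 'Raw')     # unknown types fall back to Raw

print(next_layer_name(0x0800, 60))   # IPv4
print(next_layer_name(0xFFFF, 60))   # Raw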
ThreatConnect-Inc/tcex
|
tcex/tcex_bin.py
|
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin.py#L62-L81
|
def db_create_table(self, table_name, columns):
"""Create a temporary DB table.
Arguments:
table_name (str): The name of the table.
columns (list): List of columns to add to the DB.
"""
formatted_columns = ''
for col in set(columns):
formatted_columns += '"{}" text, '.format(col.strip('"').strip('\''))
formatted_columns = formatted_columns.strip(', ')
create_table_sql = 'CREATE TABLE IF NOT EXISTS {} ({});'.format(
table_name, formatted_columns
)
try:
cr = self.db_conn.cursor()
cr.execute(create_table_sql)
except sqlite3.Error as e:
self.handle_error(e)
|
[
"def",
"db_create_table",
"(",
"self",
",",
"table_name",
",",
"columns",
")",
":",
"formatted_columns",
"=",
"''",
"for",
"col",
"in",
"set",
"(",
"columns",
")",
":",
"formatted_columns",
"+=",
"'\"{}\" text, '",
".",
"format",
"(",
"col",
".",
"strip",
"(",
"'\"'",
")",
".",
"strip",
"(",
"'\\''",
")",
")",
"formatted_columns",
"=",
"formatted_columns",
".",
"strip",
"(",
"', '",
")",
"create_table_sql",
"=",
"'CREATE TABLE IF NOT EXISTS {} ({});'",
".",
"format",
"(",
"table_name",
",",
"formatted_columns",
")",
"try",
":",
"cr",
"=",
"self",
".",
"db_conn",
".",
"cursor",
"(",
")",
"cr",
".",
"execute",
"(",
"create_table_sql",
")",
"except",
"sqlite3",
".",
"Error",
"as",
"e",
":",
"self",
".",
"handle_error",
"(",
"e",
")"
] |
Create a temporary DB table.
Arguments:
table_name (str): The name of the table.
columns (list): List of columns to add to the DB.
|
[
"Create",
"a",
"temporary",
"DB",
"table",
"."
] |
python
|
train
|
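A self-contained run of the same pattern against an in-memory SQLite database; the table and column names are made up:

import sqlite3

columns = ['"id"', "'summary'", 'rating']
table_name = 'indicators'

# Strip stray quoting and declare every column as text, as above.
formatted = ', '.join(
    '"{}" text'.format(col.strip('"').strip("'")) for col in set(columns)
)
sql = 'CREATE TABLE IF NOT EXISTS {} ({});'.format(table_name, formatted)

conn = sqlite3.connect(':memory:')
conn.cursor().execute(sql)
print([row[1] for row in conn.execute('PRAGMA table_info(indicators)')])
# ['id', 'summary', 'rating'] (order may vary: columns came from a set)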
mitsei/dlkit
|
dlkit/json_/assessment_authoring/objects.py
|
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment_authoring/objects.py#L814-L819
|
def _init_metadata(self, **kwargs):
"""Initialize form metadata"""
osid_objects.OsidObjectForm._init_metadata(self, **kwargs)
self._cumulative_default = self._mdata['cumulative']['default_boolean_values'][0]
self._minimum_score_default = self._mdata['minimum_score']['default_cardinal_values'][0]
self._maximum_score_default = self._mdata['maximum_score']['default_cardinal_values'][0]
|
[
"def",
"_init_metadata",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"osid_objects",
".",
"OsidObjectForm",
".",
"_init_metadata",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_cumulative_default",
"=",
"self",
".",
"_mdata",
"[",
"'cumulative'",
"]",
"[",
"'default_boolean_values'",
"]",
"[",
"0",
"]",
"self",
".",
"_minimum_score_default",
"=",
"self",
".",
"_mdata",
"[",
"'minimum_score'",
"]",
"[",
"'default_cardinal_values'",
"]",
"[",
"0",
"]",
"self",
".",
"_maximum_score_default",
"=",
"self",
".",
"_mdata",
"[",
"'maximum_score'",
"]",
"[",
"'default_cardinal_values'",
"]",
"[",
"0",
"]"
] |
Initialize form metadata
|
[
"Initialize",
"form",
"metadata"
] |
python
|
train
|
bokeh/bokeh
|
bokeh/server/views/ws.py
|
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/server/views/ws.py#L243-L257
|
def send_message(self, message):
''' Send a Bokeh Server protocol message to the connected client.
Args:
message (Message) : a message to send
'''
try:
if _message_test_port is not None:
_message_test_port.sent.append(message)
yield message.send(self)
except (WebSocketClosedError, StreamClosedError): # Tornado 4.x may raise StreamClosedError
# on_close() is / will be called anyway
log.warning("Failed sending message as connection was closed")
raise gen.Return(None)
|
[
"def",
"send_message",
"(",
"self",
",",
"message",
")",
":",
"try",
":",
"if",
"_message_test_port",
"is",
"not",
"None",
":",
"_message_test_port",
".",
"sent",
".",
"append",
"(",
"message",
")",
"yield",
"message",
".",
"send",
"(",
"self",
")",
"except",
"(",
"WebSocketClosedError",
",",
"StreamClosedError",
")",
":",
"# Tornado 4.x may raise StreamClosedError",
"# on_close() is / will be called anyway",
"log",
".",
"warning",
"(",
"\"Failed sending message as connection was closed\"",
")",
"raise",
"gen",
".",
"Return",
"(",
"None",
")"
] |
Send a Bokeh Server protocol message to the connected client.
Args:
message (Message) : a message to send
|
[
"Send",
"a",
"Bokeh",
"Server",
"protocol",
"message",
"to",
"the",
"connected",
"client",
"."
] |
python
|
train
|
theonion/django-bulbs
|
bulbs/utils/methods.py
|
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/utils/methods.py#L125-L133
|
def import_class(name):
"""Load class from fully-qualified python module name.
ex: import_class('bulbs.content.models.Content')
"""
module, _, klass = name.rpartition('.')
mod = import_module(module)
return getattr(mod, klass)
|
[
"def",
"import_class",
"(",
"name",
")",
":",
"module",
",",
"_",
",",
"klass",
"=",
"name",
".",
"rpartition",
"(",
"'.'",
")",
"mod",
"=",
"import_module",
"(",
"module",
")",
"return",
"getattr",
"(",
"mod",
",",
"klass",
")"
] |
Load class from fully-qualified python module name.
ex: import_class('bulbs.content.models.Content')
|
[
"Load",
"class",
"from",
"fully",
"-",
"qualified",
"python",
"module",
"name",
"."
] |
python
|
train
|
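import_class only needs a dotted path; a runnable sketch against the standard library, with the two-line implementation repeated so the snippet stands alone:

from importlib import import_module

def import_class(name):
    # Same implementation as above, repeated here so the sketch runs standalone.
    module, _, klass = name.rpartition('.')
    return getattr(import_module(module), klass)

OrderedDict = import_class('collections.OrderedDict')
print(OrderedDict([('a', 1)]))   # OrderedDict([('a', 1)])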
gmr/tinman
|
tinman/handlers/mixins.py
|
https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/handlers/mixins.py#L43-L55
|
def _statsd_address(self):
"""Return a tuple of host and port for the statsd server to send
stats to.
:return: tuple(host, port)
"""
return (self.application.settings.get('statsd',
{}).get('host',
self.STATSD_HOST),
self.application.settings.get('statsd',
{}).get('port',
self.STATSD_PORT))
|
[
"def",
"_statsd_address",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"application",
".",
"settings",
".",
"get",
"(",
"'statsd'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'host'",
",",
"self",
".",
"STATSD_HOST",
")",
",",
"self",
".",
"application",
".",
"settings",
".",
"get",
"(",
"'statsd'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'port'",
",",
"self",
".",
"STATSD_PORT",
")",
")"
] |
Return a tuple of host and port for the statsd server to send
stats to.
:return: tuple(host, port)
|
[
"Return",
"a",
"tuple",
"of",
"host",
"and",
"port",
"for",
"the",
"statsd",
"server",
"to",
"send",
"stats",
"to",
"."
] |
python
|
train
|
geertj/gruvi
|
lib/gruvi/ssl.py
|
https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/ssl.py#L135-L192
|
def feed_ssldata(self, data):
"""Feed SSL record level data into the pipe.
The data must be a bytes instance. It is OK to send an empty bytes
instance. This can be used to get ssldata for a handshake initiated by
this endpoint.
Return a (ssldata, appdata) tuple. The ssldata element is a list of
buffers containing SSL data that needs to be sent to the remote SSL.
The appdata element is a list of buffers containing plaintext data that
needs to be forwarded to the application. The appdata list may contain
an empty buffer indicating an SSL "close_notify" alert. This alert must
be acknowledged by calling :meth:`shutdown`.
"""
if self._state == self.S_UNWRAPPED:
# If unwrapped, pass plaintext data straight through.
return ([], [data] if data else [])
ssldata = []; appdata = []
self._need_ssldata = False
if data:
self._incoming.write(data)
try:
if self._state == self.S_DO_HANDSHAKE:
# Call do_handshake() until it doesn't raise anymore.
self._sslobj.do_handshake()
self._state = self.S_WRAPPED
if self._handshake_cb:
self._handshake_cb()
if self._state == self.S_WRAPPED:
# Main state: read data from SSL until close_notify
while True:
chunk = self._sslobj.read(self.bufsize)
appdata.append(chunk)
if not chunk: # close_notify
break
if self._state == self.S_SHUTDOWN:
# Call shutdown() until it doesn't raise anymore.
self._sslobj.unwrap()
self._sslobj = None
self._state = self.S_UNWRAPPED
if self._shutdown_cb:
self._shutdown_cb()
if self._state == self.S_UNWRAPPED:
# Drain possible plaintext data after close_notify.
appdata.append(self._incoming.read())
except (ssl.SSLError, sslcompat.CertificateError) as e:
if getattr(e, 'errno', None) not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE, ssl.SSL_ERROR_SYSCALL):
if self._state == self.S_DO_HANDSHAKE and self._handshake_cb:
self._handshake_cb(e)
raise
self._need_ssldata = e.errno == ssl.SSL_ERROR_WANT_READ
# Check for record level data that needs to be sent back.
# Happens for the initial handshake and renegotiations.
if self._outgoing.pending:
ssldata.append(self._outgoing.read())
return (ssldata, appdata)
|
[
"def",
"feed_ssldata",
"(",
"self",
",",
"data",
")",
":",
"if",
"self",
".",
"_state",
"==",
"self",
".",
"S_UNWRAPPED",
":",
"# If unwrapped, pass plaintext data straight through.",
"return",
"(",
"[",
"]",
",",
"[",
"data",
"]",
"if",
"data",
"else",
"[",
"]",
")",
"ssldata",
"=",
"[",
"]",
"appdata",
"=",
"[",
"]",
"self",
".",
"_need_ssldata",
"=",
"False",
"if",
"data",
":",
"self",
".",
"_incoming",
".",
"write",
"(",
"data",
")",
"try",
":",
"if",
"self",
".",
"_state",
"==",
"self",
".",
"S_DO_HANDSHAKE",
":",
"# Call do_handshake() until it doesn't raise anymore.",
"self",
".",
"_sslobj",
".",
"do_handshake",
"(",
")",
"self",
".",
"_state",
"=",
"self",
".",
"S_WRAPPED",
"if",
"self",
".",
"_handshake_cb",
":",
"self",
".",
"_handshake_cb",
"(",
")",
"if",
"self",
".",
"_state",
"==",
"self",
".",
"S_WRAPPED",
":",
"# Main state: read data from SSL until close_notify",
"while",
"True",
":",
"chunk",
"=",
"self",
".",
"_sslobj",
".",
"read",
"(",
"self",
".",
"bufsize",
")",
"appdata",
".",
"append",
"(",
"chunk",
")",
"if",
"not",
"chunk",
":",
"# close_notify",
"break",
"if",
"self",
".",
"_state",
"==",
"self",
".",
"S_SHUTDOWN",
":",
"# Call shutdown() until it doesn't raise anymore.",
"self",
".",
"_sslobj",
".",
"unwrap",
"(",
")",
"self",
".",
"_sslobj",
"=",
"None",
"self",
".",
"_state",
"=",
"self",
".",
"S_UNWRAPPED",
"if",
"self",
".",
"_shutdown_cb",
":",
"self",
".",
"_shutdown_cb",
"(",
")",
"if",
"self",
".",
"_state",
"==",
"self",
".",
"S_UNWRAPPED",
":",
"# Drain possible plaintext data after close_notify.",
"appdata",
".",
"append",
"(",
"self",
".",
"_incoming",
".",
"read",
"(",
")",
")",
"except",
"(",
"ssl",
".",
"SSLError",
",",
"sslcompat",
".",
"CertificateError",
")",
"as",
"e",
":",
"if",
"getattr",
"(",
"e",
",",
"'errno'",
",",
"None",
")",
"not",
"in",
"(",
"ssl",
".",
"SSL_ERROR_WANT_READ",
",",
"ssl",
".",
"SSL_ERROR_WANT_WRITE",
",",
"ssl",
".",
"SSL_ERROR_SYSCALL",
")",
":",
"if",
"self",
".",
"_state",
"==",
"self",
".",
"S_DO_HANDSHAKE",
"and",
"self",
".",
"_handshake_cb",
":",
"self",
".",
"_handshake_cb",
"(",
"e",
")",
"raise",
"self",
".",
"_need_ssldata",
"=",
"e",
".",
"errno",
"==",
"ssl",
".",
"SSL_ERROR_WANT_READ",
"# Check for record level data that needs to be sent back.",
"# Happens for the initial handshake and renegotiations.",
"if",
"self",
".",
"_outgoing",
".",
"pending",
":",
"ssldata",
".",
"append",
"(",
"self",
".",
"_outgoing",
".",
"read",
"(",
")",
")",
"return",
"(",
"ssldata",
",",
"appdata",
")"
] |
Feed SSL record level data into the pipe.
The data must be a bytes instance. It is OK to send an empty bytes
instance. This can be used to get ssldata for a handshake initiated by
this endpoint.
Return a (ssldata, appdata) tuple. The ssldata element is a list of
buffers containing SSL data that needs to be sent to the remote SSL.
The appdata element is a list of buffers containing plaintext data that
needs to be forwarded to the application. The appdata list may contain
an empty buffer indicating an SSL "close_notify" alert. This alert must
be acknowledged by calling :meth:`shutdown`.
|
[
"Feed",
"SSL",
"record",
"level",
"data",
"into",
"the",
"pipe",
"."
] |
python
|
train
|
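The feed_ssldata pattern above mirrors what the standard library exposes through ssl.MemoryBIO and SSLContext.wrap_bio; a minimal stdlib sketch of the same want-read handshake loop (certificate verification disabled purely for illustration):
import ssl

ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

incoming = ssl.MemoryBIO()   # TLS records received from the peer
outgoing = ssl.MemoryBIO()   # TLS records to be sent to the peer
sslobj = ctx.wrap_bio(incoming, outgoing)

try:
    sslobj.do_handshake()
except ssl.SSLWantReadError:
    # The handshake needs peer data: flush `outgoing` to the transport,
    # write the peer's reply into `incoming`, then call do_handshake() again.
    client_hello = outgoing.read()
    assert client_hello   # records are produced before any peer data arrives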
matthiask/django-cte-forest
|
cte_forest/models.py
|
https://github.com/matthiask/django-cte-forest/blob/7bff29d69eddfcf214e9cf61647c91d28655619c/cte_forest/models.py#L604-L645
|
def as_tree(self, visitor=None, children=None):
""" Recursively traverses each tree (starting from each root) in order
to generate a dictionary-based tree structure of the entire forest.
Each level of the forest/tree is a list of nodes, and each node
consists of a dictionary representation, where the entry
``children`` (by default) consists of a list of dictionary
representations of its children.
Optionally, a `visitor` callback can be used, which is responsible
for generating a dictionary representation of a given
:class:`CTENode`. By default, the :meth:`_default_node_visitor` is
used which generates a dictionary with the current node as well as
structural properties. See :meth:`_default_node_visitor` for the
appropriate signature of this callback.
Optionally, a `children` callback can be used, which is responsible
for determining which :class:`CTENode`s are children of each visited
:class:`CTENode`, resulting in a key (by default ``children``) and a
list of children :class:`CTENode` objects, which are then included
in the dictionary representation of the currently-visited node. See
:meth:`_default_node_children` for the appropriate signature of this
callback.
For each node visited, the :meth:`CTENode.as_tree` method is invoked
along with the optional `visitor` and `children` arguments. This
method, if not overridden, will delegate to :meth:`node_as_tree`,
which is responsible for invoking the :meth:`visitor` and
:meth:`children` methods, as well as updating the dictionary
representation of the node with the representation of the children
nodes.
:param visitor: optional function responsible for generating the
dictionary representation of a node.
:param children: optional function responsible for generating a
children key and list for a node.
:return: a dictionary representation of the structure of the forest.
"""
return [
root.as_tree(visitor=visitor, children=children) for root in self.roots()
]
|
[
"def",
"as_tree",
"(",
"self",
",",
"visitor",
"=",
"None",
",",
"children",
"=",
"None",
")",
":",
"return",
"[",
"root",
".",
"as_tree",
"(",
"visitor",
"=",
"visitor",
",",
"children",
"=",
"children",
")",
"for",
"root",
"in",
"self",
".",
"roots",
"(",
")",
"]"
] |
Recursively traverses each tree (starting from each root) in order
to generate a dictionary-based tree structure of the entire forest.
Each level of the forest/tree is a list of nodes, and each node
consists of a dictionary representation, where the entry
``children`` (by default) consists of a list of dictionary
representations of its children.
Optionally, a `visitor` callback can be used, which is responsible
for generating a dictionary representation of a given
:class:`CTENode`. By default, the :meth:`_default_node_visitor` is
used which generates a dictionary with the current node as well as
structural properties. See :meth:`_default_node_visitor` for the
appropriate signature of this callback.
Optionally, a `children` callback can be used, which is responsible
for determining which :class:`CTENode`s are children of each visited
:class:`CTENode`, resulting in a key (by default ``children``) and a
list of children :class:`CTENode` objects, which are then included
in the dictionary representation of the currently-visited node. See
:meth:`_default_node_children` for the appropriate signature of this
callback.
For each node visited, the :meth:`CTENode.as_tree` method is invoked
along with the optional `visitor` and `children` arguments. This
method, if not overridden, will delegate to :meth:`node_as_tree`,
which is responsible for invoking the :meth:`visitor` and
:meth:`children` methods, as well as updating the dictionary
representation of the node with the representation of the children
nodes.
:param visitor: optional function responsible for generating the
dictionary representation of a node.
:param children: optional function responsible for generating a
children key and list for a node.
:return: a dictionary representation of the structure of the forest.
|
[
"Recursively",
"traverses",
"each",
"tree",
"(",
"starting",
"from",
"each",
"root",
")",
"in",
"order",
"to",
"generate",
"a",
"dictionary",
"-",
"based",
"tree",
"structure",
"of",
"the",
"entire",
"forest",
".",
"Each",
"level",
"of",
"the",
"forest",
"/",
"tree",
"is",
"a",
"list",
"of",
"nodes",
"and",
"each",
"node",
"consists",
"of",
"a",
"dictionary",
"representation",
"where",
"the",
"entry",
"children",
"(",
"by",
"default",
")",
"consists",
"of",
"a",
"list",
"of",
"dictionary",
"representations",
"of",
"its",
"children",
"."
] |
python
|
train
|
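The visitor/children protocol described in the docstring above, stripped of the Django specifics, reduces to a recursive dictionary builder; a standalone sketch with a hypothetical Node type:
class Node(object):
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)

def node_as_tree(node, visitor, children_fn, key='children'):
    rep = visitor(node)                       # dict for this node
    rep[key] = [node_as_tree(c, visitor, children_fn, key)
                for c in children_fn(node)]   # recurse into children
    return rep

root = Node('root', [Node('a'), Node('b', [Node('c')])])
tree = node_as_tree(root, lambda n: {'name': n.name}, lambda n: n.children)
assert tree['children'][1]['children'][0]['name'] == 'c'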
idlesign/uwsgiconf
|
uwsgiconf/options/networking.py
|
https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/options/networking.py#L121-L158
|
def register_socket(self, socket):
"""Registers the given socket(s) for further use.
:param Socket|list[Socket] socket: Socket type object. See ``.sockets``.
"""
sockets = self._sockets
for socket in listify(socket):
uses_shared = isinstance(socket.address, SocketShared)
if uses_shared:
# Handling shared sockets involves socket index resolution.
shared_socket = socket.address # type: SocketShared
if shared_socket not in sockets:
self.register_socket(shared_socket)
socket.address = self._get_shared_socket_idx(shared_socket)
socket.address = self._section.replace_placeholders(socket.address)
self._set(socket.name, socket, multi=True)
socket._contribute_to_opts(self)
bound_workers = socket.bound_workers
if bound_workers:
self._set(
'map-socket', '%s:%s' % (len(sockets), ','.join(map(str, bound_workers))),
multi=True)
if not uses_shared:
sockets.append(socket)
return self._section
|
[
"def",
"register_socket",
"(",
"self",
",",
"socket",
")",
":",
"sockets",
"=",
"self",
".",
"_sockets",
"for",
"socket",
"in",
"listify",
"(",
"socket",
")",
":",
"uses_shared",
"=",
"isinstance",
"(",
"socket",
".",
"address",
",",
"SocketShared",
")",
"if",
"uses_shared",
":",
"# Handling shared sockets involves socket index resolution.",
"shared_socket",
"=",
"socket",
".",
"address",
"# type: SocketShared",
"if",
"shared_socket",
"not",
"in",
"sockets",
":",
"self",
".",
"register_socket",
"(",
"shared_socket",
")",
"socket",
".",
"address",
"=",
"self",
".",
"_get_shared_socket_idx",
"(",
"shared_socket",
")",
"socket",
".",
"address",
"=",
"self",
".",
"_section",
".",
"replace_placeholders",
"(",
"socket",
".",
"address",
")",
"self",
".",
"_set",
"(",
"socket",
".",
"name",
",",
"socket",
",",
"multi",
"=",
"True",
")",
"socket",
".",
"_contribute_to_opts",
"(",
"self",
")",
"bound_workers",
"=",
"socket",
".",
"bound_workers",
"if",
"bound_workers",
":",
"self",
".",
"_set",
"(",
"'map-socket'",
",",
"'%s:%s'",
"%",
"(",
"len",
"(",
"sockets",
")",
",",
"','",
".",
"join",
"(",
"map",
"(",
"str",
",",
"bound_workers",
")",
")",
")",
",",
"multi",
"=",
"True",
")",
"if",
"not",
"uses_shared",
":",
"sockets",
".",
"append",
"(",
"socket",
")",
"return",
"self",
".",
"_section"
] |
Registers the given socket(s) for further use.
:param Socket|list[Socket] socket: Socket type object. See ``.sockets``.
|
[
"Registers",
"the",
"given",
"socket",
"(",
"s",
")",
"for",
"further",
"use",
"."
] |
python
|
train
|
innogames/polysh
|
polysh/remote_dispatcher.py
|
https://github.com/innogames/polysh/blob/fbea36f3bc9f47a62d72040c48dad1776124dae3/polysh/remote_dispatcher.py#L51-L56
|
def main_loop_iteration(timeout=None):
"""Return the number of RemoteDispatcher.handle_read() calls made by this
iteration"""
prev_nr_read = nr_handle_read
asyncore.loop(count=1, timeout=timeout, use_poll=True)
return nr_handle_read - prev_nr_read
|
[
"def",
"main_loop_iteration",
"(",
"timeout",
"=",
"None",
")",
":",
"prev_nr_read",
"=",
"nr_handle_read",
"asyncore",
".",
"loop",
"(",
"count",
"=",
"1",
",",
"timeout",
"=",
"timeout",
",",
"use_poll",
"=",
"True",
")",
"return",
"nr_handle_read",
"-",
"prev_nr_read"
] |
Return the number of RemoteDispatcher.handle_read() calls made by this
iteration
|
[
"Return",
"the",
"number",
"of",
"RemoteDispatcher",
".",
"handle_read",
"()",
"calls",
"made",
"by",
"this",
"iteration"
] |
python
|
train
|
codelv/enaml-native
|
src/enamlnative/android/android_button.py
|
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_button.py#L131-L136
|
def create_widget(self):
""" Create the underlying widget.
"""
d = self.declaration
self.widget = FloatingActionButton(self.get_context(), None, d.style)
|
[
"def",
"create_widget",
"(",
"self",
")",
":",
"d",
"=",
"self",
".",
"declaration",
"self",
".",
"widget",
"=",
"FloatingActionButton",
"(",
"self",
".",
"get_context",
"(",
")",
",",
"None",
",",
"d",
".",
"style",
")"
] |
Create the underlying widget.
|
[
"Create",
"the",
"underlying",
"widget",
"."
] |
python
|
train
|
esheldon/fitsio
|
fitsio/header.py
|
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/header.py#L359-L369
|
def next(self):
"""
for iteration over the header entries
"""
if self._current < len(self._record_list):
rec = self._record_list[self._current]
key = rec['name']
self._current += 1
return key
else:
raise StopIteration
|
[
"def",
"next",
"(",
"self",
")",
":",
"if",
"self",
".",
"_current",
"<",
"len",
"(",
"self",
".",
"_record_list",
")",
":",
"rec",
"=",
"self",
".",
"_record_list",
"[",
"self",
".",
"_current",
"]",
"key",
"=",
"rec",
"[",
"'name'",
"]",
"self",
".",
"_current",
"+=",
"1",
"return",
"key",
"else",
":",
"raise",
"StopIteration"
] |
for iteration over the header entries
|
[
"for",
"iteration",
"over",
"the",
"header",
"entries"
] |
python
|
train
|
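The next method above implements the Python 2 iterator protocol; under Python 3 the hook is __next__, and classes supporting both typically alias one to the other. A minimal standalone sketch of the same iterator shape:
class RecordIter(object):
    def __init__(self, record_list):
        self._record_list = record_list
        self._current = 0

    def __iter__(self):
        return self

    def __next__(self):
        if self._current < len(self._record_list):
            rec = self._record_list[self._current]
            self._current += 1
            return rec['name']
        raise StopIteration

    next = __next__   # Python 2 spelling of the same hook

assert list(RecordIter([{'name': 'NAXIS'}, {'name': 'BITPIX'}])) == ['NAXIS', 'BITPIX']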
senaite/senaite.core
|
bika/lims/browser/partition_magic.py
|
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/partition_magic.py#L297-L315
|
def get_number_of_partitions_for(self, ar):
"""Return the number of selected partitions
"""
# fetch the number of partitions from the request
uid = api.get_uid(ar)
num = self.request.get("primary", {}).get(uid)
if num is None:
# get the number of partitions from the template
template = ar.getTemplate()
if template:
num = len(template.getPartitions())
else:
num = DEFAULT_NUMBER_OF_PARTITIONS
try:
num = int(num)
except (TypeError, ValueError):
num = DEFAULT_NUMBER_OF_PARTITIONS
return num
|
[
"def",
"get_number_of_partitions_for",
"(",
"self",
",",
"ar",
")",
":",
"# fetch the number of partitions from the request",
"uid",
"=",
"api",
".",
"get_uid",
"(",
"ar",
")",
"num",
"=",
"self",
".",
"request",
".",
"get",
"(",
"\"primary\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"uid",
")",
"if",
"num",
"is",
"None",
":",
"# get the number of partitions from the template",
"template",
"=",
"ar",
".",
"getTemplate",
"(",
")",
"if",
"template",
":",
"num",
"=",
"len",
"(",
"template",
".",
"getPartitions",
"(",
")",
")",
"else",
":",
"num",
"=",
"DEFAULT_NUMBER_OF_PARTITIONS",
"try",
":",
"num",
"=",
"int",
"(",
"num",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"num",
"=",
"DEFAULT_NUMBER_OF_PARTITIONS",
"return",
"num"
] |
Return the number of selected partitions
|
[
"Return",
"the",
"number",
"of",
"selected",
"partitions"
] |
python
|
train
|
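The fallback chain above ends with a defensive int() coercion; catching both TypeError and ValueError covers None as well as non-numeric strings. The pattern in isolation, with a hypothetical default:
DEFAULT_NUMBER_OF_PARTITIONS = 1   # hypothetical default for illustration

def to_int(value, default=DEFAULT_NUMBER_OF_PARTITIONS):
    try:
        return int(value)
    except (TypeError, ValueError):   # None raises TypeError, 'abc' ValueError
        return default

assert to_int('3') == 3
assert to_int(None) == 1
assert to_int('abc') == 1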
timster/peewee-validates
|
peewee_validates.py
|
https://github.com/timster/peewee-validates/blob/417f0fafb87fe9209439d65bc279d86a3d9e8028/peewee_validates.py#L314-L334
|
def coerce_single_instance(lookup_field, value):
"""
Convert from whatever value is given to a scalar value for lookup_field.
If value is a dict, then lookup_field.name is used to get the value from the dict. Example:
lookup_field.name = 'id'
value = {'id': 123, 'name': 'tim'}
returns = 123
If value is a model, then lookup_field.name is extracted from the model. Example:
lookup_field.name = 'id'
value = <User id=123 name='tim'>
returns = 123
Otherwise the value is returned as-is.
:param lookup_field: Peewee model field used for getting name from value.
:param value: Some kind of value (usually a dict, Model instance, or scalar).
"""
if isinstance(value, dict):
return value.get(lookup_field.name)
if isinstance(value, peewee.Model):
return getattr(value, lookup_field.name)
return value
|
[
"def",
"coerce_single_instance",
"(",
"lookup_field",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"return",
"value",
".",
"get",
"(",
"lookup_field",
".",
"name",
")",
"if",
"isinstance",
"(",
"value",
",",
"peewee",
".",
"Model",
")",
":",
"return",
"getattr",
"(",
"value",
",",
"lookup_field",
".",
"name",
")",
"return",
"value"
] |
Convert from whatever value is given to a scalar value for lookup_field.
If value is a dict, then lookup_field.name is used to get the value from the dict. Example:
lookup_field.name = 'id'
value = {'id': 123, 'name': 'tim'}
returns = 123
If value is a model, then lookup_field.name is extracted from the model. Example:
lookup_field.name = 'id'
value = <User id=123 name='tim'>
returns = 123
Otherwise the value is returned as-is.
:param lookup_field: Peewee model field used for getting name from value.
:param value: Some kind of value (usually a dict, Model instance, or scalar).
|
[
"Convert",
"from",
"whatever",
"value",
"is",
"given",
"to",
"a",
"scalar",
"value",
"for",
"lookup_field",
".",
"If",
"value",
"is",
"a",
"dict",
"then",
"lookup_field",
".",
"name",
"is",
"used",
"to",
"get",
"the",
"value",
"from",
"the",
"dict",
".",
"Example",
":",
"lookup_field",
".",
"name",
"=",
"id",
"value",
"=",
"{",
"id",
":",
"123",
"name",
":",
"tim",
"}",
"returns",
"=",
"123",
"If",
"value",
"is",
"a",
"model",
"then",
"lookup_field",
".",
"name",
"is",
"extracted",
"from",
"the",
"model",
".",
"Example",
":",
"lookup_field",
".",
"name",
"=",
"id",
"value",
"=",
"<User",
"id",
"=",
"123",
"name",
"=",
"tim",
">",
"returns",
"=",
"123",
"Otherwise",
"the",
"value",
"is",
"returned",
"as",
"-",
"is",
"."
] |
python
|
train
|
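A runnable sketch of the dispatch in coerce_single_instance; the peewee.Model isinstance check is replaced by a duck-typed getattr fallback so the example needs no peewee install:
def coerce_single_instance(lookup_field, value):
    if isinstance(value, dict):
        return value.get(lookup_field.name)
    if hasattr(value, lookup_field.name):   # model-like objects
        return getattr(value, lookup_field.name)
    return value

class FakeField(object):   # stands in for a peewee field; only .name is used
    name = 'id'

class FakeUser(object):    # stands in for a peewee Model instance
    id = 123

field = FakeField()
assert coerce_single_instance(field, {'id': 123, 'name': 'tim'}) == 123
assert coerce_single_instance(field, FakeUser()) == 123
assert coerce_single_instance(field, 123) == 123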
googleapis/google-cloud-python
|
pubsub/google/cloud/pubsub_v1/subscriber/_protocol/streaming_pull_manager.py
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/pubsub/google/cloud/pubsub_v1/subscriber/_protocol/streaming_pull_manager.py#L264-L276
|
def send(self, request):
"""Queue a request to be sent to the RPC."""
if self._UNARY_REQUESTS:
try:
self._send_unary_request(request)
except exceptions.GoogleAPICallError:
_LOGGER.debug(
"Exception while sending unary RPC. This is typically "
"non-fatal as stream requests are best-effort.",
exc_info=True,
)
else:
self._rpc.send(request)
|
[
"def",
"send",
"(",
"self",
",",
"request",
")",
":",
"if",
"self",
".",
"_UNARY_REQUESTS",
":",
"try",
":",
"self",
".",
"_send_unary_request",
"(",
"request",
")",
"except",
"exceptions",
".",
"GoogleAPICallError",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Exception while sending unary RPC. This is typically \"",
"\"non-fatal as stream requests are best-effort.\"",
",",
"exc_info",
"=",
"True",
",",
")",
"else",
":",
"self",
".",
"_rpc",
".",
"send",
"(",
"request",
")"
] |
Queue a request to be sent to the RPC.
|
[
"Queue",
"a",
"request",
"to",
"be",
"sent",
"to",
"the",
"RPC",
"."
] |
python
|
train
|
brandonxiang/geojson-python-utils
|
geojson_utils/geojson_utils.py
|
https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L170-L194
|
def draw_circle(radius_in_meters, center_point, steps=15):
"""
get a circle shape polygon based on centerPoint and radius
Keyword arguments:
radius_in_meters -- radius of the circle in meters
center_point -- center point geojson object
steps -- number of vertices in the circle polygon (minimum 15)
"""
steps = steps if steps > 15 else 15
center = [center_point['coordinates'][1], center_point['coordinates'][0]]
dist = (radius_in_meters / 1000) / 6371
# convert meters to radians
rad_center = [number2radius(center[0]), number2radius(center[1])]
# steps-sided circle (minimum 15)
poly = []
for step in range(0, steps):
brng = 2 * math.pi * step / steps
lat = math.asin(math.sin(rad_center[0]) * math.cos(dist) +
math.cos(rad_center[0]) * math.sin(dist) * math.cos(brng))
lng = rad_center[1] + math.atan2(math.sin(brng) * math.sin(dist)
* math.cos(rad_center[0]), math.cos(dist) - math.sin(rad_center[0]) * math.sin(lat))
poly.append([number2degree(lng), number2degree(lat)])
return {"type": "Polygon", "coordinates": [poly]}
|
[
"def",
"draw_circle",
"(",
"radius_in_meters",
",",
"center_point",
",",
"steps",
"=",
"15",
")",
":",
"steps",
"=",
"steps",
"if",
"steps",
">",
"15",
"else",
"15",
"center",
"=",
"[",
"center_point",
"[",
"'coordinates'",
"]",
"[",
"1",
"]",
",",
"center_point",
"[",
"'coordinates'",
"]",
"[",
"0",
"]",
"]",
"dist",
"=",
"(",
"radius_in_meters",
"/",
"1000",
")",
"/",
"6371",
"# convert meters to radiant",
"rad_center",
"=",
"[",
"number2radius",
"(",
"center",
"[",
"0",
"]",
")",
",",
"number2radius",
"(",
"center",
"[",
"1",
"]",
")",
"]",
"# 15 sided circle",
"poly",
"=",
"[",
"]",
"for",
"step",
"in",
"range",
"(",
"0",
",",
"steps",
")",
":",
"brng",
"=",
"2",
"*",
"math",
".",
"pi",
"*",
"step",
"/",
"steps",
"lat",
"=",
"math",
".",
"asin",
"(",
"math",
".",
"sin",
"(",
"rad_center",
"[",
"0",
"]",
")",
"*",
"math",
".",
"cos",
"(",
"dist",
")",
"+",
"math",
".",
"cos",
"(",
"rad_center",
"[",
"0",
"]",
")",
"*",
"math",
".",
"sin",
"(",
"dist",
")",
"*",
"math",
".",
"cos",
"(",
"brng",
")",
")",
"lng",
"=",
"rad_center",
"[",
"1",
"]",
"+",
"math",
".",
"atan2",
"(",
"math",
".",
"sin",
"(",
"brng",
")",
"*",
"math",
".",
"sin",
"(",
"dist",
")",
"*",
"math",
".",
"cos",
"(",
"rad_center",
"[",
"0",
"]",
")",
",",
"math",
".",
"cos",
"(",
"dist",
")",
"-",
"math",
".",
"sin",
"(",
"rad_center",
"[",
"0",
"]",
")",
"*",
"math",
".",
"sin",
"(",
"lat",
")",
")",
"poly",
".",
"append",
"(",
"[",
"number2degree",
"(",
"lng",
")",
",",
"number2degree",
"(",
"lat",
")",
"]",
")",
"return",
"{",
"\"type\"",
":",
"\"Polygon\"",
",",
"\"coordinates\"",
":",
"[",
"poly",
"]",
"}"
] |
get a circle shape polygon based on centerPoint and radius
Keyword arguments:
radius_in_meters -- radius of the circle in meters
center_point -- center point geojson object
steps -- number of vertices in the circle polygon (minimum 15)
|
[
"get",
"a",
"circle",
"shape",
"polygon",
"based",
"on",
"centerPoint",
"and",
"radius"
] |
python
|
train
|
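A usage sketch for draw_circle, assuming the module above is importable as geojson_utils (the import path is an assumption); a 1 km circle with 32 vertices around San Francisco:
from geojson_utils import draw_circle   # assumed import path

center = {"type": "Point", "coordinates": [-122.4194, 37.7749]}
circle = draw_circle(1000, center, steps=32)   # radius in meters

assert circle["type"] == "Polygon"
assert len(circle["coordinates"][0]) == 32     # one vertex per step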
RudolfCardinal/pythonlib
|
cardinal_pythonlib/rnc_db.py
|
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_db.py#L2161-L2182
|
def insert_multiple_records(self,
table: str,
fields: Sequence[str],
records: Sequence[Sequence[Any]]) -> int:
"""Inserts a record into database, table "table", using the list of
fieldnames and the list of records (each a list of values).
Returns number of rows affected."""
self.ensure_db_open()
sql = self.localize_sql(get_sql_insert(table, fields,
self.get_delims()))
log.debug("About to insert multiple records with SQL template: " + sql)
try:
cursor = self.db.cursor()
debug_sql(sql, records)
cursor.executemany(sql, records)
# ... binds the placeholders (?, %s) to values in the process
# http://www.python.org/dev/peps/pep-0249/
log.debug("Records inserted.")
return cursor.rowcount
except: # nopep8
log.exception("insert_multiple_records: Failed to insert records.")
raise
|
[
"def",
"insert_multiple_records",
"(",
"self",
",",
"table",
":",
"str",
",",
"fields",
":",
"Sequence",
"[",
"str",
"]",
",",
"records",
":",
"Sequence",
"[",
"Sequence",
"[",
"Any",
"]",
"]",
")",
"->",
"int",
":",
"self",
".",
"ensure_db_open",
"(",
")",
"sql",
"=",
"self",
".",
"localize_sql",
"(",
"get_sql_insert",
"(",
"table",
",",
"fields",
",",
"self",
".",
"get_delims",
"(",
")",
")",
")",
"log",
".",
"debug",
"(",
"\"About to insert multiple records with SQL template: \"",
"+",
"sql",
")",
"try",
":",
"cursor",
"=",
"self",
".",
"db",
".",
"cursor",
"(",
")",
"debug_sql",
"(",
"sql",
",",
"records",
")",
"cursor",
".",
"executemany",
"(",
"sql",
",",
"records",
")",
"# ... binds the placeholders (?, %s) to values in the process",
"# http://www.python.org/dev/peps/pep-0249/",
"log",
".",
"debug",
"(",
"\"Records inserted.\"",
")",
"return",
"cursor",
".",
"rowcount",
"except",
":",
"# nopep8",
"log",
".",
"exception",
"(",
"\"insert_multiple_records: Failed to insert records.\"",
")",
"raise"
] |
Inserts a record into database, table "table", using the list of
fieldnames and the list of records (each a list of values).
Returns number of rows affected.
|
[
"Inserts",
"a",
"record",
"into",
"database",
"table",
"table",
"using",
"the",
"list",
"of",
"fieldnames",
"and",
"the",
"list",
"of",
"records",
"(",
"each",
"a",
"list",
"of",
"values",
")",
".",
"Returns",
"number",
"of",
"rows",
"affected",
"."
] |
python
|
train
|
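The core of insert_multiple_records is cursor.executemany, which binds one sequence of values per placeholder set; a self-contained sqlite3 illustration of that call (the surrounding class handles SQL dialect and logging):
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE person (id INTEGER, name TEXT)')

sql = 'INSERT INTO person (id, name) VALUES (?, ?)'   # qmark placeholders
records = [(1, 'alice'), (2, 'bob')]

cursor = conn.cursor()
cursor.executemany(sql, records)   # one INSERT per row in `records`
assert cursor.rowcount == 2
conn.commit()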
robmcmullen/atrcopy
|
atrcopy/segments.py
|
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L1003-L1016
|
def get_comments_in_range(self, start, end):
"""Get a list of comments at specified indexes"""
comments = {}
# Naive way, but maybe it's fast enough: loop over all comments
# gathering those within the bounds
for rawindex, comment in self.rawdata.extra.comments.items():
try:
index = self.get_index_from_base_index(rawindex)
except IndexError:
continue
if index >= start and index < end:
comments[index] = comment
return comments
|
[
"def",
"get_comments_in_range",
"(",
"self",
",",
"start",
",",
"end",
")",
":",
"comments",
"=",
"{",
"}",
"# Naive way, but maybe it's fast enough: loop over all comments",
"# gathering those within the bounds",
"for",
"rawindex",
",",
"comment",
"in",
"self",
".",
"rawdata",
".",
"extra",
".",
"comments",
".",
"items",
"(",
")",
":",
"try",
":",
"index",
"=",
"self",
".",
"get_index_from_base_index",
"(",
"rawindex",
")",
"except",
"IndexError",
":",
"continue",
"if",
"index",
">=",
"start",
"and",
"index",
"<",
"end",
":",
"comments",
"[",
"index",
"]",
"=",
"comment",
"return",
"comments"
] |
Get a list of comments at specified indexes
|
[
"Get",
"a",
"list",
"of",
"comments",
"at",
"specified",
"indexes"
] |
python
|
train
|
assemblerflow/flowcraft
|
flowcraft/generator/engine.py
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/engine.py#L1478-L1587
|
def fetch_docker_tags(self):
"""
Export all dockerhub tags associated with each component given by
the -t flag.
"""
# dict to store the already parsed components (useful when forks are
# given to the pipeline string via -t flag
dict_of_parsed = {}
# fetches terminal width and subtracts 3 because we always add a
# new line character and we want a space at the beginning and at the end
# of each line
terminal_width = shutil.get_terminal_size().columns - 3
# first header
center_string = " Selected container tags "
# starts a list with the headers
tags_list = [
[
"=" * int(terminal_width / 4),
"{0}{1}{0}".format(
"=" * int(((terminal_width/2 - len(center_string)) / 2)),
center_string)
,
"{}\n".format("=" * int(terminal_width / 4))
],
["component", "container", "tags"],
[
"=" * int(terminal_width / 4),
"=" * int(terminal_width / 2),
"=" * int(terminal_width / 4)
]
]
# Skip first init process and iterate through the others
for p in self.processes[1:]:
template = p.template
# if component has already been printed then skip and don't print
# again
if template in dict_of_parsed:
continue
# starts a list of containers for the current process in
# dict_of_parsed, to which each container will be added once it
# gets parsed
dict_of_parsed[template] = {
"container": []
}
# fetch repo name from directives of each component.
for directives in p.directives.values():
try:
repo = directives["container"]
default_version = directives["version"]
except KeyError:
# adds the default container if container key isn't present
# this happens for instance in integrity_coverage
repo = "flowcraft/flowcraft_base"
default_version = "1.0.0-1"
# checks if repo_version already exists in list of the
# containers for the current component being queried
repo_version = repo + default_version
if repo_version not in dict_of_parsed[template]["container"]:
# make the request to docker hub
r = requests.get(
"https://hub.docker.com/v2/repositories/{}/tags/"
.format(repo)
)
# checks the status code of the request; if it is not 404 then
# parses the docker hub entry, otherwise retrieves no tags but
# alerts the user
if r.status_code != 404:
# parse response content to dict and fetch results key
r_content = json.loads(r.content)["results"]
for version in r_content:
printed_version = (version["name"] + "*") \
if version["name"] == default_version \
else version["name"]
tags_list.append([template, repo, printed_version])
else:
tags_list.append([template, repo, "No DockerHub tags"])
dict_of_parsed[template]["container"].append(repo_version)
# iterate through each entry in tags_list and print the list of tags
# for each component. Each entry (excluding the headers) contains
# 3 elements (component name, container and tag version)
for x, entry in enumerate(tags_list):
# adds a different color to the three header rows and
# alternates between two colors for odd and even rows (different
# background)
color = "blue_bold" if x < 3 else \
("white" if x % 2 != 0 else "0;37;40m")
# generates a small list with the terminal width for each column,
# these widths are passed to string formatting as arguments 3, 4 and 5
final_width = [
int(terminal_width/4),
int(terminal_width/2),
int(terminal_width/4)
]
# writes the string to the stdout
sys.stdout.write(
colored_print("\n {0: <{3}} {1: ^{4}} {2: >{5}}".format(
*entry, *final_width), color)
)
# assures that the entire line gets the same color
sys.stdout.write("\n{0: >{1}}\n".format("(* = default)",
terminal_width + 3))
|
[
"def",
"fetch_docker_tags",
"(",
"self",
")",
":",
"# dict to store the already parsed components (useful when forks are",
"# given to the pipeline string via -t flag",
"dict_of_parsed",
"=",
"{",
"}",
"# fetches terminal width and subtracts 3 because we always add a",
"# new line character and we want a space at the beggining and at the end",
"# of each line",
"terminal_width",
"=",
"shutil",
".",
"get_terminal_size",
"(",
")",
".",
"columns",
"-",
"3",
"# first header",
"center_string",
"=",
"\" Selected container tags \"",
"# starts a list with the headers",
"tags_list",
"=",
"[",
"[",
"\"=\"",
"*",
"int",
"(",
"terminal_width",
"/",
"4",
")",
",",
"\"{0}{1}{0}\"",
".",
"format",
"(",
"\"=\"",
"*",
"int",
"(",
"(",
"(",
"terminal_width",
"/",
"2",
"-",
"len",
"(",
"center_string",
")",
")",
"/",
"2",
")",
")",
",",
"center_string",
")",
",",
"\"{}\\n\"",
".",
"format",
"(",
"\"=\"",
"*",
"int",
"(",
"terminal_width",
"/",
"4",
")",
")",
"]",
",",
"[",
"\"component\"",
",",
"\"container\"",
",",
"\"tags\"",
"]",
",",
"[",
"\"=\"",
"*",
"int",
"(",
"terminal_width",
"/",
"4",
")",
",",
"\"=\"",
"*",
"int",
"(",
"terminal_width",
"/",
"2",
")",
",",
"\"=\"",
"*",
"int",
"(",
"terminal_width",
"/",
"4",
")",
"]",
"]",
"# Skip first init process and iterate through the others",
"for",
"p",
"in",
"self",
".",
"processes",
"[",
"1",
":",
"]",
":",
"template",
"=",
"p",
".",
"template",
"# if component has already been printed then skip and don't print",
"# again",
"if",
"template",
"in",
"dict_of_parsed",
":",
"continue",
"# starts a list of containers for the current process in",
"# dict_of_parsed, in which each containers will be added to this",
"# list once it gets parsed",
"dict_of_parsed",
"[",
"template",
"]",
"=",
"{",
"\"container\"",
":",
"[",
"]",
"}",
"# fetch repo name from directives of each component.",
"for",
"directives",
"in",
"p",
".",
"directives",
".",
"values",
"(",
")",
":",
"try",
":",
"repo",
"=",
"directives",
"[",
"\"container\"",
"]",
"default_version",
"=",
"directives",
"[",
"\"version\"",
"]",
"except",
"KeyError",
":",
"# adds the default container if container key isn't present",
"# this happens for instance in integrity_coverage",
"repo",
"=",
"\"flowcraft/flowcraft_base\"",
"default_version",
"=",
"\"1.0.0-1\"",
"# checks if repo_version already exists in list of the",
"# containers for the current component being queried",
"repo_version",
"=",
"repo",
"+",
"default_version",
"if",
"repo_version",
"not",
"in",
"dict_of_parsed",
"[",
"template",
"]",
"[",
"\"container\"",
"]",
":",
"# make the request to docker hub",
"r",
"=",
"requests",
".",
"get",
"(",
"\"https://hub.docker.com/v2/repositories/{}/tags/\"",
".",
"format",
"(",
"repo",
")",
")",
"# checks the status code of the request, if it is 200 then",
"# parses docker hub entry, otherwise retrieve no tags but",
"# alerts the user",
"if",
"r",
".",
"status_code",
"!=",
"404",
":",
"# parse response content to dict and fetch results key",
"r_content",
"=",
"json",
".",
"loads",
"(",
"r",
".",
"content",
")",
"[",
"\"results\"",
"]",
"for",
"version",
"in",
"r_content",
":",
"printed_version",
"=",
"(",
"version",
"[",
"\"name\"",
"]",
"+",
"\"*\"",
")",
"if",
"version",
"[",
"\"name\"",
"]",
"==",
"default_version",
"else",
"version",
"[",
"\"name\"",
"]",
"tags_list",
".",
"append",
"(",
"[",
"template",
",",
"repo",
",",
"printed_version",
"]",
")",
"else",
":",
"tags_list",
".",
"append",
"(",
"[",
"template",
",",
"repo",
",",
"\"No DockerHub tags\"",
"]",
")",
"dict_of_parsed",
"[",
"template",
"]",
"[",
"\"container\"",
"]",
".",
"append",
"(",
"repo_version",
")",
"# iterate through each entry in tags_list and print the list of tags",
"# for each component. Each entry (excluding the headers) contains",
"# 3 elements (component name, container and tag version)",
"for",
"x",
",",
"entry",
"in",
"enumerate",
"(",
"tags_list",
")",
":",
"# adds different color to the header in the first list and",
"# if row is pair add one color and if is even add another (different",
"# background)",
"color",
"=",
"\"blue_bold\"",
"if",
"x",
"<",
"3",
"else",
"(",
"\"white\"",
"if",
"x",
"%",
"2",
"!=",
"0",
"else",
"\"0;37;40m\"",
")",
"# generates a small list with the terminal width for each column,",
"# this will be given to string formatting as the 3, 4 and 5 element",
"final_width",
"=",
"[",
"int",
"(",
"terminal_width",
"/",
"4",
")",
",",
"int",
"(",
"terminal_width",
"/",
"2",
")",
",",
"int",
"(",
"terminal_width",
"/",
"4",
")",
"]",
"# writes the string to the stdout",
"sys",
".",
"stdout",
".",
"write",
"(",
"colored_print",
"(",
"\"\\n {0: <{3}} {1: ^{4}} {2: >{5}}\"",
".",
"format",
"(",
"*",
"entry",
",",
"*",
"final_width",
")",
",",
"color",
")",
")",
"# assures that the entire line gets the same color",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\n{0: >{1}}\\n\"",
".",
"format",
"(",
"\"(* = default)\"",
",",
"terminal_width",
"+",
"3",
")",
")"
] |
Export all dockerhub tags associated with each component given by
the -t flag.
|
[
"Export",
"all",
"dockerhub",
"tags",
"associated",
"with",
"each",
"component",
"given",
"by",
"the",
"-",
"t",
"flag",
"."
] |
python
|
test
|
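The Docker Hub call inside fetch_docker_tags, extracted into a small helper; like the original it only reads the first page of results, and a live network connection is required:
import json
import requests

def docker_tags(repo):
    """Return tag names for a Docker Hub repository, or [] when not found."""
    r = requests.get(
        "https://hub.docker.com/v2/repositories/{}/tags/".format(repo))
    if r.status_code == 404:
        return []
    return [entry["name"] for entry in json.loads(r.content)["results"]]

# e.g. docker_tags("library/alpine") should include "latest"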
googleads/googleads-python-lib
|
examples/adwords/v201809/migration/migrate_to_extension_settings.py
|
https://github.com/googleads/googleads-python-lib/blob/aa3b1b474b0f9789ca55ca46f4b2b57aeae38874/examples/adwords/v201809/migration/migrate_to_extension_settings.py#L178-L228
|
def GetCampaignFeeds(client, feed, placeholder_type):
"""Get a list of Feed Item Ids used by a campaign via a given Campaign Feed.
Args:
client: an AdWordsClient instance.
feed: a Campaign Feed.
placeholder_type: the Placeholder Type.
Returns:
A list of Feed Item Ids.
"""
campaign_feed_service = client.GetService('CampaignFeedService', 'v201809')
campaign_feeds = []
more_pages = True
selector = {
'fields': ['CampaignId', 'MatchingFunction', 'PlaceholderTypes'],
'predicates': [
{
'field': 'Status',
'operator': 'EQUALS',
'values': ['ENABLED']
},
{
'field': 'FeedId',
'operator': 'EQUALS',
'values': [feed['id']]
},
{
'field': 'PlaceholderTypes',
'operator': 'CONTAINS_ANY',
'values': [placeholder_type]
}
],
'paging': {
'startIndex': 0,
'numberResults': PAGE_SIZE
}
}
while more_pages:
page = campaign_feed_service.get(selector)
if 'entries' in page:
campaign_feeds.extend(page['entries'])
selector['paging']['startIndex'] += PAGE_SIZE
more_pages = selector['paging']['startIndex'] < int(page['totalNumEntries'])
return campaign_feeds
|
[
"def",
"GetCampaignFeeds",
"(",
"client",
",",
"feed",
",",
"placeholder_type",
")",
":",
"campaign_feed_service",
"=",
"client",
".",
"GetService",
"(",
"'CampaignFeedService'",
",",
"'v201809'",
")",
"campaign_feeds",
"=",
"[",
"]",
"more_pages",
"=",
"True",
"selector",
"=",
"{",
"'fields'",
":",
"[",
"'CampaignId'",
",",
"'MatchingFunction'",
",",
"'PlaceholderTypes'",
"]",
",",
"'predicates'",
":",
"[",
"{",
"'field'",
":",
"'Status'",
",",
"'operator'",
":",
"'EQUALS'",
",",
"'values'",
":",
"[",
"'ENABLED'",
"]",
"}",
",",
"{",
"'field'",
":",
"'FeedId'",
",",
"'operator'",
":",
"'EQUALS'",
",",
"'values'",
":",
"[",
"feed",
"[",
"'id'",
"]",
"]",
"}",
",",
"{",
"'field'",
":",
"'PlaceholderTypes'",
",",
"'operator'",
":",
"'CONTAINS_ANY'",
",",
"'values'",
":",
"[",
"placeholder_type",
"]",
"}",
"]",
",",
"'paging'",
":",
"{",
"'startIndex'",
":",
"0",
",",
"'numberResults'",
":",
"PAGE_SIZE",
"}",
"}",
"while",
"more_pages",
":",
"page",
"=",
"campaign_feed_service",
".",
"get",
"(",
"selector",
")",
"if",
"'entries'",
"in",
"page",
":",
"campaign_feeds",
".",
"extend",
"(",
"page",
"[",
"'entries'",
"]",
")",
"selector",
"[",
"'paging'",
"]",
"[",
"'startIndex'",
"]",
"+=",
"PAGE_SIZE",
"more_pages",
"=",
"selector",
"[",
"'paging'",
"]",
"[",
"'startIndex'",
"]",
"<",
"int",
"(",
"page",
"[",
"'totalNumEntries'",
"]",
")",
"return",
"campaign_feeds"
] |
Get a list of Feed Item Ids used by a campaign via a given Campaign Feed.
Args:
client: an AdWordsClient instance.
feed: a Campaign Feed.
placeholder_type: the Placeholder Type.
Returns:
A list of Feed Item Ids.
|
[
"Get",
"a",
"list",
"of",
"Feed",
"Item",
"Ids",
"used",
"by",
"a",
"campaign",
"via",
"a",
"given",
"Campaign",
"Feed",
"."
] |
python
|
train
|
saltstack/salt
|
salt/modules/rvm.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rvm.py#L410-L440
|
def gemset_list_all(runas=None):
'''
List all gemsets for all installed rubies.
Note that you must have set a default ruby before this can work.
runas
The user under which to run rvm. If not specified, then rvm will be run
as the user under which Salt is running.
CLI Example:
.. code-block:: bash
salt '*' rvm.gemset_list_all
'''
gemsets = {}
current_ruby = None
output = _rvm_do('default', ['rvm', 'gemset', 'list_all'], runas=runas)
if output:
gems_regex = re.compile('^ ([^ ]+)')
gemset_regex = re.compile('^gemsets for ([^ ]+)')
for line in output.splitlines():
match = gemset_regex.match(line)
if match:
current_ruby = match.group(1)
gemsets[current_ruby] = []
match = gems_regex.match(line)
if match:
gemsets[current_ruby].append(match.group(1))
return gemsets
|
[
"def",
"gemset_list_all",
"(",
"runas",
"=",
"None",
")",
":",
"gemsets",
"=",
"{",
"}",
"current_ruby",
"=",
"None",
"output",
"=",
"_rvm_do",
"(",
"'default'",
",",
"[",
"'rvm'",
",",
"'gemset'",
",",
"'list_all'",
"]",
",",
"runas",
"=",
"runas",
")",
"if",
"output",
":",
"gems_regex",
"=",
"re",
".",
"compile",
"(",
"'^ ([^ ]+)'",
")",
"gemset_regex",
"=",
"re",
".",
"compile",
"(",
"'^gemsets for ([^ ]+)'",
")",
"for",
"line",
"in",
"output",
".",
"splitlines",
"(",
")",
":",
"match",
"=",
"gemset_regex",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"current_ruby",
"=",
"match",
".",
"group",
"(",
"1",
")",
"gemsets",
"[",
"current_ruby",
"]",
"=",
"[",
"]",
"match",
"=",
"gems_regex",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"gemsets",
"[",
"current_ruby",
"]",
".",
"append",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"return",
"gemsets"
] |
List all gemsets for all installed rubies.
Note that you must have set a default ruby before this can work.
runas
The user under which to run rvm. If not specified, then rvm will be run
as the user under which Salt is running.
CLI Example:
.. code-block:: bash
salt '*' rvm.gemset_list_all
|
[
"List",
"all",
"gemsets",
"for",
"all",
"installed",
"rubies",
"."
] |
python
|
train
|
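The two regexes in gemset_list_all drive a simple line-oriented state machine; running the same loop over a hypothetical snippet of `rvm gemset list_all` output (real rvm indentation may differ; three spaces are assumed here):
import re

sample = (
    "gemsets for ruby-2.7.2\n"
    "   (default)\n"
    "   global\n"
    "gemsets for ruby-3.0.0\n"
    "   myapp\n"
)

gemsets = {}
current_ruby = None
gems_regex = re.compile('^   ([^ ]+)')            # indented gem lines
gemset_regex = re.compile('^gemsets for ([^ ]+)')  # per-ruby headers
for line in sample.splitlines():
    match = gemset_regex.match(line)
    if match:
        current_ruby = match.group(1)
        gemsets[current_ruby] = []
    match = gems_regex.match(line)
    if match:
        gemsets[current_ruby].append(match.group(1))

assert gemsets == {'ruby-2.7.2': ['(default)', 'global'],
                   'ruby-3.0.0': ['myapp']}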
fracpete/python-weka-wrapper3
|
python/weka/core/packages.py
|
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/packages.py#L133-L148
|
def check_constraint(self, pkge=None, constr=None):
"""
Checks the constraints.
:param pkge: the package to check
:type pkge: Package
:param constr: the package constraint to check
:type constr: PackageConstraint
"""
if not pkge is None:
return javabridge.call(
self.jobject, "checkConstraint", "(Lweka/core/packageManagement/Package;)Z", pkge.jobject)
if not constr is None:
return javabridge.call(
self.jobject, "checkConstraint", "(Lweka/core/packageManagement/PackageConstraint;)Z", constr.jobject)
raise Exception("Either package or package constraing must be provided!")
|
[
"def",
"check_constraint",
"(",
"self",
",",
"pkge",
"=",
"None",
",",
"constr",
"=",
"None",
")",
":",
"if",
"not",
"pkge",
"is",
"None",
":",
"return",
"javabridge",
".",
"call",
"(",
"self",
".",
"jobject",
",",
"\"checkConstraint\"",
",",
"\"(Lweka/core/packageManagement/Package;)Z\"",
",",
"pkge",
".",
"jobject",
")",
"if",
"not",
"constr",
"is",
"None",
":",
"return",
"javabridge",
".",
"call",
"(",
"self",
".",
"jobject",
",",
"\"checkConstraint\"",
",",
"\"(Lweka/core/packageManagement/PackageConstraint;)Z\"",
",",
"pkge",
".",
"jobject",
")",
"raise",
"Exception",
"(",
"\"Either package or package constraing must be provided!\"",
")"
] |
Checks the constraints.
:param pkge: the package to check
:type pkge: Package
:param constr: the package constraint to check
:type constr: PackageConstraint
|
[
"Checks",
"the",
"constraints",
"."
] |
python
|
train
|
python-cmd2/cmd2
|
examples/async_printing.py
|
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/examples/async_printing.py#L118-L141
|
def _generate_alert_str(self) -> str:
"""
Combines alerts into one string that can be printed to the terminal
:return: the alert string
"""
global ALERTS
alert_str = ''
alerts = self._get_alerts()
longest_alert = max(ALERTS, key=len)
num_asterisks = len(longest_alert) + 8
for i, cur_alert in enumerate(alerts):
# Use padding to center the alert
padding = ' ' * int((num_asterisks - len(cur_alert)) / 2)
if i > 0:
alert_str += '\n'
alert_str += '*' * num_asterisks + '\n'
alert_str += padding + cur_alert + padding + '\n'
alert_str += '*' * num_asterisks + '\n'
return alert_str
|
[
"def",
"_generate_alert_str",
"(",
"self",
")",
"->",
"str",
":",
"global",
"ALERTS",
"alert_str",
"=",
"''",
"alerts",
"=",
"self",
".",
"_get_alerts",
"(",
")",
"longest_alert",
"=",
"max",
"(",
"ALERTS",
",",
"key",
"=",
"len",
")",
"num_asterisks",
"=",
"len",
"(",
"longest_alert",
")",
"+",
"8",
"for",
"i",
",",
"cur_alert",
"in",
"enumerate",
"(",
"alerts",
")",
":",
"# Use padding to center the alert",
"padding",
"=",
"' '",
"*",
"int",
"(",
"(",
"num_asterisks",
"-",
"len",
"(",
"cur_alert",
")",
")",
"/",
"2",
")",
"if",
"i",
">",
"0",
":",
"alert_str",
"+=",
"'\\n'",
"alert_str",
"+=",
"'*'",
"*",
"num_asterisks",
"+",
"'\\n'",
"alert_str",
"+=",
"padding",
"+",
"cur_alert",
"+",
"padding",
"+",
"'\\n'",
"alert_str",
"+=",
"'*'",
"*",
"num_asterisks",
"+",
"'\\n'",
"return",
"alert_str"
] |
Combines alerts into one string that can be printed to the terminal
:return: the alert string
|
[
"Combines",
"alerts",
"into",
"one",
"string",
"that",
"can",
"be",
"printed",
"to",
"the",
"terminal",
":",
"return",
":",
"the",
"alert",
"string"
] |
python
|
train
|
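One detail of _generate_alert_str: int((num_asterisks - len(cur_alert)) / 2) truncates, so an odd padding difference leaves the line one character short of the banner width; str.center pads to the exact width if that matters. A small sketch of the alternative:
alert = 'Server restarted'
width = len(alert) + 8

line = alert.center(width)   # exact-width centering in one call
assert len(line) == width

banner = '\n'.join(['*' * width, line, '*' * width])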
intel-analytics/BigDL
|
pyspark/bigdl/util/tf_utils.py
|
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/util/tf_utils.py#L167-L202
|
def merge_checkpoint(input_graph,
checkpoint,
output_node_names,
output_graph,
sess):
"""
Get the variable values from the checkpoint file, and merge them into the GraphDef file
Args:
input_graph: the GraphDef file, doesn't contain variable values
checkpoint: the checkpoint file
output_node_names: A list of string, the output names
output_graph: String of the location and the name of the
output graph
"""
restore_op_name = "save/restore_all"
filename_tensor_name = "save/Const:0"
input_graph_def = graph_pb2.GraphDef()
with gfile.FastGFile(input_graph, "r") as f:
text_format.Merge(f.read().decode("utf-8"), input_graph_def)
for node in input_graph_def.node:
node.device = ""
importer.import_graph_def(input_graph_def, name="")
sess.run([restore_op_name], {filename_tensor_name: checkpoint})
output_graph_def = graph_util.convert_variables_to_constants(
sess,
input_graph_def,
output_node_names,
variable_names_blacklist=""
)
with gfile.GFile(output_graph, "wb") as f:
f.write(output_graph_def.SerializeToString())
|
[
"def",
"merge_checkpoint",
"(",
"input_graph",
",",
"checkpoint",
",",
"output_node_names",
",",
"output_graph",
",",
"sess",
")",
":",
"restore_op_name",
"=",
"\"save/restore_all\"",
"filename_tensor_name",
"=",
"\"save/Const:0\"",
"input_graph_def",
"=",
"graph_pb2",
".",
"GraphDef",
"(",
")",
"with",
"gfile",
".",
"FastGFile",
"(",
"input_graph",
",",
"\"r\"",
")",
"as",
"f",
":",
"text_format",
".",
"Merge",
"(",
"f",
".",
"read",
"(",
")",
".",
"decode",
"(",
"\"utf-8\"",
")",
",",
"input_graph_def",
")",
"for",
"node",
"in",
"input_graph_def",
".",
"node",
":",
"node",
".",
"device",
"=",
"\"\"",
"importer",
".",
"import_graph_def",
"(",
"input_graph_def",
",",
"name",
"=",
"\"\"",
")",
"sess",
".",
"run",
"(",
"[",
"restore_op_name",
"]",
",",
"{",
"filename_tensor_name",
":",
"checkpoint",
"}",
")",
"output_graph_def",
"=",
"graph_util",
".",
"convert_variables_to_constants",
"(",
"sess",
",",
"input_graph_def",
",",
"output_node_names",
",",
"variable_names_blacklist",
"=",
"\"\"",
")",
"with",
"gfile",
".",
"GFile",
"(",
"output_graph",
",",
"\"wb\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"output_graph_def",
".",
"SerializeToString",
"(",
")",
")"
] |
Get the variable values from the checkpoint file, and merge them into the GraphDef file
Args:
input_graph: the GraphDef file, doesn't contain variable values
checkpoint: the checkpoint file
output_node_names: A list of string, the output names
output_graph: String of the location and the name of the
output graph
|
[
"Get",
"the",
"variable",
"values",
"from",
"the",
"checkpoint",
"file",
"and",
"merge",
"them",
"to",
"the",
"GraphDef",
"file",
"Args",
":",
"input_graph",
":",
"the",
"GraphDef",
"file",
"doesn",
"t",
"contain",
"variable",
"values",
"checkpoint",
":",
"the",
"checkpoint",
"file",
"output_node_names",
":",
"A",
"list",
"of",
"string",
"the",
"output",
"names",
"output_graph",
":",
"String",
"of",
"the",
"location",
"and",
"the",
"name",
"of",
"the",
"output",
"graph"
] |
python
|
test
|
Esri/ArcREST
|
src/arcrest/manageags/_services.py
|
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageags/_services.py#L524-L552
|
def exists(self, folderName, serviceName=None, serviceType=None):
"""
This operation allows you to check whether a folder or a service
exists. To test if a folder exists, supply only a folderName. To
test if a service exists in a root folder, supply both serviceName
and serviceType with folderName=None. To test if a service exists
in a folder, supply all three parameters.
Inputs:
folderName - a folder name
serviceName - a service name
serviceType - a service type. Allowed values:
"GPSERVER", "GLOBESERVER", "MAPSERVER",
"GEOMETRYSERVER", "IMAGESERVER", "SEARCHSERVER",
"GEODATASERVER", "GEOCODESERVER"
"""
url = self._url + "/exists"
params = {
"f" : "json",
"folderName" : folderName,
"serviceName" : serviceName,
"type" : serviceType
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
|
[
"def",
"exists",
"(",
"self",
",",
"folderName",
",",
"serviceName",
"=",
"None",
",",
"serviceType",
"=",
"None",
")",
":",
"url",
"=",
"self",
".",
"_url",
"+",
"\"/exists\"",
"params",
"=",
"{",
"\"f\"",
":",
"\"json\"",
",",
"\"folderName\"",
":",
"folderName",
",",
"\"serviceName\"",
":",
"serviceName",
",",
"\"type\"",
":",
"serviceType",
"}",
"return",
"self",
".",
"_post",
"(",
"url",
"=",
"url",
",",
"param_dict",
"=",
"params",
",",
"securityHandler",
"=",
"self",
".",
"_securityHandler",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
")"
] |
This operation allows you to check whether a folder or a service
exists. To test if a folder exists, supply only a folderName. To
test if a service exists in a root folder, supply both serviceName
and serviceType with folderName=None. To test if a service exists
in a folder, supply all three parameters.
Inputs:
folderName - a folder name
serviceName - a service name
serviceType - a service type. Allowed values:
"GPSERVER", "GLOBESERVER", "MAPSERVER",
"GEOMETRYSERVER", "IMAGESERVER", "SEARCHSERVER",
"GEODATASERVER", "GEOCODESERVER"
|
[
"This",
"operation",
"allows",
"you",
"to",
"check",
"whether",
"a",
"folder",
"or",
"a",
"service",
"exists",
".",
"To",
"test",
"if",
"a",
"folder",
"exists",
"supply",
"only",
"a",
"folderName",
".",
"To",
"test",
"if",
"a",
"service",
"exists",
"in",
"a",
"root",
"folder",
"supply",
"both",
"serviceName",
"and",
"serviceType",
"with",
"folderName",
"=",
"None",
".",
"To",
"test",
"if",
"a",
"service",
"exists",
"in",
"a",
"folder",
"supply",
"all",
"three",
"parameters",
"."
] |
python
|
train
|
architv/soccer-cli
|
soccer/writers.py
|
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L230-L242
|
def team_scores(self, team_scores, time):
"""Store output of team scores to a CSV file"""
headers = ['Date', 'Home Team Name', 'Home Team Goals',
'Away Team Goals', 'Away Team Name']
result = [headers]
result.extend([score["utcDate"].split('T')[0],
score['homeTeam']['name'],
score['score']['fullTime']['homeTeam'],
score['score']['fullTime']['awayTeam'],
score['awayTeam']['name']]
for score in team_scores['matches']
if score['status'] == 'FINISHED')
self.generate_output(result)
|
[
"def",
"team_scores",
"(",
"self",
",",
"team_scores",
",",
"time",
")",
":",
"headers",
"=",
"[",
"'Date'",
",",
"'Home Team Name'",
",",
"'Home Team Goals'",
",",
"'Away Team Goals'",
",",
"'Away Team Name'",
"]",
"result",
"=",
"[",
"headers",
"]",
"result",
".",
"extend",
"(",
"[",
"score",
"[",
"\"utcDate\"",
"]",
".",
"split",
"(",
"'T'",
")",
"[",
"0",
"]",
",",
"score",
"[",
"'homeTeam'",
"]",
"[",
"'name'",
"]",
",",
"score",
"[",
"'score'",
"]",
"[",
"'fullTime'",
"]",
"[",
"'homeTeam'",
"]",
",",
"score",
"[",
"'score'",
"]",
"[",
"'fullTime'",
"]",
"[",
"'awayTeam'",
"]",
",",
"score",
"[",
"'awayTeam'",
"]",
"[",
"'name'",
"]",
"]",
"for",
"score",
"in",
"team_scores",
"[",
"'matches'",
"]",
"if",
"score",
"[",
"'status'",
"]",
"==",
"'FINISHED'",
")",
"self",
".",
"generate_output",
"(",
"result",
")"
] |
Store output of team scores to a CSV file
|
[
"Store",
"output",
"of",
"team",
"scores",
"to",
"a",
"CSV",
"file"
] |
python
|
train
|
mozilla-iot/webthing-python
|
webthing/server.py
|
https://github.com/mozilla-iot/webthing-python/blob/65d467c89ed79d0bbc42b8b3c8f9e5a320edd237/webthing/server.py#L165-L171
|
def prepare(self):
"""Validate Host header."""
host = self.request.headers.get('Host', None)
if host is not None and host in self.hosts:
return
raise tornado.web.HTTPError(403)
|
[
"def",
"prepare",
"(",
"self",
")",
":",
"host",
"=",
"self",
".",
"request",
".",
"headers",
".",
"get",
"(",
"'Host'",
",",
"None",
")",
"if",
"host",
"is",
"not",
"None",
"and",
"host",
"in",
"self",
".",
"hosts",
":",
"return",
"raise",
"tornado",
".",
"web",
".",
"HTTPError",
"(",
"403",
")"
] |
Validate Host header.
|
[
"Validate",
"Host",
"header",
"."
] |
python
|
test
|
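The prepare hook above runs before any HTTP verb method; a standalone sketch of the same Host allow-list check in a plain tornado.web.RequestHandler (the hosts list is hypothetical):
import tornado.web

class HostValidatingHandler(tornado.web.RequestHandler):
    hosts = ['localhost:8888', '127.0.0.1:8888']   # hypothetical allow-list

    def prepare(self):
        """Reject requests whose Host header is not on the allow-list."""
        host = self.request.headers.get('Host', None)
        if host is not None and host in self.hosts:
            return
        raise tornado.web.HTTPError(403)

    def get(self):
        self.write('ok')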
cloudbase/python-hnvclient
|
hnv/client.py
|
https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/client.py#L139-L145
|
def _get_client():
"""Create a new client for the HNV REST API."""
return utils.get_client(url=CONFIG.HNV.url,
username=CONFIG.HNV.username,
password=CONFIG.HNV.password,
allow_insecure=CONFIG.HNV.https_allow_insecure,
ca_bundle=CONFIG.HNV.https_ca_bundle)
|
[
"def",
"_get_client",
"(",
")",
":",
"return",
"utils",
".",
"get_client",
"(",
"url",
"=",
"CONFIG",
".",
"HNV",
".",
"url",
",",
"username",
"=",
"CONFIG",
".",
"HNV",
".",
"username",
",",
"password",
"=",
"CONFIG",
".",
"HNV",
".",
"password",
",",
"allow_insecure",
"=",
"CONFIG",
".",
"HNV",
".",
"https_allow_insecure",
",",
"ca_bundle",
"=",
"CONFIG",
".",
"HNV",
".",
"https_ca_bundle",
")"
] |
Create a new client for the HNV REST API.
|
[
"Create",
"a",
"new",
"client",
"for",
"the",
"HNV",
"REST",
"API",
"."
] |
python
|
train
|
frictionlessdata/tableschema-sql-py
|
tableschema_sql/mapper.py
|
https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/mapper.py#L159-L164
|
def restore_bucket(self, table_name):
"""Restore bucket from SQL
"""
if table_name.startswith(self.__prefix):
return table_name.replace(self.__prefix, '', 1)
return None
|
[
"def",
"restore_bucket",
"(",
"self",
",",
"table_name",
")",
":",
"if",
"table_name",
".",
"startswith",
"(",
"self",
".",
"__prefix",
")",
":",
"return",
"table_name",
".",
"replace",
"(",
"self",
".",
"__prefix",
",",
"''",
",",
"1",
")",
"return",
"None"
] |
Restore bucket from SQL
|
[
"Restore",
"bucket",
"from",
"SQL"
] |
python
|
train
|
Julian/jsonschema
|
jsonschema/validators.py
|
https://github.com/Julian/jsonschema/blob/a72332004cdc3ba456de7918bc32059822b2f69a/jsonschema/validators.py#L367-L437
|
def extend(validator, validators=(), version=None, type_checker=None):
"""
Create a new validator class by extending an existing one.
Arguments:
validator (jsonschema.IValidator):
an existing validator class
validators (collections.Mapping):
a mapping of new validator callables to extend with, whose
structure is as in `create`.
.. note::
Any validator callables with the same name as an existing one
will (silently) replace the old validator callable entirely,
effectively overriding any validation done in the "parent"
validator class.
If you wish to instead extend the behavior of a parent's
validator callable, delegate and call it directly in the new
validator function by retrieving it using
``OldValidator.VALIDATORS["validator_name"]``.
version (str):
a version for the new validator class
type_checker (jsonschema.TypeChecker):
a type checker, used when applying the :validator:`type` validator.
If unprovided, the type checker of the extended
`jsonschema.IValidator` will be carried along.
Returns:
a new `jsonschema.IValidator` class extending the one provided
.. note:: Meta Schemas
The new validator class will have its parent's meta schema.
If you wish to change or extend the meta schema in the new
validator class, modify ``META_SCHEMA`` directly on the returned
class. Note that no implicit copying is done, so a copy should
likely be made before modifying it, in order to not affect the
old validator.
"""
all_validators = dict(validator.VALIDATORS)
all_validators.update(validators)
if type_checker is None:
type_checker = validator.TYPE_CHECKER
elif validator._CREATED_WITH_DEFAULT_TYPES:
raise TypeError(
"Cannot extend a validator created with default_types "
"with a type_checker. Update the validator to use a "
"type_checker when created."
)
return create(
meta_schema=validator.META_SCHEMA,
validators=all_validators,
version=version,
type_checker=type_checker,
id_of=validator.ID_OF,
)
|
[
"def",
"extend",
"(",
"validator",
",",
"validators",
"=",
"(",
")",
",",
"version",
"=",
"None",
",",
"type_checker",
"=",
"None",
")",
":",
"all_validators",
"=",
"dict",
"(",
"validator",
".",
"VALIDATORS",
")",
"all_validators",
".",
"update",
"(",
"validators",
")",
"if",
"type_checker",
"is",
"None",
":",
"type_checker",
"=",
"validator",
".",
"TYPE_CHECKER",
"elif",
"validator",
".",
"_CREATED_WITH_DEFAULT_TYPES",
":",
"raise",
"TypeError",
"(",
"\"Cannot extend a validator created with default_types \"",
"\"with a type_checker. Update the validator to use a \"",
"\"type_checker when created.\"",
")",
"return",
"create",
"(",
"meta_schema",
"=",
"validator",
".",
"META_SCHEMA",
",",
"validators",
"=",
"all_validators",
",",
"version",
"=",
"version",
",",
"type_checker",
"=",
"type_checker",
",",
"id_of",
"=",
"validator",
".",
"ID_OF",
",",
")"
] |
Create a new validator class by extending an existing one.
Arguments:
validator (jsonschema.IValidator):
an existing validator class
validators (collections.Mapping):
a mapping of new validator callables to extend with, whose
structure is as in `create`.
.. note::
Any validator callables with the same name as an existing one
will (silently) replace the old validator callable entirely,
effectively overriding any validation done in the "parent"
validator class.
If you wish to instead extend the behavior of a parent's
validator callable, delegate and call it directly in the new
validator function by retrieving it using
``OldValidator.VALIDATORS["validator_name"]``.
version (str):
a version for the new validator class
type_checker (jsonschema.TypeChecker):
a type checker, used when applying the :validator:`type` validator.
If unprovided, the type checker of the extended
    `jsonschema.IValidator` will be carried along.
Returns:
a new `jsonschema.IValidator` class extending the one provided
.. note:: Meta Schemas
The new validator class will have its parent's meta schema.
If you wish to change or extend the meta schema in the new
validator class, modify ``META_SCHEMA`` directly on the returned
class. Note that no implicit copying is done, so a copy should
likely be made before modifying it, in order to not affect the
old validator.
|
[
"Create",
"a",
"new",
"validator",
"class",
"by",
"extending",
"an",
"existing",
"one",
"."
] |
python
|
train
|
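The docstring's advice about delegating to the parent's validator callable can be shown concretely; a sketch assuming a recent jsonschema with Draft7Validator available (the print statement is purely illustrative):

from jsonschema import Draft7Validator, validators

# Retrieve the parent callable exactly as the docstring suggests.
parent_type = Draft7Validator.VALIDATORS["type"]

def noisy_type(validator, types, instance, schema):
    # Delegate to the parent's "type" callable and pass its errors through.
    print("checking type of", instance)
    for error in parent_type(validator, types, instance, schema) or ():
        yield error

NoisyValidator = validators.extend(Draft7Validator, {"type": noisy_type})
NoisyValidator({"type": "integer"}).validate(3)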
apache/spark
|
python/pyspark/mllib/clustering.py
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/clustering.py#L625-L632
|
def load(cls, sc, path):
"""
Load a model from the given path.
"""
model = cls._load_java(sc, path)
wrapper =\
sc._jvm.org.apache.spark.mllib.api.python.PowerIterationClusteringModelWrapper(model)
return PowerIterationClusteringModel(wrapper)
|
[
"def",
"load",
"(",
"cls",
",",
"sc",
",",
"path",
")",
":",
"model",
"=",
"cls",
".",
"_load_java",
"(",
"sc",
",",
"path",
")",
"wrapper",
"=",
"sc",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"mllib",
".",
"api",
".",
"python",
".",
"PowerIterationClusteringModelWrapper",
"(",
"model",
")",
"return",
"PowerIterationClusteringModel",
"(",
"wrapper",
")"
] |
Load a model from the given path.
|
[
"Load",
"a",
"model",
"from",
"the",
"given",
"path",
"."
] |
python
|
train
|
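Usage is symmetric with model.save(); a hedged sketch, assuming pyspark is installed and the path (illustrative here) was previously written by save():

from pyspark import SparkContext
from pyspark.mllib.clustering import PowerIterationClusteringModel

sc = SparkContext.getOrCreate()
# The path is illustrative and must point at a model written by model.save(sc, path).
model = PowerIterationClusteringModel.load(sc, "/tmp/pic_model")
for a in model.assignments().collect():
    print(a.id, a.cluster)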
quaddra/provision
|
provision/config.py
|
https://github.com/quaddra/provision/blob/d84dca80abb34ed93381aae4d5b8005bd08a5681/provision/config.py#L110-L114
|
def is_node_destroyable(name, prefixes=DESTROYABLE_PREFIXES):
"""Return True if name starts with a destroyable prefix"""
return any([name.startswith(p) for p in prefixes])
|
[
"def",
"is_node_destroyable",
"(",
"name",
",",
"prefixes",
"=",
"DESTROYABLE_PREFIXES",
")",
":",
"return",
"any",
"(",
"[",
"name",
".",
"startswith",
"(",
"p",
")",
"for",
"p",
"in",
"prefixes",
"]",
")"
] |
Return True if name starts with a destroyable prefix
|
[
"Return",
"True",
"if",
"name",
"starts",
"with",
"a",
"destroyable",
"prefix"
] |
python
|
train
|
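A quick demonstration of the prefix test (the prefixes below are hypothetical stand-ins for the module's DESTROYABLE_PREFIXES); note a generator expression avoids building the intermediate list:

DESTROYABLE_PREFIXES = ('test-', 'tmp-')  # hypothetical values

def is_node_destroyable(name, prefixes=DESTROYABLE_PREFIXES):
    """Return True if name starts with a destroyable prefix."""
    return any(name.startswith(p) for p in prefixes)  # generator, no list needed

assert is_node_destroyable('test-web01')
assert not is_node_destroyable('prod-db01')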
annoviko/pyclustering
|
pyclustering/cluster/clarans.py
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/clarans.py#L163-L233
|
def __optimize_configuration(self):
"""!
        @brief Finds quasi-optimal medoids and updates the clusters in line with the algorithm's rules.
"""
index_neighbor = 0
while (index_neighbor < self.__maxneighbor):
# get random current medoid that is to be replaced
current_medoid_index = self.__current[random.randint(0, self.__number_clusters - 1)]
current_medoid_cluster_index = self.__belong[current_medoid_index]
# get new candidate to be medoid
candidate_medoid_index = random.randint(0, len(self.__pointer_data) - 1)
while candidate_medoid_index in self.__current:
candidate_medoid_index = random.randint(0, len(self.__pointer_data) - 1)
candidate_cost = 0.0
for point_index in range(0, len(self.__pointer_data)):
if point_index not in self.__current:
# get non-medoid point and its medoid
point_cluster_index = self.__belong[point_index]
point_medoid_index = self.__current[point_cluster_index]
# get other medoid that is nearest to the point (except current and candidate)
other_medoid_index = self.__find_another_nearest_medoid(point_index, current_medoid_index)
other_medoid_cluster_index = self.__belong[other_medoid_index]
# for optimization calculate all required distances
# from the point to current medoid
distance_current = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[current_medoid_index])
# from the point to candidate median
distance_candidate = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[candidate_medoid_index])
# from the point to nearest (own) medoid
distance_nearest = float('inf')
if ( (point_medoid_index != candidate_medoid_index) and (point_medoid_index != current_medoid_cluster_index) ):
distance_nearest = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[point_medoid_index])
# apply rules for cost calculation
if (point_cluster_index == current_medoid_cluster_index):
# case 1:
if (distance_candidate >= distance_nearest):
candidate_cost += distance_nearest - distance_current
# case 2:
else:
candidate_cost += distance_candidate - distance_current
elif (point_cluster_index == other_medoid_cluster_index):
# case 3 ('nearest medoid' is the representative object of that cluster and object is more similar to 'nearest' than to 'candidate'):
if (distance_candidate > distance_nearest):
                    pass
# case 4:
else:
candidate_cost += distance_candidate - distance_nearest
if (candidate_cost < 0):
# set candidate that has won
self.__current[current_medoid_cluster_index] = candidate_medoid_index
# recalculate clusters
self.__update_clusters(self.__current)
            # reset iterations and start investigation from the beginning
index_neighbor = 0
else:
index_neighbor += 1
|
[
"def",
"__optimize_configuration",
"(",
"self",
")",
":",
"index_neighbor",
"=",
"0",
"while",
"(",
"index_neighbor",
"<",
"self",
".",
"__maxneighbor",
")",
":",
"# get random current medoid that is to be replaced\r",
"current_medoid_index",
"=",
"self",
".",
"__current",
"[",
"random",
".",
"randint",
"(",
"0",
",",
"self",
".",
"__number_clusters",
"-",
"1",
")",
"]",
"current_medoid_cluster_index",
"=",
"self",
".",
"__belong",
"[",
"current_medoid_index",
"]",
"# get new candidate to be medoid\r",
"candidate_medoid_index",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"len",
"(",
"self",
".",
"__pointer_data",
")",
"-",
"1",
")",
"while",
"candidate_medoid_index",
"in",
"self",
".",
"__current",
":",
"candidate_medoid_index",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"len",
"(",
"self",
".",
"__pointer_data",
")",
"-",
"1",
")",
"candidate_cost",
"=",
"0.0",
"for",
"point_index",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"self",
".",
"__pointer_data",
")",
")",
":",
"if",
"point_index",
"not",
"in",
"self",
".",
"__current",
":",
"# get non-medoid point and its medoid\r",
"point_cluster_index",
"=",
"self",
".",
"__belong",
"[",
"point_index",
"]",
"point_medoid_index",
"=",
"self",
".",
"__current",
"[",
"point_cluster_index",
"]",
"# get other medoid that is nearest to the point (except current and candidate)\r",
"other_medoid_index",
"=",
"self",
".",
"__find_another_nearest_medoid",
"(",
"point_index",
",",
"current_medoid_index",
")",
"other_medoid_cluster_index",
"=",
"self",
".",
"__belong",
"[",
"other_medoid_index",
"]",
"# for optimization calculate all required distances\r",
"# from the point to current medoid\r",
"distance_current",
"=",
"euclidean_distance_square",
"(",
"self",
".",
"__pointer_data",
"[",
"point_index",
"]",
",",
"self",
".",
"__pointer_data",
"[",
"current_medoid_index",
"]",
")",
"# from the point to candidate median\r",
"distance_candidate",
"=",
"euclidean_distance_square",
"(",
"self",
".",
"__pointer_data",
"[",
"point_index",
"]",
",",
"self",
".",
"__pointer_data",
"[",
"candidate_medoid_index",
"]",
")",
"# from the point to nearest (own) medoid\r",
"distance_nearest",
"=",
"float",
"(",
"'inf'",
")",
"if",
"(",
"(",
"point_medoid_index",
"!=",
"candidate_medoid_index",
")",
"and",
"(",
"point_medoid_index",
"!=",
"current_medoid_cluster_index",
")",
")",
":",
"distance_nearest",
"=",
"euclidean_distance_square",
"(",
"self",
".",
"__pointer_data",
"[",
"point_index",
"]",
",",
"self",
".",
"__pointer_data",
"[",
"point_medoid_index",
"]",
")",
"# apply rules for cost calculation\r",
"if",
"(",
"point_cluster_index",
"==",
"current_medoid_cluster_index",
")",
":",
"# case 1:\r",
"if",
"(",
"distance_candidate",
">=",
"distance_nearest",
")",
":",
"candidate_cost",
"+=",
"distance_nearest",
"-",
"distance_current",
"# case 2:\r",
"else",
":",
"candidate_cost",
"+=",
"distance_candidate",
"-",
"distance_current",
"elif",
"(",
"point_cluster_index",
"==",
"other_medoid_cluster_index",
")",
":",
"# case 3 ('nearest medoid' is the representative object of that cluster and object is more similar to 'nearest' than to 'candidate'):\r",
"if",
"(",
"distance_candidate",
">",
"distance_nearest",
")",
":",
"pass",
"# case 4:\r",
"else",
":",
"candidate_cost",
"+=",
"distance_candidate",
"-",
"distance_nearest",
"if",
"(",
"candidate_cost",
"<",
"0",
")",
":",
"# set candidate that has won\r",
"self",
".",
"__current",
"[",
"current_medoid_cluster_index",
"]",
"=",
"candidate_medoid_index",
"# recalculate clusters\r",
"self",
".",
"__update_clusters",
"(",
"self",
".",
"__current",
")",
"# reset iterations and starts investigation from the begining\r",
"index_neighbor",
"=",
"0",
"else",
":",
"index_neighbor",
"+=",
"1"
] |
!
@brief Finds quasi-optimal medoids and updates the clusters in line with the algorithm's rules.
|
[
"!"
] |
python
|
valid
|
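The four cost cases in __optimize_configuration compress to two branches; a standalone sketch of the per-point swap cost (the distance arguments are hypothetical squared distances, mirroring euclidean_distance_square):

import math

def swap_cost(point_in_current_cluster, point_in_other_cluster,
              d_current, d_candidate, d_nearest=math.inf):
    """Return this point's contribution to the cost of replacing the current medoid."""
    if point_in_current_cluster:
        # Cases 1 and 2: the point must leave the removed medoid's cluster,
        # moving to whichever of candidate/nearest-other medoid is closer.
        return min(d_candidate, d_nearest) - d_current
    if point_in_other_cluster:
        # Cases 3 and 4: the point moves only if the candidate is closer.
        return min(d_candidate - d_nearest, 0.0)
    return 0.0

# A point of the removed cluster that is closer to the candidate than to any other medoid:
assert swap_cost(True, False, d_current=1.0, d_candidate=2.0, d_nearest=5.0) == 1.0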
cjhutto/vaderSentiment
|
additional_resources/build_emoji_lexicon.py
|
https://github.com/cjhutto/vaderSentiment/blob/cfc2bce747afb2c49799c1de1dcf517358948d71/additional_resources/build_emoji_lexicon.py#L7-L11
|
def get_list_from_file(file_name):
"""read the lines from a file into a list"""
with open(file_name, mode='r', encoding='utf-8') as f1:
lst = f1.readlines()
return lst
|
[
"def",
"get_list_from_file",
"(",
"file_name",
")",
":",
"with",
"open",
"(",
"file_name",
",",
"mode",
"=",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f1",
":",
"lst",
"=",
"f1",
".",
"readlines",
"(",
")",
"return",
"lst"
] |
read the lines from a file into a list
|
[
"read",
"the",
"lines",
"from",
"a",
"file",
"into",
"a",
"list"
] |
python
|
train
|
fermiPy/fermipy
|
fermipy/scripts/cluster_sources.py
|
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/scripts/cluster_sources.py#L34-L51
|
def make_cos_vects(lon_vect, lat_vect):
""" Convert from longitude (RA or GLON) and latitude (DEC or GLAT) values to directional cosines
Parameters
----------
lon_vect,lat_vect : np.ndarray(nsrc)
Input values
returns (np.ndarray(3,nsrc)) with the directional cosine (i.e., x,y,z component) values
"""
lon_rad = np.radians(lon_vect)
lat_rad = np.radians(lat_vect)
cvals = np.cos(lat_rad)
xvals = cvals * np.sin(lon_rad)
yvals = cvals * np.cos(lon_rad)
zvals = np.sin(lat_rad)
cvects = np.vstack([xvals, yvals, zvals])
return cvects
|
[
"def",
"make_cos_vects",
"(",
"lon_vect",
",",
"lat_vect",
")",
":",
"lon_rad",
"=",
"np",
".",
"radians",
"(",
"lon_vect",
")",
"lat_rad",
"=",
"np",
".",
"radians",
"(",
"lat_vect",
")",
"cvals",
"=",
"np",
".",
"cos",
"(",
"lat_rad",
")",
"xvals",
"=",
"cvals",
"*",
"np",
".",
"sin",
"(",
"lon_rad",
")",
"yvals",
"=",
"cvals",
"*",
"np",
".",
"cos",
"(",
"lon_rad",
")",
"zvals",
"=",
"np",
".",
"sin",
"(",
"lat_rad",
")",
"cvects",
"=",
"np",
".",
"vstack",
"(",
"[",
"xvals",
",",
"yvals",
",",
"zvals",
"]",
")",
"return",
"cvects"
] |
Convert from longitude (RA or GLON) and latitude (DEC or GLAT) values to directional cosines
Parameters
----------
lon_vect,lat_vect : np.ndarray(nsrc)
Input values
returns (np.ndarray(3,nsrc)) with the directional cosine (i.e., x,y,z component) values
|
[
"Convert",
"from",
"longitude",
"(",
"RA",
"or",
"GLON",
")",
"and",
"latitude",
"(",
"DEC",
"or",
"GLAT",
")",
"values",
"to",
"directional",
"cosines"
] |
python
|
train
|
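Because the three components are direction cosines, every output column should be a unit vector; a small numpy check (the sample coordinates are arbitrary):

import numpy as np

def make_cos_vects(lon_vect, lat_vect):
    lon_rad = np.radians(lon_vect)
    lat_rad = np.radians(lat_vect)
    cvals = np.cos(lat_rad)
    return np.vstack([cvals * np.sin(lon_rad), cvals * np.cos(lon_rad), np.sin(lat_rad)])

cvects = make_cos_vects(np.array([0.0, 90.0, 180.0]), np.array([0.0, 45.0, -30.0]))
assert np.allclose(np.linalg.norm(cvects, axis=0), 1.0)  # each column is a unit vector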
prthkms/alex
|
alex/handler.py
|
https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/handler.py#L18-L26
|
def words(query):
"""lines(query) -- print the number of words in a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
with open(filename) as openfile:
print len(openfile.read().split())
else:
print 'File not found : ' + filename
|
[
"def",
"words",
"(",
"query",
")",
":",
"filename",
"=",
"support",
".",
"get_file_name",
"(",
"query",
")",
"if",
"(",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
")",
":",
"with",
"open",
"(",
"filename",
")",
"as",
"openfile",
":",
"print",
"len",
"(",
"openfile",
".",
"read",
"(",
")",
".",
"split",
"(",
")",
")",
"else",
":",
"print",
"'File not found : '",
"+",
"filename"
] |
words(query) -- print the number of words in a given file
|
[
"lines",
"(",
"query",
")",
"--",
"print",
"the",
"number",
"of",
"words",
"in",
"a",
"given",
"file"
] |
python
|
train
|
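The snippet above is Python 2 (print statements); a Python 3 sketch of the same word count, with support.get_file_name replaced by a plain filename argument to keep it self-contained:

import os

def words(filename):
    # Count whitespace-separated tokens in the file, as the original does.
    if os.path.isfile(filename):
        with open(filename) as openfile:
            print(len(openfile.read().split()))
    else:
        print('File not found : ' + filename)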
estnltk/estnltk
|
estnltk/text.py
|
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L696-L700
|
def root_tokens(self):
"""Root tokens of word roots."""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(ROOT_TOKENS)
|
[
"def",
"root_tokens",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_tagged",
"(",
"ANALYSIS",
")",
":",
"self",
".",
"tag_analysis",
"(",
")",
"return",
"self",
".",
"get_analysis_element",
"(",
"ROOT_TOKENS",
")"
] |
Root tokens of word roots.
|
[
"Root",
"tokens",
"of",
"word",
"roots",
"."
] |
python
|
train
|
ib-lundgren/flask-oauthprovider
|
examples/mongo_demoprovider/login.py
|
https://github.com/ib-lundgren/flask-oauthprovider/blob/6c91e8c11fc3cee410cb755d52d9d2c5331ee324/examples/mongo_demoprovider/login.py#L86-L109
|
def edit_profile():
"""Updates a profile"""
if g.user is None:
abort(401)
form = dict(name=g.user.name, email=g.user.email)
if request.method == 'POST':
if 'delete' in request.form:
User.get_collection().remove(g.user)
session['openid'] = None
flash(u'Profile deleted')
return redirect(url_for('index'))
form['name'] = request.form['name']
form['email'] = request.form['email']
if not form['name']:
flash(u'Error: you have to provide a name')
elif '@' not in form['email']:
flash(u'Error: you have to enter a valid email address')
else:
flash(u'Profile successfully created')
g.user.name = form['name']
g.user.email = form['email']
uid = User.get_collection().save(g.user)
return redirect(url_for('edit_profile'))
return render_template('edit_profile.html', form=form)
|
[
"def",
"edit_profile",
"(",
")",
":",
"if",
"g",
".",
"user",
"is",
"None",
":",
"abort",
"(",
"401",
")",
"form",
"=",
"dict",
"(",
"name",
"=",
"g",
".",
"user",
".",
"name",
",",
"email",
"=",
"g",
".",
"user",
".",
"email",
")",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"if",
"'delete'",
"in",
"request",
".",
"form",
":",
"User",
".",
"get_collection",
"(",
")",
".",
"remove",
"(",
"g",
".",
"user",
")",
"session",
"[",
"'openid'",
"]",
"=",
"None",
"flash",
"(",
"u'Profile deleted'",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"'index'",
")",
")",
"form",
"[",
"'name'",
"]",
"=",
"request",
".",
"form",
"[",
"'name'",
"]",
"form",
"[",
"'email'",
"]",
"=",
"request",
".",
"form",
"[",
"'email'",
"]",
"if",
"not",
"form",
"[",
"'name'",
"]",
":",
"flash",
"(",
"u'Error: you have to provide a name'",
")",
"elif",
"'@'",
"not",
"in",
"form",
"[",
"'email'",
"]",
":",
"flash",
"(",
"u'Error: you have to enter a valid email address'",
")",
"else",
":",
"flash",
"(",
"u'Profile successfully created'",
")",
"g",
".",
"user",
".",
"name",
"=",
"form",
"[",
"'name'",
"]",
"g",
".",
"user",
".",
"email",
"=",
"form",
"[",
"'email'",
"]",
"uid",
"=",
"User",
".",
"get_collection",
"(",
")",
".",
"save",
"(",
"g",
".",
"user",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"'edit_profile'",
")",
")",
"return",
"render_template",
"(",
"'edit_profile.html'",
",",
"form",
"=",
"form",
")"
] |
Updates a profile
|
[
"Updates",
"a",
"profile"
] |
python
|
train
|
PmagPy/PmagPy
|
programs/incfish.py
|
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/incfish.py#L8-L63
|
def main():
"""
NAME
incfish.py
DESCRIPTION
calculates fisher parameters from inc only data
INPUT FORMAT
takes inc data
SYNTAX
incfish.py [options] [< filename]
OPTIONS
-h prints help message and quits
-i for interactive filename entry
-f FILE, specify input file name
-F FILE, specify output file name
< filename for reading from standard input
OUTPUT
mean inc,Fisher inc, N, R, k, a95
NOTES
takes the absolute value of inclinations (to take into account reversals),
but returns gaussian mean if < 50.0, because of polarity ambiguity and
lack of bias.
"""
inc=[]
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-i' in sys.argv: # ask for filename
file=input("Enter file name with inc data: ")
inc=numpy.loadtxt(file)
elif '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
inc=numpy.loadtxt(file)
else:
inc = numpy.loadtxt(sys.stdin,dtype=numpy.float)
ofile=""
if '-F' in sys.argv:
ind = sys.argv.index('-F')
        ofile = sys.argv[ind+1]
        out = open(ofile, 'w')
#
#get doincfish to do the dirty work:
fpars= pmag.doincfish(inc)
outstring='%7.1f %7.1f %i %8.1f %7.1f %7.1f'%(fpars['ginc'],fpars['inc'],fpars['n'],fpars['r'],fpars['k'],fpars['alpha95'])
if ofile == "":
print(outstring)
else:
out.write(outstring+'\n')
|
[
"def",
"main",
"(",
")",
":",
"inc",
"=",
"[",
"]",
"if",
"'-h'",
"in",
"sys",
".",
"argv",
":",
"# check if help is needed",
"print",
"(",
"main",
".",
"__doc__",
")",
"sys",
".",
"exit",
"(",
")",
"# graceful quit",
"if",
"'-i'",
"in",
"sys",
".",
"argv",
":",
"# ask for filename",
"file",
"=",
"input",
"(",
"\"Enter file name with inc data: \"",
")",
"inc",
"=",
"numpy",
".",
"loadtxt",
"(",
"file",
")",
"elif",
"'-f'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-f'",
")",
"file",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"inc",
"=",
"numpy",
".",
"loadtxt",
"(",
"file",
")",
"else",
":",
"inc",
"=",
"numpy",
".",
"loadtxt",
"(",
"sys",
".",
"stdin",
",",
"dtype",
"=",
"numpy",
".",
"float",
")",
"ofile",
"=",
"\"\"",
"if",
"'-F'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-F'",
")",
"ofile",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"out",
"=",
"open",
"(",
"ofile",
",",
"'w + a'",
")",
"#",
"#get doincfish to do the dirty work:",
"fpars",
"=",
"pmag",
".",
"doincfish",
"(",
"inc",
")",
"outstring",
"=",
"'%7.1f %7.1f %i %8.1f %7.1f %7.1f'",
"%",
"(",
"fpars",
"[",
"'ginc'",
"]",
",",
"fpars",
"[",
"'inc'",
"]",
",",
"fpars",
"[",
"'n'",
"]",
",",
"fpars",
"[",
"'r'",
"]",
",",
"fpars",
"[",
"'k'",
"]",
",",
"fpars",
"[",
"'alpha95'",
"]",
")",
"if",
"ofile",
"==",
"\"\"",
":",
"print",
"(",
"outstring",
")",
"else",
":",
"out",
".",
"write",
"(",
"outstring",
"+",
"'\\n'",
")"
] |
NAME
incfish.py
DESCRIPTION
calculates fisher parameters from inc only data
INPUT FORMAT
takes inc data
SYNTAX
incfish.py [options] [< filename]
OPTIONS
-h prints help message and quits
-i for interactive filename entry
-f FILE, specify input file name
-F FILE, specify output file name
< filename for reading from standard input
OUTPUT
mean inc,Fisher inc, N, R, k, a95
NOTES
takes the absolute value of inclinations (to take into account reversals),
but returns the Gaussian mean if < 50.0, because of polarity ambiguity and
lack of bias.
|
[
"NAME",
"incfish",
".",
"py"
] |
python
|
train
|
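The hand-rolled sys.argv handling is a recurring PmagPy pattern; an equivalent sketch with argparse, keeping the option names (the pmag.doincfish import path is assumed, not verified here):

import argparse
import sys
import numpy

from pmagpy import pmag  # assumed import path for the doincfish helper

def main():
    parser = argparse.ArgumentParser(
        description='calculate Fisher parameters from inc-only data')
    parser.add_argument('-f', dest='infile', help='input file name')
    parser.add_argument('-F', dest='outfile', help='output file name')
    args = parser.parse_args()
    source = open(args.infile) if args.infile else sys.stdin
    inc = numpy.loadtxt(source, dtype=float)
    fpars = pmag.doincfish(inc)
    outstring = '%7.1f %7.1f %i %8.1f %7.1f %7.1f' % (
        fpars['ginc'], fpars['inc'], fpars['n'],
        fpars['r'], fpars['k'], fpars['alpha95'])
    if args.outfile:
        with open(args.outfile, 'w') as out:
            out.write(outstring + '\n')
    else:
        print(outstring)

if __name__ == '__main__':
    main()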
google/openhtf
|
openhtf/plugs/usb/fastboot_protocol.py
|
https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/fastboot_protocol.py#L276-L288
|
def flash(self, partition, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):
"""Flashes the last downloaded file to the given partition.
Args:
partition: Partition to flash.
timeout_ms: Optional timeout in milliseconds to wait for it to finish.
info_cb: See Download. Usually no messages.
Returns:
Response to a download request, normally nothing.
"""
return self._simple_command('flash', arg=partition, info_cb=info_cb,
timeout_ms=timeout_ms)
|
[
"def",
"flash",
"(",
"self",
",",
"partition",
",",
"timeout_ms",
"=",
"None",
",",
"info_cb",
"=",
"DEFAULT_MESSAGE_CALLBACK",
")",
":",
"return",
"self",
".",
"_simple_command",
"(",
"'flash'",
",",
"arg",
"=",
"partition",
",",
"info_cb",
"=",
"info_cb",
",",
"timeout_ms",
"=",
"timeout_ms",
")"
] |
Flashes the last downloaded file to the given partition.
Args:
partition: Partition to flash.
timeout_ms: Optional timeout in milliseconds to wait for it to finish.
info_cb: See Download. Usually no messages.
Returns:
Response to a download request, normally nothing.
|
[
"Flashes",
"the",
"last",
"downloaded",
"file",
"to",
"the",
"given",
"partition",
"."
] |
python
|
train
|
iotile/coretools
|
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Executor.py
|
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Executor.py#L302-L307
|
def get_all_sources(self):
"""Returns all sources for all batches of this Executor."""
result = []
for batch in self.batches:
result.extend(batch.sources)
return result
|
[
"def",
"get_all_sources",
"(",
"self",
")",
":",
"result",
"=",
"[",
"]",
"for",
"batch",
"in",
"self",
".",
"batches",
":",
"result",
".",
"extend",
"(",
"batch",
".",
"sources",
")",
"return",
"result"
] |
Returns all sources for all batches of this Executor.
|
[
"Returns",
"all",
"sources",
"for",
"all",
"batches",
"of",
"this",
"Executor",
"."
] |
python
|
train
|
saltstack/salt
|
salt/modules/github.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/github.py#L1870-L1959
|
def _query(profile,
action=None,
command=None,
args=None,
method='GET',
header_dict=None,
data=None,
url='https://api.github.com/',
per_page=None):
'''
Make a web call to the GitHub API and deal with paginated results.
'''
if not isinstance(args, dict):
args = {}
if action:
url += action
if command:
url += '/{0}'.format(command)
log.debug('GitHub URL: %s', url)
if 'access_token' not in args.keys():
args['access_token'] = _get_config_value(profile, 'token')
if per_page and 'per_page' not in args.keys():
args['per_page'] = per_page
if header_dict is None:
header_dict = {}
if method != 'POST':
header_dict['Accept'] = 'application/json'
decode = True
if method == 'DELETE':
decode = False
# GitHub paginates all queries when returning many items.
# Gather all data using multiple queries and handle pagination.
complete_result = []
next_page = True
page_number = ''
while next_page is True:
if page_number:
args['page'] = page_number
result = salt.utils.http.query(url,
method,
params=args,
data=data,
header_dict=header_dict,
decode=decode,
decode_type='json',
headers=True,
status=True,
text=True,
hide_fields=['access_token'],
opts=__opts__,
)
log.debug('GitHub Response Status Code: %s',
result['status'])
if result['status'] == 200:
if isinstance(result['dict'], dict):
# If only querying for one item, such as a single issue
# The GitHub API returns a single dictionary, instead of
# A list of dictionaries. In that case, we can return.
return result['dict']
complete_result = complete_result + result['dict']
else:
raise CommandExecutionError(
'GitHub Response Error: {0}'.format(result.get('error'))
)
try:
link_info = result.get('headers').get('Link').split(',')[0]
except AttributeError:
# Only one page of data was returned; exit the loop.
next_page = False
continue
if 'next' in link_info:
# Get the 'next' page number from the Link header.
page_number = link_info.split('>')[0].split('&page=')[1]
else:
# Last page already processed; break the loop.
next_page = False
return complete_result
|
[
"def",
"_query",
"(",
"profile",
",",
"action",
"=",
"None",
",",
"command",
"=",
"None",
",",
"args",
"=",
"None",
",",
"method",
"=",
"'GET'",
",",
"header_dict",
"=",
"None",
",",
"data",
"=",
"None",
",",
"url",
"=",
"'https://api.github.com/'",
",",
"per_page",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"args",
",",
"dict",
")",
":",
"args",
"=",
"{",
"}",
"if",
"action",
":",
"url",
"+=",
"action",
"if",
"command",
":",
"url",
"+=",
"'/{0}'",
".",
"format",
"(",
"command",
")",
"log",
".",
"debug",
"(",
"'GitHub URL: %s'",
",",
"url",
")",
"if",
"'access_token'",
"not",
"in",
"args",
".",
"keys",
"(",
")",
":",
"args",
"[",
"'access_token'",
"]",
"=",
"_get_config_value",
"(",
"profile",
",",
"'token'",
")",
"if",
"per_page",
"and",
"'per_page'",
"not",
"in",
"args",
".",
"keys",
"(",
")",
":",
"args",
"[",
"'per_page'",
"]",
"=",
"per_page",
"if",
"header_dict",
"is",
"None",
":",
"header_dict",
"=",
"{",
"}",
"if",
"method",
"!=",
"'POST'",
":",
"header_dict",
"[",
"'Accept'",
"]",
"=",
"'application/json'",
"decode",
"=",
"True",
"if",
"method",
"==",
"'DELETE'",
":",
"decode",
"=",
"False",
"# GitHub paginates all queries when returning many items.",
"# Gather all data using multiple queries and handle pagination.",
"complete_result",
"=",
"[",
"]",
"next_page",
"=",
"True",
"page_number",
"=",
"''",
"while",
"next_page",
"is",
"True",
":",
"if",
"page_number",
":",
"args",
"[",
"'page'",
"]",
"=",
"page_number",
"result",
"=",
"salt",
".",
"utils",
".",
"http",
".",
"query",
"(",
"url",
",",
"method",
",",
"params",
"=",
"args",
",",
"data",
"=",
"data",
",",
"header_dict",
"=",
"header_dict",
",",
"decode",
"=",
"decode",
",",
"decode_type",
"=",
"'json'",
",",
"headers",
"=",
"True",
",",
"status",
"=",
"True",
",",
"text",
"=",
"True",
",",
"hide_fields",
"=",
"[",
"'access_token'",
"]",
",",
"opts",
"=",
"__opts__",
",",
")",
"log",
".",
"debug",
"(",
"'GitHub Response Status Code: %s'",
",",
"result",
"[",
"'status'",
"]",
")",
"if",
"result",
"[",
"'status'",
"]",
"==",
"200",
":",
"if",
"isinstance",
"(",
"result",
"[",
"'dict'",
"]",
",",
"dict",
")",
":",
"# If only querying for one item, such as a single issue",
"# The GitHub API returns a single dictionary, instead of",
"# A list of dictionaries. In that case, we can return.",
"return",
"result",
"[",
"'dict'",
"]",
"complete_result",
"=",
"complete_result",
"+",
"result",
"[",
"'dict'",
"]",
"else",
":",
"raise",
"CommandExecutionError",
"(",
"'GitHub Response Error: {0}'",
".",
"format",
"(",
"result",
".",
"get",
"(",
"'error'",
")",
")",
")",
"try",
":",
"link_info",
"=",
"result",
".",
"get",
"(",
"'headers'",
")",
".",
"get",
"(",
"'Link'",
")",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
"except",
"AttributeError",
":",
"# Only one page of data was returned; exit the loop.",
"next_page",
"=",
"False",
"continue",
"if",
"'next'",
"in",
"link_info",
":",
"# Get the 'next' page number from the Link header.",
"page_number",
"=",
"link_info",
".",
"split",
"(",
"'>'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'&page='",
")",
"[",
"1",
"]",
"else",
":",
"# Last page already processed; break the loop.",
"next_page",
"=",
"False",
"return",
"complete_result"
] |
Make a web call to the GitHub API and deal with paginated results.
|
[
"Make",
"a",
"web",
"call",
"to",
"the",
"GitHub",
"API",
"and",
"deal",
"with",
"paginated",
"results",
"."
] |
python
|
train
|
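The Link-header handling at the end is the crux of the pagination loop; a focused sketch of just that parsing step (the header value is a typical GitHub example, not captured output):

# Isolated sketch of the Link-header parsing used above.
link_header = ('<https://api.github.com/repositories?per_page=100&page=2>; rel="next", '
               '<https://api.github.com/repositories?per_page=100&page=5>; rel="last"')

link_info = link_header.split(',')[0]
if 'next' in link_info:
    # Extract the 'next' page number from the first URL in the header.
    page_number = link_info.split('>')[0].split('&page=')[1]
else:
    page_number = None
assert page_number == '2'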
pennersr/django-allauth
|
allauth/account/adapter.py
|
https://github.com/pennersr/django-allauth/blob/f70cb3d622f992f15fe9b57098e0b328445b664e/allauth/account/adapter.py#L246-L278
|
def clean_username(self, username, shallow=False):
"""
Validates the username. You can hook into this if you want to
(dynamically) restrict what usernames can be chosen.
"""
for validator in app_settings.USERNAME_VALIDATORS:
validator(username)
# TODO: Add regexp support to USERNAME_BLACKLIST
username_blacklist_lower = [ub.lower()
for ub in app_settings.USERNAME_BLACKLIST]
if username.lower() in username_blacklist_lower:
raise forms.ValidationError(
self.error_messages['username_blacklisted'])
# Skipping database lookups when shallow is True, needed for unique
# username generation.
if not shallow:
from .utils import filter_users_by_username
if filter_users_by_username(username).exists():
user_model = get_user_model()
username_field = app_settings.USER_MODEL_USERNAME_FIELD
error_message = user_model._meta.get_field(
username_field).error_messages.get('unique')
if not error_message:
error_message = self.error_messages['username_taken']
raise forms.ValidationError(
error_message,
params={
'model_name': user_model.__name__,
'field_label': username_field,
}
)
return username
|
[
"def",
"clean_username",
"(",
"self",
",",
"username",
",",
"shallow",
"=",
"False",
")",
":",
"for",
"validator",
"in",
"app_settings",
".",
"USERNAME_VALIDATORS",
":",
"validator",
"(",
"username",
")",
"# TODO: Add regexp support to USERNAME_BLACKLIST",
"username_blacklist_lower",
"=",
"[",
"ub",
".",
"lower",
"(",
")",
"for",
"ub",
"in",
"app_settings",
".",
"USERNAME_BLACKLIST",
"]",
"if",
"username",
".",
"lower",
"(",
")",
"in",
"username_blacklist_lower",
":",
"raise",
"forms",
".",
"ValidationError",
"(",
"self",
".",
"error_messages",
"[",
"'username_blacklisted'",
"]",
")",
"# Skipping database lookups when shallow is True, needed for unique",
"# username generation.",
"if",
"not",
"shallow",
":",
"from",
".",
"utils",
"import",
"filter_users_by_username",
"if",
"filter_users_by_username",
"(",
"username",
")",
".",
"exists",
"(",
")",
":",
"user_model",
"=",
"get_user_model",
"(",
")",
"username_field",
"=",
"app_settings",
".",
"USER_MODEL_USERNAME_FIELD",
"error_message",
"=",
"user_model",
".",
"_meta",
".",
"get_field",
"(",
"username_field",
")",
".",
"error_messages",
".",
"get",
"(",
"'unique'",
")",
"if",
"not",
"error_message",
":",
"error_message",
"=",
"self",
".",
"error_messages",
"[",
"'username_taken'",
"]",
"raise",
"forms",
".",
"ValidationError",
"(",
"error_message",
",",
"params",
"=",
"{",
"'model_name'",
":",
"user_model",
".",
"__name__",
",",
"'field_label'",
":",
"username_field",
",",
"}",
")",
"return",
"username"
] |
Validates the username. You can hook into this if you want to
(dynamically) restrict what usernames can be chosen.
|
[
"Validates",
"the",
"username",
".",
"You",
"can",
"hook",
"into",
"this",
"if",
"you",
"want",
"to",
"(",
"dynamically",
")",
"restrict",
"what",
"usernames",
"can",
"be",
"chosen",
"."
] |
python
|
train
|
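The case-insensitive blacklist comparison is easy to exercise in isolation; a minimal sketch (the blacklist entries are hypothetical, not allauth defaults):

USERNAME_BLACKLIST = ['admin', 'root']  # hypothetical entries

def is_blacklisted(username):
    # Mirror of the adapter's check: compare lowercased forms.
    blacklist_lower = [ub.lower() for ub in USERNAME_BLACKLIST]
    return username.lower() in blacklist_lower

assert is_blacklisted('Admin')
assert not is_blacklisted('alice')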
spacetelescope/drizzlepac
|
drizzlepac/stisData.py
|
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/stisData.py#L415-L433
|
def expand_image(image, shape):
""" Expand image from original shape to requested shape. Output shape
must be an integer multiple of input image shape for each axis. """
if (shape[0] % image.shape[0]) or (shape[1] % image.shape[1]):
raise ValueError("Output shape must be an integer multiple of input "
"image shape.")
sx = shape[1] // image.shape[1]
sy = shape[0] // image.shape[0]
ox = (sx - 1.0) / (2.0 * sx)
oy = (sy - 1.0) / (2.0 * sy)
# generate output coordinates:
y, x = np.indices(shape, dtype=np.float)
x = x / sx - ox
y = y / sy - oy
# interpolate:
return bilinear_interp(image, x, y)
|
[
"def",
"expand_image",
"(",
"image",
",",
"shape",
")",
":",
"if",
"(",
"shape",
"[",
"0",
"]",
"%",
"image",
".",
"shape",
"[",
"0",
"]",
")",
"or",
"(",
"shape",
"[",
"1",
"]",
"%",
"image",
".",
"shape",
"[",
"1",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"Output shape must be an integer multiple of input \"",
"\"image shape.\"",
")",
"sx",
"=",
"shape",
"[",
"1",
"]",
"//",
"image",
".",
"shape",
"[",
"1",
"]",
"sy",
"=",
"shape",
"[",
"0",
"]",
"//",
"image",
".",
"shape",
"[",
"0",
"]",
"ox",
"=",
"(",
"sx",
"-",
"1.0",
")",
"/",
"(",
"2.0",
"*",
"sx",
")",
"oy",
"=",
"(",
"sy",
"-",
"1.0",
")",
"/",
"(",
"2.0",
"*",
"sy",
")",
"# generate output coordinates:",
"y",
",",
"x",
"=",
"np",
".",
"indices",
"(",
"shape",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"x",
"=",
"x",
"/",
"sx",
"-",
"ox",
"y",
"=",
"y",
"/",
"sy",
"-",
"oy",
"# interpolate:",
"return",
"bilinear_interp",
"(",
"image",
",",
"x",
",",
"y",
")"
] |
Expand image from original shape to requested shape. Output shape
must be an integer multiple of input image shape for each axis.
|
[
"Expand",
"image",
"from",
"original",
"shape",
"to",
"requested",
"shape",
".",
"Output",
"shape",
"must",
"be",
"an",
"integer",
"multiple",
"of",
"input",
"image",
"shape",
"for",
"each",
"axis",
"."
] |
python
|
train
|
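The ox/oy offsets keep the input and output pixel grids center-aligned; a numeric sketch for a 2x upscale shows where the output samples land in input coordinates:

import numpy as np

# For sx = 2, output pixel centers land at -0.25, 0.25, 0.75, ... in input pixel
# coordinates, so each input pixel center is the midpoint of its two output samples.
sx = 2
ox = (sx - 1.0) / (2.0 * sx)    # 0.25 for a 2x upscale
x = np.arange(4) / sx - ox      # output columns 0..3 mapped to input coords
assert np.allclose(x, [-0.25, 0.25, 0.75, 1.25])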
openstack/horizon
|
horizon/middleware/base.py
|
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/middleware/base.py#L123-L159
|
def process_exception(self, request, exception):
"""Catches internal Horizon exception classes.
Exception classes such as NotAuthorized, NotFound and Http302
        are caught and handled gracefully.
"""
if isinstance(exception, (exceptions.NotAuthorized,
exceptions.NotAuthenticated)):
auth_url = settings.LOGIN_URL
next_url = iri_to_uri(request.get_full_path())
if next_url != auth_url:
field_name = REDIRECT_FIELD_NAME
else:
field_name = None
login_url = request.build_absolute_uri(auth_url)
response = redirect_to_login(next_url, login_url=login_url,
redirect_field_name=field_name)
if isinstance(exception, exceptions.NotAuthorized):
response.delete_cookie('messages')
return shortcuts.render(request, 'not_authorized.html',
status=403)
if request.is_ajax():
response_401 = http.HttpResponse(status=401)
response_401['X-Horizon-Location'] = response['location']
return response_401
return response
# If an internal "NotFound" error gets this far, return a real 404.
if isinstance(exception, exceptions.NotFound):
raise http.Http404(exception)
if isinstance(exception, exceptions.Http302):
# TODO(gabriel): Find a way to display an appropriate message to
# the user *on* the login form...
return shortcuts.redirect(exception.location)
|
[
"def",
"process_exception",
"(",
"self",
",",
"request",
",",
"exception",
")",
":",
"if",
"isinstance",
"(",
"exception",
",",
"(",
"exceptions",
".",
"NotAuthorized",
",",
"exceptions",
".",
"NotAuthenticated",
")",
")",
":",
"auth_url",
"=",
"settings",
".",
"LOGIN_URL",
"next_url",
"=",
"iri_to_uri",
"(",
"request",
".",
"get_full_path",
"(",
")",
")",
"if",
"next_url",
"!=",
"auth_url",
":",
"field_name",
"=",
"REDIRECT_FIELD_NAME",
"else",
":",
"field_name",
"=",
"None",
"login_url",
"=",
"request",
".",
"build_absolute_uri",
"(",
"auth_url",
")",
"response",
"=",
"redirect_to_login",
"(",
"next_url",
",",
"login_url",
"=",
"login_url",
",",
"redirect_field_name",
"=",
"field_name",
")",
"if",
"isinstance",
"(",
"exception",
",",
"exceptions",
".",
"NotAuthorized",
")",
":",
"response",
".",
"delete_cookie",
"(",
"'messages'",
")",
"return",
"shortcuts",
".",
"render",
"(",
"request",
",",
"'not_authorized.html'",
",",
"status",
"=",
"403",
")",
"if",
"request",
".",
"is_ajax",
"(",
")",
":",
"response_401",
"=",
"http",
".",
"HttpResponse",
"(",
"status",
"=",
"401",
")",
"response_401",
"[",
"'X-Horizon-Location'",
"]",
"=",
"response",
"[",
"'location'",
"]",
"return",
"response_401",
"return",
"response",
"# If an internal \"NotFound\" error gets this far, return a real 404.",
"if",
"isinstance",
"(",
"exception",
",",
"exceptions",
".",
"NotFound",
")",
":",
"raise",
"http",
".",
"Http404",
"(",
"exception",
")",
"if",
"isinstance",
"(",
"exception",
",",
"exceptions",
".",
"Http302",
")",
":",
"# TODO(gabriel): Find a way to display an appropriate message to",
"# the user *on* the login form...",
"return",
"shortcuts",
".",
"redirect",
"(",
"exception",
".",
"location",
")"
] |
Catches internal Horizon exception classes.
Exception classes such as NotAuthorized, NotFound and Http302
are caught and handled gracefully.
|
[
"Catches",
"internal",
"Horizon",
"exception",
"classes",
"."
] |
python
|
train
|
edx/edx-enterprise
|
enterprise/migrations/0065_add_enterprise_feature_roles.py
|
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/migrations/0065_add_enterprise_feature_roles.py#L13-L18
|
def create_roles(apps, schema_editor):
"""Create the enterprise roles if they do not already exist."""
EnterpriseFeatureRole = apps.get_model('enterprise', 'EnterpriseFeatureRole')
EnterpriseFeatureRole.objects.update_or_create(name=ENTERPRISE_CATALOG_ADMIN_ROLE)
EnterpriseFeatureRole.objects.update_or_create(name=ENTERPRISE_DASHBOARD_ADMIN_ROLE)
EnterpriseFeatureRole.objects.update_or_create(name=ENTERPRISE_ENROLLMENT_API_ADMIN_ROLE)
|
[
"def",
"create_roles",
"(",
"apps",
",",
"schema_editor",
")",
":",
"EnterpriseFeatureRole",
"=",
"apps",
".",
"get_model",
"(",
"'enterprise'",
",",
"'EnterpriseFeatureRole'",
")",
"EnterpriseFeatureRole",
".",
"objects",
".",
"update_or_create",
"(",
"name",
"=",
"ENTERPRISE_CATALOG_ADMIN_ROLE",
")",
"EnterpriseFeatureRole",
".",
"objects",
".",
"update_or_create",
"(",
"name",
"=",
"ENTERPRISE_DASHBOARD_ADMIN_ROLE",
")",
"EnterpriseFeatureRole",
".",
"objects",
".",
"update_or_create",
"(",
"name",
"=",
"ENTERPRISE_ENROLLMENT_API_ADMIN_ROLE",
")"
] |
Create the enterprise roles if they do not already exist.
|
[
"Create",
"the",
"enterprise",
"roles",
"if",
"they",
"do",
"not",
"already",
"exist",
"."
] |
python
|
valid
|
cjdrake/pyeda
|
pyeda/boolalg/bfarray.py
|
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/boolalg/bfarray.py#L1001-L1007
|
def _uint2farray(ftype, num, length=None):
"""Convert an unsigned integer to an farray."""
if num < 0:
raise ValueError("expected num >= 0")
else:
objs = _uint2objs(ftype, num, length)
return farray(objs)
|
[
"def",
"_uint2farray",
"(",
"ftype",
",",
"num",
",",
"length",
"=",
"None",
")",
":",
"if",
"num",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"expected num >= 0\"",
")",
"else",
":",
"objs",
"=",
"_uint2objs",
"(",
"ftype",
",",
"num",
",",
"length",
")",
"return",
"farray",
"(",
"objs",
")"
] |
Convert an unsigned integer to an farray.
|
[
"Convert",
"an",
"unsigned",
"integer",
"to",
"an",
"farray",
"."
] |
python
|
train
|
AndrewAnnex/SpiceyPy
|
spiceypy/spiceypy.py
|
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L2737-L2758
|
def drdlat(r, lon, lat):
"""
Compute the Jacobian of the transformation from latitudinal to
rectangular coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/drdlat_c.html
:param r: Distance of a point from the origin.
:type r: float
:param lon: Angle of the point from the XZ plane in radians.
:type lon: float
:param lat: Angle of the point from the XY plane in radians.
:type lat: float
:return: Matrix of partial derivatives.
:rtype: 3x3-Element Array of floats
"""
r = ctypes.c_double(r)
lon = ctypes.c_double(lon)
lat = ctypes.c_double(lat)
jacobi = stypes.emptyDoubleMatrix()
libspice.drdlat_c(r, lon, lat, jacobi)
return stypes.cMatrixToNumpy(jacobi)
|
[
"def",
"drdlat",
"(",
"r",
",",
"lon",
",",
"lat",
")",
":",
"r",
"=",
"ctypes",
".",
"c_double",
"(",
"r",
")",
"lon",
"=",
"ctypes",
".",
"c_double",
"(",
"lon",
")",
"lat",
"=",
"ctypes",
".",
"c_double",
"(",
"lat",
")",
"jacobi",
"=",
"stypes",
".",
"emptyDoubleMatrix",
"(",
")",
"libspice",
".",
"drdlat_c",
"(",
"r",
",",
"lon",
",",
"lat",
",",
"jacobi",
")",
"return",
"stypes",
".",
"cMatrixToNumpy",
"(",
"jacobi",
")"
] |
Compute the Jacobian of the transformation from latitudinal to
rectangular coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/drdlat_c.html
:param r: Distance of a point from the origin.
:type r: float
:param lon: Angle of the point from the XZ plane in radians.
:type lon: float
:param lat: Angle of the point from the XY plane in radians.
:type lat: float
:return: Matrix of partial derivatives.
:rtype: 3x3-Element Array of floats
|
[
"Compute",
"the",
"Jacobian",
"of",
"the",
"transformation",
"from",
"latitudinal",
"to",
"rectangular",
"coordinates",
"."
] |
python
|
train
|
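The matrix drdlat_c fills can also be written out analytically; a numpy sketch, assuming the SPICE latitudinal convention x = r cos(lat) cos(lon), y = r cos(lat) sin(lon), z = r sin(lat):

import numpy as np

def drdlat_analytic(r, lon, lat):
    # Jacobian d(x, y, z)/d(r, lon, lat) under the convention stated above.
    cl, sl = np.cos(lon), np.sin(lon)
    cp, sp = np.cos(lat), np.sin(lat)
    return np.array([
        [cp * cl, -r * cp * sl, -r * sp * cl],
        [cp * sl,  r * cp * cl, -r * sp * sl],
        [sp,       0.0,          r * cp],
    ])

# Should agree with spiceypy.drdlat(1.0, 0.3, 0.5) if that convention holds.
print(drdlat_analytic(1.0, 0.3, 0.5))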
maas/python-libmaas
|
maas/client/viscera/partitions.py
|
https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/partitions.py#L97-L103
|
async def mount(self, mount_point, *, mount_options=None):
"""Mount this partition."""
self._data = await self._handler.mount(
system_id=self.block_device.node.system_id,
device_id=self.block_device.id, id=self.id,
mount_point=mount_point,
mount_options=mount_options)
|
[
"async",
"def",
"mount",
"(",
"self",
",",
"mount_point",
",",
"*",
",",
"mount_options",
"=",
"None",
")",
":",
"self",
".",
"_data",
"=",
"await",
"self",
".",
"_handler",
".",
"mount",
"(",
"system_id",
"=",
"self",
".",
"block_device",
".",
"node",
".",
"system_id",
",",
"device_id",
"=",
"self",
".",
"block_device",
".",
"id",
",",
"id",
"=",
"self",
".",
"id",
",",
"mount_point",
"=",
"mount_point",
",",
"mount_options",
"=",
"mount_options",
")"
] |
Mount this partition.
|
[
"Mount",
"this",
"partition",
"."
] |
python
|
train
|
angr/angr
|
angr/knowledge_plugins/variables/variable_manager.py
|
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/knowledge_plugins/variables/variable_manager.py#L405-L423
|
def get_variable_accesses(self, variable, same_name=False):
"""
Get a list of all references to the given variable.
:param SimVariable variable: The variable.
:param bool same_name: Whether to include all variables with the same variable name, or just
based on the variable identifier.
:return: All references to the variable.
:rtype: list
"""
if variable.region == 'global':
return self.global_manager.get_variable_accesses(variable, same_name=same_name)
elif variable.region in self.function_managers:
return self.function_managers[variable.region].get_variable_accesses(variable, same_name=same_name)
l.warning('get_variable_accesses(): Region %s is not found.', variable.region)
return [ ]
|
[
"def",
"get_variable_accesses",
"(",
"self",
",",
"variable",
",",
"same_name",
"=",
"False",
")",
":",
"if",
"variable",
".",
"region",
"==",
"'global'",
":",
"return",
"self",
".",
"global_manager",
".",
"get_variable_accesses",
"(",
"variable",
",",
"same_name",
"=",
"same_name",
")",
"elif",
"variable",
".",
"region",
"in",
"self",
".",
"function_managers",
":",
"return",
"self",
".",
"function_managers",
"[",
"variable",
".",
"region",
"]",
".",
"get_variable_accesses",
"(",
"variable",
",",
"same_name",
"=",
"same_name",
")",
"l",
".",
"warning",
"(",
"'get_variable_accesses(): Region %s is not found.'",
",",
"variable",
".",
"region",
")",
"return",
"[",
"]"
] |
Get a list of all references to the given variable.
:param SimVariable variable: The variable.
:param bool same_name: Whether to include all variables with the same variable name, or just
based on the variable identifier.
:return: All references to the variable.
:rtype: list
|
[
"Get",
"a",
"list",
"of",
"all",
"references",
"to",
"the",
"given",
"variable",
"."
] |
python
|
train
|
eeue56/PyChat.js
|
pychatjs/server/connections.py
|
https://github.com/eeue56/PyChat.js/blob/45056de6f988350c90a6dbe674459a4affde8abc/pychatjs/server/connections.py#L21-L24
|
def write_message(self, message):
""" Writes a message to this chat connection's handler """
logging.debug("Sending message {mes} to {usr}".format(mes=message, usr=self.id))
self.handler.write_message(message)
|
[
"def",
"write_message",
"(",
"self",
",",
"message",
")",
":",
"logging",
".",
"debug",
"(",
"\"Sending message {mes} to {usr}\"",
".",
"format",
"(",
"mes",
"=",
"message",
",",
"usr",
"=",
"self",
".",
"id",
")",
")",
"self",
".",
"handler",
".",
"write_message",
"(",
"message",
")"
] |
Writes a message to this chat connection's handler
|
[
"Writes",
"a",
"message",
"to",
"this",
"chat",
"connection",
"s",
"handler"
] |
python
|
train
|
PmagPy/PmagPy
|
pmagpy/ipmag.py
|
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L6790-L6968
|
def curie(path_to_file='.', file_name='', magic=False,
window_length=3, save=False, save_folder='.', fmt='svg', t_begin="", t_end=""):
"""
    Plots and interprets Curie temperature data.
***
The 1st derivative is calculated from smoothed M-T curve (convolution
    with triangular window with width= <-w> degrees)
***
The 2nd derivative is calculated from smoothed 1st derivative curve
(using the same sliding window width)
***
    The estimated Curie temp. is the maximum of the 2nd derivative.
Temperature steps should be in multiples of 1.0 degrees.
Parameters
__________
file_name : name of file to be opened
Optional Parameters (defaults are used if not specified)
----------
path_to_file : path to directory that contains file (default is current directory, '.')
window_length : dimension of smoothing window (input to smooth() function)
save : boolean argument to save plots (default is False)
save_folder : relative directory where plots will be saved (default is current directory, '.')
fmt : format of saved figures
t_begin: start of truncated window for search
t_end: end of truncated window for search
magic : True if MagIC formated measurements.txt file
"""
plot = 0
window_len = window_length
# read data from file
complete_path = os.path.join(path_to_file, file_name)
if magic:
data_df = pd.read_csv(complete_path, sep='\t', header=1)
T = data_df['meas_temp'].values-273
magn_key = cb.get_intensity_col(data_df)
M = data_df[magn_key].values
else:
Data = np.loadtxt(complete_path, dtype=np.float)
T = Data.transpose()[0]
M = Data.transpose()[1]
T = list(T)
M = list(M)
# cut the data if -t is one of the flags
if t_begin != "":
while T[0] < t_begin:
M.pop(0)
T.pop(0)
while T[-1] > t_end:
M.pop(-1)
T.pop(-1)
# prepare the signal:
# from M(T) array with unequal deltaT
# to M(T) array with deltaT=(1 degree).
    # if deltaT is larger, then points are added using linear fit between
# consecutive data points.
# exit if deltaT is not integer
i = 0
while i < (len(T) - 1):
if (T[i + 1] - T[i]) % 1 > 0.001:
print("delta T should be integer, this program will not work!")
print("temperature range:", T[i], T[i + 1])
sys.exit()
if (T[i + 1] - T[i]) == 0.:
M[i] = np.average([M[i], M[i + 1]])
M.pop(i + 1)
T.pop(i + 1)
elif (T[i + 1] - T[i]) < 0.:
M.pop(i + 1)
T.pop(i + 1)
print("check data in T=%.0f ,M[T] is ignored" % (T[i]))
elif (T[i + 1] - T[i]) > 1.:
slope, b = np.polyfit([T[i], T[i + 1]], [M[i], M[i + 1]], 1)
for j in range(int(T[i + 1]) - int(T[i]) - 1):
M.insert(i + 1, slope * (T[i] + 1.) + b)
T.insert(i + 1, (T[i] + 1.))
i = i + 1
i = i + 1
# calculate the smoothed signal
M = np.array(M, 'f')
T = np.array(T, 'f')
M_smooth = []
M_smooth = smooth(M, window_len)
# plot the original data and the smooth data
PLT = {'M_T': 1, 'der1': 2, 'der2': 3, 'Curie': 4}
plt.figure(num=PLT['M_T'], figsize=(5, 5))
string = 'M-T (sliding window=%i)' % int(window_len)
pmagplotlib.plot_xy(PLT['M_T'], T, M_smooth, sym='-')
pmagplotlib.plot_xy(PLT['M_T'], T, M, sym='--',
xlab='Temperature C', ylab='Magnetization', title=string)
# calculate first derivative
d1, T_d1 = [], []
for i in range(len(M_smooth) - 1):
Dy = M_smooth[i - 1] - M_smooth[i + 1]
Dx = T[i - 1] - T[i + 1]
d1.append(old_div(Dy, Dx))
T_d1 = T[1:len(T - 1)]
d1 = np.array(d1, 'f')
d1_smooth = smooth(d1, window_len)
# plot the first derivative
plt.figure(num=PLT['der1'], figsize=(5, 5))
string = '1st derivative (sliding window=%i)' % int(window_len)
pmagplotlib.plot_xy(PLT['der1'], T_d1, d1_smooth,
sym='-', xlab='Temperature C', title=string)
pmagplotlib.plot_xy(PLT['der1'], T_d1, d1, sym='b--')
# calculate second derivative
d2, T_d2 = [], []
for i in range(len(d1_smooth) - 1):
Dy = d1_smooth[i - 1] - d1_smooth[i + 1]
Dx = T[i - 1] - T[i + 1]
# print Dy/Dx
d2.append(old_div(Dy, Dx))
T_d2 = T[2:len(T - 2)]
d2 = np.array(d2, 'f')
d2_smooth = smooth(d2, window_len)
# plot the second derivative
plt.figure(num=PLT['der2'], figsize=(5, 5))
string = '2nd derivative (sliding window=%i)' % int(window_len)
pmagplotlib.plot_xy(PLT['der2'], T_d2, d2, sym='-',
xlab='Temperature C', title=string)
d2 = list(d2)
print('second derivative maximum is at T=%i' %
int(T_d2[d2.index(max(d2))]))
# calculate Curie temperature for different width of sliding windows
curie, curie_1 = [], []
wn = list(range(5, 50, 1))
for win in wn:
# calculate the smoothed signal
M_smooth = []
M_smooth = smooth(M, win)
# calculate first derivative
d1, T_d1 = [], []
for i in range(len(M_smooth) - 1):
Dy = M_smooth[i - 1] - M_smooth[i + 1]
Dx = T[i - 1] - T[i + 1]
d1.append(old_div(Dy, Dx))
T_d1 = T[1:len(T - 1)]
d1 = np.array(d1, 'f')
d1_smooth = smooth(d1, win)
# calculate second derivative
d2, T_d2 = [], []
for i in range(len(d1_smooth) - 1):
Dy = d1_smooth[i - 1] - d1_smooth[i + 1]
Dx = T[i - 1] - T[i + 1]
d2.append(old_div(Dy, Dx))
T_d2 = T[2:len(T - 2)]
d2 = np.array(d2, 'f')
d2_smooth = smooth(d2, win)
d2 = list(d2)
d2_smooth = list(d2_smooth)
curie.append(T_d2[d2.index(max(d2))])
curie_1.append(T_d2[d2_smooth.index(max(d2_smooth))])
# plot Curie temp for different sliding window length
plt.figure(num=PLT['Curie'], figsize=(5, 5))
pmagplotlib.plot_xy(PLT['Curie'], wn, curie, sym='.',
xlab='Sliding Window Width (degrees)', ylab='Curie Temp', title='Curie Statistics')
files = {}
for key in list(PLT.keys()):
files[key] = str(key) + '.' + fmt
if save == True:
for key in list(PLT.keys()):
try:
plt.figure(num=PLT[key])
plt.savefig(save_folder + '/' + files[key].replace('/', '-'))
except:
print('could not save: ', PLT[key], files[key])
print("output file format not supported ")
plt.show()
|
[
"def",
"curie",
"(",
"path_to_file",
"=",
"'.'",
",",
"file_name",
"=",
"''",
",",
"magic",
"=",
"False",
",",
"window_length",
"=",
"3",
",",
"save",
"=",
"False",
",",
"save_folder",
"=",
"'.'",
",",
"fmt",
"=",
"'svg'",
",",
"t_begin",
"=",
"\"\"",
",",
"t_end",
"=",
"\"\"",
")",
":",
"plot",
"=",
"0",
"window_len",
"=",
"window_length",
"# read data from file",
"complete_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path_to_file",
",",
"file_name",
")",
"if",
"magic",
":",
"data_df",
"=",
"pd",
".",
"read_csv",
"(",
"complete_path",
",",
"sep",
"=",
"'\\t'",
",",
"header",
"=",
"1",
")",
"T",
"=",
"data_df",
"[",
"'meas_temp'",
"]",
".",
"values",
"-",
"273",
"magn_key",
"=",
"cb",
".",
"get_intensity_col",
"(",
"data_df",
")",
"M",
"=",
"data_df",
"[",
"magn_key",
"]",
".",
"values",
"else",
":",
"Data",
"=",
"np",
".",
"loadtxt",
"(",
"complete_path",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"T",
"=",
"Data",
".",
"transpose",
"(",
")",
"[",
"0",
"]",
"M",
"=",
"Data",
".",
"transpose",
"(",
")",
"[",
"1",
"]",
"T",
"=",
"list",
"(",
"T",
")",
"M",
"=",
"list",
"(",
"M",
")",
"# cut the data if -t is one of the flags",
"if",
"t_begin",
"!=",
"\"\"",
":",
"while",
"T",
"[",
"0",
"]",
"<",
"t_begin",
":",
"M",
".",
"pop",
"(",
"0",
")",
"T",
".",
"pop",
"(",
"0",
")",
"while",
"T",
"[",
"-",
"1",
"]",
">",
"t_end",
":",
"M",
".",
"pop",
"(",
"-",
"1",
")",
"T",
".",
"pop",
"(",
"-",
"1",
")",
"# prepare the signal:",
"# from M(T) array with unequal deltaT",
"# to M(T) array with deltaT=(1 degree).",
"# if delataT is larger, then points are added using linear fit between",
"# consecutive data points.",
"# exit if deltaT is not integer",
"i",
"=",
"0",
"while",
"i",
"<",
"(",
"len",
"(",
"T",
")",
"-",
"1",
")",
":",
"if",
"(",
"T",
"[",
"i",
"+",
"1",
"]",
"-",
"T",
"[",
"i",
"]",
")",
"%",
"1",
">",
"0.001",
":",
"print",
"(",
"\"delta T should be integer, this program will not work!\"",
")",
"print",
"(",
"\"temperature range:\"",
",",
"T",
"[",
"i",
"]",
",",
"T",
"[",
"i",
"+",
"1",
"]",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"(",
"T",
"[",
"i",
"+",
"1",
"]",
"-",
"T",
"[",
"i",
"]",
")",
"==",
"0.",
":",
"M",
"[",
"i",
"]",
"=",
"np",
".",
"average",
"(",
"[",
"M",
"[",
"i",
"]",
",",
"M",
"[",
"i",
"+",
"1",
"]",
"]",
")",
"M",
".",
"pop",
"(",
"i",
"+",
"1",
")",
"T",
".",
"pop",
"(",
"i",
"+",
"1",
")",
"elif",
"(",
"T",
"[",
"i",
"+",
"1",
"]",
"-",
"T",
"[",
"i",
"]",
")",
"<",
"0.",
":",
"M",
".",
"pop",
"(",
"i",
"+",
"1",
")",
"T",
".",
"pop",
"(",
"i",
"+",
"1",
")",
"print",
"(",
"\"check data in T=%.0f ,M[T] is ignored\"",
"%",
"(",
"T",
"[",
"i",
"]",
")",
")",
"elif",
"(",
"T",
"[",
"i",
"+",
"1",
"]",
"-",
"T",
"[",
"i",
"]",
")",
">",
"1.",
":",
"slope",
",",
"b",
"=",
"np",
".",
"polyfit",
"(",
"[",
"T",
"[",
"i",
"]",
",",
"T",
"[",
"i",
"+",
"1",
"]",
"]",
",",
"[",
"M",
"[",
"i",
"]",
",",
"M",
"[",
"i",
"+",
"1",
"]",
"]",
",",
"1",
")",
"for",
"j",
"in",
"range",
"(",
"int",
"(",
"T",
"[",
"i",
"+",
"1",
"]",
")",
"-",
"int",
"(",
"T",
"[",
"i",
"]",
")",
"-",
"1",
")",
":",
"M",
".",
"insert",
"(",
"i",
"+",
"1",
",",
"slope",
"*",
"(",
"T",
"[",
"i",
"]",
"+",
"1.",
")",
"+",
"b",
")",
"T",
".",
"insert",
"(",
"i",
"+",
"1",
",",
"(",
"T",
"[",
"i",
"]",
"+",
"1.",
")",
")",
"i",
"=",
"i",
"+",
"1",
"i",
"=",
"i",
"+",
"1",
"# calculate the smoothed signal",
"M",
"=",
"np",
".",
"array",
"(",
"M",
",",
"'f'",
")",
"T",
"=",
"np",
".",
"array",
"(",
"T",
",",
"'f'",
")",
"M_smooth",
"=",
"[",
"]",
"M_smooth",
"=",
"smooth",
"(",
"M",
",",
"window_len",
")",
"# plot the original data and the smooth data",
"PLT",
"=",
"{",
"'M_T'",
":",
"1",
",",
"'der1'",
":",
"2",
",",
"'der2'",
":",
"3",
",",
"'Curie'",
":",
"4",
"}",
"plt",
".",
"figure",
"(",
"num",
"=",
"PLT",
"[",
"'M_T'",
"]",
",",
"figsize",
"=",
"(",
"5",
",",
"5",
")",
")",
"string",
"=",
"'M-T (sliding window=%i)'",
"%",
"int",
"(",
"window_len",
")",
"pmagplotlib",
".",
"plot_xy",
"(",
"PLT",
"[",
"'M_T'",
"]",
",",
"T",
",",
"M_smooth",
",",
"sym",
"=",
"'-'",
")",
"pmagplotlib",
".",
"plot_xy",
"(",
"PLT",
"[",
"'M_T'",
"]",
",",
"T",
",",
"M",
",",
"sym",
"=",
"'--'",
",",
"xlab",
"=",
"'Temperature C'",
",",
"ylab",
"=",
"'Magnetization'",
",",
"title",
"=",
"string",
")",
"# calculate first derivative",
"d1",
",",
"T_d1",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"M_smooth",
")",
"-",
"1",
")",
":",
"Dy",
"=",
"M_smooth",
"[",
"i",
"-",
"1",
"]",
"-",
"M_smooth",
"[",
"i",
"+",
"1",
"]",
"Dx",
"=",
"T",
"[",
"i",
"-",
"1",
"]",
"-",
"T",
"[",
"i",
"+",
"1",
"]",
"d1",
".",
"append",
"(",
"old_div",
"(",
"Dy",
",",
"Dx",
")",
")",
"T_d1",
"=",
"T",
"[",
"1",
":",
"len",
"(",
"T",
"-",
"1",
")",
"]",
"d1",
"=",
"np",
".",
"array",
"(",
"d1",
",",
"'f'",
")",
"d1_smooth",
"=",
"smooth",
"(",
"d1",
",",
"window_len",
")",
"# plot the first derivative",
"plt",
".",
"figure",
"(",
"num",
"=",
"PLT",
"[",
"'der1'",
"]",
",",
"figsize",
"=",
"(",
"5",
",",
"5",
")",
")",
"string",
"=",
"'1st derivative (sliding window=%i)'",
"%",
"int",
"(",
"window_len",
")",
"pmagplotlib",
".",
"plot_xy",
"(",
"PLT",
"[",
"'der1'",
"]",
",",
"T_d1",
",",
"d1_smooth",
",",
"sym",
"=",
"'-'",
",",
"xlab",
"=",
"'Temperature C'",
",",
"title",
"=",
"string",
")",
"pmagplotlib",
".",
"plot_xy",
"(",
"PLT",
"[",
"'der1'",
"]",
",",
"T_d1",
",",
"d1",
",",
"sym",
"=",
"'b--'",
")",
"# calculate second derivative",
"d2",
",",
"T_d2",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"d1_smooth",
")",
"-",
"1",
")",
":",
"Dy",
"=",
"d1_smooth",
"[",
"i",
"-",
"1",
"]",
"-",
"d1_smooth",
"[",
"i",
"+",
"1",
"]",
"Dx",
"=",
"T",
"[",
"i",
"-",
"1",
"]",
"-",
"T",
"[",
"i",
"+",
"1",
"]",
"# print Dy/Dx",
"d2",
".",
"append",
"(",
"old_div",
"(",
"Dy",
",",
"Dx",
")",
")",
"T_d2",
"=",
"T",
"[",
"2",
":",
"len",
"(",
"T",
"-",
"2",
")",
"]",
"d2",
"=",
"np",
".",
"array",
"(",
"d2",
",",
"'f'",
")",
"d2_smooth",
"=",
"smooth",
"(",
"d2",
",",
"window_len",
")",
"# plot the second derivative",
"plt",
".",
"figure",
"(",
"num",
"=",
"PLT",
"[",
"'der2'",
"]",
",",
"figsize",
"=",
"(",
"5",
",",
"5",
")",
")",
"string",
"=",
"'2nd derivative (sliding window=%i)'",
"%",
"int",
"(",
"window_len",
")",
"pmagplotlib",
".",
"plot_xy",
"(",
"PLT",
"[",
"'der2'",
"]",
",",
"T_d2",
",",
"d2",
",",
"sym",
"=",
"'-'",
",",
"xlab",
"=",
"'Temperature C'",
",",
"title",
"=",
"string",
")",
"d2",
"=",
"list",
"(",
"d2",
")",
"print",
"(",
"'second derivative maximum is at T=%i'",
"%",
"int",
"(",
"T_d2",
"[",
"d2",
".",
"index",
"(",
"max",
"(",
"d2",
")",
")",
"]",
")",
")",
"# calculate Curie temperature for different width of sliding windows",
"curie",
",",
"curie_1",
"=",
"[",
"]",
",",
"[",
"]",
"wn",
"=",
"list",
"(",
"range",
"(",
"5",
",",
"50",
",",
"1",
")",
")",
"for",
"win",
"in",
"wn",
":",
"# calculate the smoothed signal",
"M_smooth",
"=",
"[",
"]",
"M_smooth",
"=",
"smooth",
"(",
"M",
",",
"win",
")",
"# calculate first derivative",
"d1",
",",
"T_d1",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"M_smooth",
")",
"-",
"1",
")",
":",
"Dy",
"=",
"M_smooth",
"[",
"i",
"-",
"1",
"]",
"-",
"M_smooth",
"[",
"i",
"+",
"1",
"]",
"Dx",
"=",
"T",
"[",
"i",
"-",
"1",
"]",
"-",
"T",
"[",
"i",
"+",
"1",
"]",
"d1",
".",
"append",
"(",
"old_div",
"(",
"Dy",
",",
"Dx",
")",
")",
"T_d1",
"=",
"T",
"[",
"1",
":",
"len",
"(",
"T",
"-",
"1",
")",
"]",
"d1",
"=",
"np",
".",
"array",
"(",
"d1",
",",
"'f'",
")",
"d1_smooth",
"=",
"smooth",
"(",
"d1",
",",
"win",
")",
"# calculate second derivative",
"d2",
",",
"T_d2",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"d1_smooth",
")",
"-",
"1",
")",
":",
"Dy",
"=",
"d1_smooth",
"[",
"i",
"-",
"1",
"]",
"-",
"d1_smooth",
"[",
"i",
"+",
"1",
"]",
"Dx",
"=",
"T",
"[",
"i",
"-",
"1",
"]",
"-",
"T",
"[",
"i",
"+",
"1",
"]",
"d2",
".",
"append",
"(",
"old_div",
"(",
"Dy",
",",
"Dx",
")",
")",
"T_d2",
"=",
"T",
"[",
"2",
":",
"len",
"(",
"T",
"-",
"2",
")",
"]",
"d2",
"=",
"np",
".",
"array",
"(",
"d2",
",",
"'f'",
")",
"d2_smooth",
"=",
"smooth",
"(",
"d2",
",",
"win",
")",
"d2",
"=",
"list",
"(",
"d2",
")",
"d2_smooth",
"=",
"list",
"(",
"d2_smooth",
")",
"curie",
".",
"append",
"(",
"T_d2",
"[",
"d2",
".",
"index",
"(",
"max",
"(",
"d2",
")",
")",
"]",
")",
"curie_1",
".",
"append",
"(",
"T_d2",
"[",
"d2_smooth",
".",
"index",
"(",
"max",
"(",
"d2_smooth",
")",
")",
"]",
")",
"# plot Curie temp for different sliding window length",
"plt",
".",
"figure",
"(",
"num",
"=",
"PLT",
"[",
"'Curie'",
"]",
",",
"figsize",
"=",
"(",
"5",
",",
"5",
")",
")",
"pmagplotlib",
".",
"plot_xy",
"(",
"PLT",
"[",
"'Curie'",
"]",
",",
"wn",
",",
"curie",
",",
"sym",
"=",
"'.'",
",",
"xlab",
"=",
"'Sliding Window Width (degrees)'",
",",
"ylab",
"=",
"'Curie Temp'",
",",
"title",
"=",
"'Curie Statistics'",
")",
"files",
"=",
"{",
"}",
"for",
"key",
"in",
"list",
"(",
"PLT",
".",
"keys",
"(",
")",
")",
":",
"files",
"[",
"key",
"]",
"=",
"str",
"(",
"key",
")",
"+",
"'.'",
"+",
"fmt",
"if",
"save",
"==",
"True",
":",
"for",
"key",
"in",
"list",
"(",
"PLT",
".",
"keys",
"(",
")",
")",
":",
"try",
":",
"plt",
".",
"figure",
"(",
"num",
"=",
"PLT",
"[",
"key",
"]",
")",
"plt",
".",
"savefig",
"(",
"save_folder",
"+",
"'/'",
"+",
"files",
"[",
"key",
"]",
".",
"replace",
"(",
"'/'",
",",
"'-'",
")",
")",
"except",
":",
"print",
"(",
"'could not save: '",
",",
"PLT",
"[",
"key",
"]",
",",
"files",
"[",
"key",
"]",
")",
"print",
"(",
"\"output file format not supported \"",
")",
"plt",
".",
"show",
"(",
")"
] |
Plots and interprets curie temperature data.
***
The 1st derivative is calculated from smoothed M-T curve (convolution
with triangular window with width= <-w> degrees)
***
The 2nd derivative is calculated from smoothed 1st derivative curve
(using the same sliding window width)
***
The estimated curie temp. is the maximum of the 2nd derivative.
Temperature steps should be in multiples of 1.0 degrees.
Parameters
----------
file_name : name of file to be opened
Optional Parameters (defaults are used if not specified)
----------
path_to_file : path to directory that contains file (default is current directory, '.')
window_length : dimension of smoothing window (input to smooth() function)
save : boolean argument to save plots (default is False)
save_folder : relative directory where plots will be saved (default is current directory, '.')
fmt : format of saved figures
t_begin: start of truncated window for search
t_end: end of truncated window for search
magic : True if MagIC formated measurements.txt file
|
[
"Plots",
"and",
"interprets",
"curie",
"temperature",
"data",
".",
"***",
"The",
"1st",
"derivative",
"is",
"calculated",
"from",
"smoothed",
"M",
"-",
"T",
"curve",
"(",
"convolution",
"with",
"trianfular",
"window",
"with",
"width",
"=",
"<",
"-",
"w",
">",
"degrees",
")",
"***",
"The",
"2nd",
"derivative",
"is",
"calculated",
"from",
"smoothed",
"1st",
"derivative",
"curve",
"(",
"using",
"the",
"same",
"sliding",
"window",
"width",
")",
"***",
"The",
"estimated",
"curie",
"temp",
".",
"is",
"the",
"maximum",
"of",
"the",
"2nd",
"derivative",
".",
"Temperature",
"steps",
"should",
"be",
"in",
"multiples",
"of",
"1",
".",
"0",
"degrees",
"."
] |
python
|
train
|
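The function in this row estimates a Curie temperature by smoothing the M-T curve, differentiating twice, and taking the maximum of the second derivative. Below is a minimal, self-contained sketch of that pipeline; the `smooth` helper here is a plain moving average standing in for the triangular-window convolution the original uses, and the M-T data are synthetic, so treat this as an illustration of the technique rather than the PmagPy implementation.

import numpy as np

def smooth(x, window_len):
    # moving-average stand-in for the triangular-window convolution
    w = np.ones(window_len) / window_len
    return np.convolve(x, w, mode='same')

# synthetic M-T curve: magnetization decaying to zero near T_c = 580 C
T = np.arange(0.0, 700.0, 1.0)
M = 1.0 / (1.0 + np.exp((T - 580.0) / 15.0))

M_s = smooth(M, 10)
d1 = np.gradient(M_s, T)              # first derivative of the smoothed curve
d2 = np.gradient(smooth(d1, 10), T)   # second derivative of the smoothed first derivative
print('estimated Curie temperature: %.0f C' % T[np.argmax(d2)])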
snowplow/snowplow-python-analytics-sdk
|
snowplow_analytics_sdk/run_manifests.py
|
https://github.com/snowplow/snowplow-python-analytics-sdk/blob/0ddca91e3f6d8bed88627fa557790aa4868bdace/snowplow_analytics_sdk/run_manifests.py#L112-L137
|
def split_full_path(path):
"""Return pair of bucket without protocol and path
Arguments:
path - valid S3 path, such as s3://somebucket/events
>>> split_full_path('s3://mybucket/path-to-events')
('mybucket', 'path-to-events/')
>>> split_full_path('s3://mybucket')
('mybucket', None)
>>> split_full_path('s3n://snowplow-bucket/some/prefix/')
('snowplow-bucket', 'some/prefix/')
"""
if path.startswith('s3://'):
path = path[5:]
elif path.startswith('s3n://'):
path = path[6:]
elif path.startswith('s3a://'):
path = path[6:]
else:
raise ValueError("S3 path should start with s3://, s3n:// or "
"s3a:// prefix")
parts = path.split('/')
bucket = parts[0]
path = '/'.join(parts[1:])
return bucket, normalize_prefix(path)
|
[
"def",
"split_full_path",
"(",
"path",
")",
":",
"if",
"path",
".",
"startswith",
"(",
"'s3://'",
")",
":",
"path",
"=",
"path",
"[",
"5",
":",
"]",
"elif",
"path",
".",
"startswith",
"(",
"'s3n://'",
")",
":",
"path",
"=",
"path",
"[",
"6",
":",
"]",
"elif",
"path",
".",
"startswith",
"(",
"'s3a://'",
")",
":",
"path",
"=",
"path",
"[",
"6",
":",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"S3 path should start with s3://, s3n:// or \"",
"\"s3a:// prefix\"",
")",
"parts",
"=",
"path",
".",
"split",
"(",
"'/'",
")",
"bucket",
"=",
"parts",
"[",
"0",
"]",
"path",
"=",
"'/'",
".",
"join",
"(",
"parts",
"[",
"1",
":",
"]",
")",
"return",
"bucket",
",",
"normalize_prefix",
"(",
"path",
")"
] |
Return pair of bucket without protocol and path
Arguments:
path - valid S3 path, such as s3://somebucket/events
>>> split_full_path('s3://mybucket/path-to-events')
('mybucket', 'path-to-events/')
>>> split_full_path('s3://mybucket')
('mybucket', None)
>>> split_full_path('s3n://snowplow-bucket/some/prefix/')
('snowplow-bucket', 'some/prefix/')
|
[
"Return",
"pair",
"of",
"bucket",
"without",
"protocol",
"and",
"path"
] |
python
|
test
|
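`split_full_path` delegates trailing-slash handling to a `normalize_prefix` helper that is referenced but not shown in this row. Judging from the doctests (a bare bucket maps to `None`, any other prefix gains a trailing `/`), a plausible reconstruction of that helper, plus a usage example reusing `split_full_path` from the snippet above, might look like this; the actual SDK implementation may differ.

def normalize_prefix(prefix):
    # hypothetical reconstruction inferred from the doctests above
    if not prefix:
        return None
    return prefix if prefix.endswith('/') else prefix + '/'

bucket, prefix = split_full_path('s3://mybucket/path-to-events')
assert (bucket, prefix) == ('mybucket', 'path-to-events/')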
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/frontend/qt/console/console_widget.py
|
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/console_widget.py#L746-L752
|
def change_font_size(self, delta):
"""Change the font size by the specified amount (in points).
"""
font = self.font
size = max(font.pointSize() + delta, 1) # minimum 1 point
font.setPointSize(size)
self._set_font(font)
|
[
"def",
"change_font_size",
"(",
"self",
",",
"delta",
")",
":",
"font",
"=",
"self",
".",
"font",
"size",
"=",
"max",
"(",
"font",
".",
"pointSize",
"(",
")",
"+",
"delta",
",",
"1",
")",
"# minimum 1 point",
"font",
".",
"setPointSize",
"(",
"size",
")",
"self",
".",
"_set_font",
"(",
"font",
")"
] |
Change the font size by the specified amount (in points).
|
[
"Change",
"the",
"font",
"size",
"by",
"the",
"specified",
"amount",
"(",
"in",
"points",
")",
"."
] |
python
|
test
|
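`change_font_size` clamps the new size at 1 point, so repeated shrinking can never produce an invalid font. A usage sketch, assuming a constructed `ConsoleWidget` instance named `widget` inside a running Qt application:

widget.change_font_size(+2)   # grow by 2 points
widget.change_font_size(-10)  # shrink; the size is clamped to at least 1 point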
danielhrisca/asammdf
|
asammdf/mdf.py
|
https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/mdf.py#L2193-L2425
|
def stack(files, version="4.10", sync=True, **kwargs):
""" stack several files and return the stacked *MDF* object
Parameters
----------
files : list | tuple
list of *MDF* file names or *MDF* instances
version : str
merged file version
sync : bool
sync the files based on the start of measurement, default *True*
Returns
-------
stacked : MDF
new *MDF* object with stacked channels
"""
if not files:
raise MdfException("No files given for stack")
version = validate_version_argument(version)
callback = kwargs.get("callback", None)
stacked = MDF(version=version, callback=callback)
files_nr = len(files)
if callback:
callback(0, files_nr)
if sync:
timestamps = []
for file in files:
if isinstance(file, MDF):
timestamps.append(file.header.start_time)
else:
with open(file, "rb") as mdf:
mdf.seek(64)
blk_id = mdf.read(2)
if blk_id == b"HD":
header = HeaderV3
else:
blk_id += mdf.read(2)
if blk_id == b"##HD":
header = HeaderV4
else:
raise MdfException(f'"{file}" is not a valid MDF file')
header = header(address=64, stream=mdf)
timestamps.append(header.start_time)
try:
oldest = min(timestamps)
except TypeError:
timestamps = [
timestamp.astimezone(timezone.utc)
for timestamp in timestamps
]
oldest = min(timestamps)
offsets = [(timestamp - oldest).total_seconds() for timestamp in timestamps]
stacked.header.start_time = oldest
else:
offsets = [0 for file in files]
cg_nr = -1
for offset, mdf in zip(offsets, files):
if not isinstance(mdf, MDF):
mdf = MDF(mdf)
cg_offset = cg_nr + 1
for i, group in enumerate(mdf.groups):
idx = 0
if version < "4.00":
encodings = []
included_channels = mdf._included_channels(i)
if included_channels:
cg_nr += 1
else:
continue
try:
for can_id, info in mdf.can_logging_db.items():
if can_id not in mdf.can_logging_db:
mdf.can_logging_db[can_id] = {}
mdf.can_logging_db[can_id].update(
{
message_id: cg_index + cg_offset
for message_id, cg_index in info.items()
}
)
except AttributeError:
pass
_, dtypes = mdf._prepare_record(group)
data = mdf._load_data(group)
for fragment in data:
if dtypes.itemsize:
group.record = np.core.records.fromstring(
fragment[0], dtype=dtypes
)
else:
group.record = None
if idx == 0:
signals = []
for j in included_channels:
sig = mdf.get(
group=i,
index=j,
data=fragment,
raw=True,
ignore_invalidation_bits=True,
copy_master=False,
)
if version < "4.00":
if sig.samples.dtype.kind == "S":
encodings.append(sig.encoding)
strsig = mdf.get(
group=i,
index=j,
samples_only=True,
ignore_invalidation_bits=True,
)[0]
sig.samples = sig.samples.astype(strsig.dtype)
del strsig
if sig.encoding != "latin-1":
if sig.encoding == "utf-16-le":
sig.samples = (
sig.samples.view(np.uint16)
.byteswap()
.view(sig.samples.dtype)
)
sig.samples = encode(
decode(sig.samples, "utf-16-be"),
"latin-1",
)
else:
sig.samples = encode(
decode(sig.samples, sig.encoding),
"latin-1",
)
else:
encodings.append(None)
if not sig.samples.flags.writeable:
sig.samples = sig.samples.copy()
signals.append(sig)
if signals:
if sync:
timestamps = signals[0].timestamps + offset
for sig in signals:
sig.timestamps = timestamps
stacked.append(signals, common_timebase=True)
try:
if group.channel_group.flags & v4c.FLAG_CG_BUS_EVENT:
stacked.groups[-1].channel_group.flags = group.channel_group.flags
stacked.groups[-1].channel_group.acq_name = group.channel_group.acq_name
stacked.groups[-1].channel_group.acq_source = group.channel_group.acq_source
stacked.groups[-1].channel_group.comment = group.channel_group.comment
except AttributeError:
pass
idx += 1
else:
master = mdf.get_master(i, fragment, copy_master=False)
if sync:
master = master + offset
if len(master):
signals = [(master, None)]
for k, j in enumerate(included_channels):
sig = mdf.get(
group=i,
index=j,
data=fragment,
raw=True,
samples_only=True,
ignore_invalidation_bits=True,
)
signals.append(sig)
if version < "4.00":
encoding = encodings[k]
samples = sig[0]
if encoding:
if encoding != "latin-1":
if encoding == "utf-16-le":
samples = (
samples.view(np.uint16)
.byteswap()
.view(samples.dtype)
)
samples = encode(
decode(samples, "utf-16-be"),
"latin-1",
)
else:
samples = encode(
decode(samples, encoding), "latin-1"
)
sig.samples = samples
if signals:
stacked.extend(cg_nr, signals)
idx += 1
group.record = None
stacked.groups[
-1
].channel_group.comment = (
f'stacked from channel group {i} of "{mdf.name.parent}"'
)
if callback:
callback(idx, files_nr)
if MDF._terminate:
return
return stacked
|
[
"def",
"stack",
"(",
"files",
",",
"version",
"=",
"\"4.10\"",
",",
"sync",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"files",
":",
"raise",
"MdfException",
"(",
"\"No files given for stack\"",
")",
"version",
"=",
"validate_version_argument",
"(",
"version",
")",
"callback",
"=",
"kwargs",
".",
"get",
"(",
"\"callback\"",
",",
"None",
")",
"stacked",
"=",
"MDF",
"(",
"version",
"=",
"version",
",",
"callback",
"=",
"callback",
")",
"files_nr",
"=",
"len",
"(",
"files",
")",
"if",
"callback",
":",
"callback",
"(",
"0",
",",
"files_nr",
")",
"if",
"sync",
":",
"timestamps",
"=",
"[",
"]",
"for",
"file",
"in",
"files",
":",
"if",
"isinstance",
"(",
"file",
",",
"MDF",
")",
":",
"timestamps",
".",
"append",
"(",
"file",
".",
"header",
".",
"start_time",
")",
"else",
":",
"with",
"open",
"(",
"file",
",",
"\"rb\"",
")",
"as",
"mdf",
":",
"mdf",
".",
"seek",
"(",
"64",
")",
"blk_id",
"=",
"mdf",
".",
"read",
"(",
"2",
")",
"if",
"blk_id",
"==",
"b\"HD\"",
":",
"header",
"=",
"HeaderV3",
"else",
":",
"blk_id",
"+=",
"mdf",
".",
"read",
"(",
"2",
")",
"if",
"blk_id",
"==",
"b\"##HD\"",
":",
"header",
"=",
"HeaderV4",
"else",
":",
"raise",
"MdfException",
"(",
"f'\"{file}\" is not a valid MDF file'",
")",
"header",
"=",
"header",
"(",
"address",
"=",
"64",
",",
"stream",
"=",
"mdf",
")",
"timestamps",
".",
"append",
"(",
"header",
".",
"start_time",
")",
"try",
":",
"oldest",
"=",
"min",
"(",
"timestamps",
")",
"except",
"TypeError",
":",
"timestamps",
"=",
"[",
"timestamp",
".",
"astimezone",
"(",
"timezone",
".",
"utc",
")",
"for",
"timestamp",
"in",
"timestamps",
"]",
"oldest",
"=",
"min",
"(",
"timestamps",
")",
"offsets",
"=",
"[",
"(",
"timestamp",
"-",
"oldest",
")",
".",
"total_seconds",
"(",
")",
"for",
"timestamp",
"in",
"timestamps",
"]",
"stacked",
".",
"header",
".",
"start_time",
"=",
"oldest",
"else",
":",
"offsets",
"=",
"[",
"0",
"for",
"file",
"in",
"files",
"]",
"cg_nr",
"=",
"-",
"1",
"for",
"offset",
",",
"mdf",
"in",
"zip",
"(",
"offsets",
",",
"files",
")",
":",
"if",
"not",
"isinstance",
"(",
"mdf",
",",
"MDF",
")",
":",
"mdf",
"=",
"MDF",
"(",
"mdf",
")",
"cg_offset",
"=",
"cg_nr",
"+",
"1",
"for",
"i",
",",
"group",
"in",
"enumerate",
"(",
"mdf",
".",
"groups",
")",
":",
"idx",
"=",
"0",
"if",
"version",
"<",
"\"4.00\"",
":",
"encodings",
"=",
"[",
"]",
"included_channels",
"=",
"mdf",
".",
"_included_channels",
"(",
"i",
")",
"if",
"included_channels",
":",
"cg_nr",
"+=",
"1",
"else",
":",
"continue",
"try",
":",
"for",
"can_id",
",",
"info",
"in",
"mdf",
".",
"can_logging_db",
".",
"items",
"(",
")",
":",
"if",
"can_id",
"not",
"in",
"mdf",
".",
"can_logging_db",
":",
"mdf",
".",
"can_logging_db",
"[",
"can_id",
"]",
"=",
"{",
"}",
"mdf",
".",
"can_logging_db",
"[",
"can_id",
"]",
".",
"update",
"(",
"{",
"message_id",
":",
"cg_index",
"+",
"cg_offset",
"for",
"message_id",
",",
"cg_index",
"in",
"info",
".",
"items",
"(",
")",
"}",
")",
"except",
"AttributeError",
":",
"pass",
"_",
",",
"dtypes",
"=",
"mdf",
".",
"_prepare_record",
"(",
"group",
")",
"data",
"=",
"mdf",
".",
"_load_data",
"(",
"group",
")",
"for",
"fragment",
"in",
"data",
":",
"if",
"dtypes",
".",
"itemsize",
":",
"group",
".",
"record",
"=",
"np",
".",
"core",
".",
"records",
".",
"fromstring",
"(",
"fragment",
"[",
"0",
"]",
",",
"dtype",
"=",
"dtypes",
")",
"else",
":",
"group",
".",
"record",
"=",
"None",
"if",
"idx",
"==",
"0",
":",
"signals",
"=",
"[",
"]",
"for",
"j",
"in",
"included_channels",
":",
"sig",
"=",
"mdf",
".",
"get",
"(",
"group",
"=",
"i",
",",
"index",
"=",
"j",
",",
"data",
"=",
"fragment",
",",
"raw",
"=",
"True",
",",
"ignore_invalidation_bits",
"=",
"True",
",",
"copy_master",
"=",
"False",
",",
")",
"if",
"version",
"<",
"\"4.00\"",
":",
"if",
"sig",
".",
"samples",
".",
"dtype",
".",
"kind",
"==",
"\"S\"",
":",
"encodings",
".",
"append",
"(",
"sig",
".",
"encoding",
")",
"strsig",
"=",
"mdf",
".",
"get",
"(",
"group",
"=",
"i",
",",
"index",
"=",
"j",
",",
"samples_only",
"=",
"True",
",",
"ignore_invalidation_bits",
"=",
"True",
",",
")",
"[",
"0",
"]",
"sig",
".",
"samples",
"=",
"sig",
".",
"samples",
".",
"astype",
"(",
"strsig",
".",
"dtype",
")",
"del",
"strsig",
"if",
"sig",
".",
"encoding",
"!=",
"\"latin-1\"",
":",
"if",
"sig",
".",
"encoding",
"==",
"\"utf-16-le\"",
":",
"sig",
".",
"samples",
"=",
"(",
"sig",
".",
"samples",
".",
"view",
"(",
"np",
".",
"uint16",
")",
".",
"byteswap",
"(",
")",
".",
"view",
"(",
"sig",
".",
"samples",
".",
"dtype",
")",
")",
"sig",
".",
"samples",
"=",
"encode",
"(",
"decode",
"(",
"sig",
".",
"samples",
",",
"\"utf-16-be\"",
")",
",",
"\"latin-1\"",
",",
")",
"else",
":",
"sig",
".",
"samples",
"=",
"encode",
"(",
"decode",
"(",
"sig",
".",
"samples",
",",
"sig",
".",
"encoding",
")",
",",
"\"latin-1\"",
",",
")",
"else",
":",
"encodings",
".",
"append",
"(",
"None",
")",
"if",
"not",
"sig",
".",
"samples",
".",
"flags",
".",
"writeable",
":",
"sig",
".",
"samples",
"=",
"sig",
".",
"samples",
".",
"copy",
"(",
")",
"signals",
".",
"append",
"(",
"sig",
")",
"if",
"signals",
":",
"if",
"sync",
":",
"timestamps",
"=",
"signals",
"[",
"0",
"]",
".",
"timestamps",
"+",
"offset",
"for",
"sig",
"in",
"signals",
":",
"sig",
".",
"timestamps",
"=",
"timestamps",
"stacked",
".",
"append",
"(",
"signals",
",",
"common_timebase",
"=",
"True",
")",
"try",
":",
"if",
"group",
".",
"channel_group",
".",
"flags",
"&",
"v4c",
".",
"FLAG_CG_BUS_EVENT",
":",
"stacked",
".",
"groups",
"[",
"-",
"1",
"]",
".",
"channel_group",
".",
"flags",
"=",
"group",
".",
"channel_group",
".",
"flags",
"stacked",
".",
"groups",
"[",
"-",
"1",
"]",
".",
"channel_group",
".",
"acq_name",
"=",
"group",
".",
"channel_group",
".",
"acq_name",
"stacked",
".",
"groups",
"[",
"-",
"1",
"]",
".",
"channel_group",
".",
"acq_source",
"=",
"group",
".",
"channel_group",
".",
"acq_source",
"stacked",
".",
"groups",
"[",
"-",
"1",
"]",
".",
"channel_group",
".",
"comment",
"=",
"group",
".",
"channel_group",
".",
"comment",
"except",
"AttributeError",
":",
"pass",
"idx",
"+=",
"1",
"else",
":",
"master",
"=",
"mdf",
".",
"get_master",
"(",
"i",
",",
"fragment",
",",
"copy_master",
"=",
"False",
")",
"if",
"sync",
":",
"master",
"=",
"master",
"+",
"offset",
"if",
"len",
"(",
"master",
")",
":",
"signals",
"=",
"[",
"(",
"master",
",",
"None",
")",
"]",
"for",
"k",
",",
"j",
"in",
"enumerate",
"(",
"included_channels",
")",
":",
"sig",
"=",
"mdf",
".",
"get",
"(",
"group",
"=",
"i",
",",
"index",
"=",
"j",
",",
"data",
"=",
"fragment",
",",
"raw",
"=",
"True",
",",
"samples_only",
"=",
"True",
",",
"ignore_invalidation_bits",
"=",
"True",
",",
")",
"signals",
".",
"append",
"(",
"sig",
")",
"if",
"version",
"<",
"\"4.00\"",
":",
"encoding",
"=",
"encodings",
"[",
"k",
"]",
"samples",
"=",
"sig",
"[",
"0",
"]",
"if",
"encoding",
":",
"if",
"encoding",
"!=",
"\"latin-1\"",
":",
"if",
"encoding",
"==",
"\"utf-16-le\"",
":",
"samples",
"=",
"(",
"samples",
".",
"view",
"(",
"np",
".",
"uint16",
")",
".",
"byteswap",
"(",
")",
".",
"view",
"(",
"samples",
".",
"dtype",
")",
")",
"samples",
"=",
"encode",
"(",
"decode",
"(",
"samples",
",",
"\"utf-16-be\"",
")",
",",
"\"latin-1\"",
",",
")",
"else",
":",
"samples",
"=",
"encode",
"(",
"decode",
"(",
"samples",
",",
"encoding",
")",
",",
"\"latin-1\"",
")",
"sig",
".",
"samples",
"=",
"samples",
"if",
"signals",
":",
"stacked",
".",
"extend",
"(",
"cg_nr",
",",
"signals",
")",
"idx",
"+=",
"1",
"group",
".",
"record",
"=",
"None",
"stacked",
".",
"groups",
"[",
"-",
"1",
"]",
".",
"channel_group",
".",
"comment",
"=",
"(",
"f'stacked from channel group {i} of \"{mdf.name.parent}\"'",
")",
"if",
"callback",
":",
"callback",
"(",
"idx",
",",
"files_nr",
")",
"if",
"MDF",
".",
"_terminate",
":",
"return",
"return",
"stacked"
] |
stack several files and return the stacked *MDF* object
Parameters
----------
files : list | tuple
list of *MDF* file names or *MDF* instances
version : str
merged file version
sync : bool
sync the files based on the start of measurement, default *True*
Returns
-------
stacked : MDF
new *MDF* object with stacked channels
|
[
"stack",
"several",
"files",
"and",
"return",
"the",
"stacked",
"*",
"MDF",
"*",
"object"
] |
python
|
train
|
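`stack` is written without `self`, and in asammdf it is used as a static method on `MDF`: you pass file names or open `MDF` instances and get back a single object whose channel groups are concatenated, optionally time-aligned on the earliest start of measurement. A minimal usage sketch (the file names are hypothetical):

from asammdf import MDF

# stack two measurement files into one MDF 4.10 container; with sync=True
# each file's timestamps are shifted by its offset from the oldest start time
stacked = MDF.stack(['run_a.mf4', 'run_b.mf4'], version='4.10', sync=True)
stacked.save('stacked.mf4', overwrite=True)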
jhermann/rudiments
|
src/rudiments/reamed/click.py
|
https://github.com/jhermann/rudiments/blob/028ec7237946115c7b18e50557cbc5f6b824653e/src/rudiments/reamed/click.py#L72-L75
|
def get_command(self, ctx, cmd_name):
"""Map some aliases to their 'real' names."""
cmd_name = self.MAP.get(cmd_name, cmd_name)
return super(AliasedGroup, self).get_command(ctx, cmd_name)
|
[
"def",
"get_command",
"(",
"self",
",",
"ctx",
",",
"cmd_name",
")",
":",
"cmd_name",
"=",
"self",
".",
"MAP",
".",
"get",
"(",
"cmd_name",
",",
"cmd_name",
")",
"return",
"super",
"(",
"AliasedGroup",
",",
"self",
")",
".",
"get_command",
"(",
"ctx",
",",
"cmd_name",
")"
] |
Map some aliases to their 'real' names.
|
[
"Map",
"some",
"aliases",
"to",
"their",
"real",
"names",
"."
] |
python
|
train
|
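`AliasedGroup.get_command` resolves aliases through a class-level `MAP` dict before falling back to click's normal lookup. A minimal sketch of how such a group could be wired up (the command names here are illustrative, not taken from the rudiments codebase):

import click

class AliasedGroup(click.Group):
    MAP = {'ls': 'list'}  # alias -> real command name

    def get_command(self, ctx, cmd_name):
        cmd_name = self.MAP.get(cmd_name, cmd_name)
        return super(AliasedGroup, self).get_command(ctx, cmd_name)

@click.group(cls=AliasedGroup)
def cli():
    """Top-level command group."""

@cli.command('list')
def list_cmd():
    click.echo('listing...')

# invoking `cli ls` now dispatches to the `list` command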