repo (stringlengths 7–55) | path (stringlengths 4–223) | url (stringlengths 87–315) | code (stringlengths 75–104k) | code_tokens (list) | docstring (stringlengths 1–46.9k) | docstring_tokens (list) | language (stringclasses 1 value) | partition (stringclasses 3 values) | avg_line_len (float64 7.91–980) |
---|---|---|---|---|---|---|---|---|---|
Qiskit/qiskit-terra | qiskit/qobj/converters/lo_config.py | https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/qobj/converters/lo_config.py#L79-L102 | def get_meas_los(self, user_lo_config):
"""Embed default meas LO frequencies from backend and format them to list object.
If configured lo frequency is the same as default, this method returns `None`.
Args:
user_lo_config (LoConfig): A dictionary of LOs to format.
Returns:
list: A list of meas LOs.
Raises:
PulseError: when LO frequencies are missing.
"""
try:
_m_los = self.default_meas_los.copy()
except KeyError:
raise PulseError('Default measurement frequencies not exist.')
for channel, lo_freq in user_lo_config.meas_lo_dict().items():
_m_los[channel.index] = lo_freq
if _m_los == self.default_meas_los:
return None
return _m_los | [
"def",
"get_meas_los",
"(",
"self",
",",
"user_lo_config",
")",
":",
"try",
":",
"_m_los",
"=",
"self",
".",
"default_meas_los",
".",
"copy",
"(",
")",
"except",
"KeyError",
":",
"raise",
"PulseError",
"(",
"'Default measurement frequencies not exist.'",
")",
"for",
"channel",
",",
"lo_freq",
"in",
"user_lo_config",
".",
"meas_lo_dict",
"(",
")",
".",
"items",
"(",
")",
":",
"_m_los",
"[",
"channel",
".",
"index",
"]",
"=",
"lo_freq",
"if",
"_m_los",
"==",
"self",
".",
"default_meas_los",
":",
"return",
"None",
"return",
"_m_los"
]
| Embed default meas LO frequencies from backend and format them to list object.
If configured lo frequency is the same as default, this method returns `None`.
Args:
user_lo_config (LoConfig): A dictionary of LOs to format.
Returns:
list: A list of meas LOs.
Raises:
PulseError: when LO frequencies are missing. | [
"Embed",
"default",
"meas",
"LO",
"frequencies",
"from",
"backend",
"and",
"format",
"them",
"to",
"list",
"object",
".",
"If",
"configured",
"lo",
"frequency",
"is",
"the",
"same",
"as",
"default",
"this",
"method",
"returns",
"None",
"."
]
| python | test | 32.916667 |
idlesign/uwsgiconf | uwsgiconf/utils.py | https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/utils.py#L287-L302 | def cmd_log(self, reopen=False, rotate=False):
"""Allows managing of uWSGI log related stuff
:param bool reopen: Reopen log file. Could be required after third party rotation.
:param bool rotate: Trigger built-in log rotation.
"""
cmd = b''
if reopen:
cmd += b'l'
if rotate:
cmd += b'L'
return self.send_command(cmd) | [
"def",
"cmd_log",
"(",
"self",
",",
"reopen",
"=",
"False",
",",
"rotate",
"=",
"False",
")",
":",
"cmd",
"=",
"b''",
"if",
"reopen",
":",
"cmd",
"+=",
"b'l'",
"if",
"rotate",
":",
"cmd",
"+=",
"b'L'",
"return",
"self",
".",
"send_command",
"(",
"cmd",
")"
]
| Allows managing of uWSGI log related stuff
:param bool reopen: Reopen log file. Could be required after third party rotation.
:param bool rotate: Trigger built-in log rotation. | [
"Allows",
"managing",
"of",
"uWSGI",
"log",
"related",
"stuff"
]
| python | train | 24.625 |
J535D165/recordlinkage | recordlinkage/algorithms/indexing.py | https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/algorithms/indexing.py#L27-L44 | def random_pairs_with_replacement(n, shape, random_state=None):
"""make random record pairs"""
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
n_max = max_pairs(shape)
if n_max <= 0:
raise ValueError('n_max must be larger than 0')
# make random pairs
indices = random_state.randint(0, n_max, n)
if len(shape) == 1:
return _map_tril_1d_on_2d(indices, shape[0])
else:
return np.unravel_index(indices, shape) | [
"def",
"random_pairs_with_replacement",
"(",
"n",
",",
"shape",
",",
"random_state",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"random_state",
",",
"np",
".",
"random",
".",
"RandomState",
")",
":",
"random_state",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"random_state",
")",
"n_max",
"=",
"max_pairs",
"(",
"shape",
")",
"if",
"n_max",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"'n_max must be larger than 0'",
")",
"# make random pairs",
"indices",
"=",
"random_state",
".",
"randint",
"(",
"0",
",",
"n_max",
",",
"n",
")",
"if",
"len",
"(",
"shape",
")",
"==",
"1",
":",
"return",
"_map_tril_1d_on_2d",
"(",
"indices",
",",
"shape",
"[",
"0",
"]",
")",
"else",
":",
"return",
"np",
".",
"unravel_index",
"(",
"indices",
",",
"shape",
")"
]
| make random record pairs | [
"make",
"random",
"record",
"pairs"
]
| python | train | 28.666667 |
pytroll/trollimage | trollimage/image.py | https://github.com/pytroll/trollimage/blob/d35a7665ad475ff230e457085523e21f2cd3f454/trollimage/image.py#L915-L933 | def invert(self, invert=True):
"""Inverts all the channels of a image according to *invert*. If invert is a tuple or a list, elementwise
invertion is performed, otherwise all channels are inverted if *invert* is true (default).
Note: 'Inverting' means that black becomes white, and vice-versa, not that the values are negated !
"""
if(isinstance(invert, (tuple, list)) and
len(self.channels) != len(invert)):
raise ValueError(
"Number of channels and invert components differ.")
logger.debug("Applying invert with parameters %s", str(invert))
if isinstance(invert, (tuple, list)):
for i, chn in enumerate(self.channels):
if invert[i]:
self.channels[i] = 1 - chn
elif invert:
for i, chn in enumerate(self.channels):
self.channels[i] = 1 - chn | [
"def",
"invert",
"(",
"self",
",",
"invert",
"=",
"True",
")",
":",
"if",
"(",
"isinstance",
"(",
"invert",
",",
"(",
"tuple",
",",
"list",
")",
")",
"and",
"len",
"(",
"self",
".",
"channels",
")",
"!=",
"len",
"(",
"invert",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Number of channels and invert components differ.\"",
")",
"logger",
".",
"debug",
"(",
"\"Applying invert with parameters %s\"",
",",
"str",
"(",
"invert",
")",
")",
"if",
"isinstance",
"(",
"invert",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"for",
"i",
",",
"chn",
"in",
"enumerate",
"(",
"self",
".",
"channels",
")",
":",
"if",
"invert",
"[",
"i",
"]",
":",
"self",
".",
"channels",
"[",
"i",
"]",
"=",
"1",
"-",
"chn",
"elif",
"invert",
":",
"for",
"i",
",",
"chn",
"in",
"enumerate",
"(",
"self",
".",
"channels",
")",
":",
"self",
".",
"channels",
"[",
"i",
"]",
"=",
"1",
"-",
"chn"
]
| Inverts all the channels of a image according to *invert*. If invert is a tuple or a list, elementwise
invertion is performed, otherwise all channels are inverted if *invert* is true (default).
Note: 'Inverting' means that black becomes white, and vice-versa, not that the values are negated ! | [
"Inverts",
"all",
"the",
"channels",
"of",
"a",
"image",
"according",
"to",
"*",
"invert",
"*",
".",
"If",
"invert",
"is",
"a",
"tuple",
"or",
"a",
"list",
"elementwise",
"invertion",
"is",
"performed",
"otherwise",
"all",
"channels",
"are",
"inverted",
"if",
"*",
"invert",
"*",
"is",
"true",
"(",
"default",
")",
"."
]
| python | train | 47.578947 |
sendwithus/sendwithus_python | sendwithus/__init__.py | https://github.com/sendwithus/sendwithus_python/blob/8ae50d514febd44f7d9be3c838b4d92f99412832/sendwithus/__init__.py#L776-L806 | def execute(self, timeout=None):
"""Execute all currently queued batch commands"""
logger.debug(' > Batch API request (length %s)' % len(self._commands))
auth = self._build_http_auth()
headers = self._build_request_headers()
logger.debug('\tbatch headers: %s' % headers)
logger.debug('\tbatch command length: %s' % len(self._commands))
path = self._build_request_path(self.BATCH_ENDPOINT)
data = json.dumps(self._commands, cls=self._json_encoder)
r = requests.post(
path,
auth=auth,
headers=headers,
data=data,
timeout=(self.DEFAULT_TIMEOUT if timeout is None else timeout)
)
self._commands = []
logger.debug('\tresponse code:%s' % r.status_code)
try:
logger.debug('\tresponse: %s' % r.json())
except:
logger.debug('\tresponse: %s' % r.content)
return r | [
"def",
"execute",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"' > Batch API request (length %s)'",
"%",
"len",
"(",
"self",
".",
"_commands",
")",
")",
"auth",
"=",
"self",
".",
"_build_http_auth",
"(",
")",
"headers",
"=",
"self",
".",
"_build_request_headers",
"(",
")",
"logger",
".",
"debug",
"(",
"'\\tbatch headers: %s'",
"%",
"headers",
")",
"logger",
".",
"debug",
"(",
"'\\tbatch command length: %s'",
"%",
"len",
"(",
"self",
".",
"_commands",
")",
")",
"path",
"=",
"self",
".",
"_build_request_path",
"(",
"self",
".",
"BATCH_ENDPOINT",
")",
"data",
"=",
"json",
".",
"dumps",
"(",
"self",
".",
"_commands",
",",
"cls",
"=",
"self",
".",
"_json_encoder",
")",
"r",
"=",
"requests",
".",
"post",
"(",
"path",
",",
"auth",
"=",
"auth",
",",
"headers",
"=",
"headers",
",",
"data",
"=",
"data",
",",
"timeout",
"=",
"(",
"self",
".",
"DEFAULT_TIMEOUT",
"if",
"timeout",
"is",
"None",
"else",
"timeout",
")",
")",
"self",
".",
"_commands",
"=",
"[",
"]",
"logger",
".",
"debug",
"(",
"'\\tresponse code:%s'",
"%",
"r",
".",
"status_code",
")",
"try",
":",
"logger",
".",
"debug",
"(",
"'\\tresponse: %s'",
"%",
"r",
".",
"json",
"(",
")",
")",
"except",
":",
"logger",
".",
"debug",
"(",
"'\\tresponse: %s'",
"%",
"r",
".",
"content",
")",
"return",
"r"
]
| Execute all currently queued batch commands | [
"Execute",
"all",
"currently",
"queued",
"batch",
"commands"
]
| python | valid | 30.16129 |
thespacedoctor/polyglot | polyglot/ebook.py | https://github.com/thespacedoctor/polyglot/blob/98038d746aa67e343b73b3ccee1e02d31dab81ec/polyglot/ebook.py#L144-L186 | def get(self):
"""
*get the ebook object*
**Return:**
- ``ebook``
**Usage:**
See class docstring for usage
"""
self.log.debug('starting the ``get`` method')
if self.format == "epub":
if self.urlOrPath[:4] == "http" or self.urlOrPath[:4] == "www.":
ebook = self._url_to_epub()
elif ".docx" in self.urlOrPath:
ebook = self._docx_to_epub()
if self.format == "mobi":
if self.urlOrPath[:4] == "http" or self.urlOrPath[:4] == "www.":
epub = self._url_to_epub()
elif ".docx" in self.urlOrPath:
epub = self._docx_to_epub()
if not epub:
return None
ebook = self._epub_to_mobi(
epubPath=epub,
deleteEpub=False
)
tag(
log=self.log,
filepath=ebook,
tags=False,
rating=False,
wherefrom=self.url
)
self.log.debug('completed the ``get`` method')
return ebook | [
"def",
"get",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``get`` method'",
")",
"if",
"self",
".",
"format",
"==",
"\"epub\"",
":",
"if",
"self",
".",
"urlOrPath",
"[",
":",
"4",
"]",
"==",
"\"http\"",
"or",
"self",
".",
"urlOrPath",
"[",
":",
"4",
"]",
"==",
"\"www.\"",
":",
"ebook",
"=",
"self",
".",
"_url_to_epub",
"(",
")",
"elif",
"\".docx\"",
"in",
"self",
".",
"urlOrPath",
":",
"ebook",
"=",
"self",
".",
"_docx_to_epub",
"(",
")",
"if",
"self",
".",
"format",
"==",
"\"mobi\"",
":",
"if",
"self",
".",
"urlOrPath",
"[",
":",
"4",
"]",
"==",
"\"http\"",
"or",
"self",
".",
"urlOrPath",
"[",
":",
"4",
"]",
"==",
"\"www.\"",
":",
"epub",
"=",
"self",
".",
"_url_to_epub",
"(",
")",
"elif",
"\".docx\"",
"in",
"self",
".",
"urlOrPath",
":",
"epub",
"=",
"self",
".",
"_docx_to_epub",
"(",
")",
"if",
"not",
"epub",
":",
"return",
"None",
"ebook",
"=",
"self",
".",
"_epub_to_mobi",
"(",
"epubPath",
"=",
"epub",
",",
"deleteEpub",
"=",
"False",
")",
"tag",
"(",
"log",
"=",
"self",
".",
"log",
",",
"filepath",
"=",
"ebook",
",",
"tags",
"=",
"False",
",",
"rating",
"=",
"False",
",",
"wherefrom",
"=",
"self",
".",
"url",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``get`` method'",
")",
"return",
"ebook"
]
| *get the ebook object*
**Return:**
- ``ebook``
**Usage:**
See class docstring for usage | [
"*",
"get",
"the",
"ebook",
"object",
"*"
]
| python | train | 25.255814 |
josiah-wolf-oberholtzer/uqbar | uqbar/sphinx/style.py | https://github.com/josiah-wolf-oberholtzer/uqbar/blob/eca7fefebbbee1e2ae13bf5d6baa838be66b1db6/uqbar/sphinx/style.py#L22-L38 | def handle_class(signature_node, module, object_name, cache):
"""
Styles ``autoclass`` entries.
Adds ``abstract`` prefix to abstract classes.
"""
class_ = getattr(module, object_name, None)
if class_ is None:
return
if class_ not in cache:
cache[class_] = {}
attributes = inspect.classify_class_attrs(class_)
for attribute in attributes:
cache[class_][attribute.name] = attribute
if inspect.isabstract(class_):
emphasis = nodes.emphasis("abstract ", "abstract ", classes=["property"])
signature_node.insert(0, emphasis) | [
"def",
"handle_class",
"(",
"signature_node",
",",
"module",
",",
"object_name",
",",
"cache",
")",
":",
"class_",
"=",
"getattr",
"(",
"module",
",",
"object_name",
",",
"None",
")",
"if",
"class_",
"is",
"None",
":",
"return",
"if",
"class_",
"not",
"in",
"cache",
":",
"cache",
"[",
"class_",
"]",
"=",
"{",
"}",
"attributes",
"=",
"inspect",
".",
"classify_class_attrs",
"(",
"class_",
")",
"for",
"attribute",
"in",
"attributes",
":",
"cache",
"[",
"class_",
"]",
"[",
"attribute",
".",
"name",
"]",
"=",
"attribute",
"if",
"inspect",
".",
"isabstract",
"(",
"class_",
")",
":",
"emphasis",
"=",
"nodes",
".",
"emphasis",
"(",
"\"abstract \"",
",",
"\"abstract \"",
",",
"classes",
"=",
"[",
"\"property\"",
"]",
")",
"signature_node",
".",
"insert",
"(",
"0",
",",
"emphasis",
")"
]
| Styles ``autoclass`` entries.
Adds ``abstract`` prefix to abstract classes. | [
"Styles",
"autoclass",
"entries",
"."
]
| python | train | 35.058824 |
ethereum/py-evm | eth/vm/logic/arithmetic.py | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/logic/arithmetic.py#L96-L106 | def mulmod(computation: BaseComputation) -> None:
"""
Modulo Multiplication
"""
left, right, mod = computation.stack_pop(num_items=3, type_hint=constants.UINT256)
if mod == 0:
result = 0
else:
result = (left * right) % mod
computation.stack_push(result) | [
"def",
"mulmod",
"(",
"computation",
":",
"BaseComputation",
")",
"->",
"None",
":",
"left",
",",
"right",
",",
"mod",
"=",
"computation",
".",
"stack_pop",
"(",
"num_items",
"=",
"3",
",",
"type_hint",
"=",
"constants",
".",
"UINT256",
")",
"if",
"mod",
"==",
"0",
":",
"result",
"=",
"0",
"else",
":",
"result",
"=",
"(",
"left",
"*",
"right",
")",
"%",
"mod",
"computation",
".",
"stack_push",
"(",
"result",
")"
]
| Modulo Multiplication | [
"Modulo",
"Multiplication"
]
| python | train | 26.181818 |
ralphje/imagemounter | imagemounter/parser.py | https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/parser.py#L130-L139 | def init_volumes(self, single=None, only_mount=None, skip_mount=None, swallow_exceptions=True):
"""Detects volumes (as volume system or as single volume) in all disks and yields the volumes. This calls
:func:`Disk.init_volumes` on all disks and should be called after :func:`mount_disks`.
:rtype: generator"""
for disk in self.disks:
logger.info("Mounting volumes in {0}".format(disk))
for volume in disk.init_volumes(single, only_mount, skip_mount, swallow_exceptions=swallow_exceptions):
yield volume | [
"def",
"init_volumes",
"(",
"self",
",",
"single",
"=",
"None",
",",
"only_mount",
"=",
"None",
",",
"skip_mount",
"=",
"None",
",",
"swallow_exceptions",
"=",
"True",
")",
":",
"for",
"disk",
"in",
"self",
".",
"disks",
":",
"logger",
".",
"info",
"(",
"\"Mounting volumes in {0}\"",
".",
"format",
"(",
"disk",
")",
")",
"for",
"volume",
"in",
"disk",
".",
"init_volumes",
"(",
"single",
",",
"only_mount",
",",
"skip_mount",
",",
"swallow_exceptions",
"=",
"swallow_exceptions",
")",
":",
"yield",
"volume"
]
| Detects volumes (as volume system or as single volume) in all disks and yields the volumes. This calls
:func:`Disk.init_volumes` on all disks and should be called after :func:`mount_disks`.
:rtype: generator | [
"Detects",
"volumes",
"(",
"as",
"volume",
"system",
"or",
"as",
"single",
"volume",
")",
"in",
"all",
"disks",
"and",
"yields",
"the",
"volumes",
".",
"This",
"calls",
":",
"func",
":",
"Disk",
".",
"init_volumes",
"on",
"all",
"disks",
"and",
"should",
"be",
"called",
"after",
":",
"func",
":",
"mount_disks",
"."
]
| python | train | 56.7 |
hobson/aima | aima/utils.py | https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/utils.py#L521-L535 | def num_or_str(x):
"""The argument is a string; convert to a number if possible, or strip it.
>>> num_or_str('42')
42
>>> num_or_str(' 42x ')
'42x'
"""
if isnumber(x): return x
try:
return int(x)
except ValueError:
try:
return float(x)
except ValueError:
return str(x).strip() | [
"def",
"num_or_str",
"(",
"x",
")",
":",
"if",
"isnumber",
"(",
"x",
")",
":",
"return",
"x",
"try",
":",
"return",
"int",
"(",
"x",
")",
"except",
"ValueError",
":",
"try",
":",
"return",
"float",
"(",
"x",
")",
"except",
"ValueError",
":",
"return",
"str",
"(",
"x",
")",
".",
"strip",
"(",
")"
]
| The argument is a string; convert to a number if possible, or strip it.
>>> num_or_str('42')
42
>>> num_or_str(' 42x ')
'42x' | [
"The",
"argument",
"is",
"a",
"string",
";",
"convert",
"to",
"a",
"number",
"if",
"possible",
"or",
"strip",
"it",
".",
">>>",
"num_or_str",
"(",
"42",
")",
"42",
">>>",
"num_or_str",
"(",
"42x",
")",
"42x"
]
| python | valid | 23.066667 |
soravux/scoop | scoop/broker/brokerzmq.py | https://github.com/soravux/scoop/blob/d391dfa62f47e49d48328ee9cf08aa114256fd33/scoop/broker/brokerzmq.py#L184-L322 | def run(self):
"""Redirects messages until a shutdown message is received."""
while True:
if not self.task_socket.poll(-1):
continue
msg = self.task_socket.recv_multipart()
msg_type = msg[1]
if self.debug:
self.stats.append((time.time(),
msg_type,
len(self.unassigned_tasks),
len(self.available_workers)))
if time.time() - self.lastDebugTs > TIME_BETWEEN_PARTIALDEBUG:
self.writeDebug("debug/partial-{0}".format(
round(time.time(), -1)
))
self.lastDebugTs = time.time()
# New task inbound
if msg_type == TASK:
task_id = msg[2]
task = msg[3]
self.logger.debug("Received task {0}".format(task_id))
try:
address = self.available_workers.popleft()
except IndexError:
self.unassigned_tasks.append((task_id, task))
else:
self.logger.debug("Sent {0}".format(task_id))
self.task_socket.send_multipart([address, TASK, task])
self.assigned_tasks[address].add(task_id)
# Request for task
elif msg_type == REQUEST:
address = msg[0]
try:
task_id, task = self.unassigned_tasks.popleft()
except IndexError:
self.available_workers.append(address)
else:
self.logger.debug("Sent {0}".format(task_id))
self.task_socket.send_multipart([address, TASK, task])
self.assigned_tasks[address].add(task_id)
# A task status request is requested
elif msg_type == STATUS_REQ:
self.pruneAssignedTasks()
address = msg[0]
task_id = msg[2]
if any(task_id in x for x in self.assigned_tasks.values()):
status = STATUS_GIVEN
elif task_id in (x[0] for x in self.unassigned_tasks):
status = STATUS_HERE
else:
status = STATUS_NONE
self.task_socket.send_multipart([
address, STATUS_ANS, task_id, status
])
# A task status set (task done) is received
elif msg_type == STATUS_DONE:
address = msg[0]
task_id = msg[2]
try:
self.assigned_tasks[address].discard(task_id)
except KeyError:
pass
elif msg_type == STATUS_UPDATE:
address = msg[0]
try:
tasks_ids = pickle.loads(msg[2])
except:
self.logger.error("Could not unpickle status update message.")
else:
self.assigned_tasks[address] = tasks_ids
self.status_times[address] = time.time()
# Answer needing delivery
elif msg_type == REPLY:
self.logger.debug("Relaying")
destination = msg[-1]
origin = msg[0]
self.task_socket.send_multipart([destination] + msg[1:] + [origin])
# Shared variable to distribute
elif msg_type == VARIABLE:
address = msg[4]
value = msg[3]
key = msg[2]
self.shared_variables[address].update(
{key: value},
)
self.info_socket.send_multipart([VARIABLE,
key,
value,
address])
# Initialize the variables of a new worker
elif msg_type == INIT:
address = msg[0]
try:
self.processConfig(pickle.loads(msg[2]))
except pickle.PickleError:
continue
self.task_socket.send_multipart([
address,
pickle.dumps(self.config,
pickle.HIGHEST_PROTOCOL),
pickle.dumps(self.shared_variables,
pickle.HIGHEST_PROTOCOL),
])
self.task_socket.send_multipart([
address,
pickle.dumps(self.cluster_available,
pickle.HIGHEST_PROTOCOL),
])
# Add a given broker to its fellow list
elif msg_type == CONNECT:
try:
connect_brokers = pickle.loads(msg[2])
except pickle.PickleError:
self.logger.error("Could not understand CONNECT message.")
continue
self.logger.info("Connecting to other brokers...")
self.addBrokerList(connect_brokers)
# Shutdown of this broker was requested
elif msg_type == SHUTDOWN:
self.logger.debug("SHUTDOWN command received.")
self.shutdown()
break | [
"def",
"run",
"(",
"self",
")",
":",
"while",
"True",
":",
"if",
"not",
"self",
".",
"task_socket",
".",
"poll",
"(",
"-",
"1",
")",
":",
"continue",
"msg",
"=",
"self",
".",
"task_socket",
".",
"recv_multipart",
"(",
")",
"msg_type",
"=",
"msg",
"[",
"1",
"]",
"if",
"self",
".",
"debug",
":",
"self",
".",
"stats",
".",
"append",
"(",
"(",
"time",
".",
"time",
"(",
")",
",",
"msg_type",
",",
"len",
"(",
"self",
".",
"unassigned_tasks",
")",
",",
"len",
"(",
"self",
".",
"available_workers",
")",
")",
")",
"if",
"time",
".",
"time",
"(",
")",
"-",
"self",
".",
"lastDebugTs",
">",
"TIME_BETWEEN_PARTIALDEBUG",
":",
"self",
".",
"writeDebug",
"(",
"\"debug/partial-{0}\"",
".",
"format",
"(",
"round",
"(",
"time",
".",
"time",
"(",
")",
",",
"-",
"1",
")",
")",
")",
"self",
".",
"lastDebugTs",
"=",
"time",
".",
"time",
"(",
")",
"# New task inbound",
"if",
"msg_type",
"==",
"TASK",
":",
"task_id",
"=",
"msg",
"[",
"2",
"]",
"task",
"=",
"msg",
"[",
"3",
"]",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Received task {0}\"",
".",
"format",
"(",
"task_id",
")",
")",
"try",
":",
"address",
"=",
"self",
".",
"available_workers",
".",
"popleft",
"(",
")",
"except",
"IndexError",
":",
"self",
".",
"unassigned_tasks",
".",
"append",
"(",
"(",
"task_id",
",",
"task",
")",
")",
"else",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Sent {0}\"",
".",
"format",
"(",
"task_id",
")",
")",
"self",
".",
"task_socket",
".",
"send_multipart",
"(",
"[",
"address",
",",
"TASK",
",",
"task",
"]",
")",
"self",
".",
"assigned_tasks",
"[",
"address",
"]",
".",
"add",
"(",
"task_id",
")",
"# Request for task",
"elif",
"msg_type",
"==",
"REQUEST",
":",
"address",
"=",
"msg",
"[",
"0",
"]",
"try",
":",
"task_id",
",",
"task",
"=",
"self",
".",
"unassigned_tasks",
".",
"popleft",
"(",
")",
"except",
"IndexError",
":",
"self",
".",
"available_workers",
".",
"append",
"(",
"address",
")",
"else",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Sent {0}\"",
".",
"format",
"(",
"task_id",
")",
")",
"self",
".",
"task_socket",
".",
"send_multipart",
"(",
"[",
"address",
",",
"TASK",
",",
"task",
"]",
")",
"self",
".",
"assigned_tasks",
"[",
"address",
"]",
".",
"add",
"(",
"task_id",
")",
"# A task status request is requested",
"elif",
"msg_type",
"==",
"STATUS_REQ",
":",
"self",
".",
"pruneAssignedTasks",
"(",
")",
"address",
"=",
"msg",
"[",
"0",
"]",
"task_id",
"=",
"msg",
"[",
"2",
"]",
"if",
"any",
"(",
"task_id",
"in",
"x",
"for",
"x",
"in",
"self",
".",
"assigned_tasks",
".",
"values",
"(",
")",
")",
":",
"status",
"=",
"STATUS_GIVEN",
"elif",
"task_id",
"in",
"(",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"self",
".",
"unassigned_tasks",
")",
":",
"status",
"=",
"STATUS_HERE",
"else",
":",
"status",
"=",
"STATUS_NONE",
"self",
".",
"task_socket",
".",
"send_multipart",
"(",
"[",
"address",
",",
"STATUS_ANS",
",",
"task_id",
",",
"status",
"]",
")",
"# A task status set (task done) is received",
"elif",
"msg_type",
"==",
"STATUS_DONE",
":",
"address",
"=",
"msg",
"[",
"0",
"]",
"task_id",
"=",
"msg",
"[",
"2",
"]",
"try",
":",
"self",
".",
"assigned_tasks",
"[",
"address",
"]",
".",
"discard",
"(",
"task_id",
")",
"except",
"KeyError",
":",
"pass",
"elif",
"msg_type",
"==",
"STATUS_UPDATE",
":",
"address",
"=",
"msg",
"[",
"0",
"]",
"try",
":",
"tasks_ids",
"=",
"pickle",
".",
"loads",
"(",
"msg",
"[",
"2",
"]",
")",
"except",
":",
"self",
".",
"logger",
".",
"error",
"(",
"\"Could not unpickle status update message.\"",
")",
"else",
":",
"self",
".",
"assigned_tasks",
"[",
"address",
"]",
"=",
"tasks_ids",
"self",
".",
"status_times",
"[",
"address",
"]",
"=",
"time",
".",
"time",
"(",
")",
"# Answer needing delivery",
"elif",
"msg_type",
"==",
"REPLY",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Relaying\"",
")",
"destination",
"=",
"msg",
"[",
"-",
"1",
"]",
"origin",
"=",
"msg",
"[",
"0",
"]",
"self",
".",
"task_socket",
".",
"send_multipart",
"(",
"[",
"destination",
"]",
"+",
"msg",
"[",
"1",
":",
"]",
"+",
"[",
"origin",
"]",
")",
"# Shared variable to distribute",
"elif",
"msg_type",
"==",
"VARIABLE",
":",
"address",
"=",
"msg",
"[",
"4",
"]",
"value",
"=",
"msg",
"[",
"3",
"]",
"key",
"=",
"msg",
"[",
"2",
"]",
"self",
".",
"shared_variables",
"[",
"address",
"]",
".",
"update",
"(",
"{",
"key",
":",
"value",
"}",
",",
")",
"self",
".",
"info_socket",
".",
"send_multipart",
"(",
"[",
"VARIABLE",
",",
"key",
",",
"value",
",",
"address",
"]",
")",
"# Initialize the variables of a new worker",
"elif",
"msg_type",
"==",
"INIT",
":",
"address",
"=",
"msg",
"[",
"0",
"]",
"try",
":",
"self",
".",
"processConfig",
"(",
"pickle",
".",
"loads",
"(",
"msg",
"[",
"2",
"]",
")",
")",
"except",
"pickle",
".",
"PickleError",
":",
"continue",
"self",
".",
"task_socket",
".",
"send_multipart",
"(",
"[",
"address",
",",
"pickle",
".",
"dumps",
"(",
"self",
".",
"config",
",",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
",",
"pickle",
".",
"dumps",
"(",
"self",
".",
"shared_variables",
",",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
",",
"]",
")",
"self",
".",
"task_socket",
".",
"send_multipart",
"(",
"[",
"address",
",",
"pickle",
".",
"dumps",
"(",
"self",
".",
"cluster_available",
",",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
",",
"]",
")",
"# Add a given broker to its fellow list",
"elif",
"msg_type",
"==",
"CONNECT",
":",
"try",
":",
"connect_brokers",
"=",
"pickle",
".",
"loads",
"(",
"msg",
"[",
"2",
"]",
")",
"except",
"pickle",
".",
"PickleError",
":",
"self",
".",
"logger",
".",
"error",
"(",
"\"Could not understand CONNECT message.\"",
")",
"continue",
"self",
".",
"logger",
".",
"info",
"(",
"\"Connecting to other brokers...\"",
")",
"self",
".",
"addBrokerList",
"(",
"connect_brokers",
")",
"# Shutdown of this broker was requested",
"elif",
"msg_type",
"==",
"SHUTDOWN",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"SHUTDOWN command received.\"",
")",
"self",
".",
"shutdown",
"(",
")",
"break"
]
| Redirects messages until a shutdown message is received. | [
"Redirects",
"messages",
"until",
"a",
"shutdown",
"message",
"is",
"received",
"."
]
| python | train | 38.374101 |
maas/python-libmaas | maas/client/viscera/maas.py | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/maas.py#L310-L316 | async def set_config(cls, name: str, value):
"""Set a configuration value in MAAS.
Consult your MAAS server for recognised settings. Alternatively, use
the pre-canned functions also defined on this object.
"""
return await cls._handler.set_config(name=[name], value=[value]) | [
"async",
"def",
"set_config",
"(",
"cls",
",",
"name",
":",
"str",
",",
"value",
")",
":",
"return",
"await",
"cls",
".",
"_handler",
".",
"set_config",
"(",
"name",
"=",
"[",
"name",
"]",
",",
"value",
"=",
"[",
"value",
"]",
")"
]
| Set a configuration value in MAAS.
Consult your MAAS server for recognised settings. Alternatively, use
the pre-canned functions also defined on this object. | [
"Set",
"a",
"configuration",
"value",
"in",
"MAAS",
"."
]
| python | train | 44.142857 |
angr/angr | angr/project.py | https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/project.py#L701-L707 | def is_java_project(self):
"""
Indicates if the project's main binary is a Java Archive.
"""
if self._is_java_project is None:
self._is_java_project = isinstance(self.arch, ArchSoot)
return self._is_java_project | [
"def",
"is_java_project",
"(",
"self",
")",
":",
"if",
"self",
".",
"_is_java_project",
"is",
"None",
":",
"self",
".",
"_is_java_project",
"=",
"isinstance",
"(",
"self",
".",
"arch",
",",
"ArchSoot",
")",
"return",
"self",
".",
"_is_java_project"
]
| Indicates if the project's main binary is a Java Archive. | [
"Indicates",
"if",
"the",
"project",
"s",
"main",
"binary",
"is",
"a",
"Java",
"Archive",
"."
]
| python | train | 36.714286 |
sryza/spark-timeseries | python/sparkts/timeseriesrdd.py | https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/timeseriesrdd.py#L106-L115 | def to_instants_dataframe(self, sql_ctx):
"""
Returns a DataFrame of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing a DataFrame where each column
is a key form one of the rows in the TimeSeriesRDD.
"""
ssql_ctx = sql_ctx._ssql_ctx
jdf = self._jtsrdd.toInstantsDataFrame(ssql_ctx, -1)
return DataFrame(jdf, sql_ctx) | [
"def",
"to_instants_dataframe",
"(",
"self",
",",
"sql_ctx",
")",
":",
"ssql_ctx",
"=",
"sql_ctx",
".",
"_ssql_ctx",
"jdf",
"=",
"self",
".",
"_jtsrdd",
".",
"toInstantsDataFrame",
"(",
"ssql_ctx",
",",
"-",
"1",
")",
"return",
"DataFrame",
"(",
"jdf",
",",
"sql_ctx",
")"
]
| Returns a DataFrame of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing a DataFrame where each column
is a key form one of the rows in the TimeSeriesRDD. | [
"Returns",
"a",
"DataFrame",
"of",
"instants",
"each",
"a",
"horizontal",
"slice",
"of",
"this",
"TimeSeriesRDD",
"at",
"a",
"time",
"."
]
| python | train | 44.7 |
jic-dtool/dtool-info | dtool_info/dataset.py | https://github.com/jic-dtool/dtool-info/blob/3c6c7755f4c142e548bbfdf3b38230612fd4060a/dtool_info/dataset.py#L193-L231 | def summary(dataset_uri, format):
"""Report summary information about a dataset."""
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
creator_username = dataset._admin_metadata["creator_username"]
frozen_at = dataset._admin_metadata["frozen_at"]
num_items = len(dataset.identifiers)
tot_size = sum([dataset.item_properties(i)["size_in_bytes"]
for i in dataset.identifiers])
if format == "json":
json_lines = [
'{',
' "name": "{}",'.format(dataset.name),
' "uuid": "{}",'.format(dataset.uuid),
' "creator_username": "{}",'.format(creator_username),
' "number_of_items": {},'.format(num_items),
' "size_in_bytes": {},'.format(tot_size),
' "frozen_at": {}'.format(frozen_at),
'}',
]
formatted_json = "\n".join(json_lines)
colorful_json = pygments.highlight(
formatted_json,
pygments.lexers.JsonLexer(),
pygments.formatters.TerminalFormatter())
click.secho(colorful_json, nl=False)
else:
info = [
("name", dataset.name),
("uuid", dataset.uuid),
("creator_username", creator_username),
("number_of_items", str(num_items)),
("size", sizeof_fmt(tot_size).strip()),
("frozen_at", date_fmt(frozen_at)),
]
for key, value in info:
click.secho("{}: ".format(key), nl=False)
click.secho(value, fg="green") | [
"def",
"summary",
"(",
"dataset_uri",
",",
"format",
")",
":",
"dataset",
"=",
"dtoolcore",
".",
"DataSet",
".",
"from_uri",
"(",
"dataset_uri",
")",
"creator_username",
"=",
"dataset",
".",
"_admin_metadata",
"[",
"\"creator_username\"",
"]",
"frozen_at",
"=",
"dataset",
".",
"_admin_metadata",
"[",
"\"frozen_at\"",
"]",
"num_items",
"=",
"len",
"(",
"dataset",
".",
"identifiers",
")",
"tot_size",
"=",
"sum",
"(",
"[",
"dataset",
".",
"item_properties",
"(",
"i",
")",
"[",
"\"size_in_bytes\"",
"]",
"for",
"i",
"in",
"dataset",
".",
"identifiers",
"]",
")",
"if",
"format",
"==",
"\"json\"",
":",
"json_lines",
"=",
"[",
"'{'",
",",
"' \"name\": \"{}\",'",
".",
"format",
"(",
"dataset",
".",
"name",
")",
",",
"' \"uuid\": \"{}\",'",
".",
"format",
"(",
"dataset",
".",
"uuid",
")",
",",
"' \"creator_username\": \"{}\",'",
".",
"format",
"(",
"creator_username",
")",
",",
"' \"number_of_items\": {},'",
".",
"format",
"(",
"num_items",
")",
",",
"' \"size_in_bytes\": {},'",
".",
"format",
"(",
"tot_size",
")",
",",
"' \"frozen_at\": {}'",
".",
"format",
"(",
"frozen_at",
")",
",",
"'}'",
",",
"]",
"formatted_json",
"=",
"\"\\n\"",
".",
"join",
"(",
"json_lines",
")",
"colorful_json",
"=",
"pygments",
".",
"highlight",
"(",
"formatted_json",
",",
"pygments",
".",
"lexers",
".",
"JsonLexer",
"(",
")",
",",
"pygments",
".",
"formatters",
".",
"TerminalFormatter",
"(",
")",
")",
"click",
".",
"secho",
"(",
"colorful_json",
",",
"nl",
"=",
"False",
")",
"else",
":",
"info",
"=",
"[",
"(",
"\"name\"",
",",
"dataset",
".",
"name",
")",
",",
"(",
"\"uuid\"",
",",
"dataset",
".",
"uuid",
")",
",",
"(",
"\"creator_username\"",
",",
"creator_username",
")",
",",
"(",
"\"number_of_items\"",
",",
"str",
"(",
"num_items",
")",
")",
",",
"(",
"\"size\"",
",",
"sizeof_fmt",
"(",
"tot_size",
")",
".",
"strip",
"(",
")",
")",
",",
"(",
"\"frozen_at\"",
",",
"date_fmt",
"(",
"frozen_at",
")",
")",
",",
"]",
"for",
"key",
",",
"value",
"in",
"info",
":",
"click",
".",
"secho",
"(",
"\"{}: \"",
".",
"format",
"(",
"key",
")",
",",
"nl",
"=",
"False",
")",
"click",
".",
"secho",
"(",
"value",
",",
"fg",
"=",
"\"green\"",
")"
]
| Report summary information about a dataset. | [
"Report",
"summary",
"information",
"about",
"a",
"dataset",
"."
]
| python | train | 38.615385 |
MolSSI-BSE/basis_set_exchange | basis_set_exchange/validator.py | https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/validator.py#L106-L128 | def validate_file(file_type, file_path):
"""
Validates a file against a schema
Parameters
----------
file_type : str
Type of file to read. May be 'component', 'element', 'table', or 'references'
file_path:
Full path to the file to be validated
Raises
------
RuntimeError
If the file_type is not valid (and/or a schema doesn't exist)
ValidationError
If the given file does not pass validation
FileNotFoundError
If the file given by file_path doesn't exist
"""
file_data = fileio._read_plain_json(file_path, False)
validate_data(file_type, file_data) | [
"def",
"validate_file",
"(",
"file_type",
",",
"file_path",
")",
":",
"file_data",
"=",
"fileio",
".",
"_read_plain_json",
"(",
"file_path",
",",
"False",
")",
"validate_data",
"(",
"file_type",
",",
"file_data",
")"
]
| Validates a file against a schema
Parameters
----------
file_type : str
Type of file to read. May be 'component', 'element', 'table', or 'references'
file_path:
Full path to the file to be validated
Raises
------
RuntimeError
If the file_type is not valid (and/or a schema doesn't exist)
ValidationError
If the given file does not pass validation
FileNotFoundError
If the file given by file_path doesn't exist | [
"Validates",
"a",
"file",
"against",
"a",
"schema"
]
| python | train | 27.173913 |
wmayner/pyphi | pyphi/distance.py | https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/distance.py#L181-L192 | def kld(d1, d2):
"""Return the Kullback-Leibler Divergence (KLD) between two distributions.
Args:
d1 (np.ndarray): The first distribution.
d2 (np.ndarray): The second distribution.
Returns:
float: The KLD of ``d1`` from ``d2``.
"""
d1, d2 = flatten(d1), flatten(d2)
return entropy(d1, d2, 2.0) | [
"def",
"kld",
"(",
"d1",
",",
"d2",
")",
":",
"d1",
",",
"d2",
"=",
"flatten",
"(",
"d1",
")",
",",
"flatten",
"(",
"d2",
")",
"return",
"entropy",
"(",
"d1",
",",
"d2",
",",
"2.0",
")"
]
| Return the Kullback-Leibler Divergence (KLD) between two distributions.
Args:
d1 (np.ndarray): The first distribution.
d2 (np.ndarray): The second distribution.
Returns:
float: The KLD of ``d1`` from ``d2``. | [
"Return",
"the",
"Kullback",
"-",
"Leibler",
"Divergence",
"(",
"KLD",
")",
"between",
"two",
"distributions",
"."
]
| python | train | 27.666667 |
UCBerkeleySETI/blimpy | blimpy/guppi.py | https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/guppi.py#L442-L459 | def cmd_tool(args=None):
""" Command line tool for plotting and viewing info on guppi raw files """
from argparse import ArgumentParser
parser = ArgumentParser(description="Command line utility for creating spectra from GuppiRaw files.")
parser.add_argument('filename', type=str, help='Name of file to read')
parser.add_argument('-o', dest='outdir', type=str, default='./', help='output directory for PNG files')
args = parser.parse_args()
r = GuppiRaw(args.filename)
r.print_stats()
bname = os.path.splitext(os.path.basename(args.filename))[0]
bname = os.path.join(args.outdir, bname)
r.plot_histogram(filename="%s_hist.png" % bname)
r.plot_spectrum(filename="%s_spec.png" % bname) | [
"def",
"cmd_tool",
"(",
"args",
"=",
"None",
")",
":",
"from",
"argparse",
"import",
"ArgumentParser",
"parser",
"=",
"ArgumentParser",
"(",
"description",
"=",
"\"Command line utility for creating spectra from GuppiRaw files.\"",
")",
"parser",
".",
"add_argument",
"(",
"'filename'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'Name of file to read'",
")",
"parser",
".",
"add_argument",
"(",
"'-o'",
",",
"dest",
"=",
"'outdir'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'./'",
",",
"help",
"=",
"'output directory for PNG files'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"r",
"=",
"GuppiRaw",
"(",
"args",
".",
"filename",
")",
"r",
".",
"print_stats",
"(",
")",
"bname",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"args",
".",
"filename",
")",
")",
"[",
"0",
"]",
"bname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"outdir",
",",
"bname",
")",
"r",
".",
"plot_histogram",
"(",
"filename",
"=",
"\"%s_hist.png\"",
"%",
"bname",
")",
"r",
".",
"plot_spectrum",
"(",
"filename",
"=",
"\"%s_spec.png\"",
"%",
"bname",
")"
]
| Command line tool for plotting and viewing info on guppi raw files | [
"Command",
"line",
"tool",
"for",
"plotting",
"and",
"viewing",
"info",
"on",
"guppi",
"raw",
"files"
]
| python | test | 39.888889 |
novopl/peltak | src/peltak/logic/lint.py | https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/logic/lint.py#L118-L152 | def run(self):
# type: () -> bool
""" Run all linters and report results.
Returns:
bool: **True** if all checks were successful, **False** otherwise.
"""
with util.timed_block() as t:
files = self._collect_files()
log.info("Collected <33>{} <32>files in <33>{}s".format(
len(files), t.elapsed_s
))
if self.verbose:
for p in files:
log.info(" <0>{}", p)
# No files to lint - return success if empty runs are allowed.
if not files:
return self.allow_empty
with util.timed_block() as t:
results = self._run_checks(files)
log.info("Code checked in <33>{}s", t.elapsed_s)
success = True
for name, retcodes in results.items():
if any(x != 0 for x in retcodes):
success = False
log.err("<35>{} <31>failed with: <33>{}".format(
name, retcodes
))
return success | [
"def",
"run",
"(",
"self",
")",
":",
"# type: () -> bool",
"with",
"util",
".",
"timed_block",
"(",
")",
"as",
"t",
":",
"files",
"=",
"self",
".",
"_collect_files",
"(",
")",
"log",
".",
"info",
"(",
"\"Collected <33>{} <32>files in <33>{}s\"",
".",
"format",
"(",
"len",
"(",
"files",
")",
",",
"t",
".",
"elapsed_s",
")",
")",
"if",
"self",
".",
"verbose",
":",
"for",
"p",
"in",
"files",
":",
"log",
".",
"info",
"(",
"\" <0>{}\"",
",",
"p",
")",
"# No files to lint - return success if empty runs are allowed.",
"if",
"not",
"files",
":",
"return",
"self",
".",
"allow_empty",
"with",
"util",
".",
"timed_block",
"(",
")",
"as",
"t",
":",
"results",
"=",
"self",
".",
"_run_checks",
"(",
"files",
")",
"log",
".",
"info",
"(",
"\"Code checked in <33>{}s\"",
",",
"t",
".",
"elapsed_s",
")",
"success",
"=",
"True",
"for",
"name",
",",
"retcodes",
"in",
"results",
".",
"items",
"(",
")",
":",
"if",
"any",
"(",
"x",
"!=",
"0",
"for",
"x",
"in",
"retcodes",
")",
":",
"success",
"=",
"False",
"log",
".",
"err",
"(",
"\"<35>{} <31>failed with: <33>{}\"",
".",
"format",
"(",
"name",
",",
"retcodes",
")",
")",
"return",
"success"
]
| Run all linters and report results.
Returns:
bool: **True** if all checks were successful, **False** otherwise. | [
"Run",
"all",
"linters",
"and",
"report",
"results",
"."
]
| python | train | 28.971429 |
juju/charm-helpers | charmhelpers/contrib/openstack/utils.py | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/utils.py#L486-L502 | def get_os_version_package(pkg, fatal=True):
'''Derive OpenStack version number from an installed package.'''
codename = get_os_codename_package(pkg, fatal=fatal)
if not codename:
return None
if 'swift' in pkg:
vers_map = SWIFT_CODENAMES
for cname, version in six.iteritems(vers_map):
if cname == codename:
return version[-1]
else:
vers_map = OPENSTACK_CODENAMES
for version, cname in six.iteritems(vers_map):
if cname == codename:
return version | [
"def",
"get_os_version_package",
"(",
"pkg",
",",
"fatal",
"=",
"True",
")",
":",
"codename",
"=",
"get_os_codename_package",
"(",
"pkg",
",",
"fatal",
"=",
"fatal",
")",
"if",
"not",
"codename",
":",
"return",
"None",
"if",
"'swift'",
"in",
"pkg",
":",
"vers_map",
"=",
"SWIFT_CODENAMES",
"for",
"cname",
",",
"version",
"in",
"six",
".",
"iteritems",
"(",
"vers_map",
")",
":",
"if",
"cname",
"==",
"codename",
":",
"return",
"version",
"[",
"-",
"1",
"]",
"else",
":",
"vers_map",
"=",
"OPENSTACK_CODENAMES",
"for",
"version",
",",
"cname",
"in",
"six",
".",
"iteritems",
"(",
"vers_map",
")",
":",
"if",
"cname",
"==",
"codename",
":",
"return",
"version"
]
| Derive OpenStack version number from an installed package. | [
"Derive",
"OpenStack",
"version",
"number",
"from",
"an",
"installed",
"package",
"."
]
| python | train | 32.235294 |
arindampradhan/cheesy | cheesy/ch.py | https://github.com/arindampradhan/cheesy/blob/a7dbc90bba551a562644b1563c595d4ac38f15ed/cheesy/ch.py#L97-L120 | def _release_info(jsn,VERSION):
"""Gives information about a particular package version."""
try:
release_point = jsn['releases'][VERSION][0]
except KeyError:
print "\033[91m\033[1mError: Release not found."
exit(1)
python_version = release_point['python_version']
filename = release_point['filename']
md5 = release_point['md5_digest']
download_url_for_release = release_point['url']
download_num_for_release = release_point['downloads']
download_size_for_release = _sizeof_fmt(int(release_point['size']))
print """
\033[1m\033[1m \033[4mPACKAGE VERSION INFO\033[0m
\033[1m md5 :\033[0m \033[93m%s \033[0m
\033[1m python version :\033[0m \033[93m%s \033[0m
\033[1m download url :\033[0m \033[93m%s \033[0m
\033[1m download number :\033[0m \033[93m%s \033[0m
\033[1m size :\033[0m \033[93m%s \033[0m
\033[1m filename :\033[0m \033[93m%s \033[0m
"""%(md5,python_version,download_url_for_release,\
download_num_for_release,download_size_for_release,filename) | [
"def",
"_release_info",
"(",
"jsn",
",",
"VERSION",
")",
":",
"try",
":",
"release_point",
"=",
"jsn",
"[",
"'releases'",
"]",
"[",
"VERSION",
"]",
"[",
"0",
"]",
"except",
"KeyError",
":",
"print",
"\"\\033[91m\\033[1mError: Release not found.\"",
"exit",
"(",
"1",
")",
"python_version",
"=",
"release_point",
"[",
"'python_version'",
"]",
"filename",
"=",
"release_point",
"[",
"'filename'",
"]",
"md5",
"=",
"release_point",
"[",
"'md5_digest'",
"]",
"download_url_for_release",
"=",
"release_point",
"[",
"'url'",
"]",
"download_num_for_release",
"=",
"release_point",
"[",
"'downloads'",
"]",
"download_size_for_release",
"=",
"_sizeof_fmt",
"(",
"int",
"(",
"release_point",
"[",
"'size'",
"]",
")",
")",
"print",
"\"\"\"\n\t\\033[1m\\033[1m \\033[4mPACKAGE VERSION INFO\\033[0m\n\n\t\\033[1m\tmd5 :\\033[0m \\033[93m%s \\033[0m\n\t\\033[1m\tpython version :\\033[0m \\033[93m%s \\033[0m\t\n\t\\033[1m\tdownload url :\\033[0m \\033[93m%s \\033[0m\t\n\t\\033[1m\tdownload number :\\033[0m \\033[93m%s \\033[0m\n\t\\033[1m\tsize :\\033[0m \\033[93m%s \\033[0m\t\n\t\\033[1m\tfilename :\\033[0m \\033[93m%s \\033[0m\t\n\t\"\"\"",
"%",
"(",
"md5",
",",
"python_version",
",",
"download_url_for_release",
",",
"download_num_for_release",
",",
"download_size_for_release",
",",
"filename",
")"
]
| Gives information about a particular package version. | [
"Gives",
"information",
"about",
"a",
"particular",
"package",
"version",
"."
]
| python | train | 43.958333 |
bcbio/bcbio-nextgen | bcbio/qc/qualimap.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qualimap.py#L131-L139 | def _parse_qualimap_globals_inregion(table):
"""Retrieve metrics from the global targeted region table.
"""
out = {}
for row in table.find_all("tr"):
col, val = [x.text for x in row.find_all("td")]
if col == "Mapped reads":
out.update(_parse_num_pct("%s (in regions)" % col, val))
return out | [
"def",
"_parse_qualimap_globals_inregion",
"(",
"table",
")",
":",
"out",
"=",
"{",
"}",
"for",
"row",
"in",
"table",
".",
"find_all",
"(",
"\"tr\"",
")",
":",
"col",
",",
"val",
"=",
"[",
"x",
".",
"text",
"for",
"x",
"in",
"row",
".",
"find_all",
"(",
"\"td\"",
")",
"]",
"if",
"col",
"==",
"\"Mapped reads\"",
":",
"out",
".",
"update",
"(",
"_parse_num_pct",
"(",
"\"%s (in regions)\"",
"%",
"col",
",",
"val",
")",
")",
"return",
"out"
]
| Retrieve metrics from the global targeted region table. | [
"Retrieve",
"metrics",
"from",
"the",
"global",
"targeted",
"region",
"table",
"."
]
| python | train | 36.777778 |
aestrivex/bctpy | bct/algorithms/modularity.py | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/modularity.py#L1043-L1171 | def modularity_louvain_und(W, gamma=1, hierarchy=False, seed=None):
'''
The optimal community structure is a subdivision of the network into
nonoverlapping groups of nodes in a way that maximizes the number of
within-group edges, and minimizes the number of between-group edges.
The modularity is a statistic that quantifies the degree to which the
network may be subdivided into such clearly delineated groups.
The Louvain algorithm is a fast and accurate community detection
algorithm (as of writing). The algorithm may also be used to detect
hierarchical community structure.
Parameters
----------
W : NxN np.ndarray
undirected weighted/binary connection matrix
gamma : float
resolution parameter. default value=1. Values 0 <= gamma < 1 detect
larger modules while gamma > 1 detects smaller modules.
hierarchy : bool
Enables hierarchical output. Defalut value=False
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
ci : Nx1 np.ndarray
refined community affiliation vector. If hierarchical output enabled,
it is an NxH np.ndarray instead with multiple iterations
Q : float
optimized modularity metric. If hierarchical output enabled, becomes
an Hx1 array of floats instead.
Notes
-----
Ci and Q may vary from run to run, due to heuristics in the
algorithm. Consequently, it may be worth to compare multiple runs.
'''
rng = get_rng(seed)
n = len(W) # number of nodes
s = np.sum(W) # weight of edges
h = 0 # hierarchy index
ci = []
ci.append(np.arange(n) + 1) # hierarchical module assignments
q = []
q.append(-1) # hierarchical modularity values
n0 = n
#knm = np.zeros((n,n))
# for j in np.xrange(n0+1):
# knm[:,j] = np.sum(w[;,
while True:
if h > 300:
raise BCTParamError('Modularity Infinite Loop Style B. Please '
'contact the developer with this error.')
k = np.sum(W, axis=0) # node degree
Km = k.copy() # module degree
Knm = W.copy() # node-to-module degree
m = np.arange(n) + 1 # initial module assignments
flag = True # flag for within-hierarchy search
it = 0
while flag:
it += 1
if it > 1000:
raise BCTParamError('Modularity Infinite Loop Style C. Please '
'contact the developer with this error.')
flag = False
# loop over nodes in random order
for i in rng.permutation(n):
ma = m[i] - 1
# algorithm condition
dQ = ((Knm[i, :] - Knm[i, ma] + W[i, i]) -
gamma * k[i] * (Km - Km[ma] + k[i]) / s)
dQ[ma] = 0
max_dq = np.max(dQ) # find maximal modularity increase
if max_dq > 1e-10: # if maximal increase positive
j = np.argmax(dQ) # take only one value
# print max_dq,j,dQ[j]
Knm[:, j] += W[:, i] # change node-to-module degrees
Knm[:, ma] -= W[:, i]
Km[j] += k[i] # change module degrees
Km[ma] -= k[i]
m[i] = j + 1 # reassign module
flag = True
_, m = np.unique(m, return_inverse=True) # new module assignments
# print m,h
m += 1
h += 1
ci.append(np.zeros((n0,)))
# for i,mi in enumerate(m): #loop through initial module assignments
for i in range(n):
# print i, m[i], n0, h, len(m), n
# ci[h][np.where(ci[h-1]==i+1)]=mi #assign new modules
ci[h][np.where(ci[h - 1] == i + 1)] = m[i]
n = np.max(m) # new number of modules
W1 = np.zeros((n, n)) # new weighted matrix
for i in range(n):
for j in range(i, n):
# pool weights of nodes in same module
wp = np.sum(W[np.ix_(m == i + 1, m == j + 1)])
W1[i, j] = wp
W1[j, i] = wp
W = W1
q.append(0)
# compute modularity
q[h] = np.trace(W) / s - gamma * np.sum(np.dot(W / s, W / s))
if q[h] - q[h - 1] < 1e-10: # if modularity does not increase
break
ci = np.array(ci, dtype=int)
if hierarchy:
ci = ci[1:-1]
q = q[1:-1]
return ci, q
else:
return ci[h - 1], q[h - 1] | [
"def",
"modularity_louvain_und",
"(",
"W",
",",
"gamma",
"=",
"1",
",",
"hierarchy",
"=",
"False",
",",
"seed",
"=",
"None",
")",
":",
"rng",
"=",
"get_rng",
"(",
"seed",
")",
"n",
"=",
"len",
"(",
"W",
")",
"# number of nodes",
"s",
"=",
"np",
".",
"sum",
"(",
"W",
")",
"# weight of edges",
"h",
"=",
"0",
"# hierarchy index",
"ci",
"=",
"[",
"]",
"ci",
".",
"append",
"(",
"np",
".",
"arange",
"(",
"n",
")",
"+",
"1",
")",
"# hierarchical module assignments",
"q",
"=",
"[",
"]",
"q",
".",
"append",
"(",
"-",
"1",
")",
"# hierarchical modularity values",
"n0",
"=",
"n",
"#knm = np.zeros((n,n))",
"# for j in np.xrange(n0+1):",
"# knm[:,j] = np.sum(w[;,",
"while",
"True",
":",
"if",
"h",
">",
"300",
":",
"raise",
"BCTParamError",
"(",
"'Modularity Infinite Loop Style B. Please '",
"'contact the developer with this error.'",
")",
"k",
"=",
"np",
".",
"sum",
"(",
"W",
",",
"axis",
"=",
"0",
")",
"# node degree",
"Km",
"=",
"k",
".",
"copy",
"(",
")",
"# module degree",
"Knm",
"=",
"W",
".",
"copy",
"(",
")",
"# node-to-module degree",
"m",
"=",
"np",
".",
"arange",
"(",
"n",
")",
"+",
"1",
"# initial module assignments",
"flag",
"=",
"True",
"# flag for within-hierarchy search",
"it",
"=",
"0",
"while",
"flag",
":",
"it",
"+=",
"1",
"if",
"it",
">",
"1000",
":",
"raise",
"BCTParamError",
"(",
"'Modularity Infinite Loop Style C. Please '",
"'contact the developer with this error.'",
")",
"flag",
"=",
"False",
"# loop over nodes in random order",
"for",
"i",
"in",
"rng",
".",
"permutation",
"(",
"n",
")",
":",
"ma",
"=",
"m",
"[",
"i",
"]",
"-",
"1",
"# algorithm condition",
"dQ",
"=",
"(",
"(",
"Knm",
"[",
"i",
",",
":",
"]",
"-",
"Knm",
"[",
"i",
",",
"ma",
"]",
"+",
"W",
"[",
"i",
",",
"i",
"]",
")",
"-",
"gamma",
"*",
"k",
"[",
"i",
"]",
"*",
"(",
"Km",
"-",
"Km",
"[",
"ma",
"]",
"+",
"k",
"[",
"i",
"]",
")",
"/",
"s",
")",
"dQ",
"[",
"ma",
"]",
"=",
"0",
"max_dq",
"=",
"np",
".",
"max",
"(",
"dQ",
")",
"# find maximal modularity increase",
"if",
"max_dq",
">",
"1e-10",
":",
"# if maximal increase positive",
"j",
"=",
"np",
".",
"argmax",
"(",
"dQ",
")",
"# take only one value",
"# print max_dq,j,dQ[j]",
"Knm",
"[",
":",
",",
"j",
"]",
"+=",
"W",
"[",
":",
",",
"i",
"]",
"# change node-to-module degrees",
"Knm",
"[",
":",
",",
"ma",
"]",
"-=",
"W",
"[",
":",
",",
"i",
"]",
"Km",
"[",
"j",
"]",
"+=",
"k",
"[",
"i",
"]",
"# change module degrees",
"Km",
"[",
"ma",
"]",
"-=",
"k",
"[",
"i",
"]",
"m",
"[",
"i",
"]",
"=",
"j",
"+",
"1",
"# reassign module",
"flag",
"=",
"True",
"_",
",",
"m",
"=",
"np",
".",
"unique",
"(",
"m",
",",
"return_inverse",
"=",
"True",
")",
"# new module assignments",
"# print m,h",
"m",
"+=",
"1",
"h",
"+=",
"1",
"ci",
".",
"append",
"(",
"np",
".",
"zeros",
"(",
"(",
"n0",
",",
")",
")",
")",
"# for i,mi in enumerate(m):\t#loop through initial module assignments",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"# print i, m[i], n0, h, len(m), n",
"# ci[h][np.where(ci[h-1]==i+1)]=mi\t#assign new modules",
"ci",
"[",
"h",
"]",
"[",
"np",
".",
"where",
"(",
"ci",
"[",
"h",
"-",
"1",
"]",
"==",
"i",
"+",
"1",
")",
"]",
"=",
"m",
"[",
"i",
"]",
"n",
"=",
"np",
".",
"max",
"(",
"m",
")",
"# new number of modules",
"W1",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
",",
"n",
")",
")",
"# new weighted matrix",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"for",
"j",
"in",
"range",
"(",
"i",
",",
"n",
")",
":",
"# pool weights of nodes in same module",
"wp",
"=",
"np",
".",
"sum",
"(",
"W",
"[",
"np",
".",
"ix_",
"(",
"m",
"==",
"i",
"+",
"1",
",",
"m",
"==",
"j",
"+",
"1",
")",
"]",
")",
"W1",
"[",
"i",
",",
"j",
"]",
"=",
"wp",
"W1",
"[",
"j",
",",
"i",
"]",
"=",
"wp",
"W",
"=",
"W1",
"q",
".",
"append",
"(",
"0",
")",
"# compute modularity",
"q",
"[",
"h",
"]",
"=",
"np",
".",
"trace",
"(",
"W",
")",
"/",
"s",
"-",
"gamma",
"*",
"np",
".",
"sum",
"(",
"np",
".",
"dot",
"(",
"W",
"/",
"s",
",",
"W",
"/",
"s",
")",
")",
"if",
"q",
"[",
"h",
"]",
"-",
"q",
"[",
"h",
"-",
"1",
"]",
"<",
"1e-10",
":",
"# if modularity does not increase",
"break",
"ci",
"=",
"np",
".",
"array",
"(",
"ci",
",",
"dtype",
"=",
"int",
")",
"if",
"hierarchy",
":",
"ci",
"=",
"ci",
"[",
"1",
":",
"-",
"1",
"]",
"q",
"=",
"q",
"[",
"1",
":",
"-",
"1",
"]",
"return",
"ci",
",",
"q",
"else",
":",
"return",
"ci",
"[",
"h",
"-",
"1",
"]",
",",
"q",
"[",
"h",
"-",
"1",
"]"
]
| The optimal community structure is a subdivision of the network into
nonoverlapping groups of nodes in a way that maximizes the number of
within-group edges, and minimizes the number of between-group edges.
The modularity is a statistic that quantifies the degree to which the
network may be subdivided into such clearly delineated groups.
The Louvain algorithm is a fast and accurate community detection
algorithm (as of writing). The algorithm may also be used to detect
hierarchical community structure.
Parameters
----------
W : NxN np.ndarray
undirected weighted/binary connection matrix
gamma : float
resolution parameter. default value=1. Values 0 <= gamma < 1 detect
larger modules while gamma > 1 detects smaller modules.
hierarchy : bool
Enables hierarchical output. Defalut value=False
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
ci : Nx1 np.ndarray
refined community affiliation vector. If hierarchical output enabled,
it is an NxH np.ndarray instead with multiple iterations
Q : float
optimized modularity metric. If hierarchical output enabled, becomes
an Hx1 array of floats instead.
Notes
-----
Ci and Q may vary from run to run, due to heuristics in the
algorithm. Consequently, it may be worth to compare multiple runs. | [
"The",
"optimal",
"community",
"structure",
"is",
"a",
"subdivision",
"of",
"the",
"network",
"into",
"nonoverlapping",
"groups",
"of",
"nodes",
"in",
"a",
"way",
"that",
"maximizes",
"the",
"number",
"of",
"within",
"-",
"group",
"edges",
"and",
"minimizes",
"the",
"number",
"of",
"between",
"-",
"group",
"edges",
".",
"The",
"modularity",
"is",
"a",
"statistic",
"that",
"quantifies",
"the",
"degree",
"to",
"which",
"the",
"network",
"may",
"be",
"subdivided",
"into",
"such",
"clearly",
"delineated",
"groups",
"."
]
| python | train | 35.697674 |
stephrdev/django-tapeforms | tapeforms/fieldsets.py | https://github.com/stephrdev/django-tapeforms/blob/255602de43777141f18afaf30669d7bdd4f7c323/tapeforms/fieldsets.py#L76-L97 | def visible_fields(self):
"""
Returns the reduced set of visible fields to output from the form.
This method respects the provided ``fields`` configuration _and_ exlcudes
all fields from the ``exclude`` configuration.
If no ``fields`` where provided when configuring this fieldset, all visible
fields minus the excluded fields will be returned.
:return: List of bound field instances or empty tuple.
"""
form_visible_fields = self.form.visible_fields()
if self.render_fields:
fields = self.render_fields
else:
fields = [field.name for field in form_visible_fields]
filtered_fields = [field for field in fields if field not in self.exclude_fields]
return [field for field in form_visible_fields if field.name in filtered_fields] | [
"def",
"visible_fields",
"(",
"self",
")",
":",
"form_visible_fields",
"=",
"self",
".",
"form",
".",
"visible_fields",
"(",
")",
"if",
"self",
".",
"render_fields",
":",
"fields",
"=",
"self",
".",
"render_fields",
"else",
":",
"fields",
"=",
"[",
"field",
".",
"name",
"for",
"field",
"in",
"form_visible_fields",
"]",
"filtered_fields",
"=",
"[",
"field",
"for",
"field",
"in",
"fields",
"if",
"field",
"not",
"in",
"self",
".",
"exclude_fields",
"]",
"return",
"[",
"field",
"for",
"field",
"in",
"form_visible_fields",
"if",
"field",
".",
"name",
"in",
"filtered_fields",
"]"
]
| Returns the reduced set of visible fields to output from the form.
This method respects the provided ``fields`` configuration _and_ excludes
all fields from the ``exclude`` configuration.
If no ``fields`` were provided when configuring this fieldset, all visible
fields minus the excluded fields will be returned.
:return: List of bound field instances or empty tuple. | [
"Returns",
"the",
"reduced",
"set",
"of",
"visible",
"fields",
"to",
"output",
"from",
"the",
"form",
"."
]
| python | train | 38.181818 |
marshmallow-code/apispec | src/apispec/ext/marshmallow/openapi.py | https://github.com/marshmallow-code/apispec/blob/e92ceffd12b2e392b8d199ed314bd2a7e6512dff/src/apispec/ext/marshmallow/openapi.py#L138-L157 | def map_to_openapi_type(self, *args):
"""Decorator to set mapping for custom fields.
``*args`` can be:
- a pair of the form ``(type, format)``
- a core marshmallow field type (in which case we reuse that type's mapping)
"""
if len(args) == 1 and args[0] in self.field_mapping:
openapi_type_field = self.field_mapping[args[0]]
elif len(args) == 2:
openapi_type_field = args
else:
raise TypeError("Pass core marshmallow field type or (type, fmt) pair.")
def inner(field_type):
self.field_mapping[field_type] = openapi_type_field
return field_type
return inner | [
"def",
"map_to_openapi_type",
"(",
"self",
",",
"*",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"1",
"and",
"args",
"[",
"0",
"]",
"in",
"self",
".",
"field_mapping",
":",
"openapi_type_field",
"=",
"self",
".",
"field_mapping",
"[",
"args",
"[",
"0",
"]",
"]",
"elif",
"len",
"(",
"args",
")",
"==",
"2",
":",
"openapi_type_field",
"=",
"args",
"else",
":",
"raise",
"TypeError",
"(",
"\"Pass core marshmallow field type or (type, fmt) pair.\"",
")",
"def",
"inner",
"(",
"field_type",
")",
":",
"self",
".",
"field_mapping",
"[",
"field_type",
"]",
"=",
"openapi_type_field",
"return",
"field_type",
"return",
"inner"
]
| Decorator to set mapping for custom fields.
``*args`` can be:
- a pair of the form ``(type, format)``
- a core marshmallow field type (in which case we reuse that type's mapping) | [
"Decorator",
"to",
"set",
"mapping",
"for",
"custom",
"fields",
"."
]
| python | train | 34.1 |
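A usage sketch for the decorator above; `converter` is assumed to be an instance of the class that defines map_to_openapi_type, and the field classes are hypothetical:

    from marshmallow import fields

    # Register an explicit (type, format) pair for a custom field.
    @converter.map_to_openapi_type('string', 'uuid')
    class UUIDField(fields.Field):
        pass

    # Or reuse the mapping of a core marshmallow field type.
    @converter.map_to_openapi_type(fields.String)
    class UpperCasedString(fields.String):
        pass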
AshleySetter/optoanalysis | optoanalysis/optoanalysis/thermo/thermo.py | https://github.com/AshleySetter/optoanalysis/blob/9d390acc834d70024d47b574aea14189a5a5714e/optoanalysis/optoanalysis/thermo/thermo.py#L71-L97 | def calc_hamiltonian(self, mass, omega_array):
"""
Calculates the standard (pot+kin) Hamiltonian of your system.
Parameters
----------
mass : float
The mass of the particle in kg
omega_array : array
array which represents omega at every point in your time trace
and should therefore have the same length as self.position_data
Requirements
------------
self.position_data : array
Already filtered for the degree of freedom of interest and converted into meters.
Returns
-------
Hamiltonian : array
The calculated Hamiltonian
"""
Kappa_t= mass*omega_array**2
self.E_pot = 0.5*Kappa_t*self.position_data**2
self.E_kin = 0.5*mass*(_np.insert(_np.diff(self.position_data), 0, (self.position_data[1]-self.position_data[0]))*self.SampleFreq)**2
self.Hamiltonian = self.E_pot + self.E_kin
return self.Hamiltonian | [
"def",
"calc_hamiltonian",
"(",
"self",
",",
"mass",
",",
"omega_array",
")",
":",
"Kappa_t",
"=",
"mass",
"*",
"omega_array",
"**",
"2",
"self",
".",
"E_pot",
"=",
"0.5",
"*",
"Kappa_t",
"*",
"self",
".",
"position_data",
"**",
"2",
"self",
".",
"E_kin",
"=",
"0.5",
"*",
"mass",
"*",
"(",
"_np",
".",
"insert",
"(",
"_np",
".",
"diff",
"(",
"self",
".",
"position_data",
")",
",",
"0",
",",
"(",
"self",
".",
"position_data",
"[",
"1",
"]",
"-",
"self",
".",
"position_data",
"[",
"0",
"]",
")",
")",
"*",
"self",
".",
"SampleFreq",
")",
"**",
"2",
"self",
".",
"Hamiltonian",
"=",
"self",
".",
"E_pot",
"+",
"self",
".",
"E_kin",
"return",
"self",
".",
"Hamiltonian"
]
| Calculates the standard (pot+kin) Hamiltonian of your system.
Parameters
----------
mass : float
The mass of the particle in kg
omega_array : array
array which represents omega at every point in your time trace
and should therefore have the same length as self.position_data
Requirements
------------
self.position_data : array
Already filtered for the degree of freedom of intrest and converted into meters.
Returns
-------
Hamiltonian : array
The calculated Hamiltonian | [
"Calculates",
"the",
"standard",
"(",
"pot",
"+",
"kin",
")",
"Hamiltonian",
"of",
"your",
"system",
".",
"Parameters",
"----------",
"mass",
":",
"float",
"The",
"mass",
"of",
"the",
"particle",
"in",
"kg",
"omega_array",
":",
"array",
"array",
"which",
"represents",
"omega",
"at",
"every",
"point",
"in",
"your",
"time",
"trace",
"and",
"should",
"therefore",
"have",
"the",
"same",
"length",
"as",
"self",
".",
"position_data",
"Requirements",
"------------",
"self",
".",
"position_data",
":",
"array",
"Already",
"filtered",
"for",
"the",
"degree",
"of",
"freedom",
"of",
"intrest",
"and",
"converted",
"into",
"meters",
"."
]
| python | train | 37.074074 |
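A sketch of calling the method above; `trace` stands in for an object exposing position_data (in metres) and SampleFreq, and the mass and trap frequency are assumed values:

    import numpy as np

    mass = 1e-18  # particle mass in kg (assumed)
    omega = 2 * np.pi * 60e3 * np.ones_like(trace.position_data)  # one omega per sample
    H = trace.calc_hamiltonian(mass, omega)
    # trace.E_pot and trace.E_kin are populated as side effects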
EventRegistry/event-registry-python | eventregistry/TopicPage.py | https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L224-L231 | def addKeyword(self, keyword, weight):
"""
add a relevant keyword to the topic page
@param keyword: keyword or phrase to be added
@param weight: importance of the provided keyword (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
self.topicPage["keywords"].append({"keyword": keyword, "wgt": weight}) | [
"def",
"addKeyword",
"(",
"self",
",",
"keyword",
",",
"weight",
")",
":",
"assert",
"isinstance",
"(",
"weight",
",",
"(",
"float",
",",
"int",
")",
")",
",",
"\"weight value has to be a positive or negative integer\"",
"self",
".",
"topicPage",
"[",
"\"keywords\"",
"]",
".",
"append",
"(",
"{",
"\"keyword\"",
":",
"keyword",
",",
"\"wgt\"",
":",
"weight",
"}",
")"
]
| add a relevant keyword to the topic page
@param keyword: keyword or phrase to be added
@param weight: importance of the provided keyword (typically in range 1 - 50) | [
"add",
"a",
"relevant",
"keyword",
"to",
"the",
"topic",
"page"
]
| python | train | 53.5 |
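A short sketch of the method above; `topic_page` is assumed to be a TopicPage instance:

    topic_page.addKeyword("machine learning", 30)  # strongly relevant phrase
    topic_page.addKeyword("press release", -10)    # negative weight down-ranks matches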
bitlabstudio/django-frequently | frequently/views.py | https://github.com/bitlabstudio/django-frequently/blob/93c76af62325afd1f09487dd1bb527fdd238ec8e/frequently/views.py#L28-L69 | def get_ordered_entries(self, queryset=False):
"""
Custom ordering. First we get the average views and rating for
the categories' entries. Second we create a rank by multiplying
both. Last, we sort categories by this rank from top to bottom.
Example:
- Cat_1
- Entry_1 (500 Views, Rating 2)
- Entry_2 (200 Views, Rating -4)
- Entry_3 (100 Views, Rating 3)
- Cat_2
- Entry_1 (200 Views, Rating 7)
- Entry_2 (50 Views, Rating 2)
Result:
Cat_1 has a rank of 88.88 (avg. views: 266.66, avg. rating: 0.33)
Cat_2 has a rank of 562.5 (avg. views: 125, avg. rating: 4.5)
Cat_2 will be displayed at the top. The algorithm is quality-oriented,
as you can see.
"""
if queryset:
self.queryset = queryset
else:
self.queryset = EntryCategory.objects.all()
if self.queryset:
for category in self.queryset:
entries = category.get_entries()
if entries:
amount_list = [e.amount_of_views for e in entries]
rating_list = [e.rating() for e in entries]
views_per_entry = fsum(amount_list) / len(amount_list)
rating_per_entry = fsum(rating_list) / len(rating_list)
category.last_rank = views_per_entry * rating_per_entry
category.save()
else:
self.queryset = self.queryset.exclude(pk=category.pk)
self.queryset = sorted(self.queryset, key=lambda c: c.last_rank,
reverse=True)
return self.queryset | [
"def",
"get_ordered_entries",
"(",
"self",
",",
"queryset",
"=",
"False",
")",
":",
"if",
"queryset",
":",
"self",
".",
"queryset",
"=",
"queryset",
"else",
":",
"self",
".",
"queryset",
"=",
"EntryCategory",
".",
"objects",
".",
"all",
"(",
")",
"if",
"self",
".",
"queryset",
":",
"for",
"category",
"in",
"self",
".",
"queryset",
":",
"entries",
"=",
"category",
".",
"get_entries",
"(",
")",
"if",
"entries",
":",
"amount_list",
"=",
"[",
"e",
".",
"amount_of_views",
"for",
"e",
"in",
"entries",
"]",
"rating_list",
"=",
"[",
"e",
".",
"rating",
"(",
")",
"for",
"e",
"in",
"entries",
"]",
"views_per_entry",
"=",
"fsum",
"(",
"amount_list",
")",
"/",
"len",
"(",
"amount_list",
")",
"rating_per_entry",
"=",
"fsum",
"(",
"rating_list",
")",
"/",
"len",
"(",
"rating_list",
")",
"category",
".",
"last_rank",
"=",
"views_per_entry",
"*",
"rating_per_entry",
"category",
".",
"save",
"(",
")",
"else",
":",
"self",
".",
"queryset",
"=",
"self",
".",
"queryset",
".",
"exclude",
"(",
"pk",
"=",
"category",
".",
"pk",
")",
"self",
".",
"queryset",
"=",
"sorted",
"(",
"self",
".",
"queryset",
",",
"key",
"=",
"lambda",
"c",
":",
"c",
".",
"last_rank",
",",
"reverse",
"=",
"True",
")",
"return",
"self",
".",
"queryset"
]
| Custom ordering. First we get the average views and rating for
the categories' entries. Second we create a rank by multiplying
both. Last, we sort categories by this rank from top to bottom.
Example:
- Cat_1
- Entry_1 (500 Views, Rating 2)
- Entry_2 (200 Views, Rating -4)
- Entry_3 (100 Views, Rating 3)
- Cat_2
- Entry_1 (200 Views, Rating 7)
- Entry_2 (50 Views, Rating 2)
Result:
Cat_1 has a rank of 88.88 (avg. views: 266.66, avg. rating: 0.33)
Cat_2 has a rank of 562.5 (avg. views: 125, avg. rating: 4.5)
Cat_2 will be displayed at the top. The algorithm is quality-oriented,
as you can see. | [
"Custom",
"ordering",
".",
"First",
"we",
"get",
"the",
"average",
"views",
"and",
"rating",
"for",
"the",
"categories",
"s",
"entries",
".",
"Second",
"we",
"created",
"a",
"rank",
"by",
"multiplying",
"both",
".",
"Last",
"we",
"sort",
"categories",
"by",
"this",
"rank",
"from",
"top",
"to",
"bottom",
"."
]
| python | train | 40.642857 |
rongcloud/server-sdk-python | rongcloud/message.py | https://github.com/rongcloud/server-sdk-python/blob/3daadd8b67c84cc5d2a9419e8d45fd69c9baf976/rongcloud/message.py#L319-L362 | def broadcast(self,
fromUserId,
objectName,
content,
pushContent=None,
pushData=None,
os=None):
"""
Broadcast message method (sends a message to all registered users of an application; offline users who meet the condition (bound mobile terminal) are sent a Push notification; a single message is at most 128k; the conversation type is SYSTEM; it can be sent at most once per hour and at most 3 times per day).
@param fromUserId: sender user Id. (required)
@param txtMessage: text message.
@param pushContent: defines the displayed Push content. If objectName is a RongCloud built-in message type, the user is guaranteed to receive a Push notification after sending. For a custom message, pushContent is the Push content displayed for that custom message; if omitted, the user receives no Push notification. (optional)
@param pushData: for iOS, attached to the payload of the Push notification; on Android clients the corresponding field of a received push message is named pushData. (optional)
@param os: send Push per operating system; a value of iOS sends Push to iOS users, Android sends Push to Android users; to send Push to all users, omit the os parameter. (optional)
@return code: return code, 200 means success.
@return errorMessage: error message.
"""
desc = {
"name": "CodeSuccessReslut",
"desc": " http 成功返回结果",
"fields": [{
"name": "code",
"type": "Integer",
"desc": "返回码,200 为正常。"
}, {
"name": "errorMessage",
"type": "String",
"desc": "错误信息。"
}]
}
r = self.call_api(
method=('API', 'POST', 'application/x-www-form-urlencoded'),
action='/message/broadcast.json',
params={
"fromUserId": fromUserId,
"objectName": objectName,
"content": content,
"pushContent": pushContent,
"pushData": pushData,
"os": os
})
return Response(r, desc) | [
"def",
"broadcast",
"(",
"self",
",",
"fromUserId",
",",
"objectName",
",",
"content",
",",
"pushContent",
"=",
"None",
",",
"pushData",
"=",
"None",
",",
"os",
"=",
"None",
")",
":",
"desc",
"=",
"{",
"\"name\"",
":",
"\"CodeSuccessReslut\"",
",",
"\"desc\"",
":",
"\" http 成功返回结果\",",
"",
"\"fields\"",
":",
"[",
"{",
"\"name\"",
":",
"\"code\"",
",",
"\"type\"",
":",
"\"Integer\"",
",",
"\"desc\"",
":",
"\"返回码,200 为正常。\"",
"}",
",",
"{",
"\"name\"",
":",
"\"errorMessage\"",
",",
"\"type\"",
":",
"\"String\"",
",",
"\"desc\"",
":",
"\"错误信息。\"",
"}",
"]",
"}",
"r",
"=",
"self",
".",
"call_api",
"(",
"method",
"=",
"(",
"'API'",
",",
"'POST'",
",",
"'application/x-www-form-urlencoded'",
")",
",",
"action",
"=",
"'/message/broadcast.json'",
",",
"params",
"=",
"{",
"\"fromUserId\"",
":",
"fromUserId",
",",
"\"objectName\"",
":",
"objectName",
",",
"\"content\"",
":",
"content",
",",
"\"pushContent\"",
":",
"pushContent",
",",
"\"pushData\"",
":",
"pushData",
",",
"\"os\"",
":",
"os",
"}",
")",
"return",
"Response",
"(",
"r",
",",
"desc",
")"
]
| Broadcast message method (sends a message to all registered users of an application; offline users who meet the condition (bound mobile terminal) are sent a Push notification; a single message is at most 128k; the conversation type is SYSTEM; it can be sent at most once per hour and at most 3 times per day).
@param fromUserId: sender user Id. (required)
@param txtMessage: text message.
@param pushContent: defines the displayed Push content. If objectName is a RongCloud built-in message type, the user is guaranteed to receive a Push notification after sending. For a custom message, pushContent is the Push content displayed for that custom message; if omitted, the user receives no Push notification. (optional)
@param pushData: for iOS, attached to the payload of the Push notification; on Android clients the corresponding field of a received push message is named pushData. (optional)
@param os: send Push per operating system; a value of iOS sends Push to iOS users, Android sends Push to Android users; to send Push to all users, omit the os parameter. (optional)
@return code: return code, 200 means success.
@return errorMessage: error message. | [
"发送广播消息方法(发送消息给一个应用下的所有注册用户,如用户未在线会对满足条件(绑定手机终端)的用户发送",
"Push",
"信息,单条消息最大",
"128k,会话类型为",
"SYSTEM。每小时只能发送",
"1",
"次,每天最多发送",
"3",
"次。)",
"方法"
]
| python | train | 37.431818 |
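A hedged sketch of calling the method above; `msg_api` stands for an instance of the containing message class, and the JSON shape of `content` for a built-in text message is an assumption:

    result = msg_api.broadcast(
        fromUserId='user_001',
        objectName='RC:TxtMsg',                   # RongCloud built-in text type
        content='{"content": "hello everyone"}',
        pushContent='hello everyone',
    )
    # `result` wraps the HTTP response; code 200 means success.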
delfick/harpoon | harpoon/actions.py | https://github.com/delfick/harpoon/blob/a2d39311d6127b7da2e15f40468bf320d598e461/harpoon/actions.py#L226-L243 | def delete_untagged(collector, **kwargs):
"""Find the untagged images and remove them"""
configuration = collector.configuration
docker_api = configuration["harpoon"].docker_api
images = docker_api.images()
found = False
for image in images:
if image["RepoTags"] == ["<none>:<none>"]:
found = True
image_id = image["Id"]
log.info("Deleting untagged image\thash=%s", image_id)
try:
docker_api.remove_image(image["Id"])
except DockerAPIError as error:
log.error("Failed to delete image\thash=%s\terror=%s", image_id, error)
if not found:
log.info("Didn't find any untagged images to delete!") | [
"def",
"delete_untagged",
"(",
"collector",
",",
"*",
"*",
"kwargs",
")",
":",
"configuration",
"=",
"collector",
".",
"configuration",
"docker_api",
"=",
"configuration",
"[",
"\"harpoon\"",
"]",
".",
"docker_api",
"images",
"=",
"docker_api",
".",
"images",
"(",
")",
"found",
"=",
"False",
"for",
"image",
"in",
"images",
":",
"if",
"image",
"[",
"\"RepoTags\"",
"]",
"==",
"[",
"\"<none>:<none>\"",
"]",
":",
"found",
"=",
"True",
"image_id",
"=",
"image",
"[",
"\"Id\"",
"]",
"log",
".",
"info",
"(",
"\"Deleting untagged image\\thash=%s\"",
",",
"image_id",
")",
"try",
":",
"docker_api",
".",
"remove_image",
"(",
"image",
"[",
"\"Id\"",
"]",
")",
"except",
"DockerAPIError",
"as",
"error",
":",
"log",
".",
"error",
"(",
"\"Failed to delete image\\thash=%s\\terror=%s\"",
",",
"image_id",
",",
"error",
")",
"if",
"not",
"found",
":",
"log",
".",
"info",
"(",
"\"Didn't find any untagged images to delete!\"",
")"
]
| Find the untagged images and remove them | [
"Find",
"the",
"untagged",
"images",
"and",
"remove",
"them"
]
| python | train | 39.444444 |
gem/oq-engine | openquake/hazardlib/gsim/dowrickrhoades_2005.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/dowrickrhoades_2005.py#L145-L190 | def _get_site_class(self, vs30, mmi_mean):
"""
Return site class flag for:
Class E - Very Soft Soil vs30 < 180
Class D - Deep or Soft Soil vs30 >= 180 and vs30 <= 360
Class C - Shallow Soil vs30 > 360 and vs30 <= 760
Class B - Rock vs30 > 760 and vs30 <= 1500
Class A - Strong Rock vs30 > 1500
The S site class is equal to
S = c1 if MMI <= 7
S = c1 - d *(MMI - 7.0) if 7<MMI<9.5
S = c2 if MMI >= 9.5
"""
if vs30[0] < 180:
c1 = 1.0
c2 = -0.25
d = 0.5
elif vs30[0] >= 180 and vs30[0] <= 360:
c1 = 0.5
c2 = -0.125
d = 0.25
elif vs30[0] > 360 and vs30[0] <= 760:
c1 = 0.
c2 = 0.
d = 0.
elif vs30[0] > 760 and vs30[0] <= 1500:
c1 = -0.5
c2 = 0.125
d = -0.25
elif vs30[0] > 1500:
c1 = -1.0
c2 = 0.25
d = -0.5
S = np.zeros_like(vs30)
for i in range(vs30.size):
if mmi_mean[i] <= 7.0:
S[i] += c1
elif mmi_mean[i] > 7 and mmi_mean[i] < 9.5:
S[i] += c1 - d * (mmi_mean[i] - 7.0)
else:
S[i] += c2
return S | [
"def",
"_get_site_class",
"(",
"self",
",",
"vs30",
",",
"mmi_mean",
")",
":",
"if",
"vs30",
"[",
"0",
"]",
"<",
"180",
":",
"c1",
"=",
"1.0",
"c2",
"=",
"-",
"0.25",
"d",
"=",
"0.5",
"elif",
"vs30",
"[",
"0",
"]",
">=",
"180",
"and",
"vs30",
"[",
"0",
"]",
"<=",
"360",
":",
"c1",
"=",
"0.5",
"c2",
"=",
"-",
"0.125",
"d",
"=",
"0.25",
"elif",
"vs30",
"[",
"0",
"]",
">",
"360",
"and",
"vs30",
"[",
"0",
"]",
"<=",
"760",
":",
"c1",
"=",
"0.",
"c2",
"=",
"0.",
"d",
"=",
"0.",
"elif",
"vs30",
"[",
"0",
"]",
">",
"760",
"and",
"vs30",
"[",
"0",
"]",
"<=",
"1500",
":",
"c1",
"=",
"-",
"0.5",
"c2",
"=",
"0.125",
"d",
"=",
"-",
"0.25",
"elif",
"vs30",
"[",
"0",
"]",
">",
"1500",
":",
"c1",
"=",
"-",
"1.0",
"c2",
"=",
"0.25",
"d",
"=",
"-",
"0.5",
"S",
"=",
"np",
".",
"zeros_like",
"(",
"vs30",
")",
"for",
"i",
"in",
"range",
"(",
"vs30",
".",
"size",
")",
":",
"if",
"mmi_mean",
"[",
"i",
"]",
"<=",
"7.0",
":",
"S",
"[",
"i",
"]",
"+=",
"c1",
"elif",
"mmi_mean",
"[",
"i",
"]",
">",
"7",
"and",
"mmi_mean",
"[",
"i",
"]",
"<",
"9.5",
":",
"S",
"[",
"i",
"]",
"+=",
"c1",
"-",
"d",
"*",
"(",
"mmi_mean",
"[",
"i",
"]",
"-",
"7.0",
")",
"else",
":",
"S",
"[",
"i",
"]",
"+=",
"c2",
"return",
"S"
]
| Return site class flag for:
Class E - Very Soft Soil vs30 < 180
Class D - Deep or Soft Soil vs30 >= 180 and vs30 <= 360
Class C - Shallow Soil vs30 > 360 and vs30 <= 760
Class B - Rock vs30 > 760 and vs30 <= 1500
Class A - Strong Rock vs30 > 1500
The S site class is equal to
S = c1 if MMI <= 7
S = c1 - d *(MMI - 7.0) if 7<MMI<9.5
S = c2 if MMI >= 9.5 | [
"Return",
"site",
"class",
"flag",
"for",
":",
"Class",
"E",
"-",
"Very",
"Soft",
"Soil",
"vs30",
"<",
"180",
"Class",
"D",
"-",
"Deep",
"or",
"Soft",
"Soil",
"vs30",
">",
"=",
"180",
"and",
"vs30",
"<",
"=",
"360",
"Class",
"C",
"-",
"Shallow",
"Soil",
"vs30",
">",
"360",
"and",
"vs30",
"<",
"=",
"760",
"Class",
"B",
"-",
"Rock",
"vs30",
">",
"760",
"and",
"vs30",
"<",
"=",
"1500",
"Class",
"A",
"-",
"Strong",
"Rock",
"vs30",
">",
"=",
"180",
"and",
"vs30",
"<",
"=",
"360",
"The",
"S",
"site",
"class",
"is",
"equal",
"to",
"S",
"=",
"c1",
"if",
"MMI",
"<",
"=",
"7",
"S",
"=",
"c1",
"-",
"d",
"*",
"(",
"MMI",
"-",
"7",
".",
"0",
")",
"if",
"7<MMI<9",
".",
"5",
"S",
"=",
"c2",
"if",
"MMI",
">",
"=",
"9",
".",
"5"
]
| python | train | 29.391304 |
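A quick sketch of the method above with synthetic inputs; `gmpe` is assumed to be an instance of the GMPE class that defines it:

    import numpy as np

    vs30 = np.full(3, 250.0)               # Class D (deep or soft soil) sites
    mmi_mean = np.array([6.5, 8.0, 10.0])
    S = gmpe._get_site_class(vs30, mmi_mean)
    # -> array([ 0.5, 0.25, -0.125]): c1, the interpolated value, then c2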
fastai/fastai | docs_src/nbval/nbdime_reporter.py | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/docs_src/nbval/nbdime_reporter.py#L76-L107 | def make_report(self, outcome):
"""Make report in form of two notebooks.
Use nbdime diff-web to present the difference between reference
cells and test cells.
"""
failures = self.getreports('failed')
if not failures:
return
for rep in failures:
# Check if this is a notebook node
msg = self._getfailureheadline(rep)
lines = rep.longrepr.splitlines()
if len(lines) > 1:
self.section(msg, lines[1])
self._outrep_summary(rep)
tmpdir = tempfile.mkdtemp()
try:
ref_file = os.path.join(tmpdir, 'reference.ipynb')
test_file = os.path.join(tmpdir, 'test_result.ipynb')
with io.open(ref_file, "w", encoding="utf8") as f:
nbformat.write(self.nb_ref, f)
with io.open(test_file, "w", encoding="utf8") as f:
nbformat.write(self.nb_test, f)
run_server(
port=0, # Run on random port
cwd=tmpdir,
closable=True,
on_port=lambda port: browse(
port, ref_file, test_file, None))
finally:
shutil.rmtree(tmpdir) | [
"def",
"make_report",
"(",
"self",
",",
"outcome",
")",
":",
"failures",
"=",
"self",
".",
"getreports",
"(",
"'failed'",
")",
"if",
"not",
"failures",
":",
"return",
"for",
"rep",
"in",
"failures",
":",
"# Check if this is a notebook node",
"msg",
"=",
"self",
".",
"_getfailureheadline",
"(",
"rep",
")",
"lines",
"=",
"rep",
".",
"longrepr",
".",
"splitlines",
"(",
")",
"if",
"len",
"(",
"lines",
")",
">",
"1",
":",
"self",
".",
"section",
"(",
"msg",
",",
"lines",
"[",
"1",
"]",
")",
"self",
".",
"_outrep_summary",
"(",
"rep",
")",
"tmpdir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"try",
":",
"ref_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmpdir",
",",
"'reference.ipynb'",
")",
"test_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmpdir",
",",
"'test_result.ipynb'",
")",
"with",
"io",
".",
"open",
"(",
"ref_file",
",",
"\"w\"",
",",
"encoding",
"=",
"\"utf8\"",
")",
"as",
"f",
":",
"nbformat",
".",
"write",
"(",
"self",
".",
"nb_ref",
",",
"f",
")",
"with",
"io",
".",
"open",
"(",
"test_file",
",",
"\"w\"",
",",
"encoding",
"=",
"\"utf8\"",
")",
"as",
"f",
":",
"nbformat",
".",
"write",
"(",
"self",
".",
"nb_test",
",",
"f",
")",
"run_server",
"(",
"port",
"=",
"0",
",",
"# Run on random port",
"cwd",
"=",
"tmpdir",
",",
"closable",
"=",
"True",
",",
"on_port",
"=",
"lambda",
"port",
":",
"browse",
"(",
"port",
",",
"ref_file",
",",
"test_file",
",",
"None",
")",
")",
"finally",
":",
"shutil",
".",
"rmtree",
"(",
"tmpdir",
")"
]
| Make a report in the form of two notebooks.
Use nbdime diff-web to present the difference between reference
cells and test cells. | [
"Make",
"report",
"in",
"form",
"of",
"two",
"notebooks",
"."
]
| python | train | 38.0625 |
hydpy-dev/hydpy | hydpy/core/filetools.py | https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/filetools.py#L469-L482 | def currentpath(self) -> str:
"""Absolute path of the current working directory.
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> from hydpy import repr_, TestIO
>>> with TestIO():
... filemanager.currentdir = 'testdir'
... repr_(filemanager.currentpath) # doctest: +ELLIPSIS
'...hydpy/tests/iotesting/projectname/basename/testdir'
"""
return os.path.join(self.basepath, self.currentdir) | [
"def",
"currentpath",
"(",
"self",
")",
"->",
"str",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"basepath",
",",
"self",
".",
"currentdir",
")"
]
| Absolute path of the current working directory.
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> from hydpy import repr_, TestIO
>>> with TestIO():
... filemanager.currentdir = 'testdir'
... repr_(filemanager.currentpath) # doctest: +ELLIPSIS
'...hydpy/tests/iotesting/projectname/basename/testdir' | [
"Absolute",
"path",
"of",
"the",
"current",
"working",
"directory",
"."
]
| python | train | 42.714286 |
ladybug-tools/ladybug | ladybug/designday.py | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/designday.py#L1289-L1294 | def from_analysis_period(cls, analysis_period, clearness=1,
daylight_savings_indicator='No'):
""""Initialize a OriginalClearSkyCondition from an analysis_period"""
_check_analysis_period(analysis_period)
return cls(analysis_period.st_month, analysis_period.st_day, clearness,
daylight_savings_indicator) | [
"def",
"from_analysis_period",
"(",
"cls",
",",
"analysis_period",
",",
"clearness",
"=",
"1",
",",
"daylight_savings_indicator",
"=",
"'No'",
")",
":",
"_check_analysis_period",
"(",
"analysis_period",
")",
"return",
"cls",
"(",
"analysis_period",
".",
"st_month",
",",
"analysis_period",
".",
"st_day",
",",
"clearness",
",",
"daylight_savings_indicator",
")"
]
| Initialize an OriginalClearSkyCondition from an analysis_period | [
"Initialize",
"a",
"OriginalClearSkyCondition",
"from",
"an",
"analysis_period"
]
| python | train | 61.666667 |
zhanglab/psamm | psamm/datasource/native.py | https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/datasource/native.py#L1212-L1224 | def parse_model_table_file(path, f):
"""Parse a file as a list of model reactions
Yields reaction IDs. Path can be given as a string or a context.
"""
for line in f:
line, _, comment = line.partition('#')
line = line.strip()
if line == '':
continue
yield line | [
"def",
"parse_model_table_file",
"(",
"path",
",",
"f",
")",
":",
"for",
"line",
"in",
"f",
":",
"line",
",",
"_",
",",
"comment",
"=",
"line",
".",
"partition",
"(",
"'#'",
")",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
"==",
"''",
":",
"continue",
"yield",
"line"
]
| Parse a file as a list of model reactions
Yields reaction IDs. Path can be given as a string or a context. | [
"Parse",
"a",
"file",
"as",
"a",
"list",
"of",
"model",
"reactions"
]
| python | train | 23.923077 |
PlaidWeb/Pushl | pushl/entries.py | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/entries.py#L108-L142 | async def get_entry(config, url):
""" Given an entry URL, return the entry
Arguments:
config -- the configuration
url -- the URL of the entry
Returns: 3-tuple of (current, previous, updated) """
previous = config.cache.get(
'entry', url,
schema_version=SCHEMA_VERSION) if config.cache else None
headers = previous.caching if previous else None
request = await utils.retry_get(config, url, headers=headers)
if not request or not request.success:
LOGGER.error("Could not get entry %s: %d", url,
request.status if request else -1)
return None, previous, False
# cache hit
if request.cached:
return previous, previous, False
current = Entry(request)
# Content updated
if config.cache:
config.cache.set('entry', url, current)
return current, previous, (not previous
or previous.digest != current.digest
or previous.status != current.status) | [
"async",
"def",
"get_entry",
"(",
"config",
",",
"url",
")",
":",
"previous",
"=",
"config",
".",
"cache",
".",
"get",
"(",
"'entry'",
",",
"url",
",",
"schema_version",
"=",
"SCHEMA_VERSION",
")",
"if",
"config",
".",
"cache",
"else",
"None",
"headers",
"=",
"previous",
".",
"caching",
"if",
"previous",
"else",
"None",
"request",
"=",
"await",
"utils",
".",
"retry_get",
"(",
"config",
",",
"url",
",",
"headers",
"=",
"headers",
")",
"if",
"not",
"request",
"or",
"not",
"request",
".",
"success",
":",
"LOGGER",
".",
"error",
"(",
"\"Could not get entry %s: %d\"",
",",
"url",
",",
"request",
".",
"status",
"if",
"request",
"else",
"-",
"1",
")",
"return",
"None",
",",
"previous",
",",
"False",
"# cache hit",
"if",
"request",
".",
"cached",
":",
"return",
"previous",
",",
"previous",
",",
"False",
"current",
"=",
"Entry",
"(",
"request",
")",
"# Content updated",
"if",
"config",
".",
"cache",
":",
"config",
".",
"cache",
".",
"set",
"(",
"'entry'",
",",
"url",
",",
"current",
")",
"return",
"current",
",",
"previous",
",",
"(",
"not",
"previous",
"or",
"previous",
".",
"digest",
"!=",
"current",
".",
"digest",
"or",
"previous",
".",
"status",
"!=",
"current",
".",
"status",
")"
]
| Given an entry URL, return the entry
Arguments:
config -- the configuration
url -- the URL of the entry
Returns: 3-tuple of (current, previous, updated) | [
"Given",
"an",
"entry",
"URL",
"return",
"the",
"entry"
]
| python | train | 28.628571 |
waqasbhatti/astrobase | astrobase/periodbase/spdm.py | https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/periodbase/spdm.py#L145-L182 | def _stellingwerf_pdm_worker(task):
'''
This is a parallel worker for the function below.
Parameters
----------
task : tuple
This is of the form below::
task[0] = times
task[1] = mags
task[2] = errs
task[3] = frequency
task[4] = binsize
task[5] = minbin
Returns
-------
theta_pdm : float
The theta value at the specified frequency. nan if the calculation
fails.
'''
times, mags, errs, frequency, binsize, minbin = task
try:
theta = stellingwerf_pdm_theta(times, mags, errs, frequency,
binsize=binsize, minbin=minbin)
return theta
except Exception as e:
return npnan | [
"def",
"_stellingwerf_pdm_worker",
"(",
"task",
")",
":",
"times",
",",
"mags",
",",
"errs",
",",
"frequency",
",",
"binsize",
",",
"minbin",
"=",
"task",
"try",
":",
"theta",
"=",
"stellingwerf_pdm_theta",
"(",
"times",
",",
"mags",
",",
"errs",
",",
"frequency",
",",
"binsize",
"=",
"binsize",
",",
"minbin",
"=",
"minbin",
")",
"return",
"theta",
"except",
"Exception",
"as",
"e",
":",
"return",
"npnan"
]
| This is a parallel worker for the function below.
Parameters
----------
task : tuple
This is of the form below::
task[0] = times
task[1] = mags
task[2] = errs
task[3] = frequency
task[4] = binsize
task[5] = minbin
Returns
-------
theta_pdm : float
The theta value at the specified frequency. nan if the calculation
fails. | [
"This",
"is",
"a",
"parallel",
"worker",
"for",
"the",
"function",
"below",
"."
]
| python | valid | 19.657895 |
PMBio/limix-backup | limix/varDecomp/varianceDecomposition.py | https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/varDecomp/varianceDecomposition.py#L371-L383 | def _init_params_default(self):
"""
Internal method for default parameter initialization
"""
# if there are some nan -> mean impute
Yimp = self.Y.copy()
Inan = sp.isnan(Yimp)
Yimp[Inan] = Yimp[~Inan].mean()
if self.P==1: C = sp.array([[Yimp.var()]])
else: C = sp.cov(Yimp.T)
C /= float(self.n_randEffs)
for ti in range(self.n_randEffs):
self.getTraitCovarFun(ti).setCovariance(C) | [
"def",
"_init_params_default",
"(",
"self",
")",
":",
"# if there are some nan -> mean impute",
"Yimp",
"=",
"self",
".",
"Y",
".",
"copy",
"(",
")",
"Inan",
"=",
"sp",
".",
"isnan",
"(",
"Yimp",
")",
"Yimp",
"[",
"Inan",
"]",
"=",
"Yimp",
"[",
"~",
"Inan",
"]",
".",
"mean",
"(",
")",
"if",
"self",
".",
"P",
"==",
"1",
":",
"C",
"=",
"sp",
".",
"array",
"(",
"[",
"[",
"Yimp",
".",
"var",
"(",
")",
"]",
"]",
")",
"else",
":",
"C",
"=",
"sp",
".",
"cov",
"(",
"Yimp",
".",
"T",
")",
"C",
"/=",
"float",
"(",
"self",
".",
"n_randEffs",
")",
"for",
"ti",
"in",
"range",
"(",
"self",
".",
"n_randEffs",
")",
":",
"self",
".",
"getTraitCovarFun",
"(",
"ti",
")",
".",
"setCovariance",
"(",
"C",
")"
]
| Internal method for default parameter initialization | [
"Internal",
"method",
"for",
"default",
"parameter",
"initialization"
]
| python | train | 36.846154 |
santosjorge/cufflinks | cufflinks/tools.py | https://github.com/santosjorge/cufflinks/blob/ca1cbf93998dc793d0b1f8ac30fe1f2bd105f63a/cufflinks/tools.py#L1252-L1361 | def get_shape(kind='line',x=None,y=None,x0=None,y0=None,x1=None,y1=None,span=0,color='red',dash='solid',width=1,
fillcolor=None,fill=False,opacity=1,xref='x',yref='y'):
"""
Returns a plotly shape
Parameters:
-----------
kind : string
Shape kind
line
rect
circle
x : float
x values for the shape.
This assumes x0=x1
x0 : float
x0 value for the shape
x1 : float
x1 value for the shape
y : float
y values for the shape.
This assumes y0=y1
y0 : float
y0 value for the shape
y1 : float
y1 value for the shape
color : string
color for shape line
dash : string
line style
solid
dash
dashdot
dot
width : int
line width
fillcolor : string
shape fill color
fill : bool
If True then fill shape
If not fillcolor then the
line color will be used
opacity : float [0,1]
opacity of the fill
xref : string
Sets the x coordinate system
which this object refers to
'x'
'paper'
'x2' etc
yref : string
Sets the y coordinate system
which this object refers to
'y'
'paper'
'y2' etc
"""
if x1 is None:
if x0 is None:
if x is None:
xref='paper'
x0=0
x1=1
else:
x0=x1=x
else:
x1=x0
else:
x
if y1 is None:
if y0 is None:
if y is None:
yref='paper'
y0=0
y1=1
else:
y0=y1=y
else:
y1=y0
shape = { 'x0':x0,
'y0':y0,
'x1':x1,
'y1':y1,
'line' : {
'color':normalize(color),
'width':width,
'dash':dash
},
'xref':xref,
'yref':yref
}
if kind=='line':
shape['type']='line'
elif kind=='circle':
shape['type']='circle'
elif kind=='rect':
shape['type']='rect'
else:
raise Exception("Invalid or unknown shape type : {0}".format(kind))
if (fill or fillcolor) and kind!='line':
fillcolor = color if not fillcolor else fillcolor
fillcolor=to_rgba(normalize(fillcolor),opacity)
shape['fillcolor']=fillcolor
return shape | [
"def",
"get_shape",
"(",
"kind",
"=",
"'line'",
",",
"x",
"=",
"None",
",",
"y",
"=",
"None",
",",
"x0",
"=",
"None",
",",
"y0",
"=",
"None",
",",
"x1",
"=",
"None",
",",
"y1",
"=",
"None",
",",
"span",
"=",
"0",
",",
"color",
"=",
"'red'",
",",
"dash",
"=",
"'solid'",
",",
"width",
"=",
"1",
",",
"fillcolor",
"=",
"None",
",",
"fill",
"=",
"False",
",",
"opacity",
"=",
"1",
",",
"xref",
"=",
"'x'",
",",
"yref",
"=",
"'y'",
")",
":",
"if",
"x1",
"is",
"None",
":",
"if",
"x0",
"is",
"None",
":",
"if",
"x",
"is",
"None",
":",
"xref",
"=",
"'paper'",
"x0",
"=",
"0",
"x1",
"=",
"1",
"else",
":",
"x0",
"=",
"x1",
"=",
"x",
"else",
":",
"x1",
"=",
"x0",
"else",
":",
"x",
"if",
"y1",
"is",
"None",
":",
"if",
"y0",
"is",
"None",
":",
"if",
"y",
"is",
"None",
":",
"yref",
"=",
"'paper'",
"y0",
"=",
"0",
"y1",
"=",
"1",
"else",
":",
"y0",
"=",
"y1",
"=",
"y",
"else",
":",
"y1",
"=",
"y0",
"shape",
"=",
"{",
"'x0'",
":",
"x0",
",",
"'y0'",
":",
"y0",
",",
"'x1'",
":",
"x1",
",",
"'y1'",
":",
"y1",
",",
"'line'",
":",
"{",
"'color'",
":",
"normalize",
"(",
"color",
")",
",",
"'width'",
":",
"width",
",",
"'dash'",
":",
"dash",
"}",
",",
"'xref'",
":",
"xref",
",",
"'yref'",
":",
"yref",
"}",
"if",
"kind",
"==",
"'line'",
":",
"shape",
"[",
"'type'",
"]",
"=",
"'line'",
"elif",
"kind",
"==",
"'circle'",
":",
"shape",
"[",
"'type'",
"]",
"=",
"'circle'",
"elif",
"kind",
"==",
"'rect'",
":",
"shape",
"[",
"'type'",
"]",
"=",
"'rect'",
"else",
":",
"raise",
"Exception",
"(",
"\"Invalid or unkown shape type : {0}\"",
".",
"format",
"(",
"kind",
")",
")",
"if",
"(",
"fill",
"or",
"fillcolor",
")",
"and",
"kind",
"!=",
"'line'",
":",
"fillcolor",
"=",
"color",
"if",
"not",
"fillcolor",
"else",
"fillcolor",
"fillcolor",
"=",
"to_rgba",
"(",
"normalize",
"(",
"fillcolor",
")",
",",
"opacity",
")",
"shape",
"[",
"'fillcolor'",
"]",
"=",
"fillcolor",
"return",
"shape"
]
| Returns a plotly shape
Parameters:
-----------
kind : string
Shape kind
line
rect
circle
x : float
x values for the shape.
This assumes x0=x1
x0 : float
x0 value for the shape
x1 : float
x1 value for the shape
y : float
y values for the shape.
This assumes y0=y1
y0 : float
y0 value for the shape
y1 : float
y1 value for the shape
color : string
color for shape line
dash : string
line style
solid
dash
dashdot
dot
width : int
line width
fillcolor : string
shape fill color
fill : bool
If True then fill shape
If not fillcolor then the
line color will be used
opacity : float [0,1]
opacity of the fill
xref : string
Sets the x coordinate system
which this object refers to
'x'
'paper'
'x2' etc
yref : string
Sets the y coordinate system
which this object refers to
'y'
'paper'
'y2' etc | [
"Returns",
"a",
"plotly",
"shape"
]
| python | train | 16.872727 |
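A usage sketch for the function above, building a vertical line and a shaded full-height band; attaching the result to a plotly figure layout is an assumption about the surrounding code:

    vline = get_shape(kind='line', x=3, color='blue', dash='dash', width=2)
    band = get_shape(kind='rect', x0=1, x1=2, color='orange', fill=True, opacity=0.3)
    fig['layout']['shapes'] = [vline, band]  # `fig` is a hypothetical plotly figure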
saltstack/salt | salt/states/boto_iam.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_iam.py#L1668-L1713 | def saml_provider_absent(name, region=None, key=None, keyid=None, profile=None):
'''
.. versionadded:: 2016.11.0
Ensure the SAML provider with the specified name is absent.
name (string)
The name of the SAML provider.
saml_metadata_document (string)
The xml document of the SAML provider.
region (string)
Region to connect to.
key (string)
Secret key to be used.
keyid (string)
Access key to be used.
profile (dict)
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
provider = __salt__['boto_iam.list_saml_providers'](region=region,
key=key, keyid=keyid,
profile=profile)
if not provider:
ret['comment'] = 'SAML provider {0} is absent.'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'SAML provider {0} is set to be removed.'.format(name)
ret['result'] = None
return ret
deleted = __salt__['boto_iam.delete_saml_provider'](name, region=region,
key=key, keyid=keyid,
profile=profile)
if deleted is not False:
ret['comment'] = 'SAML provider {0} was deleted.'.format(name)
ret['changes']['old'] = name
return ret
ret['result'] = False
ret['comment'] = 'SAML provider {0} failed to be deleted.'.format(name)
return ret | [
"def",
"saml_provider_absent",
"(",
"name",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"''",
",",
"'changes'",
":",
"{",
"}",
"}",
"provider",
"=",
"__salt__",
"[",
"'boto_iam.list_saml_providers'",
"]",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"not",
"provider",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'SAML provider {0} is absent.'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'SAML provider {0} is set to be removed.'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"return",
"ret",
"deleted",
"=",
"__salt__",
"[",
"'boto_iam.delete_saml_provider'",
"]",
"(",
"name",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"deleted",
"is",
"not",
"False",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'SAML provider {0} was deleted.'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'changes'",
"]",
"[",
"'old'",
"]",
"=",
"name",
"return",
"ret",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'SAML provider {0} failed to be deleted.'",
".",
"format",
"(",
"name",
")",
"return",
"ret"
]
| .. versionadded:: 2016.11.0
Ensure the SAML provider with the specified name is absent.
name (string)
The name of the SAML provider.
saml_metadata_document (string)
The xml document of the SAML provider.
region (string)
Region to connect to.
key (string)
Secret key to be used.
keyid (string)
Access key to be used.
profile (dict)
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid. | [
"..",
"versionadded",
"::",
"2016",
".",
"11",
".",
"0"
]
| python | train | 35.695652 |
PGower/PyCanvas | pycanvas/apis/assignments.py | https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/assignments.py#L637-L657 | def redirect_to_assignment_override_for_group(self, group_id, assignment_id):
"""
Redirect to the assignment override for a group.
Responds with a redirect to the override for the given group, if any
(404 otherwise).
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - group_id
"""ID"""
path["group_id"] = group_id
# REQUIRED - PATH - assignment_id
"""ID"""
path["assignment_id"] = assignment_id
self.logger.debug("GET /api/v1/groups/{group_id}/assignments/{assignment_id}/override with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/groups/{group_id}/assignments/{assignment_id}/override".format(**path), data=data, params=params, no_data=True) | [
"def",
"redirect_to_assignment_override_for_group",
"(",
"self",
",",
"group_id",
",",
"assignment_id",
")",
":",
"path",
"=",
"{",
"}",
"data",
"=",
"{",
"}",
"params",
"=",
"{",
"}",
"# REQUIRED - PATH - group_id\r",
"\"\"\"ID\"\"\"",
"path",
"[",
"\"group_id\"",
"]",
"=",
"group_id",
"# REQUIRED - PATH - assignment_id\r",
"\"\"\"ID\"\"\"",
"path",
"[",
"\"assignment_id\"",
"]",
"=",
"assignment_id",
"self",
".",
"logger",
".",
"debug",
"(",
"\"GET /api/v1/groups/{group_id}/assignments/{assignment_id}/override with query params: {params} and form data: {data}\"",
".",
"format",
"(",
"params",
"=",
"params",
",",
"data",
"=",
"data",
",",
"*",
"*",
"path",
")",
")",
"return",
"self",
".",
"generic_request",
"(",
"\"GET\"",
",",
"\"/api/v1/groups/{group_id}/assignments/{assignment_id}/override\"",
".",
"format",
"(",
"*",
"*",
"path",
")",
",",
"data",
"=",
"data",
",",
"params",
"=",
"params",
",",
"no_data",
"=",
"True",
")"
]
| Redirect to the assignment override for a group.
Responds with a redirect to the override for the given group, if any
(404 otherwise). | [
"Redirect",
"to",
"the",
"assignment",
"override",
"for",
"a",
"group",
".",
"Responds",
"with",
"a",
"redirect",
"to",
"the",
"override",
"for",
"the",
"given",
"group",
"if",
"any",
"(",
"404",
"otherwise",
")",
"."
]
| python | train | 41.238095 |
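A short call sketch for the method above; `canvas` stands for an instance of the generated API class, and the ids are placeholders:

    resp = canvas.redirect_to_assignment_override_for_group(group_id=42, assignment_id=1337)
    # responds with a redirect to the override for group 42, or a 404 if none exists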
DLR-RM/RAFCON | source/rafcon/gui/controllers/utils/tree_view_controller.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/utils/tree_view_controller.py#L497-L575 | def mouse_click(self, widget, event=None):
"""Implements shift- and control-key handling features for mouse button press events explicit
The method is implements a fully defined mouse pattern to use shift- and control-key for multi-selection in a
TreeView and a ListStore as model. It avoid problems caused by special renderer types like the text combo
renderer by stopping the callback handler to continue with notifications.
:param Gtk.Object widget: Object which is the source of the event
:param Gtk.Event event: Event generated by mouse click
:rtype: bool
"""
if event.type == Gdk.EventType.BUTTON_PRESS:
pthinfo = self.tree_view.get_path_at_pos(int(event.x), int(event.y))
if not bool(event.get_state() & Gdk.ModifierType.CONTROL_MASK) \
and not bool(event.get_state() & Gdk.ModifierType.SHIFT_MASK) and \
event.type == Gdk.EventType.BUTTON_PRESS and event.get_button()[1] == 3:
if pthinfo is not None:
model, paths = self._tree_selection.get_selected_rows()
# print(paths)
if pthinfo[0] not in paths:
# self._logger.info("force single selection for right click")
self.tree_view.set_cursor(pthinfo[0])
self._last_path_selection = pthinfo[0]
else:
# self._logger.info("single- or multi-selection for right click")
pass
self.on_right_click_menu()
return True
if (bool(event.get_state() & Gdk.ModifierType.CONTROL_MASK) or \
bool(event.get_state() & Gdk.ModifierType.SHIFT_MASK)) and \
event.type == Gdk.EventType.BUTTON_PRESS and event.get_button()[1] == 3:
return True
if not bool(event.get_state() & Gdk.ModifierType.SHIFT_MASK) and event.get_button()[1] == 1:
if pthinfo is not None:
# self._logger.info("last select row {}".format(pthinfo[0]))
self._last_path_selection = pthinfo[0]
# else:
# self._logger.info("deselect rows")
# self.tree_selection.unselect_all()
if bool(event.get_state() & Gdk.ModifierType.SHIFT_MASK) and event.get_button()[1] == 1:
# self._logger.info("SHIFT adjust selection range")
model, paths = self._tree_selection.get_selected_rows()
# print(model, paths, pthinfo[0])
if paths and pthinfo and pthinfo[0]:
if self._last_path_selection[0] <= pthinfo[0][0]:
new_row_ids_selected = list(range(self._last_path_selection[0], pthinfo[0][0]+1))
else:
new_row_ids_selected = list(range(self._last_path_selection[0], pthinfo[0][0]-1, -1))
# self._logger.info("range to select {0}, {1}".format(new_row_ids_selected, model))
self._tree_selection.unselect_all()
for path in new_row_ids_selected:
self._tree_selection.select_path(path)
return True
else:
# self._logger.info("nothing selected {}".format(model))
if pthinfo and pthinfo[0]:
self._last_path_selection = pthinfo[0]
if bool(event.get_state() & Gdk.ModifierType.CONTROL_MASK) and event.get_button()[1] == 1:
# self._logger.info("CONTROL adjust selection range")
model, paths = self._tree_selection.get_selected_rows()
# print(model, paths, pthinfo[0])
if paths and pthinfo and pthinfo[0]:
if pthinfo[0] in paths:
self._tree_selection.unselect_path(pthinfo[0])
else:
self._tree_selection.select_path(pthinfo[0])
return True
elif pthinfo and pthinfo[0]:
self._tree_selection.select_path(pthinfo[0])
return True
elif event.type == Gdk.EventType._2BUTTON_PRESS:
self._handle_double_click(event) | [
"def",
"mouse_click",
"(",
"self",
",",
"widget",
",",
"event",
"=",
"None",
")",
":",
"if",
"event",
".",
"type",
"==",
"Gdk",
".",
"EventType",
".",
"BUTTON_PRESS",
":",
"pthinfo",
"=",
"self",
".",
"tree_view",
".",
"get_path_at_pos",
"(",
"int",
"(",
"event",
".",
"x",
")",
",",
"int",
"(",
"event",
".",
"y",
")",
")",
"if",
"not",
"bool",
"(",
"event",
".",
"get_state",
"(",
")",
"&",
"Gdk",
".",
"ModifierType",
".",
"CONTROL_MASK",
")",
"and",
"not",
"bool",
"(",
"event",
".",
"get_state",
"(",
")",
"&",
"Gdk",
".",
"ModifierType",
".",
"SHIFT_MASK",
")",
"and",
"event",
".",
"type",
"==",
"Gdk",
".",
"EventType",
".",
"BUTTON_PRESS",
"and",
"event",
".",
"get_button",
"(",
")",
"[",
"1",
"]",
"==",
"3",
":",
"if",
"pthinfo",
"is",
"not",
"None",
":",
"model",
",",
"paths",
"=",
"self",
".",
"_tree_selection",
".",
"get_selected_rows",
"(",
")",
"# print(paths)",
"if",
"pthinfo",
"[",
"0",
"]",
"not",
"in",
"paths",
":",
"# self._logger.info(\"force single selection for right click\")",
"self",
".",
"tree_view",
".",
"set_cursor",
"(",
"pthinfo",
"[",
"0",
"]",
")",
"self",
".",
"_last_path_selection",
"=",
"pthinfo",
"[",
"0",
"]",
"else",
":",
"# self._logger.info(\"single- or multi-selection for right click\")",
"pass",
"self",
".",
"on_right_click_menu",
"(",
")",
"return",
"True",
"if",
"(",
"bool",
"(",
"event",
".",
"get_state",
"(",
")",
"&",
"Gdk",
".",
"ModifierType",
".",
"CONTROL_MASK",
")",
"or",
"bool",
"(",
"event",
".",
"get_state",
"(",
")",
"&",
"Gdk",
".",
"ModifierType",
".",
"SHIFT_MASK",
")",
")",
"and",
"event",
".",
"type",
"==",
"Gdk",
".",
"EventType",
".",
"BUTTON_PRESS",
"and",
"event",
".",
"get_button",
"(",
")",
"[",
"1",
"]",
"==",
"3",
":",
"return",
"True",
"if",
"not",
"bool",
"(",
"event",
".",
"get_state",
"(",
")",
"&",
"Gdk",
".",
"ModifierType",
".",
"SHIFT_MASK",
")",
"and",
"event",
".",
"get_button",
"(",
")",
"[",
"1",
"]",
"==",
"1",
":",
"if",
"pthinfo",
"is",
"not",
"None",
":",
"# self._logger.info(\"last select row {}\".format(pthinfo[0]))",
"self",
".",
"_last_path_selection",
"=",
"pthinfo",
"[",
"0",
"]",
"# else:",
"# self._logger.info(\"deselect rows\")",
"# self.tree_selection.unselect_all()",
"if",
"bool",
"(",
"event",
".",
"get_state",
"(",
")",
"&",
"Gdk",
".",
"ModifierType",
".",
"SHIFT_MASK",
")",
"and",
"event",
".",
"get_button",
"(",
")",
"[",
"1",
"]",
"==",
"1",
":",
"# self._logger.info(\"SHIFT adjust selection range\")",
"model",
",",
"paths",
"=",
"self",
".",
"_tree_selection",
".",
"get_selected_rows",
"(",
")",
"# print(model, paths, pthinfo[0])",
"if",
"paths",
"and",
"pthinfo",
"and",
"pthinfo",
"[",
"0",
"]",
":",
"if",
"self",
".",
"_last_path_selection",
"[",
"0",
"]",
"<=",
"pthinfo",
"[",
"0",
"]",
"[",
"0",
"]",
":",
"new_row_ids_selected",
"=",
"list",
"(",
"range",
"(",
"self",
".",
"_last_path_selection",
"[",
"0",
"]",
",",
"pthinfo",
"[",
"0",
"]",
"[",
"0",
"]",
"+",
"1",
")",
")",
"else",
":",
"new_row_ids_selected",
"=",
"list",
"(",
"range",
"(",
"self",
".",
"_last_path_selection",
"[",
"0",
"]",
",",
"pthinfo",
"[",
"0",
"]",
"[",
"0",
"]",
"-",
"1",
",",
"-",
"1",
")",
")",
"# self._logger.info(\"range to select {0}, {1}\".format(new_row_ids_selected, model))",
"self",
".",
"_tree_selection",
".",
"unselect_all",
"(",
")",
"for",
"path",
"in",
"new_row_ids_selected",
":",
"self",
".",
"_tree_selection",
".",
"select_path",
"(",
"path",
")",
"return",
"True",
"else",
":",
"# self._logger.info(\"nothing selected {}\".format(model))",
"if",
"pthinfo",
"and",
"pthinfo",
"[",
"0",
"]",
":",
"self",
".",
"_last_path_selection",
"=",
"pthinfo",
"[",
"0",
"]",
"if",
"bool",
"(",
"event",
".",
"get_state",
"(",
")",
"&",
"Gdk",
".",
"ModifierType",
".",
"CONTROL_MASK",
")",
"and",
"event",
".",
"get_button",
"(",
")",
"[",
"1",
"]",
"==",
"1",
":",
"# self._logger.info(\"CONTROL adjust selection range\")",
"model",
",",
"paths",
"=",
"self",
".",
"_tree_selection",
".",
"get_selected_rows",
"(",
")",
"# print(model, paths, pthinfo[0])",
"if",
"paths",
"and",
"pthinfo",
"and",
"pthinfo",
"[",
"0",
"]",
":",
"if",
"pthinfo",
"[",
"0",
"]",
"in",
"paths",
":",
"self",
".",
"_tree_selection",
".",
"unselect_path",
"(",
"pthinfo",
"[",
"0",
"]",
")",
"else",
":",
"self",
".",
"_tree_selection",
".",
"select_path",
"(",
"pthinfo",
"[",
"0",
"]",
")",
"return",
"True",
"elif",
"pthinfo",
"and",
"pthinfo",
"[",
"0",
"]",
":",
"self",
".",
"_tree_selection",
".",
"select_path",
"(",
"pthinfo",
"[",
"0",
"]",
")",
"return",
"True",
"elif",
"event",
".",
"type",
"==",
"Gdk",
".",
"EventType",
".",
"_2BUTTON_PRESS",
":",
"self",
".",
"_handle_double_click",
"(",
"event",
")"
]
| Implements shift- and control-key handling features for mouse button press events explicitly
The method implements a fully defined mouse pattern to use shift- and control-key for multi-selection in a
TreeView and a ListStore as the model. It avoids problems caused by special renderer types like the text combo
renderer by stopping the callback handler from continuing with notifications.
:param Gtk.Object widget: Object which is the source of the event
:param Gtk.Event event: Event generated by mouse click
:rtype: bool | [
"Implements",
"shift",
"-",
"and",
"control",
"-",
"key",
"handling",
"features",
"for",
"mouse",
"button",
"press",
"events",
"explicit"
]
| python | train | 54.35443 |
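A wiring sketch for the handler above; RAFCON connects it internally, so this is only illustrative of the expected GTK signal:

    # `controller` is an instance of the tree view controller defining mouse_click.
    controller.tree_view.connect('button-press-event', controller.mouse_click)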
googleapis/google-cloud-python | firestore/google/cloud/firestore_v1beta1/document.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/document.py#L171-L183 | def collection(self, collection_id):
"""Create a sub-collection underneath the current document.
Args:
collection_id (str): The sub-collection identifier (sometimes
referred to as the "kind").
Returns:
~.firestore_v1beta1.collection.CollectionReference: The
child collection.
"""
child_path = self._path + (collection_id,)
return self._client.collection(*child_path) | [
"def",
"collection",
"(",
"self",
",",
"collection_id",
")",
":",
"child_path",
"=",
"self",
".",
"_path",
"+",
"(",
"collection_id",
",",
")",
"return",
"self",
".",
"_client",
".",
"collection",
"(",
"*",
"child_path",
")"
]
| Create a sub-collection underneath the current document.
Args:
collection_id (str): The sub-collection identifier (sometimes
referred to as the "kind").
Returns:
~.firestore_v1beta1.collection.CollectionReference: The
child collection. | [
"Create",
"a",
"sub",
"-",
"collection",
"underneath",
"the",
"current",
"document",
"."
]
| python | train | 35.076923 |
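A usage sketch for the method above, chaining a sub-collection under a document; the client construction and names are placeholders:

    from google.cloud import firestore

    client = firestore.Client()
    doc_ref = client.collection('users').document('alice')
    messages = doc_ref.collection('messages')  # sub-collection under the document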
arcticfoxnv/slackminion | slackminion/plugins/core/core.py | https://github.com/arcticfoxnv/slackminion/blob/62ea77aba5ac5ba582793e578a379a76f7d26cdb/slackminion/plugins/core/core.py#L75-L79 | def shutdown(self, msg, args):
"""Causes the bot to gracefully shutdown."""
self.log.info("Received shutdown from %s", msg.user.username)
self._bot.runnable = False
return "Shutting down..." | [
"def",
"shutdown",
"(",
"self",
",",
"msg",
",",
"args",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Received shutdown from %s\"",
",",
"msg",
".",
"user",
".",
"username",
")",
"self",
".",
"_bot",
".",
"runnable",
"=",
"False",
"return",
"\"Shutting down...\""
]
| Causes the bot to gracefully shutdown. | [
"Causes",
"the",
"bot",
"to",
"gracefully",
"shutdown",
"."
]
| python | valid | 43.6 |
mgedmin/findimports | findimports.py | https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L701-L705 | def printImportedNames(self):
"""Produce a report of imported names."""
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names)) | [
"def",
"printImportedNames",
"(",
"self",
")",
":",
"for",
"module",
"in",
"self",
".",
"listModules",
"(",
")",
":",
"print",
"(",
"\"%s:\"",
"%",
"module",
".",
"modname",
")",
"print",
"(",
"\" %s\"",
"%",
"\"\\n \"",
".",
"join",
"(",
"imp",
".",
"name",
"for",
"imp",
"in",
"module",
".",
"imported_names",
")",
")"
]
| Produce a report of imported names. | [
"Produce",
"a",
"report",
"of",
"imported",
"names",
"."
]
| python | train | 48.4 |
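A sketch of producing the report above; assumes findimports' ModuleGraph and its parsePathname helper:

    from findimports import ModuleGraph

    g = ModuleGraph()
    g.parsePathname('mypackage/')  # scan a package directory
    g.printImportedNames()         # prints one block of imported names per module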
fudge-py/fudge | fudge/__init__.py | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L1359-L1373 | def with_arg_count(self, count):
"""Set the last call to expect an exact argument count.
I.E.::
>>> auth = Fake('auth').provides('login').with_arg_count(2)
>>> auth.login('joe_user') # forgot password
Traceback (most recent call last):
...
AssertionError: fake:auth.login() was called with 1 arg(s) but expected 2
"""
exp = self._get_current_call()
exp.expected_arg_count = count
return self | [
"def",
"with_arg_count",
"(",
"self",
",",
"count",
")",
":",
"exp",
"=",
"self",
".",
"_get_current_call",
"(",
")",
"exp",
".",
"expected_arg_count",
"=",
"count",
"return",
"self"
]
| Set the last call to expect an exact argument count.
I.E.::
>>> auth = Fake('auth').provides('login').with_arg_count(2)
>>> auth.login('joe_user') # forgot password
Traceback (most recent call last):
...
AssertionError: fake:auth.login() was called with 1 arg(s) but expected 2 | [
"Set",
"the",
"last",
"call",
"to",
"expect",
"an",
"exact",
"argument",
"count",
"."
]
| python | train | 32.533333 |
pyviz/holoviews | holoviews/streams.py | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/streams.py#L128-L171 | def trigger(cls, streams):
"""
Given a list of streams, collect all the stream parameters into
a dictionary and pass it to the union set of subscribers.
Passing multiple streams at once to trigger can be useful when a
subscriber may be set multiple times across streams but only
needs to be called once.
"""
# Union of stream contents
items = [stream.contents.items() for stream in set(streams)]
union = [kv for kvs in items for kv in kvs]
klist = [k for k, _ in union]
key_clashes = set([k for k in klist if klist.count(k) > 1])
if key_clashes:
clashes = []
dicts = [dict(kvs) for kvs in items]
for clash in key_clashes:
values = set(d[clash] for d in dicts if clash in d)
if len(values) > 1:
clashes.append((clash, values))
if clashes:
msg = ', '.join(['%r has values %r' % (k, v) for k, v in clashes])
print('Parameter value clashes where %s' % msg)
# Group subscribers by precedence while keeping the ordering
# within each group
subscriber_precedence = defaultdict(list)
for stream in streams:
stream._on_trigger()
for precedence, subscriber in stream._subscribers:
subscriber_precedence[precedence].append(subscriber)
sorted_subscribers = sorted(subscriber_precedence.items(), key=lambda x: x[0])
subscribers = util.unique_iterator([s for _, subscribers in sorted_subscribers
for s in subscribers])
with triggering_streams(streams):
for subscriber in subscribers:
subscriber(**dict(union))
for stream in streams:
with util.disable_constant(stream):
if stream.transient:
stream.reset() | [
"def",
"trigger",
"(",
"cls",
",",
"streams",
")",
":",
"# Union of stream contents",
"items",
"=",
"[",
"stream",
".",
"contents",
".",
"items",
"(",
")",
"for",
"stream",
"in",
"set",
"(",
"streams",
")",
"]",
"union",
"=",
"[",
"kv",
"for",
"kvs",
"in",
"items",
"for",
"kv",
"in",
"kvs",
"]",
"klist",
"=",
"[",
"k",
"for",
"k",
",",
"_",
"in",
"union",
"]",
"key_clashes",
"=",
"set",
"(",
"[",
"k",
"for",
"k",
"in",
"klist",
"if",
"klist",
".",
"count",
"(",
"k",
")",
">",
"1",
"]",
")",
"if",
"key_clashes",
":",
"clashes",
"=",
"[",
"]",
"dicts",
"=",
"[",
"dict",
"(",
"kvs",
")",
"for",
"kvs",
"in",
"items",
"]",
"for",
"clash",
"in",
"key_clashes",
":",
"values",
"=",
"set",
"(",
"d",
"[",
"clash",
"]",
"for",
"d",
"in",
"dicts",
"if",
"clash",
"in",
"d",
")",
"if",
"len",
"(",
"values",
")",
">",
"1",
":",
"clashes",
".",
"append",
"(",
"(",
"clash",
",",
"values",
")",
")",
"if",
"clashes",
":",
"msg",
"=",
"', '",
".",
"join",
"(",
"[",
"'%r has values %r'",
"%",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"clashes",
"]",
")",
"print",
"(",
"'Parameter value clashes where %s'",
"%",
"msg",
")",
"# Group subscribers by precedence while keeping the ordering",
"# within each group",
"subscriber_precedence",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"stream",
"in",
"streams",
":",
"stream",
".",
"_on_trigger",
"(",
")",
"for",
"precedence",
",",
"subscriber",
"in",
"stream",
".",
"_subscribers",
":",
"subscriber_precedence",
"[",
"precedence",
"]",
".",
"append",
"(",
"subscriber",
")",
"sorted_subscribers",
"=",
"sorted",
"(",
"subscriber_precedence",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
"subscribers",
"=",
"util",
".",
"unique_iterator",
"(",
"[",
"s",
"for",
"_",
",",
"subscribers",
"in",
"sorted_subscribers",
"for",
"s",
"in",
"subscribers",
"]",
")",
"with",
"triggering_streams",
"(",
"streams",
")",
":",
"for",
"subscriber",
"in",
"subscribers",
":",
"subscriber",
"(",
"*",
"*",
"dict",
"(",
"union",
")",
")",
"for",
"stream",
"in",
"streams",
":",
"with",
"util",
".",
"disable_constant",
"(",
"stream",
")",
":",
"if",
"stream",
".",
"transient",
":",
"stream",
".",
"reset",
"(",
")"
]
| Given a list of streams, collect all the stream parameters into
a dictionary and pass it to the union set of subscribers.
Passing multiple streams at once to trigger can be useful when a
subscriber may be set multiple times across streams but only
needs to be called once. | [
"Given",
"a",
"list",
"of",
"streams",
"collect",
"all",
"the",
"stream",
"parameters",
"into",
"a",
"dictionary",
"and",
"pass",
"it",
"to",
"the",
"union",
"set",
"of",
"subscribers",
"."
]
| python | train | 43.431818 |
EpistasisLab/scikit-mdr | mdr/utils/utils.py | https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/utils/utils.py#L220-L243 | def mdr_conditional_entropy(X, Y, labels, base=2):
"""Calculates the MDR conditional entropy, H(XY|labels), in the given base
MDR conditional entropy is calculated by combining variables X and Y into a single MDR model then calculating
the entropy of the resulting model's predictions conditional on the provided labels.
Parameters
----------
X: array-like (# samples)
An array of values corresponding to one feature in the MDR model
Y: array-like (# samples)
An array of values corresponding to one feature in the MDR model
labels: array-like (# samples)
The class labels corresponding to features X and Y
base: integer (default: 2)
The base in which to calculate MDR conditional entropy
Returns
----------
mdr_conditional_entropy: float
The MDR conditional entropy calculated according to the equation H(XY|labels) = H(XY,labels) - H(labels)
"""
return conditional_entropy(_mdr_predict(X, Y, labels), labels, base=base) | [
"def",
"mdr_conditional_entropy",
"(",
"X",
",",
"Y",
",",
"labels",
",",
"base",
"=",
"2",
")",
":",
"return",
"conditional_entropy",
"(",
"_mdr_predict",
"(",
"X",
",",
"Y",
",",
"labels",
")",
",",
"labels",
",",
"base",
"=",
"base",
")"
]
| Calculates the MDR conditional entropy, H(XY|labels), in the given base
MDR conditional entropy is calculated by combining variables X and Y into a single MDR model then calculating
the entropy of the resulting model's predictions conditional on the provided labels.
Parameters
----------
X: array-like (# samples)
An array of values corresponding to one feature in the MDR model
Y: array-like (# samples)
An array of values corresponding to one feature in the MDR model
labels: array-like (# samples)
The class labels corresponding to features X and Y
base: integer (default: 2)
The base in which to calculate MDR conditional entropy
Returns
----------
mdr_conditional_entropy: float
The MDR conditional entropy calculated according to the equation H(XY|labels) = H(XY,labels) - H(labels) | [
"Calculates",
"the",
"MDR",
"conditional",
"entropy",
"H",
"(",
"XY|labels",
")",
"in",
"the",
"given",
"base"
]
| python | test | 41.583333 |
secdev/scapy | scapy/layers/inet6.py | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/inet6.py#L1081-L1168 | def fragment6(pkt, fragSize):
"""
Performs fragmentation of an IPv6 packet. Provided packet ('pkt') must
already contain an IPv6ExtHdrFragment() class. 'fragSize' argument is the
expected maximum size of fragments (MTU). The list of packets is returned.
If packet does not contain an IPv6ExtHdrFragment class, it is returned in
result list.
"""
pkt = pkt.copy()
if IPv6ExtHdrFragment not in pkt:
# TODO : automatically add a fragment before upper Layer
# at the moment, we do nothing and return initial packet
# as single element of a list
return [pkt]
# If the payload is bigger than 65535, a Jumbo payload must be used, as
# an IPv6 packet can't be bigger than 65535 bytes.
if len(raw(pkt[IPv6ExtHdrFragment])) > 65535:
warning("An IPv6 packet can'be bigger than 65535, please use a Jumbo payload.") # noqa: E501
return []
s = raw(pkt) # for instantiation to get upper layer checksum right
if len(s) <= fragSize:
return [pkt]
# Fragmentable part : fake IPv6 for Fragmentable part length computation
fragPart = pkt[IPv6ExtHdrFragment].payload
tmp = raw(IPv6(src="::1", dst="::1") / fragPart)
fragPartLen = len(tmp) - 40 # basic IPv6 header length
fragPartStr = s[-fragPartLen:]
# Grab Next Header for use in Fragment Header
nh = pkt[IPv6ExtHdrFragment].nh
# Keep fragment header
fragHeader = pkt[IPv6ExtHdrFragment]
del fragHeader.payload # detach payload
# Unfragmentable Part
unfragPartLen = len(s) - fragPartLen - 8
unfragPart = pkt
del pkt[IPv6ExtHdrFragment].underlayer.payload # detach payload
# Cut the fragmentable part to fit fragSize. Inner fragments have
# a length that is an integer multiple of 8 octets. last Frag MTU
# can be anything below MTU
lastFragSize = fragSize - unfragPartLen - 8
innerFragSize = lastFragSize - (lastFragSize % 8)
if lastFragSize <= 0 or innerFragSize == 0:
warning("Provided fragment size value is too low. " +
"Should be more than %d" % (unfragPartLen + 8))
return [unfragPart / fragHeader / fragPart]
remain = fragPartStr
res = []
fragOffset = 0 # offset, incremented during creation
fragId = random.randint(0, 0xffffffff) # random id ...
if fragHeader.id is not None: # ... except id provided by user
fragId = fragHeader.id
fragHeader.m = 1
fragHeader.id = fragId
fragHeader.nh = nh
# Main loop : cut, fit to FRAGSIZEs, fragOffset, Id ...
while True:
if (len(remain) > lastFragSize):
tmp = remain[:innerFragSize]
remain = remain[innerFragSize:]
fragHeader.offset = fragOffset # update offset
fragOffset += (innerFragSize // 8) # compute new one
if IPv6 in unfragPart:
unfragPart[IPv6].plen = None
tempo = unfragPart / fragHeader / conf.raw_layer(load=tmp)
res.append(tempo)
else:
fragHeader.offset = fragOffset # update offset
fragHeader.m = 0
if IPv6 in unfragPart:
unfragPart[IPv6].plen = None
tempo = unfragPart / fragHeader / conf.raw_layer(load=remain)
res.append(tempo)
break
return res | [
"def",
"fragment6",
"(",
"pkt",
",",
"fragSize",
")",
":",
"pkt",
"=",
"pkt",
".",
"copy",
"(",
")",
"if",
"IPv6ExtHdrFragment",
"not",
"in",
"pkt",
":",
"# TODO : automatically add a fragment before upper Layer",
"# at the moment, we do nothing and return initial packet",
"# as single element of a list",
"return",
"[",
"pkt",
"]",
"# If the payload is bigger than 65535, a Jumbo payload must be used, as",
"# an IPv6 packet can't be bigger than 65535 bytes.",
"if",
"len",
"(",
"raw",
"(",
"pkt",
"[",
"IPv6ExtHdrFragment",
"]",
")",
")",
">",
"65535",
":",
"warning",
"(",
"\"An IPv6 packet can'be bigger than 65535, please use a Jumbo payload.\"",
")",
"# noqa: E501",
"return",
"[",
"]",
"s",
"=",
"raw",
"(",
"pkt",
")",
"# for instantiation to get upper layer checksum right",
"if",
"len",
"(",
"s",
")",
"<=",
"fragSize",
":",
"return",
"[",
"pkt",
"]",
"# Fragmentable part : fake IPv6 for Fragmentable part length computation",
"fragPart",
"=",
"pkt",
"[",
"IPv6ExtHdrFragment",
"]",
".",
"payload",
"tmp",
"=",
"raw",
"(",
"IPv6",
"(",
"src",
"=",
"\"::1\"",
",",
"dst",
"=",
"\"::1\"",
")",
"/",
"fragPart",
")",
"fragPartLen",
"=",
"len",
"(",
"tmp",
")",
"-",
"40",
"# basic IPv6 header length",
"fragPartStr",
"=",
"s",
"[",
"-",
"fragPartLen",
":",
"]",
"# Grab Next Header for use in Fragment Header",
"nh",
"=",
"pkt",
"[",
"IPv6ExtHdrFragment",
"]",
".",
"nh",
"# Keep fragment header",
"fragHeader",
"=",
"pkt",
"[",
"IPv6ExtHdrFragment",
"]",
"del",
"fragHeader",
".",
"payload",
"# detach payload",
"# Unfragmentable Part",
"unfragPartLen",
"=",
"len",
"(",
"s",
")",
"-",
"fragPartLen",
"-",
"8",
"unfragPart",
"=",
"pkt",
"del",
"pkt",
"[",
"IPv6ExtHdrFragment",
"]",
".",
"underlayer",
".",
"payload",
"# detach payload",
"# Cut the fragmentable part to fit fragSize. Inner fragments have",
"# a length that is an integer multiple of 8 octets. last Frag MTU",
"# can be anything below MTU",
"lastFragSize",
"=",
"fragSize",
"-",
"unfragPartLen",
"-",
"8",
"innerFragSize",
"=",
"lastFragSize",
"-",
"(",
"lastFragSize",
"%",
"8",
")",
"if",
"lastFragSize",
"<=",
"0",
"or",
"innerFragSize",
"==",
"0",
":",
"warning",
"(",
"\"Provided fragment size value is too low. \"",
"+",
"\"Should be more than %d\"",
"%",
"(",
"unfragPartLen",
"+",
"8",
")",
")",
"return",
"[",
"unfragPart",
"/",
"fragHeader",
"/",
"fragPart",
"]",
"remain",
"=",
"fragPartStr",
"res",
"=",
"[",
"]",
"fragOffset",
"=",
"0",
"# offset, incremeted during creation",
"fragId",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"0xffffffff",
")",
"# random id ...",
"if",
"fragHeader",
".",
"id",
"is",
"not",
"None",
":",
"# ... except id provided by user",
"fragId",
"=",
"fragHeader",
".",
"id",
"fragHeader",
".",
"m",
"=",
"1",
"fragHeader",
".",
"id",
"=",
"fragId",
"fragHeader",
".",
"nh",
"=",
"nh",
"# Main loop : cut, fit to FRAGSIZEs, fragOffset, Id ...",
"while",
"True",
":",
"if",
"(",
"len",
"(",
"remain",
")",
">",
"lastFragSize",
")",
":",
"tmp",
"=",
"remain",
"[",
":",
"innerFragSize",
"]",
"remain",
"=",
"remain",
"[",
"innerFragSize",
":",
"]",
"fragHeader",
".",
"offset",
"=",
"fragOffset",
"# update offset",
"fragOffset",
"+=",
"(",
"innerFragSize",
"//",
"8",
")",
"# compute new one",
"if",
"IPv6",
"in",
"unfragPart",
":",
"unfragPart",
"[",
"IPv6",
"]",
".",
"plen",
"=",
"None",
"tempo",
"=",
"unfragPart",
"/",
"fragHeader",
"/",
"conf",
".",
"raw_layer",
"(",
"load",
"=",
"tmp",
")",
"res",
".",
"append",
"(",
"tempo",
")",
"else",
":",
"fragHeader",
".",
"offset",
"=",
"fragOffset",
"# update offSet",
"fragHeader",
".",
"m",
"=",
"0",
"if",
"IPv6",
"in",
"unfragPart",
":",
"unfragPart",
"[",
"IPv6",
"]",
".",
"plen",
"=",
"None",
"tempo",
"=",
"unfragPart",
"/",
"fragHeader",
"/",
"conf",
".",
"raw_layer",
"(",
"load",
"=",
"remain",
")",
"res",
".",
"append",
"(",
"tempo",
")",
"break",
"return",
"res"
]
| Performs fragmentation of an IPv6 packet. Provided packet ('pkt') must
already contain an IPv6ExtHdrFragment() class. 'fragSize' argument is the
expected maximum size of fragments (MTU). The list of packets is returned.
If packet does not contain an IPv6ExtHdrFragment class, it is returned in
result list. | [
"Performs",
"fragmentation",
"of",
"an",
"IPv6",
"packet",
".",
"Provided",
"packet",
"(",
"pkt",
")",
"must",
"already",
"contain",
"an",
"IPv6ExtHdrFragment",
"()",
"class",
".",
"fragSize",
"argument",
"is",
"the",
"expected",
"maximum",
"size",
"of",
"fragments",
"(",
"MTU",
")",
".",
"The",
"list",
"of",
"packets",
"is",
"returned",
"."
]
| python | train | 37.215909 |
MattBroach/DjangoRestMultipleModels | drf_multiple_model/mixins.py | https://github.com/MattBroach/DjangoRestMultipleModels/blob/893969ed38d614a5e2f060e560824fa7c5c49cfd/drf_multiple_model/mixins.py#L81-L91 | def add_to_results(self, data, label, results):
"""
responsible for updating the running `results` variable with the
data from this queryset/serializer combo
"""
raise NotImplementedError(
'{} must specify how to add data to the running results tally '
'by overriding the `add_to_results` method.'.format(
self.__class__.__name__
)
) | [
"def",
"add_to_results",
"(",
"self",
",",
"data",
",",
"label",
",",
"results",
")",
":",
"raise",
"NotImplementedError",
"(",
"'{} must specify how to add data to the running results tally '",
"'by overriding the `add_to_results` method.'",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
")",
")"
]
| responsible for updating the running `results` variable with the
data from this queryset/serializer combo | [
"responsible",
"for",
"updating",
"the",
"running",
"results",
"variable",
"with",
"the",
"data",
"from",
"this",
"queryset",
"/",
"serializer",
"combo"
]
| python | train | 38.454545 |
google/grr | grr/server/grr_response_server/flows/general/collectors.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/flows/general/collectors.py#L987-L997 | def MeetsConditions(knowledge_base, source):
"""Check conditions on the source."""
source_conditions_met = True
os_conditions = ConvertSupportedOSToConditions(source)
if os_conditions:
source.conditions.append(os_conditions)
for condition in source.conditions:
source_conditions_met &= artifact_utils.CheckCondition(
condition, knowledge_base)
return source_conditions_met | [
"def",
"MeetsConditions",
"(",
"knowledge_base",
",",
"source",
")",
":",
"source_conditions_met",
"=",
"True",
"os_conditions",
"=",
"ConvertSupportedOSToConditions",
"(",
"source",
")",
"if",
"os_conditions",
":",
"source",
".",
"conditions",
".",
"append",
"(",
"os_conditions",
")",
"for",
"condition",
"in",
"source",
".",
"conditions",
":",
"source_conditions_met",
"&=",
"artifact_utils",
".",
"CheckCondition",
"(",
"condition",
",",
"knowledge_base",
")",
"return",
"source_conditions_met"
]
| Check conditions on the source. | [
"Check",
"conditions",
"on",
"the",
"source",
"."
]
| python | train | 35.545455 |
azraq27/neural | neural/stats.py | https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/stats.py#L13-L60 | def voxel_count(dset,p=None,positive_only=False,mask=None,ROI=None):
''' returns the number of non-zero voxels
:p: threshold the dataset at the given *p*-value, then count
:positive_only: only count positive values
:mask: count within the given mask
:ROI: only use the ROI with the given value (or list of values) within the mask
if ROI is 'all' then return the voxel count of each ROI
as a dictionary
'''
if p:
dset = nl.thresh(dset,p,positive_only)
else:
if positive_only:
dset = nl.calc(dset,'step(a)')
count = 0
devnull = open(os.devnull,"w")
count_dict = None
if mask:
cmd = ['3dROIstats','-1Dformat','-nomeanout','-nobriklab', '-nzvoxels']
cmd += ['-mask',str(mask),str(dset)]
out = subprocess.check_output(cmd,stderr=devnull).split('\n')
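# 3dROIstats -1Dformat output: out[1] holds the NZcount labels, out[3] the counts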
if len(out)<4:
return 0
rois = [int(x.replace('NZcount_','')) for x in out[1].strip()[1:].split()]
counts = [int(x.replace('NZcount_','')) for x in out[3].strip().split()]
count_dict = None
if ROI==None:
ROI = rois
if ROI=='all':
count_dict = {}
ROI = rois
else:
if not isinstance(ROI,list):
ROI = [ROI]
for r in ROI:
if r in rois:
roi_count = counts[rois.index(r)]
if count_dict!=None:
count_dict[r] = roi_count
else:
count += roi_count
else:
cmd = ['3dBrickStat', '-slow', '-count', '-non-zero', str(dset)]
count = int(subprocess.check_output(cmd,stderr=devnull).strip())
if count_dict:
return count_dict
return count | [
"def",
"voxel_count",
"(",
"dset",
",",
"p",
"=",
"None",
",",
"positive_only",
"=",
"False",
",",
"mask",
"=",
"None",
",",
"ROI",
"=",
"None",
")",
":",
"if",
"p",
":",
"dset",
"=",
"nl",
".",
"thresh",
"(",
"dset",
",",
"p",
",",
"positive_only",
")",
"else",
":",
"if",
"positive_only",
":",
"dset",
"=",
"nl",
".",
"calc",
"(",
"dset",
",",
"'step(a)'",
")",
"count",
"=",
"0",
"devnull",
"=",
"open",
"(",
"os",
".",
"devnull",
",",
"\"w\"",
")",
"if",
"mask",
":",
"cmd",
"=",
"[",
"'3dROIstats'",
",",
"'-1Dformat'",
",",
"'-nomeanout'",
",",
"'-nobriklab'",
",",
"'-nzvoxels'",
"]",
"cmd",
"+=",
"[",
"'-mask'",
",",
"str",
"(",
"mask",
")",
",",
"str",
"(",
"dset",
")",
"]",
"out",
"=",
"subprocess",
".",
"check_output",
"(",
"cmd",
",",
"stderr",
"=",
"devnull",
")",
".",
"split",
"(",
"'\\n'",
")",
"if",
"len",
"(",
"out",
")",
"<",
"4",
":",
"return",
"0",
"rois",
"=",
"[",
"int",
"(",
"x",
".",
"replace",
"(",
"'NZcount_'",
",",
"''",
")",
")",
"for",
"x",
"in",
"out",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"[",
"1",
":",
"]",
".",
"split",
"(",
")",
"]",
"counts",
"=",
"[",
"int",
"(",
"x",
".",
"replace",
"(",
"'NZcount_'",
",",
"''",
")",
")",
"for",
"x",
"in",
"out",
"[",
"3",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"]",
"count_dict",
"=",
"None",
"if",
"ROI",
"==",
"None",
":",
"ROI",
"=",
"rois",
"if",
"ROI",
"==",
"'all'",
":",
"count_dict",
"=",
"{",
"}",
"ROI",
"=",
"rois",
"else",
":",
"if",
"not",
"isinstance",
"(",
"ROI",
",",
"list",
")",
":",
"ROI",
"=",
"[",
"ROI",
"]",
"for",
"r",
"in",
"ROI",
":",
"if",
"r",
"in",
"rois",
":",
"roi_count",
"=",
"counts",
"[",
"rois",
".",
"index",
"(",
"r",
")",
"]",
"if",
"count_dict",
"!=",
"None",
":",
"count_dict",
"[",
"r",
"]",
"=",
"roi_count",
"else",
":",
"count",
"+=",
"roi_count",
"else",
":",
"cmd",
"=",
"[",
"'3dBrickStat'",
",",
"'-slow'",
",",
"'-count'",
",",
"'-non-zero'",
",",
"str",
"(",
"dset",
")",
"]",
"count",
"=",
"int",
"(",
"subprocess",
".",
"check_output",
"(",
"cmd",
",",
"stderr",
"=",
"devnull",
")",
".",
"strip",
"(",
")",
")",
"if",
"count_dict",
":",
"return",
"count_dict",
"return",
"count"
]
| returns the number of non-zero voxels
:p: threshold the dataset at the given *p*-value, then count
:positive_only: only count positive values
:mask: count within the given mask
:ROI: only use the ROI with the given value (or list of values) within the mask
if ROI is 'all' then return the voxel count of each ROI
as a dictionary | [
"returns",
"the",
"number",
"of",
"non",
"-",
"zero",
"voxels"
]
| python | train | 36.333333 |
Cymmetria/honeycomb | honeycomb/integrationmanager/tasks.py | https://github.com/Cymmetria/honeycomb/blob/33ea91b5cf675000e4e85dd02efe580ea6e95c86/honeycomb/integrationmanager/tasks.py#L170-L188 | def poll_integration_information_for_waiting_integration_alerts():
"""poll_integration_information_for_waiting_integration_alerts."""
if not polling_integration_alerts:
return
logger.debug("Polling information for waiting integration alerts")
for integration_alert in polling_integration_alerts:
configured_integration = integration_alert.configured_integration
integration = configured_integration.integration
polling_duration = integration.polling_duration
if get_current_datetime_utc() - integration_alert.send_time > polling_duration:
logger.debug("Polling duration expired for integration alert %s", integration_alert)
integration_alert.status = IntegrationAlertStatuses.ERROR_POLLING.name
else:
integration_alert.status = IntegrationAlertStatuses.IN_POLLING.name
poll_integration_alert_data(integration_alert) | [
"def",
"poll_integration_information_for_waiting_integration_alerts",
"(",
")",
":",
"if",
"not",
"polling_integration_alerts",
":",
"return",
"logger",
".",
"debug",
"(",
"\"Polling information for waiting integration alerts\"",
")",
"for",
"integration_alert",
"in",
"polling_integration_alerts",
":",
"configured_integration",
"=",
"integration_alert",
".",
"configured_integration",
"integration",
"=",
"configured_integration",
".",
"integration",
"polling_duration",
"=",
"integration",
".",
"polling_duration",
"if",
"get_current_datetime_utc",
"(",
")",
"-",
"integration_alert",
".",
"send_time",
">",
"polling_duration",
":",
"logger",
".",
"debug",
"(",
"\"Polling duration expired for integration alert %s\"",
",",
"integration_alert",
")",
"integration_alert",
".",
"status",
"=",
"IntegrationAlertStatuses",
".",
"ERROR_POLLING",
".",
"name",
"else",
":",
"integration_alert",
".",
"status",
"=",
"IntegrationAlertStatuses",
".",
"IN_POLLING",
".",
"name",
"poll_integration_alert_data",
"(",
"integration_alert",
")"
]
| poll_integration_information_for_waiting_integration_alerts. | [
"poll_integration_information_for_waiting_integration_alerts",
"."
]
| python | train | 48.052632 |
minio/minio-py | minio/api.py | https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/api.py#L1768-L1789 | def _get_bucket_region(self, bucket_name):
"""
Get region based on the bucket name.
:param bucket_name: Bucket name for which region will be fetched.
:return: Region of bucket name.
"""
# Region set in constructor, return right here.
if self._region:
return self._region
# get bucket location for Amazon S3.
region = 'us-east-1' # default to US standard.
if bucket_name in self._region_map:
region = self._region_map[bucket_name]
else:
region = self._get_bucket_location(bucket_name)
self._region_map[bucket_name] = region
# Success.
return region | [
"def",
"_get_bucket_region",
"(",
"self",
",",
"bucket_name",
")",
":",
"# Region set in constructor, return right here.",
"if",
"self",
".",
"_region",
":",
"return",
"self",
".",
"_region",
"# get bucket location for Amazon S3.",
"region",
"=",
"'us-east-1'",
"# default to US standard.",
"if",
"bucket_name",
"in",
"self",
".",
"_region_map",
":",
"region",
"=",
"self",
".",
"_region_map",
"[",
"bucket_name",
"]",
"else",
":",
"region",
"=",
"self",
".",
"_get_bucket_location",
"(",
"bucket_name",
")",
"self",
".",
"_region_map",
"[",
"bucket_name",
"]",
"=",
"region",
"# Success.",
"return",
"region"
]
| Get region based on the bucket name.
:param bucket_name: Bucket name for which region will be fetched.
:return: Region of bucket name. | [
"Get",
"region",
"based",
"on",
"the",
"bucket",
"name",
"."
]
| python | train | 31.045455 |
saltstack/salt | salt/modules/inspector.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspector.py#L230-L247 | def snapshots():
'''
List current description snapshots.
CLI Example:
.. code-block:: bash
salt myminion inspector.snapshots
'''
try:
return _("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile'])).db.list()
except InspectorSnapshotException as err:
raise CommandExecutionError(err)
except Exception as err:
log.error(_get_error_message(err))
raise Exception(err) | [
"def",
"snapshots",
"(",
")",
":",
"try",
":",
"return",
"_",
"(",
"\"collector\"",
")",
".",
"Inspector",
"(",
"cachedir",
"=",
"__opts__",
"[",
"'cachedir'",
"]",
",",
"piddir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"__opts__",
"[",
"'pidfile'",
"]",
")",
")",
".",
"db",
".",
"list",
"(",
")",
"except",
"InspectorSnapshotException",
"as",
"err",
":",
"raise",
"CommandExecutionError",
"(",
"err",
")",
"except",
"Exception",
"as",
"err",
":",
"log",
".",
"error",
"(",
"_get_error_message",
"(",
"err",
")",
")",
"raise",
"Exception",
"(",
"err",
")"
]
| List current description snapshots.
CLI Example:
.. code-block:: bash
salt myminion inspector.snapshots | [
"List",
"current",
"description",
"snapshots",
"."
]
| python | train | 28.055556 |
Cue/scales | src/greplin/scales/formats.py | https://github.com/Cue/scales/blob/0aced26eb050ceb98ee9d5d6cdca8db448666986/src/greplin/scales/formats.py#L91-L96 | def htmlFormat(output, pathParts = (), statDict = None, query = None):
"""Formats as HTML, writing to the given object."""
statDict = statDict or scales.getStats()
if query:
statDict = runQuery(statDict, query)
_htmlRenderDict(pathParts, statDict, output) | [
"def",
"htmlFormat",
"(",
"output",
",",
"pathParts",
"=",
"(",
")",
",",
"statDict",
"=",
"None",
",",
"query",
"=",
"None",
")",
":",
"statDict",
"=",
"statDict",
"or",
"scales",
".",
"getStats",
"(",
")",
"if",
"query",
":",
"statDict",
"=",
"runQuery",
"(",
"statDict",
",",
"query",
")",
"_htmlRenderDict",
"(",
"pathParts",
",",
"statDict",
",",
"output",
")"
]
| Formats as HTML, writing to the given object. | [
"Formats",
"as",
"HTML",
"writing",
"to",
"the",
"given",
"object",
"."
]
| python | train | 43.666667 |
HumanCellAtlas/dcp-cli | hca/upload/upload_config.py | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/upload/upload_config.py#L85-L95 | def area_uri(self, area_uuid):
"""
Return the URI for an Upload Area
:param area_uuid: UUID of area for which we want URI
:return: Upload Area URI object
:rtype: UploadAreaURI
:raises UploadException: if area does not exist
"""
if area_uuid not in self.areas:
raise UploadException("I don't know about area {uuid}".format(uuid=area_uuid))
return UploadAreaURI(self._config.upload.areas[area_uuid]['uri']) | [
"def",
"area_uri",
"(",
"self",
",",
"area_uuid",
")",
":",
"if",
"area_uuid",
"not",
"in",
"self",
".",
"areas",
":",
"raise",
"UploadException",
"(",
"\"I don't know about area {uuid}\"",
".",
"format",
"(",
"uuid",
"=",
"area_uuid",
")",
")",
"return",
"UploadAreaURI",
"(",
"self",
".",
"_config",
".",
"upload",
".",
"areas",
"[",
"area_uuid",
"]",
"[",
"'uri'",
"]",
")"
]
| Return the URI for an Upload Area
:param area_uuid: UUID of area for which we want URI
:return: Upload Area URI object
:rtype: UploadAreaURI
:raises UploadException: if area does not exist | [
"Return",
"the",
"URI",
"for",
"an",
"Upload",
"Area",
":",
"param",
"area_uuid",
":",
"UUID",
"of",
"area",
"for",
"which",
"we",
"want",
"URI",
":",
"return",
":",
"Upload",
"Area",
"URI",
"object",
":",
"rtype",
":",
"UploadAreaURI",
":",
"raises",
"UploadException",
":",
"if",
"area",
"does",
"not",
"exist"
]
| python | train | 43.454545 |
ray-project/ray | python/ray/node.py | https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/node.py#L611-L619 | def kill_log_monitor(self, check_alive=True):
"""Kill the log monitor.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
"""
self._kill_process_type(
ray_constants.PROCESS_TYPE_LOG_MONITOR, check_alive=check_alive) | [
"def",
"kill_log_monitor",
"(",
"self",
",",
"check_alive",
"=",
"True",
")",
":",
"self",
".",
"_kill_process_type",
"(",
"ray_constants",
".",
"PROCESS_TYPE_LOG_MONITOR",
",",
"check_alive",
"=",
"check_alive",
")"
]
| Kill the log monitor.
Args:
check_alive (bool): Raise an exception if the process was already
dead. | [
"Kill",
"the",
"log",
"monitor",
"."
]
| python | train | 34.111111 |
scheibler/khard | khard/khard.py | https://github.com/scheibler/khard/blob/0f69430c2680f1ff5f073a977a3c5b753b96cc17/khard/khard.py#L26-L38 | def write_temp_file(text=""):
"""Create a new temporary file and write some initial text to it.
:param text: the text to write to the temp file
:type text: str
:returns: the file name of the newly created temp file
:rtype: str
"""
with NamedTemporaryFile(mode='w+t', suffix='.yml', delete=False) \
as tempfile:
tempfile.write(text)
return tempfile.name | [
"def",
"write_temp_file",
"(",
"text",
"=",
"\"\"",
")",
":",
"with",
"NamedTemporaryFile",
"(",
"mode",
"=",
"'w+t'",
",",
"suffix",
"=",
"'.yml'",
",",
"delete",
"=",
"False",
")",
"as",
"tempfile",
":",
"tempfile",
".",
"write",
"(",
"text",
")",
"return",
"tempfile",
".",
"name"
]
| Create a new temporary file and write some initial text to it.
:param text: the text to write to the temp file
:type text: str
:returns: the file name of the newly created temp file
:rtype: str | [
"Create",
"a",
"new",
"temporary",
"file",
"and",
"write",
"some",
"initial",
"text",
"to",
"it",
"."
]
| python | test | 30.384615 |
gtaylor/python-route53 | route53/hosted_zone.py | https://github.com/gtaylor/python-route53/blob/b9fc7e258a79551c9ed61e4a71668b7f06f9e774/route53/hosted_zone.py#L220-L250 | def create_aaaa_record(self, name, values, ttl=60, weight=None, region=None,
set_identifier=None):
"""
Creates an AAAA record attached to this hosted zone.
:param str name: The fully qualified name of the record to add.
:param list values: A list of value strings for the record.
:keyword int ttl: The time-to-live of the record (in seconds).
:keyword int weight: *For weighted record sets only*. Among resource record
sets that have the same combination of DNS name and type, a value
that determines what portion of traffic for the current resource
record set is routed to the associated location. Ranges from 0-255.
:keyword str region: *For latency-based record sets*. The Amazon EC2 region
where the resource that is specified in this resource record set
resides.
:keyword str set_identifier: *For weighted and latency resource record
sets only*. An identifier that differentiates among multiple
resource record sets that have the same combination of DNS name
and type. 1-128 chars.
:rtype: tuple
:returns: A tuple in the form of ``(rrset, change_info)``, where
``rrset`` is the newly created AAAAResourceRecordSet instance.
"""
self._halt_if_already_deleted()
# Grab the params/kwargs here for brevity's sake.
values = locals()
del values['self']
return self._add_record(AAAAResourceRecordSet, **values) | [
"def",
"create_aaaa_record",
"(",
"self",
",",
"name",
",",
"values",
",",
"ttl",
"=",
"60",
",",
"weight",
"=",
"None",
",",
"region",
"=",
"None",
",",
"set_identifier",
"=",
"None",
")",
":",
"self",
".",
"_halt_if_already_deleted",
"(",
")",
"# Grab the params/kwargs here for brevity's sake.",
"values",
"=",
"locals",
"(",
")",
"del",
"values",
"[",
"'self'",
"]",
"return",
"self",
".",
"_add_record",
"(",
"AAAAResourceRecordSet",
",",
"*",
"*",
"values",
")"
]
| Creates an AAAA record attached to this hosted zone.
:param str name: The fully qualified name of the record to add.
:param list values: A list of value strings for the record.
:keyword int ttl: The time-to-live of the record (in seconds).
:keyword int weight: *For weighted record sets only*. Among resource record
sets that have the same combination of DNS name and type, a value
that determines what portion of traffic for the current resource
record set is routed to the associated location. Ranges from 0-255.
:keyword str region: *For latency-based record sets*. The Amazon EC2 region
where the resource that is specified in this resource record set
resides.
:keyword str set_identifier: *For weighted and latency resource record
sets only*. An identifier that differentiates among multiple
resource record sets that have the same combination of DNS name
and type. 1-128 chars.
:rtype: tuple
:returns: A tuple in the form of ``(rrset, change_info)``, where
``rrset`` is the newly created AAAAResourceRecordSet instance. | [
"Creates",
"an",
"AAAA",
"record",
"attached",
"to",
"this",
"hosted",
"zone",
"."
]
| python | test | 49.741935 |
pybel/pybel | src/pybel/manager/cache_manager.py | https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/manager/cache_manager.py#L528-L539 | def get_graph_by_ids(self, network_ids: List[int]) -> BELGraph:
"""Get a combine BEL Graph from a list of network identifiers."""
if len(network_ids) == 1:
return self.get_graph_by_id(network_ids[0])
log.debug('getting graph by identifiers: %s', network_ids)
graphs = self.get_graphs_by_ids(network_ids)
log.debug('getting union of graphs: %s', network_ids)
rv = union(graphs)
return rv | [
"def",
"get_graph_by_ids",
"(",
"self",
",",
"network_ids",
":",
"List",
"[",
"int",
"]",
")",
"->",
"BELGraph",
":",
"if",
"len",
"(",
"network_ids",
")",
"==",
"1",
":",
"return",
"self",
".",
"get_graph_by_id",
"(",
"network_ids",
"[",
"0",
"]",
")",
"log",
".",
"debug",
"(",
"'getting graph by identifiers: %s'",
",",
"network_ids",
")",
"graphs",
"=",
"self",
".",
"get_graphs_by_ids",
"(",
"network_ids",
")",
"log",
".",
"debug",
"(",
"'getting union of graphs: %s'",
",",
"network_ids",
")",
"rv",
"=",
"union",
"(",
"graphs",
")",
"return",
"rv"
]
| Get a combined BEL Graph from a list of network identifiers. | [
"Get",
"a",
"combine",
"BEL",
"Graph",
"from",
"a",
"list",
"of",
"network",
"identifiers",
"."
]
| python | train | 37.166667 |
lgiordani/dictregister | dictregister/dictregister.py | https://github.com/lgiordani/dictregister/blob/da3d8110d238c7b518811cb7bce65fad6f5cfc19/dictregister/dictregister.py#L50-L78 | def kremove(self, key, value=None):
"""Removes the given key/value from all elements.
If value is not specified, the whole key is removed.
If value is not None and the key is present but with a
different value, or if the key is not present, silently passes.
"""
for item in self:
if value is None:
# Just pop the key if present,
# otherwise return None
# (shortcut to ignore the exception)
item.pop(key, None)
else:
try:
# Use the key as a set
item[key].remove(value)
# If the set contains a single element
# just store the latter
if len(item[key]) == 1:
item[key] = item[key].pop()
except KeyError:
# This happens when the item
# does not contain the key
pass
except AttributeError:
# This happens when the key is not a set
# and shall be removed only if values match
if item[key] == value:
item.pop(key) | [
"def",
"kremove",
"(",
"self",
",",
"key",
",",
"value",
"=",
"None",
")",
":",
"for",
"item",
"in",
"self",
":",
"if",
"value",
"is",
"None",
":",
"# Just pop the key if present,",
"# otherwise return None",
"# (shortcut to ignore the exception)",
"item",
".",
"pop",
"(",
"key",
",",
"None",
")",
"else",
":",
"try",
":",
"# Use the key as a set",
"item",
"[",
"key",
"]",
".",
"remove",
"(",
"value",
")",
"# If the set contains a single element",
"# just store the latter",
"if",
"len",
"(",
"item",
"[",
"key",
"]",
")",
"==",
"1",
":",
"item",
"[",
"key",
"]",
"=",
"item",
"[",
"key",
"]",
".",
"pop",
"(",
")",
"except",
"KeyError",
":",
"# This happens when the item",
"# does not contain the key",
"pass",
"except",
"AttributeError",
":",
"# This happens when the key is not a set",
"# and shall be removed only if values match",
"if",
"item",
"[",
"key",
"]",
"==",
"value",
":",
"item",
".",
"pop",
"(",
"key",
")"
]
| Removes the given key/value from all elements.
If value is not specified, the whole key is removed.
If value is not None and the key is present but with a
different value, or if the key is not present, silently passes. | [
"Removes",
"the",
"given",
"key",
"/",
"value",
"from",
"all",
"elements",
".",
"If",
"value",
"is",
"not",
"specified",
"the",
"whole",
"key",
"is",
"removed",
".",
"If",
"value",
"is",
"not",
"None",
"and",
"the",
"key",
"is",
"present",
"but",
"with",
"a",
"different",
"value",
"or",
"if",
"the",
"key",
"is",
"not",
"present",
"silently",
"passes",
"."
]
| python | train | 42.37931 |
Jajcus/pyxmpp2 | pyxmpp2/sasl/core.py | https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/sasl/core.py#L466-L494 | def sasl_mechanism(name, secure, preference = 50):
"""Class decorator generator for `ClientAuthenticator` or
`ServerAuthenticator` subclasses. Adds the class to the pyxmpp.sasl
mechanism registry.
:Parameters:
- `name`: SASL mechanism name
- `secure`: if the mechanisms can be considered secure - `True`
if it can be used over plain-text channel
- `preference`: mechanism preference level (the higher the better)
:Types:
- `name`: `unicode`
- `secure`: `bool`
- `preference`: `int`
"""
# pylint: disable-msg=W0212
def decorator(klass):
"""The decorator."""
klass._pyxmpp_sasl_secure = secure
klass._pyxmpp_sasl_preference = preference
if issubclass(klass, ClientAuthenticator):
_register_client_authenticator(klass, name)
elif issubclass(klass, ServerAuthenticator):
_register_server_authenticator(klass, name)
else:
raise TypeError("Not a ClientAuthenticator"
" or ServerAuthenticator class")
return klass
return decorator | [
"def",
"sasl_mechanism",
"(",
"name",
",",
"secure",
",",
"preference",
"=",
"50",
")",
":",
"# pylint: disable-msg=W0212",
"def",
"decorator",
"(",
"klass",
")",
":",
"\"\"\"The decorator.\"\"\"",
"klass",
".",
"_pyxmpp_sasl_secure",
"=",
"secure",
"klass",
".",
"_pyxmpp_sasl_preference",
"=",
"preference",
"if",
"issubclass",
"(",
"klass",
",",
"ClientAuthenticator",
")",
":",
"_register_client_authenticator",
"(",
"klass",
",",
"name",
")",
"elif",
"issubclass",
"(",
"klass",
",",
"ServerAuthenticator",
")",
":",
"_register_server_authenticator",
"(",
"klass",
",",
"name",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Not a ClientAuthenticator\"",
"\" or ServerAuthenticator class\"",
")",
"return",
"klass",
"return",
"decorator"
]
| Class decorator generator for `ClientAuthenticator` or
`ServerAuthenticator` subclasses. Adds the class to the pyxmpp.sasl
mechanism registry.
:Parameters:
- `name`: SASL mechanism name
- `secure`: if the mechanims can be considered secure - `True`
if it can be used over plain-text channel
- `preference`: mechanism preference level (the higher the better)
:Types:
- `name`: `unicode`
- `secure`: `bool`
- `preference`: `int` | [
"Class",
"decorator",
"generator",
"for",
"ClientAuthenticator",
"or",
"ServerAuthenticator",
"subclasses",
".",
"Adds",
"the",
"class",
"to",
"the",
"pyxmpp",
".",
"sasl",
"mechanism",
"registry",
"."
]
| python | valid | 38.793103 |
avinassh/haxor | hackernews/__init__.py | https://github.com/avinassh/haxor/blob/71dbecf87531f7a24bb39c736d53127427aaca84/hackernews/__init__.py#L327-L342 | def show_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
"""
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories | [
"def",
"show_stories",
"(",
"self",
",",
"raw",
"=",
"False",
",",
"limit",
"=",
"None",
")",
":",
"show_stories",
"=",
"self",
".",
"_get_stories",
"(",
"'showstories'",
",",
"limit",
")",
"if",
"raw",
":",
"show_stories",
"=",
"[",
"story",
".",
"raw",
"for",
"story",
"in",
"show_stories",
"]",
"return",
"show_stories"
]
| Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories. | [
"Returns",
"list",
"of",
"item",
"ids",
"of",
"latest",
"Show",
"HN",
"stories"
]
| python | train | 34.375 |
numenta/htmresearch | htmresearch/algorithms/faulty_temporal_memory.py | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/faulty_temporal_memory.py#L81-L122 | def burstColumn(self, column, columnMatchingSegments, prevActiveCells,
prevWinnerCells, learn):
"""
Activates all of the cells in an unpredicted active column, chooses a winner
cell, and, if learning is turned on, learns on one segment, growing a new
segment if necessary.
@param column (int)
Index of bursting column.
@param columnMatchingSegments (iter)
Matching segments in this column, or None if there aren't any.
@param prevActiveCells (list)
Active cells in `t-1`.
@param prevWinnerCells (list)
Winner cells in `t-1`.
@param learn (bool)
Whether or not learning is enabled.
@return (tuple) Contains:
`cells` (iter),
`winnerCell` (int),
"""
start = self.cellsPerColumn * column
# Strip out destroyed cells before passing along to base _burstColumn()
cellsForColumn = [cellIdx
for cellIdx
in xrange(start, start + self.cellsPerColumn)
if cellIdx not in self.deadCells]
return self._burstColumn(
self.connections, self._random, self.lastUsedIterationForSegment, column,
columnMatchingSegments, prevActiveCells, prevWinnerCells, cellsForColumn,
self.numActivePotentialSynapsesForSegment, self.iteration,
self.maxNewSynapseCount, self.initialPermanence, self.permanenceIncrement,
self.permanenceDecrement, self.maxSegmentsPerCell,
self.maxSynapsesPerSegment, learn) | [
"def",
"burstColumn",
"(",
"self",
",",
"column",
",",
"columnMatchingSegments",
",",
"prevActiveCells",
",",
"prevWinnerCells",
",",
"learn",
")",
":",
"start",
"=",
"self",
".",
"cellsPerColumn",
"*",
"column",
"# Strip out destroyed cells before passing along to base _burstColumn()",
"cellsForColumn",
"=",
"[",
"cellIdx",
"for",
"cellIdx",
"in",
"xrange",
"(",
"start",
",",
"start",
"+",
"self",
".",
"cellsPerColumn",
")",
"if",
"cellIdx",
"not",
"in",
"self",
".",
"deadCells",
"]",
"return",
"self",
".",
"_burstColumn",
"(",
"self",
".",
"connections",
",",
"self",
".",
"_random",
",",
"self",
".",
"lastUsedIterationForSegment",
",",
"column",
",",
"columnMatchingSegments",
",",
"prevActiveCells",
",",
"prevWinnerCells",
",",
"cellsForColumn",
",",
"self",
".",
"numActivePotentialSynapsesForSegment",
",",
"self",
".",
"iteration",
",",
"self",
".",
"maxNewSynapseCount",
",",
"self",
".",
"initialPermanence",
",",
"self",
".",
"permanenceIncrement",
",",
"self",
".",
"permanenceDecrement",
",",
"self",
".",
"maxSegmentsPerCell",
",",
"self",
".",
"maxSynapsesPerSegment",
",",
"learn",
")"
]
| Activates all of the cells in an unpredicted active column, chooses a winner
cell, and, if learning is turned on, learns on one segment, growing a new
segment if necessary.
@param column (int)
Index of bursting column.
@param columnMatchingSegments (iter)
Matching segments in this column, or None if there aren't any.
@param prevActiveCells (list)
Active cells in `t-1`.
@param prevWinnerCells (list)
Winner cells in `t-1`.
@param learn (bool)
Whether or not learning is enabled.
@return (tuple) Contains:
`cells` (iter),
`winnerCell` (int), | [
"Activates",
"all",
"of",
"the",
"cells",
"in",
"an",
"unpredicted",
"active",
"column",
"chooses",
"a",
"winner",
"cell",
"and",
"if",
"learning",
"is",
"turned",
"on",
"learns",
"on",
"one",
"segment",
"growing",
"a",
"new",
"segment",
"if",
"necessary",
"."
]
| python | train | 35.47619 |
saltstack/salt | salt/cloud/clouds/ec2.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/ec2.py#L1350-L1362 | def get_provider(vm_=None):
'''
Extract the provider name from vm
'''
if vm_ is None:
provider = __active_provider_name__ or 'ec2'
else:
provider = vm_.get('provider', 'ec2')
if ':' in provider:
prov_comps = provider.split(':')
provider = prov_comps[0]
return provider | [
"def",
"get_provider",
"(",
"vm_",
"=",
"None",
")",
":",
"if",
"vm_",
"is",
"None",
":",
"provider",
"=",
"__active_provider_name__",
"or",
"'ec2'",
"else",
":",
"provider",
"=",
"vm_",
".",
"get",
"(",
"'provider'",
",",
"'ec2'",
")",
"if",
"':'",
"in",
"provider",
":",
"prov_comps",
"=",
"provider",
".",
"split",
"(",
"':'",
")",
"provider",
"=",
"prov_comps",
"[",
"0",
"]",
"return",
"provider"
]
| Extract the provider name from vm | [
"Extract",
"the",
"provider",
"name",
"from",
"vm"
]
| python | train | 24.384615 |
bspaans/python-mingus | mingus/containers/composition.py | https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/composition.py#L67-L73 | def add_note(self, note):
"""Add a note to the selected tracks.
Everything container.Track supports in __add__ is accepted.
"""
for n in self.selected_tracks:
self.tracks[n] + note | [
"def",
"add_note",
"(",
"self",
",",
"note",
")",
":",
"for",
"n",
"in",
"self",
".",
"selected_tracks",
":",
"self",
".",
"tracks",
"[",
"n",
"]",
"+",
"note"
]
| Add a note to the selected tracks.
Everything container.Track supports in __add__ is accepted. | [
"Add",
"a",
"note",
"to",
"the",
"selected",
"tracks",
"."
]
| python | train | 31.285714 |
ktdreyer/txkoji | txkoji/connection.py | https://github.com/ktdreyer/txkoji/blob/a7de380f29f745bf11730b27217208f6d4da7733/txkoji/connection.py#L307-L324 | def getTaskInfo(self, task_id, **kwargs):
"""
Load all information about a task and return a custom Task class.
Calls "getTaskInfo" XML-RPC (with request=True to get the full
information.)
:param task_id: ``int``, for example 12345
:returns: deferred that when fired returns a Task (Munch, dict-like)
object representing this Koji task, or none if no task was
found.
"""
kwargs['request'] = True
taskinfo = yield self.call('getTaskInfo', task_id, **kwargs)
task = Task.fromDict(taskinfo)
if task:
task.connection = self
defer.returnValue(task) | [
"def",
"getTaskInfo",
"(",
"self",
",",
"task_id",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'request'",
"]",
"=",
"True",
"taskinfo",
"=",
"yield",
"self",
".",
"call",
"(",
"'getTaskInfo'",
",",
"task_id",
",",
"*",
"*",
"kwargs",
")",
"task",
"=",
"Task",
".",
"fromDict",
"(",
"taskinfo",
")",
"if",
"task",
":",
"task",
".",
"connection",
"=",
"self",
"defer",
".",
"returnValue",
"(",
"task",
")"
]
| Load all information about a task and return a custom Task class.
Calls "getTaskInfo" XML-RPC (with request=True to get the full
information.)
:param task_id: ``int``, for example 12345
:returns: deferred that when fired returns a Task (Munch, dict-like)
object representing this Koji task, or none if no task was
found. | [
"Load",
"all",
"information",
"about",
"a",
"task",
"and",
"return",
"a",
"custom",
"Task",
"class",
"."
]
| python | train | 37.333333 |
Knio/dominate | dominate/dom_tag.py | https://github.com/Knio/dominate/blob/1eb88f9fd797658eef83568a548e2ef9b546807d/dominate/dom_tag.py#L434-L445 | def attr(*args, **kwargs):
'''
Set attributes on the current active tag context
'''
ctx = dom_tag._with_contexts[_get_thread_context()]
if ctx and ctx[-1]:
dicts = args + (kwargs,)
for d in dicts:
for attr, value in d.items():
ctx[-1].tag.set_attribute(*dom_tag.clean_pair(attr, value))
else:
raise ValueError('not in a tag context') | [
"def",
"attr",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"ctx",
"=",
"dom_tag",
".",
"_with_contexts",
"[",
"_get_thread_context",
"(",
")",
"]",
"if",
"ctx",
"and",
"ctx",
"[",
"-",
"1",
"]",
":",
"dicts",
"=",
"args",
"+",
"(",
"kwargs",
",",
")",
"for",
"d",
"in",
"dicts",
":",
"for",
"attr",
",",
"value",
"in",
"d",
".",
"items",
"(",
")",
":",
"ctx",
"[",
"-",
"1",
"]",
".",
"tag",
".",
"set_attribute",
"(",
"*",
"dom_tag",
".",
"clean_pair",
"(",
"attr",
",",
"value",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'not in a tag context'",
")"
]
| Set attributes on the current active tag context | [
"Set",
"attributes",
"on",
"the",
"current",
"active",
"tag",
"context"
]
| python | valid | 30 |
tanghaibao/goatools | goatools/rpt/rpt_lev_depth.py | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/rpt/rpt_lev_depth.py#L98-L101 | def write_summary_cnts_goobjs(self, goobjs):
"""Write summary of level and depth counts for active GO Terms."""
cnts = self.get_cnts_levels_depths_recs(goobjs)
self._write_summary_cnts(cnts) | [
"def",
"write_summary_cnts_goobjs",
"(",
"self",
",",
"goobjs",
")",
":",
"cnts",
"=",
"self",
".",
"get_cnts_levels_depths_recs",
"(",
"goobjs",
")",
"self",
".",
"_write_summary_cnts",
"(",
"cnts",
")"
]
| Write summary of level and depth counts for active GO Terms. | [
"Write",
"summary",
"of",
"level",
"and",
"depth",
"counts",
"for",
"active",
"GO",
"Terms",
"."
]
| python | train | 52.75 |
datamachine/twx.botapi | twx/botapi/botapi.py | https://github.com/datamachine/twx.botapi/blob/c85184da738169e8f9d6d8e62970540f427c486e/twx/botapi/botapi.py#L3047-L3098 | def edit_message_live_location(latitude, longitude,
chat_id=None, message_id=None, inline_message_id=None, reply_markup=None,
**kwargs):
"""
Use this method to edit live location messages sent by the bot or via the bot (for inline bots).
A location can be edited until its live_period expires or editing is explicitly disabled by a call
to stopMessageLiveLocation.
On success, if the edited message was sent by the bot, the edited Message is returned, otherwise True is returned.
:param latitude: Latitude of location.
:param longitude: Longitude of location.
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param message_id: Required if inline_message_id is not specified. Identifier of the sent message
:param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message
:param reply_markup: Additional interface options. A JSON-serialized object for a
custom reply keyboard, instructions to hide keyboard or to
force a reply from the user.
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int or str
:type latitude: float
:type longitude: float
:type message_id: Integer
:type inline_message_id: string
:type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply
:returns: On success, if the edited message was sent by the bot, the edited Message is returned, otherwise True is returned.
:rtype: TelegramBotRPCRequest or Bool
"""
if not chat_id and not message_id and not inline_message_id:
raise ValueError("Must specify chat_id and message_id or inline_message_id")
if (chat_id and not message_id) or (not chat_id and message_id):
raise ValueError("Must specify chat_id and message_id together")
# required args
params = dict(
latitude=latitude,
longitude=longitude
)
# optional args
params.update(
_clean_params(
chat_id=chat_id,
message_id=message_id,
inline_message_id=inline_message_id,
reply_markup=reply_markup,
)
)
return TelegramBotRPCRequest('editMessageLiveLocation', params=params, on_result=Message.from_result, **kwargs) | [
"def",
"edit_message_live_location",
"(",
"latitude",
",",
"longitude",
",",
"chat_id",
"=",
"None",
",",
"message_id",
"=",
"None",
",",
"inline_message_id",
"=",
"None",
",",
"reply_markup",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"chat_id",
"and",
"not",
"message_id",
"and",
"not",
"inline_message_id",
":",
"raise",
"ValueError",
"(",
"\"Must specify chat_id and message_id or inline_message_id\"",
")",
"if",
"(",
"chat_id",
"and",
"not",
"message_id",
")",
"or",
"(",
"not",
"chat_id",
"and",
"message_id",
")",
":",
"raise",
"ValueError",
"(",
"\"Must specify chat_id and message_id together\"",
")",
"# required args",
"params",
"=",
"dict",
"(",
"latitude",
"=",
"latitude",
",",
"longitude",
"=",
"longitude",
")",
"# optional args",
"params",
".",
"update",
"(",
"_clean_params",
"(",
"chat_id",
"=",
"chat_id",
",",
"message_id",
"=",
"message_id",
",",
"inline_message_id",
"=",
"inline_message_id",
",",
"reply_markup",
"=",
"reply_markup",
",",
")",
")",
"return",
"TelegramBotRPCRequest",
"(",
"'editMessageLiveLocation'",
",",
"params",
"=",
"params",
",",
"on_result",
"=",
"Message",
".",
"from_result",
",",
"*",
"*",
"kwargs",
")"
]
| Use this method to edit live location messages sent by the bot or via the bot (for inline bots).
A location can be edited until its live_period expires or editing is explicitly disabled by a call
to stopMessageLiveLocation.
On success, if the edited message was sent by the bot, the edited Message is returned, otherwise True is returned.
:param latitude: Latitude of location.
:param longitude: Longitude of location.
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param message_id: Required if inline_message_id is not specified. Identifier of the sent message
:param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message
:param reply_markup: Additional interface options. A JSON-serialized object for a
custom reply keyboard, instructions to hide keyboard or to
force a reply from the user.
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int or str
:type latitude: float
:type longitude: float
:type message_id: Integer
:type inline_message_id: string
:type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply
:returns: On success, if the edited message was sent by the bot, the edited Message is returned, otherwise True is returned.
:rtype: TelegramBotRPCRequest or Bool | [
"Use",
"this",
"method",
"to",
"edit",
"live",
"location",
"messages",
"sent",
"by",
"the",
"bot",
"or",
"via",
"the",
"bot",
"(",
"for",
"inline",
"bots",
")",
".",
"A",
"location",
"can",
"be",
"edited",
"until",
"its",
"live_period",
"expires",
"or",
"editing",
"is",
"explicitly",
"disabled",
"by",
"a",
"call",
"to",
"stopMessageLiveLocation",
".",
"On",
"success",
"if",
"the",
"edited",
"message",
"was",
"sent",
"by",
"the",
"bot",
"the",
"edited",
"Message",
"is",
"returned",
"otherwise",
"True",
"is",
"returned",
"."
]
| python | train | 46.115385 |
foremast/foremast | src/foremast/iam/destroy_iam/destroy_iam.py | https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/iam/destroy_iam/destroy_iam.py#L28-L108 | def destroy_iam(app='', env='dev', **_):
"""Destroy IAM Resources.
Args:
app (str): Spinnaker Application name.
env (str): Deployment environment, i.e. dev, stage, prod.
Returns:
True upon successful completion.
"""
session = boto3.Session(profile_name=env)
client = session.client('iam')
generated = get_details(env=env, app=app)
generated_iam = generated.iam()
app_details = collections.namedtuple('AppDetails', generated_iam.keys())
details = app_details(**generated_iam)
LOG.debug('Application details: %s', details)
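# Teardown order: group membership, user, group, instance profile, role policies, role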
resource_action(
client,
action='remove_user_from_group',
log_format='Removed user from group: %(UserName)s ~> %(GroupName)s',
GroupName=details.group,
UserName=details.user)
resource_action(client, action='delete_user', log_format='Destroyed user: %(UserName)s', UserName=details.user)
resource_action(client, action='delete_group', log_format='Destroyed group: %(GroupName)s', GroupName=details.group)
resource_action(
client,
action='remove_role_from_instance_profile',
log_format='Destroyed Instance Profile from Role: '
'%(InstanceProfileName)s ~> %(RoleName)s',
InstanceProfileName=details.profile,
RoleName=details.role)
resource_action(
client,
action='delete_instance_profile',
log_format='Destroyed Instance Profile: %(InstanceProfileName)s',
InstanceProfileName=details.profile)
role_policies = []
try:
role_policies = resource_action(
client,
action='list_role_policies',
log_format='Found Role Policies for %(RoleName)s.',
RoleName=details.role)['PolicyNames']
except TypeError:
LOG.info('Role %s not found.', details.role)
for policy in role_policies:
resource_action(
client,
action='delete_role_policy',
log_format='Removed Inline Policy from Role: '
'%(PolicyName)s ~> %(RoleName)s',
RoleName=details.role,
PolicyName=policy)
attached_role_policies = []
try:
attached_role_policies = resource_action(
client,
action='list_attached_role_policies',
log_format='Found attached Role Polices for %(RoleName)s.',
RoleName=details.role)['AttachedPolicies']
except TypeError:
LOG.info('Role %s not found.', details.role)
for policy in attached_role_policies:
resource_action(
client,
action='detach_role_policy',
log_format='Detached Policy from Role: '
'%(PolicyArn)s ~> %(RoleName)s',
RoleName=details.role,
PolicyArn=policy['PolicyArn'])
resource_action(client, action='delete_role', log_format='Destroyed Role: %(RoleName)s', RoleName=details.role) | [
"def",
"destroy_iam",
"(",
"app",
"=",
"''",
",",
"env",
"=",
"'dev'",
",",
"*",
"*",
"_",
")",
":",
"session",
"=",
"boto3",
".",
"Session",
"(",
"profile_name",
"=",
"env",
")",
"client",
"=",
"session",
".",
"client",
"(",
"'iam'",
")",
"generated",
"=",
"get_details",
"(",
"env",
"=",
"env",
",",
"app",
"=",
"app",
")",
"generated_iam",
"=",
"generated",
".",
"iam",
"(",
")",
"app_details",
"=",
"collections",
".",
"namedtuple",
"(",
"'AppDetails'",
",",
"generated_iam",
".",
"keys",
"(",
")",
")",
"details",
"=",
"app_details",
"(",
"*",
"*",
"generated_iam",
")",
"LOG",
".",
"debug",
"(",
"'Application details: %s'",
",",
"details",
")",
"resource_action",
"(",
"client",
",",
"action",
"=",
"'remove_user_from_group'",
",",
"log_format",
"=",
"'Removed user from group: %(UserName)s ~> %(GroupName)s'",
",",
"GroupName",
"=",
"details",
".",
"group",
",",
"UserName",
"=",
"details",
".",
"user",
")",
"resource_action",
"(",
"client",
",",
"action",
"=",
"'delete_user'",
",",
"log_format",
"=",
"'Destroyed user: %(UserName)s'",
",",
"UserName",
"=",
"details",
".",
"user",
")",
"resource_action",
"(",
"client",
",",
"action",
"=",
"'delete_group'",
",",
"log_format",
"=",
"'Destroyed group: %(GroupName)s'",
",",
"GroupName",
"=",
"details",
".",
"group",
")",
"resource_action",
"(",
"client",
",",
"action",
"=",
"'remove_role_from_instance_profile'",
",",
"log_format",
"=",
"'Destroyed Instance Profile from Role: '",
"'%(InstanceProfileName)s ~> %(RoleName)s'",
",",
"InstanceProfileName",
"=",
"details",
".",
"profile",
",",
"RoleName",
"=",
"details",
".",
"role",
")",
"resource_action",
"(",
"client",
",",
"action",
"=",
"'delete_instance_profile'",
",",
"log_format",
"=",
"'Destroyed Instance Profile: %(InstanceProfileName)s'",
",",
"InstanceProfileName",
"=",
"details",
".",
"profile",
")",
"role_policies",
"=",
"[",
"]",
"try",
":",
"role_policies",
"=",
"resource_action",
"(",
"client",
",",
"action",
"=",
"'list_role_policies'",
",",
"log_format",
"=",
"'Found Role Policies for %(RoleName)s.'",
",",
"RoleName",
"=",
"details",
".",
"role",
")",
"[",
"'PolicyNames'",
"]",
"except",
"TypeError",
":",
"LOG",
".",
"info",
"(",
"'Role %s not found.'",
",",
"details",
".",
"role",
")",
"for",
"policy",
"in",
"role_policies",
":",
"resource_action",
"(",
"client",
",",
"action",
"=",
"'delete_role_policy'",
",",
"log_format",
"=",
"'Removed Inline Policy from Role: '",
"'%(PolicyName)s ~> %(RoleName)s'",
",",
"RoleName",
"=",
"details",
".",
"role",
",",
"PolicyName",
"=",
"policy",
")",
"attached_role_policies",
"=",
"[",
"]",
"try",
":",
"attached_role_policies",
"=",
"resource_action",
"(",
"client",
",",
"action",
"=",
"'list_attached_role_policies'",
",",
"log_format",
"=",
"'Found attached Role Polices for %(RoleName)s.'",
",",
"RoleName",
"=",
"details",
".",
"role",
")",
"[",
"'AttachedPolicies'",
"]",
"except",
"TypeError",
":",
"LOG",
".",
"info",
"(",
"'Role %s not found.'",
",",
"details",
".",
"role",
")",
"for",
"policy",
"in",
"attached_role_policies",
":",
"resource_action",
"(",
"client",
",",
"action",
"=",
"'detach_role_policy'",
",",
"log_format",
"=",
"'Detached Policy from Role: '",
"'%(PolicyArn)s ~> %(RoleName)s'",
",",
"RoleName",
"=",
"details",
".",
"role",
",",
"PolicyArn",
"=",
"policy",
"[",
"'PolicyArn'",
"]",
")",
"resource_action",
"(",
"client",
",",
"action",
"=",
"'delete_role'",
",",
"log_format",
"=",
"'Destroyed Role: %(RoleName)s'",
",",
"RoleName",
"=",
"details",
".",
"role",
")"
]
| Destroy IAM Resources.
Args:
app (str): Spinnaker Application name.
env (str): Deployment environment, i.e. dev, stage, prod.
Returns:
True upon successful completion. | [
"Destroy",
"IAM",
"Resources",
"."
]
| python | train | 35.123457 |
rix0rrr/gcl | gcl/ast.py | https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast.py#L75-L79 | def convertAndMake(converter, handler):
"""Convert with location."""
def convertAction(loc, value):
return handler(loc, converter(value))
return convertAction | [
"def",
"convertAndMake",
"(",
"converter",
",",
"handler",
")",
":",
"def",
"convertAction",
"(",
"loc",
",",
"value",
")",
":",
"return",
"handler",
"(",
"loc",
",",
"converter",
"(",
"value",
")",
")",
"return",
"convertAction"
]
| Convert with location. | [
"Convert",
"with",
"location",
"."
]
| python | train | 32.8 |
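The record above is the standard closure pattern for location-aware parse actions: wrap a value converter around a handler that also receives the match offset. A minimal self-contained sketch of the same pattern (the `int` converter and the doubling handler are illustrative assumptions, not from the gcl source):

```python
def convert_and_make(converter, handler):
    """Compose a value converter with a location-aware handler."""
    def convert_action(loc, value):
        # Convert the raw value first, then pass location and result on.
        return handler(loc, converter(value))
    return convert_action

# Hypothetical usage: parse a token to int, then pair it with its offset.
action = convert_and_make(int, lambda loc, v: (loc, v * 2))
print(action(7, "21"))  # -> (7, 42)
```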
saltstack/salt | salt/modules/system.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/system.py#L221-L235 | def _offset_to_min(utc_offset):
'''
Helper function that converts the utc offset string into number of minutes
offset. Input is in form "[+-]?HHMM". Example valid inputs are "+0500"
"-0300" and "0800". These would return -300, 180, 480 respectively.
'''
match = re.match(r"^([+-])?(\d\d)(\d\d)$", utc_offset)
if not match:
raise SaltInvocationError("Invalid UTC offset")
sign = -1 if match.group(1) == '-' else 1
hours_offset = int(match.group(2))
minutes_offset = int(match.group(3))
total_offset = sign * (hours_offset * 60 + minutes_offset)
return total_offset | [
"def",
"_offset_to_min",
"(",
"utc_offset",
")",
":",
"match",
"=",
"re",
".",
"match",
"(",
"r\"^([+-])?(\\d\\d)(\\d\\d)$\"",
",",
"utc_offset",
")",
"if",
"not",
"match",
":",
"raise",
"SaltInvocationError",
"(",
"\"Invalid UTC offset\"",
")",
"sign",
"=",
"-",
"1",
"if",
"match",
".",
"group",
"(",
"1",
")",
"==",
"'-'",
"else",
"1",
"hours_offset",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
"minutes_offset",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"3",
")",
")",
"total_offset",
"=",
"sign",
"*",
"(",
"hours_offset",
"*",
"60",
"+",
"minutes_offset",
")",
"return",
"total_offset"
]
| Helper function that converts the utc offset string into number of minutes
offset. Input is in form "[+-]?HHMM". Example valid inputs are "+0500"
"-0300" and "0800". These would return -300, 180, 480 respectively. | [
"Helper",
"function",
"that",
"converts",
"the",
"utc",
"offset",
"string",
"into",
"number",
"of",
"minutes",
"offset",
".",
"Input",
"is",
"in",
"form",
"[",
"+",
"-",
"]",
"?HHMM",
".",
"Example",
"valid",
"inputs",
"are",
"+",
"0500",
"-",
"0300",
"and",
"0800",
".",
"These",
"would",
"return",
"-",
"300",
"180",
"480",
"respectively",
"."
]
| python | train | 40.4 |
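A standalone re-implementation of the record's parsing logic, useful for checking the sign arithmetic directly (regex and sign handling copied from the code; the printed results are what the code actually computes):

```python
import re

def offset_to_min(utc_offset):
    """Convert '[+-]?HHMM' to a signed minute count, as in the record."""
    match = re.match(r"^([+-])?(\d\d)(\d\d)$", utc_offset)
    if not match:
        raise ValueError("Invalid UTC offset")
    sign = -1 if match.group(1) == '-' else 1
    return sign * (int(match.group(2)) * 60 + int(match.group(3)))

for s in ("+0500", "-0300", "0800"):
    print(s, "->", offset_to_min(s))  # 300, -180, 480
```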
adafruit/Adafruit_Blinka | src/adafruit_blinka/microcontroller/bcm283x/pulseio/PulseIn.py | https://github.com/adafruit/Adafruit_Blinka/blob/b4a2b3bf7d8cc88477027b827bd0a8e9b19588ff/src/adafruit_blinka/microcontroller/bcm283x/pulseio/PulseIn.py#L99-L105 | def resume(self, trigger_duration=0):
"""Resumes pulse capture after an optional trigger pulse."""
if trigger_duration != 0:
self._mq.send("t%d" % trigger_duration, True, type=1)
else:
self._mq.send("r", True, type=1)
self._paused = False | [
"def",
"resume",
"(",
"self",
",",
"trigger_duration",
"=",
"0",
")",
":",
"if",
"trigger_duration",
"!=",
"0",
":",
"self",
".",
"_mq",
".",
"send",
"(",
"\"t%d\"",
"%",
"trigger_duration",
",",
"True",
",",
"type",
"=",
"1",
")",
"else",
":",
"self",
".",
"_mq",
".",
"send",
"(",
"\"r\"",
",",
"True",
",",
"type",
"=",
"1",
")",
"self",
".",
"_paused",
"=",
"False"
]
| Resumes pulse capture after an optional trigger pulse. | [
"Resumes",
"pulse",
"capture",
"after",
"an",
"optional",
"trigger",
"pulse",
"."
]
| python | train | 41.142857 |
bokeh/bokeh | bokeh/models/sources.py | https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/models/sources.py#L325-L343 | def add(self, data, name=None):
''' Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
If not supplied, generate a name of the form "Series ####"
Returns:
str: the column name used
'''
if name is None:
n = len(self.data)
while "Series %d"%n in self.data:
n += 1
name = "Series %d"%n
self.data[name] = data
return name | [
"def",
"add",
"(",
"self",
",",
"data",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"n",
"=",
"len",
"(",
"self",
".",
"data",
")",
"while",
"\"Series %d\"",
"%",
"n",
"in",
"self",
".",
"data",
":",
"n",
"+=",
"1",
"name",
"=",
"\"Series %d\"",
"%",
"n",
"self",
".",
"data",
"[",
"name",
"]",
"=",
"data",
"return",
"name"
]
| Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
If not supplied, generate a name of the form "Series ####"
Returns:
str: the column name used | [
"Appends",
"a",
"new",
"column",
"of",
"data",
"to",
"the",
"data",
"source",
"."
]
| python | train | 28.368421 |
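The auto-naming loop is the interesting part of this record: it probes "Series N" names, starting from the current column count, until an unused key is found. The same logic on a plain dict:

```python
def add_column(data, values, name=None):
    """Append a column, generating a 'Series N' name when none is given."""
    if name is None:
        n = len(data)
        while "Series %d" % n in data:
            n += 1
        name = "Series %d" % n
    data[name] = values
    return name

cols = {"Series 0": [1, 2]}
print(add_column(cols, [3, 4]))       # -> 'Series 1'
print(add_column(cols, [5, 6], "x"))  # -> 'x'
```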
joeyespo/gitpress | gitpress/repository.py | https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/repository.py#L68-L99 | def iterate_presentation_files(path=None, excludes=None, includes=None):
"""Iterates the repository presentation files relative to 'path',
not including themes. Note that 'includes' take priority."""
# Defaults
if includes is None:
includes = []
if excludes is None:
excludes = []
# Transform glob patterns to regular expressions
includes_pattern = r'|'.join([fnmatch.translate(x) for x in includes]) or r'$.'
excludes_pattern = r'|'.join([fnmatch.translate(x) for x in excludes]) or r'$.'
includes_re = re.compile(includes_pattern)
excludes_re = re.compile(excludes_pattern)
def included(root, name):
"""Returns True if the specified file is a presentation file."""
full_path = os.path.join(root, name)
# Explicitly included files takes priority
if includes_re.match(full_path):
return True
# Ignore special and excluded files
return (not specials_re.match(name)
and not excludes_re.match(full_path))
# Get a filtered list of paths to be built
for root, dirs, files in os.walk(path):
dirs[:] = [d for d in dirs if included(root, d)]
files = [f for f in files if included(root, f)]
for f in files:
yield os.path.relpath(os.path.join(root, f), path) | [
"def",
"iterate_presentation_files",
"(",
"path",
"=",
"None",
",",
"excludes",
"=",
"None",
",",
"includes",
"=",
"None",
")",
":",
"# Defaults",
"if",
"includes",
"is",
"None",
":",
"includes",
"=",
"[",
"]",
"if",
"excludes",
"is",
"None",
":",
"excludes",
"=",
"[",
"]",
"# Transform glob patterns to regular expressions",
"includes_pattern",
"=",
"r'|'",
".",
"join",
"(",
"[",
"fnmatch",
".",
"translate",
"(",
"x",
")",
"for",
"x",
"in",
"includes",
"]",
")",
"or",
"r'$.'",
"excludes_pattern",
"=",
"r'|'",
".",
"join",
"(",
"[",
"fnmatch",
".",
"translate",
"(",
"x",
")",
"for",
"x",
"in",
"excludes",
"]",
")",
"or",
"r'$.'",
"includes_re",
"=",
"re",
".",
"compile",
"(",
"includes_pattern",
")",
"excludes_re",
"=",
"re",
".",
"compile",
"(",
"excludes_pattern",
")",
"def",
"included",
"(",
"root",
",",
"name",
")",
":",
"\"\"\"Returns True if the specified file is a presentation file.\"\"\"",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"name",
")",
"# Explicitly included files takes priority",
"if",
"includes_re",
".",
"match",
"(",
"full_path",
")",
":",
"return",
"True",
"# Ignore special and excluded files",
"return",
"(",
"not",
"specials_re",
".",
"match",
"(",
"name",
")",
"and",
"not",
"excludes_re",
".",
"match",
"(",
"full_path",
")",
")",
"# Get a filtered list of paths to be built",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"path",
")",
":",
"dirs",
"[",
":",
"]",
"=",
"[",
"d",
"for",
"d",
"in",
"dirs",
"if",
"included",
"(",
"root",
",",
"d",
")",
"]",
"files",
"=",
"[",
"f",
"for",
"f",
"in",
"files",
"if",
"included",
"(",
"root",
",",
"f",
")",
"]",
"for",
"f",
"in",
"files",
":",
"yield",
"os",
".",
"path",
".",
"relpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"f",
")",
",",
"path",
")"
]
| Iterates the repository presentation files relative to 'path',
not including themes. Note that 'includes' take priority. | [
"Iterates",
"the",
"repository",
"presentation",
"files",
"relative",
"to",
"path",
"not",
"including",
"themes",
".",
"Note",
"that",
"includes",
"take",
"priority",
"."
]
| python | train | 40.53125 |
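The record builds its include/exclude filters by joining `fnmatch.translate` outputs into one alternation regex, with `r'$.'` as a fallback that can never match (so an empty pattern list filters nothing). That trick in isolation, with made-up patterns:

```python
import fnmatch
import re

def make_filter(patterns):
    # r'$.' never matches anything, so no patterns means the filter is inert.
    joined = r'|'.join(fnmatch.translate(p) for p in patterns) or r'$.'
    return re.compile(joined)

includes = make_filter(['docs/*.md'])   # illustrative patterns
excludes = make_filter(['*.tmp', '_*'])

for path in ('docs/a.md', 'notes.tmp', '_draft', 'b.md'):
    keep = bool(includes.match(path)) or not excludes.match(path)
    print(path, keep)  # True, False, False, True
```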
hobson/aima | aima/logic.py | https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/logic.py#L339-L347 | def prop_symbols(x):
"Return a list of all propositional symbols in x."
if not isinstance(x, Expr):
return []
elif is_prop_symbol(x.op):
return [x]
else:
return list(set(symbol for arg in x.args
for symbol in prop_symbols(arg))) | [
"def",
"prop_symbols",
"(",
"x",
")",
":",
"if",
"not",
"isinstance",
"(",
"x",
",",
"Expr",
")",
":",
"return",
"[",
"]",
"elif",
"is_prop_symbol",
"(",
"x",
".",
"op",
")",
":",
"return",
"[",
"x",
"]",
"else",
":",
"return",
"list",
"(",
"set",
"(",
"symbol",
"for",
"arg",
"in",
"x",
".",
"args",
"for",
"symbol",
"in",
"prop_symbols",
"(",
"arg",
")",
")",
")"
]
| Return a list of all propositional symbols in x. | [
"Return",
"a",
"list",
"of",
"all",
"propositional",
"symbols",
"in",
"x",
"."
]
| python | valid | 31.555556 |
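The recursion only depends on the shape of `Expr` (an operator plus arguments), so it can be exercised with a tiny stand-in type. `is_prop_symbol` below is a simplified assumption (initial uppercase letter) rather than AIMA's actual predicate:

```python
from collections import namedtuple

Expr = namedtuple("Expr", ["op", "args"])

def is_prop_symbol(op):
    # Simplified assumption: proposition symbols start with an uppercase letter.
    return isinstance(op, str) and op[:1].isupper()

def prop_symbols(x):
    if not isinstance(x, Expr):
        return []
    elif is_prop_symbol(x.op):
        return [x]
    else:
        return list(set(sym for arg in x.args for sym in prop_symbols(arg)))

P, Q = Expr("P", ()), Expr("Q", ())
sentence = Expr("&", (P, Expr("~", (Q,))))
print(sorted(s.op for s in prop_symbols(sentence)))  # ['P', 'Q']
```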
chrismattmann/tika-python | tika/tika.py | https://github.com/chrismattmann/tika-python/blob/ffd3879ac3eaa9142c0fb6557cc1dc52d458a75a/tika/tika.py#L232-L251 | def getPaths(urlOrPaths):
'''
Determines if the given URL in urlOrPaths is a URL or a file or directory. If it's
a directory, it walks the directory and then finds all file paths in it, and adds them
too. If it's a file, it adds it to the paths. If it's a URL it just adds it to the path.
:param urlOrPaths: the url or path to be scanned
:return: ``list`` of paths
'''
if isinstance(urlOrPaths, basestring):
#FIXME: basestring is undefined
urlOrPaths = [urlOrPaths] # do not recursively walk over letters of a single path which can include "/"
paths = []
for eachUrlOrPaths in urlOrPaths:
if os.path.isdir(eachUrlOrPaths):
for root, directories, filenames in walk(eachUrlOrPaths):
for filename in filenames:
paths.append(os.path.join(root,filename))
else:
paths.append(eachUrlOrPaths)
return paths | [
"def",
"getPaths",
"(",
"urlOrPaths",
")",
":",
"if",
"isinstance",
"(",
"urlOrPaths",
",",
"basestring",
")",
":",
"#FIXME: basestring is undefined",
"urlOrPaths",
"=",
"[",
"urlOrPaths",
"]",
"# do not recursively walk over letters of a single path which can include \"/\"",
"paths",
"=",
"[",
"]",
"for",
"eachUrlOrPaths",
"in",
"urlOrPaths",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"eachUrlOrPaths",
")",
":",
"for",
"root",
",",
"directories",
",",
"filenames",
"in",
"walk",
"(",
"eachUrlOrPaths",
")",
":",
"for",
"filename",
"in",
"filenames",
":",
"paths",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"filename",
")",
")",
"else",
":",
"paths",
".",
"append",
"(",
"eachUrlOrPaths",
")",
"return",
"paths"
]
| Determines if the given URL in urlOrPaths is a URL or a file or directory. If it's
a directory, it walks the directory and then finds all file paths in it, and adds them
too. If it's a file, it adds it to the paths. If it's a URL it just adds it to the path.
:param urlOrPaths: the url or path to be scanned
:return: ``list`` of paths | [
"Determines",
"if",
"the",
"given",
"URL",
"in",
"urlOrPaths",
"is",
"a",
"URL",
"or",
"a",
"file",
"or",
"directory",
".",
"If",
"it",
"s",
"a",
"directory",
"it",
"walks",
"the",
"directory",
"and",
"then",
"finds",
"all",
"file",
"paths",
"in",
"it",
"and",
"ads",
"them",
"too",
".",
"If",
"it",
"s",
"a",
"file",
"it",
"adds",
"it",
"to",
"the",
"paths",
".",
"If",
"it",
"s",
"a",
"URL",
"it",
"just",
"adds",
"it",
"to",
"the",
"path",
".",
":",
"param",
"urlOrPaths",
":",
"the",
"url",
"or",
"path",
"to",
"be",
"scanned",
":",
"return",
":",
"list",
"of",
"paths"
]
| python | train | 45.7 |
lsbardel/python-stdnet | stdnet/backends/redisb/client/extensions.py | https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/client/extensions.py#L46-L56 | def read_lua_file(dotted_module, path=None, context=None):
'''Load lua script from the stdnet/lib/lua directory'''
path = path or DEFAULT_LUA_PATH
bits = dotted_module.split('.')
bits[-1] += '.lua'
name = os.path.join(path, *bits)
with open(name) as f:
data = f.read()
if context:
data = data.format(context)
return data | [
"def",
"read_lua_file",
"(",
"dotted_module",
",",
"path",
"=",
"None",
",",
"context",
"=",
"None",
")",
":",
"path",
"=",
"path",
"or",
"DEFAULT_LUA_PATH",
"bits",
"=",
"dotted_module",
".",
"split",
"(",
"'.'",
")",
"bits",
"[",
"-",
"1",
"]",
"+=",
"'.lua'",
"name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"*",
"bits",
")",
"with",
"open",
"(",
"name",
")",
"as",
"f",
":",
"data",
"=",
"f",
".",
"read",
"(",
")",
"if",
"context",
":",
"data",
"=",
"data",
".",
"format",
"(",
"context",
")",
"return",
"data"
]
| Load lua script from the stdnet/lib/lua directory | [
"Load",
"lua",
"script",
"from",
"the",
"stdnet",
"/",
"lib",
"/",
"lua",
"directory"
]
| python | train | 33.454545 |
fprimex/zdesk | zdesk/zdesk_api.py | https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L4343-L4347 | def view_show(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/views#show-view"
api_path = "/api/v2/views/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) | [
"def",
"view_show",
"(",
"self",
",",
"id",
",",
"*",
"*",
"kwargs",
")",
":",
"api_path",
"=",
"\"/api/v2/views/{id}.json\"",
"api_path",
"=",
"api_path",
".",
"format",
"(",
"id",
"=",
"id",
")",
"return",
"self",
".",
"call",
"(",
"api_path",
",",
"*",
"*",
"kwargs",
")"
]
| https://developer.zendesk.com/rest_api/docs/core/views#show-view | [
"https",
":",
"//",
"developer",
".",
"zendesk",
".",
"com",
"/",
"rest_api",
"/",
"docs",
"/",
"core",
"/",
"views#show",
"-",
"view"
]
| python | train | 47.4 |
tanghaibao/goatools | goatools/gosubdag/plot/go_name_shorten.py | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/plot/go_name_shorten.py#L131-L136 | def _keep_this(self, name):
"""Return True if there are to be no modifications to name."""
for keep_name in self.keep:
if name == keep_name:
return True
return False | [
"def",
"_keep_this",
"(",
"self",
",",
"name",
")",
":",
"for",
"keep_name",
"in",
"self",
".",
"keep",
":",
"if",
"name",
"==",
"keep_name",
":",
"return",
"True",
"return",
"False"
]
| Return True if there are to be no modifications to name. | [
"Return",
"True",
"if",
"there",
"are",
"to",
"be",
"no",
"modifications",
"to",
"name",
"."
]
| python | train | 35.333333 |
Hrabal/TemPy | tempy/widgets.py | https://github.com/Hrabal/TemPy/blob/7d229b73e2ce3ccbb8254deae05c1f758f626ed6/tempy/widgets.py#L209-L213 | def make_caption(self, caption):
"""Adds/Substitutes the table's caption."""
if not hasattr(self, "caption"):
self(caption=Caption())
return self.caption.empty()(caption) | [
"def",
"make_caption",
"(",
"self",
",",
"caption",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"caption\"",
")",
":",
"self",
"(",
"caption",
"=",
"Caption",
"(",
")",
")",
"return",
"self",
".",
"caption",
".",
"empty",
"(",
")",
"(",
"caption",
")"
]
| Adds/Substitutes the table's caption. | [
"Adds",
"/",
"Substitutes",
"the",
"table",
"s",
"caption",
"."
]
| python | train | 40.4 |
datamachine/twx | twx/twx.py | https://github.com/datamachine/twx/blob/d9633f12f3647b1e54ba87b70b39df3b7e02b4eb/twx/twx.py#L392-L404 | def send_location(self, peer: Peer, latitude: float, longitude: float, reply: int=None, on_success: callable=None,
reply_markup: botapi.ReplyMarkup=None):
"""
Send location to peer.
:param peer: Peer to send message to.
:param latitude: Latitude of the location.
:param longitude: Longitude of the location.
:param reply: Message object or message_id to reply to.
:param on_success: Callback to call when call is complete.
:type reply: int or Message
"""
pass | [
"def",
"send_location",
"(",
"self",
",",
"peer",
":",
"Peer",
",",
"latitude",
":",
"float",
",",
"longitude",
":",
"float",
",",
"reply",
":",
"int",
"=",
"None",
",",
"on_success",
":",
"callable",
"=",
"None",
",",
"reply_markup",
":",
"botapi",
".",
"ReplyMarkup",
"=",
"None",
")",
":",
"pass"
]
| Send location to peer.
:param peer: Peer to send message to.
:param latitude: Latitude of the location.
:param longitude: Longitude of the location.
:param reply: Message object or message_id to reply to.
:param on_success: Callback to call when call is complete.
:type reply: int or Message | [
"Send",
"location",
"to",
"peer",
".",
":",
"param",
"peer",
":",
"Peer",
"to",
"send",
"message",
"to",
".",
":",
"param",
"latitude",
":",
"Latitude",
"of",
"the",
"location",
".",
":",
"param",
"longitude",
":",
"Longitude",
"of",
"the",
"location",
".",
":",
"param",
"reply",
":",
"Message",
"object",
"or",
"message_id",
"to",
"reply",
"to",
".",
":",
"param",
"on_success",
":",
"Callback",
"to",
"call",
"when",
"call",
"is",
"complete",
"."
]
| python | train | 42.307692 |
AndrewAnnex/SpiceyPy | spiceypy/spiceypy.py | https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L13921-L13942 | def tisbod(ref, body, et):
"""
Return a 6x6 matrix that transforms states in inertial coordinates to
states in body-equator-and-prime-meridian coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tisbod_c.html
:param ref: ID of inertial reference frame to transform from.
:type ref: str
:param body: ID code of body.
:type body: int
:param et: Epoch of transformation.
:type et: float
:return: Transformation (state), inertial to prime meridian.
:rtype: 6x6-Element Array of floats
"""
ref = stypes.stringToCharP(ref)
body = ctypes.c_int(body)
et = ctypes.c_double(et)
retmatrix = stypes.emptyDoubleMatrix(x=6, y=6)
libspice.tisbod_c(ref, body, et, retmatrix)
return stypes.cMatrixToNumpy(retmatrix) | [
"def",
"tisbod",
"(",
"ref",
",",
"body",
",",
"et",
")",
":",
"ref",
"=",
"stypes",
".",
"stringToCharP",
"(",
"ref",
")",
"body",
"=",
"ctypes",
".",
"c_int",
"(",
"body",
")",
"et",
"=",
"ctypes",
".",
"c_double",
"(",
"et",
")",
"retmatrix",
"=",
"stypes",
".",
"emptyDoubleMatrix",
"(",
"x",
"=",
"6",
",",
"y",
"=",
"6",
")",
"libspice",
".",
"tisbod_c",
"(",
"ref",
",",
"body",
",",
"et",
",",
"retmatrix",
")",
"return",
"stypes",
".",
"cMatrixToNumpy",
"(",
"retmatrix",
")"
]
| Return a 6x6 matrix that transforms states in inertial coordinates to
states in body-equator-and-prime-meridian coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tisbod_c.html
:param ref: ID of inertial reference frame to transform from.
:type ref: str
:param body: ID code of body.
:type body: int
:param et: Epoch of transformation.
:type et: float
:return: Transformation (state), inertial to prime meridian.
:rtype: 6x6-Element Array of floats | [
"Return",
"a",
"6x6",
"matrix",
"that",
"transforms",
"states",
"in",
"inertial",
"coordinates",
"to",
"states",
"in",
"body",
"-",
"equator",
"-",
"and",
"-",
"prime",
"-",
"meridian",
"coordinates",
"."
]
| python | train | 35.090909 |
rochacbruno/dynaconf | dynaconf/loaders/vault_loader.py | https://github.com/rochacbruno/dynaconf/blob/5a7cc8f8252251cbdf4f4112965801f9dfe2831d/dynaconf/loaders/vault_loader.py#L53-L97 | def load(obj, env=None, silent=None, key=None):
"""Reads and loads in to "settings" a single key or all keys from vault
:param obj: the settings instance
:param env: settings env default='DYNACONF'
:param silent: if errors should raise
:param key: if defined load a single key, else load all in env
:return: None
"""
client = get_client(obj)
env_list = _get_env_list(obj, env)
for env in env_list:
path = "/".join([obj.VAULT_PATH_FOR_DYNACONF, env]).replace("//", "/")
data = client.read(path)
if data:
# There seems to be a data dict within a data dict,
# extract the inner data
data = data.get("data", {}).get("data", {})
try:
if data and key:
value = parse_conf_data(data.get(key), tomlfy=True)
if value:
obj.logger.debug(
"vault_loader: loading by key: %s:%s (%s:%s)",
key,
"****",
IDENTIFIER,
path,
)
obj.set(key, value)
elif data:
obj.logger.debug(
"vault_loader: loading: %s (%s:%s)",
list(data.keys()),
IDENTIFIER,
path,
)
obj.update(data, loader_identifier=IDENTIFIER, tomlfy=True)
except Exception as e:
if silent:
if hasattr(obj, "logger"):
obj.logger.error(str(e))
return False
raise | [
"def",
"load",
"(",
"obj",
",",
"env",
"=",
"None",
",",
"silent",
"=",
"None",
",",
"key",
"=",
"None",
")",
":",
"client",
"=",
"get_client",
"(",
"obj",
")",
"env_list",
"=",
"_get_env_list",
"(",
"obj",
",",
"env",
")",
"for",
"env",
"in",
"env_list",
":",
"path",
"=",
"\"/\"",
".",
"join",
"(",
"[",
"obj",
".",
"VAULT_PATH_FOR_DYNACONF",
",",
"env",
"]",
")",
".",
"replace",
"(",
"\"//\"",
",",
"\"/\"",
")",
"data",
"=",
"client",
".",
"read",
"(",
"path",
")",
"if",
"data",
":",
"# There seems to be a data dict within a data dict,",
"# extract the inner data",
"data",
"=",
"data",
".",
"get",
"(",
"\"data\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"data\"",
",",
"{",
"}",
")",
"try",
":",
"if",
"data",
"and",
"key",
":",
"value",
"=",
"parse_conf_data",
"(",
"data",
".",
"get",
"(",
"key",
")",
",",
"tomlfy",
"=",
"True",
")",
"if",
"value",
":",
"obj",
".",
"logger",
".",
"debug",
"(",
"\"vault_loader: loading by key: %s:%s (%s:%s)\"",
",",
"key",
",",
"\"****\"",
",",
"IDENTIFIER",
",",
"path",
",",
")",
"obj",
".",
"set",
"(",
"key",
",",
"value",
")",
"elif",
"data",
":",
"obj",
".",
"logger",
".",
"debug",
"(",
"\"vault_loader: loading: %s (%s:%s)\"",
",",
"list",
"(",
"data",
".",
"keys",
"(",
")",
")",
",",
"IDENTIFIER",
",",
"path",
",",
")",
"obj",
".",
"update",
"(",
"data",
",",
"loader_identifier",
"=",
"IDENTIFIER",
",",
"tomlfy",
"=",
"True",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"silent",
":",
"if",
"hasattr",
"(",
"obj",
",",
"\"logger\"",
")",
":",
"obj",
".",
"logger",
".",
"error",
"(",
"str",
"(",
"e",
")",
")",
"return",
"False",
"raise"
]
| Reads and loads in to "settings" a single key or all keys from vault
:param obj: the settings instance
:param env: settings env default='DYNACONF'
:param silent: if errors should raise
:param key: if defined load a single key, else load all in env
:return: None | [
"Reads",
"and",
"loads",
"in",
"to",
"settings",
"a",
"single",
"key",
"or",
"all",
"keys",
"from",
"vault"
]
| python | train | 35.755556 |
alefnula/tea | tea/shell/__init__.py | https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L166-L187 | def __copyfile(source, destination):
"""Copy data and mode bits ("cp source destination").
The destination may be a directory.
Args:
source (str): Source file (file to copy).
destination (str): Destination file or directory (where to copy).
Returns:
bool: True if the operation is successful, False otherwise.
"""
logger.info("copyfile: %s -> %s" % (source, destination))
try:
__create_destdir(destination)
shutil.copy(source, destination)
return True
except Exception as e:
logger.error(
"copyfile: %s -> %s failed! Error: %s", source, destination, e
)
return False | [
"def",
"__copyfile",
"(",
"source",
",",
"destination",
")",
":",
"logger",
".",
"info",
"(",
"\"copyfile: %s -> %s\"",
"%",
"(",
"source",
",",
"destination",
")",
")",
"try",
":",
"__create_destdir",
"(",
"destination",
")",
"shutil",
".",
"copy",
"(",
"source",
",",
"destination",
")",
"return",
"True",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"\"copyfile: %s -> %s failed! Error: %s\"",
",",
"source",
",",
"destination",
",",
"e",
")",
"return",
"False"
]
| Copy data and mode bits ("cp source destination").
The destination may be a directory.
Args:
source (str): Source file (file to copy).
destination (str): Destination file or directory (where to copy).
Returns:
bool: True if the operation is successful, False otherwise. | [
"Copy",
"data",
"and",
"mode",
"bits",
"(",
"cp",
"source",
"destination",
")",
"."
]
| python | train | 30.181818 |
sethmlarson/virtualbox-python | virtualbox/library.py | https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L21550-L21566 | def find_session(self, session_name):
"""Finds guest sessions by their friendly name and returns an interface
array with all found guest sessions.
in session_name of type str
The session's friendly name to find. Wildcards like ? and * are allowed.
return sessions of type :class:`IGuestSession`
Array with all guest sessions found matching the name specified.
"""
if not isinstance(session_name, basestring):
raise TypeError("session_name can only be an instance of type basestring")
sessions = self._call("findSession",
in_p=[session_name])
sessions = [IGuestSession(a) for a in sessions]
return sessions | [
"def",
"find_session",
"(",
"self",
",",
"session_name",
")",
":",
"if",
"not",
"isinstance",
"(",
"session_name",
",",
"basestring",
")",
":",
"raise",
"TypeError",
"(",
"\"session_name can only be an instance of type basestring\"",
")",
"sessions",
"=",
"self",
".",
"_call",
"(",
"\"findSession\"",
",",
"in_p",
"=",
"[",
"session_name",
"]",
")",
"sessions",
"=",
"[",
"IGuestSession",
"(",
"a",
")",
"for",
"a",
"in",
"sessions",
"]",
"return",
"sessions"
]
| Finds guest sessions by their friendly name and returns an interface
array with all found guest sessions.
in session_name of type str
The session's friendly name to find. Wildcards like ? and * are allowed.
return sessions of type :class:`IGuestSession`
Array with all guest sessions found matching the name specified. | [
"Finds",
"guest",
"sessions",
"by",
"their",
"friendly",
"name",
"and",
"returns",
"an",
"interface",
"array",
"with",
"all",
"found",
"guest",
"sessions",
"."
]
| python | train | 42.411765 |
facelessuser/backrefs | backrefs/_bregex_parse.py | https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/backrefs/_bregex_parse.py#L202-L231 | def get_posix(self, i):
"""Get POSIX."""
index = i.index
value = ['[']
try:
c = next(i)
if c != ':':
raise ValueError('Not a valid property!')
else:
value.append(c)
c = next(i)
if c == '^':
value.append(c)
c = next(i)
while c != ':':
if c not in _PROPERTY:
raise ValueError('Not a valid property!')
if c not in _PROPERTY_STRIP:
value.append(c)
c = next(i)
value.append(c)
c = next(i)
if c != ']' or not value:
raise ValueError('Unmatched ]')
value.append(c)
except Exception:
i.rewind(i.index - index)
value = []
return ''.join(value) if value else None | [
"def",
"get_posix",
"(",
"self",
",",
"i",
")",
":",
"index",
"=",
"i",
".",
"index",
"value",
"=",
"[",
"'['",
"]",
"try",
":",
"c",
"=",
"next",
"(",
"i",
")",
"if",
"c",
"!=",
"':'",
":",
"raise",
"ValueError",
"(",
"'Not a valid property!'",
")",
"else",
":",
"value",
".",
"append",
"(",
"c",
")",
"c",
"=",
"next",
"(",
"i",
")",
"if",
"c",
"==",
"'^'",
":",
"value",
".",
"append",
"(",
"c",
")",
"c",
"=",
"next",
"(",
"i",
")",
"while",
"c",
"!=",
"':'",
":",
"if",
"c",
"not",
"in",
"_PROPERTY",
":",
"raise",
"ValueError",
"(",
"'Not a valid property!'",
")",
"if",
"c",
"not",
"in",
"_PROPERTY_STRIP",
":",
"value",
".",
"append",
"(",
"c",
")",
"c",
"=",
"next",
"(",
"i",
")",
"value",
".",
"append",
"(",
"c",
")",
"c",
"=",
"next",
"(",
"i",
")",
"if",
"c",
"!=",
"']'",
"or",
"not",
"value",
":",
"raise",
"ValueError",
"(",
"'Unmatched ]'",
")",
"value",
".",
"append",
"(",
"c",
")",
"except",
"Exception",
":",
"i",
".",
"rewind",
"(",
"i",
".",
"index",
"-",
"index",
")",
"value",
"=",
"[",
"]",
"return",
"''",
".",
"join",
"(",
"value",
")",
"if",
"value",
"else",
"None"
]
| Get POSIX. | [
"Get",
"POSIX",
"."
]
| python | train | 31.5 |
apache/incubator-mxnet | example/cnn_text_classification/data_helpers.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/cnn_text_classification/data_helpers.py#L79-L89 | def pad_sentences(sentences, padding_word="</s>"):
"""Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences.
"""
sequence_length = max(len(x) for x in sentences)
padded_sentences = []
for i, sentence in enumerate(sentences):
num_padding = sequence_length - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences | [
"def",
"pad_sentences",
"(",
"sentences",
",",
"padding_word",
"=",
"\"</s>\"",
")",
":",
"sequence_length",
"=",
"max",
"(",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"sentences",
")",
"padded_sentences",
"=",
"[",
"]",
"for",
"i",
",",
"sentence",
"in",
"enumerate",
"(",
"sentences",
")",
":",
"num_padding",
"=",
"sequence_length",
"-",
"len",
"(",
"sentence",
")",
"new_sentence",
"=",
"sentence",
"+",
"[",
"padding_word",
"]",
"*",
"num_padding",
"padded_sentences",
".",
"append",
"(",
"new_sentence",
")",
"return",
"padded_sentences"
]
| Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences. | [
"Pads",
"all",
"sentences",
"to",
"the",
"same",
"length",
".",
"The",
"length",
"is",
"defined",
"by",
"the",
"longest",
"sentence",
".",
"Returns",
"padded",
"sentences",
"."
]
| python | train | 44.181818 |
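The record's padding step on plain token lists, which makes the behavior easy to eyeball:

```python
def pad_sentences(sentences, padding_word="</s>"):
    """Pad every token list to the length of the longest one."""
    sequence_length = max(len(s) for s in sentences)
    return [s + [padding_word] * (sequence_length - len(s)) for s in sentences]

print(pad_sentences([["i", "like", "cats"], ["hello"]]))
# [['i', 'like', 'cats'], ['hello', '</s>', '</s>']]
```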
pyecore/pyecore | pyecore/ecore.py | https://github.com/pyecore/pyecore/blob/22b67ad8799594f8f44fd8bee497583d4f12ed63/pyecore/ecore.py#L873-L894 | def EMetaclass(cls):
"""Class decorator for creating PyEcore metaclass."""
superclass = cls.__bases__
if not issubclass(cls, EObject):
sclasslist = list(superclass)
if object in superclass:
index = sclasslist.index(object)
sclasslist.insert(index, EObject)
sclasslist.remove(object)
else:
sclasslist.insert(0, EObject)
superclass = tuple(sclasslist)
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return MetaEClass(cls.__name__, superclass, orig_vars) | [
"def",
"EMetaclass",
"(",
"cls",
")",
":",
"superclass",
"=",
"cls",
".",
"__bases__",
"if",
"not",
"issubclass",
"(",
"cls",
",",
"EObject",
")",
":",
"sclasslist",
"=",
"list",
"(",
"superclass",
")",
"if",
"object",
"in",
"superclass",
":",
"index",
"=",
"sclasslist",
".",
"index",
"(",
"object",
")",
"sclasslist",
".",
"insert",
"(",
"index",
",",
"EObject",
")",
"sclasslist",
".",
"remove",
"(",
"object",
")",
"else",
":",
"sclasslist",
".",
"insert",
"(",
"0",
",",
"EObject",
")",
"superclass",
"=",
"tuple",
"(",
"sclasslist",
")",
"orig_vars",
"=",
"cls",
".",
"__dict__",
".",
"copy",
"(",
")",
"slots",
"=",
"orig_vars",
".",
"get",
"(",
"'__slots__'",
")",
"if",
"slots",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"slots",
",",
"str",
")",
":",
"slots",
"=",
"[",
"slots",
"]",
"for",
"slots_var",
"in",
"slots",
":",
"orig_vars",
".",
"pop",
"(",
"slots_var",
")",
"orig_vars",
".",
"pop",
"(",
"'__dict__'",
",",
"None",
")",
"orig_vars",
".",
"pop",
"(",
"'__weakref__'",
",",
"None",
")",
"return",
"MetaEClass",
"(",
"cls",
".",
"__name__",
",",
"superclass",
",",
"orig_vars",
")"
]
| Class decorator for creating PyEcore metaclass. | [
"Class",
"decorator",
"for",
"creating",
"PyEcore",
"metaclass",
"."
]
| python | train | 35.772727 |
riptano/ccm | ccmlib/dse_node.py | https://github.com/riptano/ccm/blob/275699f79d102b5039b79cc17fa6305dccf18412/ccmlib/dse_node.py#L320-L339 | def export_dse_home_in_dse_env_sh(self):
'''
Due to the way CCM lays out files, separating the repository
from the node(s) confs, the `dse-env.sh` script of each node
needs to have its DSE_HOME var set and exported. Since DSE
4.5.x, the stock `dse-env.sh` file includes a commented-out
place to do exactly this, intended for installers.
Basically: read in the file, write it back out and add the two
lines.
'sstableloader' is an example of a node script that depends on
this, when used in a CCM-built cluster.
'''
with open(self.get_bin_dir() + "/dse-env.sh", "r") as dse_env_sh:
buf = dse_env_sh.readlines()
with open(self.get_bin_dir() + "/dse-env.sh", "w") as out_file:
for line in buf:
out_file.write(line)
if line == "# This is here so the installer can force set DSE_HOME\n":
out_file.write("DSE_HOME=" + self.get_install_dir() + "\nexport DSE_HOME\n") | [
"def",
"export_dse_home_in_dse_env_sh",
"(",
"self",
")",
":",
"with",
"open",
"(",
"self",
".",
"get_bin_dir",
"(",
")",
"+",
"\"/dse-env.sh\"",
",",
"\"r\"",
")",
"as",
"dse_env_sh",
":",
"buf",
"=",
"dse_env_sh",
".",
"readlines",
"(",
")",
"with",
"open",
"(",
"self",
".",
"get_bin_dir",
"(",
")",
"+",
"\"/dse-env.sh\"",
",",
"\"w\"",
")",
"as",
"out_file",
":",
"for",
"line",
"in",
"buf",
":",
"out_file",
".",
"write",
"(",
"line",
")",
"if",
"line",
"==",
"\"# This is here so the installer can force set DSE_HOME\\n\"",
":",
"out_file",
".",
"write",
"(",
"\"DSE_HOME=\"",
"+",
"self",
".",
"get_install_dir",
"(",
")",
"+",
"\"\\nexport DSE_HOME\\n\"",
")"
]
| Due to the way CCM lays out files, separating the repository
from the node(s) confs, the `dse-env.sh` script of each node
needs to have its DSE_HOME var set and exported. Since DSE
4.5.x, the stock `dse-env.sh` file includes a commented-out
place to do exactly this, intended for installers.
Basically: read in the file, write it back out and add the two
lines.
'sstableloader' is an example of a node script that depends on
this, when used in a CCM-built cluster. | [
"Due",
"to",
"the",
"way",
"CCM",
"lays",
"out",
"files",
"separating",
"the",
"repository",
"from",
"the",
"node",
"(",
"s",
")",
"confs",
"the",
"dse",
"-",
"env",
".",
"sh",
"script",
"of",
"each",
"node",
"needs",
"to",
"have",
"its",
"DSE_HOME",
"var",
"set",
"and",
"exported",
".",
"Since",
"DSE",
"4",
".",
"5",
".",
"x",
"the",
"stock",
"dse",
"-",
"env",
".",
"sh",
"file",
"includes",
"a",
"commented",
"-",
"out",
"place",
"to",
"do",
"exactly",
"this",
"intended",
"for",
"installers",
".",
"Basically",
":",
"read",
"in",
"the",
"file",
"write",
"it",
"back",
"out",
"and",
"add",
"the",
"two",
"lines",
".",
"sstableloader",
"is",
"an",
"example",
"of",
"a",
"node",
"script",
"that",
"depends",
"on",
"this",
"when",
"used",
"in",
"a",
"CCM",
"-",
"built",
"cluster",
"."
]
| python | train | 51 |
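Stripped of the DSE specifics, the record is a read-all/rewrite/inject-after-marker file edit. A generic sketch on a temp file (the marker string is the one the record scans for; the payload path is a placeholder):

```python
import tempfile

MARKER = "# This is here so the installer can force set DSE_HOME\n"

def inject_after_marker(path, marker, payload):
    """Rewrite a file, inserting payload right after the marker line."""
    with open(path) as f:
        lines = f.readlines()
    with open(path, "w") as f:
        for line in lines:
            f.write(line)
            if line == marker:
                f.write(payload)

with tempfile.NamedTemporaryFile("w", suffix=".sh", delete=False) as tmp:
    tmp.write("#!/bin/sh\n" + MARKER + "echo done\n")

inject_after_marker(tmp.name, MARKER, "DSE_HOME=/opt/dse\nexport DSE_HOME\n")
print(open(tmp.name).read())
```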
payu-org/payu | payu/experiment.py | https://github.com/payu-org/payu/blob/1442a9a226012eff248b8097cc1eaabc3e224867/payu/experiment.py#L796-L806 | def postprocess(self):
"""Submit a postprocessing script after collation"""
assert self.postscript
envmod.setup()
envmod.module('load', 'pbs')
cmd = 'qsub {script}'.format(script=self.postscript)
cmd = shlex.split(cmd)
rc = sp.call(cmd)
assert rc == 0, 'Postprocessing script submission failed.' | [
"def",
"postprocess",
"(",
"self",
")",
":",
"assert",
"self",
".",
"postscript",
"envmod",
".",
"setup",
"(",
")",
"envmod",
".",
"module",
"(",
"'load'",
",",
"'pbs'",
")",
"cmd",
"=",
"'qsub {script}'",
".",
"format",
"(",
"script",
"=",
"self",
".",
"postscript",
")",
"cmd",
"=",
"shlex",
".",
"split",
"(",
"cmd",
")",
"rc",
"=",
"sp",
".",
"call",
"(",
"cmd",
")",
"assert",
"rc",
"==",
"0",
",",
"'Postprocessing script submission failed.'"
]
| Submit a postprocessing script after collation | [
"Submit",
"a",
"postprocessing",
"script",
"after",
"collation"
]
| python | train | 31.909091 |
hardbyte/python-can | can/interfaces/systec/ucan.py | https://github.com/hardbyte/python-can/blob/cdc5254d96072df7739263623f3e920628a7d214/can/interfaces/systec/ucan.py#L893-L903 | def check_support_ucannet(cls, hw_info_ex):
"""
Checks whether the module supports the usage of USB-CANnetwork driver.
:param HardwareInfoEx hw_info_ex:
Extended hardware information structure (see method :meth:`get_hardware_info`).
:return: True when the module does support the usage of the USB-CANnetwork driver, otherwise False.
:rtype: bool
"""
return cls.check_is_systec(hw_info_ex) and \
cls.check_version_is_equal_or_higher(hw_info_ex.m_dwFwVersionEx, 3, 8) | [
"def",
"check_support_ucannet",
"(",
"cls",
",",
"hw_info_ex",
")",
":",
"return",
"cls",
".",
"check_is_systec",
"(",
"hw_info_ex",
")",
"and",
"cls",
".",
"check_version_is_equal_or_higher",
"(",
"hw_info_ex",
".",
"m_dwFwVersionEx",
",",
"3",
",",
"8",
")"
]
| Checks whether the module supports the usage of USB-CANnetwork driver.
:param HardwareInfoEx hw_info_ex:
Extended hardware information structure (see method :meth:`get_hardware_info`).
:return: True when the module does support the usage of the USB-CANnetwork driver, otherwise False.
:rtype: bool | [
"Checks",
"whether",
"the",
"module",
"supports",
"the",
"usage",
"of",
"USB",
"-",
"CANnetwork",
"driver",
"."
]
| python | train | 49 |
MacHu-GWU/angora-project | angora/math/img2waveform.py | https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/math/img2waveform.py#L72-L261 | def img2ascii(img_path, ascii_path, ascii_char="*", pad=0):
"""Convert an image to ascii art text.
Suppose we have an image like that:
.. image:: images/rabbit.png
:align: left
Put some codes::
>>> from weatherlab.math.img2waveform import img2ascii
>>> img2ascii(r"testdata\img2waveform\rabbit.png",
... r"testdata\img2waveform\asciiart.txt", pad=0)
Then you will see this in asciiart.txt::
******
*** *** ****
** ** *********
** ** *** ***
** * ** **
** ** ** **
** * *** *
* ** ** **
** * ** **
** * ** *
* ** ** *
** ** * **
** * ** **
* * ** **
* ** * **
** ** ** **
** * ** **
** * * **
** * ** *
** * ** *
* ** ** *
* ** * *
* ** ** *
* ** ** *
** ** ** **
** * ** **
** * * **
** * * **
** * * **
* * ** **
* * ** *
** * ** *
** * ** *
** ** ** **
* ** ** **
* ** ** **
** ** ** *
** ** ** **
* ** ** **
** ** ** *
** ******* *
** ******* **
** **
** *
** **
*** *
**** ***
*** ***
** ****
** ***
** ***
** **
** **
* **
** **
** **
** **
** **
** **
** **
** **
* **
* **
** *
** **
* **
* **
** *
** *
** **
** **
** **
** **
** ** **
** *** *** **
* **** **** **
* *** **** **
** ** ** *
** *
** *
* **
** **
** *
* **
** **
** **
** *
** **
** **
** **
** *** ** **
** ****** ***
*** ****** **
*** * *** ***
*** ***
*** ***
**** ****
******** *******
*** ********** ******** ***
** *** ************ ********** *** * ***
** * **** *********************** *** ** ***
** * ** **** ** ******* * *** ***** ***
**** * * ***** ********** * **** * * ** **
*** * * ** * ******************************* * *** * **
** ***** * *** ********** ** ** ********** *** ** ***
** * ***** ** * ***** ** ** ***** * * ** * **
*** *** ************ ** ****** ** * * ** ** ** * ** ***
** ******* * * ** ** ** **** * ** * ** * **** **
** *** *** ******* ****** * ** * *** ***** *** ** ***** ** **
** * * ***** ************************************ * **** * **
*** ** ** *********************************************** *** ***
*** ** ****************************************** **** ** ** **
**** ** ** ******************************************** ** * **
** ****** ** ******************************************** ** * ***
** ***** *********************************************** ** ****
* *** ****************************** **************** *********
** ** *************************************** * * * ***** *
** ** ********************************************** *** *
* ** ** *********************************** ******* ** *
** ** ***************************************** *** ** *
*** ** * ********************************************** ** **
****** ************************************************ ** ***
**** *********************************************** ********
** *********************************************** ****
*** ** ******************************************* **
*** ** ***** ****** * * * * * ******** *** ** ** ***
*** * * **** **** **** * ** ** * *** ** ***
**** * * ** **** * *** ******** * *** *****
***** ** ** ** ** *** ** *** *****
******* * * ** * ** ********
*************** * *******************
****************************** ***
*** ********* **
** * **
** * **
** * **
** * **
** * **
** ** **
** ****** * ** *********
*************************************
**********
:param img_path: the image file path
:type img_path: str
:param ascii_path: the output ascii text file path
:type ascii_path: str
:param pad: how many space been filled in between two pixels
:type pad: int
"""
if len(ascii_char) != 1:
raise Exception("ascii_char has to be single character.")
image = Image.open(img_path).convert("L")
matrix = np.array(image)
# you can customize the gray scale fix behavior to fit color image
matrix[np.where(matrix >= 128)] = 255
matrix[np.where(matrix < 128)] = 0
lines = list()
for vector in matrix:
line = list()
for i in vector:
line.append(" " * pad)
if i:
line.append(" ")
else:
line.append(ascii_char)
lines.append("".join(line))
with open(ascii_path, "w") as f:
f.write("\n".join(lines)) | [
"def",
"img2ascii",
"(",
"img_path",
",",
"ascii_path",
",",
"ascii_char",
"=",
"\"*\"",
",",
"pad",
"=",
"0",
")",
":",
"if",
"len",
"(",
"ascii_char",
")",
"!=",
"1",
":",
"raise",
"Exception",
"(",
"\"ascii_char has to be single character.\"",
")",
"image",
"=",
"Image",
".",
"open",
"(",
"img_path",
")",
".",
"convert",
"(",
"\"L\"",
")",
"matrix",
"=",
"np",
".",
"array",
"(",
"image",
")",
"# you can customize the gray scale fix behavior to fit color image",
"matrix",
"[",
"np",
".",
"where",
"(",
"matrix",
">=",
"128",
")",
"]",
"=",
"255",
"matrix",
"[",
"np",
".",
"where",
"(",
"matrix",
"<",
"128",
")",
"]",
"=",
"0",
"lines",
"=",
"list",
"(",
")",
"for",
"vector",
"in",
"matrix",
":",
"line",
"=",
"list",
"(",
")",
"for",
"i",
"in",
"vector",
":",
"line",
".",
"append",
"(",
"\" \"",
"*",
"pad",
")",
"if",
"i",
":",
"line",
".",
"append",
"(",
"\" \"",
")",
"else",
":",
"line",
".",
"append",
"(",
"ascii_char",
")",
"lines",
".",
"append",
"(",
"\"\"",
".",
"join",
"(",
"line",
")",
")",
"with",
"open",
"(",
"ascii_path",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"\\n\"",
".",
"join",
"(",
"lines",
")",
")"
]
| Convert an image to ascii art text.
Suppose we have an image like that:
.. image:: images/rabbit.png
:align: left
Put some codes::
>>> from weatherlab.math.img2waveform import img2ascii
>>> img2ascii(r"testdata\img2waveform\rabbit.png",
... r"testdata\img2waveform\asciiart.txt", pad=0)
Then you will see this in asciiart.txt::
******
*** *** ****
** ** *********
** ** *** ***
** * ** **
** ** ** **
** * *** *
* ** ** **
** * ** **
** * ** *
* ** ** *
** ** * **
** * ** **
* * ** **
* ** * **
** ** ** **
** * ** **
** * * **
** * ** *
** * ** *
* ** ** *
* ** * *
* ** ** *
* ** ** *
** ** ** **
** * ** **
** * * **
** * * **
** * * **
* * ** **
* * ** *
** * ** *
** * ** *
** ** ** **
* ** ** **
* ** ** **
** ** ** *
** ** ** **
* ** ** **
** ** ** *
** ******* *
** ******* **
** **
** *
** **
*** *
**** ***
*** ***
** ****
** ***
** ***
** **
** **
* **
** **
** **
** **
** **
** **
** **
** **
* **
* **
** *
** **
* **
* **
** *
** *
** **
** **
** **
** **
** ** **
** *** *** **
* **** **** **
* *** **** **
** ** ** *
** *
** *
* **
** **
** *
* **
** **
** **
** *
** **
** **
** **
** *** ** **
** ****** ***
*** ****** **
*** * *** ***
*** ***
*** ***
**** ****
******** *******
*** ********** ******** ***
** *** ************ ********** *** * ***
** * **** *********************** *** ** ***
** * ** **** ** ******* * *** ***** ***
**** * * ***** ********** * **** * * ** **
*** * * ** * ******************************* * *** * **
** ***** * *** ********** ** ** ********** *** ** ***
** * ***** ** * ***** ** ** ***** * * ** * **
*** *** ************ ** ****** ** * * ** ** ** * ** ***
** ******* * * ** ** ** **** * ** * ** * **** **
** *** *** ******* ****** * ** * *** ***** *** ** ***** ** **
** * * ***** ************************************ * **** * **
*** ** ** *********************************************** *** ***
*** ** ****************************************** **** ** ** **
**** ** ** ******************************************** ** * **
** ****** ** ******************************************** ** * ***
** ***** *********************************************** ** ****
* *** ****************************** **************** *********
** ** *************************************** * * * ***** *
** ** ********************************************** *** *
* ** ** *********************************** ******* ** *
** ** ***************************************** *** ** *
*** ** * ********************************************** ** **
****** ************************************************ ** ***
**** *********************************************** ********
** *********************************************** ****
*** ** ******************************************* **
*** ** ***** ****** * * * * * ******** *** ** ** ***
*** * * **** **** **** * ** ** * *** ** ***
**** * * ** **** * *** ******** * *** *****
***** ** ** ** ** *** ** *** *****
******* * * ** * ** ********
*************** * *******************
****************************** ***
*** ********* **
** * **
** * **
** * **
** * **
** * **
** ** **
** ****** * ** *********
*************************************
**********
:param img_path: the image file path
:type img_path: str
:param ascii_path: the output ascii text file path
:type ascii_path: str
:param pad: how many space been filled in between two pixels
:type pad: int | [
"Convert",
"an",
"image",
"to",
"ascii",
"art",
"text",
"."
]
| python | train | 72.510526 |
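Before rendering, the record snaps grayscale values to pure black/white at a fixed threshold of 128; that step in isolation with numpy (note the `>=` branch runs first, so 128 itself becomes white):

```python
import numpy as np

def binarize(matrix, threshold=128):
    """Snap an 8-bit grayscale array to black (0) and white (255)."""
    out = matrix.copy()
    out[out >= threshold] = 255
    out[out < threshold] = 0
    return out

gray = np.array([[12, 200], [127, 128]], dtype=np.uint8)
print(binarize(gray))
# [[  0 255]
#  [  0 255]]
```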
espressif/esptool | ecdsa/ecdsa.py | https://github.com/espressif/esptool/blob/c583756c118039cfcfe256f7a3285618914d16a5/ecdsa/ecdsa.py#L93-L111 | def verifies( self, hash, signature ):
"""Verify that signature is a valid signature of hash.
Return True if the signature is valid.
"""
# From X9.62 J.3.1.
G = self.generator
n = G.order()
r = signature.r
s = signature.s
if r < 1 or r > n-1: return False
if s < 1 or s > n-1: return False
c = numbertheory.inverse_mod( s, n )
u1 = ( hash * c ) % n
u2 = ( r * c ) % n
xy = u1 * G + u2 * self.point
v = xy.x() % n
return v == r | [
"def",
"verifies",
"(",
"self",
",",
"hash",
",",
"signature",
")",
":",
"# From X9.62 J.3.1.",
"G",
"=",
"self",
".",
"generator",
"n",
"=",
"G",
".",
"order",
"(",
")",
"r",
"=",
"signature",
".",
"r",
"s",
"=",
"signature",
".",
"s",
"if",
"r",
"<",
"1",
"or",
"r",
">",
"n",
"-",
"1",
":",
"return",
"False",
"if",
"s",
"<",
"1",
"or",
"s",
">",
"n",
"-",
"1",
":",
"return",
"False",
"c",
"=",
"numbertheory",
".",
"inverse_mod",
"(",
"s",
",",
"n",
")",
"u1",
"=",
"(",
"hash",
"*",
"c",
")",
"%",
"n",
"u2",
"=",
"(",
"r",
"*",
"c",
")",
"%",
"n",
"xy",
"=",
"u1",
"*",
"G",
"+",
"u2",
"*",
"self",
".",
"point",
"v",
"=",
"xy",
".",
"x",
"(",
")",
"%",
"n",
"return",
"v",
"==",
"r"
]
| Verify that signature is a valid signature of hash.
Return True if the signature is valid. | [
"Verify",
"that",
"signature",
"is",
"a",
"valid",
"signature",
"of",
"hash",
".",
"Return",
"True",
"if",
"the",
"signature",
"is",
"valid",
"."
]
| python | train | 24.947368 |
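The body of this record is the X9.62 verification equation; written out, each line corresponds to one statement in the code (h is the hash value, (r, s) the signature, G the generator of order n, Q the public point):

```latex
\text{reject unless } 1 \le r \le n-1 \text{ and } 1 \le s \le n-1, \\
c \equiv s^{-1} \pmod{n}, \qquad
u_1 \equiv h\,c \pmod{n}, \qquad
u_2 \equiv r\,c \pmod{n}, \\
(x, y) = u_1 G + u_2 Q, \qquad
v \equiv x \pmod{n}, \qquad
\text{accept iff } v = r.
```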
wonambi-python/wonambi | wonambi/detect/spindle.py | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/detect/spindle.py#L1998-L2026 | def _detect_start_end(true_values):
"""From ndarray of bool values, return intervals of True values.
Parameters
----------
true_values : ndarray (dtype='bool')
array with bool values
Returns
-------
ndarray (dtype='int')
N x 2 matrix with starting and ending times.
"""
neg = zeros((1), dtype='bool')
int_values = asarray(concatenate((neg, true_values[:-1], neg)),
dtype='int')
# must discard last value to avoid axis out of bounds
cross_threshold = diff(int_values)
event_starts = where(cross_threshold == 1)[0]
event_ends = where(cross_threshold == -1)[0]
if len(event_starts):
events = vstack((event_starts, event_ends)).T
else:
events = None
return events | [
"def",
"_detect_start_end",
"(",
"true_values",
")",
":",
"neg",
"=",
"zeros",
"(",
"(",
"1",
")",
",",
"dtype",
"=",
"'bool'",
")",
"int_values",
"=",
"asarray",
"(",
"concatenate",
"(",
"(",
"neg",
",",
"true_values",
"[",
":",
"-",
"1",
"]",
",",
"neg",
")",
")",
",",
"dtype",
"=",
"'int'",
")",
"# must discard last value to avoid axis out of bounds",
"cross_threshold",
"=",
"diff",
"(",
"int_values",
")",
"event_starts",
"=",
"where",
"(",
"cross_threshold",
"==",
"1",
")",
"[",
"0",
"]",
"event_ends",
"=",
"where",
"(",
"cross_threshold",
"==",
"-",
"1",
")",
"[",
"0",
"]",
"if",
"len",
"(",
"event_starts",
")",
":",
"events",
"=",
"vstack",
"(",
"(",
"event_starts",
",",
"event_ends",
")",
")",
".",
"T",
"else",
":",
"events",
"=",
"None",
"return",
"events"
]
| From ndarray of bool values, return intervals of True values.
Parameters
----------
true_values : ndarray (dtype='bool')
array with bool values
Returns
-------
ndarray (dtype='int')
N x 2 matrix with starting and ending times. | [
"From",
"ndarray",
"of",
"bool",
"values",
"return",
"intervals",
"of",
"True",
"values",
"."
]
| python | train | 26.310345 |
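The record uses the classic pad-with-False/diff trick: rising edges of the padded 0/1 array mark run starts, falling edges mark run ends. A self-contained numpy version with a worked input:

```python
import numpy as np

def detect_start_end(true_values):
    """Return an N x 2 array of [start, stop) indices of True runs."""
    neg = np.zeros(1, dtype=bool)
    int_values = np.concatenate((neg, true_values[:-1], neg)).astype(int)
    cross = np.diff(int_values)
    starts = np.where(cross == 1)[0]
    ends = np.where(cross == -1)[0]
    return np.vstack((starts, ends)).T if len(starts) else None

mask = np.array([False, True, True, False, True])
print(detect_start_end(mask))
# [[1 3]] -- the trailing True is dropped by the [:-1] slice, as in the record
```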
PyCQA/pyflakes | pyflakes/api.py | https://github.com/PyCQA/pyflakes/blob/232cb1d27ee134bf96adc8f37e53589dc259b159/pyflakes/api.py#L102-L122 | def isPythonFile(filename):
"""Return True if filename points to a Python file."""
if filename.endswith('.py'):
return True
# Avoid obvious Emacs backup files
if filename.endswith("~"):
return False
max_bytes = 128
try:
with open(filename, 'rb') as f:
text = f.read(max_bytes)
if not text:
return False
except IOError:
return False
first_line = text.splitlines()[0]
return PYTHON_SHEBANG_REGEX.match(first_line) | [
"def",
"isPythonFile",
"(",
"filename",
")",
":",
"if",
"filename",
".",
"endswith",
"(",
"'.py'",
")",
":",
"return",
"True",
"# Avoid obvious Emacs backup files",
"if",
"filename",
".",
"endswith",
"(",
"\"~\"",
")",
":",
"return",
"False",
"max_bytes",
"=",
"128",
"try",
":",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"f",
":",
"text",
"=",
"f",
".",
"read",
"(",
"max_bytes",
")",
"if",
"not",
"text",
":",
"return",
"False",
"except",
"IOError",
":",
"return",
"False",
"first_line",
"=",
"text",
".",
"splitlines",
"(",
")",
"[",
"0",
"]",
"return",
"PYTHON_SHEBANG_REGEX",
".",
"match",
"(",
"first_line",
")"
]
| Return True if filename points to a Python file. | [
"Return",
"True",
"if",
"filename",
"points",
"to",
"a",
"Python",
"file",
"."
]
| python | train | 23.952381 |
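A runnable version of the same check. `PYTHON_SHEBANG_REGEX` is module-level in pyflakes and not part of this record, so a simplified stand-in is assumed here:

```python
import os
import re
import tempfile

# Simplified stand-in for pyflakes' module-level PYTHON_SHEBANG_REGEX (assumption).
PYTHON_SHEBANG_REGEX = re.compile(rb'^#!.*\bpython[0-9.]*')

def is_python_file(filename):
    if filename.endswith('.py'):
        return True
    if filename.endswith('~'):  # obvious Emacs backup file
        return False
    try:
        with open(filename, 'rb') as f:
            text = f.read(128)
        if not text:
            return False
    except IOError:
        return False
    return bool(PYTHON_SHEBANG_REGEX.match(text.splitlines()[0]))

with tempfile.NamedTemporaryFile('wb', delete=False) as tmp:
    tmp.write(b'#!/usr/bin/env python3\nprint("hi")\n')
print(is_python_file(tmp.name))  # True
os.unlink(tmp.name)
```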
JukeboxPipeline/jukebox-core | src/jukeboxcore/gui/reftrackitemdata.py | https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/reftrackitemdata.py#L149-L165 | def reftrack_status_data(rt, role):
"""Return the data for the status
:param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
:type rt: :class:`jukeboxcore.reftrack.Reftrack`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the status
:rtype: depending on role
:raises: None
"""
status = rt.status()
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
if status:
return status
else:
return "Not in scene!" | [
"def",
"reftrack_status_data",
"(",
"rt",
",",
"role",
")",
":",
"status",
"=",
"rt",
".",
"status",
"(",
")",
"if",
"role",
"==",
"QtCore",
".",
"Qt",
".",
"DisplayRole",
"or",
"role",
"==",
"QtCore",
".",
"Qt",
".",
"EditRole",
":",
"if",
"status",
":",
"return",
"status",
"else",
":",
"return",
"\"Not in scene!\""
]
| Return the data for the status
:param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
:type rt: :class:`jukeboxcore.reftrack.Reftrack`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the status
:rtype: depending on role
:raises: None | [
"Return",
"the",
"data",
"for",
"the",
"status"
]
| python | train | 31.294118 |
UCL-INGI/INGInious | inginious/frontend/session_mongodb.py | https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/session_mongodb.py#L22-L71 | def needs_encode(obj):
'''
>>> from re import compile
>>> atomics = (True, 1, 1.0, '', None, compile(''), datetime.now(), b'')
>>> any(needs_encode(i) for i in atomics)
False
>>> needs_encode([1, 2, 3])
False
>>> needs_encode([])
False
>>> needs_encode([1, [2, 3]])
False
>>> needs_encode({})
False
>>> needs_encode({'1': {'2': 3}})
False
>>> needs_encode({'1': [2]})
False
>>> needs_encode(b'1')
False
Objects that don't round trip need encoding::
>>> needs_encode(tuple())
True
>>> needs_encode(set())
True
>>> needs_encode([1, [set()]])
True
>>> needs_encode({'1': {'2': set()}})
True
Mongo rejects dicts with non-string keys so they need encoding too::
>>> needs_encode({1: 2})
True
>>> needs_encode({'1': {None: True}})
True
'''
obtype = type(obj)
if obtype in atomic_types:
return False
if obtype is list:
return any(needs_encode(i) for i in obj)
if obtype is dict:
return any(type(k) not in valid_key_types or needs_encode(v)
for (k, v) in obj.items())
return True | [
"def",
"needs_encode",
"(",
"obj",
")",
":",
"obtype",
"=",
"type",
"(",
"obj",
")",
"if",
"obtype",
"in",
"atomic_types",
":",
"return",
"False",
"if",
"obtype",
"is",
"list",
":",
"return",
"any",
"(",
"needs_encode",
"(",
"i",
")",
"for",
"i",
"in",
"obj",
")",
"if",
"obtype",
"is",
"dict",
":",
"return",
"any",
"(",
"type",
"(",
"k",
")",
"not",
"in",
"valid_key_types",
"or",
"needs_encode",
"(",
"v",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"obj",
".",
"items",
"(",
")",
")",
"return",
"True"
]
| >>> from re import compile
>>> atomics = (True, 1, 1.0, '', None, compile(''), datetime.now(), b'')
>>> any(needs_encode(i) for i in atomics)
False
>>> needs_encode([1, 2, 3])
False
>>> needs_encode([])
False
>>> needs_encode([1, [2, 3]])
False
>>> needs_encode({})
False
>>> needs_encode({'1': {'2': 3}})
False
>>> needs_encode({'1': [2]})
False
>>> needs_encode(b'1')
False
Objects that don't round trip need encoding::
>>> needs_encode(tuple())
True
>>> needs_encode(set())
True
>>> needs_encode([1, [set()]])
True
>>> needs_encode({'1': {'2': set()}})
True
Mongo rejects dicts with non-string keys so they need encoding too::
>>> needs_encode({1: 2})
True
>>> needs_encode({'1': {None: True}})
True | [
">>>",
"from",
"re",
"import",
"compile",
">>>",
"atomics",
"=",
"(",
"True",
"1",
"1",
".",
"0",
"None",
"compile",
"(",
")",
"datetime",
".",
"now",
"()",
"b",
")",
">>>",
"any",
"(",
"needs_encode",
"(",
"i",
")",
"for",
"i",
"in",
"atomics",
")",
"False",
">>>",
"needs_encode",
"(",
"[",
"1",
"2",
"3",
"]",
")",
"False",
">>>",
"needs_encode",
"(",
"[]",
")",
"False",
">>>",
"needs_encode",
"(",
"[",
"1",
"[",
"2",
"3",
"]]",
")",
"False",
">>>",
"needs_encode",
"(",
"{}",
")",
"False",
">>>",
"needs_encode",
"(",
"{",
"1",
":",
"{",
"2",
":",
"3",
"}}",
")",
"False",
">>>",
"needs_encode",
"(",
"{",
"1",
":",
"[",
"2",
"]",
"}",
")",
"False",
">>>",
"needs_encode",
"(",
"b",
"1",
")",
"False"
]
| python | train | 22.54 |
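`atomic_types` and `valid_key_types` live outside this record; the doctest pins down what they must contain, so a self-contained version can be reconstructed (the two tuples below are assumptions derived from that doctest):

```python
from datetime import datetime
from re import compile as re_compile

# Assumed module-level definitions, reconstructed from the doctest above.
atomic_types = (bool, int, float, str, type(None),
                type(re_compile('')), datetime, bytes)
valid_key_types = (str,)

def needs_encode(obj):
    obtype = type(obj)
    if obtype in atomic_types:
        return False
    if obtype is list:
        return any(needs_encode(i) for i in obj)
    if obtype is dict:
        return any(type(k) not in valid_key_types or needs_encode(v)
                   for (k, v) in obj.items())
    return True

assert not needs_encode({'1': [2]})
assert needs_encode(tuple()) and needs_encode({1: 2})
```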