repo (string, 7–55 chars) | path (string, 4–223 chars) | url (string, 87–315 chars) | code (string, 75–104k chars) | code_tokens (list) | docstring (string, 1–46.9k chars) | docstring_tokens (list) | language (string, 1 value) | partition (string, 3 values) | avg_line_len (float64, 7.91–980)
---|---|---|---|---|---|---|---|---|---
CivicSpleen/ambry
|
ambry/etl/pipeline.py
|
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/etl/pipeline.py#L2404-L2420
|
def _to_ascii(s):
    """ Converts given string to ascii ignoring non ascii.
    Args:
        s (text or binary):
    Returns:
        str:
    """
    # TODO: Always use unicode within ambry.
    from six import text_type, binary_type
    if isinstance(s, text_type):
        ascii_ = s.encode('ascii', 'ignore')
    elif isinstance(s, binary_type):
        ascii_ = s.decode('utf-8').encode('ascii', 'ignore')
    else:
        raise Exception('Unknown text type - {}'.format(type(s)))
    return ascii_
|
[
"def",
"_to_ascii",
"(",
"s",
")",
":",
"# TODO: Always use unicode within ambry.",
"from",
"six",
"import",
"text_type",
",",
"binary_type",
"if",
"isinstance",
"(",
"s",
",",
"text_type",
")",
":",
"ascii_",
"=",
"s",
".",
"encode",
"(",
"'ascii'",
",",
"'ignore'",
")",
"elif",
"isinstance",
"(",
"s",
",",
"binary_type",
")",
":",
"ascii_",
"=",
"s",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"encode",
"(",
"'ascii'",
",",
"'ignore'",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Unknown text type - {}'",
".",
"format",
"(",
"type",
"(",
"s",
")",
")",
")",
"return",
"ascii_"
] |
Converts given string to ascii ignoring non ascii.
Args:
s (text or binary):
Returns:
str:
|
[
"Converts",
"given",
"string",
"to",
"ascii",
"ignoring",
"non",
"ascii",
".",
"Args",
":",
"s",
"(",
"text",
"or",
"binary",
")",
":"
] |
python
|
train
| 28.882353 |
bitcraze/crazyflie-lib-python
|
cflib/crazyflie/param.py
|
https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/crazyflie/param.py#L340-L350
|
def request_param_update(self, var_id):
    """Place a param update request on the queue"""
    self._useV2 = self.cf.platform.get_protocol_version() >= 4
    pk = CRTPPacket()
    pk.set_header(CRTPPort.PARAM, READ_CHANNEL)
    if self._useV2:
        pk.data = struct.pack('<H', var_id)
    else:
        pk.data = struct.pack('<B', var_id)
    logger.debug('Requesting request to update param [%d]', var_id)
    self.request_queue.put(pk)
|
[
"def",
"request_param_update",
"(",
"self",
",",
"var_id",
")",
":",
"self",
".",
"_useV2",
"=",
"self",
".",
"cf",
".",
"platform",
".",
"get_protocol_version",
"(",
")",
">=",
"4",
"pk",
"=",
"CRTPPacket",
"(",
")",
"pk",
".",
"set_header",
"(",
"CRTPPort",
".",
"PARAM",
",",
"READ_CHANNEL",
")",
"if",
"self",
".",
"_useV2",
":",
"pk",
".",
"data",
"=",
"struct",
".",
"pack",
"(",
"'<H'",
",",
"var_id",
")",
"else",
":",
"pk",
".",
"data",
"=",
"struct",
".",
"pack",
"(",
"'<B'",
",",
"var_id",
")",
"logger",
".",
"debug",
"(",
"'Requesting request to update param [%d]'",
",",
"var_id",
")",
"self",
".",
"request_queue",
".",
"put",
"(",
"pk",
")"
] |
Place a param update request on the queue
|
[
"Place",
"a",
"param",
"update",
"request",
"on",
"the",
"queue"
] |
python
|
train
| 42.818182 |
pricingassistant/mrq
|
mrq/queue_raw.py
|
https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/queue_raw.py#L97-L117
|
def remove_raw_jobs(self, params_list):
    """ Remove jobs from a raw queue with their raw params. """
    if len(params_list) == 0:
        return

    # ZSET
    if self.is_sorted:
        context.connections.redis.zrem(self.redis_key, *iter(params_list))
    # SET
    elif self.is_set:
        context.connections.redis.srem(self.redis_key, *params_list)
    else:
        # O(n)! Use with caution.
        for k in params_list:
            context.connections.redis.lrem(self.redis_key, 1, k)

    context.metric("queues.%s.removed" % self.id, len(params_list))
    context.metric("queues.all.removed", len(params_list))
|
[
"def",
"remove_raw_jobs",
"(",
"self",
",",
"params_list",
")",
":",
"if",
"len",
"(",
"params_list",
")",
"==",
"0",
":",
"return",
"# ZSET",
"if",
"self",
".",
"is_sorted",
":",
"context",
".",
"connections",
".",
"redis",
".",
"zrem",
"(",
"self",
".",
"redis_key",
",",
"*",
"iter",
"(",
"params_list",
")",
")",
"# SET",
"elif",
"self",
".",
"is_set",
":",
"context",
".",
"connections",
".",
"redis",
".",
"srem",
"(",
"self",
".",
"redis_key",
",",
"*",
"params_list",
")",
"else",
":",
"# O(n)! Use with caution.",
"for",
"k",
"in",
"params_list",
":",
"context",
".",
"connections",
".",
"redis",
".",
"lrem",
"(",
"self",
".",
"redis_key",
",",
"1",
",",
"k",
")",
"context",
".",
"metric",
"(",
"\"queues.%s.removed\"",
"%",
"self",
".",
"id",
",",
"len",
"(",
"params_list",
")",
")",
"context",
".",
"metric",
"(",
"\"queues.all.removed\"",
",",
"len",
"(",
"params_list",
")",
")"
] |
Remove jobs from a raw queue with their raw params.
|
[
"Remove",
"jobs",
"from",
"a",
"raw",
"queue",
"with",
"their",
"raw",
"params",
"."
] |
python
|
train
| 31.857143 |
OLC-Bioinformatics/ConFindr
|
confindr_src/confindr.py
|
https://github.com/OLC-Bioinformatics/ConFindr/blob/4c292617c3f270ebd5ff138cbc5a107f6d01200d/confindr_src/confindr.py#L370-L403
|
def find_rmlst_type(kma_report, rmlst_report):
    """
    Uses a report generated by KMA to determine what allele is present for each rMLST gene.
    :param kma_report: The .res report generated by KMA.
    :param rmlst_report: rMLST report file to write information to.
    :return: a sorted list of loci present, in format gene_allele
    """
    genes_to_use = dict()
    score_dict = dict()
    gene_alleles = list()
    with open(kma_report) as tsvfile:
        reader = csv.DictReader(tsvfile, delimiter='\t')
        for row in reader:
            gene_allele = row['#Template']
            score = int(row['Score'])
            gene = gene_allele.split('_')[0]
            allele = gene_allele.split('_')[1]
            if gene not in score_dict:
                score_dict[gene] = score
                genes_to_use[gene] = allele
            else:
                if score > score_dict[gene]:
                    score_dict[gene] = score
                    genes_to_use[gene] = allele
    for gene in genes_to_use:
        gene_alleles.append(gene + '_' + genes_to_use[gene].replace(' ', ''))
    gene_alleles = sorted(gene_alleles)
    with open(rmlst_report, 'w') as f:
        f.write('Gene,Allele\n')
        for gene_allele in gene_alleles:
            gene = gene_allele.split('_')[0]
            allele = gene_allele.split('_')[1]
            f.write('{},{}\n'.format(gene, allele))
    return gene_alleles
|
[
"def",
"find_rmlst_type",
"(",
"kma_report",
",",
"rmlst_report",
")",
":",
"genes_to_use",
"=",
"dict",
"(",
")",
"score_dict",
"=",
"dict",
"(",
")",
"gene_alleles",
"=",
"list",
"(",
")",
"with",
"open",
"(",
"kma_report",
")",
"as",
"tsvfile",
":",
"reader",
"=",
"csv",
".",
"DictReader",
"(",
"tsvfile",
",",
"delimiter",
"=",
"'\\t'",
")",
"for",
"row",
"in",
"reader",
":",
"gene_allele",
"=",
"row",
"[",
"'#Template'",
"]",
"score",
"=",
"int",
"(",
"row",
"[",
"'Score'",
"]",
")",
"gene",
"=",
"gene_allele",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
"allele",
"=",
"gene_allele",
".",
"split",
"(",
"'_'",
")",
"[",
"1",
"]",
"if",
"gene",
"not",
"in",
"score_dict",
":",
"score_dict",
"[",
"gene",
"]",
"=",
"score",
"genes_to_use",
"[",
"gene",
"]",
"=",
"allele",
"else",
":",
"if",
"score",
">",
"score_dict",
"[",
"gene",
"]",
":",
"score_dict",
"[",
"gene",
"]",
"=",
"score",
"genes_to_use",
"[",
"gene",
"]",
"=",
"allele",
"for",
"gene",
"in",
"genes_to_use",
":",
"gene_alleles",
".",
"append",
"(",
"gene",
"+",
"'_'",
"+",
"genes_to_use",
"[",
"gene",
"]",
".",
"replace",
"(",
"' '",
",",
"''",
")",
")",
"gene_alleles",
"=",
"sorted",
"(",
"gene_alleles",
")",
"with",
"open",
"(",
"rmlst_report",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"'Gene,Allele\\n'",
")",
"for",
"gene_allele",
"in",
"gene_alleles",
":",
"gene",
"=",
"gene_allele",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
"allele",
"=",
"gene_allele",
".",
"split",
"(",
"'_'",
")",
"[",
"1",
"]",
"f",
".",
"write",
"(",
"'{},{}\\n'",
".",
"format",
"(",
"gene",
",",
"allele",
")",
")",
"return",
"gene_alleles"
] |
Uses a report generated by KMA to determine what allele is present for each rMLST gene.
:param kma_report: The .res report generated by KMA.
:param rmlst_report: rMLST report file to write information to.
:return: a sorted list of loci present, in format gene_allele
|
[
"Uses",
"a",
"report",
"generated",
"by",
"KMA",
"to",
"determine",
"what",
"allele",
"is",
"present",
"for",
"each",
"rMLST",
"gene",
".",
":",
"param",
"kma_report",
":",
"The",
".",
"res",
"report",
"generated",
"by",
"KMA",
".",
":",
"param",
"rmlst_report",
":",
"rMLST",
"report",
"file",
"to",
"write",
"information",
"to",
".",
":",
"return",
":",
"a",
"sorted",
"list",
"of",
"loci",
"present",
"in",
"format",
"gene_allele"
] |
python
|
train
| 40.941176 |
awslabs/sockeye
|
sockeye_contrib/autopilot/autopilot.py
|
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye_contrib/autopilot/autopilot.py#L181-L231
|
def populate_parallel_text(extract_dir: str,
                           file_sets: List[Tuple[str, str, str]],
                           dest_prefix: str,
                           keep_separate: bool,
                           head_n: int = 0):
    """
    Create raw parallel train, dev, or test files with a given prefix.
    :param extract_dir: Directory where raw files (inputs) are extracted.
    :param file_sets: Sets of files to use.
    :param dest_prefix: Prefix for output files.
    :param keep_separate: True if each file set (source-target pair) should have
                          its own file (used for test sets).
    :param head_n: If N>0, use only the first N lines (used in test mode).
    """
    source_out = None  # type: IO[Any]
    target_out = None  # type: IO[Any]
    lines_written = 0
    # Single output file for each side
    if not keep_separate:
        source_dest = dest_prefix + SUFFIX_SRC_GZ
        target_dest = dest_prefix + SUFFIX_TRG_GZ
        logging.info("Populate: %s %s", source_dest, target_dest)
        source_out = gzip.open(source_dest, "wt", encoding="utf-8")
        target_out = gzip.open(target_dest, "wt", encoding="utf-8")
    for i, (source_fname, target_fname, text_type) in enumerate(file_sets):
        # One output file per input file for each side
        if keep_separate:
            if source_out:
                source_out.close()
            if target_out:
                target_out.close()
            source_dest = dest_prefix + str(i) + "." + SUFFIX_SRC_GZ
            target_dest = dest_prefix + str(i) + "." + SUFFIX_TRG_GZ
            logging.info("Populate: %s %s", source_dest, target_dest)
            source_out = gzip.open(source_dest, "wt", encoding="utf-8")
            target_out = gzip.open(target_dest, "wt", encoding="utf-8")
        for source_line, target_line in zip(
                plain_text_iter(os.path.join(extract_dir, source_fname), text_type, DATA_SRC),
                plain_text_iter(os.path.join(extract_dir, target_fname), text_type, DATA_TRG)):
            # Only write N lines total if requested, but reset per file when
            # keeping files separate
            if head_n > 0 and lines_written >= head_n:
                if keep_separate:
                    lines_written = 0
                break
            source_out.write("{}\n".format(source_line))
            target_out.write("{}\n".format(target_line))
            lines_written += 1
    source_out.close()
    target_out.close()
|
[
"def",
"populate_parallel_text",
"(",
"extract_dir",
":",
"str",
",",
"file_sets",
":",
"List",
"[",
"Tuple",
"[",
"str",
",",
"str",
",",
"str",
"]",
"]",
",",
"dest_prefix",
":",
"str",
",",
"keep_separate",
":",
"bool",
",",
"head_n",
":",
"int",
"=",
"0",
")",
":",
"source_out",
"=",
"None",
"# type: IO[Any]",
"target_out",
"=",
"None",
"# type: IO[Any]",
"lines_written",
"=",
"0",
"# Single output file for each side",
"if",
"not",
"keep_separate",
":",
"source_dest",
"=",
"dest_prefix",
"+",
"SUFFIX_SRC_GZ",
"target_dest",
"=",
"dest_prefix",
"+",
"SUFFIX_TRG_GZ",
"logging",
".",
"info",
"(",
"\"Populate: %s %s\"",
",",
"source_dest",
",",
"target_dest",
")",
"source_out",
"=",
"gzip",
".",
"open",
"(",
"source_dest",
",",
"\"wt\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"target_out",
"=",
"gzip",
".",
"open",
"(",
"target_dest",
",",
"\"wt\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"for",
"i",
",",
"(",
"source_fname",
",",
"target_fname",
",",
"text_type",
")",
"in",
"enumerate",
"(",
"file_sets",
")",
":",
"# One output file per input file for each side",
"if",
"keep_separate",
":",
"if",
"source_out",
":",
"source_out",
".",
"close",
"(",
")",
"if",
"target_out",
":",
"target_out",
".",
"close",
"(",
")",
"source_dest",
"=",
"dest_prefix",
"+",
"str",
"(",
"i",
")",
"+",
"\".\"",
"+",
"SUFFIX_SRC_GZ",
"target_dest",
"=",
"dest_prefix",
"+",
"str",
"(",
"i",
")",
"+",
"\".\"",
"+",
"SUFFIX_TRG_GZ",
"logging",
".",
"info",
"(",
"\"Populate: %s %s\"",
",",
"source_dest",
",",
"target_dest",
")",
"source_out",
"=",
"gzip",
".",
"open",
"(",
"source_dest",
",",
"\"wt\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"target_out",
"=",
"gzip",
".",
"open",
"(",
"target_dest",
",",
"\"wt\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"for",
"source_line",
",",
"target_line",
"in",
"zip",
"(",
"plain_text_iter",
"(",
"os",
".",
"path",
".",
"join",
"(",
"extract_dir",
",",
"source_fname",
")",
",",
"text_type",
",",
"DATA_SRC",
")",
",",
"plain_text_iter",
"(",
"os",
".",
"path",
".",
"join",
"(",
"extract_dir",
",",
"target_fname",
")",
",",
"text_type",
",",
"DATA_TRG",
")",
")",
":",
"# Only write N lines total if requested, but reset per file when",
"# keeping files separate",
"if",
"head_n",
">",
"0",
"and",
"lines_written",
">=",
"head_n",
":",
"if",
"keep_separate",
":",
"lines_written",
"=",
"0",
"break",
"source_out",
".",
"write",
"(",
"\"{}\\n\"",
".",
"format",
"(",
"source_line",
")",
")",
"target_out",
".",
"write",
"(",
"\"{}\\n\"",
".",
"format",
"(",
"target_line",
")",
")",
"lines_written",
"+=",
"1",
"source_out",
".",
"close",
"(",
")",
"target_out",
".",
"close",
"(",
")"
] |
Create raw parallel train, dev, or test files with a given prefix.
:param extract_dir: Directory where raw files (inputs) are extracted.
:param file_sets: Sets of files to use.
:param dest_prefix: Prefix for output files.
:param keep_separate: True if each file set (source-target pair) should have
its own file (used for test sets).
:param head_n: If N>0, use only the first N lines (used in test mode).
|
[
"Create",
"raw",
"parallel",
"train",
"dev",
"or",
"test",
"files",
"with",
"a",
"given",
"prefix",
"."
] |
python
|
train
| 48.235294 |
SwissDataScienceCenter/renku-python
|
renku/cli/_format/graph.py
|
https://github.com/SwissDataScienceCenter/renku-python/blob/691644d695b055a01e0ca22b2620e55bbd928c0d/renku/cli/_format/graph.py#L86-L175
|
def _rdf2dot_simple(g, stream):
    """Create a simple graph of processes and artifacts."""
    from itertools import chain
    import re

    path_re = re.compile(
        r'file:///(?P<type>[a-zA-Z]+)/'
        r'(?P<commit>\w+)'
        r'(?P<path>.+)?'
    )

    inputs = g.query(
        """
        SELECT ?input ?role ?activity ?comment
        WHERE {
            ?activity (prov:qualifiedUsage/prov:entity) ?input .
            ?activity prov:qualifiedUsage ?qual .
            ?qual prov:hadRole ?role .
            ?qual prov:entity ?input .
            ?qual rdf:type ?type .
            ?activity rdf:type wfprov:ProcessRun .
            ?activity rdfs:comment ?comment .
            FILTER NOT EXISTS {?activity rdf:type wfprov:WorkflowRun}
        }
        """
    )
    outputs = g.query(
        """
        SELECT ?activity ?role ?output ?comment
        WHERE {
            ?output (prov:qualifiedGeneration/prov:activity) ?activity .
            ?output prov:qualifiedGeneration ?qual .
            ?qual prov:hadRole ?role .
            ?qual prov:activity ?activity .
            ?qual rdf:type ?type .
            ?activity rdf:type wfprov:ProcessRun ;
                      rdfs:comment ?comment .
            FILTER NOT EXISTS {?activity rdf:type wfprov:WorkflowRun}
        }
        """
    )

    activity_nodes = {}
    artifact_nodes = {}
    for source, role, target, comment, in chain(inputs, outputs):
        # extract the pieces of the process URI
        src_path = path_re.match(source).groupdict()
        tgt_path = path_re.match(target).groupdict()
        # write the edge
        stream.write(
            '\t"{src_commit}:{src_path}" -> '
            '"{tgt_commit}:{tgt_path}" '
            '[label={role}] \n'.format(
                src_commit=src_path['commit'][:5],
                src_path=src_path.get('path') or '',
                tgt_commit=tgt_path['commit'][:5],
                tgt_path=tgt_path.get('path') or '',
                role=role
            )
        )
        if src_path.get('type') == 'commit':
            activity_nodes.setdefault(source, {'comment': comment})
            artifact_nodes.setdefault(target, {})
        if tgt_path.get('type') == 'commit':
            activity_nodes.setdefault(target, {'comment': comment})
            artifact_nodes.setdefault(source, {})
    # customize the nodes
    for node, content in activity_nodes.items():
        node_path = path_re.match(node).groupdict()
        stream.write(
            '\t"{commit}:{path}" '
            '[shape=box label="#{commit}:{path}:{comment}"] \n'.format(
                comment=content['comment'],
                commit=node_path['commit'][:5],
                path=node_path.get('path') or ''
            )
        )
    for node, content in artifact_nodes.items():
        node_path = path_re.match(node).groupdict()
        stream.write(
            '\t"{commit}:{path}" '
            '[label="#{commit}:{path}"] \n'.format(
                commit=node_path['commit'][:5],
                path=node_path.get('path') or ''
            )
        )
    stream.write('}\n')
|
[
"def",
"_rdf2dot_simple",
"(",
"g",
",",
"stream",
")",
":",
"from",
"itertools",
"import",
"chain",
"import",
"re",
"path_re",
"=",
"re",
".",
"compile",
"(",
"r'file:///(?P<type>[a-zA-Z]+)/'",
"r'(?P<commit>\\w+)'",
"r'(?P<path>.+)?'",
")",
"inputs",
"=",
"g",
".",
"query",
"(",
"\"\"\"\n SELECT ?input ?role ?activity ?comment\n WHERE {\n ?activity (prov:qualifiedUsage/prov:entity) ?input .\n ?activity prov:qualifiedUsage ?qual .\n ?qual prov:hadRole ?role .\n ?qual prov:entity ?input .\n ?qual rdf:type ?type .\n ?activity rdf:type wfprov:ProcessRun .\n ?activity rdfs:comment ?comment .\n FILTER NOT EXISTS {?activity rdf:type wfprov:WorkflowRun}\n }\n \"\"\"",
")",
"outputs",
"=",
"g",
".",
"query",
"(",
"\"\"\"\n SELECT ?activity ?role ?output ?comment\n WHERE {\n ?output (prov:qualifiedGeneration/prov:activity) ?activity .\n ?output prov:qualifiedGeneration ?qual .\n ?qual prov:hadRole ?role .\n ?qual prov:activity ?activity .\n ?qual rdf:type ?type .\n ?activity rdf:type wfprov:ProcessRun ;\n rdfs:comment ?comment .\n FILTER NOT EXISTS {?activity rdf:type wfprov:WorkflowRun}\n }\n \"\"\"",
")",
"activity_nodes",
"=",
"{",
"}",
"artifact_nodes",
"=",
"{",
"}",
"for",
"source",
",",
"role",
",",
"target",
",",
"comment",
",",
"in",
"chain",
"(",
"inputs",
",",
"outputs",
")",
":",
"# extract the pieces of the process URI",
"src_path",
"=",
"path_re",
".",
"match",
"(",
"source",
")",
".",
"groupdict",
"(",
")",
"tgt_path",
"=",
"path_re",
".",
"match",
"(",
"target",
")",
".",
"groupdict",
"(",
")",
"# write the edge",
"stream",
".",
"write",
"(",
"'\\t\"{src_commit}:{src_path}\" -> '",
"'\"{tgt_commit}:{tgt_path}\" '",
"'[label={role}] \\n'",
".",
"format",
"(",
"src_commit",
"=",
"src_path",
"[",
"'commit'",
"]",
"[",
":",
"5",
"]",
",",
"src_path",
"=",
"src_path",
".",
"get",
"(",
"'path'",
")",
"or",
"''",
",",
"tgt_commit",
"=",
"tgt_path",
"[",
"'commit'",
"]",
"[",
":",
"5",
"]",
",",
"tgt_path",
"=",
"tgt_path",
".",
"get",
"(",
"'path'",
")",
"or",
"''",
",",
"role",
"=",
"role",
")",
")",
"if",
"src_path",
".",
"get",
"(",
"'type'",
")",
"==",
"'commit'",
":",
"activity_nodes",
".",
"setdefault",
"(",
"source",
",",
"{",
"'comment'",
":",
"comment",
"}",
")",
"artifact_nodes",
".",
"setdefault",
"(",
"target",
",",
"{",
"}",
")",
"if",
"tgt_path",
".",
"get",
"(",
"'type'",
")",
"==",
"'commit'",
":",
"activity_nodes",
".",
"setdefault",
"(",
"target",
",",
"{",
"'comment'",
":",
"comment",
"}",
")",
"artifact_nodes",
".",
"setdefault",
"(",
"source",
",",
"{",
"}",
")",
"# customize the nodes",
"for",
"node",
",",
"content",
"in",
"activity_nodes",
".",
"items",
"(",
")",
":",
"node_path",
"=",
"path_re",
".",
"match",
"(",
"node",
")",
".",
"groupdict",
"(",
")",
"stream",
".",
"write",
"(",
"'\\t\"{commit}:{path}\" '",
"'[shape=box label=\"#{commit}:{path}:{comment}\"] \\n'",
".",
"format",
"(",
"comment",
"=",
"content",
"[",
"'comment'",
"]",
",",
"commit",
"=",
"node_path",
"[",
"'commit'",
"]",
"[",
":",
"5",
"]",
",",
"path",
"=",
"node_path",
".",
"get",
"(",
"'path'",
")",
"or",
"''",
")",
")",
"for",
"node",
",",
"content",
"in",
"artifact_nodes",
".",
"items",
"(",
")",
":",
"node_path",
"=",
"path_re",
".",
"match",
"(",
"node",
")",
".",
"groupdict",
"(",
")",
"stream",
".",
"write",
"(",
"'\\t\"{commit}:{path}\" '",
"'[label=\"#{commit}:{path}\"] \\n'",
".",
"format",
"(",
"commit",
"=",
"node_path",
"[",
"'commit'",
"]",
"[",
":",
"5",
"]",
",",
"path",
"=",
"node_path",
".",
"get",
"(",
"'path'",
")",
"or",
"''",
")",
")",
"stream",
".",
"write",
"(",
"'}\\n'",
")"
] |
Create a simple graph of processes and artifacts.
|
[
"Create",
"a",
"simple",
"graph",
"of",
"processes",
"and",
"artifacts",
"."
] |
python
|
train
| 33.755556 |
youversion/crony
|
crony/crony.py
|
https://github.com/youversion/crony/blob/c93d14b809a2e878f1b9d6d53d5a04947896583b/crony/crony.py#L76-L100
|
def cronitor(self):
    """Wrap run with requests to cronitor."""
    url = f'https://cronitor.link/{self.opts.cronitor}/{{}}'
    try:
        run_url = url.format('run')
        self.logger.debug(f'Pinging {run_url}')
        requests.get(run_url, timeout=self.opts.timeout)
    except requests.exceptions.RequestException as e:
        self.logger.exception(e)

    # Cronitor may be having an outage, but we still want to run our stuff
    output, exit_status = self.run()
    endpoint = 'complete' if exit_status == 0 else 'fail'
    try:
        ping_url = url.format(endpoint)
        self.logger.debug('Pinging {}'.format(ping_url))
        requests.get(ping_url, timeout=self.opts.timeout)
    except requests.exceptions.RequestException as e:
        self.logger.exception(e)
    return output, exit_status
|
[
"def",
"cronitor",
"(",
"self",
")",
":",
"url",
"=",
"f'https://cronitor.link/{self.opts.cronitor}/{{}}'",
"try",
":",
"run_url",
"=",
"url",
".",
"format",
"(",
"'run'",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"f'Pinging {run_url}'",
")",
"requests",
".",
"get",
"(",
"run_url",
",",
"timeout",
"=",
"self",
".",
"opts",
".",
"timeout",
")",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
"as",
"e",
":",
"self",
".",
"logger",
".",
"exception",
"(",
"e",
")",
"# Cronitor may be having an outage, but we still want to run our stuff",
"output",
",",
"exit_status",
"=",
"self",
".",
"run",
"(",
")",
"endpoint",
"=",
"'complete'",
"if",
"exit_status",
"==",
"0",
"else",
"'fail'",
"try",
":",
"ping_url",
"=",
"url",
".",
"format",
"(",
"endpoint",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'Pinging {}'",
".",
"format",
"(",
"ping_url",
")",
")",
"requests",
".",
"get",
"(",
"ping_url",
",",
"timeout",
"=",
"self",
".",
"opts",
".",
"timeout",
")",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
"as",
"e",
":",
"self",
".",
"logger",
".",
"exception",
"(",
"e",
")",
"return",
"output",
",",
"exit_status"
] |
Wrap run with requests to cronitor.
|
[
"Wrap",
"run",
"with",
"requests",
"to",
"cronitor",
"."
] |
python
|
train
| 34.76 |
PureStorage-OpenConnect/rest-client
|
purestorage/purestorage.py
|
https://github.com/PureStorage-OpenConnect/rest-client/blob/097d5f2bc6facf607d7e4a92567b09fb8cf5cb34/purestorage/purestorage.py#L1496-L1518
|
def create_vlan_interface(self, interface, subnet, **kwargs):
    """Create a vlan interface
    :param interface: Name of interface to be created.
    :type interface: str
    :param subnet: Subnet associated with interface to be created
    :type subnet: str
    :param \*\*kwargs: See the REST API Guide on your array for the
                       documentation on the request:
                       **POST network/vif/:vlan_interface**
    :type \*\*kwargs: optional
    :returns: A dictionary describing the created interface
    :rtype: ResponseDict
    .. note::
        Requires use of REST API 1.5 or later.
    """
    data = {"subnet": subnet}
    data.update(kwargs)
    return self._request("POST", "network/vif/{0}".format(interface), data)
|
[
"def",
"create_vlan_interface",
"(",
"self",
",",
"interface",
",",
"subnet",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"{",
"\"subnet\"",
":",
"subnet",
"}",
"data",
".",
"update",
"(",
"kwargs",
")",
"return",
"self",
".",
"_request",
"(",
"\"POST\"",
",",
"\"network/vif/{0}\"",
".",
"format",
"(",
"interface",
")",
",",
"data",
")"
] |
Create a vlan interface
:param interface: Name of interface to be created.
:type interface: str
:param subnet: Subnet associated with interface to be created
:type subnet: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST network/vif/:vlan_interface**
:type \*\*kwargs: optional
:returns: A dictionary describing the created interface
:rtype: ResponseDict
.. note::
Requires use of REST API 1.5 or later.
|
[
"Create",
"a",
"vlan",
"interface"
] |
python
|
train
| 35.086957 |
estnltk/estnltk
|
estnltk/wordnet/wn.py
|
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L864-L873
|
def synset(self):
    """Returns synset into which the given lemma belongs to.
    Returns
    -------
    Synset
        Synset into which the given lemma belongs to.
    """
    return synset('%s.%s.%s.%s'%(self.synset_literal,self.synset_pos,self.synset_sense,self.literal))
|
[
"def",
"synset",
"(",
"self",
")",
":",
"return",
"synset",
"(",
"'%s.%s.%s.%s'",
"%",
"(",
"self",
".",
"synset_literal",
",",
"self",
".",
"synset_pos",
",",
"self",
".",
"synset_sense",
",",
"self",
".",
"literal",
")",
")"
] |
Returns synset into which the given lemma belongs to.
Returns
-------
Synset
Synset into which the given lemma belongs to.
|
[
"Returns",
"synset",
"into",
"which",
"the",
"given",
"lemma",
"belongs",
"to",
".",
"Returns",
"-------",
"Synset",
"Synset",
"into",
"which",
"the",
"given",
"lemma",
"belongs",
"to",
"."
] |
python
|
train
| 31.6 |
fboender/ansible-cmdb
|
lib/mako/codegen.py
|
https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/lib/mako/codegen.py#L267-L316
|
def write_render_callable(self, node, name, args, buffered, filtered,
                          cached):
    """write a top-level render callable.
    this could be the main render() method or that of a top-level def."""
    if self.in_def:
        decorator = node.decorator
        if decorator:
            self.printer.writeline(
                "@runtime._decorate_toplevel(%s)" % decorator)
    self.printer.start_source(node.lineno)
    self.printer.writelines(
        "def %s(%s):" % (name, ','.join(args)),
        # push new frame, assign current frame to __M_caller
        "__M_caller = context.caller_stack._push_frame()",
        "try:"
    )
    if buffered or filtered or cached:
        self.printer.writeline("context._push_buffer()")
    self.identifier_stack.append(
        self.compiler.identifiers.branch(self.node))
    if (not self.in_def or self.node.is_block) and '**pageargs' in args:
        self.identifier_stack[-1].argument_declared.add('pageargs')
    if not self.in_def and (
            len(self.identifiers.locally_assigned) > 0 or
            len(self.identifiers.argument_declared) > 0
    ):
        self.printer.writeline("__M_locals = __M_dict_builtin(%s)" %
                               ','.join([
                                   "%s=%s" % (x, x) for x in
                                   self.identifiers.argument_declared
                               ]))
    self.write_variable_declares(self.identifiers, toplevel=True)
    for n in self.node.nodes:
        n.accept_visitor(self)
    self.write_def_finish(self.node, buffered, filtered, cached)
    self.printer.writeline(None)
    self.printer.write_blanks(2)
    if cached:
        self.write_cache_decorator(
            node, name,
            args, buffered,
            self.identifiers, toplevel=True)
|
[
"def",
"write_render_callable",
"(",
"self",
",",
"node",
",",
"name",
",",
"args",
",",
"buffered",
",",
"filtered",
",",
"cached",
")",
":",
"if",
"self",
".",
"in_def",
":",
"decorator",
"=",
"node",
".",
"decorator",
"if",
"decorator",
":",
"self",
".",
"printer",
".",
"writeline",
"(",
"\"@runtime._decorate_toplevel(%s)\"",
"%",
"decorator",
")",
"self",
".",
"printer",
".",
"start_source",
"(",
"node",
".",
"lineno",
")",
"self",
".",
"printer",
".",
"writelines",
"(",
"\"def %s(%s):\"",
"%",
"(",
"name",
",",
"','",
".",
"join",
"(",
"args",
")",
")",
",",
"# push new frame, assign current frame to __M_caller",
"\"__M_caller = context.caller_stack._push_frame()\"",
",",
"\"try:\"",
")",
"if",
"buffered",
"or",
"filtered",
"or",
"cached",
":",
"self",
".",
"printer",
".",
"writeline",
"(",
"\"context._push_buffer()\"",
")",
"self",
".",
"identifier_stack",
".",
"append",
"(",
"self",
".",
"compiler",
".",
"identifiers",
".",
"branch",
"(",
"self",
".",
"node",
")",
")",
"if",
"(",
"not",
"self",
".",
"in_def",
"or",
"self",
".",
"node",
".",
"is_block",
")",
"and",
"'**pageargs'",
"in",
"args",
":",
"self",
".",
"identifier_stack",
"[",
"-",
"1",
"]",
".",
"argument_declared",
".",
"add",
"(",
"'pageargs'",
")",
"if",
"not",
"self",
".",
"in_def",
"and",
"(",
"len",
"(",
"self",
".",
"identifiers",
".",
"locally_assigned",
")",
">",
"0",
"or",
"len",
"(",
"self",
".",
"identifiers",
".",
"argument_declared",
")",
">",
"0",
")",
":",
"self",
".",
"printer",
".",
"writeline",
"(",
"\"__M_locals = __M_dict_builtin(%s)\"",
"%",
"','",
".",
"join",
"(",
"[",
"\"%s=%s\"",
"%",
"(",
"x",
",",
"x",
")",
"for",
"x",
"in",
"self",
".",
"identifiers",
".",
"argument_declared",
"]",
")",
")",
"self",
".",
"write_variable_declares",
"(",
"self",
".",
"identifiers",
",",
"toplevel",
"=",
"True",
")",
"for",
"n",
"in",
"self",
".",
"node",
".",
"nodes",
":",
"n",
".",
"accept_visitor",
"(",
"self",
")",
"self",
".",
"write_def_finish",
"(",
"self",
".",
"node",
",",
"buffered",
",",
"filtered",
",",
"cached",
")",
"self",
".",
"printer",
".",
"writeline",
"(",
"None",
")",
"self",
".",
"printer",
".",
"write_blanks",
"(",
"2",
")",
"if",
"cached",
":",
"self",
".",
"write_cache_decorator",
"(",
"node",
",",
"name",
",",
"args",
",",
"buffered",
",",
"self",
".",
"identifiers",
",",
"toplevel",
"=",
"True",
")"
] |
write a top-level render callable.
this could be the main render() method or that of a top-level def.
|
[
"write",
"a",
"top",
"-",
"level",
"render",
"callable",
"."
] |
python
|
train
| 41.74 |
saltstack/salt
|
salt/modules/virt.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L352-L363
|
def _get_on_reboot(dom):
    '''
    Return `on_reboot` setting from the named vm
    CLI Example:
    .. code-block:: bash
        salt '*' virt.get_on_reboot <domain>
    '''
    node = ElementTree.fromstring(get_xml(dom)).find('on_reboot')
    return node.text if node is not None else ''
|
[
"def",
"_get_on_reboot",
"(",
"dom",
")",
":",
"node",
"=",
"ElementTree",
".",
"fromstring",
"(",
"get_xml",
"(",
"dom",
")",
")",
".",
"find",
"(",
"'on_reboot'",
")",
"return",
"node",
".",
"text",
"if",
"node",
"is",
"not",
"None",
"else",
"''"
] |
Return `on_reboot` setting from the named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_on_reboot <domain>
|
[
"Return",
"on_reboot",
"setting",
"from",
"the",
"named",
"vm"
] |
python
|
train
| 23.583333 |
redapple/parslepy
|
parslepy/base.py
|
https://github.com/redapple/parslepy/blob/a8bc4c0592824459629018c8f4c6ae3dad6cc3cc/parslepy/base.py#L203-L220
|
def from_yamlfile(cls, fp, selector_handler=None, strict=False, debug=False):
    """
    Create a Parselet instance from a file containing
    the Parsley script as a YAML object
    >>> import parslepy
    >>> with open('parselet.yml') as fp:
    ...     parslepy.Parselet.from_yamlfile(fp)
    ...
    <parslepy.base.Parselet object at 0x2014e50>
    :param file fp: an open file-like pointer containing the Parsley script
    :rtype: :class:`.Parselet`
    Other arguments: same as for :class:`.Parselet` contructor
    """
    return cls.from_yamlstring(fp.read(), selector_handler=selector_handler, strict=strict, debug=debug)
|
[
"def",
"from_yamlfile",
"(",
"cls",
",",
"fp",
",",
"selector_handler",
"=",
"None",
",",
"strict",
"=",
"False",
",",
"debug",
"=",
"False",
")",
":",
"return",
"cls",
".",
"from_yamlstring",
"(",
"fp",
".",
"read",
"(",
")",
",",
"selector_handler",
"=",
"selector_handler",
",",
"strict",
"=",
"strict",
",",
"debug",
"=",
"debug",
")"
] |
Create a Parselet instance from a file containing
the Parsley script as a YAML object
>>> import parslepy
>>> with open('parselet.yml') as fp:
... parslepy.Parselet.from_yamlfile(fp)
...
<parslepy.base.Parselet object at 0x2014e50>
:param file fp: an open file-like pointer containing the Parsley script
:rtype: :class:`.Parselet`
Other arguments: same as for :class:`.Parselet` contructor
|
[
"Create",
"a",
"Parselet",
"instance",
"from",
"a",
"file",
"containing",
"the",
"Parsley",
"script",
"as",
"a",
"YAML",
"object"
] |
python
|
valid
| 37.277778 |
AndrewAnnex/SpiceyPy
|
spiceypy/spiceypy.py
|
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L6497-L6511
|
def gfstep(time):
    """
    Return the time step set by the most recent call to :func:`gfsstp`.
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfstep_c.html
    :param time: Ignored ET value.
    :type time: float
    :return: Time step to take.
    :rtype: float
    """
    time = ctypes.c_double(time)
    step = ctypes.c_double()
    libspice.gfstep_c(time, ctypes.byref(step))
    return step.value
|
[
"def",
"gfstep",
"(",
"time",
")",
":",
"time",
"=",
"ctypes",
".",
"c_double",
"(",
"time",
")",
"step",
"=",
"ctypes",
".",
"c_double",
"(",
")",
"libspice",
".",
"gfstep_c",
"(",
"time",
",",
"ctypes",
".",
"byref",
"(",
"step",
")",
")",
"return",
"step",
".",
"value"
] |
Return the time step set by the most recent call to :func:`gfsstp`.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfstep_c.html
:param time: Ignored ET value.
:type time: float
:return: Time step to take.
:rtype: float
|
[
"Return",
"the",
"time",
"step",
"set",
"by",
"the",
"most",
"recent",
"call",
"to",
":",
"func",
":",
"gfsstp",
"."
] |
python
|
train
| 27.066667 |
python-visualization/branca
|
branca/element.py
|
https://github.com/python-visualization/branca/blob/4e89e88a5a7ff3586f0852249c2c125f72316da8/branca/element.py#L138-L140
|
def to_json(self, depth=-1, **kwargs):
    """Returns a JSON representation of the object."""
    return json.dumps(self.to_dict(depth=depth, ordered=True), **kwargs)
|
[
"def",
"to_json",
"(",
"self",
",",
"depth",
"=",
"-",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"self",
".",
"to_dict",
"(",
"depth",
"=",
"depth",
",",
"ordered",
"=",
"True",
")",
",",
"*",
"*",
"kwargs",
")"
] |
Returns a JSON representation of the object.
|
[
"Returns",
"a",
"JSON",
"representation",
"of",
"the",
"object",
"."
] |
python
|
train
| 57.333333 |
darkfeline/animanager
|
animanager/datets.py
|
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/datets.py#L34-L41
|
def to_date(ts: float) -> datetime.date:
    """Convert timestamp to date.
    >>> to_date(978393600.0)
    datetime.date(2001, 1, 2)
    """
    return datetime.datetime.fromtimestamp(
        ts, tz=datetime.timezone.utc).date()
|
[
"def",
"to_date",
"(",
"ts",
":",
"float",
")",
"->",
"datetime",
".",
"date",
":",
"return",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"ts",
",",
"tz",
"=",
"datetime",
".",
"timezone",
".",
"utc",
")",
".",
"date",
"(",
")"
] |
Convert timestamp to date.
>>> to_date(978393600.0)
datetime.date(2001, 1, 2)
|
[
"Convert",
"timestamp",
"to",
"date",
"."
] |
python
|
train
| 28 |
joowani/quadriga
|
quadriga/book.py
|
https://github.com/joowani/quadriga/blob/412f88f414ef0cb53efa6d5841b9674eb9718359/quadriga/book.py#L37-L47
|
def get_ticker(self):
    """Return the latest ticker information.
    :return: Latest ticker information.
    :rtype: dict
    """
    self._log('get ticker')
    return self._rest_client.get(
        endpoint='/ticker',
        params={'book': self.name}
    )
|
[
"def",
"get_ticker",
"(",
"self",
")",
":",
"self",
".",
"_log",
"(",
"'get ticker'",
")",
"return",
"self",
".",
"_rest_client",
".",
"get",
"(",
"endpoint",
"=",
"'/ticker'",
",",
"params",
"=",
"{",
"'book'",
":",
"self",
".",
"name",
"}",
")"
] |
Return the latest ticker information.
:return: Latest ticker information.
:rtype: dict
|
[
"Return",
"the",
"latest",
"ticker",
"information",
"."
] |
python
|
train
| 26.272727 |
cocoakekeyu/cancan
|
cancan/ability.py
|
https://github.com/cocoakekeyu/cancan/blob/f198d560e6e008e6c5580ba55581a939a5d544ed/cancan/ability.py#L37-L41
|
def addnot(self, action=None, subject=None, **conditions):
    """
    Defines an ability which cannot be done.
    """
    self.add_rule(Rule(False, action, subject, **conditions))
|
[
"def",
"addnot",
"(",
"self",
",",
"action",
"=",
"None",
",",
"subject",
"=",
"None",
",",
"*",
"*",
"conditions",
")",
":",
"self",
".",
"add_rule",
"(",
"Rule",
"(",
"False",
",",
"action",
",",
"subject",
",",
"*",
"*",
"conditions",
")",
")"
] |
Defines an ability which cannot be done.
|
[
"Defines",
"an",
"ability",
"which",
"cannot",
"be",
"done",
"."
] |
python
|
train
| 38.6 |
jeffh/describe
|
describe/mock/expectations.py
|
https://github.com/jeffh/describe/blob/6a33ffecc3340b57e60bc8a7095521882ff9a156/describe/mock/expectations.py#L257-L259
|
def attribute_invoked(self, sender, name, args, kwargs):
    "Handles the creation of ExpectationBuilder when an attribute is invoked."
    return ExpectationBuilder(self.sender, self.delegate, self.add_invocation, self.add_expectations, '__call__')(*args, **kwargs)
|
[
"def",
"attribute_invoked",
"(",
"self",
",",
"sender",
",",
"name",
",",
"args",
",",
"kwargs",
")",
":",
"return",
"ExpectationBuilder",
"(",
"self",
".",
"sender",
",",
"self",
".",
"delegate",
",",
"self",
".",
"add_invocation",
",",
"self",
".",
"add_expectations",
",",
"'__call__'",
")",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Handles the creation of ExpectationBuilder when an attribute is invoked.
|
[
"Handles",
"the",
"creation",
"of",
"ExpectationBuilder",
"when",
"an",
"attribute",
"is",
"invoked",
"."
] |
python
|
train
| 90.666667 |
pyapi-gitlab/pyapi-gitlab
|
gitlab/__init__.py
|
https://github.com/pyapi-gitlab/pyapi-gitlab/blob/f74b6fb5c13cecae9524997847e928905cc60acf/gitlab/__init__.py#L101-L135
|
def createproject(self, name, **kwargs):
    """
    Creates a new project owned by the authenticated user.
    :param name: new project name
    :param path: custom repository name for new project. By default generated based on name
    :param namespace_id: namespace for the new project (defaults to user)
    :param description: short project description
    :param issues_enabled:
    :param merge_requests_enabled:
    :param wiki_enabled:
    :param snippets_enabled:
    :param public: if true same as setting visibility_level = 20
    :param visibility_level:
    :param sudo:
    :param import_url:
    :return:
    """
    data = {'name': name}
    if kwargs:
        data.update(kwargs)
    request = requests.post(
        self.projects_url, headers=self.headers, data=data,
        verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
    if request.status_code == 201:
        return request.json()
    elif request.status_code == 403:
        if 'Your own projects limit is 0' in request.text:
            print(request.text)
            return False
    else:
        return False
|
[
"def",
"createproject",
"(",
"self",
",",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"{",
"'name'",
":",
"name",
"}",
"if",
"kwargs",
":",
"data",
".",
"update",
"(",
"kwargs",
")",
"request",
"=",
"requests",
".",
"post",
"(",
"self",
".",
"projects_url",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"data",
"=",
"data",
",",
"verify",
"=",
"self",
".",
"verify_ssl",
",",
"auth",
"=",
"self",
".",
"auth",
",",
"timeout",
"=",
"self",
".",
"timeout",
")",
"if",
"request",
".",
"status_code",
"==",
"201",
":",
"return",
"request",
".",
"json",
"(",
")",
"elif",
"request",
".",
"status_code",
"==",
"403",
":",
"if",
"'Your own projects limit is 0'",
"in",
"request",
".",
"text",
":",
"print",
"(",
"request",
".",
"text",
")",
"return",
"False",
"else",
":",
"return",
"False"
] |
Creates a new project owned by the authenticated user.
:param name: new project name
:param path: custom repository name for new project. By default generated based on name
:param namespace_id: namespace for the new project (defaults to user)
:param description: short project description
:param issues_enabled:
:param merge_requests_enabled:
:param wiki_enabled:
:param snippets_enabled:
:param public: if true same as setting visibility_level = 20
:param visibility_level:
:param sudo:
:param import_url:
:return:
|
[
"Creates",
"a",
"new",
"project",
"owned",
"by",
"the",
"authenticated",
"user",
"."
] |
python
|
train
| 34.142857 |
allenai/allennlp
|
allennlp/data/dataset_readers/reading_comprehension/util.py
|
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/reading_comprehension/util.py#L217-L351
|
def make_reading_comprehension_instance_quac(question_list_tokens: List[List[Token]],
                                             passage_tokens: List[Token],
                                             token_indexers: Dict[str, TokenIndexer],
                                             passage_text: str,
                                             token_span_lists: List[List[Tuple[int, int]]] = None,
                                             yesno_list: List[int] = None,
                                             followup_list: List[int] = None,
                                             additional_metadata: Dict[str, Any] = None,
                                             num_context_answers: int = 0) -> Instance:
    """
    Converts a question, a passage, and an optional answer (or answers) to an ``Instance`` for use
    in a reading comprehension model.
    Creates an ``Instance`` with at least these fields: ``question`` and ``passage``, both
    ``TextFields``; and ``metadata``, a ``MetadataField``. Additionally, if both ``answer_texts``
    and ``char_span_starts`` are given, the ``Instance`` has ``span_start`` and ``span_end``
    fields, which are both ``IndexFields``.
    Parameters
    ----------
    question_list_tokens : ``List[List[Token]]``
        An already-tokenized list of questions. Each dialog have multiple questions.
    passage_tokens : ``List[Token]``
        An already-tokenized passage that contains the answer to the given question.
    token_indexers : ``Dict[str, TokenIndexer]``
        Determines how the question and passage ``TextFields`` will be converted into tensors that
        get input to a model. See :class:`TokenIndexer`.
    passage_text : ``str``
        The original passage text. We need this so that we can recover the actual span from the
        original passage that the model predicts as the answer to the question. This is used in
        official evaluation scripts.
    token_span_lists : ``List[List[Tuple[int, int]]]``, optional
        Indices into ``passage_tokens`` to use as the answer to the question for training. This is
        a list of list, first because there is multiple questions per dialog, and
        because there might be several possible correct answer spans in the passage.
        Currently, we just select the last span in this list (i.e., QuAC has multiple
        annotations on the dev set; this will select the last span, which was given by the original annotator).
    yesno_list : ``List[int]``
        List of the affirmation bit for each question answer pairs.
    followup_list : ``List[int]``
        List of the continuation bit for each question answer pairs.
    num_context_answers : ``int``, optional
        How many answers to encode into the passage.
    additional_metadata : ``Dict[str, Any]``, optional
        The constructed ``metadata`` field will by default contain ``original_passage``,
        ``token_offsets``, ``question_tokens``, ``passage_tokens``, and ``answer_texts`` keys. If
        you want any other metadata to be associated with each instance, you can pass that in here.
        This dictionary will get added to the ``metadata`` dictionary we already construct.
    """
    additional_metadata = additional_metadata or {}
    fields: Dict[str, Field] = {}
    passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
    # This is separate so we can reference it later with a known type.
    passage_field = TextField(passage_tokens, token_indexers)
    fields['passage'] = passage_field
    fields['question'] = ListField([TextField(q_tokens, token_indexers) for q_tokens in question_list_tokens])
    metadata = {'original_passage': passage_text,
                'token_offsets': passage_offsets,
                'question_tokens': [[token.text for token in question_tokens] \
                                    for question_tokens in question_list_tokens],
                'passage_tokens': [token.text for token in passage_tokens], }
    p1_answer_marker_list: List[Field] = []
    p2_answer_marker_list: List[Field] = []
    p3_answer_marker_list: List[Field] = []

    def get_tag(i, i_name):
        # Generate a tag to mark previous answer span in the passage.
        return "<{0:d}_{1:s}>".format(i, i_name)

    def mark_tag(span_start, span_end, passage_tags, prev_answer_distance):
        try:
            assert span_start >= 0
            assert span_end >= 0
        except:
            raise ValueError("Previous {0:d}th answer span should have been updated!".format(prev_answer_distance))
        # Modify "tags" to mark previous answer span.
        if span_start == span_end:
            passage_tags[prev_answer_distance][span_start] = get_tag(prev_answer_distance, "")
        else:
            passage_tags[prev_answer_distance][span_start] = get_tag(prev_answer_distance, "start")
            passage_tags[prev_answer_distance][span_end] = get_tag(prev_answer_distance, "end")
            for passage_index in range(span_start + 1, span_end):
                passage_tags[prev_answer_distance][passage_index] = get_tag(prev_answer_distance, "in")

    if token_span_lists:
        span_start_list: List[Field] = []
        span_end_list: List[Field] = []
        p1_span_start, p1_span_end, p2_span_start = -1, -1, -1
        p2_span_end, p3_span_start, p3_span_end = -1, -1, -1
        # Looping each <<answers>>.
        for question_index, answer_span_lists in enumerate(token_span_lists):
            span_start, span_end = answer_span_lists[-1]  # Last one is the original answer
            span_start_list.append(IndexField(span_start, passage_field))
            span_end_list.append(IndexField(span_end, passage_field))
            prev_answer_marker_lists = [["O"] * len(passage_tokens), ["O"] * len(passage_tokens),
                                        ["O"] * len(passage_tokens), ["O"] * len(passage_tokens)]
            if question_index > 0 and num_context_answers > 0:
                mark_tag(p1_span_start, p1_span_end, prev_answer_marker_lists, 1)
            if question_index > 1 and num_context_answers > 1:
                mark_tag(p2_span_start, p2_span_end, prev_answer_marker_lists, 2)
            if question_index > 2 and num_context_answers > 2:
                mark_tag(p3_span_start, p3_span_end, prev_answer_marker_lists, 3)
            p3_span_start = p2_span_start
            p3_span_end = p2_span_end
            p2_span_start = p1_span_start
            p2_span_end = p1_span_end
            p1_span_start = span_start
            p1_span_end = span_end
            if num_context_answers > 2:
                p3_answer_marker_list.append(SequenceLabelField(prev_answer_marker_lists[3],
                                                                passage_field,
                                                                label_namespace="answer_tags"))
            if num_context_answers > 1:
                p2_answer_marker_list.append(SequenceLabelField(prev_answer_marker_lists[2],
                                                                passage_field,
                                                                label_namespace="answer_tags"))
            if num_context_answers > 0:
                p1_answer_marker_list.append(SequenceLabelField(prev_answer_marker_lists[1],
                                                                passage_field,
                                                                label_namespace="answer_tags"))
        fields['span_start'] = ListField(span_start_list)
        fields['span_end'] = ListField(span_end_list)
        if num_context_answers > 0:
            fields['p1_answer_marker'] = ListField(p1_answer_marker_list)
        if num_context_answers > 1:
            fields['p2_answer_marker'] = ListField(p2_answer_marker_list)
        if num_context_answers > 2:
            fields['p3_answer_marker'] = ListField(p3_answer_marker_list)
        fields['yesno_list'] = ListField( \
            [LabelField(yesno, label_namespace="yesno_labels") for yesno in yesno_list])
        fields['followup_list'] = ListField([LabelField(followup, label_namespace="followup_labels") \
                                             for followup in followup_list])
    metadata.update(additional_metadata)
    fields['metadata'] = MetadataField(metadata)
    return Instance(fields)
|
[
"def",
"make_reading_comprehension_instance_quac",
"(",
"question_list_tokens",
":",
"List",
"[",
"List",
"[",
"Token",
"]",
"]",
",",
"passage_tokens",
":",
"List",
"[",
"Token",
"]",
",",
"token_indexers",
":",
"Dict",
"[",
"str",
",",
"TokenIndexer",
"]",
",",
"passage_text",
":",
"str",
",",
"token_span_lists",
":",
"List",
"[",
"List",
"[",
"Tuple",
"[",
"int",
",",
"int",
"]",
"]",
"]",
"=",
"None",
",",
"yesno_list",
":",
"List",
"[",
"int",
"]",
"=",
"None",
",",
"followup_list",
":",
"List",
"[",
"int",
"]",
"=",
"None",
",",
"additional_metadata",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"None",
",",
"num_context_answers",
":",
"int",
"=",
"0",
")",
"->",
"Instance",
":",
"additional_metadata",
"=",
"additional_metadata",
"or",
"{",
"}",
"fields",
":",
"Dict",
"[",
"str",
",",
"Field",
"]",
"=",
"{",
"}",
"passage_offsets",
"=",
"[",
"(",
"token",
".",
"idx",
",",
"token",
".",
"idx",
"+",
"len",
"(",
"token",
".",
"text",
")",
")",
"for",
"token",
"in",
"passage_tokens",
"]",
"# This is separate so we can reference it later with a known type.",
"passage_field",
"=",
"TextField",
"(",
"passage_tokens",
",",
"token_indexers",
")",
"fields",
"[",
"'passage'",
"]",
"=",
"passage_field",
"fields",
"[",
"'question'",
"]",
"=",
"ListField",
"(",
"[",
"TextField",
"(",
"q_tokens",
",",
"token_indexers",
")",
"for",
"q_tokens",
"in",
"question_list_tokens",
"]",
")",
"metadata",
"=",
"{",
"'original_passage'",
":",
"passage_text",
",",
"'token_offsets'",
":",
"passage_offsets",
",",
"'question_tokens'",
":",
"[",
"[",
"token",
".",
"text",
"for",
"token",
"in",
"question_tokens",
"]",
"for",
"question_tokens",
"in",
"question_list_tokens",
"]",
",",
"'passage_tokens'",
":",
"[",
"token",
".",
"text",
"for",
"token",
"in",
"passage_tokens",
"]",
",",
"}",
"p1_answer_marker_list",
":",
"List",
"[",
"Field",
"]",
"=",
"[",
"]",
"p2_answer_marker_list",
":",
"List",
"[",
"Field",
"]",
"=",
"[",
"]",
"p3_answer_marker_list",
":",
"List",
"[",
"Field",
"]",
"=",
"[",
"]",
"def",
"get_tag",
"(",
"i",
",",
"i_name",
")",
":",
"# Generate a tag to mark previous answer span in the passage.",
"return",
"\"<{0:d}_{1:s}>\"",
".",
"format",
"(",
"i",
",",
"i_name",
")",
"def",
"mark_tag",
"(",
"span_start",
",",
"span_end",
",",
"passage_tags",
",",
"prev_answer_distance",
")",
":",
"try",
":",
"assert",
"span_start",
">=",
"0",
"assert",
"span_end",
">=",
"0",
"except",
":",
"raise",
"ValueError",
"(",
"\"Previous {0:d}th answer span should have been updated!\"",
".",
"format",
"(",
"prev_answer_distance",
")",
")",
"# Modify \"tags\" to mark previous answer span.",
"if",
"span_start",
"==",
"span_end",
":",
"passage_tags",
"[",
"prev_answer_distance",
"]",
"[",
"span_start",
"]",
"=",
"get_tag",
"(",
"prev_answer_distance",
",",
"\"\"",
")",
"else",
":",
"passage_tags",
"[",
"prev_answer_distance",
"]",
"[",
"span_start",
"]",
"=",
"get_tag",
"(",
"prev_answer_distance",
",",
"\"start\"",
")",
"passage_tags",
"[",
"prev_answer_distance",
"]",
"[",
"span_end",
"]",
"=",
"get_tag",
"(",
"prev_answer_distance",
",",
"\"end\"",
")",
"for",
"passage_index",
"in",
"range",
"(",
"span_start",
"+",
"1",
",",
"span_end",
")",
":",
"passage_tags",
"[",
"prev_answer_distance",
"]",
"[",
"passage_index",
"]",
"=",
"get_tag",
"(",
"prev_answer_distance",
",",
"\"in\"",
")",
"if",
"token_span_lists",
":",
"span_start_list",
":",
"List",
"[",
"Field",
"]",
"=",
"[",
"]",
"span_end_list",
":",
"List",
"[",
"Field",
"]",
"=",
"[",
"]",
"p1_span_start",
",",
"p1_span_end",
",",
"p2_span_start",
"=",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
"p2_span_end",
",",
"p3_span_start",
",",
"p3_span_end",
"=",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
"# Looping each <<answers>>.",
"for",
"question_index",
",",
"answer_span_lists",
"in",
"enumerate",
"(",
"token_span_lists",
")",
":",
"span_start",
",",
"span_end",
"=",
"answer_span_lists",
"[",
"-",
"1",
"]",
"# Last one is the original answer",
"span_start_list",
".",
"append",
"(",
"IndexField",
"(",
"span_start",
",",
"passage_field",
")",
")",
"span_end_list",
".",
"append",
"(",
"IndexField",
"(",
"span_end",
",",
"passage_field",
")",
")",
"prev_answer_marker_lists",
"=",
"[",
"[",
"\"O\"",
"]",
"*",
"len",
"(",
"passage_tokens",
")",
",",
"[",
"\"O\"",
"]",
"*",
"len",
"(",
"passage_tokens",
")",
",",
"[",
"\"O\"",
"]",
"*",
"len",
"(",
"passage_tokens",
")",
",",
"[",
"\"O\"",
"]",
"*",
"len",
"(",
"passage_tokens",
")",
"]",
"if",
"question_index",
">",
"0",
"and",
"num_context_answers",
">",
"0",
":",
"mark_tag",
"(",
"p1_span_start",
",",
"p1_span_end",
",",
"prev_answer_marker_lists",
",",
"1",
")",
"if",
"question_index",
">",
"1",
"and",
"num_context_answers",
">",
"1",
":",
"mark_tag",
"(",
"p2_span_start",
",",
"p2_span_end",
",",
"prev_answer_marker_lists",
",",
"2",
")",
"if",
"question_index",
">",
"2",
"and",
"num_context_answers",
">",
"2",
":",
"mark_tag",
"(",
"p3_span_start",
",",
"p3_span_end",
",",
"prev_answer_marker_lists",
",",
"3",
")",
"p3_span_start",
"=",
"p2_span_start",
"p3_span_end",
"=",
"p2_span_end",
"p2_span_start",
"=",
"p1_span_start",
"p2_span_end",
"=",
"p1_span_end",
"p1_span_start",
"=",
"span_start",
"p1_span_end",
"=",
"span_end",
"if",
"num_context_answers",
">",
"2",
":",
"p3_answer_marker_list",
".",
"append",
"(",
"SequenceLabelField",
"(",
"prev_answer_marker_lists",
"[",
"3",
"]",
",",
"passage_field",
",",
"label_namespace",
"=",
"\"answer_tags\"",
")",
")",
"if",
"num_context_answers",
">",
"1",
":",
"p2_answer_marker_list",
".",
"append",
"(",
"SequenceLabelField",
"(",
"prev_answer_marker_lists",
"[",
"2",
"]",
",",
"passage_field",
",",
"label_namespace",
"=",
"\"answer_tags\"",
")",
")",
"if",
"num_context_answers",
">",
"0",
":",
"p1_answer_marker_list",
".",
"append",
"(",
"SequenceLabelField",
"(",
"prev_answer_marker_lists",
"[",
"1",
"]",
",",
"passage_field",
",",
"label_namespace",
"=",
"\"answer_tags\"",
")",
")",
"fields",
"[",
"'span_start'",
"]",
"=",
"ListField",
"(",
"span_start_list",
")",
"fields",
"[",
"'span_end'",
"]",
"=",
"ListField",
"(",
"span_end_list",
")",
"if",
"num_context_answers",
">",
"0",
":",
"fields",
"[",
"'p1_answer_marker'",
"]",
"=",
"ListField",
"(",
"p1_answer_marker_list",
")",
"if",
"num_context_answers",
">",
"1",
":",
"fields",
"[",
"'p2_answer_marker'",
"]",
"=",
"ListField",
"(",
"p2_answer_marker_list",
")",
"if",
"num_context_answers",
">",
"2",
":",
"fields",
"[",
"'p3_answer_marker'",
"]",
"=",
"ListField",
"(",
"p3_answer_marker_list",
")",
"fields",
"[",
"'yesno_list'",
"]",
"=",
"ListField",
"(",
"[",
"LabelField",
"(",
"yesno",
",",
"label_namespace",
"=",
"\"yesno_labels\"",
")",
"for",
"yesno",
"in",
"yesno_list",
"]",
")",
"fields",
"[",
"'followup_list'",
"]",
"=",
"ListField",
"(",
"[",
"LabelField",
"(",
"followup",
",",
"label_namespace",
"=",
"\"followup_labels\"",
")",
"for",
"followup",
"in",
"followup_list",
"]",
")",
"metadata",
".",
"update",
"(",
"additional_metadata",
")",
"fields",
"[",
"'metadata'",
"]",
"=",
"MetadataField",
"(",
"metadata",
")",
"return",
"Instance",
"(",
"fields",
")"
] |
Converts a question, a passage, and an optional answer (or answers) to an ``Instance`` for use
in a reading comprehension model.
Creates an ``Instance`` with at least these fields: ``question`` and ``passage``, both
``TextFields``; and ``metadata``, a ``MetadataField``. Additionally, if both ``answer_texts``
and ``char_span_starts`` are given, the ``Instance`` has ``span_start`` and ``span_end``
fields, which are both ``IndexFields``.
Parameters
----------
question_list_tokens : ``List[List[Token]]``
    An already-tokenized list of questions. Each dialog have multiple questions.
passage_tokens : ``List[Token]``
    An already-tokenized passage that contains the answer to the given question.
token_indexers : ``Dict[str, TokenIndexer]``
    Determines how the question and passage ``TextFields`` will be converted into tensors that
    get input to a model. See :class:`TokenIndexer`.
passage_text : ``str``
    The original passage text. We need this so that we can recover the actual span from the
    original passage that the model predicts as the answer to the question. This is used in
    official evaluation scripts.
token_span_lists : ``List[List[Tuple[int, int]]]``, optional
    Indices into ``passage_tokens`` to use as the answer to the question for training. This is
    a list of list, first because there is multiple questions per dialog, and
    because there might be several possible correct answer spans in the passage.
    Currently, we just select the last span in this list (i.e., QuAC has multiple
    annotations on the dev set; this will select the last span, which was given by the original annotator).
yesno_list : ``List[int]``
    List of the affirmation bit for each question answer pairs.
followup_list : ``List[int]``
    List of the continuation bit for each question answer pairs.
num_context_answers : ``int``, optional
    How many answers to encode into the passage.
additional_metadata : ``Dict[str, Any]``, optional
    The constructed ``metadata`` field will by default contain ``original_passage``,
    ``token_offsets``, ``question_tokens``, ``passage_tokens``, and ``answer_texts`` keys. If
    you want any other metadata to be associated with each instance, you can pass that in here.
    This dictionary will get added to the ``metadata`` dictionary we already construct.
|
[
"Converts",
"a",
"question",
"a",
"passage",
"and",
"an",
"optional",
"answer",
"(",
"or",
"answers",
")",
"to",
"an",
"Instance",
"for",
"use",
"in",
"a",
"reading",
"comprehension",
"model",
"."
] |
python
|
train
| 61.859259 |
agabrown/PyGaia
|
pygaia/astrometry/coordinates.py
|
https://github.com/agabrown/PyGaia/blob/ae972b0622a15f713ffae471f925eac25ccdae47/pygaia/astrometry/coordinates.py#L149-L170
|
def transformSkyCoordinates(self, phi, theta):
    """
    Converts sky coordinates from one reference system to another, making use of the rotation matrix with
    which the class was initialized. Inputs can be scalars or 1-dimensional numpy arrays.
    Parameters
    ----------
    phi   - Value of the azimuthal angle (right ascension, longitude) in radians.
    theta - Value of the elevation angle (declination, latitude) in radians.
    Returns
    -------
    phirot   - Value of the transformed azimuthal angle in radians.
    thetarot - Value of the transformed elevation angle in radians.
    """
    r=ones_like(phi)
    x, y, z = sphericalToCartesian(r, phi, theta)
    xrot, yrot, zrot = self.transformCartesianCoordinates(x, y, z)
    r, phirot, thetarot = cartesianToSpherical(xrot, yrot, zrot)
    return phirot, thetarot
|
[
"def",
"transformSkyCoordinates",
"(",
"self",
",",
"phi",
",",
"theta",
")",
":",
"r",
"=",
"ones_like",
"(",
"phi",
")",
"x",
",",
"y",
",",
"z",
"=",
"sphericalToCartesian",
"(",
"r",
",",
"phi",
",",
"theta",
")",
"xrot",
",",
"yrot",
",",
"zrot",
"=",
"self",
".",
"transformCartesianCoordinates",
"(",
"x",
",",
"y",
",",
"z",
")",
"r",
",",
"phirot",
",",
"thetarot",
"=",
"cartesianToSpherical",
"(",
"xrot",
",",
"yrot",
",",
"zrot",
")",
"return",
"phirot",
",",
"thetarot"
] |
Converts sky coordinates from one reference system to another, making use of the rotation matrix with
which the class was initialized. Inputs can be scalars or 1-dimensional numpy arrays.
Parameters
----------
phi - Value of the azimuthal angle (right ascension, longitude) in radians.
theta - Value of the elevation angle (declination, latitude) in radians.
Returns
-------
phirot - Value of the transformed azimuthal angle in radians.
thetarot - Value of the transformed elevation angle in radians.
|
[
"Converts",
"sky",
"coordinates",
"from",
"one",
"reference",
"system",
"to",
"another",
"making",
"use",
"of",
"the",
"rotation",
"matrix",
"with",
"which",
"the",
"class",
"was",
"initialized",
".",
"Inputs",
"can",
"be",
"scalars",
"or",
"1",
"-",
"dimensional",
"numpy",
"arrays",
"."
] |
python
|
test
| 40.409091 |
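The round trip behind transformSkyCoordinates is worth seeing end to end. Below is a minimal, self-contained sketch of the same spherical-to-Cartesian-to-spherical flow; the helper implementations and the z-axis rotation matrix are illustrative stand-ins, not PyGaia's own code.

import numpy as np

def spherical_to_cartesian(r, phi, theta):
    # theta is the elevation angle measured from the equatorial plane
    return (r * np.cos(theta) * np.cos(phi),
            r * np.cos(theta) * np.sin(phi),
            r * np.sin(theta))

def cartesian_to_spherical(x, y, z):
    r = np.sqrt(x * x + y * y + z * z)
    return r, np.arctan2(y, x), np.arcsin(z / r)

angle = np.radians(30.0)  # rotate the frame by 30 degrees about the z-axis
rot = np.array([[np.cos(angle), np.sin(angle), 0.0],
                [-np.sin(angle), np.cos(angle), 0.0],
                [0.0, 0.0, 1.0]])

phi, theta = np.radians(10.0), np.radians(45.0)
xyz = np.array(spherical_to_cartesian(1.0, phi, theta))
_, phirot, thetarot = cartesian_to_spherical(*rot.dot(xyz))
print(np.degrees(phirot), np.degrees(thetarot))  # -> approx. -20.0 45.0 (azimuth shifted, elevation kept)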
tanghaibao/goatools
|
goatools/wr_tbl_class.py
|
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/wr_tbl_class.py#L49-L53
|
def wr_row_mergeall(self, worksheet, txtstr, fmt, row_idx):
"""Merge all columns and place text string in widened cell."""
hdridxval = len(self.hdrs) - 1
worksheet.merge_range(row_idx, 0, row_idx, hdridxval, txtstr, fmt)
return row_idx + 1
|
[
"def",
"wr_row_mergeall",
"(",
"self",
",",
"worksheet",
",",
"txtstr",
",",
"fmt",
",",
"row_idx",
")",
":",
"hdridxval",
"=",
"len",
"(",
"self",
".",
"hdrs",
")",
"-",
"1",
"worksheet",
".",
"merge_range",
"(",
"row_idx",
",",
"0",
",",
"row_idx",
",",
"hdridxval",
",",
"txtstr",
",",
"fmt",
")",
"return",
"row_idx",
"+",
"1"
] |
Merge all columns and place text string in widened cell.
|
[
"Merge",
"all",
"columns",
"and",
"place",
"text",
"string",
"in",
"widened",
"cell",
"."
] |
python
|
train
| 53.4 |
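For context, here is a hedged usage sketch of the merge_range call that wr_row_mergeall wraps, assuming the worksheet comes from the xlsxwriter library (as the signature suggests); the workbook name and header list are made up for illustration.

import xlsxwriter

hdrs = ["GO term", "name", "p-value", "study items"]  # hypothetical headers
workbook = xlsxwriter.Workbook("example.xlsx")
worksheet = workbook.add_worksheet()
title_fmt = workbook.add_format({"bold": True, "align": "center"})

row_idx = 0
# merge_range(first_row, first_col, last_row, last_col, data, cell_format)
worksheet.merge_range(row_idx, 0, row_idx, len(hdrs) - 1, "Enrichment results", title_fmt)
worksheet.write_row(row_idx + 1, 0, hdrs)
workbook.close()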
assamite/creamas
|
creamas/examples/spiro/spiro.py
|
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/spiro/spiro.py#L14-L26
|
def give_dots_yield(R, r, r_, resolution=2*PI/1000, spins=50):
'''Generate Spirograph dots without numpy using yield.
'''
def x(theta):
return (R-r) * math.cos(theta) + r_*math.cos((R-r) / r * theta)
def y(theta):
return (R-r) * math.sin(theta) - r_*math.sin((R-r) / r * theta)
theta = 0.0
while theta < 2*PI*spins:
yield (x(theta), y(theta))
theta += resolution
|
[
"def",
"give_dots_yield",
"(",
"R",
",",
"r",
",",
"r_",
",",
"resolution",
"=",
"2",
"*",
"PI",
"/",
"1000",
",",
"spins",
"=",
"50",
")",
":",
"def",
"x",
"(",
"theta",
")",
":",
"return",
"(",
"R",
"-",
"r",
")",
"*",
"math",
".",
"cos",
"(",
"theta",
")",
"+",
"r_",
"*",
"math",
".",
"cos",
"(",
"(",
"R",
"-",
"r",
")",
"/",
"r",
"*",
"theta",
")",
"def",
"y",
"(",
"theta",
")",
":",
"return",
"(",
"R",
"-",
"r",
")",
"*",
"math",
".",
"sin",
"(",
"theta",
")",
"-",
"r_",
"*",
"math",
".",
"sin",
"(",
"(",
"R",
"-",
"r",
")",
"/",
"r",
"*",
"theta",
")",
"theta",
"=",
"0.0",
"while",
"theta",
"<",
"2",
"*",
"PI",
"*",
"spins",
":",
"yield",
"(",
"x",
"(",
"theta",
")",
",",
"y",
"(",
"theta",
")",
")",
"theta",
"+=",
"resolution"
] |
Generate Spirograph dots without numpy using yield.
|
[
"Generate",
"Spirograph",
"dots",
"without",
"numpy",
"using",
"yield",
"."
] |
python
|
train
| 31.384615 |
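Since the generator above is lazy, callers can pull just the points they need. A short consumption sketch follows (the function is restated verbatim so the snippet runs on its own; the R, r, r_ values are arbitrary):

import itertools
import math

PI = math.pi

def give_dots_yield(R, r, r_, resolution=2 * PI / 1000, spins=50):
    def x(theta):
        return (R - r) * math.cos(theta) + r_ * math.cos((R - r) / r * theta)
    def y(theta):
        return (R - r) * math.sin(theta) - r_ * math.sin((R - r) / r * theta)
    theta = 0.0
    while theta < 2 * PI * spins:
        yield (x(theta), y(theta))
        theta += resolution

# take the first five Spirograph points without materializing the curve
for px, py in itertools.islice(give_dots_yield(125, 75, 60), 5):
    print(round(px, 3), round(py, 3))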
alefnula/tea
|
tea/console/format.py
|
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/console/format.py#L16-L30
|
def format_page(text):
"""Format the text for output adding ASCII frame around the text.
Args:
text (str): Text that needs to be formatted.
Returns:
str: Formatted string.
"""
width = max(map(len, text.splitlines()))
page = "+-" + "-" * width + "-+\n"
for line in text.splitlines():
page += "| " + line.ljust(width) + " |\n"
page += "+-" + "-" * width + "-+\n"
return page
|
[
"def",
"format_page",
"(",
"text",
")",
":",
"width",
"=",
"max",
"(",
"map",
"(",
"len",
",",
"text",
".",
"splitlines",
"(",
")",
")",
")",
"page",
"=",
"\"+-\"",
"+",
"\"-\"",
"*",
"width",
"+",
"\"-+\\n\"",
"for",
"line",
"in",
"text",
".",
"splitlines",
"(",
")",
":",
"page",
"+=",
"\"| \"",
"+",
"line",
".",
"ljust",
"(",
"width",
")",
"+",
"\" |\\n\"",
"page",
"+=",
"\"+-\"",
"+",
"\"-\"",
"*",
"width",
"+",
"\"-+\\n\"",
"return",
"page"
] |
Format the text for output adding ASCII frame around the text.
Args:
text (str): Text that needs to be formatted.
Returns:
str: Formatted string.
|
[
"Format",
"the",
"text",
"for",
"output",
"adding",
"ASCII",
"frame",
"around",
"the",
"text",
".",
"Args",
":",
"text",
"(",
"str",
")",
":",
"Text",
"that",
"needs",
"to",
"be",
"formatted",
".",
"Returns",
":",
"str",
":",
"Formatted",
"string",
"."
] |
python
|
train
| 28.933333 |
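A quick usage sketch for format_page, restated verbatim so it runs standalone:

def format_page(text):
    width = max(map(len, text.splitlines()))
    page = "+-" + "-" * width + "-+\n"
    for line in text.splitlines():
        page += "| " + line.ljust(width) + " |\n"
    page += "+-" + "-" * width + "-+\n"
    return page

print(format_page("hello\nworld!"))
# +--------+
# | hello  |
# | world! |
# +--------+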
barrust/mediawiki
|
mediawiki/mediawiki.py
|
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L662-L770
|
def categorytree(self, category, depth=5):
""" Generate the Category Tree for the given categories
Args:
category(str or list of strings): Category name(s)
depth(int): Depth to traverse the tree
Returns:
dict: Category tree structure
Note:
Set depth to **None** to get the whole tree
Note:
Return Data Structure: Subcategory contains the same \
recursive structure
>>> {
'category': {
'depth': Number,
'links': list,
'parent-categories': list,
'sub-categories': dict
}
}
.. versionadded:: 0.3.10 """
def __cat_tree_rec(cat, depth, tree, level, categories, links):
""" recursive function to build out the tree """
tree[cat] = dict()
tree[cat]["depth"] = level
tree[cat]["sub-categories"] = dict()
tree[cat]["links"] = list()
tree[cat]["parent-categories"] = list()
parent_cats = list()
if cat not in categories:
tries = 0
while True:
if tries > 10:
raise MediaWikiCategoryTreeError(cat)
try:
pag = self.page("{0}:{1}".format(self.category_prefix, cat))
categories[cat] = pag
parent_cats = categories[cat].categories
links[cat] = self.categorymembers(
cat, results=None, subcategories=True
)
break
except PageError:
raise PageError("{0}:{1}".format(self.category_prefix, cat))
except KeyboardInterrupt:
raise
except Exception:
tries = tries + 1
time.sleep(1)
else:
parent_cats = categories[cat].categories
tree[cat]["parent-categories"].extend(parent_cats)
tree[cat]["links"].extend(links[cat][0])
if depth and level >= depth:
for ctg in links[cat][1]:
tree[cat]["sub-categories"][ctg] = None
else:
for ctg in links[cat][1]:
__cat_tree_rec(
ctg,
depth,
tree[cat]["sub-categories"],
level + 1,
categories,
links,
)
# ###################################
# ### Actual Function Code ###
# ###################################
# make it simple to use both a list or a single category term
if not isinstance(category, list):
cats = [category]
else:
cats = category
# parameter verification
if len(cats) == 1 and (cats[0] is None or cats[0] == ""):
msg = (
"CategoryTree: Parameter 'category' must either "
"be a list of one or more categories or a string; "
"provided: '{}'".format(category)
)
raise ValueError(msg)
if depth is not None and depth < 1:
msg = (
"CategoryTree: Parameter 'depth' must be either None "
"(for the full tree) or be greater than 0"
)
raise ValueError(msg)
results = dict()
categories = dict()
links = dict()
for cat in cats:
if cat is None or cat == "":
continue
__cat_tree_rec(cat, depth, results, 0, categories, links)
return results
|
[
"def",
"categorytree",
"(",
"self",
",",
"category",
",",
"depth",
"=",
"5",
")",
":",
"def",
"__cat_tree_rec",
"(",
"cat",
",",
"depth",
",",
"tree",
",",
"level",
",",
"categories",
",",
"links",
")",
":",
"\"\"\" recursive function to build out the tree \"\"\"",
"tree",
"[",
"cat",
"]",
"=",
"dict",
"(",
")",
"tree",
"[",
"cat",
"]",
"[",
"\"depth\"",
"]",
"=",
"level",
"tree",
"[",
"cat",
"]",
"[",
"\"sub-categories\"",
"]",
"=",
"dict",
"(",
")",
"tree",
"[",
"cat",
"]",
"[",
"\"links\"",
"]",
"=",
"list",
"(",
")",
"tree",
"[",
"cat",
"]",
"[",
"\"parent-categories\"",
"]",
"=",
"list",
"(",
")",
"parent_cats",
"=",
"list",
"(",
")",
"if",
"cat",
"not",
"in",
"categories",
":",
"tries",
"=",
"0",
"while",
"True",
":",
"if",
"tries",
">",
"10",
":",
"raise",
"MediaWikiCategoryTreeError",
"(",
"cat",
")",
"try",
":",
"pag",
"=",
"self",
".",
"page",
"(",
"\"{0}:{1}\"",
".",
"format",
"(",
"self",
".",
"category_prefix",
",",
"cat",
")",
")",
"categories",
"[",
"cat",
"]",
"=",
"pag",
"parent_cats",
"=",
"categories",
"[",
"cat",
"]",
".",
"categories",
"links",
"[",
"cat",
"]",
"=",
"self",
".",
"categorymembers",
"(",
"cat",
",",
"results",
"=",
"None",
",",
"subcategories",
"=",
"True",
")",
"break",
"except",
"PageError",
":",
"raise",
"PageError",
"(",
"\"{0}:{1}\"",
".",
"format",
"(",
"self",
".",
"category_prefix",
",",
"cat",
")",
")",
"except",
"KeyboardInterrupt",
":",
"raise",
"except",
"Exception",
":",
"tries",
"=",
"tries",
"+",
"1",
"time",
".",
"sleep",
"(",
"1",
")",
"else",
":",
"parent_cats",
"=",
"categories",
"[",
"cat",
"]",
".",
"categories",
"tree",
"[",
"cat",
"]",
"[",
"\"parent-categories\"",
"]",
".",
"extend",
"(",
"parent_cats",
")",
"tree",
"[",
"cat",
"]",
"[",
"\"links\"",
"]",
".",
"extend",
"(",
"links",
"[",
"cat",
"]",
"[",
"0",
"]",
")",
"if",
"depth",
"and",
"level",
">=",
"depth",
":",
"for",
"ctg",
"in",
"links",
"[",
"cat",
"]",
"[",
"1",
"]",
":",
"tree",
"[",
"cat",
"]",
"[",
"\"sub-categories\"",
"]",
"[",
"ctg",
"]",
"=",
"None",
"else",
":",
"for",
"ctg",
"in",
"links",
"[",
"cat",
"]",
"[",
"1",
"]",
":",
"__cat_tree_rec",
"(",
"ctg",
",",
"depth",
",",
"tree",
"[",
"cat",
"]",
"[",
"\"sub-categories\"",
"]",
",",
"level",
"+",
"1",
",",
"categories",
",",
"links",
",",
")",
"# ###################################",
"# ### Actual Function Code ###",
"# ###################################",
"# make it simple to use both a list or a single category term",
"if",
"not",
"isinstance",
"(",
"category",
",",
"list",
")",
":",
"cats",
"=",
"[",
"category",
"]",
"else",
":",
"cats",
"=",
"category",
"# parameter verification",
"if",
"len",
"(",
"cats",
")",
"==",
"1",
"and",
"(",
"cats",
"[",
"0",
"]",
"is",
"None",
"or",
"cats",
"[",
"0",
"]",
"==",
"\"\"",
")",
":",
"msg",
"=",
"(",
"\"CategoryTree: Parameter 'category' must either \"",
"\"be a list of one or more categories or a string; \"",
"\"provided: '{}'\"",
".",
"format",
"(",
"category",
")",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"if",
"depth",
"is",
"not",
"None",
"and",
"depth",
"<",
"1",
":",
"msg",
"=",
"(",
"\"CategoryTree: Parameter 'depth' must be either None \"",
"\"(for the full tree) or be greater than 0\"",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"results",
"=",
"dict",
"(",
")",
"categories",
"=",
"dict",
"(",
")",
"links",
"=",
"dict",
"(",
")",
"for",
"cat",
"in",
"cats",
":",
"if",
"cat",
"is",
"None",
"or",
"cat",
"==",
"\"\"",
":",
"continue",
"__cat_tree_rec",
"(",
"cat",
",",
"depth",
",",
"results",
",",
"0",
",",
"categories",
",",
"links",
")",
"return",
"results"
] |
Generate the Category Tree for the given categories
Args:
category(str or list of strings): Category name(s)
depth(int): Depth to traverse the tree
Returns:
dict: Category tree structure
Note:
Set depth to **None** to get the whole tree
Note:
Return Data Structure: Subcategory contains the same \
recursive structure
>>> {
'category': {
'depth': Number,
'links': list,
'parent-categories': list,
'sub-categories': dict
}
}
.. versionadded:: 0.3.10
|
[
"Generate",
"the",
"Category",
"Tree",
"for",
"the",
"given",
"categories"
] |
python
|
train
| 35.706422 |
tensorflow/tensorboard
|
tensorboard/plugins/hparams/list_session_groups.py
|
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/list_session_groups.py#L98-L130
|
def _add_session(self, session, start_info, groups_by_name):
"""Adds a new Session protobuffer to the 'groups_by_name' dictionary.
Called by _build_session_groups when we encounter a new session. Creates
the Session protobuffer and adds it to the relevant group in the
'groups_by_name' dict. Creates the session group if this is the first time
we encounter it.
Args:
session: api_pb2.Session. The session to add.
start_info: The SessionStartInfo protobuffer associated with the session.
groups_by_name: A str to SessionGroup protobuffer dict. Representing the
session groups and sessions found so far.
"""
# If the group_name is empty, this session's group contains only
# this session. Use the session name for the group name since session
# names are unique.
group_name = start_info.group_name or session.name
if group_name in groups_by_name:
groups_by_name[group_name].sessions.extend([session])
else:
# Create the group and add the session as the first one.
group = api_pb2.SessionGroup(
name=group_name,
sessions=[session],
monitor_url=start_info.monitor_url)
# Copy hparams from the first session (all sessions should have the same
# hyperparameter values) into result.
# There doesn't seem to be a way to initialize a protobuffer map in the
# constructor.
for (key, value) in six.iteritems(start_info.hparams):
group.hparams[key].CopyFrom(value)
groups_by_name[group_name] = group
|
[
"def",
"_add_session",
"(",
"self",
",",
"session",
",",
"start_info",
",",
"groups_by_name",
")",
":",
"# If the group_name is empty, this session's group contains only",
"# this session. Use the session name for the group name since session",
"# names are unique.",
"group_name",
"=",
"start_info",
".",
"group_name",
"or",
"session",
".",
"name",
"if",
"group_name",
"in",
"groups_by_name",
":",
"groups_by_name",
"[",
"group_name",
"]",
".",
"sessions",
".",
"extend",
"(",
"[",
"session",
"]",
")",
"else",
":",
"# Create the group and add the session as the first one.",
"group",
"=",
"api_pb2",
".",
"SessionGroup",
"(",
"name",
"=",
"group_name",
",",
"sessions",
"=",
"[",
"session",
"]",
",",
"monitor_url",
"=",
"start_info",
".",
"monitor_url",
")",
"# Copy hparams from the first session (all sessions should have the same",
"# hyperparameter values) into result.",
"# There doesn't seem to be a way to initialize a protobuffer map in the",
"# constructor.",
"for",
"(",
"key",
",",
"value",
")",
"in",
"six",
".",
"iteritems",
"(",
"start_info",
".",
"hparams",
")",
":",
"group",
".",
"hparams",
"[",
"key",
"]",
".",
"CopyFrom",
"(",
"value",
")",
"groups_by_name",
"[",
"group_name",
"]",
"=",
"group"
] |
Adds a new Session protobuffer to the 'groups_by_name' dictionary.
Called by _build_session_groups when we encounter a new session. Creates
the Session protobuffer and adds it to the relevant group in the
'groups_by_name' dict. Creates the session group if this is the first time
we encounter it.
Args:
session: api_pb2.Session. The session to add.
start_info: The SessionStartInfo protobuffer associated with the session.
groups_by_name: A str to SessionGroup protobuffer dict. Representing the
session groups and sessions found so far.
|
[
"Adds",
"a",
"new",
"Session",
"protobuffer",
"to",
"the",
"groups_by_name",
"dictionary",
"."
] |
python
|
train
| 46.272727 |
aichaos/rivescript-python
|
rivescript/parser.py
|
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/parser.py#L507-L625
|
def check_syntax(self, cmd, line):
"""Syntax check a line of RiveScript code.
Args:
str cmd: The command symbol for the line of code, such as one
of ``+``, ``-``, ``*``, ``>``, etc.
str line: The remainder of the line of code, such as the text of
a trigger or reply.
Return:
str: A string syntax error message or ``None`` if no errors.
"""
# Run syntax checks based on the type of command.
if cmd == '!':
# ! Definition
# - Must be formatted like this:
# ! type name = value
# OR
# ! type = value
match = re.match(RE.def_syntax, line)
if not match:
return "Invalid format for !Definition line: must be '! type name = value' OR '! type = value'"
elif cmd == '>':
# > Label
# - The "begin" label must have only one argument ("begin")
# - "topic" labels must be lowercased but can inherit other topics (a-z0-9_\s)
# - "object" labels must follow the same rules as "topic", but don't need to be lowercase
parts = re.split(" ", line, 2)
if parts[0] == "begin" and len(parts) > 1:
return "The 'begin' label takes no additional arguments, should be verbatim '> begin'"
elif parts[0] == "topic":
search = re.search(RE.name_syntax, line)
if search:
return "Topics should be lowercased and contain only numbers and letters"
elif parts[0] == "object":
search = re.search(RE.obj_syntax, line) # Upper case is allowed
if search:
return "Objects can only contain numbers and letters"
elif cmd == '+' or cmd == '%' or cmd == '@':
# + Trigger, % Previous, @ Redirect
# This one is strict. The triggers are to be run through the regexp engine,
# therefore it should be acceptable for the regexp engine.
# - Entirely lowercase
# - No symbols except: ( | ) [ ] * _ # @ { } < > =
# - All brackets should be matched
# - No empty option with pipe such as ||, [|, |], (|, |) and whitespace between
parens = 0 # Open parenthesis
square = 0 # Open square brackets
curly = 0 # Open curly brackets
angle = 0 # Open angled brackets
# Count brackets.
for char in line:
if char == '(':
parens += 1
elif char == ')':
parens -= 1
elif char == '[':
square += 1
elif char == ']':
square -= 1
elif char == '{':
curly += 1
elif char == '}':
curly -= 1
elif char == '<':
angle += 1
elif char == '>':
angle -= 1
elif char == '|':
if parens == 0 and square == 0: # Pipe outside the alternative and option
return "Pipe | must be within parenthesis brackets or square brackets"
if (angle != 0) and (char in {"(", ")", "[", "]", "{", "}"}):
return "Angle bracket must be closed before closing or opening other type of brackets"
total = parens + square + curly # At each character, not more than 1 bracket opens, except <>
for special_char_count in [parens, square, curly, angle, total]:
if special_char_count not in (0, 1):
return "Unbalanced brackets"
# Any mismatches?
if parens != 0:
return "Unmatched parenthesis brackets"
elif square != 0:
return "Unmatched square brackets"
elif curly != 0:
return "Unmatched curly brackets"
elif angle != 0:
return "Unmatched angle brackets"
# Check for empty pipe
search = re.search(RE.empty_pipe, line)
if search:
return "Piped arrays can't include blank entries"
# In UTF-8 mode, most symbols are allowed.
if self.utf8:
search = re.search(RE.utf8_trig, line)
if search:
return "Triggers can't contain uppercase letters, backslashes or dots in UTF-8 mode."
else:
search = re.search(RE.trig_syntax, line)
if search:
return "Triggers may only contain lowercase letters, numbers, and these symbols: ( | ) [ ] * _ # @ { } < > ="
elif cmd == '-' or cmd == '^' or cmd == '/':
# - Trigger, ^ Continue, / Comment
# These commands take verbatim arguments, so their syntax is loose.
pass
elif cmd == '*':
# * Condition
# Syntax for a conditional is as follows:
# * value symbol value => response
match = re.match(RE.cond_syntax, line)
if not match:
return "Invalid format for !Condition: should be like '* value symbol value => response'"
return None
|
[
"def",
"check_syntax",
"(",
"self",
",",
"cmd",
",",
"line",
")",
":",
"# Run syntax checks based on the type of command.",
"if",
"cmd",
"==",
"'!'",
":",
"# ! Definition",
"# - Must be formatted like this:",
"# ! type name = value",
"# OR",
"# ! type = value",
"match",
"=",
"re",
".",
"match",
"(",
"RE",
".",
"def_syntax",
",",
"line",
")",
"if",
"not",
"match",
":",
"return",
"\"Invalid format for !Definition line: must be '! type name = value' OR '! type = value'\"",
"elif",
"cmd",
"==",
"'>'",
":",
"# > Label",
"# - The \"begin\" label must have only one argument (\"begin\")",
"# - \"topic\" labels must be lowercased but can inherit other topics (a-z0-9_\\s)",
"# - \"object\" labels must follow the same rules as \"topic\", but don't need to be lowercase",
"parts",
"=",
"re",
".",
"split",
"(",
"\" \"",
",",
"line",
",",
"2",
")",
"if",
"parts",
"[",
"0",
"]",
"==",
"\"begin\"",
"and",
"len",
"(",
"parts",
")",
">",
"1",
":",
"return",
"\"The 'begin' label takes no additional arguments, should be verbatim '> begin'\"",
"elif",
"parts",
"[",
"0",
"]",
"==",
"\"topic\"",
":",
"search",
"=",
"re",
".",
"search",
"(",
"RE",
".",
"name_syntax",
",",
"line",
")",
"if",
"search",
":",
"return",
"\"Topics should be lowercased and contain only numbers and letters\"",
"elif",
"parts",
"[",
"0",
"]",
"==",
"\"object\"",
":",
"search",
"=",
"re",
".",
"search",
"(",
"RE",
".",
"obj_syntax",
",",
"line",
")",
"# Upper case is allowed",
"if",
"search",
":",
"return",
"\"Objects can only contain numbers and letters\"",
"elif",
"cmd",
"==",
"'+'",
"or",
"cmd",
"==",
"'%'",
"or",
"cmd",
"==",
"'@'",
":",
"# + Trigger, % Previous, @ Redirect",
"# This one is strict. The triggers are to be run through the regexp engine,",
"# therefore it should be acceptable for the regexp engine.",
"# - Entirely lowercase",
"# - No symbols except: ( | ) [ ] * _ # @ { } < > =",
"# - All brackets should be matched",
"# - No empty option with pipe such as ||, [|, |], (|, |) and whitespace between",
"parens",
"=",
"0",
"# Open parenthesis",
"square",
"=",
"0",
"# Open square brackets",
"curly",
"=",
"0",
"# Open curly brackets",
"angle",
"=",
"0",
"# Open angled brackets",
"# Count brackets.",
"for",
"char",
"in",
"line",
":",
"if",
"char",
"==",
"'('",
":",
"parens",
"+=",
"1",
"elif",
"char",
"==",
"')'",
":",
"parens",
"-=",
"1",
"elif",
"char",
"==",
"'['",
":",
"square",
"+=",
"1",
"elif",
"char",
"==",
"']'",
":",
"square",
"-=",
"1",
"elif",
"char",
"==",
"'{'",
":",
"curly",
"+=",
"1",
"elif",
"char",
"==",
"'}'",
":",
"curly",
"-=",
"1",
"elif",
"char",
"==",
"'<'",
":",
"angle",
"+=",
"1",
"elif",
"char",
"==",
"'>'",
":",
"angle",
"-=",
"1",
"elif",
"char",
"==",
"'|'",
":",
"if",
"parens",
"==",
"0",
"and",
"square",
"==",
"0",
":",
"# Pipe outside the alternative and option",
"return",
"\"Pipe | must be within parenthesis brackets or square brackets\"",
"if",
"(",
"angle",
"!=",
"0",
")",
"and",
"(",
"char",
"in",
"{",
"\"(\"",
",",
"\")\"",
",",
"\"[\"",
",",
"\"]\"",
",",
"\"{\"",
",",
"\"}\"",
"}",
")",
":",
"return",
"\"Angle bracket must be closed before closing or opening other type of brackets\"",
"total",
"=",
"parens",
"+",
"square",
"+",
"curly",
"# At each character, not more than 1 bracket opens, except <>",
"for",
"special_char_count",
"in",
"[",
"parens",
",",
"square",
",",
"curly",
",",
"angle",
",",
"total",
"]",
":",
"if",
"special_char_count",
"not",
"in",
"(",
"0",
",",
"1",
")",
":",
"return",
"\"Unbalanced brackets\"",
"# Any mismatches?",
"if",
"parens",
"!=",
"0",
":",
"return",
"\"Unmatched parenthesis brackets\"",
"elif",
"square",
"!=",
"0",
":",
"return",
"\"Unmatched square brackets\"",
"elif",
"curly",
"!=",
"0",
":",
"return",
"\"Unmatched curly brackets\"",
"elif",
"angle",
"!=",
"0",
":",
"return",
"\"Unmatched angle brackets\"",
"# Check for empty pipe",
"search",
"=",
"re",
".",
"search",
"(",
"RE",
".",
"empty_pipe",
",",
"line",
")",
"if",
"search",
":",
"return",
"\"Piped arrays can't include blank entries\"",
"# In UTF-8 mode, most symbols are allowed.",
"if",
"self",
".",
"utf8",
":",
"search",
"=",
"re",
".",
"search",
"(",
"RE",
".",
"utf8_trig",
",",
"line",
")",
"if",
"search",
":",
"return",
"\"Triggers can't contain uppercase letters, backslashes or dots in UTF-8 mode.\"",
"else",
":",
"search",
"=",
"re",
".",
"search",
"(",
"RE",
".",
"trig_syntax",
",",
"line",
")",
"if",
"search",
":",
"return",
"\"Triggers may only contain lowercase letters, numbers, and these symbols: ( | ) [ ] * _ # @ { } < > =\"",
"elif",
"cmd",
"==",
"'-'",
"or",
"cmd",
"==",
"'^'",
"or",
"cmd",
"==",
"'/'",
":",
"# - Trigger, ^ Continue, / Comment",
"# These commands take verbatim arguments, so their syntax is loose.",
"pass",
"elif",
"cmd",
"==",
"'*'",
":",
"# * Condition",
"# Syntax for a conditional is as follows:",
"# * value symbol value => response",
"match",
"=",
"re",
".",
"match",
"(",
"RE",
".",
"cond_syntax",
",",
"line",
")",
"if",
"not",
"match",
":",
"return",
"\"Invalid format for !Condition: should be like '* value symbol value => response'\"",
"return",
"None"
] |
Syntax check a line of RiveScript code.
Args:
str cmd: The command symbol for the line of code, such as one
of ``+``, ``-``, ``*``, ``>``, etc.
str line: The remainder of the line of code, such as the text of
a trigger or reply.
Return:
str: A string syntax error message or ``None`` if no errors.
|
[
"Syntax",
"check",
"a",
"line",
"of",
"RiveScript",
"code",
"."
] |
python
|
train
| 44.487395 |
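The heart of check_syntax's trigger validation is the bracket-balancing walk. Below is a stripped-down sketch of just that walk, not RiveScript's full rule set:

def bracket_errors(line):
    pairs = {"(": ")", "[": "]", "{": "}", "<": ">"}
    counts = {opener: 0 for opener in pairs}
    closers = {closer: opener for opener, closer in pairs.items()}
    for char in line:
        if char in counts:
            counts[char] += 1
        elif char in closers:
            counts[closers[char]] -= 1
            if counts[closers[char]] < 0:  # closed before it was opened
                return "Unmatched {!r}".format(char)
    for opener, n in counts.items():
        if n:  # still open at end of line
            return "Unmatched {!r}".format(opener)
    return None

print(bracket_errors("[*] hello (world)"))  # None
print(bracket_errors("(unbalanced [pipe"))  # Unmatched '('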
vijaykatam/django-cache-manager
|
django_cache_manager/mixins.py
|
https://github.com/vijaykatam/django-cache-manager/blob/05142c44eb349d3f24f962592945888d9d367375/django_cache_manager/mixins.py#L90-L105
|
def cache_backend(self):
"""
Get the cache backend
Returns
~~~~~~~
Django cache backend
"""
if not hasattr(self, '_cache_backend'):
if hasattr(django.core.cache, 'caches'):
self._cache_backend = django.core.cache.caches[_cache_name]
else:
self._cache_backend = django.core.cache.get_cache(_cache_name)
return self._cache_backend
|
[
"def",
"cache_backend",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_cache_backend'",
")",
":",
"if",
"hasattr",
"(",
"django",
".",
"core",
".",
"cache",
",",
"'caches'",
")",
":",
"self",
".",
"_cache_backend",
"=",
"django",
".",
"core",
".",
"cache",
".",
"caches",
"[",
"_cache_name",
"]",
"else",
":",
"self",
".",
"_cache_backend",
"=",
"django",
".",
"core",
".",
"cache",
".",
"get_cache",
"(",
"_cache_name",
")",
"return",
"self",
".",
"_cache_backend"
] |
Get the cache backend
Returns
~~~~~~~
Django cache backend
|
[
"Get",
"the",
"cache",
"backend"
] |
python
|
train
| 27.25 |
leosartaj/sub
|
sub/main.py
|
https://github.com/leosartaj/sub/blob/9a8e55a5326c3b41357eedd235e7c36f253db2e0/sub/main.py#L95-L115
|
def download(name, options):
"""
download a file or all files in a directory
"""
dire = os.path.dirname(name) # returns the directory name
fName = os.path.basename(name) # returns the filename
fNameOnly, fExt = os.path.splitext(fName)
dwn = 0
if fileExists(fName, dire) and not fileExists((fNameOnly + '.srt'), dire): # skip if already downloaded
if file_downloaded(download_file(fName, options.timeout, dire), fName, options.verbose):
dwn += 1
elif dirExists(name):
for filename in os.listdir(name):
if options.recursive:
dwn += download(os.path.join(name, filename), options)
else:
if file_downloaded(download_file(filename, options.timeout, name), filename, options.verbose):
dwn += 1
return dwn
|
[
"def",
"download",
"(",
"name",
",",
"options",
")",
":",
"dire",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"name",
")",
"# returns the directory name",
"fName",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"name",
")",
"# returns the filename",
"fNameOnly",
",",
"fExt",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"fName",
")",
"dwn",
"=",
"0",
"if",
"fileExists",
"(",
"fName",
",",
"dire",
")",
"and",
"not",
"fileExists",
"(",
"(",
"fNameOnly",
"+",
"'.srt'",
")",
",",
"dire",
")",
":",
"# skip if already downloaded",
"if",
"file_downloaded",
"(",
"download_file",
"(",
"fName",
",",
"options",
".",
"timeout",
",",
"dire",
")",
",",
"fName",
",",
"options",
".",
"verbose",
")",
":",
"dwn",
"+=",
"1",
"elif",
"dirExists",
"(",
"name",
")",
":",
"for",
"filename",
"in",
"os",
".",
"listdir",
"(",
"name",
")",
":",
"if",
"options",
".",
"recursive",
":",
"dwn",
"+=",
"download",
"(",
"os",
".",
"path",
".",
"join",
"(",
"name",
",",
"filename",
")",
",",
"options",
")",
"else",
":",
"if",
"file_downloaded",
"(",
"download_file",
"(",
"filename",
",",
"options",
".",
"timeout",
",",
"name",
")",
",",
"filename",
",",
"options",
".",
"verbose",
")",
":",
"dwn",
"+=",
"1",
"return",
"dwn"
] |
download a file or all files in a directory
|
[
"download",
"a",
"file",
"or",
"all",
"files",
"in",
"a",
"directory"
] |
python
|
train
| 39.238095 |
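The os.path bookkeeping at the top of download() is easy to verify on a sample path (the filename here is invented):

import os

name = "movies/episode.one.mkv"
dire = os.path.dirname(name)               # 'movies'
fName = os.path.basename(name)             # 'episode.one.mkv'
fNameOnly, fExt = os.path.splitext(fName)  # ('episode.one', '.mkv')
print(dire, fName, fNameOnly, fExt)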
inspirehep/inspire-dojson
|
inspire_dojson/hep/rules/bd9xx.py
|
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd9xx.py#L256-L306
|
def references(self, key, value):
"""Populate the ``references`` key."""
def _has_curator_flag(value):
normalized_nine_values = [el.upper() for el in force_list(value.get('9'))]
return 'CURATOR' in normalized_nine_values
def _is_curated(value):
return force_single_element(value.get('z')) == '1' and _has_curator_flag(value)
def _set_record(el):
recid = maybe_int(el)
record = get_record_ref(recid, 'literature')
rb.set_record(record)
rb = ReferenceBuilder()
mapping = [
('0', _set_record),
('a', rb.add_uid),
('b', rb.add_uid),
('c', rb.add_collaboration),
('e', partial(rb.add_author, role='ed.')),
('h', rb.add_refextract_authors_str),
('i', rb.add_uid),
('k', rb.set_texkey),
('m', rb.add_misc),
('o', rb.set_label),
('p', rb.set_publisher),
('q', rb.add_parent_title),
('r', rb.add_report_number),
('s', rb.set_pubnote),
('t', rb.add_title),
('x', rb.add_raw_reference),
('y', rb.set_year),
]
for field, method in mapping:
for el in force_list(value.get(field)):
if el:
method(el)
for el in dedupe_list(force_list(value.get('u'))):
if el:
rb.add_url(el)
if _is_curated(value):
rb.curate()
if _has_curator_flag(value):
rb.obj['legacy_curated'] = True
return rb.obj
|
[
"def",
"references",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"def",
"_has_curator_flag",
"(",
"value",
")",
":",
"normalized_nine_values",
"=",
"[",
"el",
".",
"upper",
"(",
")",
"for",
"el",
"in",
"force_list",
"(",
"value",
".",
"get",
"(",
"'9'",
")",
")",
"]",
"return",
"'CURATOR'",
"in",
"normalized_nine_values",
"def",
"_is_curated",
"(",
"value",
")",
":",
"return",
"force_single_element",
"(",
"value",
".",
"get",
"(",
"'z'",
")",
")",
"==",
"'1'",
"and",
"_has_curator_flag",
"(",
"value",
")",
"def",
"_set_record",
"(",
"el",
")",
":",
"recid",
"=",
"maybe_int",
"(",
"el",
")",
"record",
"=",
"get_record_ref",
"(",
"recid",
",",
"'literature'",
")",
"rb",
".",
"set_record",
"(",
"record",
")",
"rb",
"=",
"ReferenceBuilder",
"(",
")",
"mapping",
"=",
"[",
"(",
"'0'",
",",
"_set_record",
")",
",",
"(",
"'a'",
",",
"rb",
".",
"add_uid",
")",
",",
"(",
"'b'",
",",
"rb",
".",
"add_uid",
")",
",",
"(",
"'c'",
",",
"rb",
".",
"add_collaboration",
")",
",",
"(",
"'e'",
",",
"partial",
"(",
"rb",
".",
"add_author",
",",
"role",
"=",
"'ed.'",
")",
")",
",",
"(",
"'h'",
",",
"rb",
".",
"add_refextract_authors_str",
")",
",",
"(",
"'i'",
",",
"rb",
".",
"add_uid",
")",
",",
"(",
"'k'",
",",
"rb",
".",
"set_texkey",
")",
",",
"(",
"'m'",
",",
"rb",
".",
"add_misc",
")",
",",
"(",
"'o'",
",",
"rb",
".",
"set_label",
")",
",",
"(",
"'p'",
",",
"rb",
".",
"set_publisher",
")",
",",
"(",
"'q'",
",",
"rb",
".",
"add_parent_title",
")",
",",
"(",
"'r'",
",",
"rb",
".",
"add_report_number",
")",
",",
"(",
"'s'",
",",
"rb",
".",
"set_pubnote",
")",
",",
"(",
"'t'",
",",
"rb",
".",
"add_title",
")",
",",
"(",
"'x'",
",",
"rb",
".",
"add_raw_reference",
")",
",",
"(",
"'y'",
",",
"rb",
".",
"set_year",
")",
",",
"]",
"for",
"field",
",",
"method",
"in",
"mapping",
":",
"for",
"el",
"in",
"force_list",
"(",
"value",
".",
"get",
"(",
"field",
")",
")",
":",
"if",
"el",
":",
"method",
"(",
"el",
")",
"for",
"el",
"in",
"dedupe_list",
"(",
"force_list",
"(",
"value",
".",
"get",
"(",
"'u'",
")",
")",
")",
":",
"if",
"el",
":",
"rb",
".",
"add_url",
"(",
"el",
")",
"if",
"_is_curated",
"(",
"value",
")",
":",
"rb",
".",
"curate",
"(",
")",
"if",
"_has_curator_flag",
"(",
"value",
")",
":",
"rb",
".",
"obj",
"[",
"'legacy_curated'",
"]",
"=",
"True",
"return",
"rb",
".",
"obj"
] |
Populate the ``references`` key.
|
[
"Populate",
"the",
"references",
"key",
"."
] |
python
|
train
| 28.039216 |
rsmuc/health_monitoring_plugins
|
health_monitoring_plugins/check_snmp_port/check_snmp_port.py
|
https://github.com/rsmuc/health_monitoring_plugins/blob/7ac29dfb9fe46c055b018cb72ad0d7d8065589b9/health_monitoring_plugins/check_snmp_port/check_snmp_port.py#L92-L150
|
def check_tcp(helper, host, port, warning_param, critical_param, session):
"""
the check logic for checking TCP ports
"""
# from tcpConnState from TCP-MIB
tcp_translate = {
"1" : "closed",
"2" : "listen",
"3" : "synSent",
"4" : "synReceived",
"5" : "established",
"6" : "finWait1",
"7" : "finWait2",
"8" : "closeWait",
"9" : "lastAck",
"10": "closing",
"11": "timeWait",
"12": "deleteTCB"
}
# collect all open local ports
open_ports = walk_data(session, '.1.3.6.1.2.1.6.13.1.3', helper)[0] #tcpConnLocalPort from TCP-MIB (deprecated)
# collect all status information about the open ports
port_status = walk_data(session, '.1.3.6.1.2.1.6.13.1.1', helper)[0] #tcpConnState from TCP-MIB (deprecated)
# make a dict out of the two lists
port_and_status = dict(zip(open_ports, port_status))
# here we show all open TCP ports and it's status
if scan:
print "All open TCP ports: " + host
for port in open_ports:
tcp_status = port_and_status[port]
tcp_status = tcp_translate[tcp_status]
print "TCP: \t" + port + "\t Status: \t" + tcp_status
quit()
#here we have the real check logic for TCP ports
if port in open_ports:
# if the port is available in the list of open_ports, then extract the status
tcp_status = port_and_status[port]
# translate the status from the integer value to a human readable string
tcp_status = tcp_translate[tcp_status]
# now let's set the status according to the warning / critical "threshold" parameter
if tcp_status in warning_param:
helper.status(warning)
elif tcp_status in critical_param:
helper.status(critical)
else:
helper.status(ok)
else:
# if there is no value in the list => the port is closed for sure
tcp_status = "CLOSED"
helper.status(critical)
return ("Current status for TCP port " + port + " is: " + tcp_status)
|
[
"def",
"check_tcp",
"(",
"helper",
",",
"host",
",",
"port",
",",
"warning_param",
",",
"critical_param",
",",
"session",
")",
":",
"# from tcpConnState from TCP-MIB",
"tcp_translate",
"=",
"{",
"\"1\"",
":",
"\"closed\"",
",",
"\"2\"",
":",
"\"listen\"",
",",
"\"3\"",
":",
"\"synSent\"",
",",
"\"4\"",
":",
"\"synReceived\"",
",",
"\"5\"",
":",
"\"established\"",
",",
"\"6\"",
":",
"\"finWait1\"",
",",
"\"7\"",
":",
"\"finWait2\"",
",",
"\"8\"",
":",
"\"closeWait\"",
",",
"\"9\"",
":",
"\"lastAck\"",
",",
"\"10\"",
":",
"\"closing\"",
",",
"\"11\"",
":",
"\"timeWait\"",
",",
"\"12\"",
":",
"\"deleteTCB\"",
"}",
"# collect all open local ports",
"open_ports",
"=",
"walk_data",
"(",
"session",
",",
"'.1.3.6.1.2.1.6.13.1.3'",
",",
"helper",
")",
"[",
"0",
"]",
"#tcpConnLocalPort from TCP-MIB (deprecated)",
"# collect all status information about the open ports",
"port_status",
"=",
"walk_data",
"(",
"session",
",",
"'.1.3.6.1.2.1.6.13.1.1'",
",",
"helper",
")",
"[",
"0",
"]",
"#tcpConnState from TCP-MIB (deprecated)",
"# make a dict out of the two lists",
"port_and_status",
"=",
"dict",
"(",
"zip",
"(",
"open_ports",
",",
"port_status",
")",
")",
"# here we show all open TCP ports and it's status",
"if",
"scan",
":",
"print",
"\"All open TCP ports: \"",
"+",
"host",
"for",
"port",
"in",
"open_ports",
":",
"tcp_status",
"=",
"port_and_status",
"[",
"port",
"]",
"tcp_status",
"=",
"tcp_translate",
"[",
"tcp_status",
"]",
"print",
"\"TCP: \\t\"",
"+",
"port",
"+",
"\"\\t Status: \\t\"",
"+",
"tcp_status",
"quit",
"(",
")",
"#here we have the real check logic for TCP ports",
"if",
"port",
"in",
"open_ports",
":",
"# if the port is available in the list of open_ports, then extract the status",
"tcp_status",
"=",
"port_and_status",
"[",
"port",
"]",
"# translate the status from the integer value to a human readable string",
"tcp_status",
"=",
"tcp_translate",
"[",
"tcp_status",
"]",
"# now let's set the status according to the warning / critical \"threshold\" parameter ",
"if",
"tcp_status",
"in",
"warning_param",
":",
"helper",
".",
"status",
"(",
"warning",
")",
"elif",
"tcp_status",
"in",
"critical_param",
":",
"helper",
".",
"status",
"(",
"critical",
")",
"else",
":",
"helper",
".",
"status",
"(",
"ok",
")",
"else",
":",
"# if there is no value in the list => the port is closed for sure",
"tcp_status",
"=",
"\"CLOSED\"",
"helper",
".",
"status",
"(",
"critical",
")",
"return",
"(",
"\"Current status for TCP port \"",
"+",
"port",
"+",
"\" is: \"",
"+",
"tcp_status",
")"
] |
the check logic for checking TCP ports
|
[
"the",
"check",
"logic",
"for",
"check",
"TCP",
"ports"
] |
python
|
train
| 35.084746 |
hydpy-dev/hydpy
|
hydpy/models/dam/dam_model.py
|
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/dam/dam_model.py#L2258-L2262
|
def pass_actualremoterelease_v1(self):
"""Update the outlet link sequence |dam_outlets.S|."""
flu = self.sequences.fluxes.fastaccess
out = self.sequences.outlets.fastaccess
out.s[0] += flu.actualremoterelease
|
[
"def",
"pass_actualremoterelease_v1",
"(",
"self",
")",
":",
"flu",
"=",
"self",
".",
"sequences",
".",
"fluxes",
".",
"fastaccess",
"out",
"=",
"self",
".",
"sequences",
".",
"outlets",
".",
"fastaccess",
"out",
".",
"s",
"[",
"0",
"]",
"+=",
"flu",
".",
"actualremoterelease"
] |
Update the outlet link sequence |dam_outlets.S|.
|
[
"Update",
"the",
"outlet",
"link",
"sequence",
"|dam_outlets",
".",
"S|",
"."
] |
python
|
train
| 44 |
Parsl/parsl
|
parsl/dataflow/flow_control.py
|
https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/dataflow/flow_control.py#L100-L121
|
def _wake_up_timer(self, kill_event):
"""Internal. This is the function that the thread will execute.
waits on an event so that the thread can make a quick exit when close() is called
Args:
- kill_event (threading.Event) : Event to wait on
"""
while True:
prev = self._wake_up_time
# Waiting for the event returns True only when the event
# is set, usually by the parent thread
time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))
if time_to_die:
return
if prev == self._wake_up_time:
self.make_callback(kind='timer')
else:
print("Sleeping a bit more")
|
[
"def",
"_wake_up_timer",
"(",
"self",
",",
"kill_event",
")",
":",
"while",
"True",
":",
"prev",
"=",
"self",
".",
"_wake_up_time",
"# Waiting for the event returns True only when the event",
"# is set, usually by the parent thread",
"time_to_die",
"=",
"kill_event",
".",
"wait",
"(",
"float",
"(",
"max",
"(",
"prev",
"-",
"time",
".",
"time",
"(",
")",
",",
"0",
")",
")",
")",
"if",
"time_to_die",
":",
"return",
"if",
"prev",
"==",
"self",
".",
"_wake_up_time",
":",
"self",
".",
"make_callback",
"(",
"kind",
"=",
"'timer'",
")",
"else",
":",
"print",
"(",
"\"Sleeping a bit more\"",
")"
] |
Internal. This is the function that the thread will execute.
waits on an event so that the thread can make a quick exit when close() is called
Args:
- kill_event (threading.Event) : Event to wait on
|
[
"Internal",
".",
"This",
"is",
"the",
"function",
"that",
"the",
"thread",
"will",
"execute",
".",
"waits",
"on",
"an",
"event",
"so",
"that",
"the",
"thread",
"can",
"make",
"a",
"quick",
"exit",
"when",
"close",
"()",
"is",
"called"
] |
python
|
valid
| 33.272727 |
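The kill_event.wait(timeout) idiom above generalizes to any worker thread that must sleep in bounded intervals yet exit promptly. A self-contained sketch:

import threading
import time

def worker(kill_event, interval=0.2):
    while True:
        # wait() returns True as soon as the event is set, False on timeout
        if kill_event.wait(interval):
            print("worker: shutting down")
            return
        print("worker: tick")

kill = threading.Event()
t = threading.Thread(target=worker, args=(kill,))
t.start()
time.sleep(0.5)  # let a couple of ticks happen
kill.set()       # request a quick exit
t.join()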
dmbee/seglearn
|
seglearn/transform.py
|
https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L549-L552
|
def expand_variables_to_segments(v, Nt):
''' expands contextual variables v, by repeating each instance as specified in Nt '''
N_v = len(np.atleast_1d(v[0]))
return np.concatenate([np.full((Nt[i], N_v), v[i]) for i in np.arange(len(v))])
|
[
"def",
"expand_variables_to_segments",
"(",
"v",
",",
"Nt",
")",
":",
"N_v",
"=",
"len",
"(",
"np",
".",
"atleast_1d",
"(",
"v",
"[",
"0",
"]",
")",
")",
"return",
"np",
".",
"concatenate",
"(",
"[",
"np",
".",
"full",
"(",
"(",
"Nt",
"[",
"i",
"]",
",",
"N_v",
")",
",",
"v",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"len",
"(",
"v",
")",
")",
"]",
")"
] |
expands contextual variables v, by repeating each instance as specified in Nt
|
[
"expands",
"contextual",
"variables",
"v",
"by",
"repeating",
"each",
"instance",
"as",
"specified",
"in",
"Nt"
] |
python
|
train
| 61.5 |
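A tiny worked example of the expansion above: each contextual value v[i] is repeated Nt[i] times, one copy per segment, and the copies are stacked row-wise.

import numpy as np

v = np.array([10, 20])   # one contextual value per time series
Nt = np.array([3, 2])    # number of segments per series
expanded = np.concatenate([np.full((Nt[i], 1), v[i]) for i in np.arange(len(v))])
print(expanded.ravel())  # [10 10 10 20 20]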
xeroc/python-graphenelib
|
grapheneapi/grapheneapi.py
|
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/grapheneapi/grapheneapi.py#L68-L108
|
def rpcexec(self, payload):
""" Manual execute a command on API (internally used)
param str payload: The payload containing the request
return: Servers answer to the query
rtype: json
raises RPCConnection: if no connction can be made
raises UnauthorizedError: if the user is not authorized
raise ValueError: if the API returns a non-JSON formated answer
It is not recommended to use this method directly, unless
you know what you are doing. All calls available to the API
will be wrapped to methods directly::
info -> grapheneapi.info()
"""
try:
response = requests.post(
"http://{}:{}/rpc".format(self.host, self.port),
data=json.dumps(payload, ensure_ascii=False).encode("utf8"),
headers=self.headers,
auth=(self.username, self.password),
)
if response.status_code == 401:
raise UnauthorizedError
ret = json.loads(response.text)
if "error" in ret:
if "detail" in ret["error"]:
raise RPCError(ret["error"]["detail"])
else:
raise RPCError(ret["error"]["message"])
except requests.exceptions.RequestException:
raise RPCConnection("Error connecting to Client!")
except UnauthorizedError:
raise UnauthorizedError("Invalid login credentials!")
except ValueError:
raise ValueError("Client returned invalid format. Expected JSON!")
except RPCError as err:
raise err
else:
return ret["result"]
|
[
"def",
"rpcexec",
"(",
"self",
",",
"payload",
")",
":",
"try",
":",
"response",
"=",
"requests",
".",
"post",
"(",
"\"http://{}:{}/rpc\"",
".",
"format",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
")",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"payload",
",",
"ensure_ascii",
"=",
"False",
")",
".",
"encode",
"(",
"\"utf8\"",
")",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"auth",
"=",
"(",
"self",
".",
"username",
",",
"self",
".",
"password",
")",
",",
")",
"if",
"response",
".",
"status_code",
"==",
"401",
":",
"raise",
"UnauthorizedError",
"ret",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"text",
")",
"if",
"\"error\"",
"in",
"ret",
":",
"if",
"\"detail\"",
"in",
"ret",
"[",
"\"error\"",
"]",
":",
"raise",
"RPCError",
"(",
"ret",
"[",
"\"error\"",
"]",
"[",
"\"detail\"",
"]",
")",
"else",
":",
"raise",
"RPCError",
"(",
"ret",
"[",
"\"error\"",
"]",
"[",
"\"message\"",
"]",
")",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
":",
"raise",
"RPCConnection",
"(",
"\"Error connecting to Client!\"",
")",
"except",
"UnauthorizedError",
":",
"raise",
"UnauthorizedError",
"(",
"\"Invalid login credentials!\"",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Client returned invalid format. Expected JSON!\"",
")",
"except",
"RPCError",
"as",
"err",
":",
"raise",
"err",
"else",
":",
"return",
"ret",
"[",
"\"result\"",
"]"
] |
Manually execute a command on API (internally used)
param str payload: The payload containing the request
return: Server's answer to the query
rtype: json
raises RPCConnection: if no connection can be made
raises UnauthorizedError: if the user is not authorized
raises ValueError: if the API returns a non-JSON formatted answer
It is not recommended to use this method directly, unless
you know what you are doing. All calls available to the API
will be wrapped to methods directly::
info -> grapheneapi.info()
|
[
"Manual",
"execute",
"a",
"command",
"on",
"API",
"(",
"internally",
"used",
")"
] |
python
|
valid
| 41.682927 |
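For orientation, here is a minimal sketch of the JSON-RPC round trip rpcexec performs; the host, port, credentials and method below are placeholders rather than a live endpoint, and the error handling is reduced to the essentials.

import json
import requests

payload = {"jsonrpc": "2.0", "method": "info", "params": [], "id": 1}
response = requests.post(
    "http://127.0.0.1:8092/rpc",  # placeholder endpoint
    data=json.dumps(payload, ensure_ascii=False).encode("utf8"),
    headers={"content-type": "application/json"},
    auth=("user", "password"),    # placeholder credentials
)
ret = json.loads(response.text)
if "error" in ret:
    raise RuntimeError(ret["error"].get("detail", ret["error"].get("message")))
result = ret["result"]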
bennylope/django-organizations
|
organizations/backends/modeled.py
|
https://github.com/bennylope/django-organizations/blob/85f753a8f7a8f0f31636c9209fb69e7030a5c79a/organizations/backends/modeled.py#L195-L228
|
def email_message(
self,
recipient, # type: Text
subject_template, # type: Text
body_template, # type: Text
sender=None, # type: Optional[AbstractUser]
message_class=EmailMessage,
**kwargs
):
"""
Returns an invitation email message. This can be easily overridden.
For instance, to send an HTML message, use the EmailMultiAlternatives message_class
and attach the additional content.
"""
from_email = "%s %s <%s>" % (
sender.first_name,
sender.last_name,
email.utils.parseaddr(settings.DEFAULT_FROM_EMAIL)[1],
)
reply_to = "%s %s <%s>" % (sender.first_name, sender.last_name, sender.email)
headers = {"Reply-To": reply_to}
kwargs.update({"sender": sender, "recipient": recipient})
subject_template = loader.get_template(subject_template)
body_template = loader.get_template(body_template)
subject = subject_template.render(
kwargs
).strip() # Remove stray newline characters
body = body_template.render(kwargs)
return message_class(subject, body, from_email, [recipient], headers=headers)
|
[
"def",
"email_message",
"(",
"self",
",",
"recipient",
",",
"# type: Text",
"subject_template",
",",
"# type: Text",
"body_template",
",",
"# type: Text",
"sender",
"=",
"None",
",",
"# type: Optional[AbstractUser]",
"message_class",
"=",
"EmailMessage",
",",
"*",
"*",
"kwargs",
")",
":",
"from_email",
"=",
"\"%s %s <%s>\"",
"%",
"(",
"sender",
".",
"first_name",
",",
"sender",
".",
"last_name",
",",
"email",
".",
"utils",
".",
"parseaddr",
"(",
"settings",
".",
"DEFAULT_FROM_EMAIL",
")",
"[",
"1",
"]",
",",
")",
"reply_to",
"=",
"\"%s %s <%s>\"",
"%",
"(",
"sender",
".",
"first_name",
",",
"sender",
".",
"last_name",
",",
"sender",
".",
"email",
")",
"headers",
"=",
"{",
"\"Reply-To\"",
":",
"reply_to",
"}",
"kwargs",
".",
"update",
"(",
"{",
"\"sender\"",
":",
"sender",
",",
"\"recipient\"",
":",
"recipient",
"}",
")",
"subject_template",
"=",
"loader",
".",
"get_template",
"(",
"subject_template",
")",
"body_template",
"=",
"loader",
".",
"get_template",
"(",
"body_template",
")",
"subject",
"=",
"subject_template",
".",
"render",
"(",
"kwargs",
")",
".",
"strip",
"(",
")",
"# Remove stray newline characters",
"body",
"=",
"body_template",
".",
"render",
"(",
"kwargs",
")",
"return",
"message_class",
"(",
"subject",
",",
"body",
",",
"from_email",
",",
"[",
"recipient",
"]",
",",
"headers",
"=",
"headers",
")"
] |
Returns an invitation email message. This can be easily overridden.
For instance, to send an HTML message, use the EmailMultiAlternatives message_class
and attach the additional content.
|
[
"Returns",
"an",
"invitation",
"email",
"message",
".",
"This",
"can",
"be",
"easily",
"overridden",
".",
"For",
"instance",
"to",
"send",
"an",
"HTML",
"message",
"use",
"the",
"EmailMultiAlternatives",
"message_class",
"and",
"attach",
"the",
"additional",
"conent",
"."
] |
python
|
train
| 35.264706 |
Duke-GCB/lando-messaging
|
lando_messaging/workqueue.py
|
https://github.com/Duke-GCB/lando-messaging/blob/b90ccc79a874714e0776af8badf505bb2b56c0ec/lando_messaging/workqueue.py#L242-L249
|
def shutdown(self, payload=None):
"""
Close the connection/shutdown the messaging loop.
:param payload: None: not used. Here to allow using this method with add_command.
"""
logging.info("Work queue shutdown.")
self.connection.close()
self.receiving_messages = False
|
[
"def",
"shutdown",
"(",
"self",
",",
"payload",
"=",
"None",
")",
":",
"logging",
".",
"info",
"(",
"\"Work queue shutdown.\"",
")",
"self",
".",
"connection",
".",
"close",
"(",
")",
"self",
".",
"receiving_messages",
"=",
"False"
] |
Close the connection/shutdown the messaging loop.
:param payload: None: not used. Here to allow using this method with add_command.
|
[
"Close",
"the",
"connection",
"/",
"shutdown",
"the",
"messaging",
"loop",
".",
":",
"param",
"payload",
":",
"None",
":",
"not",
"used",
".",
"Here",
"to",
"allow",
"using",
"this",
"method",
"with",
"add_command",
"."
] |
python
|
train
| 39.375 |
brocade/pynos
|
pynos/versions/ver_6/ver_6_0_1/yang/brocade_aaa.py
|
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_aaa.py#L389-L398
|
def tacacs_server_tacacs_source_ip(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
tacacs_server = ET.SubElement(config, "tacacs-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
tacacs_source_ip = ET.SubElement(tacacs_server, "tacacs-source-ip")
tacacs_source_ip.text = kwargs.pop('tacacs_source_ip')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
[
"def",
"tacacs_server_tacacs_source_ip",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"tacacs_server",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"tacacs-server\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-aaa\"",
")",
"tacacs_source_ip",
"=",
"ET",
".",
"SubElement",
"(",
"tacacs_server",
",",
"\"tacacs-source-ip\"",
")",
"tacacs_source_ip",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'tacacs_source_ip'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] |
Auto Generated Code
|
[
"Auto",
"Generated",
"Code"
] |
python
|
train
| 45.8 |
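The ElementTree pattern used above is standard-library only, so it is easy to reproduce; the source IP below is an illustrative value.

import xml.etree.ElementTree as ET

config = ET.Element("config")
tacacs_server = ET.SubElement(config, "tacacs-server",
                              xmlns="urn:brocade.com:mgmt:brocade-aaa")
tacacs_source_ip = ET.SubElement(tacacs_server, "tacacs-source-ip")
tacacs_source_ip.text = "192.0.2.10"  # documentation-range IP, illustrative only
print(ET.tostring(config, encoding="unicode"))
# prints, on one line:
# <config><tacacs-server xmlns="urn:brocade.com:mgmt:brocade-aaa"><tacacs-source-ip>192.0.2.10</tacacs-source-ip></tacacs-server></config>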
mayfield/shellish
|
shellish/command/supplement.py
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/supplement.py#L329-L337
|
def print_help(self, *args, **kwargs):
""" Add pager support to help output. """
if self._command is not None and self._command.session.allow_pager:
desc = 'Help\: %s' % '-'.join(self.prog.split())
pager_kwargs = self._command.get_pager_spec()
with paging.pager_redirect(desc, **pager_kwargs):
return super().print_help(*args, **kwargs)
else:
return super().print_help(*args, **kwargs)
|
[
"def",
"print_help",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_command",
"is",
"not",
"None",
"and",
"self",
".",
"_command",
".",
"session",
".",
"allow_pager",
":",
"desc",
"=",
"'Help\\: %s'",
"%",
"'-'",
".",
"join",
"(",
"self",
".",
"prog",
".",
"split",
"(",
")",
")",
"pager_kwargs",
"=",
"self",
".",
"_command",
".",
"get_pager_spec",
"(",
")",
"with",
"paging",
".",
"pager_redirect",
"(",
"desc",
",",
"*",
"*",
"pager_kwargs",
")",
":",
"return",
"super",
"(",
")",
".",
"print_help",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"super",
"(",
")",
".",
"print_help",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Add pager support to help output.
|
[
"Add",
"pager",
"support",
"to",
"help",
"output",
"."
] |
python
|
train
| 52.111111 |
pyamg/pyamg
|
pyamg/util/utils.py
|
https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L478-L524
|
def to_type(upcast_type, varlist):
"""Loop over all elements of varlist and convert them to upcasttype.
Parameters
----------
upcast_type : data type
e.g. complex, float64 or complex128
varlist : list
list may contain arrays, mat's, sparse matrices, or scalars
the elements may be float, int or complex
Returns
-------
Returns upcast-ed varlist to upcast_type
Notes
-----
Useful when harmonizing the types of variables, such as
if A and b are complex, but x,y and z are not.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import to_type
>>> from scipy.sparse.sputils import upcast
>>> x = np.ones((5,1))
>>> y = 2.0j*np.ones((5,1))
>>> varlist = to_type(upcast(x.dtype, y.dtype), [x, y])
"""
# convert_type = type(np.array([0], upcast_type)[0])
for i in range(len(varlist)):
# convert scalars to complex
if np.isscalar(varlist[i]):
varlist[i] = np.array([varlist[i]], upcast_type)[0]
else:
# convert sparse and dense mats to complex
try:
if varlist[i].dtype != upcast_type:
varlist[i] = varlist[i].astype(upcast_type)
except AttributeError:
warn('Failed to cast in to_type')
pass
return varlist
|
[
"def",
"to_type",
"(",
"upcast_type",
",",
"varlist",
")",
":",
"# convert_type = type(np.array([0], upcast_type)[0])",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"varlist",
")",
")",
":",
"# convert scalars to complex",
"if",
"np",
".",
"isscalar",
"(",
"varlist",
"[",
"i",
"]",
")",
":",
"varlist",
"[",
"i",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"varlist",
"[",
"i",
"]",
"]",
",",
"upcast_type",
")",
"[",
"0",
"]",
"else",
":",
"# convert sparse and dense mats to complex",
"try",
":",
"if",
"varlist",
"[",
"i",
"]",
".",
"dtype",
"!=",
"upcast_type",
":",
"varlist",
"[",
"i",
"]",
"=",
"varlist",
"[",
"i",
"]",
".",
"astype",
"(",
"upcast_type",
")",
"except",
"AttributeError",
":",
"warn",
"(",
"'Failed to cast in to_type'",
")",
"pass",
"return",
"varlist"
] |
Loop over all elements of varlist and convert them to upcast_type.
Parameters
----------
upcast_type : data type
e.g. complex, float64 or complex128
varlist : list
list may contain arrays, mat's, sparse matrices, or scalars
the elements may be float, int or complex
Returns
-------
Returns upcast-ed varlist to upcast_type
Notes
-----
Useful when harmonizing the types of variables, such as
if A and b are complex, but x,y and z are not.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import to_type
>>> from scipy.sparse.sputils import upcast
>>> x = np.ones((5,1))
>>> y = 2.0j*np.ones((5,1))
>>> varlist = to_type(upcast(x.dtype, y.dtype), [x, y])
|
[
"Loop",
"over",
"all",
"elements",
"of",
"varlist",
"and",
"convert",
"them",
"to",
"upcasttype",
"."
] |
python
|
train
| 28.425532 |
TurboGears/gearbox
|
gearbox/commands/serve.py
|
https://github.com/TurboGears/gearbox/blob/df496ab28050ce6a4cc4c502488f5c5812f2baff/gearbox/commands/serve.py#L624-L634
|
def _turn_sigterm_into_systemexit(): # pragma: no cover
"""
Attempts to turn a SIGTERM exception into a SystemExit exception.
"""
try:
import signal
except ImportError:
return
def handle_term(signo, frame):
raise SystemExit
signal.signal(signal.SIGTERM, handle_term)
|
[
"def",
"_turn_sigterm_into_systemexit",
"(",
")",
":",
"# pragma: no cover",
"try",
":",
"import",
"signal",
"except",
"ImportError",
":",
"return",
"def",
"handle_term",
"(",
"signo",
",",
"frame",
")",
":",
"raise",
"SystemExit",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGTERM",
",",
"handle_term",
")"
] |
Attempts to turn a SIGTERM exception into a SystemExit exception.
|
[
"Attempts",
"to",
"turn",
"a",
"SIGTERM",
"exception",
"into",
"a",
"SystemExit",
"exception",
"."
] |
python
|
train
| 28 |
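A runnable, POSIX-only sketch of the same idea: mapping SIGTERM onto SystemExit lets finally blocks and context managers run when the process is terminated externally.

import os
import signal
import time

def handle_term(signo, frame):
    raise SystemExit

signal.signal(signal.SIGTERM, handle_term)

try:
    os.kill(os.getpid(), signal.SIGTERM)  # simulate an external `kill`
    time.sleep(1)  # interrupted almost immediately by the handler
finally:
    print("cleanup ran before exit")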
thanethomson/statik
|
statik/autogen.py
|
https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/autogen.py#L67-L84
|
def generate_model_file(filename, project, model, fields):
"""Creates a webpage for a given instance of a model."""
for field in fields:
field.type = field.__class__.__name__
content = open(os.path.join(os.path.dirname(__file__), 'templates/model_page.html'), 'r').read()
engine = StatikTemplateEngine(project)
template = engine.create_template(content)
# create context and update from project.config
context = {'model': model,
'fields': fields}
context.update(dict(project.config.context_static))
string = template.render(context)
with open(filename, 'w') as file:
file.write(string)
|
[
"def",
"generate_model_file",
"(",
"filename",
",",
"project",
",",
"model",
",",
"fields",
")",
":",
"for",
"field",
"in",
"fields",
":",
"field",
".",
"type",
"=",
"field",
".",
"__class__",
".",
"__name__",
"content",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'templates/model_page.html'",
")",
",",
"'r'",
")",
".",
"read",
"(",
")",
"engine",
"=",
"StatikTemplateEngine",
"(",
"project",
")",
"template",
"=",
"engine",
".",
"create_template",
"(",
"content",
")",
"# create context and update from project.config",
"context",
"=",
"{",
"'model'",
":",
"model",
",",
"'fields'",
":",
"fields",
"}",
"context",
".",
"update",
"(",
"dict",
"(",
"project",
".",
"config",
".",
"context_static",
")",
")",
"string",
"=",
"template",
".",
"render",
"(",
"context",
")",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"file",
":",
"file",
".",
"write",
"(",
"string",
")"
] |
Creates a webpage for a given instance of a model.
|
[
"Creates",
"a",
"webpage",
"for",
"a",
"given",
"instance",
"of",
"a",
"model",
"."
] |
python
|
train
| 35.777778 |
gwpy/gwpy
|
gwpy/types/series.py
|
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/series.py#L163-L207
|
def _update_index(self, axis, key, value):
"""Update the current axis index based on a given key or value
This is an internal method designed to set the origin or step for
an index, whilst updating existing Index arrays as appropriate
Examples
--------
>>> self._update_index("x0", 0)
>>> self._update_index("dx", 0)
To actually set an index array, use `_set_index`
"""
# delete current value if given None
if value is None:
return delattr(self, key)
_key = "_{}".format(key)
index = "{[0]}index".format(axis)
unit = "{[0]}unit".format(axis)
# convert float to Quantity
if not isinstance(value, Quantity):
try:
value = Quantity(value, getattr(self, unit))
except TypeError:
value = Quantity(float(value), getattr(self, unit))
# if value is changing, delete current index
try:
curr = getattr(self, _key)
except AttributeError:
delattr(self, index)
else:
if (
value is None or
getattr(self, key) is None or
not value.unit.is_equivalent(curr.unit) or
value != curr
):
delattr(self, index)
# set new value
setattr(self, _key, value)
return value
|
[
"def",
"_update_index",
"(",
"self",
",",
"axis",
",",
"key",
",",
"value",
")",
":",
"# delete current value if given None",
"if",
"value",
"is",
"None",
":",
"return",
"delattr",
"(",
"self",
",",
"key",
")",
"_key",
"=",
"\"_{}\"",
".",
"format",
"(",
"key",
")",
"index",
"=",
"\"{[0]}index\"",
".",
"format",
"(",
"axis",
")",
"unit",
"=",
"\"{[0]}unit\"",
".",
"format",
"(",
"axis",
")",
"# convert float to Quantity",
"if",
"not",
"isinstance",
"(",
"value",
",",
"Quantity",
")",
":",
"try",
":",
"value",
"=",
"Quantity",
"(",
"value",
",",
"getattr",
"(",
"self",
",",
"unit",
")",
")",
"except",
"TypeError",
":",
"value",
"=",
"Quantity",
"(",
"float",
"(",
"value",
")",
",",
"getattr",
"(",
"self",
",",
"unit",
")",
")",
"# if value is changing, delete current index",
"try",
":",
"curr",
"=",
"getattr",
"(",
"self",
",",
"_key",
")",
"except",
"AttributeError",
":",
"delattr",
"(",
"self",
",",
"index",
")",
"else",
":",
"if",
"(",
"value",
"is",
"None",
"or",
"getattr",
"(",
"self",
",",
"key",
")",
"is",
"None",
"or",
"not",
"value",
".",
"unit",
".",
"is_equivalent",
"(",
"curr",
".",
"unit",
")",
"or",
"value",
"!=",
"curr",
")",
":",
"delattr",
"(",
"self",
",",
"index",
")",
"# set new value",
"setattr",
"(",
"self",
",",
"_key",
",",
"value",
")",
"return",
"value"
] |
Update the current axis index based on a given key or value
This is an internal method designed to set the origin or step for
an index, whilst updating existing Index arrays as appropriate
Examples
--------
>>> self._update_index("x0", 0)
>>> self._update_index("dx", 0)
To actually set an index array, use `_set_index`
|
[
"Update",
"the",
"current",
"axis",
"index",
"based",
"on",
"a",
"given",
"key",
"or",
"value"
] |
python
|
train
| 31.111111 |
ray-project/ray
|
python/ray/actor.py
|
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/actor.py#L631-L672
|
def _deserialization_helper(self, state, ray_forking):
"""This is defined in order to make pickling work.
Args:
state: The serialized state of the actor handle.
ray_forking: True if this is being called because Ray is forking
the actor handle and false if it is being called by pickling.
"""
worker = ray.worker.get_global_worker()
worker.check_connected()
if state["ray_forking"]:
actor_handle_id = state["actor_handle_id"]
else:
# Right now, if the actor handle has been pickled, we create a
# temporary actor handle id for invocations.
# TODO(pcm): This still leads to a lot of actor handles being
# created, there should be a better way to handle pickled
# actor handles.
# TODO(swang): Accessing the worker's current task ID is not
# thread-safe.
# TODO(swang): Unpickling the same actor handle twice in the same
# task will break the application, and unpickling it twice in the
# same actor is likely a performance bug. We should consider
# logging a warning in these cases.
actor_handle_id = compute_actor_handle_id_non_forked(
state["actor_handle_id"], worker.current_task_id)
self.__init__(
state["actor_id"],
state["module_name"],
state["class_name"],
state["actor_cursor"],
state["actor_method_names"],
state["method_signatures"],
state["method_num_return_vals"],
state["actor_creation_dummy_object_id"],
state["actor_method_cpus"],
# This is the driver ID of the driver that owns the actor, not
# necessarily the driver that owns this actor handle.
state["actor_driver_id"],
actor_handle_id=actor_handle_id)
|
[
"def",
"_deserialization_helper",
"(",
"self",
",",
"state",
",",
"ray_forking",
")",
":",
"worker",
"=",
"ray",
".",
"worker",
".",
"get_global_worker",
"(",
")",
"worker",
".",
"check_connected",
"(",
")",
"if",
"state",
"[",
"\"ray_forking\"",
"]",
":",
"actor_handle_id",
"=",
"state",
"[",
"\"actor_handle_id\"",
"]",
"else",
":",
"# Right now, if the actor handle has been pickled, we create a",
"# temporary actor handle id for invocations.",
"# TODO(pcm): This still leads to a lot of actor handles being",
"# created, there should be a better way to handle pickled",
"# actor handles.",
"# TODO(swang): Accessing the worker's current task ID is not",
"# thread-safe.",
"# TODO(swang): Unpickling the same actor handle twice in the same",
"# task will break the application, and unpickling it twice in the",
"# same actor is likely a performance bug. We should consider",
"# logging a warning in these cases.",
"actor_handle_id",
"=",
"compute_actor_handle_id_non_forked",
"(",
"state",
"[",
"\"actor_handle_id\"",
"]",
",",
"worker",
".",
"current_task_id",
")",
"self",
".",
"__init__",
"(",
"state",
"[",
"\"actor_id\"",
"]",
",",
"state",
"[",
"\"module_name\"",
"]",
",",
"state",
"[",
"\"class_name\"",
"]",
",",
"state",
"[",
"\"actor_cursor\"",
"]",
",",
"state",
"[",
"\"actor_method_names\"",
"]",
",",
"state",
"[",
"\"method_signatures\"",
"]",
",",
"state",
"[",
"\"method_num_return_vals\"",
"]",
",",
"state",
"[",
"\"actor_creation_dummy_object_id\"",
"]",
",",
"state",
"[",
"\"actor_method_cpus\"",
"]",
",",
"# This is the driver ID of the driver that owns the actor, not",
"# necessarily the driver that owns this actor handle.",
"state",
"[",
"\"actor_driver_id\"",
"]",
",",
"actor_handle_id",
"=",
"actor_handle_id",
")"
] |
This is defined in order to make pickling work.
Args:
state: The serialized state of the actor handle.
ray_forking: True if this is being called because Ray is forking
the actor handle and false if it is being called by pickling.
|
[
"This",
"is",
"defined",
"in",
"order",
"to",
"make",
"pickling",
"work",
"."
] |
python
|
train
| 45.547619 |
juju/charm-helpers
|
charmhelpers/fetch/ubuntu.py
|
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/fetch/ubuntu.py#L440-L450
|
def _write_apt_gpg_keyfile(key_name, key_material):
"""Writes GPG key material into a file at a provided path.
:param key_name: A key name to use for a key file (could be a fingerprint)
:type key_name: str
:param key_material: A GPG key material (binary)
:type key_material: (str, bytes)
"""
with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name),
'wb') as keyf:
keyf.write(key_material)
|
[
"def",
"_write_apt_gpg_keyfile",
"(",
"key_name",
",",
"key_material",
")",
":",
"with",
"open",
"(",
"'/etc/apt/trusted.gpg.d/{}.gpg'",
".",
"format",
"(",
"key_name",
")",
",",
"'wb'",
")",
"as",
"keyf",
":",
"keyf",
".",
"write",
"(",
"key_material",
")"
] |
Writes GPG key material into a file at a provided path.
:param key_name: A key name to use for a key file (could be a fingerprint)
:type key_name: str
:param key_material: A GPG key material (binary)
:type key_material: (str, bytes)
|
[
"Writes",
"GPG",
"key",
"material",
"into",
"a",
"file",
"at",
"a",
"provided",
"path",
"."
] |
python
|
train
| 39.272727 |
moonso/vcftoolbox
|
vcftoolbox/parse_variant.py
|
https://github.com/moonso/vcftoolbox/blob/438fb1d85a83812c389774b94802eb5921c89e3a/vcftoolbox/parse_variant.py#L113-L135
|
def get_snpeff_info(snpeff_string, snpeff_header):
"""Make the vep annotations into a dictionaries
A snpeff dictionary will have the snpeff column names as keys and
the vep annotations as values.
The dictionaries are stored in a list.
One dictionary for each transcript.
Args:
snpeff_string (string): A string with the ANN annotation
snpeff_header (list): A list with the vep header
Return:
snpeff_annotations (list): A list of vep dicts
"""
snpeff_annotations = [
dict(zip(snpeff_header, snpeff_annotation.split('|')))
for snpeff_annotation in snpeff_string.split(',')
]
return snpeff_annotations
|
[
"def",
"get_snpeff_info",
"(",
"snpeff_string",
",",
"snpeff_header",
")",
":",
"snpeff_annotations",
"=",
"[",
"dict",
"(",
"zip",
"(",
"snpeff_header",
",",
"snpeff_annotation",
".",
"split",
"(",
"'|'",
")",
")",
")",
"for",
"snpeff_annotation",
"in",
"snpeff_string",
".",
"split",
"(",
"','",
")",
"]",
"return",
"snpeff_annotations"
] |
Make the vep annotations into dictionaries
A snpeff dictionary will have the snpeff column names as keys and
the vep annotations as values.
The dictionaries are stored in a list.
One dictionary for each transcript.
Args:
snpeff_string (string): A string with the ANN annotation
snpeff_header (list): A list with the vep header
Return:
snpeff_annotations (list): A list of vep dicts
|
[
"Make",
"the",
"vep",
"annotations",
"into",
"a",
"dictionaries",
"A",
"snpeff",
"dictionary",
"will",
"have",
"the",
"snpeff",
"column",
"names",
"as",
"keys",
"and",
"the",
"vep",
"annotations",
"as",
"values",
".",
"The",
"dictionaries",
"are",
"stored",
"in",
"a",
"list",
".",
"One",
"dictionary",
"for",
"each",
"transcript",
"."
] |
python
|
train
| 31.652174 |
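An illustrative call to `get_snpeff_info` above; the header names and ANN string below are invented stand-ins, not real snpEff output:

header = ['Allele', 'Annotation', 'Gene_Name']
ann = 'A|missense_variant|GENE1,C|synonymous_variant|GENE2'
print(get_snpeff_info(ann, header))
# -> [{'Allele': 'A', 'Annotation': 'missense_variant', 'Gene_Name': 'GENE1'},
#     {'Allele': 'C', 'Annotation': 'synonymous_variant', 'Gene_Name': 'GENE2'}]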
linuxsoftware/ls.joyous
|
ls/joyous/utils/telltime.py
|
https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/utils/telltime.py#L36-L50
|
def getLocalDatetime(date, time, tz=None, timeDefault=dt.time.max):
"""
Get a datetime in the local timezone from date and optionally time
"""
localTZ = timezone.get_current_timezone()
if tz is None or tz == localTZ:
localDt = getAwareDatetime(date, time, tz, timeDefault)
else:
# create in event's time zone
eventDt = getAwareDatetime(date, time, tz, timeDefault)
# convert to local time zone
localDt = eventDt.astimezone(localTZ)
if time is None:
localDt = getAwareDatetime(localDt.date(), None, localTZ, timeDefault)
return localDt
|
[
"def",
"getLocalDatetime",
"(",
"date",
",",
"time",
",",
"tz",
"=",
"None",
",",
"timeDefault",
"=",
"dt",
".",
"time",
".",
"max",
")",
":",
"localTZ",
"=",
"timezone",
".",
"get_current_timezone",
"(",
")",
"if",
"tz",
"is",
"None",
"or",
"tz",
"==",
"localTZ",
":",
"localDt",
"=",
"getAwareDatetime",
"(",
"date",
",",
"time",
",",
"tz",
",",
"timeDefault",
")",
"else",
":",
"# create in event's time zone",
"eventDt",
"=",
"getAwareDatetime",
"(",
"date",
",",
"time",
",",
"tz",
",",
"timeDefault",
")",
"# convert to local time zone",
"localDt",
"=",
"eventDt",
".",
"astimezone",
"(",
"localTZ",
")",
"if",
"time",
"is",
"None",
":",
"localDt",
"=",
"getAwareDatetime",
"(",
"localDt",
".",
"date",
"(",
")",
",",
"None",
",",
"localTZ",
",",
"timeDefault",
")",
"return",
"localDt"
] |
Get a datetime in the local timezone from date and optionally time
|
[
"Get",
"a",
"datetime",
"in",
"the",
"local",
"timezone",
"from",
"date",
"and",
"optionally",
"time"
] |
python
|
train
| 40.533333 |
pantsbuild/pants
|
src/python/pants/backend/jvm/tasks/jvmdoc_gen.py
|
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/tasks/jvmdoc_gen.py#L82-L140
|
def generate_doc(self, language_predicate, create_jvmdoc_command):
"""
Generate an execute method given a language predicate and command to create documentation
language_predicate: a function that accepts a target and returns True if the target is of that
language
create_jvmdoc_command: (classpath, directory, *targets) -> command (string) that will generate
documentation documentation for targets
"""
catalog = self.context.products.isrequired(self.jvmdoc().product_type)
if catalog and self.combined:
raise TaskError(
'Cannot provide {} target mappings for combined output'.format(self.jvmdoc().product_type))
def docable(target):
if not language_predicate(target):
self.context.log.debug('Skipping [{}] because it does not pass the language predicate'.format(target.address.spec))
return False
if not self._include_codegen and target.is_synthetic:
self.context.log.debug('Skipping [{}] because it is a synthetic target'.format(target.address.spec))
return False
for pattern in self._exclude_patterns:
if pattern.search(target.address.spec):
self.context.log.debug(
"Skipping [{}] because it matches exclude pattern '{}'".format(target.address.spec, pattern.pattern))
return False
return True
targets = self.get_targets(predicate=docable)
if not targets:
return
with self.invalidated(targets, invalidate_dependents=self.combined) as invalidation_check:
def find_invalid_targets():
invalid_targets = set()
for vt in invalidation_check.invalid_vts:
invalid_targets.update(vt.targets)
return invalid_targets
invalid_targets = list(find_invalid_targets())
if invalid_targets:
if self.combined:
self._generate_combined(targets, create_jvmdoc_command)
else:
self._generate_individual(invalid_targets, create_jvmdoc_command)
if self.open and self.combined:
try:
desktop.ui_open(os.path.join(self.workdir, 'combined', 'index.html'))
except desktop.OpenError as e:
raise TaskError(e)
if catalog:
for target in targets:
gendir = self._gendir(target)
jvmdocs = []
for root, dirs, files in safe_walk(gendir):
jvmdocs.extend(os.path.relpath(os.path.join(root, f), gendir) for f in files)
self.context.products.get(self.jvmdoc().product_type).add(target, gendir, jvmdocs)
|
[
"def",
"generate_doc",
"(",
"self",
",",
"language_predicate",
",",
"create_jvmdoc_command",
")",
":",
"catalog",
"=",
"self",
".",
"context",
".",
"products",
".",
"isrequired",
"(",
"self",
".",
"jvmdoc",
"(",
")",
".",
"product_type",
")",
"if",
"catalog",
"and",
"self",
".",
"combined",
":",
"raise",
"TaskError",
"(",
"'Cannot provide {} target mappings for combined output'",
".",
"format",
"(",
"self",
".",
"jvmdoc",
"(",
")",
".",
"product_type",
")",
")",
"def",
"docable",
"(",
"target",
")",
":",
"if",
"not",
"language_predicate",
"(",
"target",
")",
":",
"self",
".",
"context",
".",
"log",
".",
"debug",
"(",
"'Skipping [{}] because it is does not pass the language predicate'",
".",
"format",
"(",
"target",
".",
"address",
".",
"spec",
")",
")",
"return",
"False",
"if",
"not",
"self",
".",
"_include_codegen",
"and",
"target",
".",
"is_synthetic",
":",
"self",
".",
"context",
".",
"log",
".",
"debug",
"(",
"'Skipping [{}] because it is a synthetic target'",
".",
"format",
"(",
"target",
".",
"address",
".",
"spec",
")",
")",
"return",
"False",
"for",
"pattern",
"in",
"self",
".",
"_exclude_patterns",
":",
"if",
"pattern",
".",
"search",
"(",
"target",
".",
"address",
".",
"spec",
")",
":",
"self",
".",
"context",
".",
"log",
".",
"debug",
"(",
"\"Skipping [{}] because it matches exclude pattern '{}'\"",
".",
"format",
"(",
"target",
".",
"address",
".",
"spec",
",",
"pattern",
".",
"pattern",
")",
")",
"return",
"False",
"return",
"True",
"targets",
"=",
"self",
".",
"get_targets",
"(",
"predicate",
"=",
"docable",
")",
"if",
"not",
"targets",
":",
"return",
"with",
"self",
".",
"invalidated",
"(",
"targets",
",",
"invalidate_dependents",
"=",
"self",
".",
"combined",
")",
"as",
"invalidation_check",
":",
"def",
"find_invalid_targets",
"(",
")",
":",
"invalid_targets",
"=",
"set",
"(",
")",
"for",
"vt",
"in",
"invalidation_check",
".",
"invalid_vts",
":",
"invalid_targets",
".",
"update",
"(",
"vt",
".",
"targets",
")",
"return",
"invalid_targets",
"invalid_targets",
"=",
"list",
"(",
"find_invalid_targets",
"(",
")",
")",
"if",
"invalid_targets",
":",
"if",
"self",
".",
"combined",
":",
"self",
".",
"_generate_combined",
"(",
"targets",
",",
"create_jvmdoc_command",
")",
"else",
":",
"self",
".",
"_generate_individual",
"(",
"invalid_targets",
",",
"create_jvmdoc_command",
")",
"if",
"self",
".",
"open",
"and",
"self",
".",
"combined",
":",
"try",
":",
"desktop",
".",
"ui_open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"workdir",
",",
"'combined'",
",",
"'index.html'",
")",
")",
"except",
"desktop",
".",
"OpenError",
"as",
"e",
":",
"raise",
"TaskError",
"(",
"e",
")",
"if",
"catalog",
":",
"for",
"target",
"in",
"targets",
":",
"gendir",
"=",
"self",
".",
"_gendir",
"(",
"target",
")",
"jvmdocs",
"=",
"[",
"]",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"safe_walk",
"(",
"gendir",
")",
":",
"jvmdocs",
".",
"extend",
"(",
"os",
".",
"path",
".",
"relpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"f",
")",
",",
"gendir",
")",
"for",
"f",
"in",
"files",
")",
"self",
".",
"context",
".",
"products",
".",
"get",
"(",
"self",
".",
"jvmdoc",
"(",
")",
".",
"product_type",
")",
".",
"add",
"(",
"target",
",",
"gendir",
",",
"jvmdocs",
")"
] |
Generate an execute method given a language predicate and command to create documentation
language_predicate: a function that accepts a target and returns True if the target is of that
language
create_jvmdoc_command: (classpath, directory, *targets) -> command (string) that will generate
documentation documentation for targets
|
[
"Generate",
"an",
"execute",
"method",
"given",
"a",
"language",
"predicate",
"and",
"command",
"to",
"create",
"documentation"
] |
python
|
train
| 42.338983 |
phaethon/kamene
|
kamene/plist.py
|
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/plist.py#L133-L136
|
def filter(self, func):
"""Returns a packet list filtered by a truth function"""
return self.__class__(list(filter(func,self.res)),
name="filtered %s"%self.listname)
|
[
"def",
"filter",
"(",
"self",
",",
"func",
")",
":",
"return",
"self",
".",
"__class__",
"(",
"list",
"(",
"filter",
"(",
"func",
",",
"self",
".",
"res",
")",
")",
",",
"name",
"=",
"\"filtered %s\"",
"%",
"self",
".",
"listname",
")"
] |
Returns a packet list filtered by a truth function
|
[
"Returns",
"a",
"packet",
"list",
"filtered",
"by",
"a",
"truth",
"function"
] |
python
|
train
| 52 |
rwl/pylon
|
pylon/case.py
|
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/case.py#L988-L992
|
def save_dot(self, fd):
""" Saves a representation of the case in the Graphviz DOT language.
"""
from pylon.io import DotWriter
DotWriter(self).write(fd)
|
[
"def",
"save_dot",
"(",
"self",
",",
"fd",
")",
":",
"from",
"pylon",
".",
"io",
"import",
"DotWriter",
"DotWriter",
"(",
"self",
")",
".",
"write",
"(",
"fd",
")"
] |
Saves a representation of the case in the Graphviz DOT language.
|
[
"Saves",
"a",
"representation",
"of",
"the",
"case",
"in",
"the",
"Graphviz",
"DOT",
"language",
"."
] |
python
|
train
| 36.2 |
mbj4668/pyang
|
pyang/translators/schemanode.py
|
https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/schemanode.py#L176-L182
|
def end_tag(self, alt=None):
"""Return XML end tag for the receiver."""
if alt:
name = alt
else:
name = self.name
return "</" + name + ">"
|
[
"def",
"end_tag",
"(",
"self",
",",
"alt",
"=",
"None",
")",
":",
"if",
"alt",
":",
"name",
"=",
"alt",
"else",
":",
"name",
"=",
"self",
".",
"name",
"return",
"\"</\"",
"+",
"name",
"+",
"\">\""
] |
Return XML end tag for the receiver.
|
[
"Return",
"XML",
"end",
"tag",
"for",
"the",
"receiver",
"."
] |
python
|
train
| 26.857143 |
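The behaviour of `end_tag` above can be checked with a minimal stand-in object (a hypothetical class, not part of pyang):

class Node:
    def __init__(self, name):
        self.name = name
    def end_tag(self, alt=None):
        # same logic as the schemanode method above
        name = alt if alt else self.name
        return "</" + name + ">"

n = Node("container")
print(n.end_tag())        # </container>
print(n.end_tag("leaf"))  # </leaf>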
saltstack/salt
|
salt/pillar/sqlite3.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/sqlite3.py#L110-L117
|
def ext_pillar(minion_id,
pillar,
*args,
**kwargs):
'''
Execute queries against SQLite3, merge and return as a dict
'''
return SQLite3ExtPillar().fetch(minion_id, pillar, *args, **kwargs)
|
[
"def",
"ext_pillar",
"(",
"minion_id",
",",
"pillar",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"SQLite3ExtPillar",
"(",
")",
".",
"fetch",
"(",
"minion_id",
",",
"pillar",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Execute queries against SQLite3, merge and return as a dict
|
[
"Execute",
"queries",
"against",
"SQLite3",
"merge",
"and",
"return",
"as",
"a",
"dict"
] |
python
|
train
| 30.125 |
mardix/Mocha
|
mocha/contrib/auth/__init__.py
|
https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/contrib/auth/__init__.py#L367-L382
|
def change_email(self, email):
"""
Change user's login email
:param user: AuthUser
:param email:
:return:
"""
def cb():
if not utils.is_email_valid(email):
raise exceptions.AuthError("Email address invalid")
self.user.change_email(email)
return email
return signals.user_update(self, ACTIONS["EMAIL"], cb,
{"email": self.email})
|
[
"def",
"change_email",
"(",
"self",
",",
"email",
")",
":",
"def",
"cb",
"(",
")",
":",
"if",
"not",
"utils",
".",
"is_email_valid",
"(",
"email",
")",
":",
"raise",
"exceptions",
".",
"AuthError",
"(",
"\"Email address invalid\"",
")",
"self",
".",
"user",
".",
"change_email",
"(",
"email",
")",
"return",
"email",
"return",
"signals",
".",
"user_update",
"(",
"self",
",",
"ACTIONS",
"[",
"\"EMAIL\"",
"]",
",",
"cb",
",",
"{",
"\"email\"",
":",
"self",
".",
"email",
"}",
")"
] |
Change user's login email
:param user: AuthUser
:param email:
:return:
|
[
"Change",
"user",
"s",
"login",
"email",
":",
"param",
"user",
":",
"AuthUser",
":",
"param",
"email",
":",
":",
"return",
":"
] |
python
|
train
| 29.125 |
quintusdias/glymur
|
glymur/jp2k.py
|
https://github.com/quintusdias/glymur/blob/8b8fb091130fff00f1028dc82219e69e3f9baf6d/glymur/jp2k.py#L308-L330
|
def _validate(self):
"""Validate the JPEG 2000 outermost superbox. These checks must be
done at a file level.
"""
# A JP2 file must contain certain boxes. The 2nd box must be a file
# type box.
if not isinstance(self.box[1], FileTypeBox):
msg = "{filename} does not contain a valid File Type box."
msg = msg.format(filename=self.filename)
raise IOError(msg)
# A jp2-branded file cannot contain an "any ICC profile" method.
ftyp = self.box[1]
if ftyp.brand == 'jp2 ':
jp2h = [box for box in self.box if box.box_id == 'jp2h'][0]
colrs = [box for box in jp2h.box if box.box_id == 'colr']
for colr in colrs:
if colr.method not in (core.ENUMERATED_COLORSPACE,
core.RESTRICTED_ICC_PROFILE):
msg = ("Color Specification box method must specify "
"either an enumerated colorspace or a restricted "
"ICC profile if the file type box brand is 'jp2 '.")
warnings.warn(msg, UserWarning)
|
[
"def",
"_validate",
"(",
"self",
")",
":",
"# A JP2 file must contain certain boxes. The 2nd box must be a file",
"# type box.",
"if",
"not",
"isinstance",
"(",
"self",
".",
"box",
"[",
"1",
"]",
",",
"FileTypeBox",
")",
":",
"msg",
"=",
"\"{filename} does not contain a valid File Type box.\"",
"msg",
"=",
"msg",
".",
"format",
"(",
"filename",
"=",
"self",
".",
"filename",
")",
"raise",
"IOError",
"(",
"msg",
")",
"# A jp2-branded file cannot contain an \"any ICC profile",
"ftyp",
"=",
"self",
".",
"box",
"[",
"1",
"]",
"if",
"ftyp",
".",
"brand",
"==",
"'jp2 '",
":",
"jp2h",
"=",
"[",
"box",
"for",
"box",
"in",
"self",
".",
"box",
"if",
"box",
".",
"box_id",
"==",
"'jp2h'",
"]",
"[",
"0",
"]",
"colrs",
"=",
"[",
"box",
"for",
"box",
"in",
"jp2h",
".",
"box",
"if",
"box",
".",
"box_id",
"==",
"'colr'",
"]",
"for",
"colr",
"in",
"colrs",
":",
"if",
"colr",
".",
"method",
"not",
"in",
"(",
"core",
".",
"ENUMERATED_COLORSPACE",
",",
"core",
".",
"RESTRICTED_ICC_PROFILE",
")",
":",
"msg",
"=",
"(",
"\"Color Specification box method must specify \"",
"\"either an enumerated colorspace or a restricted \"",
"\"ICC profile if the file type box brand is 'jp2 '.\"",
")",
"warnings",
".",
"warn",
"(",
"msg",
",",
"UserWarning",
")"
] |
Validate the JPEG 2000 outermost superbox. These checks must be
done at a file level.
|
[
"Validate",
"the",
"JPEG",
"2000",
"outermost",
"superbox",
".",
"These",
"checks",
"must",
"be",
"done",
"at",
"a",
"file",
"level",
"."
] |
python
|
train
| 49.521739 |
openatx/facebook-wda
|
wda/__init__.py
|
https://github.com/openatx/facebook-wda/blob/aa644204620c6d5c7705a9c7452d8c0cc39330d5/wda/__init__.py#L786-L795
|
def find_elements(self):
"""
Returns:
Element (list): all the elements
"""
es = []
for element_id in self.find_element_ids():
e = Element(self.http.new_client(''), element_id)
es.append(e)
return es
|
[
"def",
"find_elements",
"(",
"self",
")",
":",
"es",
"=",
"[",
"]",
"for",
"element_id",
"in",
"self",
".",
"find_element_ids",
"(",
")",
":",
"e",
"=",
"Element",
"(",
"self",
".",
"http",
".",
"new_client",
"(",
"''",
")",
",",
"element_id",
")",
"es",
".",
"append",
"(",
"e",
")",
"return",
"es"
] |
Returns:
Element (list): all the elements
|
[
"Returns",
":",
"Element",
"(",
"list",
")",
":",
"all",
"the",
"elements"
] |
python
|
train
| 27.3 |
ella/ella
|
ella/core/models/main.py
|
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/core/models/main.py#L129-L145
|
def save(self, **kwargs):
"Override save() to construct tree_path based on the category's parent."
old_tree_path = self.tree_path
if self.tree_parent:
if self.tree_parent.tree_path:
self.tree_path = '%s/%s' % (self.tree_parent.tree_path, self.slug)
else:
self.tree_path = self.slug
else:
self.tree_path = ''
Category.objects.clear_cache()
super(Category, self).save(**kwargs)
if old_tree_path != self.tree_path:
# the tree_path has changed, update children
children = Category.objects.filter(tree_parent=self)
for child in children:
child.save(force_update=True)
|
[
"def",
"save",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"old_tree_path",
"=",
"self",
".",
"tree_path",
"if",
"self",
".",
"tree_parent",
":",
"if",
"self",
".",
"tree_parent",
".",
"tree_path",
":",
"self",
".",
"tree_path",
"=",
"'%s/%s'",
"%",
"(",
"self",
".",
"tree_parent",
".",
"tree_path",
",",
"self",
".",
"slug",
")",
"else",
":",
"self",
".",
"tree_path",
"=",
"self",
".",
"slug",
"else",
":",
"self",
".",
"tree_path",
"=",
"''",
"Category",
".",
"objects",
".",
"clear_cache",
"(",
")",
"super",
"(",
"Category",
",",
"self",
")",
".",
"save",
"(",
"*",
"*",
"kwargs",
")",
"if",
"old_tree_path",
"!=",
"self",
".",
"tree_path",
":",
"# the tree_path has changed, update children",
"children",
"=",
"Category",
".",
"objects",
".",
"filter",
"(",
"tree_parent",
"=",
"self",
")",
"for",
"child",
"in",
"children",
":",
"child",
".",
"save",
"(",
"force_update",
"=",
"True",
")"
] |
Override save() to construct tree_path based on the category's parent.
|
[
"Override",
"save",
"()",
"to",
"construct",
"tree_path",
"based",
"on",
"the",
"category",
"s",
"parent",
"."
] |
python
|
train
| 42.470588 |
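The tree_path construction in `save` above concatenates ancestor paths; a pure-Python sketch of just that rule (no Django involved, names are illustrative):

def build_tree_path(parent_tree_path, slug):
    # mirrors the branch logic of Category.save; None means no parent
    if parent_tree_path is None:
        return ''                      # the root category has an empty path
    if parent_tree_path:
        return '%s/%s' % (parent_tree_path, slug)
    return slug                        # direct child of the root

print(build_tree_path(None, 'news'))      # ''
print(build_tree_path('', 'news'))        # 'news'
print(build_tree_path('news', 'sport'))   # 'news/sport'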
JoseAntFer/pyny3d
|
pyny3d/geoms.py
|
https://github.com/JoseAntFer/pyny3d/blob/fb81684935a24f7e50c975cb4383c81a63ab56df/pyny3d/geoms.py#L434-L474
|
def get_height(self, points, only_in = True, edge=True, full=False):
"""
Given a set of points, it computes the z value for the
parametric equation of the plane where the polygon belongs.
Only the two first columns of the points will be taken into
account as x and y.
By default, the points outside the object will have a NaN value
in the z column. If the inputted points have a third column the z
values outside the Surface's domain will remain unchanged, the
rest will be replaced.
:param points: Coordinates of the points to calculate.
:type points: ndarray shape=(N, 2 or 3)
:param only_in: If True, computes only the points which are
inside of the Polygon.
:type only_in: bool
:param edge: If True, consider the points in the Polygon's edge
inside the Polygon.
:type edge: bool
:param full: If true, the return will have three columns
(x, y, z) instead of one (z).
:type full: bool
:returns: (z) or (x, y, z)
:rtype: ndarray shape=(N, 1 or 3)
"""
p = self.get_parametric()
z = (-p[0]*points[:, 0]-p[1]*points[:, 1]-p[3])/p[2]
if only_in:
pip = self.contains(points, edge=edge)
z[pip == False] *= np.nan
if full:
z = np.hstack((points[:, :2],
np.reshape(z, (points.shape[0], 1))))
if points.shape[1] == 3: # Restore original z
z[pip == False] = points[pip == False]
return z
|
[
"def",
"get_height",
"(",
"self",
",",
"points",
",",
"only_in",
"=",
"True",
",",
"edge",
"=",
"True",
",",
"full",
"=",
"False",
")",
":",
"p",
"=",
"self",
".",
"get_parametric",
"(",
")",
"z",
"=",
"(",
"-",
"p",
"[",
"0",
"]",
"*",
"points",
"[",
":",
",",
"0",
"]",
"-",
"p",
"[",
"1",
"]",
"*",
"points",
"[",
":",
",",
"1",
"]",
"-",
"p",
"[",
"3",
"]",
")",
"/",
"p",
"[",
"2",
"]",
"if",
"only_in",
":",
"pip",
"=",
"self",
".",
"contains",
"(",
"points",
",",
"edge",
"=",
"edge",
")",
"z",
"[",
"pip",
"==",
"False",
"]",
"*=",
"np",
".",
"nan",
"if",
"full",
":",
"z",
"=",
"np",
".",
"hstack",
"(",
"(",
"points",
"[",
":",
",",
":",
"2",
"]",
",",
"np",
".",
"reshape",
"(",
"z",
",",
"(",
"points",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
")",
")",
")",
"if",
"points",
".",
"shape",
"[",
"1",
"]",
"==",
"3",
":",
"# Restore original z\r",
"z",
"[",
"pip",
"==",
"False",
"]",
"=",
"points",
"[",
"pip",
"==",
"False",
"]",
"return",
"z"
] |
Given a set of points, it computes the z value for the
parametric equation of the plane where the polygon belongs.
Only the two first columns of the points will be taken into
account as x and y.
By default, the points outside the object will have a NaN value
in the z column. If the inputted points have a third column the z
values outside the Surface's domain will remain unchanged, the
rest will be replaced.
:param points: Coordinates of the points to calculate.
:type points: ndarray shape=(N, 2 or 3)
:param only_in: If True, computes only the points which are
inside of the Polygon.
:type only_in: bool
:param edge: If True, consider the points in the Polygon's edge
inside the Polygon.
:type edge: bool
:param full: If true, the return will have three columns
(x, y, z) instead of one (z).
:type full: bool
:returns: (z) or (x, y, z)
:rtype: ndarray shape=(N, 1 or 3)
|
[
"Given",
"a",
"set",
"of",
"points",
"it",
"computes",
"the",
"z",
"value",
"for",
"the",
"parametric",
"equation",
"of",
"the",
"plane",
"where",
"the",
"polygon",
"belongs",
".",
"Only",
"the",
"two",
"first",
"columns",
"of",
"the",
"points",
"will",
"be",
"taken",
"into",
"account",
"as",
"x",
"and",
"y",
".",
"By",
"default",
"the",
"points",
"outside",
"the",
"object",
"will",
"have",
"a",
"NaN",
"value",
"in",
"the",
"z",
"column",
".",
"If",
"the",
"inputed",
"points",
"has",
"a",
"third",
"column",
"the",
"z",
"values",
"outside",
"the",
"Surface",
"s",
"domain",
"will",
"remain",
"unchanged",
"the",
"rest",
"will",
"be",
"replaced",
".",
":",
"param",
"points",
":",
"Coordinates",
"of",
"the",
"points",
"to",
"calculate",
".",
":",
"type",
"points",
":",
"ndarray",
"shape",
"=",
"(",
"N",
"2",
"or",
"3",
")",
":",
"param",
"only_in",
":",
"If",
"True",
"computes",
"only",
"the",
"points",
"which",
"are",
"inside",
"of",
"the",
"Polygon",
".",
":",
"type",
"only_in",
":",
"bool",
":",
"param",
"edge",
":",
"If",
"True",
"consider",
"the",
"points",
"in",
"the",
"Polygon",
"s",
"edge",
"inside",
"the",
"Polygon",
".",
":",
"type",
"edge",
":",
"bool",
":",
"param",
"full",
":",
"If",
"true",
"the",
"return",
"will",
"have",
"three",
"columns",
"(",
"x",
"y",
"z",
")",
"instead",
"of",
"one",
"(",
"z",
")",
".",
":",
"type",
"full",
":",
"bool",
":",
"returns",
":",
"(",
"z",
")",
"or",
"(",
"x",
"y",
"z",
")",
":",
"rtype",
":",
"ndarray",
"shape",
"=",
"(",
"N",
"1",
"or",
"3",
")"
] |
python
|
train
| 40.560976 |
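The z computation in `get_height` above solves the plane equation a*x + b*y + c*z + d = 0 for z; a small NumPy check, assuming parametric coefficients p = (a, b, c, d):

import numpy as np

p = (0.0, 0.0, 1.0, -2.0)                 # the plane z = 2
points = np.array([[0.0, 0.0], [1.0, 5.0]])
z = (-p[0]*points[:, 0] - p[1]*points[:, 1] - p[3]) / p[2]
print(z)                                   # [2. 2.]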
ThreatConnect-Inc/tcex
|
tcex/tcex.py
|
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex.py#L63-L86
|
def _association_types(self):
"""Retrieve Custom Indicator Associations types from the ThreatConnect API."""
# Dynamically create custom indicator class
r = self.session.get('/v2/types/associationTypes')
# check for bad status code and response that is not JSON
if not r.ok or 'application/json' not in r.headers.get('content-type', ''):
warn = u'Custom Indicators Associations are not supported.'
self.log.warning(warn)
return
# validate successful API results
data = r.json()
if data.get('status') != 'Success':
warn = u'Bad Status: Custom Indicators Associations are not supported.'
self.log.warning(warn)
return
try:
# Association Type Name is not a unique value at this time, but should be.
for association in data.get('data', {}).get('associationType', []):
self._indicator_associations_types_data[association.get('name')] = association
except Exception as e:
self.handle_error(200, [e])
|
[
"def",
"_association_types",
"(",
"self",
")",
":",
"# Dynamically create custom indicator class",
"r",
"=",
"self",
".",
"session",
".",
"get",
"(",
"'/v2/types/associationTypes'",
")",
"# check for bad status code and response that is not JSON",
"if",
"not",
"r",
".",
"ok",
"or",
"'application/json'",
"not",
"in",
"r",
".",
"headers",
".",
"get",
"(",
"'content-type'",
",",
"''",
")",
":",
"warn",
"=",
"u'Custom Indicators Associations are not supported.'",
"self",
".",
"log",
".",
"warning",
"(",
"warn",
")",
"return",
"# validate successful API results",
"data",
"=",
"r",
".",
"json",
"(",
")",
"if",
"data",
".",
"get",
"(",
"'status'",
")",
"!=",
"'Success'",
":",
"warn",
"=",
"u'Bad Status: Custom Indicators Associations are not supported.'",
"self",
".",
"log",
".",
"warning",
"(",
"warn",
")",
"return",
"try",
":",
"# Association Type Name is not a unique value at this time, but should be.",
"for",
"association",
"in",
"data",
".",
"get",
"(",
"'data'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'associationType'",
",",
"[",
"]",
")",
":",
"self",
".",
"_indicator_associations_types_data",
"[",
"association",
".",
"get",
"(",
"'name'",
")",
"]",
"=",
"association",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"handle_error",
"(",
"200",
",",
"[",
"e",
"]",
")"
] |
Retrieve Custom Indicator Associations types from the ThreatConnect API.
|
[
"Retrieve",
"Custom",
"Indicator",
"Associations",
"types",
"from",
"the",
"ThreatConnect",
"API",
"."
] |
python
|
train
| 44.875 |
PiotrDabkowski/Js2Py
|
js2py/legecy_translators/utils.py
|
https://github.com/PiotrDabkowski/Js2Py/blob/c0fa43f5679cf91ca8986c5747fcb07a433dc584/js2py/legecy_translators/utils.py#L16-L21
|
def is_valid_lval(t):
"""Checks whether t is valid JS identifier name (no keyword like var, function, if etc)
Also returns false on internal"""
if not is_internal(t) and is_lval(t) and t not in RESERVED_NAMES:
return True
return False
|
[
"def",
"is_valid_lval",
"(",
"t",
")",
":",
"if",
"not",
"is_internal",
"(",
"t",
")",
"and",
"is_lval",
"(",
"t",
")",
"and",
"t",
"not",
"in",
"RESERVED_NAMES",
":",
"return",
"True",
"return",
"False"
] |
Checks whether t is valid JS identifier name (no keyword like var, function, if etc)
Also returns false on internal
|
[
"Checks",
"whether",
"t",
"is",
"valid",
"JS",
"identifier",
"name",
"(",
"no",
"keyword",
"like",
"var",
"function",
"if",
"etc",
")",
"Also",
"returns",
"false",
"on",
"internal"
] |
python
|
valid
| 42.166667 |
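Expected behaviour of `is_valid_lval` above, assuming the module's `is_internal`, `is_lval` and `RESERVED_NAMES` behave as their names suggest (the `PyJs_` prefix is only a guess at what counts as internal):

# is_valid_lval('foo')       -> True   (ordinary identifier)
# is_valid_lval('var')       -> False  (reserved JS keyword)
# is_valid_lval('PyJs_temp') -> False  (internal name, rejected by is_internal)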
cackharot/suds-py3
|
suds/properties.py
|
https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/properties.py#L365-L384
|
def keys(self, history=None):
"""
Get the set of I{all} property names.
@param history: A history of nodes checked to prevent
circular hunting.
@type history: [L{Properties},..]
@return: A set of property names.
@rtype: list
"""
if history is None:
history = []
history.append(self)
keys = set()
keys.update(self.definitions.keys())
for x in self.links:
if x in history:
continue
keys.update(x.keys(history))
history.remove(self)
return keys
|
[
"def",
"keys",
"(",
"self",
",",
"history",
"=",
"None",
")",
":",
"if",
"history",
"is",
"None",
":",
"history",
"=",
"[",
"]",
"history",
".",
"append",
"(",
"self",
")",
"keys",
"=",
"set",
"(",
")",
"keys",
".",
"update",
"(",
"self",
".",
"definitions",
".",
"keys",
"(",
")",
")",
"for",
"x",
"in",
"self",
".",
"links",
":",
"if",
"x",
"in",
"history",
":",
"continue",
"keys",
".",
"update",
"(",
"x",
".",
"keys",
"(",
"history",
")",
")",
"history",
".",
"remove",
"(",
"self",
")",
"return",
"keys"
] |
Get the set of I{all} property names.
@param history: A history of nodes checked to prevent
circular hunting.
@type history: [L{Properties},..]
@return: A set of property names.
@rtype: list
|
[
"Get",
"the",
"set",
"of",
"I",
"{",
"all",
"}",
"property",
"names",
"."
] |
python
|
train
| 29.9 |
clalancette/pycdlib
|
pycdlib/headervd.py
|
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/headervd.py#L613-L635
|
def add_to_ptr_size(self, ptr_size):
# type: (int) -> bool
'''
Add the space for a path table record to the volume descriptor.
Parameters:
ptr_size - The length of the Path Table Record being added to this Volume Descriptor.
Returns:
True if extents need to be added to the Volume Descriptor, False otherwise.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('This Volume Descriptor is not yet initialized')
# First add to the path table size.
self.path_tbl_size += ptr_size
if (utils.ceiling_div(self.path_tbl_size, 4096) * 2) > self.path_table_num_extents:
# If we overflowed the path table size, then we need to update the
# space size. Since we always add two extents for the little and
# two for the big, add four total extents. The locations will be
# fixed up during reshuffle_extents.
self.path_table_num_extents += 2
return True
return False
|
[
"def",
"add_to_ptr_size",
"(",
"self",
",",
"ptr_size",
")",
":",
"# type: (int) -> bool",
"if",
"not",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'This Volume Descriptor is not yet initialized'",
")",
"# First add to the path table size.",
"self",
".",
"path_tbl_size",
"+=",
"ptr_size",
"if",
"(",
"utils",
".",
"ceiling_div",
"(",
"self",
".",
"path_tbl_size",
",",
"4096",
")",
"*",
"2",
")",
">",
"self",
".",
"path_table_num_extents",
":",
"# If we overflowed the path table size, then we need to update the",
"# space size. Since we always add two extents for the little and",
"# two for the big, add four total extents. The locations will be",
"# fixed up during reshuffle_extents.",
"self",
".",
"path_table_num_extents",
"+=",
"2",
"return",
"True",
"return",
"False"
] |
Add the space for a path table record to the volume descriptor.
Parameters:
ptr_size - The length of the Path Table Record being added to this Volume Descriptor.
Returns:
True if extents need to be added to the Volume Descriptor, False otherwise.
|
[
"Add",
"the",
"space",
"for",
"a",
"path",
"table",
"record",
"to",
"the",
"volume",
"descriptor",
"."
] |
python
|
train
| 45.521739 |
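The overflow test in `add_to_ptr_size` above compares ceil(path_tbl_size / 4096) * 2 against the current extent count; a quick arithmetic sketch with a stand-in for pycdlib's ceiling_div:

def ceiling_div(n, d):
    return -(-n // d)                 # integer ceiling division

path_tbl_size, extents = 4090, 2
path_tbl_size += 20                   # a new record pushes size past one 4096 block
if ceiling_div(path_tbl_size, 4096) * 2 > extents:
    extents += 2                      # grow the little and big path tables together
print(extents)                        # 4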
edx/edx-drf-extensions
|
edx_rest_framework_extensions/paginators.py
|
https://github.com/edx/edx-drf-extensions/blob/2f4c1682b8471bf894ea566a43fd9f91ba219f83/edx_rest_framework_extensions/paginators.py#L79-L125
|
def paginate_search_results(object_class, search_results, page_size, page):
"""
Takes edx-search results and returns a Page object populated
with db objects for that page.
:param object_class: Model class to use when querying the db for objects.
:param search_results: edX-search results.
:param page_size: Number of results per page.
:param page: Page number.
:return: Paginator object with model objects
"""
paginator = Paginator(search_results['results'], page_size)
# This code is taken from within the GenericAPIView#paginate_queryset method.
# It is common code, but
try:
page_number = paginator.validate_number(page)
except InvalidPage:
if page == 'last':
page_number = paginator.num_pages
else:
raise Http404("Page is not 'last', nor can it be converted to an int.")
try:
paged_results = paginator.page(page_number)
except InvalidPage as exception:
raise Http404(
"Invalid page {page_number}: {message}".format(
page_number=page_number,
message=str(exception)
)
)
search_queryset_pks = [item['data']['pk'] for item in paged_results.object_list]
queryset = object_class.objects.filter(pk__in=search_queryset_pks)
def ordered_objects(primary_key):
""" Returns database object matching the search result object"""
for obj in queryset:
if obj.pk == primary_key:
return obj
# map over the search results and get a list of database objects in the same order
object_results = list(map(ordered_objects, search_queryset_pks))
paged_results.object_list = object_results
return paged_results
|
[
"def",
"paginate_search_results",
"(",
"object_class",
",",
"search_results",
",",
"page_size",
",",
"page",
")",
":",
"paginator",
"=",
"Paginator",
"(",
"search_results",
"[",
"'results'",
"]",
",",
"page_size",
")",
"# This code is taken from within the GenericAPIView#paginate_queryset method.",
"# It is common code, but",
"try",
":",
"page_number",
"=",
"paginator",
".",
"validate_number",
"(",
"page",
")",
"except",
"InvalidPage",
":",
"if",
"page",
"==",
"'last'",
":",
"page_number",
"=",
"paginator",
".",
"num_pages",
"else",
":",
"raise",
"Http404",
"(",
"\"Page is not 'last', nor can it be converted to an int.\"",
")",
"try",
":",
"paged_results",
"=",
"paginator",
".",
"page",
"(",
"page_number",
")",
"except",
"InvalidPage",
"as",
"exception",
":",
"raise",
"Http404",
"(",
"\"Invalid page {page_number}: {message}\"",
".",
"format",
"(",
"page_number",
"=",
"page_number",
",",
"message",
"=",
"str",
"(",
"exception",
")",
")",
")",
"search_queryset_pks",
"=",
"[",
"item",
"[",
"'data'",
"]",
"[",
"'pk'",
"]",
"for",
"item",
"in",
"paged_results",
".",
"object_list",
"]",
"queryset",
"=",
"object_class",
".",
"objects",
".",
"filter",
"(",
"pk__in",
"=",
"search_queryset_pks",
")",
"def",
"ordered_objects",
"(",
"primary_key",
")",
":",
"\"\"\" Returns database object matching the search result object\"\"\"",
"for",
"obj",
"in",
"queryset",
":",
"if",
"obj",
".",
"pk",
"==",
"primary_key",
":",
"return",
"obj",
"# map over the search results and get a list of database objects in the same order",
"object_results",
"=",
"list",
"(",
"map",
"(",
"ordered_objects",
",",
"search_queryset_pks",
")",
")",
"paged_results",
".",
"object_list",
"=",
"object_results",
"return",
"paged_results"
] |
Takes edx-search results and returns a Page object populated
with db objects for that page.
:param object_class: Model class to use when querying the db for objects.
:param search_results: edX-search results.
:param page_size: Number of results per page.
:param page: Page number.
:return: Paginator object with model objects
|
[
"Takes",
"edx",
"-",
"search",
"results",
"and",
"returns",
"a",
"Page",
"object",
"populated",
"with",
"db",
"objects",
"for",
"that",
"page",
"."
] |
python
|
train
| 36.446809 |
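The `search_results` argument of `paginate_search_results` above is expected to look roughly like the payload below; the shape is inferred from the `item['data']['pk']` lookup, and the values are invented:

search_results = {
    'results': [
        {'data': {'pk': 12}},
        {'data': {'pk': 7}},
    ]
}
# paginate_search_results(SomeModel, search_results, page_size=10, page=1)
# (SomeModel is a placeholder for any Django model class)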
RudolfCardinal/pythonlib
|
cardinal_pythonlib/register_db_with_odbc.py
|
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/register_db_with_odbc.py#L122-L144
|
def register_access_db(fullfilename: str, dsn: str, description: str) -> bool:
"""
(Windows only.)
Registers a Microsoft Access database with ODBC.
Args:
fullfilename: filename of the existing database
dsn: ODBC data source name to create
description: description of the database
Returns:
bool: was the DSN created?
"""
directory = os.path.dirname(fullfilename)
return create_sys_dsn(
access_driver,
SERVER="",
DESCRIPTION=description,
DSN=dsn,
DBQ=fullfilename,
DefaultDir=directory
)
|
[
"def",
"register_access_db",
"(",
"fullfilename",
":",
"str",
",",
"dsn",
":",
"str",
",",
"description",
":",
"str",
")",
"->",
"bool",
":",
"directory",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"fullfilename",
")",
"return",
"create_sys_dsn",
"(",
"access_driver",
",",
"SERVER",
"=",
"\"\"",
",",
"DESCRIPTION",
"=",
"description",
",",
"DSN",
"=",
"dsn",
",",
"DBQ",
"=",
"fullfilename",
",",
"DefaultDir",
"=",
"directory",
")"
] |
(Windows only.)
Registers a Microsoft Access database with ODBC.
Args:
fullfilename: filename of the existing database
dsn: ODBC data source name to create
description: description of the database
Returns:
bool: was the DSN created?
|
[
"(",
"Windows",
"only",
".",
")",
"Registers",
"a",
"Microsoft",
"Access",
"database",
"with",
"ODBC",
"."
] |
python
|
train
| 25.304348 |
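A hedged call sketch for `register_access_db` above (Windows only; the path, DSN name and description are placeholders):

ok = register_access_db(
    r"C:\data\research.mdb",          # an existing Access database
    dsn="ResearchDB",
    description="Example research database",
)
print("DSN created" if ok else "DSN creation failed")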
gamechanger/schemer
|
schemer/validators.py
|
https://github.com/gamechanger/schemer/blob/1d1dd7da433d3b84ce5a80ded5a84ab4a65825ee/schemer/validators.py#L169-L178
|
def distinct():
"""
Validates that all items in the given field list value are distinct,
i.e. that the list contains no duplicates.
"""
def validate(value):
for i, item in enumerate(value):
if item in value[i+1:]:
return e("{} is not a distinct set of values", value)
return validate
|
[
"def",
"distinct",
"(",
")",
":",
"def",
"validate",
"(",
"value",
")",
":",
"for",
"i",
",",
"item",
"in",
"enumerate",
"(",
"value",
")",
":",
"if",
"item",
"in",
"value",
"[",
"i",
"+",
"1",
":",
"]",
":",
"return",
"e",
"(",
"\"{} is not a distinct set of values\"",
",",
"value",
")",
"return",
"validate"
] |
Validates that all items in the given field list value are distinct,
i.e. that the list contains no duplicates.
|
[
"Validates",
"that",
"all",
"items",
"in",
"the",
"given",
"field",
"list",
"value",
"are",
"distinct",
"i",
".",
"e",
".",
"that",
"the",
"list",
"contains",
"no",
"duplicates",
"."
] |
python
|
train
| 33.4 |
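Usage sketch for the `distinct` validator above; per the code, the inner validate returns an error message on failure and falls through to None on success:

validate = distinct()
print(validate([1, 2, 3]))     # None  (all values unique)
print(validate([1, 2, 2]))     # error message about a non-distinct set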
inveniosoftware/invenio-oauthclient
|
invenio_oauthclient/utils.py
|
https://github.com/inveniosoftware/invenio-oauthclient/blob/2500dc6935738107617aeade79e050d7608004bb/invenio_oauthclient/utils.py#L100-L114
|
def oauth_register(form):
"""Register user if possible.
:param form: A form instance.
:returns: A :class:`invenio_accounts.models.User` instance.
"""
if form.validate():
data = form.to_dict()
if not data.get('password'):
data['password'] = ''
user = register_user(**data)
if not data['password']:
user.password = None
_datastore.commit()
return user
|
[
"def",
"oauth_register",
"(",
"form",
")",
":",
"if",
"form",
".",
"validate",
"(",
")",
":",
"data",
"=",
"form",
".",
"to_dict",
"(",
")",
"if",
"not",
"data",
".",
"get",
"(",
"'password'",
")",
":",
"data",
"[",
"'password'",
"]",
"=",
"''",
"user",
"=",
"register_user",
"(",
"*",
"*",
"data",
")",
"if",
"not",
"data",
"[",
"'password'",
"]",
":",
"user",
".",
"password",
"=",
"None",
"_datastore",
".",
"commit",
"(",
")",
"return",
"user"
] |
Register user if possible.
:param form: A form instance.
:returns: A :class:`invenio_accounts.models.User` instance.
|
[
"Register",
"user",
"if",
"possible",
"."
] |
python
|
train
| 28.533333 |
andrewgross/pyrelic
|
pyrelic/base_client.py
|
https://github.com/andrewgross/pyrelic/blob/641abe7bfa56bf850281f2d9c90cebe7ea2dfd1e/pyrelic/base_client.py#L83-L90
|
def _make_get_request(self, uri, parameters=None, timeout=None):
"""
Given a request add in the required parameters and return the parsed
XML object.
"""
if not timeout:
timeout = self.timeout
return self._make_request(requests.get, uri, params=parameters, timeout=timeout)
|
[
"def",
"_make_get_request",
"(",
"self",
",",
"uri",
",",
"parameters",
"=",
"None",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"not",
"timeout",
":",
"timeout",
"=",
"self",
".",
"timeout",
"return",
"self",
".",
"_make_request",
"(",
"requests",
".",
"get",
",",
"uri",
",",
"params",
"=",
"parameters",
",",
"timeout",
"=",
"timeout",
")"
] |
Given a request add in the required parameters and return the parsed
XML object.
|
[
"Given",
"a",
"request",
"add",
"in",
"the",
"required",
"parameters",
"and",
"return",
"the",
"parsed",
"XML",
"object",
"."
] |
python
|
train
| 40.75 |
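Call pattern for `_make_get_request` above; the URI and parameters are placeholders, and `client` stands for an instance with `timeout` and `_make_request` configured:

# parsed_xml = client._make_get_request(
#     "https://example.com/endpoint.xml",
#     parameters={"page": 1},
#     timeout=5,
# )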
saltstack/salt
|
salt/modules/snapper.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/snapper.py#L424-L457
|
def delete_snapshot(snapshots_ids=None, config="root"):
'''
Deletes a snapshot
config
Configuration name. (Default: root)
snapshots_ids
List of the snapshots IDs to be deleted.
CLI example:
.. code-block:: bash
salt '*' snapper.delete_snapshot 54
salt '*' snapper.delete_snapshot config=root 54
salt '*' snapper.delete_snapshot config=root snapshots_ids=[54,55,56]
'''
if not snapshots_ids:
raise CommandExecutionError('Error: No snapshot ID has been provided')
try:
current_snapshots_ids = [x['id'] for x in list_snapshots(config)]
if not isinstance(snapshots_ids, list):
snapshots_ids = [snapshots_ids]
if not set(snapshots_ids).issubset(set(current_snapshots_ids)):
raise CommandExecutionError(
"Error: Snapshots '{0}' not found".format(", ".join(
[six.text_type(x) for x in set(snapshots_ids).difference(
set(current_snapshots_ids))]))
)
snapper.DeleteSnapshots(config, snapshots_ids)
return {config: {"ids": snapshots_ids, "status": "deleted"}}
except dbus.DBusException as exc:
raise CommandExecutionError(_dbus_exception_to_reason(exc, locals()))
|
[
"def",
"delete_snapshot",
"(",
"snapshots_ids",
"=",
"None",
",",
"config",
"=",
"\"root\"",
")",
":",
"if",
"not",
"snapshots_ids",
":",
"raise",
"CommandExecutionError",
"(",
"'Error: No snapshot ID has been provided'",
")",
"try",
":",
"current_snapshots_ids",
"=",
"[",
"x",
"[",
"'id'",
"]",
"for",
"x",
"in",
"list_snapshots",
"(",
"config",
")",
"]",
"if",
"not",
"isinstance",
"(",
"snapshots_ids",
",",
"list",
")",
":",
"snapshots_ids",
"=",
"[",
"snapshots_ids",
"]",
"if",
"not",
"set",
"(",
"snapshots_ids",
")",
".",
"issubset",
"(",
"set",
"(",
"current_snapshots_ids",
")",
")",
":",
"raise",
"CommandExecutionError",
"(",
"\"Error: Snapshots '{0}' not found\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"[",
"six",
".",
"text_type",
"(",
"x",
")",
"for",
"x",
"in",
"set",
"(",
"snapshots_ids",
")",
".",
"difference",
"(",
"set",
"(",
"current_snapshots_ids",
")",
")",
"]",
")",
")",
")",
"snapper",
".",
"DeleteSnapshots",
"(",
"config",
",",
"snapshots_ids",
")",
"return",
"{",
"config",
":",
"{",
"\"ids\"",
":",
"snapshots_ids",
",",
"\"status\"",
":",
"\"deleted\"",
"}",
"}",
"except",
"dbus",
".",
"DBusException",
"as",
"exc",
":",
"raise",
"CommandExecutionError",
"(",
"_dbus_exception_to_reason",
"(",
"exc",
",",
"locals",
"(",
")",
")",
")"
] |
Deletes a snapshot
config
Configuration name. (Default: root)
snapshots_ids
List of the snapshots IDs to be deleted.
CLI example:
.. code-block:: bash
salt '*' snapper.delete_snapshot 54
salt '*' snapper.delete_snapshot config=root 54
salt '*' snapper.delete_snapshot config=root snapshots_ids=[54,55,56]
|
[
"Deletes",
"an",
"snapshot"
] |
python
|
train
| 37 |
pytroll/satpy
|
satpy/readers/ahi_hsd.py
|
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/ahi_hsd.py#L363-L367
|
def _check_fpos(self, fp_, fpos, offset, block):
"""Check file position matches blocksize"""
if (fp_.tell() + offset != fpos):
warnings.warn("Actual "+block+" header size does not match expected")
return
|
[
"def",
"_check_fpos",
"(",
"self",
",",
"fp_",
",",
"fpos",
",",
"offset",
",",
"block",
")",
":",
"if",
"(",
"fp_",
".",
"tell",
"(",
")",
"+",
"offset",
"!=",
"fpos",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Actual \"",
"+",
"block",
"+",
"\" header size does not match expected\"",
")",
"return"
] |
Check file position matches blocksize
|
[
"Check",
"file",
"position",
"matches",
"blocksize"
] |
python
|
train
| 47 |
Zaeb0s/epoll-socket-server
|
esockets/socket_server.py
|
https://github.com/Zaeb0s/epoll-socket-server/blob/0b8c8b3c368f8948777579ea1e89e75ab5899372/esockets/socket_server.py#L117-L127
|
def _mainthread_poll_readable(self):
"""Searches for readable client sockets. These sockets are then put in a subthread
to be handled by _handle_readable
"""
events = self._recv_selector.select(self.block_time)
for key, mask in events:
if mask == selectors.EVENT_READ:
self._recv_selector.unregister(key.fileobj)
self._threads_limiter.start_thread(target=self._subthread_handle_readable,
args=(key.fileobj,))
|
[
"def",
"_mainthread_poll_readable",
"(",
"self",
")",
":",
"events",
"=",
"self",
".",
"_recv_selector",
".",
"select",
"(",
"self",
".",
"block_time",
")",
"for",
"key",
",",
"mask",
"in",
"events",
":",
"if",
"mask",
"==",
"selectors",
".",
"EVENT_READ",
":",
"self",
".",
"_recv_selector",
".",
"unregister",
"(",
"key",
".",
"fileobj",
")",
"self",
".",
"_threads_limiter",
".",
"start_thread",
"(",
"target",
"=",
"self",
".",
"_subthread_handle_readable",
",",
"args",
"=",
"(",
"key",
".",
"fileobj",
",",
")",
")"
] |
Searches for readable client sockets. These sockets are then put in a subthread
to be handled by _handle_readable
|
[
"Searches",
"for",
"readable",
"client",
"sockets",
".",
"These",
"sockets",
"are",
"then",
"put",
"in",
"a",
"subthread",
"to",
"be",
"handled",
"by",
"_handle_readable"
] |
python
|
train
| 48.545455 |
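A self-contained sketch of the selectors pattern used by `_mainthread_poll_readable` above; a socketpair stands in for real client connections, and the "handling" is done inline rather than in a thread:

import selectors
import socket

sel = selectors.DefaultSelector()
a, b = socket.socketpair()
sel.register(a, selectors.EVENT_READ)
b.send(b"ping")
for key, mask in sel.select(timeout=1):
    if mask == selectors.EVENT_READ:
        sel.unregister(key.fileobj)        # mirror the method: unregister, then handle
        print(key.fileobj.recv(4))         # b'ping'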
trevisanj/a99
|
a99/gui/xmisc.py
|
https://github.com/trevisanj/a99/blob/193e6e3c9b3e4f4a0ba7eb3eece846fe7045c539/a99/gui/xmisc.py#L424-L430
|
def add_signal(self, signal):
"""Adds "input" signal to connected signals.
Internally connects the signal to a control slot."""
self.__signals.append(signal)
if self.__connected:
# Connects signal if the current state is "connected"
self.__connect_signal(signal)
|
[
"def",
"add_signal",
"(",
"self",
",",
"signal",
")",
":",
"self",
".",
"__signals",
".",
"append",
"(",
"signal",
")",
"if",
"self",
".",
"__connected",
":",
"# Connects signal if the current state is \"connected\"\r",
"self",
".",
"__connect_signal",
"(",
"signal",
")"
] |
Adds "input" signal to connected signals.
Internally connects the signal to a control slot.
|
[
"Adds",
"input",
"signal",
"to",
"connected",
"signals",
".",
"Internally",
"connects",
"the",
"signal",
"to",
"a",
"control",
"slot",
"."
] |
python
|
train
| 45.428571 |
bxlab/bx-python
|
lib/bx/align/maf.py
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/align/maf.py#L203-L210
|
def readline( file, skip_blank=False ):
"""Read a line from provided file, skipping any blank or comment lines"""
while 1:
line = file.readline()
#print "every line: %r" % line
if not line: return None
if line[0] != '#' and not ( skip_blank and line.isspace() ):
return line
|
[
"def",
"readline",
"(",
"file",
",",
"skip_blank",
"=",
"False",
")",
":",
"while",
"1",
":",
"line",
"=",
"file",
".",
"readline",
"(",
")",
"#print \"every line: %r\" % line",
"if",
"not",
"line",
":",
"return",
"None",
"if",
"line",
"[",
"0",
"]",
"!=",
"'#'",
"and",
"not",
"(",
"skip_blank",
"and",
"line",
".",
"isspace",
"(",
")",
")",
":",
"return",
"line"
] |
Read a line from provided file, skipping any blank or comment lines
|
[
"Read",
"a",
"line",
"from",
"provided",
"file",
"skipping",
"any",
"blank",
"or",
"comment",
"lines"
] |
python
|
train
| 40 |
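Quick check of `readline` above using an in-memory file:

from io import StringIO

buf = StringIO("# comment\n\ndata line\n")
print(repr(readline(buf, skip_blank=True)))   # 'data line\n'
print(readline(buf))                          # None (EOF)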
econ-ark/HARK
|
HARK/cAndCwithStickyE/StickyEmodel.py
|
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/cAndCwithStickyE/StickyEmodel.py#L387-L411
|
def simBirth(self,which_agents):
'''
Makes new consumers for the given indices. Slightly extends base method by also setting
pLvlTrue = 1.0 in the very first simulated period, as well as initializing the perception
of aggregate productivity for each Markov state. The representative agent begins with
the correct perception of the Markov state.
Parameters
----------
which_agents : np.array(Bool)
Boolean array of size self.AgentCount indicating which agents should be "born".
Returns
-------
None
'''
if which_agents==np.array([True]):
RepAgentMarkovConsumerType.simBirth(self,which_agents)
if self.t_sim == 0: # Initialize perception distribution for Markov state
self.pLvlTrue = np.ones(self.AgentCount)
self.aLvlNow = self.aNrmNow*self.pLvlTrue
StateCount = self.MrkvArray.shape[0]
self.pLvlNow = np.ones(StateCount) # Perceived productivity level by Markov state
self.MrkvPcvd = np.zeros(StateCount) # Distribution of perceived Markov state
self.MrkvPcvd[self.MrkvNow[0]] = 1.0
|
[
"def",
"simBirth",
"(",
"self",
",",
"which_agents",
")",
":",
"if",
"which_agents",
"==",
"np",
".",
"array",
"(",
"[",
"True",
"]",
")",
":",
"RepAgentMarkovConsumerType",
".",
"simBirth",
"(",
"self",
",",
"which_agents",
")",
"if",
"self",
".",
"t_sim",
"==",
"0",
":",
"# Initialize perception distribution for Markov state",
"self",
".",
"pLvlTrue",
"=",
"np",
".",
"ones",
"(",
"self",
".",
"AgentCount",
")",
"self",
".",
"aLvlNow",
"=",
"self",
".",
"aNrmNow",
"*",
"self",
".",
"pLvlTrue",
"StateCount",
"=",
"self",
".",
"MrkvArray",
".",
"shape",
"[",
"0",
"]",
"self",
".",
"pLvlNow",
"=",
"np",
".",
"ones",
"(",
"StateCount",
")",
"# Perceived productivity level by Markov state",
"self",
".",
"MrkvPcvd",
"=",
"np",
".",
"zeros",
"(",
"StateCount",
")",
"# Distribution of perceived Markov state",
"self",
".",
"MrkvPcvd",
"[",
"self",
".",
"MrkvNow",
"[",
"0",
"]",
"]",
"=",
"1.0"
] |
Makes new consumers for the given indices. Slightly extends base method by also setting
pLvlTrue = 1.0 in the very first simulated period, as well as initializing the perception
of aggregate productivity for each Markov state. The representative agent begins with
the correct perception of the Markov state.
Parameters
----------
which_agents : np.array(Bool)
Boolean array of size self.AgentCount indicating which agents should be "born".
Returns
-------
None
|
[
"Makes",
"new",
"consumers",
"for",
"the",
"given",
"indices",
".",
"Slightly",
"extends",
"base",
"method",
"by",
"also",
"setting",
"pLvlTrue",
"=",
"1",
".",
"0",
"in",
"the",
"very",
"first",
"simulated",
"period",
"as",
"well",
"as",
"initializing",
"the",
"perception",
"of",
"aggregate",
"productivity",
"for",
"each",
"Markov",
"state",
".",
"The",
"representative",
"agent",
"begins",
"with",
"the",
"correct",
"perception",
"of",
"the",
"Markov",
"state",
"."
] |
python
|
train
| 47.92 |
saltstack/salt
|
salt/modules/ldapmod.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ldapmod.py#L78-L89
|
def _config(name, key=None, **kwargs):
'''
Return a value for 'name' from command line args then config file options.
Specify 'key' if the config file option is not the same as 'name'.
'''
if key is None:
key = name
if name in kwargs:
value = kwargs[name]
else:
value = __salt__['config.option']('ldap.{0}'.format(key))
return salt.utils.data.decode(value, to_str=True)
|
[
"def",
"_config",
"(",
"name",
",",
"key",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"key",
"is",
"None",
":",
"key",
"=",
"name",
"if",
"name",
"in",
"kwargs",
":",
"value",
"=",
"kwargs",
"[",
"name",
"]",
"else",
":",
"value",
"=",
"__salt__",
"[",
"'config.option'",
"]",
"(",
"'ldap.{0}'",
".",
"format",
"(",
"key",
")",
")",
"return",
"salt",
".",
"utils",
".",
"data",
".",
"decode",
"(",
"value",
",",
"to_str",
"=",
"True",
")"
] |
Return a value for 'name' from command line args then config file options.
Specify 'key' if the config file option is not the same as 'name'.
|
[
"Return",
"a",
"value",
"for",
"name",
"from",
"command",
"line",
"args",
"then",
"config",
"file",
"options",
".",
"Specify",
"key",
"if",
"the",
"config",
"file",
"option",
"is",
"not",
"the",
"same",
"as",
"name",
"."
] |
python
|
train
| 34.5 |
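An illustrative call pattern for _config above; the option names are hypothetical, and __salt__ only exists inside a loaded salt execution module:

uri = _config('uri', **{'uri': 'ldap://localhost'})  # explicit kwarg wins
server = _config('server')  # falls back to __salt__['config.option']('ldap.server')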
eventable/vobject
|
docs/build/lib/vobject/icalendar.py
|
https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/docs/build/lib/vobject/icalendar.py#L152-L318
|
def settzinfo(self, tzinfo, start=2000, end=2030):
"""
Create appropriate objects in self to represent tzinfo.
Collapse DST transitions to rrules as much as possible.
Assumptions:
- DST <-> Standard transitions occur on the hour
- never within a month of one another
- twice or fewer times a year
- never in the month of December
- DST always moves offset exactly one hour later
        - tzinfo class's dst method always treats times that could be in either
offset as being in the later regime
"""
def fromLastWeek(dt):
"""
How many weeks from the end of the month dt is, starting from 1.
"""
weekDelta = datetime.timedelta(weeks=1)
n = 1
current = dt + weekDelta
while current.month == dt.month:
n += 1
current += weekDelta
return n
# lists of dictionaries defining rules which are no longer in effect
completed = {'daylight' : [], 'standard' : []}
# dictionary defining rules which are currently in effect
working = {'daylight' : None, 'standard' : None}
# rule may be based on nth week of the month or the nth from the last
for year in range(start, end + 1):
newyear = datetime.datetime(year, 1, 1)
for transitionTo in 'daylight', 'standard':
transition = getTransition(transitionTo, year, tzinfo)
oldrule = working[transitionTo]
if transition == newyear:
# transitionTo is in effect for the whole year
rule = {'end' : None,
'start' : newyear,
'month' : 1,
'weekday' : None,
'hour' : None,
'plus' : None,
'minus' : None,
'name' : tzinfo.tzname(newyear),
'offset' : tzinfo.utcoffset(newyear),
'offsetfrom' : tzinfo.utcoffset(newyear)}
if oldrule is None:
# transitionTo was not yet in effect
working[transitionTo] = rule
else:
# transitionTo was already in effect
if (oldrule['offset'] !=
tzinfo.utcoffset(newyear)):
# old rule was different, it shouldn't continue
oldrule['end'] = year - 1
completed[transitionTo].append(oldrule)
working[transitionTo] = rule
elif transition is None:
# transitionTo is not in effect
if oldrule is not None:
# transitionTo used to be in effect
oldrule['end'] = year - 1
completed[transitionTo].append(oldrule)
working[transitionTo] = None
else:
# an offset transition was found
try:
old_offset = tzinfo.utcoffset(transition - twoHours)
name = tzinfo.tzname(transition)
offset = tzinfo.utcoffset(transition)
except (pytz.AmbiguousTimeError, pytz.NonExistentTimeError):
# guaranteed that tzinfo is a pytz timezone
is_dst = (transitionTo == "daylight")
old_offset = tzinfo.utcoffset(transition - twoHours, is_dst=is_dst)
name = tzinfo.tzname(transition, is_dst=is_dst)
offset = tzinfo.utcoffset(transition, is_dst=is_dst)
rule = {'end' : None, # None, or an integer year
'start' : transition, # the datetime of transition
'month' : transition.month,
'weekday' : transition.weekday(),
'hour' : transition.hour,
'name' : name,
'plus' : int(
(transition.day - 1)/ 7 + 1), # nth week of the month
'minus' : fromLastWeek(transition), # nth from last week
'offset' : offset,
'offsetfrom' : old_offset}
if oldrule is None:
working[transitionTo] = rule
else:
plusMatch = rule['plus'] == oldrule['plus']
minusMatch = rule['minus'] == oldrule['minus']
truth = plusMatch or minusMatch
for key in 'month', 'weekday', 'hour', 'offset':
truth = truth and rule[key] == oldrule[key]
if truth:
# the old rule is still true, limit to plus or minus
if not plusMatch:
oldrule['plus'] = None
if not minusMatch:
oldrule['minus'] = None
else:
# the new rule did not match the old
oldrule['end'] = year - 1
completed[transitionTo].append(oldrule)
working[transitionTo] = rule
for transitionTo in 'daylight', 'standard':
if working[transitionTo] is not None:
completed[transitionTo].append(working[transitionTo])
self.tzid = []
self.daylight = []
self.standard = []
self.add('tzid').value = self.pickTzid(tzinfo, True)
# old = None # unused?
for transitionTo in 'daylight', 'standard':
for rule in completed[transitionTo]:
comp = self.add(transitionTo)
dtstart = comp.add('dtstart')
dtstart.value = rule['start']
if rule['name'] is not None:
comp.add('tzname').value = rule['name']
line = comp.add('tzoffsetto')
line.value = deltaToOffset(rule['offset'])
line = comp.add('tzoffsetfrom')
line.value = deltaToOffset(rule['offsetfrom'])
if rule['plus'] is not None:
num = rule['plus']
elif rule['minus'] is not None:
num = -1 * rule['minus']
else:
num = None
if num is not None:
dayString = ";BYDAY=" + str(num) + WEEKDAYS[rule['weekday']]
else:
dayString = ""
if rule['end'] is not None:
if rule['hour'] is None:
# all year offset, with no rule
endDate = datetime.datetime(rule['end'], 1, 1)
else:
weekday = rrule.weekday(rule['weekday'], num)
du_rule = rrule.rrule(rrule.YEARLY,
bymonth = rule['month'],byweekday = weekday,
dtstart = datetime.datetime(
rule['end'], 1, 1, rule['hour'])
)
endDate = du_rule[0]
endDate = endDate.replace(tzinfo = utc) - rule['offsetfrom']
endString = ";UNTIL="+ dateTimeToString(endDate)
else:
endString = ''
new_rule = "FREQ=YEARLY{0!s};BYMONTH={1!s}{2!s}"\
.format(dayString, rule['month'], endString)
comp.add('rrule').value = new_rule
|
[
"def",
"settzinfo",
"(",
"self",
",",
"tzinfo",
",",
"start",
"=",
"2000",
",",
"end",
"=",
"2030",
")",
":",
"def",
"fromLastWeek",
"(",
"dt",
")",
":",
"\"\"\"\n How many weeks from the end of the month dt is, starting from 1.\n \"\"\"",
"weekDelta",
"=",
"datetime",
".",
"timedelta",
"(",
"weeks",
"=",
"1",
")",
"n",
"=",
"1",
"current",
"=",
"dt",
"+",
"weekDelta",
"while",
"current",
".",
"month",
"==",
"dt",
".",
"month",
":",
"n",
"+=",
"1",
"current",
"+=",
"weekDelta",
"return",
"n",
"# lists of dictionaries defining rules which are no longer in effect",
"completed",
"=",
"{",
"'daylight'",
":",
"[",
"]",
",",
"'standard'",
":",
"[",
"]",
"}",
"# dictionary defining rules which are currently in effect",
"working",
"=",
"{",
"'daylight'",
":",
"None",
",",
"'standard'",
":",
"None",
"}",
"# rule may be based on nth week of the month or the nth from the last",
"for",
"year",
"in",
"range",
"(",
"start",
",",
"end",
"+",
"1",
")",
":",
"newyear",
"=",
"datetime",
".",
"datetime",
"(",
"year",
",",
"1",
",",
"1",
")",
"for",
"transitionTo",
"in",
"'daylight'",
",",
"'standard'",
":",
"transition",
"=",
"getTransition",
"(",
"transitionTo",
",",
"year",
",",
"tzinfo",
")",
"oldrule",
"=",
"working",
"[",
"transitionTo",
"]",
"if",
"transition",
"==",
"newyear",
":",
"# transitionTo is in effect for the whole year",
"rule",
"=",
"{",
"'end'",
":",
"None",
",",
"'start'",
":",
"newyear",
",",
"'month'",
":",
"1",
",",
"'weekday'",
":",
"None",
",",
"'hour'",
":",
"None",
",",
"'plus'",
":",
"None",
",",
"'minus'",
":",
"None",
",",
"'name'",
":",
"tzinfo",
".",
"tzname",
"(",
"newyear",
")",
",",
"'offset'",
":",
"tzinfo",
".",
"utcoffset",
"(",
"newyear",
")",
",",
"'offsetfrom'",
":",
"tzinfo",
".",
"utcoffset",
"(",
"newyear",
")",
"}",
"if",
"oldrule",
"is",
"None",
":",
"# transitionTo was not yet in effect",
"working",
"[",
"transitionTo",
"]",
"=",
"rule",
"else",
":",
"# transitionTo was already in effect",
"if",
"(",
"oldrule",
"[",
"'offset'",
"]",
"!=",
"tzinfo",
".",
"utcoffset",
"(",
"newyear",
")",
")",
":",
"# old rule was different, it shouldn't continue",
"oldrule",
"[",
"'end'",
"]",
"=",
"year",
"-",
"1",
"completed",
"[",
"transitionTo",
"]",
".",
"append",
"(",
"oldrule",
")",
"working",
"[",
"transitionTo",
"]",
"=",
"rule",
"elif",
"transition",
"is",
"None",
":",
"# transitionTo is not in effect",
"if",
"oldrule",
"is",
"not",
"None",
":",
"# transitionTo used to be in effect",
"oldrule",
"[",
"'end'",
"]",
"=",
"year",
"-",
"1",
"completed",
"[",
"transitionTo",
"]",
".",
"append",
"(",
"oldrule",
")",
"working",
"[",
"transitionTo",
"]",
"=",
"None",
"else",
":",
"# an offset transition was found",
"try",
":",
"old_offset",
"=",
"tzinfo",
".",
"utcoffset",
"(",
"transition",
"-",
"twoHours",
")",
"name",
"=",
"tzinfo",
".",
"tzname",
"(",
"transition",
")",
"offset",
"=",
"tzinfo",
".",
"utcoffset",
"(",
"transition",
")",
"except",
"(",
"pytz",
".",
"AmbiguousTimeError",
",",
"pytz",
".",
"NonExistentTimeError",
")",
":",
"# guaranteed that tzinfo is a pytz timezone",
"is_dst",
"=",
"(",
"transitionTo",
"==",
"\"daylight\"",
")",
"old_offset",
"=",
"tzinfo",
".",
"utcoffset",
"(",
"transition",
"-",
"twoHours",
",",
"is_dst",
"=",
"is_dst",
")",
"name",
"=",
"tzinfo",
".",
"tzname",
"(",
"transition",
",",
"is_dst",
"=",
"is_dst",
")",
"offset",
"=",
"tzinfo",
".",
"utcoffset",
"(",
"transition",
",",
"is_dst",
"=",
"is_dst",
")",
"rule",
"=",
"{",
"'end'",
":",
"None",
",",
"# None, or an integer year",
"'start'",
":",
"transition",
",",
"# the datetime of transition",
"'month'",
":",
"transition",
".",
"month",
",",
"'weekday'",
":",
"transition",
".",
"weekday",
"(",
")",
",",
"'hour'",
":",
"transition",
".",
"hour",
",",
"'name'",
":",
"name",
",",
"'plus'",
":",
"int",
"(",
"(",
"transition",
".",
"day",
"-",
"1",
")",
"/",
"7",
"+",
"1",
")",
",",
"# nth week of the month",
"'minus'",
":",
"fromLastWeek",
"(",
"transition",
")",
",",
"# nth from last week",
"'offset'",
":",
"offset",
",",
"'offsetfrom'",
":",
"old_offset",
"}",
"if",
"oldrule",
"is",
"None",
":",
"working",
"[",
"transitionTo",
"]",
"=",
"rule",
"else",
":",
"plusMatch",
"=",
"rule",
"[",
"'plus'",
"]",
"==",
"oldrule",
"[",
"'plus'",
"]",
"minusMatch",
"=",
"rule",
"[",
"'minus'",
"]",
"==",
"oldrule",
"[",
"'minus'",
"]",
"truth",
"=",
"plusMatch",
"or",
"minusMatch",
"for",
"key",
"in",
"'month'",
",",
"'weekday'",
",",
"'hour'",
",",
"'offset'",
":",
"truth",
"=",
"truth",
"and",
"rule",
"[",
"key",
"]",
"==",
"oldrule",
"[",
"key",
"]",
"if",
"truth",
":",
"# the old rule is still true, limit to plus or minus",
"if",
"not",
"plusMatch",
":",
"oldrule",
"[",
"'plus'",
"]",
"=",
"None",
"if",
"not",
"minusMatch",
":",
"oldrule",
"[",
"'minus'",
"]",
"=",
"None",
"else",
":",
"# the new rule did not match the old",
"oldrule",
"[",
"'end'",
"]",
"=",
"year",
"-",
"1",
"completed",
"[",
"transitionTo",
"]",
".",
"append",
"(",
"oldrule",
")",
"working",
"[",
"transitionTo",
"]",
"=",
"rule",
"for",
"transitionTo",
"in",
"'daylight'",
",",
"'standard'",
":",
"if",
"working",
"[",
"transitionTo",
"]",
"is",
"not",
"None",
":",
"completed",
"[",
"transitionTo",
"]",
".",
"append",
"(",
"working",
"[",
"transitionTo",
"]",
")",
"self",
".",
"tzid",
"=",
"[",
"]",
"self",
".",
"daylight",
"=",
"[",
"]",
"self",
".",
"standard",
"=",
"[",
"]",
"self",
".",
"add",
"(",
"'tzid'",
")",
".",
"value",
"=",
"self",
".",
"pickTzid",
"(",
"tzinfo",
",",
"True",
")",
"# old = None # unused?",
"for",
"transitionTo",
"in",
"'daylight'",
",",
"'standard'",
":",
"for",
"rule",
"in",
"completed",
"[",
"transitionTo",
"]",
":",
"comp",
"=",
"self",
".",
"add",
"(",
"transitionTo",
")",
"dtstart",
"=",
"comp",
".",
"add",
"(",
"'dtstart'",
")",
"dtstart",
".",
"value",
"=",
"rule",
"[",
"'start'",
"]",
"if",
"rule",
"[",
"'name'",
"]",
"is",
"not",
"None",
":",
"comp",
".",
"add",
"(",
"'tzname'",
")",
".",
"value",
"=",
"rule",
"[",
"'name'",
"]",
"line",
"=",
"comp",
".",
"add",
"(",
"'tzoffsetto'",
")",
"line",
".",
"value",
"=",
"deltaToOffset",
"(",
"rule",
"[",
"'offset'",
"]",
")",
"line",
"=",
"comp",
".",
"add",
"(",
"'tzoffsetfrom'",
")",
"line",
".",
"value",
"=",
"deltaToOffset",
"(",
"rule",
"[",
"'offsetfrom'",
"]",
")",
"if",
"rule",
"[",
"'plus'",
"]",
"is",
"not",
"None",
":",
"num",
"=",
"rule",
"[",
"'plus'",
"]",
"elif",
"rule",
"[",
"'minus'",
"]",
"is",
"not",
"None",
":",
"num",
"=",
"-",
"1",
"*",
"rule",
"[",
"'minus'",
"]",
"else",
":",
"num",
"=",
"None",
"if",
"num",
"is",
"not",
"None",
":",
"dayString",
"=",
"\";BYDAY=\"",
"+",
"str",
"(",
"num",
")",
"+",
"WEEKDAYS",
"[",
"rule",
"[",
"'weekday'",
"]",
"]",
"else",
":",
"dayString",
"=",
"\"\"",
"if",
"rule",
"[",
"'end'",
"]",
"is",
"not",
"None",
":",
"if",
"rule",
"[",
"'hour'",
"]",
"is",
"None",
":",
"# all year offset, with no rule",
"endDate",
"=",
"datetime",
".",
"datetime",
"(",
"rule",
"[",
"'end'",
"]",
",",
"1",
",",
"1",
")",
"else",
":",
"weekday",
"=",
"rrule",
".",
"weekday",
"(",
"rule",
"[",
"'weekday'",
"]",
",",
"num",
")",
"du_rule",
"=",
"rrule",
".",
"rrule",
"(",
"rrule",
".",
"YEARLY",
",",
"bymonth",
"=",
"rule",
"[",
"'month'",
"]",
",",
"byweekday",
"=",
"weekday",
",",
"dtstart",
"=",
"datetime",
".",
"datetime",
"(",
"rule",
"[",
"'end'",
"]",
",",
"1",
",",
"1",
",",
"rule",
"[",
"'hour'",
"]",
")",
")",
"endDate",
"=",
"du_rule",
"[",
"0",
"]",
"endDate",
"=",
"endDate",
".",
"replace",
"(",
"tzinfo",
"=",
"utc",
")",
"-",
"rule",
"[",
"'offsetfrom'",
"]",
"endString",
"=",
"\";UNTIL=\"",
"+",
"dateTimeToString",
"(",
"endDate",
")",
"else",
":",
"endString",
"=",
"''",
"new_rule",
"=",
"\"FREQ=YEARLY{0!s};BYMONTH={1!s}{2!s}\"",
".",
"format",
"(",
"dayString",
",",
"rule",
"[",
"'month'",
"]",
",",
"endString",
")",
"comp",
".",
"add",
"(",
"'rrule'",
")",
".",
"value",
"=",
"new_rule"
] |
Create appropriate objects in self to represent tzinfo.
Collapse DST transitions to rrules as much as possible.
Assumptions:
- DST <-> Standard transitions occur on the hour
- never within a month of one another
- twice or fewer times a year
- never in the month of December
- DST always moves offset exactly one hour later
- tzinfo class's dst method always treats times that could be in either
offset as being in the later regime
|
[
"Create",
"appropriate",
"objects",
"in",
"self",
"to",
"represent",
"tzinfo",
"."
] |
python
|
train
| 47.329341 |
spacetelescope/drizzlepac
|
drizzlepac/staticMask.py
|
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/staticMask.py#L145-L205
|
def addMember(self, imagePtr=None):
"""
Combines the input image with the static mask that
has the same signature.
Parameters
----------
imagePtr : object
An imageObject reference
Notes
-----
The signature parameter consists of the tuple::
(instrument/detector, (nx,ny), chip_id)
The signature is defined in the image object for each chip
"""
numchips=imagePtr._numchips
log.info("Computing static mask:\n")
chips = imagePtr.group
if chips is None:
chips = imagePtr.getExtensions()
#for chip in range(1,numchips+1,1):
for chip in chips:
chipid=imagePtr.scienceExt + ','+ str(chip)
chipimage=imagePtr.getData(chipid)
signature=imagePtr[chipid].signature
# If this is a new signature, create a new Static Mask file which is empty
# only create a new mask if one doesn't already exist
if ((signature not in self.masklist) or (len(self.masklist) == 0)):
self.masklist[signature] = self._buildMaskArray(signature)
maskname = constructFilename(signature)
self.masknames[signature] = maskname
else:
chip_sig = buildSignatureKey(signature)
for s in self.masknames:
if chip_sig in self.masknames[s]:
maskname = self.masknames[s]
break
imagePtr[chipid].outputNames['staticMask'] = maskname
stats = ImageStats(chipimage,nclip=3,fields='mode')
mode = stats.mode
rms = stats.stddev
nbins = len(stats.histogram)
del stats
log.info(' mode = %9f; rms = %7f; static_sig = %0.2f' %
(mode, rms, self.static_sig))
if nbins >= 2: # only combine data from new image if enough data to mask
sky_rms_diff = mode - (self.static_sig*rms)
np.bitwise_and(self.masklist[signature],
np.logical_not(np.less(chipimage, sky_rms_diff)),
self.masklist[signature])
del chipimage
|
[
"def",
"addMember",
"(",
"self",
",",
"imagePtr",
"=",
"None",
")",
":",
"numchips",
"=",
"imagePtr",
".",
"_numchips",
"log",
".",
"info",
"(",
"\"Computing static mask:\\n\"",
")",
"chips",
"=",
"imagePtr",
".",
"group",
"if",
"chips",
"is",
"None",
":",
"chips",
"=",
"imagePtr",
".",
"getExtensions",
"(",
")",
"#for chip in range(1,numchips+1,1):",
"for",
"chip",
"in",
"chips",
":",
"chipid",
"=",
"imagePtr",
".",
"scienceExt",
"+",
"','",
"+",
"str",
"(",
"chip",
")",
"chipimage",
"=",
"imagePtr",
".",
"getData",
"(",
"chipid",
")",
"signature",
"=",
"imagePtr",
"[",
"chipid",
"]",
".",
"signature",
"# If this is a new signature, create a new Static Mask file which is empty",
"# only create a new mask if one doesn't already exist",
"if",
"(",
"(",
"signature",
"not",
"in",
"self",
".",
"masklist",
")",
"or",
"(",
"len",
"(",
"self",
".",
"masklist",
")",
"==",
"0",
")",
")",
":",
"self",
".",
"masklist",
"[",
"signature",
"]",
"=",
"self",
".",
"_buildMaskArray",
"(",
"signature",
")",
"maskname",
"=",
"constructFilename",
"(",
"signature",
")",
"self",
".",
"masknames",
"[",
"signature",
"]",
"=",
"maskname",
"else",
":",
"chip_sig",
"=",
"buildSignatureKey",
"(",
"signature",
")",
"for",
"s",
"in",
"self",
".",
"masknames",
":",
"if",
"chip_sig",
"in",
"self",
".",
"masknames",
"[",
"s",
"]",
":",
"maskname",
"=",
"self",
".",
"masknames",
"[",
"s",
"]",
"break",
"imagePtr",
"[",
"chipid",
"]",
".",
"outputNames",
"[",
"'staticMask'",
"]",
"=",
"maskname",
"stats",
"=",
"ImageStats",
"(",
"chipimage",
",",
"nclip",
"=",
"3",
",",
"fields",
"=",
"'mode'",
")",
"mode",
"=",
"stats",
".",
"mode",
"rms",
"=",
"stats",
".",
"stddev",
"nbins",
"=",
"len",
"(",
"stats",
".",
"histogram",
")",
"del",
"stats",
"log",
".",
"info",
"(",
"' mode = %9f; rms = %7f; static_sig = %0.2f'",
"%",
"(",
"mode",
",",
"rms",
",",
"self",
".",
"static_sig",
")",
")",
"if",
"nbins",
">=",
"2",
":",
"# only combine data from new image if enough data to mask",
"sky_rms_diff",
"=",
"mode",
"-",
"(",
"self",
".",
"static_sig",
"*",
"rms",
")",
"np",
".",
"bitwise_and",
"(",
"self",
".",
"masklist",
"[",
"signature",
"]",
",",
"np",
".",
"logical_not",
"(",
"np",
".",
"less",
"(",
"chipimage",
",",
"sky_rms_diff",
")",
")",
",",
"self",
".",
"masklist",
"[",
"signature",
"]",
")",
"del",
"chipimage"
] |
Combines the input image with the static mask that
has the same signature.
Parameters
----------
imagePtr : object
An imageObject reference
Notes
-----
The signature parameter consists of the tuple::
(instrument/detector, (nx,ny), chip_id)
The signature is defined in the image object for each chip
|
[
"Combines",
"the",
"input",
"image",
"with",
"the",
"static",
"mask",
"that",
"has",
"the",
"same",
"signature",
"."
] |
python
|
train
| 36.508197 |
UCL-INGI/INGInious
|
inginious/frontend/tasks.py
|
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/tasks.py#L106-L108
|
def get_authors(self, language):
""" Return the list of this task's authors """
return self.gettext(language, self._author) if self._author else ""
|
[
"def",
"get_authors",
"(",
"self",
",",
"language",
")",
":",
"return",
"self",
".",
"gettext",
"(",
"language",
",",
"self",
".",
"_author",
")",
"if",
"self",
".",
"_author",
"else",
"\"\""
] |
Return the list of this task's authors
|
[
"Return",
"the",
"list",
"of",
"this",
"task",
"s",
"authors"
] |
python
|
train
| 53.666667 |
zhmcclient/python-zhmcclient
|
zhmcclient_mock/_urihandler.py
|
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient_mock/_urihandler.py#L1110-L1121
|
def post(method, hmc, uri, uri_parms, body, logon_required,
wait_for_completion):
"""Operation: Stop CPC (requires DPM mode)."""
assert wait_for_completion is True # async not supported yet
cpc_oid = uri_parms[0]
try:
cpc = hmc.cpcs.lookup_by_oid(cpc_oid)
except KeyError:
raise InvalidResourceError(method, uri)
if not cpc.dpm_enabled:
raise CpcNotInDpmError(method, uri, cpc)
cpc.properties['status'] = 'not-operating'
|
[
"def",
"post",
"(",
"method",
",",
"hmc",
",",
"uri",
",",
"uri_parms",
",",
"body",
",",
"logon_required",
",",
"wait_for_completion",
")",
":",
"assert",
"wait_for_completion",
"is",
"True",
"# async not supported yet",
"cpc_oid",
"=",
"uri_parms",
"[",
"0",
"]",
"try",
":",
"cpc",
"=",
"hmc",
".",
"cpcs",
".",
"lookup_by_oid",
"(",
"cpc_oid",
")",
"except",
"KeyError",
":",
"raise",
"InvalidResourceError",
"(",
"method",
",",
"uri",
")",
"if",
"not",
"cpc",
".",
"dpm_enabled",
":",
"raise",
"CpcNotInDpmError",
"(",
"method",
",",
"uri",
",",
"cpc",
")",
"cpc",
".",
"properties",
"[",
"'status'",
"]",
"=",
"'not-operating'"
] |
Operation: Stop CPC (requires DPM mode).
|
[
"Operation",
":",
"Stop",
"CPC",
"(",
"requires",
"DPM",
"mode",
")",
"."
] |
python
|
train
| 42.916667 |
euske/pdfminer
|
pdfminer/utils.py
|
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/utils.py#L223-L228
|
def decode_text(s):
"""Decodes a PDFDocEncoding string to Unicode."""
if s.startswith(b'\xfe\xff'):
return unicode(s[2:], 'utf-16be', 'ignore')
else:
return ''.join(PDFDocEncoding[ord(c)] for c in s)
|
[
"def",
"decode_text",
"(",
"s",
")",
":",
"if",
"s",
".",
"startswith",
"(",
"b'\\xfe\\xff'",
")",
":",
"return",
"unicode",
"(",
"s",
"[",
"2",
":",
"]",
",",
"'utf-16be'",
",",
"'ignore'",
")",
"else",
":",
"return",
"''",
".",
"join",
"(",
"PDFDocEncoding",
"[",
"ord",
"(",
"c",
")",
"]",
"for",
"c",
"in",
"s",
")"
] |
Decodes a PDFDocEncoding string to Unicode.
|
[
"Decodes",
"a",
"PDFDocEncoding",
"string",
"to",
"Unicode",
"."
] |
python
|
train
| 37 |
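Illustrative calls for decode_text above (Python 2 semantics, since it relies on the unicode builtin and the module-level PDFDocEncoding table):

decode_text(b'\xfe\xff\x00H\x00i')  # BOM present -> decoded as UTF-16BE, u'Hi'
decode_text(b'Hi')                  # no BOM -> per-byte PDFDocEncoding lookup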
hyperledger/indy-plenum
|
plenum/server/replica.py
|
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L1428-L1433
|
def nonFinalisedReqs(self, reqKeys: List[Tuple[str, int]]):
"""
Check if there are any requests which are not finalised, i.e for
which there are not enough PROPAGATEs
"""
return {key for key in reqKeys if not self.requests.is_finalised(key)}
|
[
"def",
"nonFinalisedReqs",
"(",
"self",
",",
"reqKeys",
":",
"List",
"[",
"Tuple",
"[",
"str",
",",
"int",
"]",
"]",
")",
":",
"return",
"{",
"key",
"for",
"key",
"in",
"reqKeys",
"if",
"not",
"self",
".",
"requests",
".",
"is_finalised",
"(",
"key",
")",
"}"
] |
Check if there are any requests which are not finalised, i.e for
which there are not enough PROPAGATEs
|
[
"Check",
"if",
"there",
"are",
"any",
"requests",
"which",
"are",
"not",
"finalised",
"i",
".",
"e",
"for",
"which",
"there",
"are",
"not",
"enough",
"PROPAGATEs"
] |
python
|
train
| 46 |
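A sketch of the expected behaviour, assuming replica.requests.is_finalised returns True only for the first key:

replica.nonFinalisedReqs([('digest-a', 1), ('digest-b', 2)])
# -> {('digest-b', 2)}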
glomex/gcdt
|
gcdt/tenkai_core.py
|
https://github.com/glomex/gcdt/blob/cd67cf416371337b83cb9ca3f696277125703339/gcdt/tenkai_core.py#L144-L159
|
def _get_deployment_instance_summary(awsclient, deployment_id, instance_id):
"""instance summary.
:param awsclient:
:param deployment_id:
:param instance_id:
return: status, last_event
"""
client_codedeploy = awsclient.get_client('codedeploy')
request = {
'deploymentId': deployment_id,
'instanceId': instance_id
}
response = client_codedeploy.get_deployment_instance(**request)
return response['instanceSummary']['status'], \
response['instanceSummary']['lifecycleEvents'][-1]['lifecycleEventName']
|
[
"def",
"_get_deployment_instance_summary",
"(",
"awsclient",
",",
"deployment_id",
",",
"instance_id",
")",
":",
"client_codedeploy",
"=",
"awsclient",
".",
"get_client",
"(",
"'codedeploy'",
")",
"request",
"=",
"{",
"'deploymentId'",
":",
"deployment_id",
",",
"'instanceId'",
":",
"instance_id",
"}",
"response",
"=",
"client_codedeploy",
".",
"get_deployment_instance",
"(",
"*",
"*",
"request",
")",
"return",
"response",
"[",
"'instanceSummary'",
"]",
"[",
"'status'",
"]",
",",
"response",
"[",
"'instanceSummary'",
"]",
"[",
"'lifecycleEvents'",
"]",
"[",
"-",
"1",
"]",
"[",
"'lifecycleEventName'",
"]"
] |
instance summary.
:param awsclient:
:param deployment_id:
:param instance_id:
return: status, last_event
|
[
"instance",
"summary",
"."
] |
python
|
train
| 34.75 |
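Illustrative call with hypothetical ids, assuming awsclient wraps a boto3 codedeploy client:

status, last_event = _get_deployment_instance_summary(
    awsclient, 'd-EXAMPLE123', 'i-0123456789abcdef0')
# status e.g. 'Succeeded'; last_event names the newest lifecycle event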
deepmind/sonnet
|
sonnet/python/modules/gated_rnn.py
|
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/gated_rnn.py#L1661-L1686
|
def get_possible_initializer_keys(cls, num_layers):
"""Returns the keys the dictionary of variable initializers may contain.
The set of all possible initializer keys are:
wt: weight for input -> T gate
wh: weight for input -> H gate
wtL: weight for prev state -> T gate for layer L (indexed from 0)
whL: weight for prev state -> H gate for layer L (indexed from 0)
btL: bias for prev state -> T gate for layer L (indexed from 0)
bhL: bias for prev state -> H gate for layer L (indexed from 0)
Args:
num_layers: (int) Number of highway layers.
Returns:
Set with strings corresponding to the strings that may be passed to the
constructor.
"""
keys = [cls.WT, cls.WH]
for layer_index in xrange(num_layers):
layer_str = str(layer_index)
keys += [
cls.WT + layer_str,
cls.BT + layer_str,
cls.WH + layer_str,
cls.BH + layer_str]
return set(keys)
|
[
"def",
"get_possible_initializer_keys",
"(",
"cls",
",",
"num_layers",
")",
":",
"keys",
"=",
"[",
"cls",
".",
"WT",
",",
"cls",
".",
"WH",
"]",
"for",
"layer_index",
"in",
"xrange",
"(",
"num_layers",
")",
":",
"layer_str",
"=",
"str",
"(",
"layer_index",
")",
"keys",
"+=",
"[",
"cls",
".",
"WT",
"+",
"layer_str",
",",
"cls",
".",
"BT",
"+",
"layer_str",
",",
"cls",
".",
"WH",
"+",
"layer_str",
",",
"cls",
".",
"BH",
"+",
"layer_str",
"]",
"return",
"set",
"(",
"keys",
")"
] |
Returns the keys the dictionary of variable initializers may contain.
The set of all possible initializer keys are:
wt: weight for input -> T gate
wh: weight for input -> H gate
wtL: weight for prev state -> T gate for layer L (indexed from 0)
whL: weight for prev state -> H gate for layer L (indexed from 0)
btL: bias for prev state -> T gate for layer L (indexed from 0)
bhL: bias for prev state -> H gate for layer L (indexed from 0)
Args:
num_layers: (int) Number of highway layers.
Returns:
Set with strings corresponding to the strings that may be passed to the
constructor.
|
[
"Returns",
"the",
"keys",
"the",
"dictionary",
"of",
"variable",
"initializers",
"may",
"contain",
"."
] |
python
|
train
| 36.769231 |
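Illustrative result for a two-layer core, assuming the class constants are WT='wt', WH='wh', BT='bt', BH='bh' (the class name below is hypothetical):

HighwayCore.get_possible_initializer_keys(2)
# -> {'wt', 'wh', 'wt0', 'bt0', 'wh0', 'bh0', 'wt1', 'bt1', 'wh1', 'bh1'}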
misli/django-cms-articles
|
cms_articles/search_indexes.py
|
https://github.com/misli/django-cms-articles/blob/d96ac77e049022deb4c70d268e4eab74d175145c/cms_articles/search_indexes.py#L46-L69
|
def get_article_placeholders(self, article):
"""
In the project settings set up the variable
CMS_ARTICLES_PLACEHOLDERS_SEARCH_LIST = {
'include': [ 'slot1', 'slot2', etc. ],
'exclude': [ 'slot3', 'slot4', etc. ],
}
or leave it empty
CMS_ARTICLES_PLACEHOLDERS_SEARCH_LIST = {}
"""
placeholders_search_list = getattr(settings, 'CMS_ARTICLES_PLACEHOLDERS_SEARCH_LIST', {})
included = placeholders_search_list.get('include', [])
excluded = placeholders_search_list.get('exclude', [])
diff = set(included) - set(excluded)
if diff:
return article.placeholders.filter(slot__in=diff)
elif excluded:
return article.placeholders.exclude(slot__in=excluded)
else:
return article.placeholders.all()
|
[
"def",
"get_article_placeholders",
"(",
"self",
",",
"article",
")",
":",
"placeholders_search_list",
"=",
"getattr",
"(",
"settings",
",",
"'CMS_ARTICLES_PLACEHOLDERS_SEARCH_LIST'",
",",
"{",
"}",
")",
"included",
"=",
"placeholders_search_list",
".",
"get",
"(",
"'include'",
",",
"[",
"]",
")",
"excluded",
"=",
"placeholders_search_list",
".",
"get",
"(",
"'exclude'",
",",
"[",
"]",
")",
"diff",
"=",
"set",
"(",
"included",
")",
"-",
"set",
"(",
"excluded",
")",
"if",
"diff",
":",
"return",
"article",
".",
"placeholders",
".",
"filter",
"(",
"slot__in",
"=",
"diff",
")",
"elif",
"excluded",
":",
"return",
"article",
".",
"placeholders",
".",
"exclude",
"(",
"slot__in",
"=",
"excluded",
")",
"else",
":",
"return",
"article",
".",
"placeholders",
".",
"all",
"(",
")"
] |
In the project settings set up the variable
CMS_ARTICLES_PLACEHOLDERS_SEARCH_LIST = {
'include': [ 'slot1', 'slot2', etc. ],
'exclude': [ 'slot3', 'slot4', etc. ],
}
or leave it empty
CMS_ARTICLES_PLACEHOLDERS_SEARCH_LIST = {}
|
[
"In",
"the",
"project",
"settings",
"set",
"up",
"the",
"variable"
] |
python
|
train
| 34.916667 |
kwikteam/phy
|
phy/traces/filter.py
|
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/traces/filter.py#L19-L25
|
def bandpass_filter(rate=None, low=None, high=None, order=None):
"""Butterworth bandpass filter."""
assert low < high
assert order >= 1
return signal.butter(order,
(low / (rate / 2.), high / (rate / 2.)),
'pass')
|
[
"def",
"bandpass_filter",
"(",
"rate",
"=",
"None",
",",
"low",
"=",
"None",
",",
"high",
"=",
"None",
",",
"order",
"=",
"None",
")",
":",
"assert",
"low",
"<",
"high",
"assert",
"order",
">=",
"1",
"return",
"signal",
".",
"butter",
"(",
"order",
",",
"(",
"low",
"/",
"(",
"rate",
"/",
"2.",
")",
",",
"high",
"/",
"(",
"rate",
"/",
"2.",
")",
")",
",",
"'pass'",
")"
] |
Butterworth bandpass filter.
|
[
"Butterworth",
"bandpass",
"filter",
"."
] |
python
|
train
| 38.857143 |
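A minimal usage sketch, assuming scipy.signal is imported as signal in the module:

b, a = bandpass_filter(rate=20000., low=500., high=9500., order=3)
# b, a are Butterworth coefficients; apply them with e.g.
# filtered = signal.filtfilt(b, a, raw_traces)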
serkanyersen/underscore.py
|
src/underscore.py
|
https://github.com/serkanyersen/underscore.py/blob/07c25c3f0f789536e4ad47aa315faccc0da9602f/src/underscore.py#L289-L297
|
def include(self, target):
"""
Determine if a given value is included in the
array or object using `is`.
"""
if self._clean.isDict():
return self._wrap(target in self.obj.values())
else:
return self._wrap(target in self.obj)
|
[
"def",
"include",
"(",
"self",
",",
"target",
")",
":",
"if",
"self",
".",
"_clean",
".",
"isDict",
"(",
")",
":",
"return",
"self",
".",
"_wrap",
"(",
"target",
"in",
"self",
".",
"obj",
".",
"values",
"(",
")",
")",
"else",
":",
"return",
"self",
".",
"_wrap",
"(",
"target",
"in",
"self",
".",
"obj",
")"
] |
Determine if a given value is included in the
array or object using `is`.
|
[
"Determine",
"if",
"a",
"given",
"value",
"is",
"included",
"in",
"the",
"array",
"or",
"object",
"using",
"is",
"."
] |
python
|
train
| 32 |
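Illustrative behaviour, assuming the module's usual wrapper entry point (written here as _):

_([1, 2, 3]).include(2)         # wrapped True
_({'a': 1, 'b': 2}).include(2)  # wrapped True: dicts are searched by value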
boriel/zxbasic
|
arch/zx48k/optimizer.py
|
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/optimizer.py#L310-L350
|
def result(i):
""" Returns which 8-bit registers are used by an asm
instruction to return a result.
"""
ins = inst(i)
op = oper(i)
if ins in ('or', 'and') and op == ['a']:
return ['f']
if ins in {'xor', 'or', 'and', 'neg', 'cpl', 'daa', 'rld', 'rrd', 'rra', 'rla', 'rrca', 'rlca'}:
return ['a', 'f']
if ins in {'bit', 'cp', 'scf', 'ccf'}:
return ['f']
if ins in {'sub', 'add', 'sbc', 'adc'}:
if len(op) == 1:
return ['a', 'f']
else:
return single_registers(op[0]) + ['f']
if ins == 'djnz':
return ['b', 'f']
if ins in {'ldir', 'ldi', 'lddr', 'ldd'}:
return ['f', 'b', 'c', 'd', 'e', 'h', 'l']
if ins in {'cpi', 'cpir', 'cpd', 'cpdr'}:
return ['f', 'b', 'c', 'h', 'l']
if ins in ('pop', 'ld'):
return single_registers(op[0])
if ins in {'inc', 'dec', 'sbc', 'rr', 'rl', 'rrc', 'rlc'}:
return ['f'] + single_registers(op[0])
if ins in ('set', 'res'):
return single_registers(op[1])
return []
|
[
"def",
"result",
"(",
"i",
")",
":",
"ins",
"=",
"inst",
"(",
"i",
")",
"op",
"=",
"oper",
"(",
"i",
")",
"if",
"ins",
"in",
"(",
"'or'",
",",
"'and'",
")",
"and",
"op",
"==",
"[",
"'a'",
"]",
":",
"return",
"[",
"'f'",
"]",
"if",
"ins",
"in",
"{",
"'xor'",
",",
"'or'",
",",
"'and'",
",",
"'neg'",
",",
"'cpl'",
",",
"'daa'",
",",
"'rld'",
",",
"'rrd'",
",",
"'rra'",
",",
"'rla'",
",",
"'rrca'",
",",
"'rlca'",
"}",
":",
"return",
"[",
"'a'",
",",
"'f'",
"]",
"if",
"ins",
"in",
"{",
"'bit'",
",",
"'cp'",
",",
"'scf'",
",",
"'ccf'",
"}",
":",
"return",
"[",
"'f'",
"]",
"if",
"ins",
"in",
"{",
"'sub'",
",",
"'add'",
",",
"'sbc'",
",",
"'adc'",
"}",
":",
"if",
"len",
"(",
"op",
")",
"==",
"1",
":",
"return",
"[",
"'a'",
",",
"'f'",
"]",
"else",
":",
"return",
"single_registers",
"(",
"op",
"[",
"0",
"]",
")",
"+",
"[",
"'f'",
"]",
"if",
"ins",
"==",
"'djnz'",
":",
"return",
"[",
"'b'",
",",
"'f'",
"]",
"if",
"ins",
"in",
"{",
"'ldir'",
",",
"'ldi'",
",",
"'lddr'",
",",
"'ldd'",
"}",
":",
"return",
"[",
"'f'",
",",
"'b'",
",",
"'c'",
",",
"'d'",
",",
"'e'",
",",
"'h'",
",",
"'l'",
"]",
"if",
"ins",
"in",
"{",
"'cpi'",
",",
"'cpir'",
",",
"'cpd'",
",",
"'cpdr'",
"}",
":",
"return",
"[",
"'f'",
",",
"'b'",
",",
"'c'",
",",
"'h'",
",",
"'l'",
"]",
"if",
"ins",
"in",
"(",
"'pop'",
",",
"'ld'",
")",
":",
"return",
"single_registers",
"(",
"op",
"[",
"0",
"]",
")",
"if",
"ins",
"in",
"{",
"'inc'",
",",
"'dec'",
",",
"'sbc'",
",",
"'rr'",
",",
"'rl'",
",",
"'rrc'",
",",
"'rlc'",
"}",
":",
"return",
"[",
"'f'",
"]",
"+",
"single_registers",
"(",
"op",
"[",
"0",
"]",
")",
"if",
"ins",
"in",
"(",
"'set'",
",",
"'res'",
")",
":",
"return",
"single_registers",
"(",
"op",
"[",
"1",
"]",
")",
"return",
"[",
"]"
] |
Returns which 8-bit registers are used by an asm
instruction to return a result.
|
[
"Returns",
"which",
"8",
"-",
"bit",
"registers",
"are",
"used",
"by",
"an",
"asm",
"instruction",
"to",
"return",
"a",
"result",
"."
] |
python
|
train
| 25.243902 |
ixc/python-edtf
|
edtf/jdutil.py
|
https://github.com/ixc/python-edtf/blob/ec2124d3df75f8dd72571026380ce8dd16f3dd6b/edtf/jdutil.py#L274-L298
|
def datetime_to_jd(date):
"""
Convert a `datetime.datetime` object to Julian Day.
Parameters
----------
date : `datetime.datetime` instance
Returns
-------
jd : float
Julian day.
Examples
--------
>>> d = datetime.datetime(1985,2,17,6)
>>> d
datetime.datetime(1985, 2, 17, 6, 0)
>>> jdutil.datetime_to_jd(d)
2446113.75
"""
days = date.day + hmsm_to_days(date.hour,date.minute,date.second,date.microsecond)
return date_to_jd(date.year,date.month,days)
|
[
"def",
"datetime_to_jd",
"(",
"date",
")",
":",
"days",
"=",
"date",
".",
"day",
"+",
"hmsm_to_days",
"(",
"date",
".",
"hour",
",",
"date",
".",
"minute",
",",
"date",
".",
"second",
",",
"date",
".",
"microsecond",
")",
"return",
"date_to_jd",
"(",
"date",
".",
"year",
",",
"date",
".",
"month",
",",
"days",
")"
] |
Convert a `datetime.datetime` object to Julian Day.
Parameters
----------
date : `datetime.datetime` instance
Returns
-------
jd : float
Julian day.
Examples
--------
>>> d = datetime.datetime(1985,2,17,6)
>>> d
datetime.datetime(1985, 2, 17, 6, 0)
>>> jdutil.datetime_to_jd(d)
2446113.75
|
[
"Convert",
"a",
"datetime",
".",
"datetime",
"object",
"to",
"Julian",
"Day",
"."
] |
python
|
train
| 20.52 |
fabioz/PyDev.Debugger
|
_pydevd_bundle/pydevd_constants.py
|
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydevd_bundle/pydevd_constants.py#L152-L178
|
def protect_libraries_from_patching():
"""
In this function we delete some modules from `sys.modules` dictionary and import them again inside
`_pydev_saved_modules` in order to save their original copies there. After that we can use these
saved modules within the debugger to protect them from patching by external libraries (e.g. gevent).
"""
patched = ['threading', 'thread', '_thread', 'time', 'socket', 'Queue', 'queue', 'select',
'xmlrpclib', 'SimpleXMLRPCServer', 'BaseHTTPServer', 'SocketServer',
'xmlrpc.client', 'xmlrpc.server', 'http.server', 'socketserver']
for name in patched:
try:
__import__(name)
except:
pass
patched_modules = dict([(k, v) for k, v in sys.modules.items()
if k in patched])
for name in patched_modules:
del sys.modules[name]
# import for side effects
import _pydev_imps._pydev_saved_modules
for name in patched_modules:
sys.modules[name] = patched_modules[name]
|
[
"def",
"protect_libraries_from_patching",
"(",
")",
":",
"patched",
"=",
"[",
"'threading'",
",",
"'thread'",
",",
"'_thread'",
",",
"'time'",
",",
"'socket'",
",",
"'Queue'",
",",
"'queue'",
",",
"'select'",
",",
"'xmlrpclib'",
",",
"'SimpleXMLRPCServer'",
",",
"'BaseHTTPServer'",
",",
"'SocketServer'",
",",
"'xmlrpc.client'",
",",
"'xmlrpc.server'",
",",
"'http.server'",
",",
"'socketserver'",
"]",
"for",
"name",
"in",
"patched",
":",
"try",
":",
"__import__",
"(",
"name",
")",
"except",
":",
"pass",
"patched_modules",
"=",
"dict",
"(",
"[",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"sys",
".",
"modules",
".",
"items",
"(",
")",
"if",
"k",
"in",
"patched",
"]",
")",
"for",
"name",
"in",
"patched_modules",
":",
"del",
"sys",
".",
"modules",
"[",
"name",
"]",
"# import for side effects",
"import",
"_pydev_imps",
".",
"_pydev_saved_modules",
"for",
"name",
"in",
"patched_modules",
":",
"sys",
".",
"modules",
"[",
"name",
"]",
"=",
"patched_modules",
"[",
"name",
"]"
] |
In this function we delete some modules from `sys.modules` dictionary and import them again inside
`_pydev_saved_modules` in order to save their original copies there. After that we can use these
saved modules within the debugger to protect them from patching by external libraries (e.g. gevent).
|
[
"In",
"this",
"function",
"we",
"delete",
"some",
"modules",
"from",
"sys",
".",
"modules",
"dictionary",
"and",
"import",
"them",
"again",
"inside",
"_pydev_saved_modules",
"in",
"order",
"to",
"save",
"their",
"original",
"copies",
"there",
".",
"After",
"that",
"we",
"can",
"use",
"these",
"saved",
"modules",
"within",
"the",
"debugger",
"to",
"protect",
"them",
"from",
"patching",
"by",
"external",
"libraries",
"(",
"e",
".",
"g",
".",
"gevent",
")",
"."
] |
python
|
train
| 38.444444 |
brbsix/subsystem
|
subsystem/subsystem.py
|
https://github.com/brbsix/subsystem/blob/57705bc20d71ceaed9e22e21246265d717e98eb8/subsystem/subsystem.py#L121-L130
|
def epilog(self):
"""Return text formatted for the usage description's epilog."""
bold = '\033[1m'
end = '\033[0m'
available = self.available.copy()
index = available.index(Config.DOWNLOADER_DEFAULT)
available[index] = bold + '(' + available[index] + ')' + end
formatted = ' | '.join(available)
return 'Downloaders available: ' + formatted
|
[
"def",
"epilog",
"(",
"self",
")",
":",
"bold",
"=",
"'\\033[1m'",
"end",
"=",
"'\\033[0m'",
"available",
"=",
"self",
".",
"available",
".",
"copy",
"(",
")",
"index",
"=",
"available",
".",
"index",
"(",
"Config",
".",
"DOWNLOADER_DEFAULT",
")",
"available",
"[",
"index",
"]",
"=",
"bold",
"+",
"'('",
"+",
"available",
"[",
"index",
"]",
"+",
"')'",
"+",
"end",
"formatted",
"=",
"' | '",
".",
"join",
"(",
"available",
")",
"return",
"'Downloaders available: '",
"+",
"formatted"
] |
Return text formatted for the usage description's epilog.
|
[
"Return",
"text",
"formatted",
"for",
"the",
"usage",
"description",
"s",
"epilog",
"."
] |
python
|
train
| 39.7 |
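Illustrative output, assuming self.available == ['aria2', 'wget'] and Config.DOWNLOADER_DEFAULT == 'wget':

# 'Downloaders available: aria2 | \x1b[1m(wget)\x1b[0m'
# i.e. the default downloader is wrapped in bold parentheses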
log2timeline/plaso
|
plaso/cli/image_export_tool.py
|
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/image_export_tool.py#L316-L365
|
def _ExtractWithFilter(
self, source_path_specs, destination_path, output_writer,
artifact_filters, filter_file, artifact_definitions_path,
custom_artifacts_path, skip_duplicates=True):
"""Extracts files using a filter expression.
This method runs the file extraction process on the image and
potentially on every VSS if that is wanted.
Args:
source_path_specs (list[dfvfs.PathSpec]): path specifications to extract.
destination_path (str): path where the extracted files should be stored.
output_writer (CLIOutputWriter): output writer.
artifact_definitions_path (str): path to artifact definitions file.
custom_artifacts_path (str): path to custom artifact definitions file.
artifact_filters (list[str]): names of artifact definitions that are
used for filtering file system and Windows Registry key paths.
filter_file (str): path of the file that contains the filter file path
filters.
skip_duplicates (Optional[bool]): True if files with duplicate content
should be skipped.
"""
extraction_engine = engine.BaseEngine()
# If the source is a directory or a storage media image
# run pre-processing.
if self._source_type in self._SOURCE_TYPES_TO_PREPROCESS:
self._PreprocessSources(extraction_engine)
for source_path_spec in source_path_specs:
file_system, mount_point = self._GetSourceFileSystem(
source_path_spec, resolver_context=self._resolver_context)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(
source_path_spec)
output_writer.Write(
'Extracting file entries from: {0:s}\n'.format(display_name))
filter_find_specs = extraction_engine.BuildFilterFindSpecs(
artifact_definitions_path, custom_artifacts_path,
extraction_engine.knowledge_base, artifact_filters, filter_file)
searcher = file_system_searcher.FileSystemSearcher(
file_system, mount_point)
for path_spec in searcher.Find(find_specs=filter_find_specs):
self._ExtractFileEntry(
path_spec, destination_path, output_writer,
skip_duplicates=skip_duplicates)
file_system.Close()
|
[
"def",
"_ExtractWithFilter",
"(",
"self",
",",
"source_path_specs",
",",
"destination_path",
",",
"output_writer",
",",
"artifact_filters",
",",
"filter_file",
",",
"artifact_definitions_path",
",",
"custom_artifacts_path",
",",
"skip_duplicates",
"=",
"True",
")",
":",
"extraction_engine",
"=",
"engine",
".",
"BaseEngine",
"(",
")",
"# If the source is a directory or a storage media image",
"# run pre-processing.",
"if",
"self",
".",
"_source_type",
"in",
"self",
".",
"_SOURCE_TYPES_TO_PREPROCESS",
":",
"self",
".",
"_PreprocessSources",
"(",
"extraction_engine",
")",
"for",
"source_path_spec",
"in",
"source_path_specs",
":",
"file_system",
",",
"mount_point",
"=",
"self",
".",
"_GetSourceFileSystem",
"(",
"source_path_spec",
",",
"resolver_context",
"=",
"self",
".",
"_resolver_context",
")",
"display_name",
"=",
"path_helper",
".",
"PathHelper",
".",
"GetDisplayNameForPathSpec",
"(",
"source_path_spec",
")",
"output_writer",
".",
"Write",
"(",
"'Extracting file entries from: {0:s}\\n'",
".",
"format",
"(",
"display_name",
")",
")",
"filter_find_specs",
"=",
"extraction_engine",
".",
"BuildFilterFindSpecs",
"(",
"artifact_definitions_path",
",",
"custom_artifacts_path",
",",
"extraction_engine",
".",
"knowledge_base",
",",
"artifact_filters",
",",
"filter_file",
")",
"searcher",
"=",
"file_system_searcher",
".",
"FileSystemSearcher",
"(",
"file_system",
",",
"mount_point",
")",
"for",
"path_spec",
"in",
"searcher",
".",
"Find",
"(",
"find_specs",
"=",
"filter_find_specs",
")",
":",
"self",
".",
"_ExtractFileEntry",
"(",
"path_spec",
",",
"destination_path",
",",
"output_writer",
",",
"skip_duplicates",
"=",
"skip_duplicates",
")",
"file_system",
".",
"Close",
"(",
")"
] |
Extracts files using a filter expression.
This method runs the file extraction process on the image and
potentially on every VSS if that is wanted.
Args:
source_path_specs (list[dfvfs.PathSpec]): path specifications to extract.
destination_path (str): path where the extracted files should be stored.
output_writer (CLIOutputWriter): output writer.
artifact_definitions_path (str): path to artifact definitions file.
custom_artifacts_path (str): path to custom artifact definitions file.
artifact_filters (list[str]): names of artifact definitions that are
used for filtering file system and Windows Registry key paths.
filter_file (str): path of the file that contains the filter file path
filters.
skip_duplicates (Optional[bool]): True if files with duplicate content
should be skipped.
|
[
"Extracts",
"files",
"using",
"a",
"filter",
"expression",
"."
] |
python
|
train
| 43.88 |
getsentry/libsourcemap
|
libsourcemap/highlevel.py
|
https://github.com/getsentry/libsourcemap/blob/94b5a34814fafee9dc23da8ec0ccca77f30e3370/libsourcemap/highlevel.py#L203-L206
|
def has_source_contents(self, src_id):
"""Checks if some sources exist."""
return bool(rustcall(_lib.lsm_view_has_source_contents,
self._get_ptr(), src_id))
|
[
"def",
"has_source_contents",
"(",
"self",
",",
"src_id",
")",
":",
"return",
"bool",
"(",
"rustcall",
"(",
"_lib",
".",
"lsm_view_has_source_contents",
",",
"self",
".",
"_get_ptr",
"(",
")",
",",
"src_id",
")",
")"
] |
Checks if some sources exist.
|
[
"Checks",
"if",
"some",
"sources",
"exist",
"."
] |
python
|
train
| 49.5 |
IvanMalison/okcupyd
|
okcupyd/profile_copy.py
|
https://github.com/IvanMalison/okcupyd/blob/46f4eaa9419098f6c299738ce148af55c64deb64/okcupyd/profile_copy.py#L121-L132
|
def looking_for(self):
"""Copy looking for attributes from the source profile to the
destination profile.
"""
looking_for = self.source_profile.looking_for
return self.dest_user.profile.looking_for.update(
gentation=looking_for.gentation,
single=looking_for.single,
near_me=looking_for.near_me,
kinds=looking_for.kinds,
ages=looking_for.ages
)
|
[
"def",
"looking_for",
"(",
"self",
")",
":",
"looking_for",
"=",
"self",
".",
"source_profile",
".",
"looking_for",
"return",
"self",
".",
"dest_user",
".",
"profile",
".",
"looking_for",
".",
"update",
"(",
"gentation",
"=",
"looking_for",
".",
"gentation",
",",
"single",
"=",
"looking_for",
".",
"single",
",",
"near_me",
"=",
"looking_for",
".",
"near_me",
",",
"kinds",
"=",
"looking_for",
".",
"kinds",
",",
"ages",
"=",
"looking_for",
".",
"ages",
")"
] |
Copy looking for attributes from the source profile to the
destination profile.
|
[
"Copy",
"looking",
"for",
"attributes",
"from",
"the",
"source",
"profile",
"to",
"the",
"destination",
"profile",
"."
] |
python
|
train
| 36.666667 |
ibis-project/ibis
|
ibis/impala/client.py
|
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/impala/client.py#L33-L40
|
def create_table(self, table_name, obj=None, **kwargs):
"""
Dispatch to ImpalaClient.create_table. See that function's docstring
for more
"""
return self.client.create_table(
table_name, obj=obj, database=self.name, **kwargs
)
|
[
"def",
"create_table",
"(",
"self",
",",
"table_name",
",",
"obj",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"client",
".",
"create_table",
"(",
"table_name",
",",
"obj",
"=",
"obj",
",",
"database",
"=",
"self",
".",
"name",
",",
"*",
"*",
"kwargs",
")"
] |
Dispatch to ImpalaClient.create_table. See that function's docstring
for more
|
[
"Dispatch",
"to",
"ImpalaClient",
".",
"create_table",
".",
"See",
"that",
"function",
"s",
"docstring",
"for",
"more"
] |
python
|
train
| 34.875 |
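Illustrative dispatch, assuming db is a database wrapper exposing this method and expr is an ibis table expression:

db.create_table('my_table', obj=expr)
# equivalent to db.client.create_table('my_table', obj=expr, database=db.name)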
DataBiosphere/toil
|
src/toil/utils/toilStats.py
|
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilStats.py#L126-L135
|
def padStr(s, field=None):
""" Pad the begining of a string with spaces, if necessary.
"""
if field is None:
return s
else:
if len(s) >= field:
return s
else:
return " " * (field - len(s)) + s
|
[
"def",
"padStr",
"(",
"s",
",",
"field",
"=",
"None",
")",
":",
"if",
"field",
"is",
"None",
":",
"return",
"s",
"else",
":",
"if",
"len",
"(",
"s",
")",
">=",
"field",
":",
"return",
"s",
"else",
":",
"return",
"\" \"",
"*",
"(",
"field",
"-",
"len",
"(",
"s",
")",
")",
"+",
"s"
] |
Pad the beginning of a string with spaces, if necessary.
|
[
"Pad",
"the",
"begining",
"of",
"a",
"string",
"with",
"spaces",
"if",
"necessary",
"."
] |
python
|
train
| 23.9 |
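Illustrative calls; the helper behaves like str.rjust with a None-tolerant field:

padStr('abc')        # -> 'abc'       (field is None, returned unchanged)
padStr('abc', 5)     # -> '  abc'     (same as 'abc'.rjust(5))
padStr('abcdef', 5)  # -> 'abcdef'    (already at least field wide)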
lambdamusic/Ontospy
|
ontospy/extras/sparqlpy.py
|
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/sparqlpy.py#L212-L223
|
def __doQuery(self, query, format, convert):
"""
Inner method that does the actual query
"""
self.__getFormat(format)
self.sparql.setQuery(query)
if convert:
results = self.sparql.query().convert()
else:
results = self.sparql.query()
return results
|
[
"def",
"__doQuery",
"(",
"self",
",",
"query",
",",
"format",
",",
"convert",
")",
":",
"self",
".",
"__getFormat",
"(",
"format",
")",
"self",
".",
"sparql",
".",
"setQuery",
"(",
"query",
")",
"if",
"convert",
":",
"results",
"=",
"self",
".",
"sparql",
".",
"query",
"(",
")",
".",
"convert",
"(",
")",
"else",
":",
"results",
"=",
"self",
".",
"sparql",
".",
"query",
"(",
")",
"return",
"results"
] |
Inner method that does the actual query
|
[
"Inner",
"method",
"that",
"does",
"the",
"actual",
"query"
] |
python
|
train
| 21.666667 |
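Illustrative inner-call semantics, assuming a SPARQLWrapper-backed instance; the query string is hypothetical:

results = self.__doQuery('SELECT * WHERE { ?s ?p ?o } LIMIT 5', 'json', convert=True)
# convert=True parses the HTTP response into Python objects;
# convert=False returns the raw QueryResult object instead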
aiortc/aiortc
|
aiortc/rtcsctptransport.py
|
https://github.com/aiortc/aiortc/blob/60ed036abf4575bd63985724b4493d569e6da29b/aiortc/rtcsctptransport.py#L1033-L1124
|
async def _receive_sack_chunk(self, chunk):
"""
Handle a SACK chunk.
"""
if uint32_gt(self._last_sacked_tsn, chunk.cumulative_tsn):
return
received_time = time.time()
self._last_sacked_tsn = chunk.cumulative_tsn
cwnd_fully_utilized = (self._flight_size >= self._cwnd)
done = 0
done_bytes = 0
# handle acknowledged data
while self._sent_queue and uint32_gte(self._last_sacked_tsn, self._sent_queue[0].tsn):
schunk = self._sent_queue.popleft()
done += 1
if not schunk._acked:
done_bytes += schunk._book_size
self._flight_size_decrease(schunk)
# update RTO estimate
if done == 1 and schunk._sent_count == 1:
self._update_rto(received_time - schunk._sent_time)
# handle gap blocks
loss = False
if chunk.gaps:
seen = set()
for gap in chunk.gaps:
for pos in range(gap[0], gap[1] + 1):
highest_seen_tsn = (chunk.cumulative_tsn + pos) % SCTP_TSN_MODULO
seen.add(highest_seen_tsn)
            # determine Highest TSN Newly Acked (HTNA)
highest_newly_acked = chunk.cumulative_tsn
for schunk in self._sent_queue:
if uint32_gt(schunk.tsn, highest_seen_tsn):
break
if schunk.tsn in seen and not schunk._acked:
done_bytes += schunk._book_size
schunk._acked = True
self._flight_size_decrease(schunk)
highest_newly_acked = schunk.tsn
# strike missing chunks prior to HTNA
for schunk in self._sent_queue:
if uint32_gt(schunk.tsn, highest_newly_acked):
break
if schunk.tsn not in seen:
schunk._misses += 1
if schunk._misses == 3:
schunk._misses = 0
if not self._maybe_abandon(schunk):
schunk._retransmit = True
schunk._acked = False
self._flight_size_decrease(schunk)
loss = True
# adjust congestion window
if self._fast_recovery_exit is None:
if done and cwnd_fully_utilized:
if self._cwnd <= self._ssthresh:
# slow start
self._cwnd += min(done_bytes, USERDATA_MAX_LENGTH)
else:
# congestion avoidance
self._partial_bytes_acked += done_bytes
if self._partial_bytes_acked >= self._cwnd:
self._partial_bytes_acked -= self._cwnd
self._cwnd += USERDATA_MAX_LENGTH
if loss:
self._ssthresh = max(self._cwnd // 2, 4 * USERDATA_MAX_LENGTH)
self._cwnd = self._ssthresh
self._partial_bytes_acked = 0
self._fast_recovery_exit = self._sent_queue[-1].tsn
self._fast_recovery_transmit = True
elif uint32_gte(chunk.cumulative_tsn, self._fast_recovery_exit):
self._fast_recovery_exit = None
if not self._sent_queue:
# there is no outstanding data, stop T3
self._t3_cancel()
elif done:
# the earliest outstanding chunk was acknowledged, restart T3
self._t3_restart()
self._update_advanced_peer_ack_point()
await self._data_channel_flush()
await self._transmit()
|
[
"async",
"def",
"_receive_sack_chunk",
"(",
"self",
",",
"chunk",
")",
":",
"if",
"uint32_gt",
"(",
"self",
".",
"_last_sacked_tsn",
",",
"chunk",
".",
"cumulative_tsn",
")",
":",
"return",
"received_time",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"_last_sacked_tsn",
"=",
"chunk",
".",
"cumulative_tsn",
"cwnd_fully_utilized",
"=",
"(",
"self",
".",
"_flight_size",
">=",
"self",
".",
"_cwnd",
")",
"done",
"=",
"0",
"done_bytes",
"=",
"0",
"# handle acknowledged data",
"while",
"self",
".",
"_sent_queue",
"and",
"uint32_gte",
"(",
"self",
".",
"_last_sacked_tsn",
",",
"self",
".",
"_sent_queue",
"[",
"0",
"]",
".",
"tsn",
")",
":",
"schunk",
"=",
"self",
".",
"_sent_queue",
".",
"popleft",
"(",
")",
"done",
"+=",
"1",
"if",
"not",
"schunk",
".",
"_acked",
":",
"done_bytes",
"+=",
"schunk",
".",
"_book_size",
"self",
".",
"_flight_size_decrease",
"(",
"schunk",
")",
"# update RTO estimate",
"if",
"done",
"==",
"1",
"and",
"schunk",
".",
"_sent_count",
"==",
"1",
":",
"self",
".",
"_update_rto",
"(",
"received_time",
"-",
"schunk",
".",
"_sent_time",
")",
"# handle gap blocks",
"loss",
"=",
"False",
"if",
"chunk",
".",
"gaps",
":",
"seen",
"=",
"set",
"(",
")",
"for",
"gap",
"in",
"chunk",
".",
"gaps",
":",
"for",
"pos",
"in",
"range",
"(",
"gap",
"[",
"0",
"]",
",",
"gap",
"[",
"1",
"]",
"+",
"1",
")",
":",
"highest_seen_tsn",
"=",
"(",
"chunk",
".",
"cumulative_tsn",
"+",
"pos",
")",
"%",
"SCTP_TSN_MODULO",
"seen",
".",
"add",
"(",
"highest_seen_tsn",
")",
"# determined Highest TSN Newly Acked (HTNA)",
"highest_newly_acked",
"=",
"chunk",
".",
"cumulative_tsn",
"for",
"schunk",
"in",
"self",
".",
"_sent_queue",
":",
"if",
"uint32_gt",
"(",
"schunk",
".",
"tsn",
",",
"highest_seen_tsn",
")",
":",
"break",
"if",
"schunk",
".",
"tsn",
"in",
"seen",
"and",
"not",
"schunk",
".",
"_acked",
":",
"done_bytes",
"+=",
"schunk",
".",
"_book_size",
"schunk",
".",
"_acked",
"=",
"True",
"self",
".",
"_flight_size_decrease",
"(",
"schunk",
")",
"highest_newly_acked",
"=",
"schunk",
".",
"tsn",
"# strike missing chunks prior to HTNA",
"for",
"schunk",
"in",
"self",
".",
"_sent_queue",
":",
"if",
"uint32_gt",
"(",
"schunk",
".",
"tsn",
",",
"highest_newly_acked",
")",
":",
"break",
"if",
"schunk",
".",
"tsn",
"not",
"in",
"seen",
":",
"schunk",
".",
"_misses",
"+=",
"1",
"if",
"schunk",
".",
"_misses",
"==",
"3",
":",
"schunk",
".",
"_misses",
"=",
"0",
"if",
"not",
"self",
".",
"_maybe_abandon",
"(",
"schunk",
")",
":",
"schunk",
".",
"_retransmit",
"=",
"True",
"schunk",
".",
"_acked",
"=",
"False",
"self",
".",
"_flight_size_decrease",
"(",
"schunk",
")",
"loss",
"=",
"True",
"# adjust congestion window",
"if",
"self",
".",
"_fast_recovery_exit",
"is",
"None",
":",
"if",
"done",
"and",
"cwnd_fully_utilized",
":",
"if",
"self",
".",
"_cwnd",
"<=",
"self",
".",
"_ssthresh",
":",
"# slow start",
"self",
".",
"_cwnd",
"+=",
"min",
"(",
"done_bytes",
",",
"USERDATA_MAX_LENGTH",
")",
"else",
":",
"# congestion avoidance",
"self",
".",
"_partial_bytes_acked",
"+=",
"done_bytes",
"if",
"self",
".",
"_partial_bytes_acked",
">=",
"self",
".",
"_cwnd",
":",
"self",
".",
"_partial_bytes_acked",
"-=",
"self",
".",
"_cwnd",
"self",
".",
"_cwnd",
"+=",
"USERDATA_MAX_LENGTH",
"if",
"loss",
":",
"self",
".",
"_ssthresh",
"=",
"max",
"(",
"self",
".",
"_cwnd",
"//",
"2",
",",
"4",
"*",
"USERDATA_MAX_LENGTH",
")",
"self",
".",
"_cwnd",
"=",
"self",
".",
"_ssthresh",
"self",
".",
"_partial_bytes_acked",
"=",
"0",
"self",
".",
"_fast_recovery_exit",
"=",
"self",
".",
"_sent_queue",
"[",
"-",
"1",
"]",
".",
"tsn",
"self",
".",
"_fast_recovery_transmit",
"=",
"True",
"elif",
"uint32_gte",
"(",
"chunk",
".",
"cumulative_tsn",
",",
"self",
".",
"_fast_recovery_exit",
")",
":",
"self",
".",
"_fast_recovery_exit",
"=",
"None",
"if",
"not",
"self",
".",
"_sent_queue",
":",
"# there is no outstanding data, stop T3",
"self",
".",
"_t3_cancel",
"(",
")",
"elif",
"done",
":",
"# the earliest outstanding chunk was acknowledged, restart T3",
"self",
".",
"_t3_restart",
"(",
")",
"self",
".",
"_update_advanced_peer_ack_point",
"(",
")",
"await",
"self",
".",
"_data_channel_flush",
"(",
")",
"await",
"self",
".",
"_transmit",
"(",
")"
] |
Handle a SACK chunk.
|
[
"Handle",
"a",
"SACK",
"chunk",
"."
] |
python
|
train
| 39 |
bslatkin/dpxdt
|
dpxdt/client/workers.py
|
https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/client/workers.py#L450-L462
|
def wait_one(self):
"""Waits until this worker has finished one work item or died."""
while True:
try:
item = self.output_queue.get(True, self.polltime)
except Queue.Empty:
continue
except KeyboardInterrupt:
LOGGER.debug('Exiting')
return
else:
item.check_result()
return
|
[
"def",
"wait_one",
"(",
"self",
")",
":",
"while",
"True",
":",
"try",
":",
"item",
"=",
"self",
".",
"output_queue",
".",
"get",
"(",
"True",
",",
"self",
".",
"polltime",
")",
"except",
"Queue",
".",
"Empty",
":",
"continue",
"except",
"KeyboardInterrupt",
":",
"LOGGER",
".",
"debug",
"(",
"'Exiting'",
")",
"return",
"else",
":",
"item",
".",
"check_result",
"(",
")",
"return"
] |
Waits until this worker has finished one work item or died.
|
[
"Waits",
"until",
"this",
"worker",
"has",
"finished",
"one",
"work",
"item",
"or",
"died",
"."
] |
python
|
train
| 32.230769 |
scheibler/khard
|
khard/khard.py
|
https://github.com/scheibler/khard/blob/0f69430c2680f1ff5f073a977a3c5b753b96cc17/khard/khard.py#L478-L524
|
def get_contacts(address_books, query, method="all", reverse=False,
group=False, sort="first_name"):
"""Get a list of contacts from one or more address books.
:param address_books: the address books to search
:type address_books: list(address_book.AddressBook)
:param query: a search query to select contacts
:type query: str
:param method: the search method, one of "all", "name" or "uid"
:type method: str
:param reverse: reverse the order of the returned contacts
:type reverse: bool
:param group: group results by address book
:type group: bool
:param sort: the field to use for sorting, one of "first_name", "last_name"
:type sort: str
:returns: contacts from the address_books that match the query
:rtype: list(CarddavObject)
"""
# Search for the contacts in all address books.
contacts = []
for address_book in address_books:
contacts.extend(address_book.search(query, method=method))
# Sort the contacts.
if group:
if sort == "first_name":
return sorted(contacts, reverse=reverse, key=lambda x: (
unidecode(x.address_book.name).lower(),
unidecode(x.get_first_name_last_name()).lower()))
elif sort == "last_name":
return sorted(contacts, reverse=reverse, key=lambda x: (
unidecode(x.address_book.name).lower(),
unidecode(x.get_last_name_first_name()).lower()))
else:
raise ValueError('sort must be "first_name" or "last_name" not '
'{}.'.format(sort))
else:
if sort == "first_name":
return sorted(contacts, reverse=reverse, key=lambda x:
unidecode(x.get_first_name_last_name()).lower())
elif sort == "last_name":
return sorted(contacts, reverse=reverse, key=lambda x:
unidecode(x.get_last_name_first_name()).lower())
else:
raise ValueError('sort must be "first_name" or "last_name" not '
'{}.'.format(sort))
|
[
"def",
"get_contacts",
"(",
"address_books",
",",
"query",
",",
"method",
"=",
"\"all\"",
",",
"reverse",
"=",
"False",
",",
"group",
"=",
"False",
",",
"sort",
"=",
"\"first_name\"",
")",
":",
"# Search for the contacts in all address books.",
"contacts",
"=",
"[",
"]",
"for",
"address_book",
"in",
"address_books",
":",
"contacts",
".",
"extend",
"(",
"address_book",
".",
"search",
"(",
"query",
",",
"method",
"=",
"method",
")",
")",
"# Sort the contacts.",
"if",
"group",
":",
"if",
"sort",
"==",
"\"first_name\"",
":",
"return",
"sorted",
"(",
"contacts",
",",
"reverse",
"=",
"reverse",
",",
"key",
"=",
"lambda",
"x",
":",
"(",
"unidecode",
"(",
"x",
".",
"address_book",
".",
"name",
")",
".",
"lower",
"(",
")",
",",
"unidecode",
"(",
"x",
".",
"get_first_name_last_name",
"(",
")",
")",
".",
"lower",
"(",
")",
")",
")",
"elif",
"sort",
"==",
"\"last_name\"",
":",
"return",
"sorted",
"(",
"contacts",
",",
"reverse",
"=",
"reverse",
",",
"key",
"=",
"lambda",
"x",
":",
"(",
"unidecode",
"(",
"x",
".",
"address_book",
".",
"name",
")",
".",
"lower",
"(",
")",
",",
"unidecode",
"(",
"x",
".",
"get_last_name_first_name",
"(",
")",
")",
".",
"lower",
"(",
")",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'sort must be \"first_name\" or \"last_name\" not '",
"'{}.'",
".",
"format",
"(",
"sort",
")",
")",
"else",
":",
"if",
"sort",
"==",
"\"first_name\"",
":",
"return",
"sorted",
"(",
"contacts",
",",
"reverse",
"=",
"reverse",
",",
"key",
"=",
"lambda",
"x",
":",
"unidecode",
"(",
"x",
".",
"get_first_name_last_name",
"(",
")",
")",
".",
"lower",
"(",
")",
")",
"elif",
"sort",
"==",
"\"last_name\"",
":",
"return",
"sorted",
"(",
"contacts",
",",
"reverse",
"=",
"reverse",
",",
"key",
"=",
"lambda",
"x",
":",
"unidecode",
"(",
"x",
".",
"get_last_name_first_name",
"(",
")",
")",
".",
"lower",
"(",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'sort must be \"first_name\" or \"last_name\" not '",
"'{}.'",
".",
"format",
"(",
"sort",
")",
")"
] |
Get a list of contacts from one or more address books.
:param address_books: the address books to search
:type address_books: list(address_book.AddressBook)
:param query: a search query to select contacts
:type query: str
:param method: the search method, one of "all", "name" or "uid"
:type method: str
:param reverse: reverse the order of the returned contacts
:type reverse: bool
:param group: group results by address book
:type group: bool
:param sort: the field to use for sorting, one of "first_name", "last_name"
:type sort: str
:returns: contacts from the address_books that match the query
:rtype: list(CarddavObject)
|
[
"Get",
"a",
"list",
"of",
"contacts",
"from",
"one",
"or",
"more",
"address",
"books",
"."
] |
python
|
test
| 44.170213 |
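The sort keys above fold contact names through unidecode so accented characters compare by their ASCII equivalents. A minimal sketch of that key logic with a stand-in Contact class (not khard's CarddavObject):

```python
from unidecode import unidecode

class Contact:
    def __init__(self, first, last):
        self.first, self.last = first, last
    def get_first_name_last_name(self):
        return "{} {}".format(self.first, self.last)

contacts = [Contact("Émile", "Zola"), Contact("Anna", "Bell")]
ordered = sorted(contacts,
                 key=lambda x: unidecode(x.get_first_name_last_name()).lower())
print([c.get_first_name_last_name() for c in ordered])
# ['Anna Bell', 'Émile Zola']  (unidecode maps 'É' -> 'E' before comparing)
```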
jopohl/urh
|
src/urh/signalprocessing/ProtocolSniffer.py
|
https://github.com/jopohl/urh/blob/2eb33b125c8407964cd1092843cde5010eb88aae/src/urh/signalprocessing/ProtocolSniffer.py#L155-L203
|
def __demodulate_data(self, data):
"""
Demodulates received IQ data and adds demodulated bits to messages
:param data:
:return:
"""
if len(data) == 0:
return
power_spectrum = data.real ** 2 + data.imag ** 2
is_above_noise = np.sqrt(np.mean(power_spectrum)) > self.signal.noise_threshold
if self.adaptive_noise and not is_above_noise:
self.signal.noise_threshold = 0.9 * self.signal.noise_threshold + 0.1 * np.sqrt(np.max(power_spectrum))
if is_above_noise:
self.__add_to_buffer(data)
self.pause_length = 0
if not self.__buffer_is_full():
return
else:
self.pause_length += len(data)
if self.pause_length < 10 * self.signal.bit_len:
self.__add_to_buffer(data)
if not self.__buffer_is_full():
return
if self.__current_buffer_index == 0:
return
# clear cache and start a new message
self.signal._fulldata = self.__buffer[0:self.__current_buffer_index]
self.__clear_buffer()
self.signal._qad = None
bit_len = self.signal.bit_len
if self.automatic_center:
self.signal.qad_center = AutoInterpretation.detect_center(self.signal.qad, max_size=150*self.signal.bit_len)
ppseq = grab_pulse_lens(self.signal.qad, self.signal.qad_center,
self.signal.tolerance, self.signal.modulation_type, self.signal.bit_len)
bit_data, pauses, bit_sample_pos = self._ppseq_to_bits(ppseq, bit_len, write_bit_sample_pos=False)
for bits, pause in zip(bit_data, pauses):
message = Message(bits, pause, bit_len=bit_len, message_type=self.default_message_type,
decoder=self.decoder)
self.messages.append(message)
self.message_sniffed.emit(len(self.messages) - 1)
|
[
"def",
"__demodulate_data",
"(",
"self",
",",
"data",
")",
":",
"if",
"len",
"(",
"data",
")",
"==",
"0",
":",
"return",
"power_spectrum",
"=",
"data",
".",
"real",
"**",
"2",
"+",
"data",
".",
"imag",
"**",
"2",
"is_above_noise",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"mean",
"(",
"power_spectrum",
")",
")",
">",
"self",
".",
"signal",
".",
"noise_threshold",
"if",
"self",
".",
"adaptive_noise",
"and",
"not",
"is_above_noise",
":",
"self",
".",
"signal",
".",
"noise_threshold",
"=",
"0.9",
"*",
"self",
".",
"signal",
".",
"noise_threshold",
"+",
"0.1",
"*",
"np",
".",
"sqrt",
"(",
"np",
".",
"max",
"(",
"power_spectrum",
")",
")",
"if",
"is_above_noise",
":",
"self",
".",
"__add_to_buffer",
"(",
"data",
")",
"self",
".",
"pause_length",
"=",
"0",
"if",
"not",
"self",
".",
"__buffer_is_full",
"(",
")",
":",
"return",
"else",
":",
"self",
".",
"pause_length",
"+=",
"len",
"(",
"data",
")",
"if",
"self",
".",
"pause_length",
"<",
"10",
"*",
"self",
".",
"signal",
".",
"bit_len",
":",
"self",
".",
"__add_to_buffer",
"(",
"data",
")",
"if",
"not",
"self",
".",
"__buffer_is_full",
"(",
")",
":",
"return",
"if",
"self",
".",
"__current_buffer_index",
"==",
"0",
":",
"return",
"# clear cache and start a new message",
"self",
".",
"signal",
".",
"_fulldata",
"=",
"self",
".",
"__buffer",
"[",
"0",
":",
"self",
".",
"__current_buffer_index",
"]",
"self",
".",
"__clear_buffer",
"(",
")",
"self",
".",
"signal",
".",
"_qad",
"=",
"None",
"bit_len",
"=",
"self",
".",
"signal",
".",
"bit_len",
"if",
"self",
".",
"automatic_center",
":",
"self",
".",
"signal",
".",
"qad_center",
"=",
"AutoInterpretation",
".",
"detect_center",
"(",
"self",
".",
"signal",
".",
"qad",
",",
"max_size",
"=",
"150",
"*",
"self",
".",
"signal",
".",
"bit_len",
")",
"ppseq",
"=",
"grab_pulse_lens",
"(",
"self",
".",
"signal",
".",
"qad",
",",
"self",
".",
"signal",
".",
"qad_center",
",",
"self",
".",
"signal",
".",
"tolerance",
",",
"self",
".",
"signal",
".",
"modulation_type",
",",
"self",
".",
"signal",
".",
"bit_len",
")",
"bit_data",
",",
"pauses",
",",
"bit_sample_pos",
"=",
"self",
".",
"_ppseq_to_bits",
"(",
"ppseq",
",",
"bit_len",
",",
"write_bit_sample_pos",
"=",
"False",
")",
"for",
"bits",
",",
"pause",
"in",
"zip",
"(",
"bit_data",
",",
"pauses",
")",
":",
"message",
"=",
"Message",
"(",
"bits",
",",
"pause",
",",
"bit_len",
"=",
"bit_len",
",",
"message_type",
"=",
"self",
".",
"default_message_type",
",",
"decoder",
"=",
"self",
".",
"decoder",
")",
"self",
".",
"messages",
".",
"append",
"(",
"message",
")",
"self",
".",
"message_sniffed",
".",
"emit",
"(",
"len",
"(",
"self",
".",
"messages",
")",
"-",
"1",
")"
] |
Demodulates received IQ data and adds demodulated bits to messages
:param data:
:return:
|
[
"Demodulates",
"received",
"IQ",
"data",
"and",
"adds",
"demodulated",
"bits",
"to",
"messages",
":",
"param",
"data",
":",
":",
"return",
":"
] |
python
|
train
| 39.387755 |
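The adaptive noise threshold above is an exponential moving average: while a block is judged to be noise, the threshold drifts toward 90 % of its old value plus 10 % of the block's peak magnitude. A standalone numpy sketch (the function name and starting threshold are illustrative, not the sniffer's API):

```python
import numpy as np

def update_noise_threshold(iq, threshold, adaptive=True):
    """Return (new_threshold, is_above_noise) for one block of IQ samples."""
    power = iq.real ** 2 + iq.imag ** 2
    is_above_noise = np.sqrt(np.mean(power)) > threshold
    if adaptive and not is_above_noise:
        # EMA toward the block's peak magnitude while only noise is seen
        threshold = 0.9 * threshold + 0.1 * np.sqrt(np.max(power))
    return threshold, is_above_noise

samples = (np.random.randn(1024) + 1j * np.random.randn(1024)) * 0.01
thr, above = update_noise_threshold(samples.astype(np.complex64), threshold=0.1)
```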
benvanwerkhoven/kernel_tuner
|
kernel_tuner/cuda.py
|
https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/cuda.py#L322-L334
|
def memcpy_htod(self, dest, src):
"""perform a host to device memory copy
:param dest: A GPU memory allocation unit
:type dest: pycuda.driver.DeviceAllocation
:param src: A numpy array in host memory containing the data to copy
:type src: numpy.ndarray
"""
if isinstance(dest, drv.DeviceAllocation):
drv.memcpy_htod(dest, src)
else:
dest = src
|
[
"def",
"memcpy_htod",
"(",
"self",
",",
"dest",
",",
"src",
")",
":",
"if",
"isinstance",
"(",
"dest",
",",
"drv",
".",
"DeviceAllocation",
")",
":",
"drv",
".",
"memcpy_htod",
"(",
"dest",
",",
"src",
")",
"else",
":",
"dest",
"=",
"src"
] |
perform a host to device memory copy
:param dest: A GPU memory allocation unit
:type dest: pycuda.driver.DeviceAllocation
:param src: A numpy array in host memory containing the data to copy
:type src: numpy.ndarray
|
[
"perform",
"a",
"host",
"to",
"device",
"memory",
"copy"
] |
python
|
train
| 31.615385 |
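A hedged usage sketch of the device branch above, assuming a CUDA device and pycuda are available; the plain-assignment fallback is what runs when dest is not a DeviceAllocation (for example, a host-memory mock):

```python
import numpy as np
import pycuda.autoinit  # noqa: F401 - creates a CUDA context on import
import pycuda.driver as drv

src = np.arange(1024, dtype=np.float32)
dest = drv.mem_alloc(src.nbytes)   # a pycuda.driver.DeviceAllocation
drv.memcpy_htod(dest, src)         # the same call the wrapper makes
```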
python-diamond/Diamond
|
src/diamond/metric.py
|
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/diamond/metric.py#L132-L146
|
def getCollectorPath(self):
"""
Returns collector path
servers.host.cpu.total.idle
return "cpu"
"""
# If we don't have a host name, assume it's just the third part of the
# metric path
if self.host is None:
return self.path.split('.')[2]
offset = self.path.index(self.host)
offset += len(self.host) + 1
endoffset = self.path.index('.', offset)
return self.path[offset:endoffset]
|
[
"def",
"getCollectorPath",
"(",
"self",
")",
":",
"# If we don't have a host name, assume it's just the third part of the",
"# metric path",
"if",
"self",
".",
"host",
"is",
"None",
":",
"return",
"self",
".",
"path",
".",
"split",
"(",
"'.'",
")",
"[",
"2",
"]",
"offset",
"=",
"self",
".",
"path",
".",
"index",
"(",
"self",
".",
"host",
")",
"offset",
"+=",
"len",
"(",
"self",
".",
"host",
")",
"+",
"1",
"endoffset",
"=",
"self",
".",
"path",
".",
"index",
"(",
"'.'",
",",
"offset",
")",
"return",
"self",
".",
"path",
"[",
"offset",
":",
"endoffset",
"]"
] |
Returns collector path
servers.host.cpu.total.idle
return "cpu"
|
[
"Returns",
"collector",
"path",
"servers",
".",
"host",
".",
"cpu",
".",
"total",
".",
"idle",
"return",
"cpu"
] |
python
|
train
| 32.333333 |
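The slice arithmetic above can be traced with a concrete metric path; a minimal sketch (the host name is illustrative):

```python
path = "servers.host1.cpu.total.idle"
host = "host1"
offset = path.index(host) + len(host) + 1  # skip past "host1."
endoffset = path.index('.', offset)        # end of the collector component
print(path[offset:endoffset])              # -> "cpu"
```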