text (string, lengths 89 to 104k) | code_tokens (list) | avg_line_len (float64, 7.91 to 980) | score (float64, 0 to 630) |
---|---|---|---|
def text_fd_to_metric_families(fd):
"""Parse Prometheus text format from a file descriptor.
This is a laxer parser than the main Go parser,
so successful parsing does not imply that the parsed
text meets the specification.
Yields Metric objects.
"""
name = None
allowed_names = []
eof = False
seen_metrics = set()
def build_metric(name, documentation, typ, unit, samples):
if name in seen_metrics:
raise ValueError("Duplicate metric: " + name)
seen_metrics.add(name)
if typ is None:
typ = 'unknown'
if documentation is None:
documentation = ''
if unit is None:
unit = ''
if unit and not name.endswith("_" + unit):
raise ValueError("Unit does not match metric name: " + name)
if unit and typ in ['info', 'stateset']:
raise ValueError("Units not allowed for this metric type: " + name)
if typ in ['histogram', 'gaugehistogram']:
_check_histogram(samples, name)
metric = Metric(name, documentation, typ, unit)
# TODO: check labelvalues are valid utf8
metric.samples = samples
return metric
for line in fd:
if line[-1] == '\n':
line = line[:-1]
if eof:
raise ValueError("Received line after # EOF: " + line)
if line == '# EOF':
eof = True
elif line.startswith('#'):
parts = line.split(' ', 3)
if len(parts) < 4:
raise ValueError("Invalid line: " + line)
if parts[2] == name and samples:
raise ValueError("Received metadata after samples: " + line)
if parts[2] != name:
if name is not None:
yield build_metric(name, documentation, typ, unit, samples)
# New metric
name = parts[2]
unit = None
typ = None
documentation = None
group = None
seen_groups = set()
group_timestamp = None
group_timestamp_samples = set()
samples = []
allowed_names = [parts[2]]
if parts[1] == 'HELP':
if documentation is not None:
raise ValueError("More than one HELP for metric: " + line)
if len(parts) == 4:
documentation = _unescape_help(parts[3])
elif len(parts) == 3:
raise ValueError("Invalid line: " + line)
elif parts[1] == 'TYPE':
if typ is not None:
raise ValueError("More than one TYPE for metric: " + line)
typ = parts[3]
if typ == 'untyped':
raise ValueError("Invalid TYPE for metric: " + line)
allowed_names = {
'counter': ['_total', '_created'],
'summary': ['_count', '_sum', '', '_created'],
'histogram': ['_count', '_sum', '_bucket', '_created'],
'gaugehistogram': ['_gcount', '_gsum', '_bucket'],
'info': ['_info'],
}.get(typ, [''])
allowed_names = [name + n for n in allowed_names]
elif parts[1] == 'UNIT':
if unit is not None:
raise ValueError("More than one UNIT for metric: " + line)
unit = parts[3]
else:
raise ValueError("Invalid line: " + line)
else:
sample = _parse_sample(line)
if sample.name not in allowed_names:
if name is not None:
yield build_metric(name, documentation, typ, unit, samples)
# Start an unknown metric.
name = sample.name
documentation = None
unit = None
typ = 'unknown'
samples = []
group = None
group_timestamp = None
group_timestamp_samples = set()
seen_groups = set()
allowed_names = [sample.name]
if typ == 'stateset' and name not in sample.labels:
raise ValueError("Stateset missing label: " + line)
if (typ in ['histogram', 'gaugehistogram'] and name + '_bucket' == sample.name
and (float(sample.labels.get('le', -1)) < 0
or sample.labels['le'] != floatToGoString(sample.labels['le']))):
raise ValueError("Invalid le label: " + line)
if (typ == 'summary' and name == sample.name
and (not (0 <= float(sample.labels.get('quantile', -1)) <= 1)
or sample.labels['quantile'] != floatToGoString(sample.labels['quantile']))):
raise ValueError("Invalid quantile label: " + line)
g = tuple(sorted(_group_for_sample(sample, name, typ).items()))
if group is not None and g != group and g in seen_groups:
raise ValueError("Invalid metric grouping: " + line)
if group is not None and g == group:
if (sample.timestamp is None) != (group_timestamp is None):
raise ValueError("Mix of timestamp presence within a group: " + line)
if group_timestamp is not None and group_timestamp > sample.timestamp and typ != 'info':
raise ValueError("Timestamps went backwards within a group: " + line)
else:
group_timestamp_samples = set()
series_id = (sample.name, tuple(sorted(sample.labels.items())))
if sample.timestamp != group_timestamp or series_id not in group_timestamp_samples:
# Not a duplicate due to timestamp truncation.
samples.append(sample)
group_timestamp_samples.add(series_id)
group = g
group_timestamp = sample.timestamp
seen_groups.add(g)
if typ == 'stateset' and sample.value not in [0, 1]:
raise ValueError("Stateset samples can only have values zero and one: " + line)
if typ == 'info' and sample.value != 1:
raise ValueError("Info samples can only have value one: " + line)
if typ == 'summary' and name == sample.name and sample.value < 0:
raise ValueError("Quantile values cannot be negative: " + line)
if sample.name[len(name):] in ['_total', '_sum', '_count', '_bucket', '_gcount', '_gsum'] and math.isnan(
sample.value):
raise ValueError("Counter-like samples cannot be NaN: " + line)
if sample.name[len(name):] in ['_total', '_sum', '_count', '_bucket', '_gcount',
'_gsum'] and sample.value < 0:
raise ValueError("Counter-like samples cannot be negative: " + line)
if sample.exemplar and not (
typ in ['histogram', 'gaugehistogram']
and sample.name.endswith('_bucket')):
raise ValueError("Invalid line only histogram/gaugehistogram buckets can have exemplars: " + line)
if name is not None:
yield build_metric(name, documentation, typ, unit, samples)
if not eof:
raise ValueError("Missing # EOF at end")
|
[
"def",
"text_fd_to_metric_families",
"(",
"fd",
")",
":",
"name",
"=",
"None",
"allowed_names",
"=",
"[",
"]",
"eof",
"=",
"False",
"seen_metrics",
"=",
"set",
"(",
")",
"def",
"build_metric",
"(",
"name",
",",
"documentation",
",",
"typ",
",",
"unit",
",",
"samples",
")",
":",
"if",
"name",
"in",
"seen_metrics",
":",
"raise",
"ValueError",
"(",
"\"Duplicate metric: \"",
"+",
"name",
")",
"seen_metrics",
".",
"add",
"(",
"name",
")",
"if",
"typ",
"is",
"None",
":",
"typ",
"=",
"'unknown'",
"if",
"documentation",
"is",
"None",
":",
"documentation",
"=",
"''",
"if",
"unit",
"is",
"None",
":",
"unit",
"=",
"''",
"if",
"unit",
"and",
"not",
"name",
".",
"endswith",
"(",
"\"_\"",
"+",
"unit",
")",
":",
"raise",
"ValueError",
"(",
"\"Unit does not match metric name: \"",
"+",
"name",
")",
"if",
"unit",
"and",
"typ",
"in",
"[",
"'info'",
",",
"'stateset'",
"]",
":",
"raise",
"ValueError",
"(",
"\"Units not allowed for this metric type: \"",
"+",
"name",
")",
"if",
"typ",
"in",
"[",
"'histogram'",
",",
"'gaugehistogram'",
"]",
":",
"_check_histogram",
"(",
"samples",
",",
"name",
")",
"metric",
"=",
"Metric",
"(",
"name",
",",
"documentation",
",",
"typ",
",",
"unit",
")",
"# TODO: check labelvalues are valid utf8",
"metric",
".",
"samples",
"=",
"samples",
"return",
"metric",
"for",
"line",
"in",
"fd",
":",
"if",
"line",
"[",
"-",
"1",
"]",
"==",
"'\\n'",
":",
"line",
"=",
"line",
"[",
":",
"-",
"1",
"]",
"if",
"eof",
":",
"raise",
"ValueError",
"(",
"\"Received line after # EOF: \"",
"+",
"line",
")",
"if",
"line",
"==",
"'# EOF'",
":",
"eof",
"=",
"True",
"elif",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"parts",
"=",
"line",
".",
"split",
"(",
"' '",
",",
"3",
")",
"if",
"len",
"(",
"parts",
")",
"<",
"4",
":",
"raise",
"ValueError",
"(",
"\"Invalid line: \"",
"+",
"line",
")",
"if",
"parts",
"[",
"2",
"]",
"==",
"name",
"and",
"samples",
":",
"raise",
"ValueError",
"(",
"\"Received metadata after samples: \"",
"+",
"line",
")",
"if",
"parts",
"[",
"2",
"]",
"!=",
"name",
":",
"if",
"name",
"is",
"not",
"None",
":",
"yield",
"build_metric",
"(",
"name",
",",
"documentation",
",",
"typ",
",",
"unit",
",",
"samples",
")",
"# New metric",
"name",
"=",
"parts",
"[",
"2",
"]",
"unit",
"=",
"None",
"typ",
"=",
"None",
"documentation",
"=",
"None",
"group",
"=",
"None",
"seen_groups",
"=",
"set",
"(",
")",
"group_timestamp",
"=",
"None",
"group_timestamp_samples",
"=",
"set",
"(",
")",
"samples",
"=",
"[",
"]",
"allowed_names",
"=",
"[",
"parts",
"[",
"2",
"]",
"]",
"if",
"parts",
"[",
"1",
"]",
"==",
"'HELP'",
":",
"if",
"documentation",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"More than one HELP for metric: \"",
"+",
"line",
")",
"if",
"len",
"(",
"parts",
")",
"==",
"4",
":",
"documentation",
"=",
"_unescape_help",
"(",
"parts",
"[",
"3",
"]",
")",
"elif",
"len",
"(",
"parts",
")",
"==",
"3",
":",
"raise",
"ValueError",
"(",
"\"Invalid line: \"",
"+",
"line",
")",
"elif",
"parts",
"[",
"1",
"]",
"==",
"'TYPE'",
":",
"if",
"typ",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"More than one TYPE for metric: \"",
"+",
"line",
")",
"typ",
"=",
"parts",
"[",
"3",
"]",
"if",
"typ",
"==",
"'untyped'",
":",
"raise",
"ValueError",
"(",
"\"Invalid TYPE for metric: \"",
"+",
"line",
")",
"allowed_names",
"=",
"{",
"'counter'",
":",
"[",
"'_total'",
",",
"'_created'",
"]",
",",
"'summary'",
":",
"[",
"'_count'",
",",
"'_sum'",
",",
"''",
",",
"'_created'",
"]",
",",
"'histogram'",
":",
"[",
"'_count'",
",",
"'_sum'",
",",
"'_bucket'",
",",
"'_created'",
"]",
",",
"'gaugehistogram'",
":",
"[",
"'_gcount'",
",",
"'_gsum'",
",",
"'_bucket'",
"]",
",",
"'info'",
":",
"[",
"'_info'",
"]",
",",
"}",
".",
"get",
"(",
"typ",
",",
"[",
"''",
"]",
")",
"allowed_names",
"=",
"[",
"name",
"+",
"n",
"for",
"n",
"in",
"allowed_names",
"]",
"elif",
"parts",
"[",
"1",
"]",
"==",
"'UNIT'",
":",
"if",
"unit",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"More than one UNIT for metric: \"",
"+",
"line",
")",
"unit",
"=",
"parts",
"[",
"3",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid line: \"",
"+",
"line",
")",
"else",
":",
"sample",
"=",
"_parse_sample",
"(",
"line",
")",
"if",
"sample",
".",
"name",
"not",
"in",
"allowed_names",
":",
"if",
"name",
"is",
"not",
"None",
":",
"yield",
"build_metric",
"(",
"name",
",",
"documentation",
",",
"typ",
",",
"unit",
",",
"samples",
")",
"# Start an unknown metric.",
"name",
"=",
"sample",
".",
"name",
"documentation",
"=",
"None",
"unit",
"=",
"None",
"typ",
"=",
"'unknown'",
"samples",
"=",
"[",
"]",
"group",
"=",
"None",
"group_timestamp",
"=",
"None",
"group_timestamp_samples",
"=",
"set",
"(",
")",
"seen_groups",
"=",
"set",
"(",
")",
"allowed_names",
"=",
"[",
"sample",
".",
"name",
"]",
"if",
"typ",
"==",
"'stateset'",
"and",
"name",
"not",
"in",
"sample",
".",
"labels",
":",
"raise",
"ValueError",
"(",
"\"Stateset missing label: \"",
"+",
"line",
")",
"if",
"(",
"typ",
"in",
"[",
"'histogram'",
",",
"'gaugehistogram'",
"]",
"and",
"name",
"+",
"'_bucket'",
"==",
"sample",
".",
"name",
"and",
"(",
"float",
"(",
"sample",
".",
"labels",
".",
"get",
"(",
"'le'",
",",
"-",
"1",
")",
")",
"<",
"0",
"or",
"sample",
".",
"labels",
"[",
"'le'",
"]",
"!=",
"floatToGoString",
"(",
"sample",
".",
"labels",
"[",
"'le'",
"]",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid le label: \"",
"+",
"line",
")",
"if",
"(",
"typ",
"==",
"'summary'",
"and",
"name",
"==",
"sample",
".",
"name",
"and",
"(",
"not",
"(",
"0",
"<=",
"float",
"(",
"sample",
".",
"labels",
".",
"get",
"(",
"'quantile'",
",",
"-",
"1",
")",
")",
"<=",
"1",
")",
"or",
"sample",
".",
"labels",
"[",
"'quantile'",
"]",
"!=",
"floatToGoString",
"(",
"sample",
".",
"labels",
"[",
"'quantile'",
"]",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid quantile label: \"",
"+",
"line",
")",
"g",
"=",
"tuple",
"(",
"sorted",
"(",
"_group_for_sample",
"(",
"sample",
",",
"name",
",",
"typ",
")",
".",
"items",
"(",
")",
")",
")",
"if",
"group",
"is",
"not",
"None",
"and",
"g",
"!=",
"group",
"and",
"g",
"in",
"seen_groups",
":",
"raise",
"ValueError",
"(",
"\"Invalid metric grouping: \"",
"+",
"line",
")",
"if",
"group",
"is",
"not",
"None",
"and",
"g",
"==",
"group",
":",
"if",
"(",
"sample",
".",
"timestamp",
"is",
"None",
")",
"!=",
"(",
"group_timestamp",
"is",
"None",
")",
":",
"raise",
"ValueError",
"(",
"\"Mix of timestamp presence within a group: \"",
"+",
"line",
")",
"if",
"group_timestamp",
"is",
"not",
"None",
"and",
"group_timestamp",
">",
"sample",
".",
"timestamp",
"and",
"typ",
"!=",
"'info'",
":",
"raise",
"ValueError",
"(",
"\"Timestamps went backwards within a group: \"",
"+",
"line",
")",
"else",
":",
"group_timestamp_samples",
"=",
"set",
"(",
")",
"series_id",
"=",
"(",
"sample",
".",
"name",
",",
"tuple",
"(",
"sorted",
"(",
"sample",
".",
"labels",
".",
"items",
"(",
")",
")",
")",
")",
"if",
"sample",
".",
"timestamp",
"!=",
"group_timestamp",
"or",
"series_id",
"not",
"in",
"group_timestamp_samples",
":",
"# Not a duplicate due to timestamp truncation.",
"samples",
".",
"append",
"(",
"sample",
")",
"group_timestamp_samples",
".",
"add",
"(",
"series_id",
")",
"group",
"=",
"g",
"group_timestamp",
"=",
"sample",
".",
"timestamp",
"seen_groups",
".",
"add",
"(",
"g",
")",
"if",
"typ",
"==",
"'stateset'",
"and",
"sample",
".",
"value",
"not",
"in",
"[",
"0",
",",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"\"Stateset samples can only have values zero and one: \"",
"+",
"line",
")",
"if",
"typ",
"==",
"'info'",
"and",
"sample",
".",
"value",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"Info samples can only have value one: \"",
"+",
"line",
")",
"if",
"typ",
"==",
"'summary'",
"and",
"name",
"==",
"sample",
".",
"name",
"and",
"sample",
".",
"value",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Quantile values cannot be negative: \"",
"+",
"line",
")",
"if",
"sample",
".",
"name",
"[",
"len",
"(",
"name",
")",
":",
"]",
"in",
"[",
"'_total'",
",",
"'_sum'",
",",
"'_count'",
",",
"'_bucket'",
",",
"'_gcount'",
",",
"'_gsum'",
"]",
"and",
"math",
".",
"isnan",
"(",
"sample",
".",
"value",
")",
":",
"raise",
"ValueError",
"(",
"\"Counter-like samples cannot be NaN: \"",
"+",
"line",
")",
"if",
"sample",
".",
"name",
"[",
"len",
"(",
"name",
")",
":",
"]",
"in",
"[",
"'_total'",
",",
"'_sum'",
",",
"'_count'",
",",
"'_bucket'",
",",
"'_gcount'",
",",
"'_gsum'",
"]",
"and",
"sample",
".",
"value",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Counter-like samples cannot be negative: \"",
"+",
"line",
")",
"if",
"sample",
".",
"exemplar",
"and",
"not",
"(",
"typ",
"in",
"[",
"'histogram'",
",",
"'gaugehistogram'",
"]",
"and",
"sample",
".",
"name",
".",
"endswith",
"(",
"'_bucket'",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid line only histogram/gaugehistogram buckets can have exemplars: \"",
"+",
"line",
")",
"if",
"name",
"is",
"not",
"None",
":",
"yield",
"build_metric",
"(",
"name",
",",
"documentation",
",",
"typ",
",",
"unit",
",",
"samples",
")",
"if",
"not",
"eof",
":",
"raise",
"ValueError",
"(",
"\"Missing # EOF at end\"",
")"
] | 44.45122 | 20.95122 |
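For a sense of what the TYPE handling in the parser above produces, the per-type suffix table can be applied to a hypothetical metric name like this (sketch only; the suffix table is copied from the function, the metric name is made up):

suffixes = {
    'counter': ['_total', '_created'],
    'summary': ['_count', '_sum', '', '_created'],
    'histogram': ['_count', '_sum', '_bucket', '_created'],
    'gaugehistogram': ['_gcount', '_gsum', '_bucket'],
    'info': ['_info'],
}
name = 'http_request_duration_seconds'  # hypothetical metric family name
allowed_names = [name + n for n in suffixes.get('histogram', [''])]
print(allowed_names)
# ['http_request_duration_seconds_count', 'http_request_duration_seconds_sum',
#  'http_request_duration_seconds_bucket', 'http_request_duration_seconds_created']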
def find_jobs(self, job_ids):
"""Finds the jobs in the completed job queue."""
matched_jobs = []
if self.skip:
return matched_jobs
json_data = self.download_queue(job_ids)
if not json_data:
return matched_jobs
jobs = json_data["jobs"]
for job in jobs:
if (
job.get("id") in job_ids
and job.get("status", "").lower() not in _NOT_FINISHED_STATUSES
):
matched_jobs.append(job)
return matched_jobs
|
[
"def",
"find_jobs",
"(",
"self",
",",
"job_ids",
")",
":",
"matched_jobs",
"=",
"[",
"]",
"if",
"self",
".",
"skip",
":",
"return",
"matched_jobs",
"json_data",
"=",
"self",
".",
"download_queue",
"(",
"job_ids",
")",
"if",
"not",
"json_data",
":",
"return",
"matched_jobs",
"jobs",
"=",
"json_data",
"[",
"\"jobs\"",
"]",
"for",
"job",
"in",
"jobs",
":",
"if",
"(",
"job",
".",
"get",
"(",
"\"id\"",
")",
"in",
"job_ids",
"and",
"job",
".",
"get",
"(",
"\"status\"",
",",
"\"\"",
")",
".",
"lower",
"(",
")",
"not",
"in",
"_NOT_FINISHED_STATUSES",
")",
":",
"matched_jobs",
".",
"append",
"(",
"job",
")",
"return",
"matched_jobs"
] | 28.315789 | 17.473684 |
def visit_while(self, node, parent):
"""visit a While node by returning a fresh instance of it"""
newnode = nodes.While(node.lineno, node.col_offset, parent)
newnode.postinit(
self.visit(node.test, newnode),
[self.visit(child, newnode) for child in node.body],
[self.visit(child, newnode) for child in node.orelse],
)
return newnode
|
[
"def",
"visit_while",
"(",
"self",
",",
"node",
",",
"parent",
")",
":",
"newnode",
"=",
"nodes",
".",
"While",
"(",
"node",
".",
"lineno",
",",
"node",
".",
"col_offset",
",",
"parent",
")",
"newnode",
".",
"postinit",
"(",
"self",
".",
"visit",
"(",
"node",
".",
"test",
",",
"newnode",
")",
",",
"[",
"self",
".",
"visit",
"(",
"child",
",",
"newnode",
")",
"for",
"child",
"in",
"node",
".",
"body",
"]",
",",
"[",
"self",
".",
"visit",
"(",
"child",
",",
"newnode",
")",
"for",
"child",
"in",
"node",
".",
"orelse",
"]",
",",
")",
"return",
"newnode"
] | 44.444444 | 16.444444 |
def request(self, url, method='GET', params=None, data=None,
expected_response_code=200, headers=None):
"""Make a HTTP request to the InfluxDB API.
:param url: the path of the HTTP request, e.g. write, query, etc.
:type url: str
:param method: the HTTP method for the request, defaults to GET
:type method: str
:param params: additional parameters for the request, defaults to None
:type params: dict
:param data: the data of the request, defaults to None
:type data: str
:param expected_response_code: the expected response code of
the request, defaults to 200
:type expected_response_code: int
:param headers: headers to add to the request
:type headers: dict
:returns: the response from the request
:rtype: :class:`requests.Response`
:raises InfluxDBServerError: if the response code is any server error
code (5xx)
:raises InfluxDBClientError: if the response code is not the
same as `expected_response_code` and is not a server error code
"""
url = "{0}/{1}".format(self._baseurl, url)
if headers is None:
headers = self._headers
if params is None:
params = {}
if isinstance(data, (dict, list)):
data = json.dumps(data)
# Try to send the request more than once by default (see #103)
retry = True
_try = 0
while retry:
try:
response = self._session.request(
method=method,
url=url,
auth=(self._username, self._password),
params=params,
data=data,
headers=headers,
proxies=self._proxies,
verify=self._verify_ssl,
timeout=self._timeout
)
break
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout):
_try += 1
if self._retries != 0:
retry = _try < self._retries
if method == "POST":
time.sleep((2 ** _try) * random.random() / 100.0)
if not retry:
raise
# if there's not an error, there must have been a successful response
if 500 <= response.status_code < 600:
raise InfluxDBServerError(response.content)
elif response.status_code == expected_response_code:
return response
else:
raise InfluxDBClientError(response.content, response.status_code)
|
[
"def",
"request",
"(",
"self",
",",
"url",
",",
"method",
"=",
"'GET'",
",",
"params",
"=",
"None",
",",
"data",
"=",
"None",
",",
"expected_response_code",
"=",
"200",
",",
"headers",
"=",
"None",
")",
":",
"url",
"=",
"\"{0}/{1}\"",
".",
"format",
"(",
"self",
".",
"_baseurl",
",",
"url",
")",
"if",
"headers",
"is",
"None",
":",
"headers",
"=",
"self",
".",
"_headers",
"if",
"params",
"is",
"None",
":",
"params",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"data",
",",
"(",
"dict",
",",
"list",
")",
")",
":",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
"# Try to send the request more than once by default (see #103)",
"retry",
"=",
"True",
"_try",
"=",
"0",
"while",
"retry",
":",
"try",
":",
"response",
"=",
"self",
".",
"_session",
".",
"request",
"(",
"method",
"=",
"method",
",",
"url",
"=",
"url",
",",
"auth",
"=",
"(",
"self",
".",
"_username",
",",
"self",
".",
"_password",
")",
",",
"params",
"=",
"params",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"headers",
",",
"proxies",
"=",
"self",
".",
"_proxies",
",",
"verify",
"=",
"self",
".",
"_verify_ssl",
",",
"timeout",
"=",
"self",
".",
"_timeout",
")",
"break",
"except",
"(",
"requests",
".",
"exceptions",
".",
"ConnectionError",
",",
"requests",
".",
"exceptions",
".",
"HTTPError",
",",
"requests",
".",
"exceptions",
".",
"Timeout",
")",
":",
"_try",
"+=",
"1",
"if",
"self",
".",
"_retries",
"!=",
"0",
":",
"retry",
"=",
"_try",
"<",
"self",
".",
"_retries",
"if",
"method",
"==",
"\"POST\"",
":",
"time",
".",
"sleep",
"(",
"(",
"2",
"**",
"_try",
")",
"*",
"random",
".",
"random",
"(",
")",
"/",
"100.0",
")",
"if",
"not",
"retry",
":",
"raise",
"# if there's not an error, there must have been a successful response",
"if",
"500",
"<=",
"response",
".",
"status_code",
"<",
"600",
":",
"raise",
"InfluxDBServerError",
"(",
"response",
".",
"content",
")",
"elif",
"response",
".",
"status_code",
"==",
"expected_response_code",
":",
"return",
"response",
"else",
":",
"raise",
"InfluxDBClientError",
"(",
"response",
".",
"content",
",",
"response",
".",
"status_code",
")"
] | 39.318841 | 17.086957 |
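The retry loop above sleeps for (2 ** _try) * random.random() / 100.0 seconds between failed POST attempts; a quick standalone look at the scale of those delays (illustrative only, seeded just for reproducibility):

import random

random.seed(0)  # only so the illustration is reproducible
for _try in range(1, 5):
    delay = (2 ** _try) * random.random() / 100.0
    print(_try, round(delay, 4))  # upper bound doubles each attempt: 0.02, 0.04, 0.08, 0.16 s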
def _check_pattern_list(patterns, key, default=None):
"""Validates file search patterns from user configuration.
Acceptable input is a string (which will be converted to a singleton list),
a list of strings, or anything falsy (such as None or an empty dictionary).
Empty or unset input will be converted to a default.
Args:
patterns: input from user configuration (YAML).
key (str): name of the configuration key the input came from,
used for error display purposes.
Keyword Args:
default: value to return in case the input is empty or unset.
Returns:
list[str]: validated list of patterns
Raises:
ValueError: if the input is unacceptable.
"""
if not patterns:
return default
if isinstance(patterns, basestring):
return [patterns]
if isinstance(patterns, list):
if all(isinstance(p, basestring) for p in patterns):
return patterns
raise ValueError("Invalid file patterns in key '{}': must be a string or "
'list of strings'.format(key))
|
[
"def",
"_check_pattern_list",
"(",
"patterns",
",",
"key",
",",
"default",
"=",
"None",
")",
":",
"if",
"not",
"patterns",
":",
"return",
"default",
"if",
"isinstance",
"(",
"patterns",
",",
"basestring",
")",
":",
"return",
"[",
"patterns",
"]",
"if",
"isinstance",
"(",
"patterns",
",",
"list",
")",
":",
"if",
"all",
"(",
"isinstance",
"(",
"p",
",",
"basestring",
")",
"for",
"p",
"in",
"patterns",
")",
":",
"return",
"patterns",
"raise",
"ValueError",
"(",
"\"Invalid file patterns in key '{}': must be a string or \"",
"'list of strings'",
".",
"format",
"(",
"key",
")",
")"
] | 32.515152 | 23.333333 |
def process_utterance_online(self, utterance, frame_size=400, hop_size=160, chunk_size=1,
buffer_size=5760000, corpus=None):
"""
Process the utterance in **online** mode, chunk by chunk.
The processed chunks are yielded one after another.
Args:
utterance (Utterance): The utterance to process.
frame_size (int): The number of samples per frame.
hop_size (int): The number of samples between two frames.
chunk_size (int): Number of frames to process per chunk.
buffer_size (int): Number of samples to load into memory at once.
The exact number of loaded samples depends on the block size of the audioread library,
so it can be up to one block size higher; the block size is typically 1024 or 4096.
corpus (Corpus): The corpus this utterance is part of, if available.
Returns:
Generator: A generator that yields the processed chunks.
"""
return self.process_track_online(utterance.track,
frame_size=frame_size,
hop_size=hop_size,
start=utterance.start,
end=utterance.end,
utterance=utterance,
corpus=corpus,
chunk_size=chunk_size,
buffer_size=buffer_size)
|
[
"def",
"process_utterance_online",
"(",
"self",
",",
"utterance",
",",
"frame_size",
"=",
"400",
",",
"hop_size",
"=",
"160",
",",
"chunk_size",
"=",
"1",
",",
"buffer_size",
"=",
"5760000",
",",
"corpus",
"=",
"None",
")",
":",
"return",
"self",
".",
"process_track_online",
"(",
"utterance",
".",
"track",
",",
"frame_size",
"=",
"frame_size",
",",
"hop_size",
"=",
"hop_size",
",",
"start",
"=",
"utterance",
".",
"start",
",",
"end",
"=",
"utterance",
".",
"end",
",",
"utterance",
"=",
"utterance",
",",
"corpus",
"=",
"corpus",
",",
"chunk_size",
"=",
"chunk_size",
",",
"buffer_size",
"=",
"buffer_size",
")"
] | 56.464286 | 27.892857 |
def deepcopy_strip(item): # type: (Any) -> Any
"""
Make a deep copy of list and dict objects.
Intentionally do not copy attributes. This is to discard CommentedMap and
CommentedSeq metadata which is very expensive with regular copy.deepcopy.
"""
if isinstance(item, MutableMapping):
return {k: deepcopy_strip(v) for k, v in iteritems(item)}
if isinstance(item, MutableSequence):
return [deepcopy_strip(k) for k in item]
return item
|
[
"def",
"deepcopy_strip",
"(",
"item",
")",
":",
"# type: (Any) -> Any",
"if",
"isinstance",
"(",
"item",
",",
"MutableMapping",
")",
":",
"return",
"{",
"k",
":",
"deepcopy_strip",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"iteritems",
"(",
"item",
")",
"}",
"if",
"isinstance",
"(",
"item",
",",
"MutableSequence",
")",
":",
"return",
"[",
"deepcopy_strip",
"(",
"k",
")",
"for",
"k",
"in",
"item",
"]",
"return",
"item"
] | 36.230769 | 17.461538 |
def add_default_name(text):
"""
Go through each line of the text and ensure that
a name is defined. Use '@' if there is none.
"""
global SUPPORTED_RECORDS
lines = text.split("\n")
ret = []
for line in lines:
tokens = tokenize_line(line)
if len(tokens) == 0:
continue
if tokens[0] in SUPPORTED_RECORDS and not tokens[0].startswith("$"):
# add back the name
tokens = ['@'] + tokens
ret.append(serialize(tokens))
return "\n".join(ret)
|
[
"def",
"add_default_name",
"(",
"text",
")",
":",
"global",
"SUPPORTED_RECORDS",
"lines",
"=",
"text",
".",
"split",
"(",
"\"\\n\"",
")",
"ret",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
":",
"tokens",
"=",
"tokenize_line",
"(",
"line",
")",
"if",
"len",
"(",
"tokens",
")",
"==",
"0",
":",
"continue",
"if",
"tokens",
"[",
"0",
"]",
"in",
"SUPPORTED_RECORDS",
"and",
"not",
"tokens",
"[",
"0",
"]",
".",
"startswith",
"(",
"\"$\"",
")",
":",
"# add back the name",
"tokens",
"=",
"[",
"'@'",
"]",
"+",
"tokens",
"ret",
".",
"append",
"(",
"serialize",
"(",
"tokens",
")",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"ret",
")"
] | 24.857143 | 17.52381 |
def create_filters(predicate_params, predicate_factory):
"""Create filter functions from a list of string parameters.
:param predicate_params: A list of predicate_param arguments as in `create_filter`.
:param predicate_factory: As in `create_filter`.
"""
filters = []
for predicate_param in predicate_params:
filters.append(create_filter(predicate_param, predicate_factory))
return filters
|
[
"def",
"create_filters",
"(",
"predicate_params",
",",
"predicate_factory",
")",
":",
"filters",
"=",
"[",
"]",
"for",
"predicate_param",
"in",
"predicate_params",
":",
"filters",
".",
"append",
"(",
"create_filter",
"(",
"predicate_param",
",",
"predicate_factory",
")",
")",
"return",
"filters"
] | 39.9 | 19.2 |
def _nodedev_event_update_cb(conn, dev, opaque):
'''
Node device update events handler
'''
_salt_send_event(opaque, conn, {
'nodedev': {
'name': dev.name()
},
'event': opaque['event']
})
|
[
"def",
"_nodedev_event_update_cb",
"(",
"conn",
",",
"dev",
",",
"opaque",
")",
":",
"_salt_send_event",
"(",
"opaque",
",",
"conn",
",",
"{",
"'nodedev'",
":",
"{",
"'name'",
":",
"dev",
".",
"name",
"(",
")",
"}",
",",
"'event'",
":",
"opaque",
"[",
"'event'",
"]",
"}",
")"
] | 23.3 | 18.3 |
def deconv2d(self, filter_size, output_channels, stride=1, padding='SAME', activation_fn=tf.nn.relu, b_value=0.0,
s_value=1.0, bn=True, trainable=True):
"""
2D Deconvolutional Layer
:param filter_size: int. assumes square filter
:param output_channels: int
:param stride: int
:param padding: 'VALID' or 'SAME'
:param activation_fn: tf.nn function
:param b_value: float
:param s_value: float
"""
self.count['deconv'] += 1
scope = 'deconv_' + str(self.count['deconv'])
with tf.variable_scope(scope):
# Calculate the dimensions for deconv function
batch_size = tf.shape(self.input)[0]
input_height = tf.shape(self.input)[1]
input_width = tf.shape(self.input)[2]
if padding == "VALID":
out_rows = (input_height - 1) * stride + filter_size
out_cols = (input_width - 1) * stride + filter_size
else: # padding == "SAME":
out_rows = input_height * stride
out_cols = input_width * stride
# Deconv function
input_channels = self.input.get_shape()[3]
output_shape = [filter_size, filter_size, output_channels, input_channels]
w = self.weight_variable(name='weights', shape=output_shape, trainable=trainable)
deconv_out_shape = tf.stack([batch_size, out_rows, out_cols, output_channels])
self.input = tf.nn.conv2d_transpose(self.input, w, deconv_out_shape, [1, stride, stride, 1], padding)
if bn is True: # batch normalization
self.input = self.batch_norm(self.input)
if b_value is not None: # bias value
b = self.const_variable(name='bias', shape=[output_channels], value=b_value, trainable=trainable)
self.input = tf.add(self.input, b)
if s_value is not None: # scale value
s = self.const_variable(name='scale', shape=[output_channels], value=s_value, trainable=trainable)
self.input = tf.multiply(self.input, s)
if activation_fn is not None: # non-linear activation function
self.input = activation_fn(self.input)
print(scope + ' output: ' + str(self.input.get_shape()))
|
[
"def",
"deconv2d",
"(",
"self",
",",
"filter_size",
",",
"output_channels",
",",
"stride",
"=",
"1",
",",
"padding",
"=",
"'SAME'",
",",
"activation_fn",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"b_value",
"=",
"0.0",
",",
"s_value",
"=",
"1.0",
",",
"bn",
"=",
"True",
",",
"trainable",
"=",
"True",
")",
":",
"self",
".",
"count",
"[",
"'deconv'",
"]",
"+=",
"1",
"scope",
"=",
"'deconv_'",
"+",
"str",
"(",
"self",
".",
"count",
"[",
"'deconv'",
"]",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"scope",
")",
":",
"# Calculate the dimensions for deconv function",
"batch_size",
"=",
"tf",
".",
"shape",
"(",
"self",
".",
"input",
")",
"[",
"0",
"]",
"input_height",
"=",
"tf",
".",
"shape",
"(",
"self",
".",
"input",
")",
"[",
"1",
"]",
"input_width",
"=",
"tf",
".",
"shape",
"(",
"self",
".",
"input",
")",
"[",
"2",
"]",
"if",
"padding",
"==",
"\"VALID\"",
":",
"out_rows",
"=",
"(",
"input_height",
"-",
"1",
")",
"*",
"stride",
"+",
"filter_size",
"out_cols",
"=",
"(",
"input_width",
"-",
"1",
")",
"*",
"stride",
"+",
"filter_size",
"else",
":",
"# padding == \"SAME\":",
"out_rows",
"=",
"input_height",
"*",
"stride",
"out_cols",
"=",
"input_width",
"*",
"stride",
"# Deconv function",
"input_channels",
"=",
"self",
".",
"input",
".",
"get_shape",
"(",
")",
"[",
"3",
"]",
"output_shape",
"=",
"[",
"filter_size",
",",
"filter_size",
",",
"output_channels",
",",
"input_channels",
"]",
"w",
"=",
"self",
".",
"weight_variable",
"(",
"name",
"=",
"'weights'",
",",
"shape",
"=",
"output_shape",
",",
"trainable",
"=",
"trainable",
")",
"deconv_out_shape",
"=",
"tf",
".",
"stack",
"(",
"[",
"batch_size",
",",
"out_rows",
",",
"out_cols",
",",
"output_channels",
"]",
")",
"self",
".",
"input",
"=",
"tf",
".",
"nn",
".",
"conv2d_transpose",
"(",
"self",
".",
"input",
",",
"w",
",",
"deconv_out_shape",
",",
"[",
"1",
",",
"stride",
",",
"stride",
",",
"1",
"]",
",",
"padding",
")",
"if",
"bn",
"is",
"True",
":",
"# batch normalization",
"self",
".",
"input",
"=",
"self",
".",
"batch_norm",
"(",
"self",
".",
"input",
")",
"if",
"b_value",
"is",
"not",
"None",
":",
"# bias value",
"b",
"=",
"self",
".",
"const_variable",
"(",
"name",
"=",
"'bias'",
",",
"shape",
"=",
"[",
"output_channels",
"]",
",",
"value",
"=",
"b_value",
",",
"trainable",
"=",
"trainable",
")",
"self",
".",
"input",
"=",
"tf",
".",
"add",
"(",
"self",
".",
"input",
",",
"b",
")",
"if",
"s_value",
"is",
"not",
"None",
":",
"# scale value",
"s",
"=",
"self",
".",
"const_variable",
"(",
"name",
"=",
"'scale'",
",",
"shape",
"=",
"[",
"output_channels",
"]",
",",
"value",
"=",
"s_value",
",",
"trainable",
"=",
"trainable",
")",
"self",
".",
"input",
"=",
"tf",
".",
"multiply",
"(",
"self",
".",
"input",
",",
"s",
")",
"if",
"activation_fn",
"is",
"not",
"None",
":",
"# non-linear activation function",
"self",
".",
"input",
"=",
"activation_fn",
"(",
"self",
".",
"input",
")",
"print",
"(",
"scope",
"+",
"' output: '",
"+",
"str",
"(",
"self",
".",
"input",
".",
"get_shape",
"(",
")",
")",
")"
] | 50.130435 | 21.652174 |
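The output height/width computed inside deconv2d above follows the usual transpose-convolution size arithmetic; a small numeric check with made-up sizes (the helper name here is ours, not part of the class):

def deconv_out_size(in_size, filter_size, stride, padding):
    # mirrors the arithmetic in deconv2d above
    if padding == "VALID":
        return (in_size - 1) * stride + filter_size
    return in_size * stride  # "SAME"

print(deconv_out_size(16, 4, 2, "SAME"))   # 32
print(deconv_out_size(16, 4, 2, "VALID"))  # 34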
def body_encode(s, maxlinelen=76, eol=NL):
r"""Encode a string with base64.
Each line will be wrapped at, at most, maxlinelen characters (defaults to
76 characters).
Each line of encoded text will end with eol, which defaults to "\n". Set
this to "\r\n" if you will be using the result of this function directly
in an email.
"""
if not s:
return s
encvec = []
max_unencoded = maxlinelen * 3 // 4
for i in range(0, len(s), max_unencoded):
# BAW: should encode() inherit b2a_base64()'s dubious behavior in
# adding a newline to the encoded string?
enc = b2a_base64(s[i:i + max_unencoded]).decode("ascii")
if enc.endswith(NL) and eol != NL:
enc = enc[:-1] + eol
encvec.append(enc)
return EMPTYSTRING.join(encvec)
|
[
"def",
"body_encode",
"(",
"s",
",",
"maxlinelen",
"=",
"76",
",",
"eol",
"=",
"NL",
")",
":",
"if",
"not",
"s",
":",
"return",
"s",
"encvec",
"=",
"[",
"]",
"max_unencoded",
"=",
"maxlinelen",
"*",
"3",
"//",
"4",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"s",
")",
",",
"max_unencoded",
")",
":",
"# BAW: should encode() inherit b2a_base64()'s dubious behavior in",
"# adding a newline to the encoded string?",
"enc",
"=",
"b2a_base64",
"(",
"s",
"[",
"i",
":",
"i",
"+",
"max_unencoded",
"]",
")",
".",
"decode",
"(",
"\"ascii\"",
")",
"if",
"enc",
".",
"endswith",
"(",
"NL",
")",
"and",
"eol",
"!=",
"NL",
":",
"enc",
"=",
"enc",
"[",
":",
"-",
"1",
"]",
"+",
"eol",
"encvec",
".",
"append",
"(",
"enc",
")",
"return",
"EMPTYSTRING",
".",
"join",
"(",
"encvec",
")"
] | 34.73913 | 19.73913 |
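The docstring of body_encode above promises at most maxlinelen characters per encoded line; the chunking rule behind that (76 output characters per 57 raw bytes) can be checked directly with binascii (the payload below is made up):

from binascii import b2a_base64

payload = b"x" * 120          # hypothetical payload
max_unencoded = 76 * 3 // 4   # 57 raw bytes per full encoded line
lines = [b2a_base64(payload[i:i + max_unencoded]).decode("ascii")
         for i in range(0, len(payload), max_unencoded)]
print(len(lines), len(lines[0].rstrip("\n")))  # 3 chunks; a full line is 76 characters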
def has(self, url, xpath=None):
"""Check if a URL (and xpath) exists in the cache
If DB has not been initialized yet, returns ``False`` for any URL.
Args:
url (str): If given, clear specific item only. Otherwise remove the DB file.
xpath (str): xpath to search (may be ``None``)
Returns:
bool: ``True`` if URL exists, ``False`` otherwise
"""
if not path.exists(self.db_path):
return False
return self._query(url, xpath).count() > 0
|
[
"def",
"has",
"(",
"self",
",",
"url",
",",
"xpath",
"=",
"None",
")",
":",
"if",
"not",
"path",
".",
"exists",
"(",
"self",
".",
"db_path",
")",
":",
"return",
"False",
"return",
"self",
".",
"_query",
"(",
"url",
",",
"xpath",
")",
".",
"count",
"(",
")",
">",
"0"
] | 32.75 | 23 |
def find_one_and_replace(self, filter, replacement, **kwargs):
"""
See http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.find_one_and_replace
"""
self._arctic_lib.check_quota()
return self._collection.find_one_and_replace(filter, replacement, **kwargs)
|
[
"def",
"find_one_and_replace",
"(",
"self",
",",
"filter",
",",
"replacement",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_arctic_lib",
".",
"check_quota",
"(",
")",
"return",
"self",
".",
"_collection",
".",
"find_one_and_replace",
"(",
"filter",
",",
"replacement",
",",
"*",
"*",
"kwargs",
")"
] | 55.5 | 25.833333 |
def do_not_disturb(self):
"""Get if do not disturb is enabled."""
return bool(strtobool(str(self._settings_json.get(
CONST.SETTINGS_DO_NOT_DISTURB))))
|
[
"def",
"do_not_disturb",
"(",
"self",
")",
":",
"return",
"bool",
"(",
"strtobool",
"(",
"str",
"(",
"self",
".",
"_settings_json",
".",
"get",
"(",
"CONST",
".",
"SETTINGS_DO_NOT_DISTURB",
")",
")",
")",
")"
] | 43.75 | 9.5 |
def isConnected(self):
"""
Returns whether or not this connection is currently
active.
:return <bool> connected
"""
for pool in self.__pool.values():
if not pool.empty():
return True
return False
|
[
"def",
"isConnected",
"(",
"self",
")",
":",
"for",
"pool",
"in",
"self",
".",
"__pool",
".",
"values",
"(",
")",
":",
"if",
"not",
"pool",
".",
"empty",
"(",
")",
":",
"return",
"True",
"return",
"False"
] | 24.909091 | 13.454545 |
def create_datastore_from_yaml_schema(self, yaml_path, delete_first=0,
path=None):
# type: (str, Optional[int], Optional[str]) -> None
"""For tabular data, create a resource in the HDX datastore which enables data preview in HDX from a YAML file
containing a list of fields and types of form {'id': 'FIELD', 'type': 'TYPE'} and optionally a primary key.
If path is not supplied, the file is first downloaded from HDX.
Args:
yaml_path (str): Path to YAML file containing list of fields and types of form {'id': 'FIELD', 'type': 'TYPE'}
delete_first (int): Delete datastore before creation. 0 = No, 1 = Yes, 2 = If no primary key. Defaults to 0.
path (Optional[str]): Local path to file that was uploaded. Defaults to None.
Returns:
None
"""
data = load_yaml(yaml_path)
self.create_datastore_from_dict_schema(data, delete_first, path=path)
|
[
"def",
"create_datastore_from_yaml_schema",
"(",
"self",
",",
"yaml_path",
",",
"delete_first",
"=",
"0",
",",
"path",
"=",
"None",
")",
":",
"# type: (str, Optional[int], Optional[str]) -> None",
"data",
"=",
"load_yaml",
"(",
"yaml_path",
")",
"self",
".",
"create_datastore_from_dict_schema",
"(",
"data",
",",
"delete_first",
",",
"path",
"=",
"path",
")"
] | 57.941176 | 33.882353 |
def separable_convolution(input, weights, output=None, mode="reflect", cval=0.0, origin=0):
r"""
Calculate a n-dimensional convolution of a separable kernel to a n-dimensional input.
Achieved by calling convolution1d along the first axis, obtaining an intermediate
image, on which the next convolution1d along the second axis is called and so on.
Parameters
----------
input : array_like
Array to convolve with the separable kernel.
weights : ndarray
One-dimensional sequence of numbers.
output : array, optional
The `output` parameter passes an array in which to store the
filter output.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0.0.
Returns
-------
output : ndarray
Input image convolved with the supplied kernel.
"""
input = numpy.asarray(input)
output = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
convolve1d(input, weights, axes[0], output, mode, cval, origin)
for ii in range(1, len(axes)):
convolve1d(output, weights, axes[ii], output, mode, cval, origin)
else:
output[...] = input[...]
return output
|
[
"def",
"separable_convolution",
"(",
"input",
",",
"weights",
",",
"output",
"=",
"None",
",",
"mode",
"=",
"\"reflect\"",
",",
"cval",
"=",
"0.0",
",",
"origin",
"=",
"0",
")",
":",
"input",
"=",
"numpy",
".",
"asarray",
"(",
"input",
")",
"output",
"=",
"_ni_support",
".",
"_get_output",
"(",
"output",
",",
"input",
")",
"axes",
"=",
"list",
"(",
"range",
"(",
"input",
".",
"ndim",
")",
")",
"if",
"len",
"(",
"axes",
")",
">",
"0",
":",
"convolve1d",
"(",
"input",
",",
"weights",
",",
"axes",
"[",
"0",
"]",
",",
"output",
",",
"mode",
",",
"cval",
",",
"origin",
")",
"for",
"ii",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"axes",
")",
")",
":",
"convolve1d",
"(",
"output",
",",
"weights",
",",
"axes",
"[",
"ii",
"]",
",",
"output",
",",
"mode",
",",
"cval",
",",
"origin",
")",
"else",
":",
"output",
"[",
"...",
"]",
"=",
"input",
"[",
"...",
"]",
"return",
"output"
] | 38.619048 | 22.452381 |
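The separable_convolution docstring above describes running convolve1d once per axis on the intermediate result; the same idea can be sketched directly with scipy.ndimage.convolve1d (the input and weights below are made-up values):

import numpy as np
from scipy.ndimage import convolve1d

image = np.arange(25, dtype=float).reshape(5, 5)  # hypothetical 2-D input
weights = np.array([1.0, 2.0, 1.0]) / 4.0         # hypothetical 1-D kernel
out = image
for axis in range(image.ndim):                    # one 1-D pass per axis
    out = convolve1d(out, weights, axis=axis, mode="reflect")
print(out.shape)  # (5, 5): same shape, smoothed along both axes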
def _token_to_ids(self, token):
"""Convert a single token to a list of integer ids."""
# Check cache
cache_location = hash(token) % self._cache_size
cache_key, cache_value = self._token_to_ids_cache[cache_location]
if cache_key == token:
return cache_value
subwords = self._token_to_subwords(token)
ids = []
for subword in subwords:
if subword == _UNDERSCORE_REPLACEMENT:
ids.append(len(self._subwords) + ord("_"))
continue
subword_id = self._subword_to_id.get(subword)
if subword_id is None:
# Byte-encode
ids.extend(self._byte_encode(subword))
else:
ids.append(subword_id)
# Update cache
self._token_to_ids_cache[cache_location] = (token, ids)
return ids
|
[
"def",
"_token_to_ids",
"(",
"self",
",",
"token",
")",
":",
"# Check cache",
"cache_location",
"=",
"hash",
"(",
"token",
")",
"%",
"self",
".",
"_cache_size",
"cache_key",
",",
"cache_value",
"=",
"self",
".",
"_token_to_ids_cache",
"[",
"cache_location",
"]",
"if",
"cache_key",
"==",
"token",
":",
"return",
"cache_value",
"subwords",
"=",
"self",
".",
"_token_to_subwords",
"(",
"token",
")",
"ids",
"=",
"[",
"]",
"for",
"subword",
"in",
"subwords",
":",
"if",
"subword",
"==",
"_UNDERSCORE_REPLACEMENT",
":",
"ids",
".",
"append",
"(",
"len",
"(",
"self",
".",
"_subwords",
")",
"+",
"ord",
"(",
"\"_\"",
")",
")",
"continue",
"subword_id",
"=",
"self",
".",
"_subword_to_id",
".",
"get",
"(",
"subword",
")",
"if",
"subword_id",
"is",
"None",
":",
"# Byte-encode",
"ids",
".",
"extend",
"(",
"self",
".",
"_byte_encode",
"(",
"subword",
")",
")",
"else",
":",
"ids",
".",
"append",
"(",
"subword_id",
")",
"# Update cache",
"self",
".",
"_token_to_ids_cache",
"[",
"cache_location",
"]",
"=",
"(",
"token",
",",
"ids",
")",
"return",
"ids"
] | 29.96 | 18.36 |
def network_interface_get_effective_route_table(name, resource_group, **kwargs):
'''
.. versionadded:: 2019.2.0
Get all route tables for a specific network interface.
:param name: The name of the network interface to query.
:param resource_group: The resource group name assigned to the
network interface.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.network_interface_get_effective_route_table test-iface0 testgroup
'''
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
nic = netconn.network_interfaces.get_effective_route_table(
network_interface_name=name,
resource_group_name=resource_group
)
nic.wait()
tables = nic.result()
tables = tables.as_dict()
result = tables['value']
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
return result
|
[
"def",
"network_interface_get_effective_route_table",
"(",
"name",
",",
"resource_group",
",",
"*",
"*",
"kwargs",
")",
":",
"netconn",
"=",
"__utils__",
"[",
"'azurearm.get_client'",
"]",
"(",
"'network'",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"nic",
"=",
"netconn",
".",
"network_interfaces",
".",
"get_effective_route_table",
"(",
"network_interface_name",
"=",
"name",
",",
"resource_group_name",
"=",
"resource_group",
")",
"nic",
".",
"wait",
"(",
")",
"tables",
"=",
"nic",
".",
"result",
"(",
")",
"tables",
"=",
"tables",
".",
"as_dict",
"(",
")",
"result",
"=",
"tables",
"[",
"'value'",
"]",
"except",
"CloudError",
"as",
"exc",
":",
"__utils__",
"[",
"'azurearm.log_cloud_error'",
"]",
"(",
"'network'",
",",
"str",
"(",
"exc",
")",
",",
"*",
"*",
"kwargs",
")",
"result",
"=",
"{",
"'error'",
":",
"str",
"(",
"exc",
")",
"}",
"return",
"result"
] | 29.727273 | 26.030303 |
def estimate_allele_frequency(ac, an, a=1, b=100):
"""
Estimate allele frequencies from allele counts using a beta(a, b) prior.
Parameters:
-----------
ac : array-like
Array-like object with the observed allele counts for each variant. If
ac is a pandas Series, the output dataframe will have the same index as
ac.
an : array-like
Array-like object with the number of haplotypes that were genotyped.
a : float
Parameter for prior distribution beta(a, b).
b : float
Parameter for prior distribution beta(a, b).
Returns
-------
out : pandas.DataFrame
Pandas dataframe with allele frequency estimate
"""
# Credible interval is 95% highest posterior density
td = dict(zip(['ci_lower', 'ci_upper'],
stats.beta(a + ac, b + an - ac).interval(0.95)))
td['af'] = (a + ac) / (a + b + an)
td['af_mle'] = np.array(ac).astype(float) / np.array(an)
out = pd.DataFrame(td)[['af_mle', 'af', 'ci_lower', 'ci_upper']]
if type(ac) == pd.Series:
out.index = ac.index
return(out)
|
[
"def",
"estimate_allele_frequency",
"(",
"ac",
",",
"an",
",",
"a",
"=",
"1",
",",
"b",
"=",
"100",
")",
":",
"# Credible interval is 95% highest posterior density",
"td",
"=",
"dict",
"(",
"zip",
"(",
"[",
"'ci_lower'",
",",
"'ci_upper'",
"]",
",",
"stats",
".",
"beta",
"(",
"a",
"+",
"ac",
",",
"b",
"+",
"an",
"-",
"ac",
")",
".",
"interval",
"(",
"0.95",
")",
")",
")",
"td",
"[",
"'af'",
"]",
"=",
"(",
"a",
"+",
"ac",
")",
"/",
"(",
"a",
"+",
"b",
"+",
"an",
")",
"td",
"[",
"'af_mle'",
"]",
"=",
"np",
".",
"array",
"(",
"ac",
")",
".",
"astype",
"(",
"float",
")",
"/",
"np",
".",
"array",
"(",
"an",
")",
"out",
"=",
"pd",
".",
"DataFrame",
"(",
"td",
")",
"[",
"[",
"'af_mle'",
",",
"'af'",
",",
"'ci_lower'",
",",
"'ci_upper'",
"]",
"]",
"if",
"type",
"(",
"ac",
")",
"==",
"pd",
".",
"Series",
":",
"out",
".",
"index",
"=",
"ac",
".",
"index",
"return",
"(",
"out",
")"
] | 29.055556 | 23.333333 |
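The estimate above is the mean of the beta posterior beta(a + ac, b + an - ac) described in the function; for a single hypothetical variant the reported columns can be reproduced with scipy alone (the counts below are made up):

from scipy import stats

a, b = 1, 100      # prior beta(a, b), matching the defaults above
ac, an = 12, 200   # hypothetical allele count / haplotypes genotyped
posterior = stats.beta(a + ac, b + an - ac)
print((a + ac) / (a + b + an))   # posterior mean, reported as 'af'
print(ac / an)                   # maximum-likelihood estimate, reported as 'af_mle'
print(posterior.interval(0.95))  # reported as ci_lower / ci_upper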
def getmetadata(self, key=None):
"""Get the metadata that applies to this element, automatically inherited from parent elements"""
if self.metadata:
d = self.doc.submetadata[self.metadata]
elif self.parent:
d = self.parent.getmetadata()
elif self.doc:
d = self.doc.metadata
else:
return None
if key:
return d[key]
else:
return d
|
[
"def",
"getmetadata",
"(",
"self",
",",
"key",
"=",
"None",
")",
":",
"if",
"self",
".",
"metadata",
":",
"d",
"=",
"self",
".",
"doc",
".",
"submetadata",
"[",
"self",
".",
"metadata",
"]",
"elif",
"self",
".",
"parent",
":",
"d",
"=",
"self",
".",
"parent",
".",
"getmetadata",
"(",
")",
"elif",
"self",
".",
"doc",
":",
"d",
"=",
"self",
".",
"doc",
".",
"metadata",
"else",
":",
"return",
"None",
"if",
"key",
":",
"return",
"d",
"[",
"key",
"]",
"else",
":",
"return",
"d"
] | 31.857143 | 14.785714 |
def ge(self, other, axis="columns", level=None):
"""Checks element-wise that this is greater than or equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the ge over.
level: The Multilevel index level to apply ge over.
Returns:
A new DataFrame filled with Booleans.
"""
return self._binary_op("ge", other, axis=axis, level=level)
|
[
"def",
"ge",
"(",
"self",
",",
"other",
",",
"axis",
"=",
"\"columns\"",
",",
"level",
"=",
"None",
")",
":",
"return",
"self",
".",
"_binary_op",
"(",
"\"ge\"",
",",
"other",
",",
"axis",
"=",
"axis",
",",
"level",
"=",
"level",
")"
] | 39 | 19.5 |
def args(**kwargs):
""" allows us to temporarily override all the special keyword parameters in
a with context """
kwargs_str = ",".join(["%s=%r" % (k,v) for k,v in kwargs.items()])
raise DeprecationWarning("""
sh.args() has been deprecated because it was never thread safe. use the
following instead:
sh2 = sh({kwargs})
sh2.your_command()
or
sh2 = sh({kwargs})
from sh2 import your_command
your_command()
""".format(kwargs=kwargs_str))
|
[
"def",
"args",
"(",
"*",
"*",
"kwargs",
")",
":",
"kwargs_str",
"=",
"\",\"",
".",
"join",
"(",
"[",
"\"%s=%r\"",
"%",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
"]",
")",
"raise",
"DeprecationWarning",
"(",
"\"\"\"\n\nsh.args() has been deprecated because it was never thread safe. use the\nfollowing instead:\n\n sh2 = sh({kwargs})\n sh2.your_command()\n\nor\n\n sh2 = sh({kwargs})\n from sh2 import your_command\n your_command()\n\n\"\"\"",
".",
"format",
"(",
"kwargs",
"=",
"kwargs_str",
")",
")"
] | 21.904762 | 24.142857 |
def setup_hds(self):
""" setup modflow head save file observations for given kper (zero-based
stress period index) and k (zero-based layer index) pairs using the
kperk argument.
Note
----
this can setup a shit-ton of observations
this is useful for dataworth analyses or for monitoring
water levels as forecasts
"""
if self.hds_kperk is None or len(self.hds_kperk) == 0:
return
from .gw_utils import setup_hds_obs
# if len(self.hds_kperk) == 2:
# try:
# if len(self.hds_kperk[0] == 2):
# pass
# except:
# self.hds_kperk = [self.hds_kperk]
oc = self.m.get_package("OC")
if oc is None:
raise Exception("can't find OC package in model to setup hds grid obs")
if not oc.savehead:
raise Exception("OC not saving hds, can't setup grid obs")
hds_unit = oc.iuhead
hds_file = self.m.get_output(unit=hds_unit)
assert os.path.exists(os.path.join(self.org_model_ws,hds_file)),\
"couldn't find existing hds file {0} in org_model_ws".format(hds_file)
shutil.copy2(os.path.join(self.org_model_ws,hds_file),
os.path.join(self.m.model_ws,hds_file))
inact = None
if self.m.lpf is not None:
inact = self.m.lpf.hdry
elif self.m.upw is not None:
inact = self.m.upw.hdry
if inact is None:
skip = lambda x: np.NaN if x == self.m.bas6.hnoflo else x
else:
skip = lambda x: np.NaN if x == self.m.bas6.hnoflo or x == inact else x
print(self.hds_kperk)
frun_line, df = setup_hds_obs(os.path.join(self.m.model_ws,hds_file),
kperk_pairs=self.hds_kperk,skip=skip)
self.obs_dfs["hds"] = df
self.frun_post_lines.append("pyemu.gw_utils.apply_hds_obs('{0}')".format(hds_file))
self.tmp_files.append(hds_file)
|
[
"def",
"setup_hds",
"(",
"self",
")",
":",
"if",
"self",
".",
"hds_kperk",
"is",
"None",
"or",
"len",
"(",
"self",
".",
"hds_kperk",
")",
"==",
"0",
":",
"return",
"from",
".",
"gw_utils",
"import",
"setup_hds_obs",
"# if len(self.hds_kperk) == 2:",
"# try:",
"# if len(self.hds_kperk[0] == 2):",
"# pass",
"# except:",
"# self.hds_kperk = [self.hds_kperk]",
"oc",
"=",
"self",
".",
"m",
".",
"get_package",
"(",
"\"OC\"",
")",
"if",
"oc",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"can't find OC package in model to setup hds grid obs\"",
")",
"if",
"not",
"oc",
".",
"savehead",
":",
"raise",
"Exception",
"(",
"\"OC not saving hds, can't setup grid obs\"",
")",
"hds_unit",
"=",
"oc",
".",
"iuhead",
"hds_file",
"=",
"self",
".",
"m",
".",
"get_output",
"(",
"unit",
"=",
"hds_unit",
")",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"org_model_ws",
",",
"hds_file",
")",
")",
",",
"\"couldn't find existing hds file {0} in org_model_ws\"",
".",
"format",
"(",
"hds_file",
")",
"shutil",
".",
"copy2",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"org_model_ws",
",",
"hds_file",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"m",
".",
"model_ws",
",",
"hds_file",
")",
")",
"inact",
"=",
"None",
"if",
"self",
".",
"m",
".",
"lpf",
"is",
"not",
"None",
":",
"inact",
"=",
"self",
".",
"m",
".",
"lpf",
".",
"hdry",
"elif",
"self",
".",
"m",
".",
"upw",
"is",
"not",
"None",
":",
"inact",
"=",
"self",
".",
"m",
".",
"upw",
".",
"hdry",
"if",
"inact",
"is",
"None",
":",
"skip",
"=",
"lambda",
"x",
":",
"np",
".",
"NaN",
"if",
"x",
"==",
"self",
".",
"m",
".",
"bas6",
".",
"hnoflo",
"else",
"x",
"else",
":",
"skip",
"=",
"lambda",
"x",
":",
"np",
".",
"NaN",
"if",
"x",
"==",
"self",
".",
"m",
".",
"bas6",
".",
"hnoflo",
"or",
"x",
"==",
"inact",
"else",
"x",
"print",
"(",
"self",
".",
"hds_kperk",
")",
"frun_line",
",",
"df",
"=",
"setup_hds_obs",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"m",
".",
"model_ws",
",",
"hds_file",
")",
",",
"kperk_pairs",
"=",
"self",
".",
"hds_kperk",
",",
"skip",
"=",
"skip",
")",
"self",
".",
"obs_dfs",
"[",
"\"hds\"",
"]",
"=",
"df",
"self",
".",
"frun_post_lines",
".",
"append",
"(",
"\"pyemu.gw_utils.apply_hds_obs('{0}')\"",
".",
"format",
"(",
"hds_file",
")",
")",
"self",
".",
"tmp_files",
".",
"append",
"(",
"hds_file",
")"
] | 39.68 | 20.38 |
def count(self):
"""Total count of the matching items.
It sums up the count of partial results, and returns the total count of
matching items in the table.
"""
count = 0
operation = self._get_operation()
kwargs = self.kwargs.copy()
kwargs['select'] = 'COUNT'
limit = kwargs.get('limit', None)
while True:
result = operation(self.model.get_table_name(), **kwargs)
count += result['Count']
if limit is not None:
limit -= result['Count']
last_evaluated_key = result.get('LastEvaluatedKey', None)
if not self._prepare_next_fetch(kwargs, last_evaluated_key, limit):
break
return count
|
[
"def",
"count",
"(",
"self",
")",
":",
"count",
"=",
"0",
"operation",
"=",
"self",
".",
"_get_operation",
"(",
")",
"kwargs",
"=",
"self",
".",
"kwargs",
".",
"copy",
"(",
")",
"kwargs",
"[",
"'select'",
"]",
"=",
"'COUNT'",
"limit",
"=",
"kwargs",
".",
"get",
"(",
"'limit'",
",",
"None",
")",
"while",
"True",
":",
"result",
"=",
"operation",
"(",
"self",
".",
"model",
".",
"get_table_name",
"(",
")",
",",
"*",
"*",
"kwargs",
")",
"count",
"+=",
"result",
"[",
"'Count'",
"]",
"if",
"limit",
"is",
"not",
"None",
":",
"limit",
"-=",
"result",
"[",
"'Count'",
"]",
"last_evaluated_key",
"=",
"result",
".",
"get",
"(",
"'LastEvaluatedKey'",
",",
"None",
")",
"if",
"not",
"self",
".",
"_prepare_next_fetch",
"(",
"kwargs",
",",
"last_evaluated_key",
",",
"limit",
")",
":",
"break",
"return",
"count"
] | 35.285714 | 16.714286 |
def get_assessment_notification_session_for_bank(self, assessment_receiver, bank_id):
"""Gets the ``OsidSession`` associated with the assessment notification service for the given bank.
arg: assessment_receiver
(osid.assessment.AssessmentReceiver): the assessment
receiver interface
arg: bank_id (osid.id.Id): the ``Id`` of the bank
return: (osid.assessment.AssessmentNotificationSession) - ``an
_assessment_notification_session``
raise: NotFound - ``bank_id`` not found
raise: NullArgument - ``assessment_receiver`` or ``bank_id`` is
``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_assessment_notification()``
or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_notification()`` and
``supports_visible_federation()`` are ``true``.*
"""
if not self.supports_assessment_notification():
raise errors.Unimplemented()
##
# Also include check to see if the catalog Id is found otherwise raise errors.NotFound
##
# pylint: disable=no-member
return sessions.ItemNotificationSession(bank_id, runtime=self._runtime, receiver=assessment_receiver)
|
[
"def",
"get_assessment_notification_session_for_bank",
"(",
"self",
",",
"assessment_receiver",
",",
"bank_id",
")",
":",
"if",
"not",
"self",
".",
"supports_assessment_notification",
"(",
")",
":",
"raise",
"errors",
".",
"Unimplemented",
"(",
")",
"##",
"# Also include check to see if the catalog Id is found otherwise raise errors.NotFound",
"##",
"# pylint: disable=no-member",
"return",
"sessions",
".",
"ItemNotificationSession",
"(",
"bank_id",
",",
"runtime",
"=",
"self",
".",
"_runtime",
",",
"receiver",
"=",
"assessment_receiver",
")"
] | 51.444444 | 22.777778 |
def load_probe_file(recording, probe_file, channel_map=None, channel_groups=None):
'''Loads channel information into recording extractor. If a .prb file is given,
then 'location' and 'group' information for each channel is stored. If a .csv
file is given, then it will only store 'location'
Parameters
----------
recording: RecordingExtractor
The recording extractor to load channel information into
probe_file: str
Path to probe file. Either .prb or .csv
Returns
---------
subRecordingExtractor
'''
probe_file = Path(probe_file)
if probe_file.suffix == '.prb':
probe_dict = read_python(probe_file)
if 'channel_groups' in probe_dict.keys():
ordered_channels = np.array([], dtype=int)
groups = sorted(probe_dict['channel_groups'].keys())
for cgroup_id in groups:
cgroup = probe_dict['channel_groups'][cgroup_id]
for key_prop, prop_val in cgroup.items():
if key_prop == 'channels':
ordered_channels = np.concatenate((ordered_channels, prop_val))
if list(ordered_channels) == recording.get_channel_ids():
subrecording = recording
else:
if not np.all([chan in recording.get_channel_ids() for chan in ordered_channels]):
print('Some channel in PRB file are in original recording')
present_ordered_channels = [chan for chan in ordered_channels if chan in recording.get_channel_ids()]
subrecording = SubRecordingExtractor(recording, channel_ids=present_ordered_channels)
for cgroup_id in groups:
cgroup = probe_dict['channel_groups'][cgroup_id]
if 'channels' not in cgroup.keys() and len(groups) > 1:
raise Exception("If more than one 'channel_group' is in the probe file, the 'channels' field"
"for each channel group is required")
elif 'channels' not in cgroup.keys():
channels_in_group = subrecording.get_num_channels()
else:
channels_in_group = len(cgroup['channels'])
for key_prop, prop_val in cgroup.items():
if key_prop == 'channels':
for i_ch, prop in enumerate(prop_val):
if prop in subrecording.get_channel_ids():
subrecording.set_channel_property(prop, 'group', int(cgroup_id))
elif key_prop == 'geometry' or key_prop == 'location':
if isinstance(prop_val, dict):
if len(prop_val.keys()) == channels_in_group:
print('geometry in PRB have not the same length as channel in group')
for (i_ch, prop) in prop_val.items():
if i_ch in subrecording.get_channel_ids():
subrecording.set_channel_property(i_ch, 'location', prop)
elif isinstance(prop_val, (list, np.ndarray)) and len(prop_val) == channels_in_group:
for (i_ch, prop) in zip(subrecording.get_channel_ids(), prop_val):
if i_ch in subrecording.get_channel_ids():
subrecording.set_channel_property(i_ch, 'location', prop)
else:
if isinstance(prop_val, dict) and len(prop_val.keys()) == channels_in_group:
for (i_ch, prop) in prop_val.items():
if i_ch in subrecording.get_channel_ids():
subrecording.set_channel_property(i_ch, key_prop, prop)
elif isinstance(prop_val, (list, np.ndarray)) and len(prop_val) == channels_in_group:
for (i_ch, prop) in zip(subrecording.get_channel_ids(), prop_val):
if i_ch in subrecording.get_channel_ids():
subrecording.set_channel_property(i_ch, key_prop, prop)
# create dummy locations
if 'geometry' not in cgroup.keys() and 'location' not in cgroup.keys():
for i, chan in enumerate(subrecording.get_channel_ids()):
subrecording.set_channel_property(chan, 'location', [i, 0])
else:
raise AttributeError("'.prb' file should contain the 'channel_groups' field")
elif probe_file.suffix == '.csv':
if channel_map is not None:
assert np.all([chan in channel_map for chan in recording.get_channel_ids()]), \
"all channel_ids in 'channel_map' must be in the original recording channel ids"
subrecording = SubRecordingExtractor(recording, channel_ids=channel_map)
else:
subrecording = recording
with probe_file.open() as csvfile:
posreader = csv.reader(csvfile)
row_count = 0
loaded_pos = []
for pos in (posreader):
row_count += 1
loaded_pos.append(pos)
assert len(subrecording.get_channel_ids()) == row_count, "The .csv file must contain as many " \
"rows as the number of channels in the recordings"
for i_ch, pos in zip(subrecording.get_channel_ids(), loaded_pos):
if i_ch in subrecording.get_channel_ids():
subrecording.set_channel_property(i_ch, 'location', list(np.array(pos).astype(float)))
if channel_groups is not None and len(channel_groups) == len(subrecording.get_channel_ids()):
for i_ch, chg in zip(subrecording.get_channel_ids(), channel_groups):
if i_ch in subrecording.get_channel_ids():
subrecording.set_channel_property(i_ch, 'group', chg)
else:
raise NotImplementedError("Only .csv and .prb probe files can be loaded.")
return subrecording
|
[
"def",
"load_probe_file",
"(",
"recording",
",",
"probe_file",
",",
"channel_map",
"=",
"None",
",",
"channel_groups",
"=",
"None",
")",
":",
"probe_file",
"=",
"Path",
"(",
"probe_file",
")",
"if",
"probe_file",
".",
"suffix",
"==",
"'.prb'",
":",
"probe_dict",
"=",
"read_python",
"(",
"probe_file",
")",
"if",
"'channel_groups'",
"in",
"probe_dict",
".",
"keys",
"(",
")",
":",
"ordered_channels",
"=",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"int",
")",
"groups",
"=",
"sorted",
"(",
"probe_dict",
"[",
"'channel_groups'",
"]",
".",
"keys",
"(",
")",
")",
"for",
"cgroup_id",
"in",
"groups",
":",
"cgroup",
"=",
"probe_dict",
"[",
"'channel_groups'",
"]",
"[",
"cgroup_id",
"]",
"for",
"key_prop",
",",
"prop_val",
"in",
"cgroup",
".",
"items",
"(",
")",
":",
"if",
"key_prop",
"==",
"'channels'",
":",
"ordered_channels",
"=",
"np",
".",
"concatenate",
"(",
"(",
"ordered_channels",
",",
"prop_val",
")",
")",
"if",
"list",
"(",
"ordered_channels",
")",
"==",
"recording",
".",
"get_channel_ids",
"(",
")",
":",
"subrecording",
"=",
"recording",
"else",
":",
"if",
"not",
"np",
".",
"all",
"(",
"[",
"chan",
"in",
"recording",
".",
"get_channel_ids",
"(",
")",
"for",
"chan",
"in",
"ordered_channels",
"]",
")",
":",
"print",
"(",
"'Some channel in PRB file are in original recording'",
")",
"present_ordered_channels",
"=",
"[",
"chan",
"for",
"chan",
"in",
"ordered_channels",
"if",
"chan",
"in",
"recording",
".",
"get_channel_ids",
"(",
")",
"]",
"subrecording",
"=",
"SubRecordingExtractor",
"(",
"recording",
",",
"channel_ids",
"=",
"present_ordered_channels",
")",
"for",
"cgroup_id",
"in",
"groups",
":",
"cgroup",
"=",
"probe_dict",
"[",
"'channel_groups'",
"]",
"[",
"cgroup_id",
"]",
"if",
"'channels'",
"not",
"in",
"cgroup",
".",
"keys",
"(",
")",
"and",
"len",
"(",
"groups",
")",
">",
"1",
":",
"raise",
"Exception",
"(",
"\"If more than one 'channel_group' is in the probe file, the 'channels' field\"",
"\"for each channel group is required\"",
")",
"elif",
"'channels'",
"not",
"in",
"cgroup",
".",
"keys",
"(",
")",
":",
"channels_in_group",
"=",
"subrecording",
".",
"get_num_channels",
"(",
")",
"else",
":",
"channels_in_group",
"=",
"len",
"(",
"cgroup",
"[",
"'channels'",
"]",
")",
"for",
"key_prop",
",",
"prop_val",
"in",
"cgroup",
".",
"items",
"(",
")",
":",
"if",
"key_prop",
"==",
"'channels'",
":",
"for",
"i_ch",
",",
"prop",
"in",
"enumerate",
"(",
"prop_val",
")",
":",
"if",
"prop",
"in",
"subrecording",
".",
"get_channel_ids",
"(",
")",
":",
"subrecording",
".",
"set_channel_property",
"(",
"prop",
",",
"'group'",
",",
"int",
"(",
"cgroup_id",
")",
")",
"elif",
"key_prop",
"==",
"'geometry'",
"or",
"key_prop",
"==",
"'location'",
":",
"if",
"isinstance",
"(",
"prop_val",
",",
"dict",
")",
":",
"if",
"len",
"(",
"prop_val",
".",
"keys",
"(",
")",
")",
"==",
"channels_in_group",
":",
"print",
"(",
"'geometry in PRB have not the same length as channel in group'",
")",
"for",
"(",
"i_ch",
",",
"prop",
")",
"in",
"prop_val",
".",
"items",
"(",
")",
":",
"if",
"i_ch",
"in",
"subrecording",
".",
"get_channel_ids",
"(",
")",
":",
"subrecording",
".",
"set_channel_property",
"(",
"i_ch",
",",
"'location'",
",",
"prop",
")",
"elif",
"isinstance",
"(",
"prop_val",
",",
"(",
"list",
",",
"np",
".",
"ndarray",
")",
")",
"and",
"len",
"(",
"prop_val",
")",
"==",
"channels_in_group",
":",
"for",
"(",
"i_ch",
",",
"prop",
")",
"in",
"zip",
"(",
"subrecording",
".",
"get_channel_ids",
"(",
")",
",",
"prop_val",
")",
":",
"if",
"i_ch",
"in",
"subrecording",
".",
"get_channel_ids",
"(",
")",
":",
"subrecording",
".",
"set_channel_property",
"(",
"i_ch",
",",
"'location'",
",",
"prop",
")",
"else",
":",
"if",
"isinstance",
"(",
"prop_val",
",",
"dict",
")",
"and",
"len",
"(",
"prop_val",
".",
"keys",
"(",
")",
")",
"==",
"channels_in_group",
":",
"for",
"(",
"i_ch",
",",
"prop",
")",
"in",
"prop_val",
".",
"items",
"(",
")",
":",
"if",
"i_ch",
"in",
"subrecording",
".",
"get_channel_ids",
"(",
")",
":",
"subrecording",
".",
"set_channel_property",
"(",
"i_ch",
",",
"key_prop",
",",
"prop",
")",
"elif",
"isinstance",
"(",
"prop_val",
",",
"(",
"list",
",",
"np",
".",
"ndarray",
")",
")",
"and",
"len",
"(",
"prop_val",
")",
"==",
"channels_in_group",
":",
"for",
"(",
"i_ch",
",",
"prop",
")",
"in",
"zip",
"(",
"subrecording",
".",
"get_channel_ids",
"(",
")",
",",
"prop_val",
")",
":",
"if",
"i_ch",
"in",
"subrecording",
".",
"get_channel_ids",
"(",
")",
":",
"subrecording",
".",
"set_channel_property",
"(",
"i_ch",
",",
"key_prop",
",",
"prop",
")",
"# create dummy locations",
"if",
"'geometry'",
"not",
"in",
"cgroup",
".",
"keys",
"(",
")",
"and",
"'location'",
"not",
"in",
"cgroup",
".",
"keys",
"(",
")",
":",
"for",
"i",
",",
"chan",
"in",
"enumerate",
"(",
"subrecording",
".",
"get_channel_ids",
"(",
")",
")",
":",
"subrecording",
".",
"set_channel_property",
"(",
"chan",
",",
"'location'",
",",
"[",
"i",
",",
"0",
"]",
")",
"else",
":",
"raise",
"AttributeError",
"(",
"\"'.prb' file should contain the 'channel_groups' field\"",
")",
"elif",
"probe_file",
".",
"suffix",
"==",
"'.csv'",
":",
"if",
"channel_map",
"is",
"not",
"None",
":",
"assert",
"np",
".",
"all",
"(",
"[",
"chan",
"in",
"channel_map",
"for",
"chan",
"in",
"recording",
".",
"get_channel_ids",
"(",
")",
"]",
")",
",",
"\"all channel_ids in 'channel_map' must be in the original recording channel ids\"",
"subrecording",
"=",
"SubRecordingExtractor",
"(",
"recording",
",",
"channel_ids",
"=",
"channel_map",
")",
"else",
":",
"subrecording",
"=",
"recording",
"with",
"probe_file",
".",
"open",
"(",
")",
"as",
"csvfile",
":",
"posreader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
")",
"row_count",
"=",
"0",
"loaded_pos",
"=",
"[",
"]",
"for",
"pos",
"in",
"(",
"posreader",
")",
":",
"row_count",
"+=",
"1",
"loaded_pos",
".",
"append",
"(",
"pos",
")",
"assert",
"len",
"(",
"subrecording",
".",
"get_channel_ids",
"(",
")",
")",
"==",
"row_count",
",",
"\"The .csv file must contain as many \"",
"\"rows as the number of channels in the recordings\"",
"for",
"i_ch",
",",
"pos",
"in",
"zip",
"(",
"subrecording",
".",
"get_channel_ids",
"(",
")",
",",
"loaded_pos",
")",
":",
"if",
"i_ch",
"in",
"subrecording",
".",
"get_channel_ids",
"(",
")",
":",
"subrecording",
".",
"set_channel_property",
"(",
"i_ch",
",",
"'location'",
",",
"list",
"(",
"np",
".",
"array",
"(",
"pos",
")",
".",
"astype",
"(",
"float",
")",
")",
")",
"if",
"channel_groups",
"is",
"not",
"None",
"and",
"len",
"(",
"channel_groups",
")",
"==",
"len",
"(",
"subrecording",
".",
"get_channel_ids",
"(",
")",
")",
":",
"for",
"i_ch",
",",
"chg",
"in",
"zip",
"(",
"subrecording",
".",
"get_channel_ids",
"(",
")",
",",
"channel_groups",
")",
":",
"if",
"i_ch",
"in",
"subrecording",
".",
"get_channel_ids",
"(",
")",
":",
"subrecording",
".",
"set_channel_property",
"(",
"i_ch",
",",
"'group'",
",",
"chg",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"Only .csv and .prb probe files can be loaded.\"",
")",
"return",
"subrecording"
] | 59.598039 | 30.970588 |
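A usage sketch for the load_probe_file entry above. The file name, channel ids and geometry values are invented for illustration; the channel_groups dict is the structure the .prb parser expects (read via read_python):

# tetrode.prb -- hypothetical probe file contents
channel_groups = {
    0: {
        'channels': [0, 1, 2, 3],
        'geometry': {0: [0.0, 0.0], 1: [0.0, 25.0], 2: [25.0, 0.0], 3: [25.0, 25.0]},
    },
}

# assuming `recording` is an existing RecordingExtractor with channels 0-3:
# sub = load_probe_file(recording, 'tetrode.prb')
# sub.get_channel_property(1, 'location')  # -> [0.0, 25.0]
# sub.get_channel_property(1, 'group')     # -> 0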
def _parse_json(self, page, exactly_one=True):
'''Returns location, (latitude, longitude) from json feed.'''
places = page.get('results', [])
if not len(places):
self._check_status(page.get('status'))
return None
def parse_place(place):
'''Get the location, lat, lng from a single json place.'''
location = place.get('formatted_address')
latitude = place['geometry']['location']['lat']
longitude = place['geometry']['location']['lng']
return Location(location, (latitude, longitude), place)
if exactly_one:
return parse_place(places[0])
else:
return [parse_place(place) for place in places]
|
[
"def",
"_parse_json",
"(",
"self",
",",
"page",
",",
"exactly_one",
"=",
"True",
")",
":",
"places",
"=",
"page",
".",
"get",
"(",
"'results'",
",",
"[",
"]",
")",
"if",
"not",
"len",
"(",
"places",
")",
":",
"self",
".",
"_check_status",
"(",
"page",
".",
"get",
"(",
"'status'",
")",
")",
"return",
"None",
"def",
"parse_place",
"(",
"place",
")",
":",
"'''Get the location, lat, lng from a single json place.'''",
"location",
"=",
"place",
".",
"get",
"(",
"'formatted_address'",
")",
"latitude",
"=",
"place",
"[",
"'geometry'",
"]",
"[",
"'location'",
"]",
"[",
"'lat'",
"]",
"longitude",
"=",
"place",
"[",
"'geometry'",
"]",
"[",
"'location'",
"]",
"[",
"'lng'",
"]",
"return",
"Location",
"(",
"location",
",",
"(",
"latitude",
",",
"longitude",
")",
",",
"place",
")",
"if",
"exactly_one",
":",
"return",
"parse_place",
"(",
"places",
"[",
"0",
"]",
")",
"else",
":",
"return",
"[",
"parse_place",
"(",
"place",
")",
"for",
"place",
"in",
"places",
"]"
] | 38.473684 | 19.842105 |
def reindex(report):
"""Reindex report so that 'TOTAL' is the last row"""
index = list(report.index)
i = index.index('TOTAL')
return report.reindex(index[:i] + index[i+1:] + ['TOTAL'])
|
[
"def",
"reindex",
"(",
"report",
")",
":",
"index",
"=",
"list",
"(",
"report",
".",
"index",
")",
"i",
"=",
"index",
".",
"index",
"(",
"'TOTAL'",
")",
"return",
"report",
".",
"reindex",
"(",
"index",
"[",
":",
"i",
"]",
"+",
"index",
"[",
"i",
"+",
"1",
":",
"]",
"+",
"[",
"'TOTAL'",
"]",
")"
] | 39.2 | 12.8 |
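A quick usage sketch for the reindex entry above, assuming the helper is in scope; the report values are made up. Rows keep their original order and 'TOTAL' is moved to the end:

import pandas as pd

report = pd.DataFrame({'count': [3, 7, 10]}, index=['a', 'TOTAL', 'b'])
print(reindex(report))
#        count
# a          3
# b         10
# TOTAL      7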
def print_settings_example():
"""
You can use settings to get additional information from the user via their
dependencies.io configuration file. Settings will be automatically injected as
env variables with the "SETTING_" prefix.
All settings will be passed as strings. More complex types will be json
encoded. You should always provide defaults, if possible.
"""
SETTING_EXAMPLE_LIST = json.loads(os.getenv('SETTING_EXAMPLE_LIST', '[]'))
SETTING_EXAMPLE_STRING = os.getenv('SETTING_EXAMPLE_STRING', 'default')
print('List setting values: {}'.format(SETTING_EXAMPLE_LIST))
print('String setting value: {}'.format(SETTING_EXAMPLE_STRING))
|
[
"def",
"print_settings_example",
"(",
")",
":",
"SETTING_EXAMPLE_LIST",
"=",
"json",
".",
"loads",
"(",
"os",
".",
"getenv",
"(",
"'SETTING_EXAMPLE_LIST'",
",",
"'[]'",
")",
")",
"SETTING_EXAMPLE_STRING",
"=",
"os",
".",
"getenv",
"(",
"'SETTING_EXAMPLE_STRING'",
",",
"'default'",
")",
"print",
"(",
"'List setting values: {}'",
".",
"format",
"(",
"SETTING_EXAMPLE_LIST",
")",
")",
"print",
"(",
"'String setting value: {}'",
".",
"format",
"(",
"SETTING_EXAMPLE_STRING",
")",
")"
] | 47.857143 | 25.571429 |
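A small sketch of how the SETTING_-prefixed variables used by the print_settings_example entry above are consumed. The values are invented; in practice the dependencies.io runner injects them, with complex types JSON-encoded:

import json
import os

os.environ['SETTING_EXAMPLE_LIST'] = json.dumps(['flake8', 'mypy'])   # complex types are JSON-encoded
os.environ['SETTING_EXAMPLE_STRING'] = 'hello'

# print_settings_example() would now report:
# List setting values: ['flake8', 'mypy']
# String setting value: hello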
def newton(power_sum, elementary_symmetric_polynomial, order):
r'''
Given two lists of values, the first list being the `power sum`s of a
polynomial, and the second being expressions of the roots of the
polynomial as found by Viete's Formula, use information from the longer list to
fill out the shorter list using Newton's Identities.
.. note::
Updates are done **in place**
Parameters
----------
power_sum: list of float
elementary_symmetric_polynomial: list of float
order: int
The number of terms to expand to when updating `elementary_symmetric_polynomial`
See Also
--------
    https://en.wikipedia.org/wiki/Newton%27s_identities
'''
if len(power_sum) > len(elementary_symmetric_polynomial):
_update_elementary_symmetric_polynomial(power_sum, elementary_symmetric_polynomial, order)
elif len(power_sum) < len(elementary_symmetric_polynomial):
_update_power_sum(power_sum, elementary_symmetric_polynomial, order)
|
[
"def",
"newton",
"(",
"power_sum",
",",
"elementary_symmetric_polynomial",
",",
"order",
")",
":",
"if",
"len",
"(",
"power_sum",
")",
">",
"len",
"(",
"elementary_symmetric_polynomial",
")",
":",
"_update_elementary_symmetric_polynomial",
"(",
"power_sum",
",",
"elementary_symmetric_polynomial",
",",
"order",
")",
"elif",
"len",
"(",
"power_sum",
")",
"<",
"len",
"(",
"elementary_symmetric_polynomial",
")",
":",
"_update_power_sum",
"(",
"power_sum",
",",
"elementary_symmetric_polynomial",
",",
"order",
")"
] | 41.8 | 30.68 |
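For reference, the recurrence behind the newton entry above is the standard form of Newton's identities linking the power sums p_k to the elementary symmetric polynomials e_k (with e_0 = 1); whichever list is longer is used to extend the other:

p_k = \sum_{i=1}^{k-1} (-1)^{i-1} e_i \, p_{k-i} + (-1)^{k-1} k \, e_k ,
\qquad
e_k = \frac{1}{k} \sum_{i=1}^{k} (-1)^{i-1} e_{k-i} \, p_i .

For example, e_1 = p_1 and e_2 = (p_1^2 - p_2)/2.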
def _partition_spec(self, shape, partition_info):
"""Build magic (and sparsely documented) shapes_and_slices spec string."""
if partition_info is None:
return '' # Empty string indicates a non-partitioned tensor.
ssi = tf.Variable.SaveSliceInfo(
full_name=self._var_name,
full_shape=partition_info.full_shape,
var_offset=partition_info.var_offset,
var_shape=shape)
return ssi.spec
|
[
"def",
"_partition_spec",
"(",
"self",
",",
"shape",
",",
"partition_info",
")",
":",
"if",
"partition_info",
"is",
"None",
":",
"return",
"''",
"# Empty string indicates a non-partitioned tensor.",
"ssi",
"=",
"tf",
".",
"Variable",
".",
"SaveSliceInfo",
"(",
"full_name",
"=",
"self",
".",
"_var_name",
",",
"full_shape",
"=",
"partition_info",
".",
"full_shape",
",",
"var_offset",
"=",
"partition_info",
".",
"var_offset",
",",
"var_shape",
"=",
"shape",
")",
"return",
"ssi",
".",
"spec"
] | 42.6 | 10.4 |
def do_container(self, element, decl, pseudo):
"""Implement setting tag for new wrapper element."""
value = serialize(decl.value).strip()
if '|' in value:
namespace, tag = value.split('|', 1)
try:
namespace = self.css_namespaces[namespace]
except KeyError:
log(WARN, u'undefined namespace prefix: {}'.format(
namespace).encode('utf-8'))
value = tag
else:
value = etree.QName(namespace, tag)
step = self.state[self.state['current_step']]
actions = step['actions']
actions.append(('tag', value))
|
[
"def",
"do_container",
"(",
"self",
",",
"element",
",",
"decl",
",",
"pseudo",
")",
":",
"value",
"=",
"serialize",
"(",
"decl",
".",
"value",
")",
".",
"strip",
"(",
")",
"if",
"'|'",
"in",
"value",
":",
"namespace",
",",
"tag",
"=",
"value",
".",
"split",
"(",
"'|'",
",",
"1",
")",
"try",
":",
"namespace",
"=",
"self",
".",
"css_namespaces",
"[",
"namespace",
"]",
"except",
"KeyError",
":",
"log",
"(",
"WARN",
",",
"u'undefined namespace prefix: {}'",
".",
"format",
"(",
"namespace",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"value",
"=",
"tag",
"else",
":",
"value",
"=",
"etree",
".",
"QName",
"(",
"namespace",
",",
"tag",
")",
"step",
"=",
"self",
".",
"state",
"[",
"self",
".",
"state",
"[",
"'current_step'",
"]",
"]",
"actions",
"=",
"step",
"[",
"'actions'",
"]",
"actions",
".",
"append",
"(",
"(",
"'tag'",
",",
"value",
")",
")"
] | 34.631579 | 16.421053 |
def to_csv(self, fileobj=sys.stdout):
"""Write data on file fileobj using CSV format."""
openclose = is_string(fileobj)
if openclose:
fileobj = open(fileobj, "w")
for idx, section in enumerate(self.sections):
fileobj.write(section.to_csvline(with_header=(idx == 0)))
fileobj.flush()
if openclose:
fileobj.close()
|
[
"def",
"to_csv",
"(",
"self",
",",
"fileobj",
"=",
"sys",
".",
"stdout",
")",
":",
"openclose",
"=",
"is_string",
"(",
"fileobj",
")",
"if",
"openclose",
":",
"fileobj",
"=",
"open",
"(",
"fileobj",
",",
"\"w\"",
")",
"for",
"idx",
",",
"section",
"in",
"enumerate",
"(",
"self",
".",
"sections",
")",
":",
"fileobj",
".",
"write",
"(",
"section",
".",
"to_csvline",
"(",
"with_header",
"=",
"(",
"idx",
"==",
"0",
")",
")",
")",
"fileobj",
".",
"flush",
"(",
")",
"if",
"openclose",
":",
"fileobj",
".",
"close",
"(",
")"
] | 29.769231 | 18.076923 |
def create_title(article, language, title, slug=None, description=None,
page_title=None, menu_title=None, meta_description=None,
creation_date=None, image=None):
"""
Create an article title.
"""
# validate article
assert isinstance(article, Article)
# validate language:
assert language in get_language_list(article.tree.node.site_id)
# validate creation date
if creation_date:
assert isinstance(creation_date, datetime.date)
# set default slug:
if not slug:
slug = settings.CMS_ARTICLES_SLUG_FORMAT.format(
now=creation_date or now(),
slug=slugify(title),
)
# find unused slug:
base_slug = slug
qs = Title.objects.filter(language=language)
used_slugs = list(s for s in qs.values_list('slug', flat=True) if s.startswith(base_slug))
i = 1
while slug in used_slugs:
slug = '%s-%s' % (base_slug, i)
i += 1
# create title
title = Title.objects.create(
article=article,
language=language,
title=title,
slug=slug,
description=description,
page_title=page_title,
menu_title=menu_title,
meta_description=meta_description,
image=image,
)
return title
|
[
"def",
"create_title",
"(",
"article",
",",
"language",
",",
"title",
",",
"slug",
"=",
"None",
",",
"description",
"=",
"None",
",",
"page_title",
"=",
"None",
",",
"menu_title",
"=",
"None",
",",
"meta_description",
"=",
"None",
",",
"creation_date",
"=",
"None",
",",
"image",
"=",
"None",
")",
":",
"# validate article",
"assert",
"isinstance",
"(",
"article",
",",
"Article",
")",
"# validate language:",
"assert",
"language",
"in",
"get_language_list",
"(",
"article",
".",
"tree",
".",
"node",
".",
"site_id",
")",
"# validate creation date",
"if",
"creation_date",
":",
"assert",
"isinstance",
"(",
"creation_date",
",",
"datetime",
".",
"date",
")",
"# set default slug:",
"if",
"not",
"slug",
":",
"slug",
"=",
"settings",
".",
"CMS_ARTICLES_SLUG_FORMAT",
".",
"format",
"(",
"now",
"=",
"creation_date",
"or",
"now",
"(",
")",
",",
"slug",
"=",
"slugify",
"(",
"title",
")",
",",
")",
"# find unused slug:",
"base_slug",
"=",
"slug",
"qs",
"=",
"Title",
".",
"objects",
".",
"filter",
"(",
"language",
"=",
"language",
")",
"used_slugs",
"=",
"list",
"(",
"s",
"for",
"s",
"in",
"qs",
".",
"values_list",
"(",
"'slug'",
",",
"flat",
"=",
"True",
")",
"if",
"s",
".",
"startswith",
"(",
"base_slug",
")",
")",
"i",
"=",
"1",
"while",
"slug",
"in",
"used_slugs",
":",
"slug",
"=",
"'%s-%s'",
"%",
"(",
"base_slug",
",",
"i",
")",
"i",
"+=",
"1",
"# create title",
"title",
"=",
"Title",
".",
"objects",
".",
"create",
"(",
"article",
"=",
"article",
",",
"language",
"=",
"language",
",",
"title",
"=",
"title",
",",
"slug",
"=",
"slug",
",",
"description",
"=",
"description",
",",
"page_title",
"=",
"page_title",
",",
"menu_title",
"=",
"menu_title",
",",
"meta_description",
"=",
"meta_description",
",",
"image",
"=",
"image",
",",
")",
"return",
"title"
] | 27.304348 | 19.73913 |
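The slug de-duplication loop in the create_title entry above can be seen in isolation. A sketch with made-up existing slugs (the real code collects used_slugs from a Title queryset):

base_slug = 'my-article'                        # hypothetical slugified title
used_slugs = ['my-article', 'my-article-1']     # pretend these already exist in the DB
slug, i = base_slug, 1
while slug in used_slugs:
    slug = '%s-%s' % (base_slug, i)
    i += 1
print(slug)                                     # my-article-2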
def change_parameters(self,params):
"""
Utility function for changing the approximate distribution parameters
"""
no_of_params = 0
for core_param in range(len(self.q)):
for approx_param in range(self.q[core_param].param_no):
self.q[core_param].vi_change_param(approx_param, params[no_of_params])
no_of_params += 1
|
[
"def",
"change_parameters",
"(",
"self",
",",
"params",
")",
":",
"no_of_params",
"=",
"0",
"for",
"core_param",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"q",
")",
")",
":",
"for",
"approx_param",
"in",
"range",
"(",
"self",
".",
"q",
"[",
"core_param",
"]",
".",
"param_no",
")",
":",
"self",
".",
"q",
"[",
"core_param",
"]",
".",
"vi_change_param",
"(",
"approx_param",
",",
"params",
"[",
"no_of_params",
"]",
")",
"no_of_params",
"+=",
"1"
] | 43.222222 | 15.888889 |
def command(state, args):
"""Register watching regexp for an anime."""
args = parser.parse_args(args[1:])
aid = state.results.parse_aid(args.aid, default_key='db')
if args.query:
# Use regexp provided by user.
regexp = '.*'.join(args.query)
else:
# Make default regexp.
title = query.select.lookup(state.db, aid, fields=['title']).title
# Replace non-word, non-whitespace with whitespace.
regexp = re.sub(r'[^\w\s]', ' ', title)
# Split on whitespace and join with wildcard regexp.
regexp = '.*?'.join(re.escape(x) for x in regexp.split())
# Append episode matching regexp.
regexp = '.*?'.join((
regexp,
r'\b(?P<ep>[0-9]+)(v[0-9]+)?',
))
query.files.set_regexp(state.db, aid, regexp)
|
[
"def",
"command",
"(",
"state",
",",
"args",
")",
":",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"args",
"[",
"1",
":",
"]",
")",
"aid",
"=",
"state",
".",
"results",
".",
"parse_aid",
"(",
"args",
".",
"aid",
",",
"default_key",
"=",
"'db'",
")",
"if",
"args",
".",
"query",
":",
"# Use regexp provided by user.",
"regexp",
"=",
"'.*'",
".",
"join",
"(",
"args",
".",
"query",
")",
"else",
":",
"# Make default regexp.",
"title",
"=",
"query",
".",
"select",
".",
"lookup",
"(",
"state",
".",
"db",
",",
"aid",
",",
"fields",
"=",
"[",
"'title'",
"]",
")",
".",
"title",
"# Replace non-word, non-whitespace with whitespace.",
"regexp",
"=",
"re",
".",
"sub",
"(",
"r'[^\\w\\s]'",
",",
"' '",
",",
"title",
")",
"# Split on whitespace and join with wildcard regexp.",
"regexp",
"=",
"'.*?'",
".",
"join",
"(",
"re",
".",
"escape",
"(",
"x",
")",
"for",
"x",
"in",
"regexp",
".",
"split",
"(",
")",
")",
"# Append episode matching regexp.",
"regexp",
"=",
"'.*?'",
".",
"join",
"(",
"(",
"regexp",
",",
"r'\\b(?P<ep>[0-9]+)(v[0-9]+)?'",
",",
")",
")",
"query",
".",
"files",
".",
"set_regexp",
"(",
"state",
".",
"db",
",",
"aid",
",",
"regexp",
")"
] | 40 | 14.2 |
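The command entry above builds a default watching regexp from the anime title when no query is given. A sketch of that transformation on a hypothetical title and filename:

import re

title = "Fullmetal Alchemist: Brotherhood"               # hypothetical title
regexp = re.sub(r'[^\w\s]', ' ', title)                   # punctuation -> whitespace
regexp = '.*?'.join(re.escape(x) for x in regexp.split())
regexp = '.*?'.join((regexp, r'\b(?P<ep>[0-9]+)(v[0-9]+)?'))
print(regexp)
# Fullmetal.*?Alchemist.*?Brotherhood.*?\b(?P<ep>[0-9]+)(v[0-9]+)?
m = re.search(regexp, "[Group] Fullmetal Alchemist Brotherhood - 03v2.mkv")
print(m.group('ep'))                                      # 03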
def _spintaylor_aligned_prec_swapper(**p):
"""
SpinTaylorF2 is only single spin, it also struggles with anti-aligned spin
waveforms. This construct chooses between the aligned-twospin TaylorF2 model
and the precessing singlespin SpinTaylorF2 models. If aligned spins are
given, use TaylorF2, if nonaligned spins are given use SpinTaylorF2. In
the case of nonaligned doublespin systems the code will fail at the
waveform generator level.
"""
orig_approximant = p['approximant']
if p['spin2x'] == 0 and p['spin2y'] == 0 and p['spin1x'] == 0 and \
p['spin1y'] == 0:
p['approximant'] = 'TaylorF2'
else:
p['approximant'] = 'SpinTaylorF2'
hp, hc = _lalsim_fd_waveform(**p)
p['approximant'] = orig_approximant
return hp, hc
|
[
"def",
"_spintaylor_aligned_prec_swapper",
"(",
"*",
"*",
"p",
")",
":",
"orig_approximant",
"=",
"p",
"[",
"'approximant'",
"]",
"if",
"p",
"[",
"'spin2x'",
"]",
"==",
"0",
"and",
"p",
"[",
"'spin2y'",
"]",
"==",
"0",
"and",
"p",
"[",
"'spin1x'",
"]",
"==",
"0",
"and",
"p",
"[",
"'spin1y'",
"]",
"==",
"0",
":",
"p",
"[",
"'approximant'",
"]",
"=",
"'TaylorF2'",
"else",
":",
"p",
"[",
"'approximant'",
"]",
"=",
"'SpinTaylorF2'",
"hp",
",",
"hc",
"=",
"_lalsim_fd_waveform",
"(",
"*",
"*",
"p",
")",
"p",
"[",
"'approximant'",
"]",
"=",
"orig_approximant",
"return",
"hp",
",",
"hc"
] | 46.277778 | 18.055556 |
def move_cursor_one_letter(self, letter=RIGHT):
"""Move the cursor of one letter to the right (1) or the the left."""
assert letter in (self.RIGHT, self.LEFT)
if letter == self.RIGHT:
self.cursor += 1
if self.cursor > len(self.text):
self.cursor -= 1
else:
self.cursor -= 1
if self.cursor < 0:
self.cursor += 1
|
[
"def",
"move_cursor_one_letter",
"(",
"self",
",",
"letter",
"=",
"RIGHT",
")",
":",
"assert",
"letter",
"in",
"(",
"self",
".",
"RIGHT",
",",
"self",
".",
"LEFT",
")",
"if",
"letter",
"==",
"self",
".",
"RIGHT",
":",
"self",
".",
"cursor",
"+=",
"1",
"if",
"self",
".",
"cursor",
">",
"len",
"(",
"self",
".",
"text",
")",
":",
"self",
".",
"cursor",
"-=",
"1",
"else",
":",
"self",
".",
"cursor",
"-=",
"1",
"if",
"self",
".",
"cursor",
"<",
"0",
":",
"self",
".",
"cursor",
"+=",
"1"
] | 34.333333 | 11.916667 |
def _patch_for_tf1_12(tf):
"""Monkey patch tf 1.12 so tfds can use it."""
tf.io.gfile = tf.gfile
tf.io.gfile.copy = tf.gfile.Copy
tf.io.gfile.exists = tf.gfile.Exists
tf.io.gfile.glob = tf.gfile.Glob
tf.io.gfile.isdir = tf.gfile.IsDirectory
tf.io.gfile.listdir = tf.gfile.ListDirectory
tf.io.gfile.makedirs = tf.gfile.MakeDirs
tf.io.gfile.mkdir = tf.gfile.MkDir
tf.io.gfile.remove = tf.gfile.Remove
tf.io.gfile.rename = tf.gfile.Rename
tf.io.gfile.rmtree = tf.gfile.DeleteRecursively
tf.io.gfile.stat = tf.gfile.Stat
tf.io.gfile.walk = tf.gfile.Walk
tf.io.gfile.GFile = tf.gfile.GFile
tf.data.experimental = tf.contrib.data
tf.compat.v1 = types.ModuleType("tf.compat.v1")
tf.compat.v1.assert_greater = tf.assert_greater
tf.compat.v1.placeholder = tf.placeholder
tf.compat.v1.ConfigProto = tf.ConfigProto
tf.compat.v1.Session = tf.Session
tf.compat.v1.enable_eager_execution = tf.enable_eager_execution
tf.compat.v1.io = tf.io
tf.compat.v1.data = tf.data
tf.compat.v1.data.Dataset = tf.data.Dataset
tf.compat.v1.data.make_one_shot_iterator = (
lambda ds: ds.make_one_shot_iterator())
tf.compat.v1.train = tf.train
tf.compat.v1.global_variables_initializer = tf.global_variables_initializer
tf.compat.v1.test = tf.test
tf.compat.v1.test.get_temp_dir = tf.test.get_temp_dir
tf.nest = tf.contrib.framework.nest
|
[
"def",
"_patch_for_tf1_12",
"(",
"tf",
")",
":",
"tf",
".",
"io",
".",
"gfile",
"=",
"tf",
".",
"gfile",
"tf",
".",
"io",
".",
"gfile",
".",
"copy",
"=",
"tf",
".",
"gfile",
".",
"Copy",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"=",
"tf",
".",
"gfile",
".",
"Exists",
"tf",
".",
"io",
".",
"gfile",
".",
"glob",
"=",
"tf",
".",
"gfile",
".",
"Glob",
"tf",
".",
"io",
".",
"gfile",
".",
"isdir",
"=",
"tf",
".",
"gfile",
".",
"IsDirectory",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"=",
"tf",
".",
"gfile",
".",
"ListDirectory",
"tf",
".",
"io",
".",
"gfile",
".",
"makedirs",
"=",
"tf",
".",
"gfile",
".",
"MakeDirs",
"tf",
".",
"io",
".",
"gfile",
".",
"mkdir",
"=",
"tf",
".",
"gfile",
".",
"MkDir",
"tf",
".",
"io",
".",
"gfile",
".",
"remove",
"=",
"tf",
".",
"gfile",
".",
"Remove",
"tf",
".",
"io",
".",
"gfile",
".",
"rename",
"=",
"tf",
".",
"gfile",
".",
"Rename",
"tf",
".",
"io",
".",
"gfile",
".",
"rmtree",
"=",
"tf",
".",
"gfile",
".",
"DeleteRecursively",
"tf",
".",
"io",
".",
"gfile",
".",
"stat",
"=",
"tf",
".",
"gfile",
".",
"Stat",
"tf",
".",
"io",
".",
"gfile",
".",
"walk",
"=",
"tf",
".",
"gfile",
".",
"Walk",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"=",
"tf",
".",
"gfile",
".",
"GFile",
"tf",
".",
"data",
".",
"experimental",
"=",
"tf",
".",
"contrib",
".",
"data",
"tf",
".",
"compat",
".",
"v1",
"=",
"types",
".",
"ModuleType",
"(",
"\"tf.compat.v1\"",
")",
"tf",
".",
"compat",
".",
"v1",
".",
"assert_greater",
"=",
"tf",
".",
"assert_greater",
"tf",
".",
"compat",
".",
"v1",
".",
"placeholder",
"=",
"tf",
".",
"placeholder",
"tf",
".",
"compat",
".",
"v1",
".",
"ConfigProto",
"=",
"tf",
".",
"ConfigProto",
"tf",
".",
"compat",
".",
"v1",
".",
"Session",
"=",
"tf",
".",
"Session",
"tf",
".",
"compat",
".",
"v1",
".",
"enable_eager_execution",
"=",
"tf",
".",
"enable_eager_execution",
"tf",
".",
"compat",
".",
"v1",
".",
"io",
"=",
"tf",
".",
"io",
"tf",
".",
"compat",
".",
"v1",
".",
"data",
"=",
"tf",
".",
"data",
"tf",
".",
"compat",
".",
"v1",
".",
"data",
".",
"Dataset",
"=",
"tf",
".",
"data",
".",
"Dataset",
"tf",
".",
"compat",
".",
"v1",
".",
"data",
".",
"make_one_shot_iterator",
"=",
"(",
"lambda",
"ds",
":",
"ds",
".",
"make_one_shot_iterator",
"(",
")",
")",
"tf",
".",
"compat",
".",
"v1",
".",
"train",
"=",
"tf",
".",
"train",
"tf",
".",
"compat",
".",
"v1",
".",
"global_variables_initializer",
"=",
"tf",
".",
"global_variables_initializer",
"tf",
".",
"compat",
".",
"v1",
".",
"test",
"=",
"tf",
".",
"test",
"tf",
".",
"compat",
".",
"v1",
".",
"test",
".",
"get_temp_dir",
"=",
"tf",
".",
"test",
".",
"get_temp_dir",
"tf",
".",
"nest",
"=",
"tf",
".",
"contrib",
".",
"framework",
".",
"nest"
] | 40.666667 | 7.818182 |
def present_active_subjunctive(self):
"""
Strong verbs
I
>>> verb = StrongOldNorseVerb()
>>> verb.set_canonic_forms(["líta", "lítr", "leit", "litu", "litinn"])
>>> verb.present_active_subjunctive()
['líta', 'lítir', 'líti', 'lítim', 'lítið', 'líti']
II
>>> verb = StrongOldNorseVerb()
>>> verb.set_canonic_forms(["bjóða", "býðr", "bauð", "buðu", "boðinn"])
>>> verb.present_active_subjunctive()
['bjóða', 'bjóðir', 'bjóði', 'bjóðim', 'bjóðið', 'bjóði']
III
>>> verb = StrongOldNorseVerb()
>>> verb.set_canonic_forms(["verða", "verðr", "varð", "urðu", "orðinn"])
>>> verb.present_active_subjunctive()
['verða', 'verðir', 'verði', 'verðim', 'verðið', 'verði']
IV
>>> verb = StrongOldNorseVerb()
>>> verb.set_canonic_forms(["bera", "berr", "bar", "báru", "borinn"])
>>> verb.present_active_subjunctive()
['bera', 'berir', 'beri', 'berim', 'berið', 'beri']
V
>>> verb = StrongOldNorseVerb()
>>> verb.set_canonic_forms(["gefa", "gefr", "gaf", "gáfu", "gefinn"])
>>> verb.present_active_subjunctive()
['gefa', 'gefir', 'gefi', 'gefim', 'gefið', 'gefi']
VI
>>> verb = StrongOldNorseVerb()
>>> verb.set_canonic_forms(["fara", "ferr", "fór", "fóru", "farinn"])
>>> verb.present_active_subjunctive()
['fara', 'farir', 'fari', 'farim', 'farið', 'fari']
VII
>>> verb = StrongOldNorseVerb()
>>> verb.set_canonic_forms(["ráða", "ræðr", "réð", "réðu", "ráðinn"])
>>> verb.present_active_subjunctive()
['ráða', 'ráðir', 'ráði', 'ráðim', 'ráðið', 'ráði']
>>> verb = StrongOldNorseVerb()
>>> verb.set_canonic_forms(["vera", "a", "a", "a", "a"])
>>> verb.present_active_subjunctive()
['sé', 'sér', 'sé', 'sém', 'séð', 'sé']
>>> verb = StrongOldNorseVerb()
>>> verb.set_canonic_forms(["sjá", "a", "a", "a", "a"])
>>> verb.present_active_subjunctive()
['sjá', 'sér', 'sé', 'sém', 'séð', 'sé']
:return:
"""
if self.sng == "vera":
forms = ["sé", "sér", "sé", "sém", "séð", "sé"]
return forms
elif self.sng == "sjá":
forms = ["sjá", "sér", "sé", "sém", "séð", "sé"]
return forms
else:
subjunctive_root = self.sng[:-1] if self.sng[-1] == "a" else self.sng
forms = [subjunctive_root + "a"]
subjunctive_root = subjunctive_root[:-1] if subjunctive_root[-1] == "j" else subjunctive_root
forms.append(subjunctive_root + "ir")
forms.append(subjunctive_root + "i")
forms.append(subjunctive_root + "im")
forms.append(subjunctive_root + "ið")
forms.append(subjunctive_root + "i")
return forms
|
[
"def",
"present_active_subjunctive",
"(",
"self",
")",
":",
"if",
"self",
".",
"sng",
"==",
"\"vera\"",
":",
"forms",
"=",
"[",
"\"sé\",",
" ",
"sér\", ",
"\"",
"é\", \"",
"s",
"m\", \"s",
"é",
"\", \"sé\"",
"]",
"",
"",
"return",
"forms",
"elif",
"self",
".",
"sng",
"==",
"\"sjá\":",
"",
"forms",
"=",
"[",
"\"sjá\",",
" ",
"sér\", ",
"\"",
"é\", \"",
"s",
"m\", \"s",
"é",
"\", \"sé\"",
"]",
"",
"",
"return",
"forms",
"else",
":",
"subjunctive_root",
"=",
"self",
".",
"sng",
"[",
":",
"-",
"1",
"]",
"if",
"self",
".",
"sng",
"[",
"-",
"1",
"]",
"==",
"\"a\"",
"else",
"self",
".",
"sng",
"forms",
"=",
"[",
"subjunctive_root",
"+",
"\"a\"",
"]",
"subjunctive_root",
"=",
"subjunctive_root",
"[",
":",
"-",
"1",
"]",
"if",
"subjunctive_root",
"[",
"-",
"1",
"]",
"==",
"\"j\"",
"else",
"subjunctive_root",
"forms",
".",
"append",
"(",
"subjunctive_root",
"+",
"\"ir\"",
")",
"forms",
".",
"append",
"(",
"subjunctive_root",
"+",
"\"i\"",
")",
"forms",
".",
"append",
"(",
"subjunctive_root",
"+",
"\"im\"",
")",
"forms",
".",
"append",
"(",
"subjunctive_root",
"+",
"\"ið\")",
"",
"forms",
".",
"append",
"(",
"subjunctive_root",
"+",
"\"i\"",
")",
"return",
"forms"
] | 37.513158 | 20.381579 |
def cmd(send, msg, args):
"""Evaluates mathmatical expressions.
Syntax: {command} <expression>
"""
if not msg:
send("Calculate what?")
return
if "!" in msg:
args['do_kick'](args['target'], args['nick'], "hacking")
return
msg += '\n'
proc = subprocess.Popen(['dc'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
try:
output = proc.communicate(msg, timeout=5)[0].splitlines()
except subprocess.TimeoutExpired:
proc.terminate()
send("Execution took too long, you might have better luck with WolframAlpha.")
return
if not output:
send("No output found, did you forget to specify 'p'?")
elif len(output) > 3:
send("Your output is too long, have you tried mental math?")
else:
for line in output:
send(line)
|
[
"def",
"cmd",
"(",
"send",
",",
"msg",
",",
"args",
")",
":",
"if",
"not",
"msg",
":",
"send",
"(",
"\"Calculate what?\"",
")",
"return",
"if",
"\"!\"",
"in",
"msg",
":",
"args",
"[",
"'do_kick'",
"]",
"(",
"args",
"[",
"'target'",
"]",
",",
"args",
"[",
"'nick'",
"]",
",",
"\"hacking\"",
")",
"return",
"msg",
"+=",
"'\\n'",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"'dc'",
"]",
",",
"stdin",
"=",
"subprocess",
".",
"PIPE",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
",",
"universal_newlines",
"=",
"True",
")",
"try",
":",
"output",
"=",
"proc",
".",
"communicate",
"(",
"msg",
",",
"timeout",
"=",
"5",
")",
"[",
"0",
"]",
".",
"splitlines",
"(",
")",
"except",
"subprocess",
".",
"TimeoutExpired",
":",
"proc",
".",
"terminate",
"(",
")",
"send",
"(",
"\"Execution took too long, you might have better luck with WolframAlpha.\"",
")",
"return",
"if",
"not",
"output",
":",
"send",
"(",
"\"No output found, did you forget to specify 'p'?\"",
")",
"elif",
"len",
"(",
"output",
")",
">",
"3",
":",
"send",
"(",
"\"Your output is too long, have you tried mental math?\"",
")",
"else",
":",
"for",
"line",
"in",
"output",
":",
"send",
"(",
"line",
")"
] | 30.241379 | 25.137931 |
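The cmd entry above pipes the message to the dc reverse-polish calculator; the 'p' hint in the error message is dc's print command. A minimal sketch of the same subprocess pattern outside the bot (assumes the dc binary is installed):

import subprocess

proc = subprocess.Popen(['dc'], stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                        stderr=subprocess.STDOUT, universal_newlines=True)
output = proc.communicate('2 3 + p\n', timeout=5)[0].splitlines()
print(output)   # ['5'] -- without the trailing 'p', dc prints nothing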
def removeService(self, service):
"""
Removes a service from the gateway.
        @param service: Either the name of the service to remove from the
            gateway, or the service object itself.
@type service: C{callable} or a class instance
@raise NameError: Service not found.
"""
for name, wrapper in self.services.iteritems():
if service in (name, wrapper.service):
del self.services[name]
return
raise NameError("Service %r not found" % (service,))
|
[
"def",
"removeService",
"(",
"self",
",",
"service",
")",
":",
"for",
"name",
",",
"wrapper",
"in",
"self",
".",
"services",
".",
"iteritems",
"(",
")",
":",
"if",
"service",
"in",
"(",
"name",
",",
"wrapper",
".",
"service",
")",
":",
"del",
"self",
".",
"services",
"[",
"name",
"]",
"return",
"raise",
"NameError",
"(",
"\"Service %r not found\"",
"%",
"(",
"service",
",",
")",
")"
] | 35.8 | 14.2 |
def hpsplit(self, data: ['SASdata', str] = None,
cls: [str, list] = None,
code: str = None,
grow: str = None,
id: str = None,
input: [str, list, dict] = None,
model: str = None,
out: [str, bool, 'SASdata'] = None,
partition: str = None,
performance: str = None,
prune: str = None,
rules: str = None,
target: [str, list, dict] = None,
procopts: str = None,
stmtpassthrough: str = None,
**kwargs: dict) -> 'SASresults':
"""
Python method to call the HPSPLIT procedure
Documentation link:
https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=stathpug&docsetTarget=stathpug_hpsplit_syntax.htm&locale=en
:param data: SASdata object or string. This parameter is required.
:parm cls: The cls variable can be a string or list type. It refers to the categorical, or nominal variables.
:parm code: The code variable can only be a string type.
:parm grow: The grow variable can only be a string type.
:parm id: The id variable can only be a string type.
:parm input: The input variable can be a string, list or dict type. It refers to the dependent, y, or label variable.
:parm model: The model variable can only be a string type.
:parm out: The out variable can be a string, boolean or SASdata type. The member name for a boolean is "_output".
:parm partition: The partition variable can only be a string type.
:parm performance: The performance variable can only be a string type.
:parm prune: The prune variable can only be a string type.
:parm rules: The rules variable can only be a string type.
:parm target: The target variable can be a string, list or dict type. It refers to the dependent, y, or label variable.
:parm procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:parm stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object
"""
|
[
"def",
"hpsplit",
"(",
"self",
",",
"data",
":",
"[",
"'SASdata'",
",",
"str",
"]",
"=",
"None",
",",
"cls",
":",
"[",
"str",
",",
"list",
"]",
"=",
"None",
",",
"code",
":",
"str",
"=",
"None",
",",
"grow",
":",
"str",
"=",
"None",
",",
"id",
":",
"str",
"=",
"None",
",",
"input",
":",
"[",
"str",
",",
"list",
",",
"dict",
"]",
"=",
"None",
",",
"model",
":",
"str",
"=",
"None",
",",
"out",
":",
"[",
"str",
",",
"bool",
",",
"'SASdata'",
"]",
"=",
"None",
",",
"partition",
":",
"str",
"=",
"None",
",",
"performance",
":",
"str",
"=",
"None",
",",
"prune",
":",
"str",
"=",
"None",
",",
"rules",
":",
"str",
"=",
"None",
",",
"target",
":",
"[",
"str",
",",
"list",
",",
"dict",
"]",
"=",
"None",
",",
"procopts",
":",
"str",
"=",
"None",
",",
"stmtpassthrough",
":",
"str",
"=",
"None",
",",
"*",
"*",
"kwargs",
":",
"dict",
")",
"->",
"'SASresults'",
":"
] | 58.538462 | 27.461538 |
def validate_or_raise(self, *a, **k):
"""Some people would condemn this whole module screaming:
"Don't return success codes, use exceptions!"
This method allows them to be happy, too.
"""
validate, err = self.validate(*a, **k)
if err:
raise ValidationException(err)
return validate
|
[
"def",
"validate_or_raise",
"(",
"self",
",",
"*",
"a",
",",
"*",
"*",
"k",
")",
":",
"validate",
",",
"err",
"=",
"self",
".",
"validate",
"(",
"*",
"a",
",",
"*",
"*",
"k",
")",
"if",
"err",
":",
"raise",
"ValidationException",
"(",
"err",
")",
"return",
"validate"
] | 30.416667 | 14.25 |
def ensure_state(default_getter, exc_class, default_msg=None):
"""Create a decorator factory function."""
def decorator(getter=default_getter, msg=default_msg):
def ensure_decorator(f):
@wraps(f)
def inner(self, *args, **kwargs):
if not getter(self):
raise exc_class(msg) if msg else exc_class()
return f(self, *args, **kwargs)
return inner
return ensure_decorator
return decorator
|
[
"def",
"ensure_state",
"(",
"default_getter",
",",
"exc_class",
",",
"default_msg",
"=",
"None",
")",
":",
"def",
"decorator",
"(",
"getter",
"=",
"default_getter",
",",
"msg",
"=",
"default_msg",
")",
":",
"def",
"ensure_decorator",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"inner",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"getter",
"(",
"self",
")",
":",
"raise",
"exc_class",
"(",
"msg",
")",
"if",
"msg",
"else",
"exc_class",
"(",
")",
"return",
"f",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"inner",
"return",
"ensure_decorator",
"return",
"decorator"
] | 40.5 | 12.666667 |
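A hedged usage sketch for the ensure_state entry above: the factory is specialized once per state flag and the result decorates methods. The Connection class and error message below are invented for illustration; only ensure_state is assumed to be importable:

ensure_open = ensure_state(lambda self: self.is_open, ConnectionError,
                           'connection is not open')

class Connection:
    def __init__(self):
        self.is_open = False

    @ensure_open()              # uses the default getter and message baked into the factory
    def send(self, data):
        return len(data)

conn = Connection()
# conn.send(b'hi')              # would raise ConnectionError('connection is not open')
conn.is_open = True
print(conn.send(b'hi'))         # 2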
def get_mail_keys(message, complete=True):
"""
Given an email.message.Message, return a set with all email parts to get
Args:
message (email.message.Message): email message object
complete (bool): if True returns all email headers
Returns:
set with all email parts
"""
if complete:
log.debug("Get all headers")
all_headers_keys = {i.lower() for i in message.keys()}
all_parts = ADDRESSES_HEADERS | OTHERS_PARTS | all_headers_keys
else:
log.debug("Get only mains headers")
all_parts = ADDRESSES_HEADERS | OTHERS_PARTS
log.debug("All parts to get: {}".format(", ".join(all_parts)))
return all_parts
|
[
"def",
"get_mail_keys",
"(",
"message",
",",
"complete",
"=",
"True",
")",
":",
"if",
"complete",
":",
"log",
".",
"debug",
"(",
"\"Get all headers\"",
")",
"all_headers_keys",
"=",
"{",
"i",
".",
"lower",
"(",
")",
"for",
"i",
"in",
"message",
".",
"keys",
"(",
")",
"}",
"all_parts",
"=",
"ADDRESSES_HEADERS",
"|",
"OTHERS_PARTS",
"|",
"all_headers_keys",
"else",
":",
"log",
".",
"debug",
"(",
"\"Get only mains headers\"",
")",
"all_parts",
"=",
"ADDRESSES_HEADERS",
"|",
"OTHERS_PARTS",
"log",
".",
"debug",
"(",
"\"All parts to get: {}\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"all_parts",
")",
")",
")",
"return",
"all_parts"
] | 30.863636 | 21.681818 |
def convert_reshape(node, **kwargs):
"""Map MXNet's Reshape operator attributes to onnx's Reshape operator.
Converts output shape attribute to output shape tensor
and return multiple created nodes.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
output_shape_list = convert_string_to_list(attrs["shape"])
initializer = kwargs["initializer"]
output_shape_np = np.array(output_shape_list, dtype='int64')
data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]
dims = np.shape(output_shape_np)
output_shape_name = "reshape_attr_tensor" + str(kwargs["idx"])
tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)
initializer.append(
onnx.helper.make_tensor(
name=output_shape_name,
data_type=data_type,
dims=dims,
vals=output_shape_list,
raw=False,
)
)
input_nodes.append(output_shape_name)
not_supported_shape = [-2, -3, -4]
for val in output_shape_list:
if val in not_supported_shape:
raise AttributeError("Reshape: Shape value not supported in ONNX", val)
reshape_node = onnx.helper.make_node(
"Reshape",
input_nodes,
[name],
name=name
)
return [tensor_node, reshape_node]
|
[
"def",
"convert_reshape",
"(",
"node",
",",
"*",
"*",
"kwargs",
")",
":",
"name",
",",
"input_nodes",
",",
"attrs",
"=",
"get_inputs",
"(",
"node",
",",
"kwargs",
")",
"output_shape_list",
"=",
"convert_string_to_list",
"(",
"attrs",
"[",
"\"shape\"",
"]",
")",
"initializer",
"=",
"kwargs",
"[",
"\"initializer\"",
"]",
"output_shape_np",
"=",
"np",
".",
"array",
"(",
"output_shape_list",
",",
"dtype",
"=",
"'int64'",
")",
"data_type",
"=",
"onnx",
".",
"mapping",
".",
"NP_TYPE_TO_TENSOR_TYPE",
"[",
"output_shape_np",
".",
"dtype",
"]",
"dims",
"=",
"np",
".",
"shape",
"(",
"output_shape_np",
")",
"output_shape_name",
"=",
"\"reshape_attr_tensor\"",
"+",
"str",
"(",
"kwargs",
"[",
"\"idx\"",
"]",
")",
"tensor_node",
"=",
"onnx",
".",
"helper",
".",
"make_tensor_value_info",
"(",
"output_shape_name",
",",
"data_type",
",",
"dims",
")",
"initializer",
".",
"append",
"(",
"onnx",
".",
"helper",
".",
"make_tensor",
"(",
"name",
"=",
"output_shape_name",
",",
"data_type",
"=",
"data_type",
",",
"dims",
"=",
"dims",
",",
"vals",
"=",
"output_shape_list",
",",
"raw",
"=",
"False",
",",
")",
")",
"input_nodes",
".",
"append",
"(",
"output_shape_name",
")",
"not_supported_shape",
"=",
"[",
"-",
"2",
",",
"-",
"3",
",",
"-",
"4",
"]",
"for",
"val",
"in",
"output_shape_list",
":",
"if",
"val",
"in",
"not_supported_shape",
":",
"raise",
"AttributeError",
"(",
"\"Reshape: Shape value not supported in ONNX\"",
",",
"val",
")",
"reshape_node",
"=",
"onnx",
".",
"helper",
".",
"make_node",
"(",
"\"Reshape\"",
",",
"input_nodes",
",",
"[",
"name",
"]",
",",
"name",
"=",
"name",
")",
"return",
"[",
"tensor_node",
",",
"reshape_node",
"]"
] | 30.209302 | 20.604651 |
def delete(self, queue='', if_unused=False, if_empty=False):
"""Delete a Queue.
:param str queue: Queue name
:param bool if_unused: Delete only if unused
:param bool if_empty: Delete only if empty
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(queue):
raise AMQPInvalidArgument('queue should be a string')
elif not isinstance(if_unused, bool):
raise AMQPInvalidArgument('if_unused should be a boolean')
elif not isinstance(if_empty, bool):
raise AMQPInvalidArgument('if_empty should be a boolean')
delete_frame = pamqp_queue.Delete(queue=queue, if_unused=if_unused,
if_empty=if_empty)
return self._channel.rpc_request(delete_frame)
|
[
"def",
"delete",
"(",
"self",
",",
"queue",
"=",
"''",
",",
"if_unused",
"=",
"False",
",",
"if_empty",
"=",
"False",
")",
":",
"if",
"not",
"compatibility",
".",
"is_string",
"(",
"queue",
")",
":",
"raise",
"AMQPInvalidArgument",
"(",
"'queue should be a string'",
")",
"elif",
"not",
"isinstance",
"(",
"if_unused",
",",
"bool",
")",
":",
"raise",
"AMQPInvalidArgument",
"(",
"'if_unused should be a boolean'",
")",
"elif",
"not",
"isinstance",
"(",
"if_empty",
",",
"bool",
")",
":",
"raise",
"AMQPInvalidArgument",
"(",
"'if_empty should be a boolean'",
")",
"delete_frame",
"=",
"pamqp_queue",
".",
"Delete",
"(",
"queue",
"=",
"queue",
",",
"if_unused",
"=",
"if_unused",
",",
"if_empty",
"=",
"if_empty",
")",
"return",
"self",
".",
"_channel",
".",
"rpc_request",
"(",
"delete_frame",
")"
] | 43.083333 | 20.208333 |
def _get_node_estimations(self, node_attr, node_id):
"""
Returns the data nodes estimations and `wait_inputs` flag.
:param node_attr:
Dictionary of node attributes.
:type node_attr: dict
:param node_id:
Data node's id.
:type node_id: str
:returns:
- node estimations with minimum distance from the starting node, and
- `wait_inputs` flag
:rtype: (dict[str, T], bool)
"""
# Get data node estimations.
estimations = self._wf_pred[node_id]
wait_in = node_attr['wait_inputs'] # Namespace shortcut.
# Check if node has multiple estimations and it is not waiting inputs.
if len(estimations) > 1 and not self._wait_in.get(node_id, wait_in):
# Namespace shortcuts.
dist, edg_length, adj = self.dist, self._edge_length, self.dmap.adj
est = [] # Estimations' heap.
for k, v in estimations.items(): # Calculate length.
if k is not START:
d = dist[k] + edg_length(adj[k][node_id], node_attr)
heapq.heappush(est, (d, k, v))
# The estimation with minimum distance from the starting node.
estimations = {est[0][1]: est[0][2]}
# Remove unused workflow edges.
self.workflow.remove_edges_from([(v[1], node_id) for v in est[1:]])
return estimations, wait_in
|
[
"def",
"_get_node_estimations",
"(",
"self",
",",
"node_attr",
",",
"node_id",
")",
":",
"# Get data node estimations.",
"estimations",
"=",
"self",
".",
"_wf_pred",
"[",
"node_id",
"]",
"wait_in",
"=",
"node_attr",
"[",
"'wait_inputs'",
"]",
"# Namespace shortcut.",
"# Check if node has multiple estimations and it is not waiting inputs.",
"if",
"len",
"(",
"estimations",
")",
">",
"1",
"and",
"not",
"self",
".",
"_wait_in",
".",
"get",
"(",
"node_id",
",",
"wait_in",
")",
":",
"# Namespace shortcuts.",
"dist",
",",
"edg_length",
",",
"adj",
"=",
"self",
".",
"dist",
",",
"self",
".",
"_edge_length",
",",
"self",
".",
"dmap",
".",
"adj",
"est",
"=",
"[",
"]",
"# Estimations' heap.",
"for",
"k",
",",
"v",
"in",
"estimations",
".",
"items",
"(",
")",
":",
"# Calculate length.",
"if",
"k",
"is",
"not",
"START",
":",
"d",
"=",
"dist",
"[",
"k",
"]",
"+",
"edg_length",
"(",
"adj",
"[",
"k",
"]",
"[",
"node_id",
"]",
",",
"node_attr",
")",
"heapq",
".",
"heappush",
"(",
"est",
",",
"(",
"d",
",",
"k",
",",
"v",
")",
")",
"# The estimation with minimum distance from the starting node.",
"estimations",
"=",
"{",
"est",
"[",
"0",
"]",
"[",
"1",
"]",
":",
"est",
"[",
"0",
"]",
"[",
"2",
"]",
"}",
"# Remove unused workflow edges.",
"self",
".",
"workflow",
".",
"remove_edges_from",
"(",
"[",
"(",
"v",
"[",
"1",
"]",
",",
"node_id",
")",
"for",
"v",
"in",
"est",
"[",
"1",
":",
"]",
"]",
")",
"return",
"estimations",
",",
"wait_in"
] | 33.27907 | 22.813953 |
def get_stored_content_length(headers):
"""Return the content length (in bytes) of the object as stored in GCS.
x-goog-stored-content-length should always be present except when called via
the local dev_appserver. Therefore if it is not present we default to the
standard content-length header.
Args:
headers: a dict of headers from the http response.
Returns:
the stored content length.
"""
length = headers.get('x-goog-stored-content-length')
if length is None:
length = headers.get('content-length')
return length
|
[
"def",
"get_stored_content_length",
"(",
"headers",
")",
":",
"length",
"=",
"headers",
".",
"get",
"(",
"'x-goog-stored-content-length'",
")",
"if",
"length",
"is",
"None",
":",
"length",
"=",
"headers",
".",
"get",
"(",
"'content-length'",
")",
"return",
"length"
] | 31.470588 | 20.529412 |
def get_tracks(self, catalog, cache=True):
"""Get the tracks for a song given a catalog.
Args:
catalog (str): a string representing the catalog whose track you want to retrieve.
Returns:
A list of Track dicts.
Example:
>>> s = song.Song('SOWDASQ12A6310F24F')
>>> s.get_tracks('7digital')[0]
{u'catalog': u'7digital',
u'foreign_id': u'7digital:track:8445818',
u'id': u'TRJGNNY12903CC625C',
u'preview_url': u'http://previews.7digital.com/clips/34/8445818.clip.mp3',
u'release_image': u'http://cdn.7static.com/static/img/sleeveart/00/007/628/0000762838_200.jpg'}
>>>
"""
if not (cache and ('tracks' in self.cache) and (catalog in [td['catalog'] for td in self.cache['tracks']])):
kwargs = {
'bucket':['tracks', 'id:%s' % catalog],
}
response = self.get_attribute('profile', **kwargs)
if not 'tracks' in self.cache:
self.cache['tracks'] = []
# don't blow away the cache for other catalogs
potential_tracks = response['songs'][0].get('tracks', [])
existing_track_ids = [tr['foreign_id'] for tr in self.cache['tracks']]
new_tds = filter(lambda tr: tr['foreign_id'] not in existing_track_ids, potential_tracks)
self.cache['tracks'].extend(new_tds)
return filter(lambda tr: tr['catalog']==util.map_idspace(catalog), self.cache['tracks'])
|
[
"def",
"get_tracks",
"(",
"self",
",",
"catalog",
",",
"cache",
"=",
"True",
")",
":",
"if",
"not",
"(",
"cache",
"and",
"(",
"'tracks'",
"in",
"self",
".",
"cache",
")",
"and",
"(",
"catalog",
"in",
"[",
"td",
"[",
"'catalog'",
"]",
"for",
"td",
"in",
"self",
".",
"cache",
"[",
"'tracks'",
"]",
"]",
")",
")",
":",
"kwargs",
"=",
"{",
"'bucket'",
":",
"[",
"'tracks'",
",",
"'id:%s'",
"%",
"catalog",
"]",
",",
"}",
"response",
"=",
"self",
".",
"get_attribute",
"(",
"'profile'",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"'tracks'",
"in",
"self",
".",
"cache",
":",
"self",
".",
"cache",
"[",
"'tracks'",
"]",
"=",
"[",
"]",
"# don't blow away the cache for other catalogs",
"potential_tracks",
"=",
"response",
"[",
"'songs'",
"]",
"[",
"0",
"]",
".",
"get",
"(",
"'tracks'",
",",
"[",
"]",
")",
"existing_track_ids",
"=",
"[",
"tr",
"[",
"'foreign_id'",
"]",
"for",
"tr",
"in",
"self",
".",
"cache",
"[",
"'tracks'",
"]",
"]",
"new_tds",
"=",
"filter",
"(",
"lambda",
"tr",
":",
"tr",
"[",
"'foreign_id'",
"]",
"not",
"in",
"existing_track_ids",
",",
"potential_tracks",
")",
"self",
".",
"cache",
"[",
"'tracks'",
"]",
".",
"extend",
"(",
"new_tds",
")",
"return",
"filter",
"(",
"lambda",
"tr",
":",
"tr",
"[",
"'catalog'",
"]",
"==",
"util",
".",
"map_idspace",
"(",
"catalog",
")",
",",
"self",
".",
"cache",
"[",
"'tracks'",
"]",
")"
] | 46.176471 | 24.588235 |
def execute(self, string, max_tacts=None):
"""Execute algorithm (if max_times = None, there can be forever loop)."""
counter = 0
self.last_rule = None
while True:
string = self.execute_once(string)
if self.last_rule is None or self.last_rule[2]:
break
counter += 1
if max_tacts is not None and counter >= max_tacts:
raise TimeoutError("algorithm hasn't been stopped")
return string
|
[
"def",
"execute",
"(",
"self",
",",
"string",
",",
"max_tacts",
"=",
"None",
")",
":",
"counter",
"=",
"0",
"self",
".",
"last_rule",
"=",
"None",
"while",
"True",
":",
"string",
"=",
"self",
".",
"execute_once",
"(",
"string",
")",
"if",
"self",
".",
"last_rule",
"is",
"None",
"or",
"self",
".",
"last_rule",
"[",
"2",
"]",
":",
"break",
"counter",
"+=",
"1",
"if",
"max_tacts",
"is",
"not",
"None",
"and",
"counter",
">=",
"max_tacts",
":",
"raise",
"TimeoutError",
"(",
"\"algorithm hasn't been stopped\"",
")",
"return",
"string"
] | 35 | 18.785714 |
async def rawmsg(self, command, *args, **kwargs):
""" Send raw message. """
message = str(self._create_message(command, *args, **kwargs))
await self._send(message)
|
[
"async",
"def",
"rawmsg",
"(",
"self",
",",
"command",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"message",
"=",
"str",
"(",
"self",
".",
"_create_message",
"(",
"command",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"await",
"self",
".",
"_send",
"(",
"message",
")"
] | 46 | 11.25 |
def iterate(self, shuffle=True):
'''Iterate over batches in the dataset.
This method generates ``iteration_size`` batches from the dataset and
then returns.
Parameters
----------
shuffle : bool, optional
Shuffle the batches in this dataset if the iteration reaches the end
of the batch list. Defaults to True.
Yields
------
batches : data batches
A sequence of batches---often from a training, validation, or test
dataset.
'''
for _ in range(self.iteration_size):
if self._callable is not None:
yield self._callable()
else:
yield self._next_batch(shuffle)
|
[
"def",
"iterate",
"(",
"self",
",",
"shuffle",
"=",
"True",
")",
":",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"iteration_size",
")",
":",
"if",
"self",
".",
"_callable",
"is",
"not",
"None",
":",
"yield",
"self",
".",
"_callable",
"(",
")",
"else",
":",
"yield",
"self",
".",
"_next_batch",
"(",
"shuffle",
")"
] | 31.652174 | 20.782609 |
def rebuild_method(self, prepared_request, response):
"""When being redirected we may want to change the method of the request
based on certain specs or browser behavior.
"""
method = prepared_request.method
# http://tools.ietf.org/html/rfc7231#section-6.4.4
if response.status_code == codes.see_other and method != 'HEAD':
method = 'GET'
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if response.status_code == codes.found and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
if response.status_code == codes.moved and method == 'POST':
method = 'GET'
prepared_request.method = method
|
[
"def",
"rebuild_method",
"(",
"self",
",",
"prepared_request",
",",
"response",
")",
":",
"method",
"=",
"prepared_request",
".",
"method",
"# http://tools.ietf.org/html/rfc7231#section-6.4.4",
"if",
"response",
".",
"status_code",
"==",
"codes",
".",
"see_other",
"and",
"method",
"!=",
"'HEAD'",
":",
"method",
"=",
"'GET'",
"# Do what the browsers do, despite standards...",
"# First, turn 302s into GETs.",
"if",
"response",
".",
"status_code",
"==",
"codes",
".",
"found",
"and",
"method",
"!=",
"'HEAD'",
":",
"method",
"=",
"'GET'",
"# Second, if a POST is responded to with a 301, turn it into a GET.",
"# This bizarre behaviour is explained in Issue 1704.",
"if",
"response",
".",
"status_code",
"==",
"codes",
".",
"moved",
"and",
"method",
"==",
"'POST'",
":",
"method",
"=",
"'GET'",
"prepared_request",
".",
"method",
"=",
"method"
] | 40.285714 | 19.285714 |
def write(self, chars, output, format='png'):
"""Generate and write an image CAPTCHA data to the output.
:param chars: text to be generated.
:param output: output destination.
:param format: image file format
"""
im = self.generate_image(chars)
return im.save(output, format=format)
|
[
"def",
"write",
"(",
"self",
",",
"chars",
",",
"output",
",",
"format",
"=",
"'png'",
")",
":",
"im",
"=",
"self",
".",
"generate_image",
"(",
"chars",
")",
"return",
"im",
".",
"save",
"(",
"output",
",",
"format",
"=",
"format",
")"
] | 36.777778 | 6.222222 |
def dynamize_last_evaluated_key(self, last_evaluated_key):
"""
Convert a last_evaluated_key parameter into the data structure
required for Layer1.
"""
d = None
if last_evaluated_key:
hash_key = last_evaluated_key['HashKeyElement']
d = {'HashKeyElement': self.dynamize_value(hash_key)}
if 'RangeKeyElement' in last_evaluated_key:
range_key = last_evaluated_key['RangeKeyElement']
d['RangeKeyElement'] = self.dynamize_value(range_key)
return d
|
[
"def",
"dynamize_last_evaluated_key",
"(",
"self",
",",
"last_evaluated_key",
")",
":",
"d",
"=",
"None",
"if",
"last_evaluated_key",
":",
"hash_key",
"=",
"last_evaluated_key",
"[",
"'HashKeyElement'",
"]",
"d",
"=",
"{",
"'HashKeyElement'",
":",
"self",
".",
"dynamize_value",
"(",
"hash_key",
")",
"}",
"if",
"'RangeKeyElement'",
"in",
"last_evaluated_key",
":",
"range_key",
"=",
"last_evaluated_key",
"[",
"'RangeKeyElement'",
"]",
"d",
"[",
"'RangeKeyElement'",
"]",
"=",
"self",
".",
"dynamize_value",
"(",
"range_key",
")",
"return",
"d"
] | 42.538462 | 17.769231 |
def dir(self, filetype, **kwargs):
"""Return the directory containing a file of a given type.
Parameters
----------
filetype : str
File type parameter.
Returns
-------
dir : str
Directory containing the file.
"""
full = kwargs.get('full', None)
if not full:
full = self.full(filetype, **kwargs)
return os.path.dirname(full)
|
[
"def",
"dir",
"(",
"self",
",",
"filetype",
",",
"*",
"*",
"kwargs",
")",
":",
"full",
"=",
"kwargs",
".",
"get",
"(",
"'full'",
",",
"None",
")",
"if",
"not",
"full",
":",
"full",
"=",
"self",
".",
"full",
"(",
"filetype",
",",
"*",
"*",
"kwargs",
")",
"return",
"os",
".",
"path",
".",
"dirname",
"(",
"full",
")"
] | 22.789474 | 18.105263 |
async def stepper_config(self, steps_per_revolution, stepper_pins):
"""
Configure stepper motor prior to operation.
This is a FirmataPlus feature.
:param steps_per_revolution: number of steps per motor revolution
:param stepper_pins: a list of control pin numbers - either 4 or 2
:returns: No return value.
"""
data = [PrivateConstants.STEPPER_CONFIGURE, steps_per_revolution & 0x7f,
(steps_per_revolution >> 7) & 0x7f]
for pin in range(len(stepper_pins)):
data.append(stepper_pins[pin])
await self._send_sysex(PrivateConstants.STEPPER_DATA, data)
|
[
"async",
"def",
"stepper_config",
"(",
"self",
",",
"steps_per_revolution",
",",
"stepper_pins",
")",
":",
"data",
"=",
"[",
"PrivateConstants",
".",
"STEPPER_CONFIGURE",
",",
"steps_per_revolution",
"&",
"0x7f",
",",
"(",
"steps_per_revolution",
">>",
"7",
")",
"&",
"0x7f",
"]",
"for",
"pin",
"in",
"range",
"(",
"len",
"(",
"stepper_pins",
")",
")",
":",
"data",
".",
"append",
"(",
"stepper_pins",
"[",
"pin",
"]",
")",
"await",
"self",
".",
"_send_sysex",
"(",
"PrivateConstants",
".",
"STEPPER_DATA",
",",
"data",
")"
] | 40.1875 | 19.8125 |
def log_to_stream(stream=sys.stderr, level=logging.NOTSET,
fmt=logging.BASIC_FORMAT):
""" Add :class:`logging.StreamHandler` to logger which logs to a stream.
    :param stream: Stream to log to, default STDERR.
:param level: Log level, default NOTSET.
:param fmt: String with log format, default is BASIC_FORMAT.
"""
fmt = Formatter(fmt)
handler = StreamHandler()
handler.setFormatter(fmt)
handler.setLevel(level)
log.addHandler(handler)
|
[
"def",
"log_to_stream",
"(",
"stream",
"=",
"sys",
".",
"stderr",
",",
"level",
"=",
"logging",
".",
"NOTSET",
",",
"fmt",
"=",
"logging",
".",
"BASIC_FORMAT",
")",
":",
"fmt",
"=",
"Formatter",
"(",
"fmt",
")",
"handler",
"=",
"StreamHandler",
"(",
")",
"handler",
".",
"setFormatter",
"(",
"fmt",
")",
"handler",
".",
"setLevel",
"(",
"level",
")",
"log",
".",
"addHandler",
"(",
"handler",
")"
] | 34.357143 | 14.714286 |
def _prm_read_pandas(self, pd_node, full_name):
"""Reads a DataFrame from dis.
:param pd_node:
hdf5 node storing the pandas DataFrame
:param full_name:
Full name of the parameter or result whose data is to be loaded
:return:
Data to load
"""
try:
name = pd_node._v_name
pathname = pd_node._v_pathname
pandas_store = self._hdf5store
pandas_data = pandas_store.get(pathname)
return pandas_data
except:
self._logger.error('Failed loading `%s` of `%s`.' % (pd_node._v_name, full_name))
raise
|
[
"def",
"_prm_read_pandas",
"(",
"self",
",",
"pd_node",
",",
"full_name",
")",
":",
"try",
":",
"name",
"=",
"pd_node",
".",
"_v_name",
"pathname",
"=",
"pd_node",
".",
"_v_pathname",
"pandas_store",
"=",
"self",
".",
"_hdf5store",
"pandas_data",
"=",
"pandas_store",
".",
"get",
"(",
"pathname",
")",
"return",
"pandas_data",
"except",
":",
"self",
".",
"_logger",
".",
"error",
"(",
"'Failed loading `%s` of `%s`.'",
"%",
"(",
"pd_node",
".",
"_v_name",
",",
"full_name",
")",
")",
"raise"
] | 25.84 | 22.6 |
def prod(x, axis=None, keepdims=False):
"""Reduction along axes with product operation.
Args:
x (Variable): An input variable.
axis (None, int or tuple of ints): Axis or axes along which product is
calculated. Passing the default value `None` will reduce all dimensions.
keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element.
Returns:
~nnabla.Variable: N-D array.
Note:
Backward computation is not accurate in a zero value input.
"""
from .function_bases import prod as prod_base
if axis is None:
axis = range(x.ndim)
elif not hasattr(axis, '__iter__'):
axis = [axis]
return prod_base(x, axis, keepdims)
|
[
"def",
"prod",
"(",
"x",
",",
"axis",
"=",
"None",
",",
"keepdims",
"=",
"False",
")",
":",
"from",
".",
"function_bases",
"import",
"prod",
"as",
"prod_base",
"if",
"axis",
"is",
"None",
":",
"axis",
"=",
"range",
"(",
"x",
".",
"ndim",
")",
"elif",
"not",
"hasattr",
"(",
"axis",
",",
"'__iter__'",
")",
":",
"axis",
"=",
"[",
"axis",
"]",
"return",
"prod_base",
"(",
"x",
",",
"axis",
",",
"keepdims",
")"
] | 32.818182 | 21.818182 |
def _hasViewChangeQuorum(self):
# This method should just be present for master instance.
"""
Checks whether n-f nodes completed view change and whether one
of them is the next primary
"""
num_of_ready_nodes = len(self._view_change_done)
diff = self.quorum - num_of_ready_nodes
if diff > 0:
logger.info('{} needs {} ViewChangeDone messages'.format(self, diff))
return False
logger.info("{} got view change quorum ({} >= {})".
format(self.name, num_of_ready_nodes, self.quorum))
return True
|
[
"def",
"_hasViewChangeQuorum",
"(",
"self",
")",
":",
"# This method should just be present for master instance.",
"num_of_ready_nodes",
"=",
"len",
"(",
"self",
".",
"_view_change_done",
")",
"diff",
"=",
"self",
".",
"quorum",
"-",
"num_of_ready_nodes",
"if",
"diff",
">",
"0",
":",
"logger",
".",
"info",
"(",
"'{} needs {} ViewChangeDone messages'",
".",
"format",
"(",
"self",
",",
"diff",
")",
")",
"return",
"False",
"logger",
".",
"info",
"(",
"\"{} got view change quorum ({} >= {})\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"num_of_ready_nodes",
",",
"self",
".",
"quorum",
")",
")",
"return",
"True"
] | 40 | 18.666667 |
def _parse_sections(self):
""" parse sections and TOC """
def _list_to_dict(_dict, path, sec):
tmp = _dict
for elm in path[:-1]:
tmp = tmp[elm]
tmp[sec] = OrderedDict()
self._sections = list()
section_regexp = r"\n==* .* ==*\n" # '== {STUFF_NOT_\n} =='
found_obj = re.findall(section_regexp, self.content)
res = OrderedDict()
path = list()
last_depth = 0
for obj in found_obj:
depth = obj.count("=") / 2 # this gets us to the single side...
depth -= 2 # now, we can calculate depth
sec = obj.lstrip("\n= ").rstrip(" =\n")
if depth == 0:
last_depth = 0
path = [sec]
res[sec] = OrderedDict()
elif depth > last_depth:
last_depth = depth
path.append(sec)
_list_to_dict(res, path, sec)
elif depth < last_depth:
# path.pop()
while last_depth > depth:
path.pop()
last_depth -= 1
path.pop()
path.append(sec)
_list_to_dict(res, path, sec)
last_depth = depth
else:
path.pop()
path.append(sec)
_list_to_dict(res, path, sec)
last_depth = depth
self._sections.append(sec)
self._table_of_contents = res
|
[
"def",
"_parse_sections",
"(",
"self",
")",
":",
"def",
"_list_to_dict",
"(",
"_dict",
",",
"path",
",",
"sec",
")",
":",
"tmp",
"=",
"_dict",
"for",
"elm",
"in",
"path",
"[",
":",
"-",
"1",
"]",
":",
"tmp",
"=",
"tmp",
"[",
"elm",
"]",
"tmp",
"[",
"sec",
"]",
"=",
"OrderedDict",
"(",
")",
"self",
".",
"_sections",
"=",
"list",
"(",
")",
"section_regexp",
"=",
"r\"\\n==* .* ==*\\n\"",
"# '== {STUFF_NOT_\\n} =='",
"found_obj",
"=",
"re",
".",
"findall",
"(",
"section_regexp",
",",
"self",
".",
"content",
")",
"res",
"=",
"OrderedDict",
"(",
")",
"path",
"=",
"list",
"(",
")",
"last_depth",
"=",
"0",
"for",
"obj",
"in",
"found_obj",
":",
"depth",
"=",
"obj",
".",
"count",
"(",
"\"=\"",
")",
"/",
"2",
"# this gets us to the single side...",
"depth",
"-=",
"2",
"# now, we can calculate depth",
"sec",
"=",
"obj",
".",
"lstrip",
"(",
"\"\\n= \"",
")",
".",
"rstrip",
"(",
"\" =\\n\"",
")",
"if",
"depth",
"==",
"0",
":",
"last_depth",
"=",
"0",
"path",
"=",
"[",
"sec",
"]",
"res",
"[",
"sec",
"]",
"=",
"OrderedDict",
"(",
")",
"elif",
"depth",
">",
"last_depth",
":",
"last_depth",
"=",
"depth",
"path",
".",
"append",
"(",
"sec",
")",
"_list_to_dict",
"(",
"res",
",",
"path",
",",
"sec",
")",
"elif",
"depth",
"<",
"last_depth",
":",
"# path.pop()",
"while",
"last_depth",
">",
"depth",
":",
"path",
".",
"pop",
"(",
")",
"last_depth",
"-=",
"1",
"path",
".",
"pop",
"(",
")",
"path",
".",
"append",
"(",
"sec",
")",
"_list_to_dict",
"(",
"res",
",",
"path",
",",
"sec",
")",
"last_depth",
"=",
"depth",
"else",
":",
"path",
".",
"pop",
"(",
")",
"path",
".",
"append",
"(",
"sec",
")",
"_list_to_dict",
"(",
"res",
",",
"path",
",",
"sec",
")",
"last_depth",
"=",
"depth",
"self",
".",
"_sections",
".",
"append",
"(",
"sec",
")",
"self",
".",
"_table_of_contents",
"=",
"res"
] | 32.065217 | 13.456522 |
def index_open(self, index):
'''
        Opens the specified index.
http://www.elasticsearch.org/guide/reference/api/admin-indices-open-close.html
> ElasticSearch().index_open('my_index')
'''
request = self.session
url = 'http://%s:%s/%s/_open' % (self.host, self.port, index)
response = request.post(url,None)
return response
|
[
"def",
"index_open",
"(",
"self",
",",
"index",
")",
":",
"request",
"=",
"self",
".",
"session",
"url",
"=",
"'http://%s:%s/%s/_open'",
"%",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
",",
"index",
")",
"response",
"=",
"request",
".",
"post",
"(",
"url",
",",
"None",
")",
"return",
"response"
] | 34.727273 | 20.545455 |
def get_data(self):
"""Gets the asset content data.
return: (osid.transport.DataInputStream) - the length of the
content data
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
if not bool(self._my_map['data']):
raise errors.IllegalState('no data')
dbase = JSONClientValidated('repository',
runtime=self._runtime).raw()
filesys = gridfs.GridFS(dbase)
return DataInputStream(filesys.get(self._my_map['data']))
|
[
"def",
"get_data",
"(",
"self",
")",
":",
"if",
"not",
"bool",
"(",
"self",
".",
"_my_map",
"[",
"'data'",
"]",
")",
":",
"raise",
"errors",
".",
"IllegalState",
"(",
"'no data'",
")",
"dbase",
"=",
"JSONClientValidated",
"(",
"'repository'",
",",
"runtime",
"=",
"self",
".",
"_runtime",
")",
".",
"raw",
"(",
")",
"filesys",
"=",
"gridfs",
".",
"GridFS",
"(",
"dbase",
")",
"return",
"DataInputStream",
"(",
"filesys",
".",
"get",
"(",
"self",
".",
"_my_map",
"[",
"'data'",
"]",
")",
")"
] | 39.866667 | 17.2 |
def poll_until_valid(authzr, clock, client, timeout=300.0):
"""
Poll an authorization until it is in a state other than pending or
processing.
:param ~acme.messages.AuthorizationResource auth: The authorization to
complete.
:param clock: The ``IReactorTime`` implementation to use; usually the
reactor, when not testing.
:param .Client client: The ACME client.
:param float timeout: Maximum time to poll in seconds, before giving up.
:raises txacme.client.AuthorizationFailed: if the authorization is no
longer in the pending, processing, or valid states.
:raises: ``twisted.internet.defer.CancelledError`` if the authorization was
still in pending or processing state when the timeout was reached.
:rtype: Deferred[`~acme.messages.AuthorizationResource`]
:return: A deferred firing when the authorization has completed/failed; if
the authorization is valid, the authorization resource will be
returned.
"""
def repoll(result):
authzr, retry_after = result
if authzr.body.status in {STATUS_PENDING, STATUS_PROCESSING}:
return (
deferLater(clock, retry_after, lambda: None)
.addCallback(lambda _: client.poll(authzr))
.addCallback(repoll)
)
if authzr.body.status != STATUS_VALID:
raise AuthorizationFailed(authzr)
return authzr
def cancel_timeout(result):
if timeout_call.active():
timeout_call.cancel()
return result
d = client.poll(authzr).addCallback(repoll)
timeout_call = clock.callLater(timeout, d.cancel)
d.addBoth(cancel_timeout)
return d
|
[
"def",
"poll_until_valid",
"(",
"authzr",
",",
"clock",
",",
"client",
",",
"timeout",
"=",
"300.0",
")",
":",
"def",
"repoll",
"(",
"result",
")",
":",
"authzr",
",",
"retry_after",
"=",
"result",
"if",
"authzr",
".",
"body",
".",
"status",
"in",
"{",
"STATUS_PENDING",
",",
"STATUS_PROCESSING",
"}",
":",
"return",
"(",
"deferLater",
"(",
"clock",
",",
"retry_after",
",",
"lambda",
":",
"None",
")",
".",
"addCallback",
"(",
"lambda",
"_",
":",
"client",
".",
"poll",
"(",
"authzr",
")",
")",
".",
"addCallback",
"(",
"repoll",
")",
")",
"if",
"authzr",
".",
"body",
".",
"status",
"!=",
"STATUS_VALID",
":",
"raise",
"AuthorizationFailed",
"(",
"authzr",
")",
"return",
"authzr",
"def",
"cancel_timeout",
"(",
"result",
")",
":",
"if",
"timeout_call",
".",
"active",
"(",
")",
":",
"timeout_call",
".",
"cancel",
"(",
")",
"return",
"result",
"d",
"=",
"client",
".",
"poll",
"(",
"authzr",
")",
".",
"addCallback",
"(",
"repoll",
")",
"timeout_call",
"=",
"clock",
".",
"callLater",
"(",
"timeout",
",",
"d",
".",
"cancel",
")",
"d",
".",
"addBoth",
"(",
"cancel_timeout",
")",
"return",
"d"
] | 40.142857 | 20.761905 |
def fix_line_numbers(body):
r"""Recomputes all line numbers based on the number of \n characters."""
maxline = 0
for node in body.pre_order():
maxline += node.prefix.count('\n')
if isinstance(node, Leaf):
node.lineno = maxline
maxline += str(node.value).count('\n')
|
[
"def",
"fix_line_numbers",
"(",
"body",
")",
":",
"maxline",
"=",
"0",
"for",
"node",
"in",
"body",
".",
"pre_order",
"(",
")",
":",
"maxline",
"+=",
"node",
".",
"prefix",
".",
"count",
"(",
"'\\n'",
")",
"if",
"isinstance",
"(",
"node",
",",
"Leaf",
")",
":",
"node",
".",
"lineno",
"=",
"maxline",
"maxline",
"+=",
"str",
"(",
"node",
".",
"value",
")",
".",
"count",
"(",
"'\\n'",
")"
] | 38.75 | 8.75 |
def remove_in_progress_check(self, check):
"""Remove check from check in progress
:param check: Check to remove
:type check: alignak.objects.check.Check
:return: None
"""
# The check is consumed, update the in_checking properties
if check in self.checks_in_progress:
self.checks_in_progress.remove(check)
self.update_in_checking()
|
[
"def",
"remove_in_progress_check",
"(",
"self",
",",
"check",
")",
":",
"# The check is consumed, update the in_checking properties",
"if",
"check",
"in",
"self",
".",
"checks_in_progress",
":",
"self",
".",
"checks_in_progress",
".",
"remove",
"(",
"check",
")",
"self",
".",
"update_in_checking",
"(",
")"
] | 36.090909 | 10.727273 |
def unique_str(self):
""" A string that (ideally) uniquely represents this GC object. This
helps with naming files for caching. 'Unique' is defined as 'If
GC1 != GC2, then GC1.unique_str() != GC2.unique_str()'; conversely,
'If GC1 == GC2, then GC1.unique_str() == GC2.unique_str()'.
The string should be filename-safe (no \/:*?"<>|).
..note::Because of length/readability restrictions, this fxn ignores
wkt.
Example output:
"-180.000_0.250_0.000_90.000_0.000_-0.251_512_612_2013-05-21_12_32_52.945000"
"""
unique_str = "_".join(["%.3f" % f for f in self.geotransform] +
["%d" % d for d in self.x_size, self.y_size]
)
if self.date is not None:
unique_str += '_' + str(self.date)
if self.time is not None:
unique_str += '_' + str(self.time)
return unique_str.replace(':', '_')
|
[
"def",
"unique_str",
"(",
"self",
")",
":",
"unique_str",
"=",
"\"_\"",
".",
"join",
"(",
"[",
"\"%.3f\"",
"%",
"f",
"for",
"f",
"in",
"self",
".",
"geotransform",
"]",
"+",
"[",
"\"%d\"",
"%",
"d",
"for",
"d",
"in",
"self",
".",
"x_size",
",",
"self",
".",
"y_size",
"]",
")",
"if",
"self",
".",
"date",
"is",
"not",
"None",
":",
"unique_str",
"+=",
"'_'",
"+",
"str",
"(",
"self",
".",
"date",
")",
"if",
"self",
".",
"time",
"is",
"not",
"None",
":",
"unique_str",
"+=",
"'_'",
"+",
"str",
"(",
"self",
".",
"time",
")",
"return",
"unique_str",
".",
"replace",
"(",
"':'",
",",
"'_'",
")"
] | 41.391304 | 22.565217 |
def include(d, e):
"""Generate a pair of (directory, file-list) for installation.
'd' -- A directory
'e' -- A glob pattern"""
return (d, [f for f in glob.glob('%s/%s' % (d, e)) if os.path.isfile(f)])
|
[
"def",
"include",
"(",
"d",
",",
"e",
")",
":",
"return",
"(",
"d",
",",
"[",
"f",
"for",
"f",
"in",
"glob",
".",
"glob",
"(",
"'%s/%s'",
"%",
"(",
"d",
",",
"e",
")",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"f",
")",
"]",
")"
] | 30.142857 | 22.428571 |
def cleanup_a_alpha_and_derivatives(self):
r'''Removes properties set by `setup_a_alpha_and_derivatives`; run by
`GCEOSMIX.a_alpha_and_derivatives` after `a_alpha` is calculated for
every component'''
del(self.a, self.kappa, self.kappa0, self.kappa1, self.kappa2, self.kappa3, self.Tc)
|
[
"def",
"cleanup_a_alpha_and_derivatives",
"(",
"self",
")",
":",
"del",
"(",
"self",
".",
"a",
",",
"self",
".",
"kappa",
",",
"self",
".",
"kappa0",
",",
"self",
".",
"kappa1",
",",
"self",
".",
"kappa2",
",",
"self",
".",
"kappa3",
",",
"self",
".",
"Tc",
")"
] | 62.8 | 28.4 |
async def _skip(self, ctx):
""" Skips the current track. """
player = self.bot.lavalink.players.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing.')
await player.skip()
await ctx.send('⏭ | Skipped.')
|
[
"async",
"def",
"_skip",
"(",
"self",
",",
"ctx",
")",
":",
"player",
"=",
"self",
".",
"bot",
".",
"lavalink",
".",
"players",
".",
"get",
"(",
"ctx",
".",
"guild",
".",
"id",
")",
"if",
"not",
"player",
".",
"is_playing",
":",
"return",
"await",
"ctx",
".",
"send",
"(",
"'Not playing.'",
")",
"await",
"player",
".",
"skip",
"(",
")",
"await",
"ctx",
".",
"send",
"(",
"'⏭ | Skipped.')\r",
""
] | 31.333333 | 15.666667 |
def validate_specs_from_path(specs_path):
"""
Validates Dusty specs at the given path. The following checks are performed:
-That the given path exists
-That there are bundles in the given path
-That the fields in the specs match those allowed in our schemas
-That references to apps, libs, and services point at defined specs
-That there are no cycles in app and lib dependencies
"""
    # Validation of fields with schemer is now done implicitly through get_specs_from_path
# We are dealing with Dusty_Specs class in this file
log_to_client("Validating specs at path {}".format(specs_path))
if not os.path.exists(specs_path):
raise RuntimeError("Specs path not found: {}".format(specs_path))
specs = get_specs_from_path(specs_path)
_check_bare_minimum(specs)
_validate_spec_names(specs)
_validate_cycle_free(specs)
log_to_client("Validation Complete!")
|
[
"def",
"validate_specs_from_path",
"(",
"specs_path",
")",
":",
"# Validation of fields with schemer is now down implicitly through get_specs_from_path",
"# We are dealing with Dusty_Specs class in this file",
"log_to_client",
"(",
"\"Validating specs at path {}\"",
".",
"format",
"(",
"specs_path",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"specs_path",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Specs path not found: {}\"",
".",
"format",
"(",
"specs_path",
")",
")",
"specs",
"=",
"get_specs_from_path",
"(",
"specs_path",
")",
"_check_bare_minimum",
"(",
"specs",
")",
"_validate_spec_names",
"(",
"specs",
")",
"_validate_cycle_free",
"(",
"specs",
")",
"log_to_client",
"(",
"\"Validation Complete!\"",
")"
] | 48.789474 | 15.947368 |
def _draw(self):
"""Draw all the things"""
self._compute()
self._compute_x_labels()
self._compute_x_labels_major()
self._compute_y_labels()
self._compute_y_labels_major()
self._compute_secondary()
self._post_compute()
self._compute_margin()
self._decorate()
if self.series and self._has_data() and self._values:
self._plot()
else:
self.svg.draw_no_data()
|
[
"def",
"_draw",
"(",
"self",
")",
":",
"self",
".",
"_compute",
"(",
")",
"self",
".",
"_compute_x_labels",
"(",
")",
"self",
".",
"_compute_x_labels_major",
"(",
")",
"self",
".",
"_compute_y_labels",
"(",
")",
"self",
".",
"_compute_y_labels_major",
"(",
")",
"self",
".",
"_compute_secondary",
"(",
")",
"self",
".",
"_post_compute",
"(",
")",
"self",
".",
"_compute_margin",
"(",
")",
"self",
".",
"_decorate",
"(",
")",
"if",
"self",
".",
"series",
"and",
"self",
".",
"_has_data",
"(",
")",
"and",
"self",
".",
"_values",
":",
"self",
".",
"_plot",
"(",
")",
"else",
":",
"self",
".",
"svg",
".",
"draw_no_data",
"(",
")"
] | 30.666667 | 11.666667 |
def _scalePoints(points, scale=1, convertToInteger=True):
"""
Scale points and optionally convert them to integers.
"""
if convertToInteger:
points = [
(int(round(x * scale)), int(round(y * scale)))
for (x, y) in points
]
else:
points = [(x * scale, y * scale) for (x, y) in points]
return points
|
[
"def",
"_scalePoints",
"(",
"points",
",",
"scale",
"=",
"1",
",",
"convertToInteger",
"=",
"True",
")",
":",
"if",
"convertToInteger",
":",
"points",
"=",
"[",
"(",
"int",
"(",
"round",
"(",
"x",
"*",
"scale",
")",
")",
",",
"int",
"(",
"round",
"(",
"y",
"*",
"scale",
")",
")",
")",
"for",
"(",
"x",
",",
"y",
")",
"in",
"points",
"]",
"else",
":",
"points",
"=",
"[",
"(",
"x",
"*",
"scale",
",",
"y",
"*",
"scale",
")",
"for",
"(",
"x",
",",
"y",
")",
"in",
"points",
"]",
"return",
"points"
] | 29.75 | 17.083333 |
def set_hvac_mode(self, index, hvac_mode):
''' possible hvac modes are auto, auxHeatOnly, cool, heat, off '''
body = {"selection": {"selectionType": "thermostats",
"selectionMatch": self.thermostats[index]['identifier']},
"thermostat": {
"settings": {
"hvacMode": hvac_mode
}
}}
log_msg_action = "set HVAC mode"
return self.make_request(body, log_msg_action)
|
[
"def",
"set_hvac_mode",
"(",
"self",
",",
"index",
",",
"hvac_mode",
")",
":",
"body",
"=",
"{",
"\"selection\"",
":",
"{",
"\"selectionType\"",
":",
"\"thermostats\"",
",",
"\"selectionMatch\"",
":",
"self",
".",
"thermostats",
"[",
"index",
"]",
"[",
"'identifier'",
"]",
"}",
",",
"\"thermostat\"",
":",
"{",
"\"settings\"",
":",
"{",
"\"hvacMode\"",
":",
"hvac_mode",
"}",
"}",
"}",
"log_msg_action",
"=",
"\"set HVAC mode\"",
"return",
"self",
".",
"make_request",
"(",
"body",
",",
"log_msg_action",
")"
] | 52.363636 | 14.727273 |
def add_edge(self, x, y, label=None):
"""Add an edge from distribution *x* to distribution *y* with the given
*label*.
:type x: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type y: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type label: ``str`` or ``None``
"""
self.adjacency_list[x].append((y, label))
# multiple edges are allowed, so be careful
if x not in self.reverse_list[y]:
self.reverse_list[y].append(x)
|
[
"def",
"add_edge",
"(",
"self",
",",
"x",
",",
"y",
",",
"label",
"=",
"None",
")",
":",
"self",
".",
"adjacency_list",
"[",
"x",
"]",
".",
"append",
"(",
"(",
"y",
",",
"label",
")",
")",
"# multiple edges are allowed, so be careful",
"if",
"x",
"not",
"in",
"self",
".",
"reverse_list",
"[",
"y",
"]",
":",
"self",
".",
"reverse_list",
"[",
"y",
"]",
".",
"append",
"(",
"x",
")"
] | 45.428571 | 14.285714 |
def create(self, name=None, **kwargs):
"""Create a new project.
:param name: The name of the project.
:returns: An instance of the newly create project.
:rtype: renku.models.projects.Project
"""
data = self._client.api.create_project({'name': name})
return self.Meta.model(data, client=self._client, collection=self)
|
[
"def",
"create",
"(",
"self",
",",
"name",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"self",
".",
"_client",
".",
"api",
".",
"create_project",
"(",
"{",
"'name'",
":",
"name",
"}",
")",
"return",
"self",
".",
"Meta",
".",
"model",
"(",
"data",
",",
"client",
"=",
"self",
".",
"_client",
",",
"collection",
"=",
"self",
")"
] | 40.555556 | 14 |
def _get_properties(self, rule, scope, block):
"""
Implements properties and variables extraction and assignment
"""
prop, raw_value = (_prop_split_re.split(block.prop, 1) + [None])[:2]
if raw_value is not None:
raw_value = raw_value.strip()
try:
is_var = (block.prop[len(prop)] == '=')
except IndexError:
is_var = False
if is_var:
warn_deprecated(rule, "Assignment with = is deprecated; use : instead.")
calculator = self._make_calculator(rule.namespace)
prop = prop.strip()
prop = calculator.do_glob_math(prop)
if not prop:
return
_prop = (scope or '') + prop
if is_var or prop.startswith('$') and raw_value is not None:
# Pop off any flags: !default, !global
is_default = False
is_global = True # eventually sass will default this to false
while True:
splits = raw_value.rsplit(None, 1)
if len(splits) < 2 or not splits[1].startswith('!'):
break
raw_value, flag = splits
if flag == '!default':
is_default = True
elif flag == '!global':
is_global = True
else:
raise ValueError("Unrecognized flag: {0}".format(flag))
# Variable assignment
_prop = normalize_var(_prop)
try:
existing_value = rule.namespace.variable(_prop)
except KeyError:
existing_value = None
is_defined = existing_value is not None and not existing_value.is_null
if is_default and is_defined:
pass
else:
if is_defined and prop.startswith('$') and prop[1].isupper():
log.warn("Constant %r redefined", prop)
# Variable assignment is an expression, so it always performs
# real division
value = calculator.calculate(raw_value, divide=True)
rule.namespace.set_variable(
_prop, value, local_only=not is_global)
else:
# Regular property destined for output
_prop = calculator.apply_vars(_prop)
if raw_value is None:
value = None
else:
value = calculator.calculate(raw_value)
if value is None:
pass
elif isinstance(value, six.string_types):
# TODO kill this branch
pass
else:
if value.is_null:
return
style = rule.legacy_compiler_options.get(
'style', self.compiler.output_style)
compress = style == 'compressed'
value = value.render(compress=compress)
rule.properties.append((_prop, value))
|
[
"def",
"_get_properties",
"(",
"self",
",",
"rule",
",",
"scope",
",",
"block",
")",
":",
"prop",
",",
"raw_value",
"=",
"(",
"_prop_split_re",
".",
"split",
"(",
"block",
".",
"prop",
",",
"1",
")",
"+",
"[",
"None",
"]",
")",
"[",
":",
"2",
"]",
"if",
"raw_value",
"is",
"not",
"None",
":",
"raw_value",
"=",
"raw_value",
".",
"strip",
"(",
")",
"try",
":",
"is_var",
"=",
"(",
"block",
".",
"prop",
"[",
"len",
"(",
"prop",
")",
"]",
"==",
"'='",
")",
"except",
"IndexError",
":",
"is_var",
"=",
"False",
"if",
"is_var",
":",
"warn_deprecated",
"(",
"rule",
",",
"\"Assignment with = is deprecated; use : instead.\"",
")",
"calculator",
"=",
"self",
".",
"_make_calculator",
"(",
"rule",
".",
"namespace",
")",
"prop",
"=",
"prop",
".",
"strip",
"(",
")",
"prop",
"=",
"calculator",
".",
"do_glob_math",
"(",
"prop",
")",
"if",
"not",
"prop",
":",
"return",
"_prop",
"=",
"(",
"scope",
"or",
"''",
")",
"+",
"prop",
"if",
"is_var",
"or",
"prop",
".",
"startswith",
"(",
"'$'",
")",
"and",
"raw_value",
"is",
"not",
"None",
":",
"# Pop off any flags: !default, !global",
"is_default",
"=",
"False",
"is_global",
"=",
"True",
"# eventually sass will default this to false",
"while",
"True",
":",
"splits",
"=",
"raw_value",
".",
"rsplit",
"(",
"None",
",",
"1",
")",
"if",
"len",
"(",
"splits",
")",
"<",
"2",
"or",
"not",
"splits",
"[",
"1",
"]",
".",
"startswith",
"(",
"'!'",
")",
":",
"break",
"raw_value",
",",
"flag",
"=",
"splits",
"if",
"flag",
"==",
"'!default'",
":",
"is_default",
"=",
"True",
"elif",
"flag",
"==",
"'!global'",
":",
"is_global",
"=",
"True",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unrecognized flag: {0}\"",
".",
"format",
"(",
"flag",
")",
")",
"# Variable assignment",
"_prop",
"=",
"normalize_var",
"(",
"_prop",
")",
"try",
":",
"existing_value",
"=",
"rule",
".",
"namespace",
".",
"variable",
"(",
"_prop",
")",
"except",
"KeyError",
":",
"existing_value",
"=",
"None",
"is_defined",
"=",
"existing_value",
"is",
"not",
"None",
"and",
"not",
"existing_value",
".",
"is_null",
"if",
"is_default",
"and",
"is_defined",
":",
"pass",
"else",
":",
"if",
"is_defined",
"and",
"prop",
".",
"startswith",
"(",
"'$'",
")",
"and",
"prop",
"[",
"1",
"]",
".",
"isupper",
"(",
")",
":",
"log",
".",
"warn",
"(",
"\"Constant %r redefined\"",
",",
"prop",
")",
"# Variable assignment is an expression, so it always performs",
"# real division",
"value",
"=",
"calculator",
".",
"calculate",
"(",
"raw_value",
",",
"divide",
"=",
"True",
")",
"rule",
".",
"namespace",
".",
"set_variable",
"(",
"_prop",
",",
"value",
",",
"local_only",
"=",
"not",
"is_global",
")",
"else",
":",
"# Regular property destined for output",
"_prop",
"=",
"calculator",
".",
"apply_vars",
"(",
"_prop",
")",
"if",
"raw_value",
"is",
"None",
":",
"value",
"=",
"None",
"else",
":",
"value",
"=",
"calculator",
".",
"calculate",
"(",
"raw_value",
")",
"if",
"value",
"is",
"None",
":",
"pass",
"elif",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
":",
"# TODO kill this branch",
"pass",
"else",
":",
"if",
"value",
".",
"is_null",
":",
"return",
"style",
"=",
"rule",
".",
"legacy_compiler_options",
".",
"get",
"(",
"'style'",
",",
"self",
".",
"compiler",
".",
"output_style",
")",
"compress",
"=",
"style",
"==",
"'compressed'",
"value",
"=",
"value",
".",
"render",
"(",
"compress",
"=",
"compress",
")",
"rule",
".",
"properties",
".",
"append",
"(",
"(",
"_prop",
",",
"value",
")",
")"
] | 36.962025 | 17.898734 |
def _get_fields_for_class(schema_graph, graphql_types, field_type_overrides, hidden_classes,
cls_name):
"""Return a dict from field name to GraphQL field type, for the specified graph class."""
properties = schema_graph.get_element_by_class_name(cls_name).properties
# Add leaf GraphQL fields (class properties).
all_properties = {
property_name: _property_descriptor_to_graphql_type(property_obj)
for property_name, property_obj in six.iteritems(properties)
}
result = {
property_name: graphql_representation
for property_name, graphql_representation in six.iteritems(all_properties)
if graphql_representation is not None
}
# Add edge GraphQL fields (edges to other vertex classes).
schema_element = schema_graph.get_element_by_class_name(cls_name)
outbound_edges = (
('out_{}'.format(out_edge_name),
schema_graph.get_element_by_class_name(out_edge_name).properties[
EDGE_DESTINATION_PROPERTY_NAME].qualifier)
for out_edge_name in schema_element.out_connections
)
inbound_edges = (
('in_{}'.format(in_edge_name),
schema_graph.get_element_by_class_name(in_edge_name).properties[
EDGE_SOURCE_PROPERTY_NAME].qualifier)
for in_edge_name in schema_element.in_connections
)
for field_name, to_type_name in chain(outbound_edges, inbound_edges):
edge_endpoint_type_name = None
subclasses = schema_graph.get_subclass_set(to_type_name)
to_type_abstract = schema_graph.get_element_by_class_name(to_type_name).abstract
if not to_type_abstract and len(subclasses) > 1:
# If the edge endpoint type has no subclasses, it can't be coerced into any other type.
# If the edge endpoint type is abstract (an interface type), we can already
# coerce it to the proper type with a GraphQL fragment. However, if the endpoint type
# is non-abstract and has subclasses, we need to return its subclasses as an union type.
# This is because GraphQL fragments cannot be applied on concrete types, and
# GraphQL does not support inheritance of concrete types.
type_names_to_union = [
subclass
for subclass in subclasses
if subclass not in hidden_classes
]
if type_names_to_union:
edge_endpoint_type_name = _get_union_type_name(type_names_to_union)
else:
if to_type_name not in hidden_classes:
edge_endpoint_type_name = to_type_name
if edge_endpoint_type_name is not None:
# If we decided to not hide this edge due to its endpoint type being non-representable,
# represent the edge field as the GraphQL type List(edge_endpoint_type_name).
result[field_name] = GraphQLList(graphql_types[edge_endpoint_type_name])
for field_name, field_type in six.iteritems(field_type_overrides):
if field_name not in result:
raise AssertionError(u'Attempting to override field "{}" from class "{}", but the '
u'class does not contain said field'.format(field_name, cls_name))
else:
result[field_name] = field_type
return result
|
[
"def",
"_get_fields_for_class",
"(",
"schema_graph",
",",
"graphql_types",
",",
"field_type_overrides",
",",
"hidden_classes",
",",
"cls_name",
")",
":",
"properties",
"=",
"schema_graph",
".",
"get_element_by_class_name",
"(",
"cls_name",
")",
".",
"properties",
"# Add leaf GraphQL fields (class properties).",
"all_properties",
"=",
"{",
"property_name",
":",
"_property_descriptor_to_graphql_type",
"(",
"property_obj",
")",
"for",
"property_name",
",",
"property_obj",
"in",
"six",
".",
"iteritems",
"(",
"properties",
")",
"}",
"result",
"=",
"{",
"property_name",
":",
"graphql_representation",
"for",
"property_name",
",",
"graphql_representation",
"in",
"six",
".",
"iteritems",
"(",
"all_properties",
")",
"if",
"graphql_representation",
"is",
"not",
"None",
"}",
"# Add edge GraphQL fields (edges to other vertex classes).",
"schema_element",
"=",
"schema_graph",
".",
"get_element_by_class_name",
"(",
"cls_name",
")",
"outbound_edges",
"=",
"(",
"(",
"'out_{}'",
".",
"format",
"(",
"out_edge_name",
")",
",",
"schema_graph",
".",
"get_element_by_class_name",
"(",
"out_edge_name",
")",
".",
"properties",
"[",
"EDGE_DESTINATION_PROPERTY_NAME",
"]",
".",
"qualifier",
")",
"for",
"out_edge_name",
"in",
"schema_element",
".",
"out_connections",
")",
"inbound_edges",
"=",
"(",
"(",
"'in_{}'",
".",
"format",
"(",
"in_edge_name",
")",
",",
"schema_graph",
".",
"get_element_by_class_name",
"(",
"in_edge_name",
")",
".",
"properties",
"[",
"EDGE_SOURCE_PROPERTY_NAME",
"]",
".",
"qualifier",
")",
"for",
"in_edge_name",
"in",
"schema_element",
".",
"in_connections",
")",
"for",
"field_name",
",",
"to_type_name",
"in",
"chain",
"(",
"outbound_edges",
",",
"inbound_edges",
")",
":",
"edge_endpoint_type_name",
"=",
"None",
"subclasses",
"=",
"schema_graph",
".",
"get_subclass_set",
"(",
"to_type_name",
")",
"to_type_abstract",
"=",
"schema_graph",
".",
"get_element_by_class_name",
"(",
"to_type_name",
")",
".",
"abstract",
"if",
"not",
"to_type_abstract",
"and",
"len",
"(",
"subclasses",
")",
">",
"1",
":",
"# If the edge endpoint type has no subclasses, it can't be coerced into any other type.",
"# If the edge endpoint type is abstract (an interface type), we can already",
"# coerce it to the proper type with a GraphQL fragment. However, if the endpoint type",
"# is non-abstract and has subclasses, we need to return its subclasses as an union type.",
"# This is because GraphQL fragments cannot be applied on concrete types, and",
"# GraphQL does not support inheritance of concrete types.",
"type_names_to_union",
"=",
"[",
"subclass",
"for",
"subclass",
"in",
"subclasses",
"if",
"subclass",
"not",
"in",
"hidden_classes",
"]",
"if",
"type_names_to_union",
":",
"edge_endpoint_type_name",
"=",
"_get_union_type_name",
"(",
"type_names_to_union",
")",
"else",
":",
"if",
"to_type_name",
"not",
"in",
"hidden_classes",
":",
"edge_endpoint_type_name",
"=",
"to_type_name",
"if",
"edge_endpoint_type_name",
"is",
"not",
"None",
":",
"# If we decided to not hide this edge due to its endpoint type being non-representable,",
"# represent the edge field as the GraphQL type List(edge_endpoint_type_name).",
"result",
"[",
"field_name",
"]",
"=",
"GraphQLList",
"(",
"graphql_types",
"[",
"edge_endpoint_type_name",
"]",
")",
"for",
"field_name",
",",
"field_type",
"in",
"six",
".",
"iteritems",
"(",
"field_type_overrides",
")",
":",
"if",
"field_name",
"not",
"in",
"result",
":",
"raise",
"AssertionError",
"(",
"u'Attempting to override field \"{}\" from class \"{}\", but the '",
"u'class does not contain said field'",
".",
"format",
"(",
"field_name",
",",
"cls_name",
")",
")",
"else",
":",
"result",
"[",
"field_name",
"]",
"=",
"field_type",
"return",
"result"
] | 49.757576 | 27.227273 |
def get_minor_version(version, remove=None):
"""Return minor version of a provided version string. Minor version is the
second component in the dot-separated version string. For non-version-like
strings this function returns ``None``.
The ``remove`` parameter is deprecated since version 1.18 and will be
removed in the future.
:param version: Version string
:type version: str
:rtype: str
"""
if remove:
warnings.warn("remove argument is deprecated", DeprecationWarning)
version_split = version.split(".")
try:
# Assume MAJOR.MINOR.REST...
return version_split[1]
except IndexError:
return None
|
[
"def",
"get_minor_version",
"(",
"version",
",",
"remove",
"=",
"None",
")",
":",
"if",
"remove",
":",
"warnings",
".",
"warn",
"(",
"\"remove argument is deprecated\"",
",",
"DeprecationWarning",
")",
"version_split",
"=",
"version",
".",
"split",
"(",
"\".\"",
")",
"try",
":",
"# Assume MAJOR.MINOR.REST...",
"return",
"version_split",
"[",
"1",
"]",
"except",
"IndexError",
":",
"return",
"None"
] | 33.1 | 18.35 |
def autofill(ctx, f):
"""
Fills your timesheet up to today, for the defined auto_fill_days.
"""
auto_fill_days = ctx.obj['settings']['auto_fill_days']
if not auto_fill_days:
ctx.obj['view'].view.err("The parameter `auto_fill_days` must be set "
"to use this command.")
return
today = datetime.date.today()
last_day = calendar.monthrange(today.year, today.month)
last_date = datetime.date(today.year, today.month, last_day[1])
timesheet_collection = get_timesheet_collection_for_context(
ctx, f
)
t = timesheet_collection.latest()
t.prefill(auto_fill_days, last_date)
t.save()
ctx.obj['view'].msg("Your entries file has been filled.")
|
[
"def",
"autofill",
"(",
"ctx",
",",
"f",
")",
":",
"auto_fill_days",
"=",
"ctx",
".",
"obj",
"[",
"'settings'",
"]",
"[",
"'auto_fill_days'",
"]",
"if",
"not",
"auto_fill_days",
":",
"ctx",
".",
"obj",
"[",
"'view'",
"]",
".",
"view",
".",
"err",
"(",
"\"The parameter `auto_fill_days` must be set \"",
"\"to use this command.\"",
")",
"return",
"today",
"=",
"datetime",
".",
"date",
".",
"today",
"(",
")",
"last_day",
"=",
"calendar",
".",
"monthrange",
"(",
"today",
".",
"year",
",",
"today",
".",
"month",
")",
"last_date",
"=",
"datetime",
".",
"date",
"(",
"today",
".",
"year",
",",
"today",
".",
"month",
",",
"last_day",
"[",
"1",
"]",
")",
"timesheet_collection",
"=",
"get_timesheet_collection_for_context",
"(",
"ctx",
",",
"f",
")",
"t",
"=",
"timesheet_collection",
".",
"latest",
"(",
")",
"t",
".",
"prefill",
"(",
"auto_fill_days",
",",
"last_date",
")",
"t",
".",
"save",
"(",
")",
"ctx",
".",
"obj",
"[",
"'view'",
"]",
".",
"msg",
"(",
"\"Your entries file has been filled.\"",
")"
] | 31.652174 | 22.173913 |
def render_placeholder(request, placeholder, parent_object=None, template_name=None, cachable=None, limit_parent_language=True, fallback_language=None):
"""
Render a :class:`~fluent_contents.models.Placeholder` object.
Returns a :class:`~fluent_contents.models.ContentItemOutput` object
which contains the HTML output and :class:`~django.forms.Media` object.
This function also caches the complete output of the placeholder
when all individual items are cacheable.
:param request: The current request object.
:type request: :class:`~django.http.HttpRequest`
:param placeholder: The placeholder object.
:type placeholder: :class:`~fluent_contents.models.Placeholder`
:param parent_object: Optional, the parent object of the placeholder (already implied by the placeholder)
:param template_name: Optional template name used to concatenate the placeholder output.
:type template_name: str | None
:param cachable: Whether the output is cachable, otherwise the full output will not be cached.
Default: False when using a template, True otherwise.
:type cachable: bool | None
:param limit_parent_language: Whether the items should be limited to the parent language.
:type limit_parent_language: bool
:param fallback_language: The fallback language to use if there are no items in the current language. Passing ``True`` uses the default :ref:`FLUENT_CONTENTS_DEFAULT_LANGUAGE_CODE`.
:type fallback_language: bool/str
:rtype: :class:`~fluent_contents.models.ContentItemOutput`
"""
output = PlaceholderRenderingPipe(request).render_placeholder(
placeholder=placeholder,
parent_object=parent_object,
template_name=template_name,
cachable=cachable,
limit_parent_language=limit_parent_language,
fallback_language=fallback_language
)
# Wrap the result after it's stored in the cache.
if markers.is_edit_mode(request):
output.html = markers.wrap_placeholder_output(output.html, placeholder)
return output
|
[
"def",
"render_placeholder",
"(",
"request",
",",
"placeholder",
",",
"parent_object",
"=",
"None",
",",
"template_name",
"=",
"None",
",",
"cachable",
"=",
"None",
",",
"limit_parent_language",
"=",
"True",
",",
"fallback_language",
"=",
"None",
")",
":",
"output",
"=",
"PlaceholderRenderingPipe",
"(",
"request",
")",
".",
"render_placeholder",
"(",
"placeholder",
"=",
"placeholder",
",",
"parent_object",
"=",
"parent_object",
",",
"template_name",
"=",
"template_name",
",",
"cachable",
"=",
"cachable",
",",
"limit_parent_language",
"=",
"limit_parent_language",
",",
"fallback_language",
"=",
"fallback_language",
")",
"# Wrap the result after it's stored in the cache.",
"if",
"markers",
".",
"is_edit_mode",
"(",
"request",
")",
":",
"output",
".",
"html",
"=",
"markers",
".",
"wrap_placeholder_output",
"(",
"output",
".",
"html",
",",
"placeholder",
")",
"return",
"output"
] | 52.230769 | 27.820513 |
def print_help(self, classes=False):
"""Print the help for each Configurable class in self.classes.
If classes=False (the default), only flags and aliases are printed.
"""
self.print_subcommands()
self.print_options()
if classes:
if self.classes:
print "Class parameters"
print "----------------"
print
for p in wrap_paragraphs(self.keyvalue_description):
print p
print
for cls in self.classes:
cls.class_print_help()
print
else:
print "To see all available configurables, use `--help-all`"
print
|
[
"def",
"print_help",
"(",
"self",
",",
"classes",
"=",
"False",
")",
":",
"self",
".",
"print_subcommands",
"(",
")",
"self",
".",
"print_options",
"(",
")",
"if",
"classes",
":",
"if",
"self",
".",
"classes",
":",
"print",
"\"Class parameters\"",
"print",
"\"----------------\"",
"print",
"for",
"p",
"in",
"wrap_paragraphs",
"(",
"self",
".",
"keyvalue_description",
")",
":",
"print",
"p",
"print",
"for",
"cls",
"in",
"self",
".",
"classes",
":",
"cls",
".",
"class_print_help",
"(",
")",
"print",
"else",
":",
"print",
"\"To see all available configurables, use `--help-all`\"",
"print"
] | 31.173913 | 17.130435 |
def Authenticate(self, app_id, challenge_data,
print_callback=sys.stderr.write):
"""See base class."""
# If authenticator is not plugged in, prompt
try:
device = u2f.GetLocalU2FInterface(origin=self.origin)
except errors.NoDeviceFoundError:
print_callback('Please insert your security key and press enter...')
six.moves.input()
device = u2f.GetLocalU2FInterface(origin=self.origin)
print_callback('Please touch your security key.\n')
for challenge_item in challenge_data:
raw_challenge = challenge_item['challenge']
key = challenge_item['key']
try:
result = device.Authenticate(app_id, raw_challenge, [key])
except errors.U2FError as e:
if e.code == errors.U2FError.DEVICE_INELIGIBLE:
continue
else:
raise
client_data = self._base64encode(result.client_data.GetJson().encode())
signature_data = self._base64encode(result.signature_data)
key_handle = self._base64encode(result.key_handle)
return {
'clientData': client_data,
'signatureData': signature_data,
'applicationId': app_id,
'keyHandle': key_handle,
}
raise errors.U2FError(errors.U2FError.DEVICE_INELIGIBLE)
|
[
"def",
"Authenticate",
"(",
"self",
",",
"app_id",
",",
"challenge_data",
",",
"print_callback",
"=",
"sys",
".",
"stderr",
".",
"write",
")",
":",
"# If authenticator is not plugged in, prompt",
"try",
":",
"device",
"=",
"u2f",
".",
"GetLocalU2FInterface",
"(",
"origin",
"=",
"self",
".",
"origin",
")",
"except",
"errors",
".",
"NoDeviceFoundError",
":",
"print_callback",
"(",
"'Please insert your security key and press enter...'",
")",
"six",
".",
"moves",
".",
"input",
"(",
")",
"device",
"=",
"u2f",
".",
"GetLocalU2FInterface",
"(",
"origin",
"=",
"self",
".",
"origin",
")",
"print_callback",
"(",
"'Please touch your security key.\\n'",
")",
"for",
"challenge_item",
"in",
"challenge_data",
":",
"raw_challenge",
"=",
"challenge_item",
"[",
"'challenge'",
"]",
"key",
"=",
"challenge_item",
"[",
"'key'",
"]",
"try",
":",
"result",
"=",
"device",
".",
"Authenticate",
"(",
"app_id",
",",
"raw_challenge",
",",
"[",
"key",
"]",
")",
"except",
"errors",
".",
"U2FError",
"as",
"e",
":",
"if",
"e",
".",
"code",
"==",
"errors",
".",
"U2FError",
".",
"DEVICE_INELIGIBLE",
":",
"continue",
"else",
":",
"raise",
"client_data",
"=",
"self",
".",
"_base64encode",
"(",
"result",
".",
"client_data",
".",
"GetJson",
"(",
")",
".",
"encode",
"(",
")",
")",
"signature_data",
"=",
"self",
".",
"_base64encode",
"(",
"result",
".",
"signature_data",
")",
"key_handle",
"=",
"self",
".",
"_base64encode",
"(",
"result",
".",
"key_handle",
")",
"return",
"{",
"'clientData'",
":",
"client_data",
",",
"'signatureData'",
":",
"signature_data",
",",
"'applicationId'",
":",
"app_id",
",",
"'keyHandle'",
":",
"key_handle",
",",
"}",
"raise",
"errors",
".",
"U2FError",
"(",
"errors",
".",
"U2FError",
".",
"DEVICE_INELIGIBLE",
")"
] | 33.621622 | 20.189189 |
def get_type_name(t):
""" Get a human-friendly name for the given type.
:type t: type|None
:rtype: unicode
"""
# Lookup in the mapping
try:
return __type_names[t]
except KeyError:
# Specific types
if issubclass(t, six.integer_types):
return _(u'Integer number')
# Get name from the Type itself
return six.text_type(t.__name__).capitalize()
|
[
"def",
"get_type_name",
"(",
"t",
")",
":",
"# Lookup in the mapping",
"try",
":",
"return",
"__type_names",
"[",
"t",
"]",
"except",
"KeyError",
":",
"# Specific types",
"if",
"issubclass",
"(",
"t",
",",
"six",
".",
"integer_types",
")",
":",
"return",
"_",
"(",
"u'Integer number'",
")",
"# Get name from the Type itself",
"return",
"six",
".",
"text_type",
"(",
"t",
".",
"__name__",
")",
".",
"capitalize",
"(",
")"
] | 25.375 | 15.5 |
def postinit(self, lower=None, upper=None, step=None):
"""Do some setup after initialisation.
:param lower: The lower index in the slice.
:value lower: NodeNG or None
:param upper: The upper index in the slice.
:value upper: NodeNG or None
:param step: The step to take between index.
:param step: NodeNG or None
"""
self.lower = lower
self.upper = upper
self.step = step
|
[
"def",
"postinit",
"(",
"self",
",",
"lower",
"=",
"None",
",",
"upper",
"=",
"None",
",",
"step",
"=",
"None",
")",
":",
"self",
".",
"lower",
"=",
"lower",
"self",
".",
"upper",
"=",
"upper",
"self",
".",
"step",
"=",
"step"
] | 29.866667 | 15 |
def _fix_path():
"""Finds the google_appengine directory and fixes Python imports to use it."""
import os
import sys
all_paths = os.environ.get('PYTHONPATH').split(os.pathsep)
for path_dir in all_paths:
dev_appserver_path = os.path.join(path_dir, 'dev_appserver.py')
if os.path.exists(dev_appserver_path):
logging.debug('Found appengine SDK on path!')
google_appengine = os.path.dirname(os.path.realpath(dev_appserver_path))
sys.path.append(google_appengine)
# Use the next import will fix up sys.path even further to bring in
# any dependent lib directories that the SDK needs.
dev_appserver = __import__('dev_appserver')
sys.path.extend(dev_appserver.EXTRA_PATHS)
return
|
[
"def",
"_fix_path",
"(",
")",
":",
"import",
"os",
"import",
"sys",
"all_paths",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'PYTHONPATH'",
")",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"for",
"path_dir",
"in",
"all_paths",
":",
"dev_appserver_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path_dir",
",",
"'dev_appserver.py'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dev_appserver_path",
")",
":",
"logging",
".",
"debug",
"(",
"'Found appengine SDK on path!'",
")",
"google_appengine",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"dev_appserver_path",
")",
")",
"sys",
".",
"path",
".",
"append",
"(",
"google_appengine",
")",
"# Use the next import will fix up sys.path even further to bring in",
"# any dependent lib directories that the SDK needs.",
"dev_appserver",
"=",
"__import__",
"(",
"'dev_appserver'",
")",
"sys",
".",
"path",
".",
"extend",
"(",
"dev_appserver",
".",
"EXTRA_PATHS",
")",
"return"
] | 45.1875 | 17.9375 |
def _public(self, command, **params):
"""Invoke the 'command' public API with optional params."""
params['command'] = command
response = self.session.get(self._public_url, params=params)
return response
|
[
"def",
"_public",
"(",
"self",
",",
"command",
",",
"*",
"*",
"params",
")",
":",
"params",
"[",
"'command'",
"]",
"=",
"command",
"response",
"=",
"self",
".",
"session",
".",
"get",
"(",
"self",
".",
"_public_url",
",",
"params",
"=",
"params",
")",
"return",
"response"
] | 46 | 10.6 |
def word_freqs() -> List[Tuple[str, int]]:
"""
Get word frequency from Thai National Corpus (TNC)
"""
lines = list(get_corpus(_FILENAME))
listword = []
for line in lines:
listindata = line.split("\t")
listword.append((listindata[0], int(listindata[1])))
return listword
|
[
"def",
"word_freqs",
"(",
")",
"->",
"List",
"[",
"Tuple",
"[",
"str",
",",
"int",
"]",
"]",
":",
"lines",
"=",
"list",
"(",
"get_corpus",
"(",
"_FILENAME",
")",
")",
"listword",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
":",
"listindata",
"=",
"line",
".",
"split",
"(",
"\"\\t\"",
")",
"listword",
".",
"append",
"(",
"(",
"listindata",
"[",
"0",
"]",
",",
"int",
"(",
"listindata",
"[",
"1",
"]",
")",
")",
")",
"return",
"listword"
] | 27.636364 | 12.909091 |
def _get_access_token(self):
"""
Get IAM access token using API key.
"""
err = 'Failed to contact IAM token service'
try:
resp = super(IAMSession, self).request(
'POST',
self._token_url,
auth=self._token_auth,
headers={'Accepts': 'application/json'},
data={
'grant_type': 'urn:ibm:params:oauth:grant-type:apikey',
'response_type': 'cloud_iam',
'apikey': self._api_key
}
)
err = response_to_json_dict(resp).get('errorMessage', err)
resp.raise_for_status()
return response_to_json_dict(resp)['access_token']
except KeyError:
raise CloudantException('Invalid response from IAM token service')
except RequestException:
raise CloudantException(err)
|
[
"def",
"_get_access_token",
"(",
"self",
")",
":",
"err",
"=",
"'Failed to contact IAM token service'",
"try",
":",
"resp",
"=",
"super",
"(",
"IAMSession",
",",
"self",
")",
".",
"request",
"(",
"'POST'",
",",
"self",
".",
"_token_url",
",",
"auth",
"=",
"self",
".",
"_token_auth",
",",
"headers",
"=",
"{",
"'Accepts'",
":",
"'application/json'",
"}",
",",
"data",
"=",
"{",
"'grant_type'",
":",
"'urn:ibm:params:oauth:grant-type:apikey'",
",",
"'response_type'",
":",
"'cloud_iam'",
",",
"'apikey'",
":",
"self",
".",
"_api_key",
"}",
")",
"err",
"=",
"response_to_json_dict",
"(",
"resp",
")",
".",
"get",
"(",
"'errorMessage'",
",",
"err",
")",
"resp",
".",
"raise_for_status",
"(",
")",
"return",
"response_to_json_dict",
"(",
"resp",
")",
"[",
"'access_token'",
"]",
"except",
"KeyError",
":",
"raise",
"CloudantException",
"(",
"'Invalid response from IAM token service'",
")",
"except",
"RequestException",
":",
"raise",
"CloudantException",
"(",
"err",
")"
] | 33.925926 | 17.111111 |
def from_json(cls, data):
"""Create a header from a dictionary.
Args:
data: {
"data_type": {}, //Type of data (e.g. Temperature)
"unit": string,
"analysis_period": {} // A Ladybug AnalysisPeriod
"metadata": {}, // A dictionary of metadata
}
"""
# assign default values
assert 'data_type' in data, 'Required keyword "data_type" is missing!'
keys = ('data_type', 'unit', 'analysis_period', 'metadata')
for key in keys:
if key not in data:
data[key] = None
data_type = DataTypeBase.from_json(data['data_type'])
ap = AnalysisPeriod.from_json(data['analysis_period'])
return cls(data_type, data['unit'], ap, data['metadata'])
|
[
"def",
"from_json",
"(",
"cls",
",",
"data",
")",
":",
"# assign default values",
"assert",
"'data_type'",
"in",
"data",
",",
"'Required keyword \"data_type\" is missing!'",
"keys",
"=",
"(",
"'data_type'",
",",
"'unit'",
",",
"'analysis_period'",
",",
"'metadata'",
")",
"for",
"key",
"in",
"keys",
":",
"if",
"key",
"not",
"in",
"data",
":",
"data",
"[",
"key",
"]",
"=",
"None",
"data_type",
"=",
"DataTypeBase",
".",
"from_json",
"(",
"data",
"[",
"'data_type'",
"]",
")",
"ap",
"=",
"AnalysisPeriod",
".",
"from_json",
"(",
"data",
"[",
"'analysis_period'",
"]",
")",
"return",
"cls",
"(",
"data_type",
",",
"data",
"[",
"'unit'",
"]",
",",
"ap",
",",
"data",
"[",
"'metadata'",
"]",
")"
] | 38 | 20.190476 |
def flatten_zip_dataset(*args):
"""A list of examples to a dataset containing mixed examples.
Given a list of `n` dataset examples, flatten them by converting
each element into a dataset and concatenating them to convert into a
single dataset.
Args:
*args: A list containing one example each from `n` different datasets.
Returns:
flattened: A new dataset containing the examples from the list as part
of a single dataset.
"""
flattened = tf.data.Dataset.from_tensors(args[0])
for ex in args[1:]:
flattened = flattened.concatenate(tf.data.Dataset.from_tensors(ex))
return flattened
|
[
"def",
"flatten_zip_dataset",
"(",
"*",
"args",
")",
":",
"flattened",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensors",
"(",
"args",
"[",
"0",
"]",
")",
"for",
"ex",
"in",
"args",
"[",
"1",
":",
"]",
":",
"flattened",
"=",
"flattened",
".",
"concatenate",
"(",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensors",
"(",
"ex",
")",
")",
"return",
"flattened"
] | 33.555556 | 24.222222 |
def pos_tag(args):
"""Tag words with their part of speech."""
tagger = POSTagger(lang=args.lang)
tag(tagger, args)
|
[
"def",
"pos_tag",
"(",
"args",
")",
":",
"tagger",
"=",
"POSTagger",
"(",
"lang",
"=",
"args",
".",
"lang",
")",
"tag",
"(",
"tagger",
",",
"args",
")"
] | 29.25 | 11.75 |
def create_platform(platform):
'''
.. versionadded:: 2019.2.0
Create a new device platform
platform
String of device platform, e.g., ``junos``
CLI Example:
.. code-block:: bash
salt myminion netbox.create_platform junos
'''
nb_platform = get_('dcim', 'platforms', slug=slugify(platform))
if nb_platform:
return False
else:
payload = {'name': platform, 'slug': slugify(platform)}
plat = _add('dcim', 'platforms', payload)
if plat:
return {'dcim': {'platforms': payload}}
else:
return False
|
[
"def",
"create_platform",
"(",
"platform",
")",
":",
"nb_platform",
"=",
"get_",
"(",
"'dcim'",
",",
"'platforms'",
",",
"slug",
"=",
"slugify",
"(",
"platform",
")",
")",
"if",
"nb_platform",
":",
"return",
"False",
"else",
":",
"payload",
"=",
"{",
"'name'",
":",
"platform",
",",
"'slug'",
":",
"slugify",
"(",
"platform",
")",
"}",
"plat",
"=",
"_add",
"(",
"'dcim'",
",",
"'platforms'",
",",
"payload",
")",
"if",
"plat",
":",
"return",
"{",
"'dcim'",
":",
"{",
"'platforms'",
":",
"payload",
"}",
"}",
"else",
":",
"return",
"False"
] | 23.56 | 23.64 |
def kill(config, container, *args, **kwargs):
'''
Kill a running container
:type container: string
:param container: The container id to kill
:rtype: dict
:returns: boolean
'''
err = "Unknown"
client = _get_client(config)
try:
dcontainer = _get_container_infos(config, container)['Id']
if is_running(config, dcontainer):
client.kill(dcontainer)
if not is_running(config, dcontainer):
print "Container killed."
return True
else:
print "Container not running."
return True
except Exception as e:
err = e
utils.error("Unable to kill the container: %s"%err)
return False
|
[
"def",
"kill",
"(",
"config",
",",
"container",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"err",
"=",
"\"Unknown\"",
"client",
"=",
"_get_client",
"(",
"config",
")",
"try",
":",
"dcontainer",
"=",
"_get_container_infos",
"(",
"config",
",",
"container",
")",
"[",
"'Id'",
"]",
"if",
"is_running",
"(",
"config",
",",
"dcontainer",
")",
":",
"client",
".",
"kill",
"(",
"dcontainer",
")",
"if",
"not",
"is_running",
"(",
"config",
",",
"dcontainer",
")",
":",
"print",
"\"Container killed.\"",
"return",
"True",
"else",
":",
"print",
"\"Container not running.\"",
"return",
"True",
"except",
"Exception",
"as",
"e",
":",
"err",
"=",
"e",
"utils",
".",
"error",
"(",
"\"Unable to kill the container: %s\"",
"%",
"err",
")",
"return",
"False"
] | 27.192308 | 17.961538 |
def _check_request_results(self, results):
"""
Check the result of each request that we made. If a failure occurred,
but some requests succeeded, log and count the failures. If all
requests failed, raise an error.
:return:
The list of responses, with a None value for any requests that
failed.
"""
responses = []
failed_endpoints = []
for index, result_tuple in enumerate(results):
success, result = result_tuple
if success:
responses.append(result)
else:
endpoint = self.endpoints[index]
self.log.failure(
'Failed to make a request to a marathon-lb instance: '
'{endpoint}', result, LogLevel.error, endpoint=endpoint)
responses.append(None)
failed_endpoints.append(endpoint)
if len(failed_endpoints) == len(self.endpoints):
raise RuntimeError(
'Failed to make a request to all marathon-lb instances')
if failed_endpoints:
self.log.error(
'Failed to make a request to {x}/{y} marathon-lb instances: '
'{endpoints}', x=len(failed_endpoints), y=len(self.endpoints),
endpoints=failed_endpoints)
return responses
|
[
"def",
"_check_request_results",
"(",
"self",
",",
"results",
")",
":",
"responses",
"=",
"[",
"]",
"failed_endpoints",
"=",
"[",
"]",
"for",
"index",
",",
"result_tuple",
"in",
"enumerate",
"(",
"results",
")",
":",
"success",
",",
"result",
"=",
"result_tuple",
"if",
"success",
":",
"responses",
".",
"append",
"(",
"result",
")",
"else",
":",
"endpoint",
"=",
"self",
".",
"endpoints",
"[",
"index",
"]",
"self",
".",
"log",
".",
"failure",
"(",
"'Failed to make a request to a marathon-lb instance: '",
"'{endpoint}'",
",",
"result",
",",
"LogLevel",
".",
"error",
",",
"endpoint",
"=",
"endpoint",
")",
"responses",
".",
"append",
"(",
"None",
")",
"failed_endpoints",
".",
"append",
"(",
"endpoint",
")",
"if",
"len",
"(",
"failed_endpoints",
")",
"==",
"len",
"(",
"self",
".",
"endpoints",
")",
":",
"raise",
"RuntimeError",
"(",
"'Failed to make a request to all marathon-lb instances'",
")",
"if",
"failed_endpoints",
":",
"self",
".",
"log",
".",
"error",
"(",
"'Failed to make a request to {x}/{y} marathon-lb instances: '",
"'{endpoints}'",
",",
"x",
"=",
"len",
"(",
"failed_endpoints",
")",
",",
"y",
"=",
"len",
"(",
"self",
".",
"endpoints",
")",
",",
"endpoints",
"=",
"failed_endpoints",
")",
"return",
"responses"
] | 38.342857 | 19.028571 |