repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | docstring (string, 1-46.9k chars) | language (string, 1 distinct value) | partition (string, 3 distinct values)
---|---|---|---|---|---|---|
nathankw/pulsarpy
|
pulsarpy/models.py
|
https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L500-L511
|
def index(cls):
    """Fetches all records.

    Returns:
        `dict`. The JSON formatted response.

    Raises:
        `requests.exceptions.HTTPError`: The status code is not ok.
    """
    res = requests.get(cls.URL, headers=HEADERS, verify=False)
    res.raise_for_status()
    return res.json()
|
Fetches all records.
Returns:
`dict`. The JSON formatted response.
Raises:
`requests.exceptions.HTTPError`: The status code is not ok.
|
python
|
train
|
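A rough usage sketch for the pattern in this row, assuming a base class that carries the endpoint URL and auth headers (the `Biosample` subclass, endpoint, and token header below are hypothetical stand-ins, not pulsarpy's actual names):

import requests

HEADERS = {"Authorization": "token MY_API_TOKEN"}  # hypothetical auth header

class Model:
    URL = None  # subclasses point this at their REST collection endpoint

    @classmethod
    def index(cls):
        """Fetch all records for this model, mirroring the sample above."""
        res = requests.get(cls.URL, headers=HEADERS, verify=False)
        res.raise_for_status()
        return res.json()

class Biosample(Model):
    URL = "https://pulsar.example.org/biosamples"  # hypothetical endpoint

# records = Biosample.index()  # returns the decoded JSON payload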
apache/spark
|
python/pyspark/sql/readwriter.py
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L78-L89
|
def format(self, source):
    """Specifies the input data source format.

    :param source: string, name of the data source, e.g. 'json', 'parquet'.

    >>> df = spark.read.format('json').load('python/test_support/sql/people.json')
    >>> df.dtypes
    [('age', 'bigint'), ('name', 'string')]
    """
    self._jreader = self._jreader.format(source)
    return self
|
Specifies the input data source format.
:param source: string, name of the data source, e.g. 'json', 'parquet'.
>>> df = spark.read.format('json').load('python/test_support/sql/people.json')
>>> df.dtypes
[('age', 'bigint'), ('name', 'string')]
|
python
|
train
|
bitesofcode/projexui
|
projexui/widgets/xsplitter.py
|
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xsplitter.py#L130-L155
|
def collapseBefore( self, handle ):
    """
    Collapses the splitter before the inputted handle.

    :param handle | <XSplitterHandle>
    """
    self.setUpdatesEnabled(False)

    # collapse all items after the current handle
    if ( handle.isCollapsed() ):
        self.setSizes(handle.restoreSizes())

    # collapse all items before the current handle
    found = False
    sizes = self.sizes()
    handle.storeSizes(sizes)
    for c in range(self.count()):
        if ( self.handle(c) == handle ):
            break
        sizes[c] = 0

    self.setSizes(sizes)
    self.setUpdatesEnabled(True)
|
Collapses the splitter before the inputted handle.
:param handle | <XSplitterHandle>
|
python
|
train
|
MozillaSecurity/laniakea
|
laniakea/core/providers/packet/manager.py
|
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/packet/manager.py#L312-L320
|
def reboot(self, devices):
    """Reboot one or more devices.
    """
    for device in devices:
        self.logger.info('Rebooting: %s', device.id)
        try:
            device.reboot()
        except packet.baseapi.Error:
            raise PacketManagerException('Unable to reboot instance "{}"'.format(device.id))
|
Reboot one or more devices.
|
python
|
train
|
jonathf/chaospy
|
chaospy/distributions/evaluation/inverse.py
|
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/distributions/evaluation/inverse.py#L42-L88
|
def evaluate_inverse(
        distribution,
        u_data,
        cache=None,
        parameters=None
):
    """
    Evaluate inverse Rosenblatt transformation.

    Args:
        distribution (Dist):
            Distribution to evaluate.
        u_data (numpy.ndarray):
            Locations where the inverse transformation is to be evaluated.
        parameters (:py:data:typing.Any):
            Collection of parameters to override the default ones in the
            distribution.
        cache (:py:data:typing.Any):
            A collection of previous calculations in case the same distribution
            turns up on more than one occasion.

    Returns:
        The inverse transformation values of ``distribution`` at location
        ``u_data`` using parameters ``parameters``.
    """
    if cache is None:
        cache = {}

    out = numpy.zeros(u_data.shape)

    # The distribution itself knows how to handle inverse Rosenblatt.
    if hasattr(distribution, "_ppf"):
        parameters = load_parameters(
            distribution, "_ppf", parameters=parameters, cache=cache)
        out[:] = distribution._ppf(u_data.copy(), **parameters)

    # Approximate inverse Rosenblatt based on cumulative distribution function.
    else:
        from .. import approximation
        parameters = load_parameters(
            distribution, "_cdf", parameters=parameters, cache=cache)
        out[:] = approximation.approximate_inverse(
            distribution, u_data.copy(), cache=cache.copy(), parameters=parameters)

    # Store cache.
    cache[distribution] = out

    return out
|
Evaluate inverse Rosenblatt transformation.
Args:
distribution (Dist):
Distribution to evaluate.
u_data (numpy.ndarray):
Locations where the inverse transformation is to be evaluated.
parameters (:py:data:typing.Any):
Collection of parameters to override the default ones in the
distribution.
cache (:py:data:typing.Any):
A collection of previous calculations in case the same distribution
turns up on more than one occasion.
Returns:
The inverse transformation values of ``distribution`` at location
``u_data`` using parameters ``parameters``.
|
python
|
train
|
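The `else` branch in this row falls back to numerical inversion of the CDF. A minimal self-contained sketch of that idea, using plain bisection on a monotone scalar CDF (`approximate_ppf` is an illustrative stand-in; chaospy's `approximate_inverse` is a more elaborate routine):

import numpy

def approximate_ppf(cdf, u, lo=-10.0, hi=10.0, tol=1e-8):
    """Invert a monotone CDF by bisection: find x such that cdf(x) == u."""
    u = numpy.asarray(u, dtype=float)
    lo = numpy.full(u.shape, lo)
    hi = numpy.full(u.shape, hi)
    while numpy.max(hi - lo) > tol:
        mid = 0.5 * (lo + hi)
        below = cdf(mid) < u              # still left of the target quantile
        lo = numpy.where(below, mid, lo)
        hi = numpy.where(below, hi, mid)
    return 0.5 * (lo + hi)

# Invert the logistic CDF and compare with its known inverse log(u / (1 - u)).
logistic_cdf = lambda x: 1.0 / (1.0 + numpy.exp(-x))
print(approximate_ppf(logistic_cdf, numpy.array([0.1, 0.5, 0.9])))
# -> approximately [-2.197, 0.0, 2.197]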
Erotemic/utool
|
utool/util_gridsearch.py
|
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_gridsearch.py#L1837-L1882
|
def grid_search_generator(grid_basis=[], *args, **kwargs):
    r"""
    Iteratively yields individual configuration points
    inside a defined basis.

    Args:
        grid_basis (list): a list of 2-component tuples. The named tuple looks
            like this:

    CommandLine:
        python -m utool.util_gridsearch --test-grid_search_generator

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_gridsearch import *  # NOQA
        >>> import utool as ut
        >>> # build test data
        >>> grid_basis = [
        ...     DimensionBasis('dim1', [.1, .2, .3]),
        ...     DimensionBasis('dim2', [.1, .4, .5]),
        ... ]
        >>> args = tuple()
        >>> kwargs = {}
        >>> # execute function
        >>> point_list = list(grid_search_generator(grid_basis))
        >>> # verify results
        >>> column_lbls = ut.get_list_column(grid_basis, 0)
        >>> column_list = ut.get_list_column(grid_basis, 1)
        >>> first_vals = ut.get_list_column(ut.get_list_column(grid_basis, 1), 0)
        >>> column_types = list(map(type, first_vals))
        >>> header = 'grid search'
        >>> result = ut.make_csv_table(column_list, column_lbls, header, column_types)
        >>> print(result)
        grid search
        # num_rows=3
        # dim1, dim2
        0.10, 0.10
        0.20, 0.40
        0.30, 0.50
    """
    grid_basis_ = grid_basis + list(args) + list(kwargs.items())
    grid_basis_dict = OrderedDict(grid_basis_)
    grid_point_iter = util_dict.iter_all_dict_combinations_ordered(grid_basis_dict)
    for grid_point in grid_point_iter:
        yield grid_point
|
Iteratively yields individual configuration points
inside a defined basis.
Args:
grid_basis (list): a list of 2-component tuples. The named tuple looks
like this:
CommandLine:
python -m utool.util_gridsearch --test-grid_search_generator
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> # build test data
>>> grid_basis = [
... DimensionBasis('dim1', [.1, .2, .3]),
... DimensionBasis('dim2', [.1, .4, .5]),
... ]
>>> args = tuple()
>>> kwargs = {}
>>> # execute function
>>> point_list = list(grid_search_generator(grid_basis))
>>> # verify results
>>> column_lbls = ut.get_list_column(grid_basis, 0)
>>> column_list = ut.get_list_column(grid_basis, 1)
>>> first_vals = ut.get_list_column(ut.get_list_column(grid_basis, 1), 0)
>>> column_types = list(map(type, first_vals))
>>> header = 'grid search'
>>> result = ut.make_csv_table(column_list, column_lbls, header, column_types)
>>> print(result)
grid search
# num_rows=3
# dim1, dim2
0.10, 0.10
0.20, 0.40
0.30, 0.50
|
python
|
train
|
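The generator in this row delegates the Cartesian product to utool's `iter_all_dict_combinations_ordered`. A standard-library sketch of the same idea, assuming the basis is a list of `(name, values)` pairs as in the docstring:

import itertools
from collections import OrderedDict

def grid_points(grid_basis):
    """Yield one dict per point in the Cartesian product of the basis."""
    basis = OrderedDict(grid_basis)
    names = list(basis)
    for values in itertools.product(*basis.values()):
        yield dict(zip(names, values))

points = list(grid_points([('dim1', [.1, .2, .3]), ('dim2', [.1, .4, .5])]))
print(len(points))   # 9
print(points[0])     # {'dim1': 0.1, 'dim2': 0.1}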
HazyResearch/fonduer
|
src/fonduer/utils/data_model_utils/visual.py
|
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/visual.py#L168-L189
|
def get_horz_ngrams(
    mention, attrib="words", n_min=1, n_max=1, lower=True, from_sentence=True
):
    """Return all ngrams which are visually horizontally aligned with the Mention.

    Note that if a candidate is passed in, all of its Mentions will be searched.

    :param mention: The Mention to evaluate
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If True, all ngrams will be returned in lower case
    :param from_sentence: If True, returns ngrams from any horizontally aligned
        Sentences, rather than just horizontally aligned ngrams themselves.
    :rtype: a *generator* of ngrams
    """
    spans = _to_spans(mention)
    for span in spans:
        for ngram in _get_direction_ngrams(
            "horz", span, attrib, n_min, n_max, lower, from_sentence
        ):
            yield ngram
|
Return all ngrams which are visually horizontally aligned with the Mention.
Note that if a candidate is passed in, all of its Mentions will be searched.
:param mention: The Mention to evaluate
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
:param from_sentence: If True, returns ngrams from any horizontally aligned
Sentences, rather than just horizontally aligned ngrams themselves.
:rtype: a *generator* of ngrams
|
python
|
train
|
Gandi/pyramid_kvs
|
pyramid_kvs/perlsess.py
|
https://github.com/Gandi/pyramid_kvs/blob/36285f2e50d8181428f383f6fc1d79a34ea9ac3c/pyramid_kvs/perlsess.py#L31-L38
|
def connect(cls, settings):
    """ Call that method in the pyramid configuration phase.
    """
    server = serializer('json').loads(settings['kvs.perlsess'])
    server.setdefault('key_prefix', 'perlsess::')
    server.setdefault('codec', 'storable')
    cls.cookie_name = server.pop('cookie_name', 'session_id')
    cls.client = KVS(**server)
|
Call that method in the pyramid configuration phase.
|
python
|
test
|
resync/resync
|
resync/list_base_with_index.py
|
https://github.com/resync/resync/blob/98292c17b2c00f2d6f5191c6ab51fef8c292a018/resync/list_base_with_index.py#L242-L270
|
def as_xml_part(self, basename="/tmp/sitemap.xml", part_number=0):
    """Return a string of component sitemap number part_number.

    Used in the case of a large list that is split into component
    sitemaps.

    basename is used to create "index" links to the sitemapindex

    Q - what timestamp should be used?
    """
    if (not self.requires_multifile()):
        raise ListBaseIndexError(
            "Request for component sitemap for list with only %d entries when max_sitemap_entries is set to %s" %
            (len(self), str(self.max_sitemap_entries)))
    start = part_number * self.max_sitemap_entries
    if (start > len(self)):
        raise ListBaseIndexError(
            "Request for component sitemap with part_number too high, would start at entry %d yet the list has only %d entries" %
            (start, len(self)))
    stop = start + self.max_sitemap_entries
    if (stop > len(self)):
        stop = len(self)
    part = ListBase(itertools.islice(self.resources, start, stop))
    part.capability_name = self.capability_name
    part.default_capability()
    part.index = basename
    s = self.new_sitemap()
    return(s.resources_as_xml(part))
|
Return a string of component sitemap number part_number.
Used in the case of a large list that is split into component
sitemaps.
basename is used to create "index" links to the sitemapindex
Q - what timestamp should be used?
|
python
|
train
|
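At its core, this row pages `self.resources` with `itertools.islice`. A standalone sketch of just that slicing arithmetic (`page` is an illustrative helper, not part of resync):

import itertools

def page(resources, page_size, part_number):
    """Return one page of an iterable, the way as_xml_part slices its resources."""
    start = part_number * page_size
    return list(itertools.islice(resources, start, start + page_size))

print(page(range(10), 4, 0))   # [0, 1, 2, 3]
print(page(range(10), 4, 2))   # [8, 9]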
devassistant/devassistant
|
devassistant/assistant_base.py
|
https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/assistant_base.py#L33-L49
|
def get_subassistants(self):
    """Return list of instantiated subassistants.

    Usually, this need not be overridden in subclasses; you should just override
    get_subassistant_classes

    Returns:
        list of instantiated subassistants
    """
    if not hasattr(self, '_subassistants'):
        self._subassistants = []
        # we want to know, if type(self) defines 'get_subassistant_classes',
        # we don't want to inherit it from superclass (would cause recursion)
        if 'get_subassistant_classes' in vars(type(self)):
            for a in self.get_subassistant_classes():
                self._subassistants.append(a())
    return self._subassistants
|
Return list of instantiated subassistants.
Usually, this need not be overridden in subclasses; you should just override
get_subassistant_classes
Returns:
list of instantiated subassistants
|
python
|
train
|
PyCQA/pylint
|
pylint/checkers/base.py
|
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/base.py#L2214-L2233
|
def _check_type_x_is_y(self, node, left, operator, right):
    """Check for expressions like type(x) == Y."""
    left_func = utils.safe_infer(left.func)
    if not (
        isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME
    ):
        return

    if operator in ("is", "is not") and _is_one_arg_pos_call(right):
        right_func = utils.safe_infer(right.func)
        if (
            isinstance(right_func, astroid.ClassDef)
            and right_func.qname() == TYPE_QNAME
        ):
            # type(x) == type(a)
            right_arg = utils.safe_infer(right.args[0])
            if not isinstance(right_arg, LITERAL_NODE_TYPES):
                # not e.g. type(x) == type([])
                return
    self.add_message("unidiomatic-typecheck", node=node)
|
Check for expressions like type(x) == Y.
|
python
|
test
|
underworldcode/stripy
|
stripy-src/stripy/cartesian.py
|
https://github.com/underworldcode/stripy/blob/d4c3480c3e58c88489ded695eadbe7cd5bf94b48/stripy-src/stripy/cartesian.py#L1082-L1117
|
def nearest_vertices(self, x, y, k=1, max_distance=np.inf):
    """
    Query the cKDtree for the nearest neighbours and Euclidean
    distance from x,y points.

    Returns 0, 0 if a cKDtree has not been constructed
    (switch tree=True if you need this routine)

    Parameters
    ----------
    x : 1D array of Cartesian x coordinates
    y : 1D array of Cartesian y coordinates
    k : number of nearest neighbours to return
        (default: 1)
    max_distance : maximum Euclidean distance to search
        for neighbours (default: inf)

    Returns
    -------
    d : Euclidean distance between each point and their
        nearest neighbour(s)
    vert : vertices of the nearest neighbour(s)
    """
    if self.tree == False or self.tree == None:
        return 0, 0

    xy = np.column_stack([x, y])
    dxy, vertices = self._cKDtree.query(xy, k=k, distance_upper_bound=max_distance)
    if k == 1:  # force this to be a 2D array
        vertices = np.reshape(vertices, (-1, 1))
    return dxy, vertices
|
Query the cKDtree for the nearest neighbours and Euclidean
distance from x,y points.
Returns 0, 0 if a cKDtree has not been constructed
(switch tree=True if you need this routine)
Parameters
----------
x : 1D array of Cartesian x coordinates
y : 1D array of Cartesian y coordinates
k : number of nearest neighbours to return
(default: 1)
max_distance : maximum Euclidean distance to search
for neighbours (default: inf)
Returns
-------
d : Euclidean distance between each point and their
nearest neighbour(s)
vert : vertices of the nearest neighbour(s)
|
python
|
train
|
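A minimal sketch of the SciPy call this row wraps; the vertex coordinates and query points are invented, since stripy builds the tree from its own mesh vertices:

import numpy as np
from scipy.spatial import cKDTree

points = np.random.rand(100, 2)                # hypothetical mesh vertices in 2D
tree = cKDTree(points)

query = np.array([[0.5, 0.5], [0.1, 0.9]])
d, vert = tree.query(query, k=3, distance_upper_bound=np.inf)
print(d.shape, vert.shape)                     # (2, 3) (2, 3): distances, vertex indices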
saulpw/visidata
|
visidata/vdtui.py
|
https://github.com/saulpw/visidata/blob/32771e0cea6c24fc7902683d14558391395c591f/visidata/vdtui.py#L1306-L1310
|
def column(self, colregex):
    'Return first column whose Column.name matches colregex.'
    for c in self.columns:
        if re.search(colregex, c.name, regex_flags()):
            return c
|
Return first column whose Column.name matches colregex.
|
python
|
train
|
GPflow/GPflow
|
gpflow/training/scipy_optimizer.py
|
https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/training/scipy_optimizer.py#L54-L91
|
def minimize(self, model, session=None, var_list=None, feed_dict=None, maxiter=1000,
             disp=False, initialize=False, anchor=True, step_callback=None, **kwargs):
    """
    Minimizes the objective function of the model.

    :param model: GPflow model with objective tensor.
    :param session: Session where optimization will be run.
    :param var_list: List of extra variables which should be trained during optimization.
    :param feed_dict: Feed dictionary of tensors passed to session run method.
    :param maxiter: Maximum number of iterations. Note: the scipy optimizer can stop
        early if the model has converged.
    :param disp: ScipyOptimizer option. Set to True to print convergence messages.
    :param initialize: If `True`, model parameters will be re-initialized even if they
        were already initialized for the given session.
    :param anchor: If `True`, trained parameters computed during optimization at
        a particular session will be synchronized with internal parameter values.
    :param step_callback: A function to be called at each optimization step;
        arguments are the current values of all optimization variables
        flattened into a single vector.
    :type step_callback: Callable[[np.ndarray], None]
    :param kwargs: This is a dictionary of extra parameters for the session run method.
    """
    if model is None or not isinstance(model, Model):
        raise ValueError('Unknown type passed for optimization.')

    if model.is_built_coherence() is Build.NO:
        raise GPflowError('Model is not built.')

    session = model.enquire_session(session)
    self._model = model
    optimizer = self.make_optimize_tensor(model, session,
                                          var_list=var_list, maxiter=maxiter, disp=disp)
    self._optimizer = optimizer
    feed_dict = self._gen_feed_dict(model, feed_dict)
    optimizer.minimize(session=session, feed_dict=feed_dict, step_callback=step_callback,
                       **kwargs)
    if anchor:
        model.anchor(session)
|
Minimizes the objective function of the model.
:param model: GPflow model with objective tensor.
:param session: Session where optimization will be run.
:param var_list: List of extra variables which should be trained during optimization.
:param feed_dict: Feed dictionary of tensors passed to session run method.
:param maxiter: Maximum number of iterations. Note: the scipy optimizer can stop early
if the model has converged.
:param disp: ScipyOptimizer option. Set to True to print convergence messages.
:param initialize: If `True`, model parameters will be re-initialized even if they were
already initialized for the given session.
:param anchor: If `True`, trained parameters computed during optimization at
a particular session will be synchronized with internal parameter values.
:param step_callback: A function to be called at each optimization step;
arguments are the current values of all optimization variables
flattened into a single vector.
:type step_callback: Callable[[np.ndarray], None]
:param kwargs: This is a dictionary of extra parameters for the session run method.
|
python
|
train
|
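A hedged usage sketch for this method under the GPflow 1.x API it belongs to (the model and data below are schematic; exact constructor signatures vary across 1.x releases):

import numpy as np
import gpflow

X = np.random.rand(20, 1)
Y = np.sin(3 * X) + 0.1 * np.random.randn(20, 1)

model = gpflow.models.GPR(X, Y, kern=gpflow.kernels.RBF(1))
optimizer = gpflow.train.ScipyOptimizer()
optimizer.minimize(model, maxiter=200)   # runs the method shown above; anchors by default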
google/grumpy
|
third_party/stdlib/threading.py
|
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/threading.py#L373-L398
|
def notify(self, n=1):
    """Wake up one or more threads waiting on this condition, if any.

    If the calling thread has not acquired the lock when this method is
    called, a RuntimeError is raised.

    This method wakes up at most n of the threads waiting for the condition
    variable; it is a no-op if no threads are waiting.
    """
    if not self._is_owned():
        raise RuntimeError("cannot notify on un-acquired lock")
    __waiters = self.__waiters
    waiters = __waiters[:n]
    if not waiters:
        if __debug__:
            self._note("%s.notify(): no waiters", self)
        return
    self._note("%s.notify(): notifying %d waiter%s", self, n,
               n!=1 and "s" or "")
    for waiter in waiters:
        waiter.release()
        try:
            __waiters.remove(waiter)
        except ValueError:
            pass
|
Wake up one or more threads waiting on this condition, if any.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method wakes up at most n of the threads waiting for the condition
variable; it is a no-op if no threads are waiting.
|
python
|
valid
|
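For context, a small producer/consumer sketch showing where `notify` sits in the usual Condition protocol; this uses the public `threading` module rather than grumpy's vendored copy:

import threading

items = []
cond = threading.Condition()

def consumer():
    with cond:
        while not items:       # guard against spurious wakeups
            cond.wait()
        print('consumed', items.pop())

t = threading.Thread(target=consumer)
t.start()
with cond:
    items.append(42)
    cond.notify()              # wakes at most one waiting thread
t.join()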
Opentrons/opentrons
|
update-server/otupdate/buildroot/ssh_key_management.py
|
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/update-server/otupdate/buildroot/ssh_key_management.py#L18-L45
|
def require_linklocal(handler):
    """ Ensure the decorated is only called if the request is linklocal.

    The host ip address should be in the X-Host-IP header (provided by nginx)
    """
    @functools.wraps(handler)
    async def decorated(request: web.Request) -> web.Response:
        ipaddr_str = request.headers.get('x-host-ip')
        invalid_req_data = {
            'error': 'bad-interface',
            'message': f'The endpoint {request.url} can only be used from '
                       'local connections'
        }
        if not ipaddr_str:
            return web.json_response(
                data=invalid_req_data,
                status=403)
        try:
            addr = ipaddress.ip_address(ipaddr_str)
        except ValueError:
            LOG.exception(f"Couldn't parse host ip address {ipaddr_str}")
            raise
        if not addr.is_link_local:
            return web.json_response(data=invalid_req_data, status=403)
        return await handler(request)
    return decorated
|
Ensure the decorated is only called if the request is linklocal.
The host ip address should be in the X-Host-IP header (provided by nginx)
|
python
|
train
|
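A hedged sketch of how such a decorator is typically attached to an aiohttp handler; the route and payload are invented, and only the decorator pattern comes from this row:

from aiohttp import web

@require_linklocal  # the decorator defined in the row above
async def add_ssh_key(request: web.Request) -> web.Response:
    # Only reached when nginx forwarded a link-local X-Host-IP header.
    return web.json_response({'status': 'ok'})

app = web.Application()
app.router.add_post('/server/ssh_keys', add_ssh_key)  # hypothetical route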
nephila/djangocms-installer
|
djangocms_installer/config/__init__.py
|
https://github.com/nephila/djangocms-installer/blob/9fec66d5f8b1e9a0f3c0ec66dd777db578fab07e/djangocms_installer/config/__init__.py#L20-L337
|
def parse(args):
    """
    Define the available arguments
    """
    from tzlocal import get_localzone
    try:
        timezone = get_localzone()
        if isinstance(timezone, pytz.BaseTzInfo):
            timezone = timezone.zone
    except Exception:  # pragma: no cover
        timezone = 'UTC'
    if timezone == 'local':
        timezone = 'UTC'
    parser = argparse.ArgumentParser(description="""Bootstrap a django CMS project.
Major usage modes:

- wizard: djangocms -w -p /path/whatever project_name: ask for all the options through a
  CLI wizard.

- batch: djangocms project_name: runs with the default values plus any
  additional option provided (see below) with no question asked.

- config file: djangocms_installer --config-file /path/to/config.ini project_name: reads values
  from an ini-style config file.

Check https://djangocms-installer.readthedocs.io/en/latest/usage.html for detailed usage
information.
""", formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--config-file', dest='config_file', action='store',
                        default=None,
                        help='Configuration file for djangocms_installer')
    parser.add_argument('--config-dump', dest='config_dump', action='store',
                        default=None,
                        help='Dump configuration file with current args')
    parser.add_argument('--db', '-d', dest='db', action=DbAction,
                        default='sqlite://localhost/project.db',
                        help='Database configuration (in URL format). '
                             'Example: sqlite://localhost/project.db')
    parser.add_argument('--i18n', '-i', dest='i18n', action='store',
                        choices=('yes', 'no'),
                        default='yes', help='Activate Django I18N / L10N setting; this is '
                                            'automatically activated if more than '
                                            'language is provided')
    parser.add_argument('--use-tz', '-z', dest='use_timezone', action='store',
                        choices=('yes', 'no'),
                        default='yes', help='Activate Django timezone support')
    parser.add_argument('--timezone', '-t', dest='timezone',
                        required=False, default=timezone,
                        action='store', help='Optional default time zone. Example: Europe/Rome')
    parser.add_argument('--reversion', '-e', dest='reversion', action='store',
                        choices=('yes', 'no'),
                        default='yes', help='Install and configure reversion support '
                                            '(only for django CMS 3.2 and 3.3)')
    parser.add_argument('--permissions', dest='permissions', action='store',
                        choices=('yes', 'no'),
                        default='no', help='Activate CMS permission management')
    parser.add_argument('--pip-options', help='pass custom pip options', default='')
    parser.add_argument('--languages', '-l', dest='languages', action='append',
                        help='Languages to enable. Option can be provided multiple times, or as a '
                             'comma separated list. Only language codes supported by Django can '
                             'be used here. Example: en, fr-FR, it-IT')
    parser.add_argument('--django-version', dest='django_version', action='store',
                        choices=data.DJANGO_SUPPORTED,
                        default=data.DJANGO_DEFAULT, help='Django version')
    parser.add_argument('--cms-version', '-v', dest='cms_version', action='store',
                        choices=data.DJANGOCMS_SUPPORTED,
                        default=data.DJANGOCMS_DEFAULT, help='django CMS version')
    parser.add_argument('--parent-dir', '-p', dest='project_directory',
                        default='',
                        action='store', help='Optional project parent directory')
    parser.add_argument('--bootstrap', dest='bootstrap', action='store',
                        choices=('yes', 'no'),
                        default='no', help='Use Twitter Bootstrap Theme')
    parser.add_argument('--templates', dest='templates', action='store',
                        default='no', help='Use custom template set')
    parser.add_argument('--starting-page', dest='starting_page', action='store',
                        choices=('yes', 'no'),
                        default='no', help='Load a starting page with examples after installation '
                                           '(english language only). Choose "no" if you use a '
                                           'custom template set.')
    parser.add_argument(dest='project_name', action='store',
                        help='Name of the project to be created')
    # Command that lists the supported plugins in verbose description
    parser.add_argument('--list-plugins', '-P', dest='plugins', action='store_true',
                        help='List plugins that\'s going to be installed and configured')
    # Command that lists the supported plugins in verbose description
    parser.add_argument('--dump-requirements', '-R', dest='dump_reqs', action='store_true',
                        help='It dumps the requirements that would be installed according to '
                             'parameters given. Together with --requirements argument is useful '
                             'for customizing the virtualenv')
    # Advanced options. These have a predefined default and are not asked
    # by config wizard.
    parser.add_argument('--no-input', '-q', dest='noinput', action='store_true',
                        default=True, help='Don\'t run the configuration wizard, just use the '
                                           'provided values')
    parser.add_argument('--wizard', '-w', dest='wizard', action='store_true',
                        default=False, help='Run the configuration wizard')
    parser.add_argument('--verbose', dest='verbose', action='store_true',
                        default=False,
                        help='Be more verbose and don\'t swallow subcommands output')
    parser.add_argument('--filer', '-f', dest='filer', action='store_true',
                        default=True, help='Install and configure django-filer plugins '
                                           '- Always enabled')
    parser.add_argument('--requirements', '-r', dest='requirements_file', action='store',
                        default=None, help='Externally defined requirements file')
    parser.add_argument('--no-deps', '-n', dest='no_deps', action='store_true',
                        default=False, help='Don\'t install package dependencies')
    parser.add_argument('--no-plugins', dest='no_plugins', action='store_true',
                        default=False, help='Don\'t install plugins')
    parser.add_argument('--no-db-driver', dest='no_db_driver', action='store_true',
                        default=False, help='Don\'t install database package')
    parser.add_argument('--no-sync', '-m', dest='no_sync', action='store_true',
                        default=False, help='Don\'t run syncdb / migrate after bootstrapping')
    parser.add_argument('--no-user', '-u', dest='no_user', action='store_true',
                        default=False, help='Don\'t create the admin user')
    parser.add_argument('--template', dest='template', action='store',
                        default=None, help='The path or URL to load the django project '
                                           'template from.')
    parser.add_argument('--extra-settings', dest='extra_settings', action='store',
                        default=None, help='The path to an file that contains extra settings.')
    parser.add_argument('--skip-empty-check', '-s', dest='skip_project_dir_check',
                        action='store_true',
                        default=False, help='Skip the check if project dir is empty.')
    parser.add_argument('--delete-project-dir', '-c', dest='delete_project_dir',
                        action='store_true',
                        default=False, help='Delete project directory on creation failure.')
    parser.add_argument('--utc', dest='utc',
                        action='store_true',
                        default=False, help='Use UTC timezone.')

    if '--utc' in args:
        for action in parser._positionals._actions:
            if action.dest == 'timezone':
                action.default = 'UTC'

    # If config_args then pretend that config args came from the stdin and run parser again.
    config_args = ini.parse_config_file(parser, args)
    args = parser.parse_args(config_args + args)

    if not args.wizard:
        args.noinput = True
    else:
        args.noinput = False

    if not args.project_directory:
        args.project_directory = args.project_name
    args.project_directory = os.path.abspath(args.project_directory)

    # First of all, check if the project name is valid
    if not validate_project(args.project_name):
        sys.stderr.write(
            'Project name "{0}" is not a valid app name, or it\'s already defined. '
            'Please use only numbers, letters and underscores.\n'.format(args.project_name)
        )
        sys.exit(3)

    # Checking the given path
    setattr(args, 'project_path', os.path.join(args.project_directory, args.project_name).strip())
    if not args.skip_project_dir_check:
        if (os.path.exists(args.project_directory) and
                [path for path in os.listdir(args.project_directory) if not path.startswith('.')]):
            sys.stderr.write(
                'Path "{0}" already exists and is not empty, please choose a different one\n'
                'If you want to use this path anyway use the -s flag to skip this check.\n'
                ''.format(args.project_directory)
            )
            sys.exit(4)
    if os.path.exists(args.project_path):
        sys.stderr.write(
            'Path "{0}" already exists, please choose a different one\n'.format(args.project_path)
        )
        sys.exit(4)

    if args.config_dump and os.path.isfile(args.config_dump):
        sys.stdout.write(
            'Cannot dump because given configuration file "{0}" exists.\n'.format(args.config_dump)
        )
        sys.exit(8)

    args = _manage_args(parser, args)

    # what do we want here?!
    # * if languages are given as multiple arguments, let's use it as is
    # * if no languages are given, use a default and stop handling it further
    # * if languages are given as a comma-separated list, split it and use the
    #   resulting list.
    if not args.languages:
        try:
            args.languages = [locale.getdefaultlocale()[0].split('_')[0]]
        except Exception:  # pragma: no cover
            args.languages = ['en']
    elif isinstance(args.languages, six.string_types):
        args.languages = args.languages.split(',')
    elif len(args.languages) == 1 and isinstance(args.languages[0], six.string_types):
        args.languages = args.languages[0].split(',')

    args.languages = [lang.strip().lower() for lang in args.languages]
    if len(args.languages) > 1:
        args.i18n = 'yes'
    args.aldryn = False
    args.filer = True

    # Convert version to numeric format for easier checking
    try:
        django_version, cms_version = supported_versions(args.django_version, args.cms_version)
        cms_package = data.PACKAGE_MATRIX.get(
            cms_version, data.PACKAGE_MATRIX[data.DJANGOCMS_LTS]
        )
    except RuntimeError as e:  # pragma: no cover
        sys.stderr.write(compat.unicode(e))
        sys.exit(6)
    if django_version is None:  # pragma: no cover
        sys.stderr.write(
            'Please provide a Django supported version: {0}. Only Major.Minor '
            'version selector is accepted\n'.format(', '.join(data.DJANGO_SUPPORTED))
        )
        sys.exit(6)
    if cms_version is None:  # pragma: no cover
        sys.stderr.write(
            'Please provide a django CMS supported version: {0}. Only Major.Minor '
            'version selector is accepted\n'.format(', '.join(data.DJANGOCMS_SUPPORTED))
        )
        sys.exit(6)

    default_settings = '{}.settings'.format(args.project_name)
    env_settings = os.environ.get('DJANGO_SETTINGS_MODULE', default_settings)
    if env_settings != default_settings:
        sys.stderr.write(
            '`DJANGO_SETTINGS_MODULE` is currently set to \'{0}\' which is not compatible with '
            'djangocms installer.\nPlease unset `DJANGO_SETTINGS_MODULE` and re-run the installer '
            '\n'.format(env_settings)
        )
        sys.exit(10)

    if not getattr(args, 'requirements_file'):
        requirements = []

        # django CMS version check
        if args.cms_version == 'develop':
            requirements.append(cms_package)
            warnings.warn(data.VERSION_WARNING.format('develop', 'django CMS'))
        elif args.cms_version == 'rc':  # pragma: no cover
            requirements.append(cms_package)
        elif args.cms_version == 'beta':  # pragma: no cover
            requirements.append(cms_package)
            warnings.warn(data.VERSION_WARNING.format('beta', 'django CMS'))
        else:
            requirements.append(cms_package)

        if args.cms_version in ('rc', 'develop'):
            requirements.extend(data.REQUIREMENTS['cms-master'])
        elif LooseVersion(cms_version) >= LooseVersion('3.6'):
            requirements.extend(data.REQUIREMENTS['cms-3.6'])
        elif LooseVersion(cms_version) >= LooseVersion('3.5'):
            requirements.extend(data.REQUIREMENTS['cms-3.5'])
        elif LooseVersion(cms_version) >= LooseVersion('3.4'):
            requirements.extend(data.REQUIREMENTS['cms-3.4'])

        if not args.no_db_driver:
            requirements.append(args.db_driver)
        if not args.no_plugins:
            if args.cms_version in ('rc', 'develop'):
                requirements.extend(data.REQUIREMENTS['plugins-master'])
            elif LooseVersion(cms_version) >= LooseVersion('3.6'):
                requirements.extend(data.REQUIREMENTS['plugins-3.6'])
            elif LooseVersion(cms_version) >= LooseVersion('3.5'):
                requirements.extend(data.REQUIREMENTS['plugins-3.5'])
            elif LooseVersion(cms_version) >= LooseVersion('3.4'):
                requirements.extend(data.REQUIREMENTS['plugins-3.4'])
            requirements.extend(data.REQUIREMENTS['filer'])
        if args.aldryn:  # pragma: no cover
            requirements.extend(data.REQUIREMENTS['aldryn'])

        # Django version check
        if args.django_version == 'develop':  # pragma: no cover
            requirements.append(data.DJANGO_DEVELOP)
            warnings.warn(data.VERSION_WARNING.format('develop', 'Django'))
        elif args.django_version == 'beta':  # pragma: no cover
            requirements.append(data.DJANGO_BETA)
            warnings.warn(data.VERSION_WARNING.format('beta', 'Django'))
        else:
            requirements.append('Django<{0}'.format(less_than_version(django_version)))

        if django_version == '1.8':
            requirements.extend(data.REQUIREMENTS['django-1.8'])
        elif django_version == '1.9':
            requirements.extend(data.REQUIREMENTS['django-1.9'])
        elif django_version == '1.10':
            requirements.extend(data.REQUIREMENTS['django-1.10'])
        elif django_version == '1.11':
            requirements.extend(data.REQUIREMENTS['django-1.11'])
        elif django_version == '2.0':
            requirements.extend(data.REQUIREMENTS['django-2.0'])
        elif django_version == '2.1':
            requirements.extend(data.REQUIREMENTS['django-2.1'])

        requirements.extend(data.REQUIREMENTS['default'])

        setattr(args, 'requirements', '\n'.join(requirements).strip())

    # Convenient shortcuts
    setattr(args, 'cms_version', cms_version)
    setattr(args, 'django_version', django_version)
    setattr(args, 'settings_path',
            os.path.join(args.project_directory, args.project_name, 'settings.py').strip())
    setattr(args, 'urlconf_path',
            os.path.join(args.project_directory, args.project_name, 'urls.py').strip())

    if args.config_dump:
        ini.dump_config_file(args.config_dump, args, parser)

    return args
"]",
"if",
"len",
"(",
"args",
".",
"languages",
")",
">",
"1",
":",
"args",
".",
"i18n",
"=",
"'yes'",
"args",
".",
"aldryn",
"=",
"False",
"args",
".",
"filer",
"=",
"True",
"# Convert version to numeric format for easier checking",
"try",
":",
"django_version",
",",
"cms_version",
"=",
"supported_versions",
"(",
"args",
".",
"django_version",
",",
"args",
".",
"cms_version",
")",
"cms_package",
"=",
"data",
".",
"PACKAGE_MATRIX",
".",
"get",
"(",
"cms_version",
",",
"data",
".",
"PACKAGE_MATRIX",
"[",
"data",
".",
"DJANGOCMS_LTS",
"]",
")",
"except",
"RuntimeError",
"as",
"e",
":",
"# pragma: no cover",
"sys",
".",
"stderr",
".",
"write",
"(",
"compat",
".",
"unicode",
"(",
"e",
")",
")",
"sys",
".",
"exit",
"(",
"6",
")",
"if",
"django_version",
"is",
"None",
":",
"# pragma: no cover",
"sys",
".",
"stderr",
".",
"write",
"(",
"'Please provide a Django supported version: {0}. Only Major.Minor '",
"'version selector is accepted\\n'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"data",
".",
"DJANGO_SUPPORTED",
")",
")",
")",
"sys",
".",
"exit",
"(",
"6",
")",
"if",
"cms_version",
"is",
"None",
":",
"# pragma: no cover",
"sys",
".",
"stderr",
".",
"write",
"(",
"'Please provide a django CMS supported version: {0}. Only Major.Minor '",
"'version selector is accepted\\n'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"data",
".",
"DJANGOCMS_SUPPORTED",
")",
")",
")",
"sys",
".",
"exit",
"(",
"6",
")",
"default_settings",
"=",
"'{}.settings'",
".",
"format",
"(",
"args",
".",
"project_name",
")",
"env_settings",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'DJANGO_SETTINGS_MODULE'",
",",
"default_settings",
")",
"if",
"env_settings",
"!=",
"default_settings",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'`DJANGO_SETTINGS_MODULE` is currently set to \\'{0}\\' which is not compatible with '",
"'djangocms installer.\\nPlease unset `DJANGO_SETTINGS_MODULE` and re-run the installer '",
"'\\n'",
".",
"format",
"(",
"env_settings",
")",
")",
"sys",
".",
"exit",
"(",
"10",
")",
"if",
"not",
"getattr",
"(",
"args",
",",
"'requirements_file'",
")",
":",
"requirements",
"=",
"[",
"]",
"# django CMS version check",
"if",
"args",
".",
"cms_version",
"==",
"'develop'",
":",
"requirements",
".",
"append",
"(",
"cms_package",
")",
"warnings",
".",
"warn",
"(",
"data",
".",
"VERSION_WARNING",
".",
"format",
"(",
"'develop'",
",",
"'django CMS'",
")",
")",
"elif",
"args",
".",
"cms_version",
"==",
"'rc'",
":",
"# pragma: no cover",
"requirements",
".",
"append",
"(",
"cms_package",
")",
"elif",
"args",
".",
"cms_version",
"==",
"'beta'",
":",
"# pragma: no cover",
"requirements",
".",
"append",
"(",
"cms_package",
")",
"warnings",
".",
"warn",
"(",
"data",
".",
"VERSION_WARNING",
".",
"format",
"(",
"'beta'",
",",
"'django CMS'",
")",
")",
"else",
":",
"requirements",
".",
"append",
"(",
"cms_package",
")",
"if",
"args",
".",
"cms_version",
"in",
"(",
"'rc'",
",",
"'develop'",
")",
":",
"requirements",
".",
"extend",
"(",
"data",
".",
"REQUIREMENTS",
"[",
"'cms-master'",
"]",
")",
"elif",
"LooseVersion",
"(",
"cms_version",
")",
">=",
"LooseVersion",
"(",
"'3.6'",
")",
":",
"requirements",
".",
"extend",
"(",
"data",
".",
"REQUIREMENTS",
"[",
"'cms-3.6'",
"]",
")",
"elif",
"LooseVersion",
"(",
"cms_version",
")",
">=",
"LooseVersion",
"(",
"'3.5'",
")",
":",
"requirements",
".",
"extend",
"(",
"data",
".",
"REQUIREMENTS",
"[",
"'cms-3.5'",
"]",
")",
"elif",
"LooseVersion",
"(",
"cms_version",
")",
">=",
"LooseVersion",
"(",
"'3.4'",
")",
":",
"requirements",
".",
"extend",
"(",
"data",
".",
"REQUIREMENTS",
"[",
"'cms-3.4'",
"]",
")",
"if",
"not",
"args",
".",
"no_db_driver",
":",
"requirements",
".",
"append",
"(",
"args",
".",
"db_driver",
")",
"if",
"not",
"args",
".",
"no_plugins",
":",
"if",
"args",
".",
"cms_version",
"in",
"(",
"'rc'",
",",
"'develop'",
")",
":",
"requirements",
".",
"extend",
"(",
"data",
".",
"REQUIREMENTS",
"[",
"'plugins-master'",
"]",
")",
"elif",
"LooseVersion",
"(",
"cms_version",
")",
">=",
"LooseVersion",
"(",
"'3.6'",
")",
":",
"requirements",
".",
"extend",
"(",
"data",
".",
"REQUIREMENTS",
"[",
"'plugins-3.6'",
"]",
")",
"elif",
"LooseVersion",
"(",
"cms_version",
")",
">=",
"LooseVersion",
"(",
"'3.5'",
")",
":",
"requirements",
".",
"extend",
"(",
"data",
".",
"REQUIREMENTS",
"[",
"'plugins-3.5'",
"]",
")",
"elif",
"LooseVersion",
"(",
"cms_version",
")",
">=",
"LooseVersion",
"(",
"'3.4'",
")",
":",
"requirements",
".",
"extend",
"(",
"data",
".",
"REQUIREMENTS",
"[",
"'plugins-3.4'",
"]",
")",
"requirements",
".",
"extend",
"(",
"data",
".",
"REQUIREMENTS",
"[",
"'filer'",
"]",
")",
"if",
"args",
".",
"aldryn",
":",
"# pragma: no cover",
"requirements",
".",
"extend",
"(",
"data",
".",
"REQUIREMENTS",
"[",
"'aldryn'",
"]",
")",
"# Django version check",
"if",
"args",
".",
"django_version",
"==",
"'develop'",
":",
"# pragma: no cover",
"requirements",
".",
"append",
"(",
"data",
".",
"DJANGO_DEVELOP",
")",
"warnings",
".",
"warn",
"(",
"data",
".",
"VERSION_WARNING",
".",
"format",
"(",
"'develop'",
",",
"'Django'",
")",
")",
"elif",
"args",
".",
"django_version",
"==",
"'beta'",
":",
"# pragma: no cover",
"requirements",
".",
"append",
"(",
"data",
".",
"DJANGO_BETA",
")",
"warnings",
".",
"warn",
"(",
"data",
".",
"VERSION_WARNING",
".",
"format",
"(",
"'beta'",
",",
"'Django'",
")",
")",
"else",
":",
"requirements",
".",
"append",
"(",
"'Django<{0}'",
".",
"format",
"(",
"less_than_version",
"(",
"django_version",
")",
")",
")",
"if",
"django_version",
"==",
"'1.8'",
":",
"requirements",
".",
"extend",
"(",
"data",
".",
"REQUIREMENTS",
"[",
"'django-1.8'",
"]",
")",
"elif",
"django_version",
"==",
"'1.9'",
":",
"requirements",
".",
"extend",
"(",
"data",
".",
"REQUIREMENTS",
"[",
"'django-1.9'",
"]",
")",
"elif",
"django_version",
"==",
"'1.10'",
":",
"requirements",
".",
"extend",
"(",
"data",
".",
"REQUIREMENTS",
"[",
"'django-1.10'",
"]",
")",
"elif",
"django_version",
"==",
"'1.11'",
":",
"requirements",
".",
"extend",
"(",
"data",
".",
"REQUIREMENTS",
"[",
"'django-1.11'",
"]",
")",
"elif",
"django_version",
"==",
"'2.0'",
":",
"requirements",
".",
"extend",
"(",
"data",
".",
"REQUIREMENTS",
"[",
"'django-2.0'",
"]",
")",
"elif",
"django_version",
"==",
"'2.1'",
":",
"requirements",
".",
"extend",
"(",
"data",
".",
"REQUIREMENTS",
"[",
"'django-2.1'",
"]",
")",
"requirements",
".",
"extend",
"(",
"data",
".",
"REQUIREMENTS",
"[",
"'default'",
"]",
")",
"setattr",
"(",
"args",
",",
"'requirements'",
",",
"'\\n'",
".",
"join",
"(",
"requirements",
")",
".",
"strip",
"(",
")",
")",
"# Convenient shortcuts",
"setattr",
"(",
"args",
",",
"'cms_version'",
",",
"cms_version",
")",
"setattr",
"(",
"args",
",",
"'django_version'",
",",
"django_version",
")",
"setattr",
"(",
"args",
",",
"'settings_path'",
",",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"project_directory",
",",
"args",
".",
"project_name",
",",
"'settings.py'",
")",
".",
"strip",
"(",
")",
")",
"setattr",
"(",
"args",
",",
"'urlconf_path'",
",",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"project_directory",
",",
"args",
".",
"project_name",
",",
"'urls.py'",
")",
".",
"strip",
"(",
")",
")",
"if",
"args",
".",
"config_dump",
":",
"ini",
".",
"dump_config_file",
"(",
"args",
".",
"config_dump",
",",
"args",
",",
"parser",
")",
"return",
"args"
] |
Define the available arguments
|
[
"Define",
"the",
"available",
"arguments"
] |
python
|
valid
|
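The languages handling in the record above is self-contained enough to lift out and test. A minimal sketch of the same normalisation, Python 3 only (plain str stands in for six.string_types), with a helper name that is ours, not the installer's:

import locale

def normalize_languages(languages):
    """Return a cleaned, lower-cased list of language codes."""
    if not languages:
        try:
            # Fall back to the system locale, e.g. 'en_US' -> 'en'
            languages = [locale.getdefaultlocale()[0].split('_')[0]]
        except Exception:
            languages = ['en']
    elif isinstance(languages, str):
        languages = languages.split(',')
    elif len(languages) == 1 and isinstance(languages[0], str):
        languages = languages[0].split(',')
    return [lang.strip().lower() for lang in languages]

print(normalize_languages('en, FR'))     # ['en', 'fr']
print(normalize_languages(['en,fr']))    # ['en', 'fr']
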
MrYsLab/PyMata
|
PyMata/pymata.py
|
https://github.com/MrYsLab/PyMata/blob/7e0ec34670b5a0d3d6b74bcbe4f3808c845cc429/PyMata/pymata.py#L223-L233
|
def analog_read(self, pin):
"""
Retrieve the last analog data value received for the specified pin.
:param pin: Selected pin
:return: The last value entered into the analog response table.
"""
with self.data_lock:
data = self._command_handler.analog_response_table[pin][self._command_handler.RESPONSE_TABLE_PIN_DATA_VALUE]
return data
|
[
"def",
"analog_read",
"(",
"self",
",",
"pin",
")",
":",
"with",
"self",
".",
"data_lock",
":",
"data",
"=",
"self",
".",
"_command_handler",
".",
"analog_response_table",
"[",
"pin",
"]",
"[",
"self",
".",
"_command_handler",
".",
"RESPONSE_TABLE_PIN_DATA_VALUE",
"]",
"return",
"data"
] |
Retrieve the last analog data value received for the specified pin.
:param pin: Selected pin
:return: The last value entered into the analog response table.
|
[
"Retrieve",
"the",
"last",
"analog",
"data",
"value",
"received",
"for",
"the",
"specified",
"pin",
"."
] |
python
|
valid
|
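A short usage sketch for the record above; the serial port and pin number are assumptions for illustration, and the setup follows the usual PyMata pattern rather than anything stated in the record:

from PyMata.pymata import PyMata

board = PyMata('/dev/ttyACM0')                    # adjust the port for your board
board.set_pin_mode(2, board.INPUT, board.ANALOG)  # report analog pin 2
value = board.analog_read(2)                      # last cached reading for pin 2
print(value)
board.close()
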
fabioz/PyDev.Debugger
|
third_party/pep8/lib2to3/lib2to3/pytree.py
|
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/lib2to3/lib2to3/pytree.py#L183-L197
|
def next_sibling(self):
"""
The node immediately following the invocant in their parent's children
list. If the invocant does not have a next sibling, it is None
"""
if self.parent is None:
return None
# Can't use index(); we need to test by identity
for i, child in enumerate(self.parent.children):
if child is self:
try:
return self.parent.children[i+1]
except IndexError:
return None
|
[
"def",
"next_sibling",
"(",
"self",
")",
":",
"if",
"self",
".",
"parent",
"is",
"None",
":",
"return",
"None",
"# Can't use index(); we need to test by identity",
"for",
"i",
",",
"child",
"in",
"enumerate",
"(",
"self",
".",
"parent",
".",
"children",
")",
":",
"if",
"child",
"is",
"self",
":",
"try",
":",
"return",
"self",
".",
"parent",
".",
"children",
"[",
"i",
"+",
"1",
"]",
"except",
"IndexError",
":",
"return",
"None"
] |
The node immediately following the invocant in their parent's children
list. If the invocant does not have a next sibling, it is None
|
[
"The",
"node",
"immediately",
"following",
"the",
"invocant",
"in",
"their",
"parent",
"s",
"children",
"list",
".",
"If",
"the",
"invocant",
"does",
"not",
"have",
"a",
"next",
"sibling",
"it",
"is",
"None"
] |
python
|
train
|
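The "test by identity" comment in the record above is the interesting part: list.index() uses equality, so it can return the wrong node when two siblings compare equal. A standalone demonstration with toy classes:

class Leaf:
    def __init__(self, value):
        self.value = value
    def __eq__(self, other):
        return isinstance(other, Leaf) and self.value == other.value

a, b = Leaf('x'), Leaf('x')    # equal but distinct nodes
children = [a, b]
print(children.index(b))       # 0 -- equality finds `a`, the wrong node
print(next(i for i, c in enumerate(children) if c is b))  # 1 -- identity is correct
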
zsimic/runez
|
src/runez/serialize.py
|
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/serialize.py#L51-L60
|
def from_json(cls, path, fatal=True, logger=None):
"""
:param str path: Path to json file
:param bool|None fatal: Abort execution on failure if True
:param callable|None logger: Logger to use
:return: Deserialized object
"""
result = cls()
result.load(path, fatal=fatal, logger=logger)
return result
|
[
"def",
"from_json",
"(",
"cls",
",",
"path",
",",
"fatal",
"=",
"True",
",",
"logger",
"=",
"None",
")",
":",
"result",
"=",
"cls",
"(",
")",
"result",
".",
"load",
"(",
"path",
",",
"fatal",
"=",
"fatal",
",",
"logger",
"=",
"logger",
")",
"return",
"result"
] |
:param str path: Path to json file
:param bool|None fatal: Abort execution on failure if True
:param callable|None logger: Logger to use
:return: Deserialized object
|
[
":",
"param",
"str",
"path",
":",
"Path",
"to",
"json",
"file",
":",
"param",
"bool|None",
"fatal",
":",
"Abort",
"execution",
"on",
"failure",
"if",
"True",
":",
"param",
"callable|None",
"logger",
":",
"Logger",
"to",
"use",
":",
"return",
":",
"Deserialized",
"object"
] |
python
|
train
|
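The record above uses a common construct-then-load classmethod pattern. A minimal self-contained sketch of the same idea (the Config class and its load signature are assumptions, not runez API):

import json

class Config:
    def __init__(self):
        self.data = {}

    def load(self, path):
        with open(path) as fh:
            self.data = json.load(fh)

    @classmethod
    def from_json(cls, path):
        result = cls()       # build an empty instance first...
        result.load(path)    # ...then populate it in place
        return result
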
totalgood/nlpia
|
src/nlpia/transcoders.py
|
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/transcoders.py#L62-L71
|
def delimit_slug(slug, sep=' '):
""" Return a str of separated tokens found within a slugLike_This => 'slug Like This'
>>> delimit_slug("slugLike_ThisW/aTLA's")
'slug Like This W a TLA s'
>>> delimit_slug('slugLike_ThisW/aTLA', '|')
'slug|Like|This|W|a|TLA'
"""
hyphenated_slug = re.sub(CRE_SLUG_DELIMITTER, sep, slug)
return hyphenated_slug
|
[
"def",
"delimit_slug",
"(",
"slug",
",",
"sep",
"=",
"' '",
")",
":",
"hyphenated_slug",
"=",
"re",
".",
"sub",
"(",
"CRE_SLUG_DELIMITTER",
",",
"sep",
",",
"slug",
")",
"return",
"hyphenated_slug"
] |
Return a str of separated tokens found within a slugLike_This => 'slug Like This'
>>> delimit_slug("slugLike_ThisW/aTLA's")
'slug Like This W a TLA s'
>>> delimit_slug('slugLike_ThisW/aTLA', '|')
'slug|Like|This|W|a|TLA'
|
[
"Return",
"a",
"str",
"of",
"separated",
"tokens",
"found",
"within",
"a",
"slugLike_This",
"=",
">",
"slug",
"Like",
"This"
] |
python
|
train
|
Garee/pytodoist
|
pytodoist/api.py
|
https://github.com/Garee/pytodoist/blob/3359cbff485ebdbbb4ffbd58d71e21a817874dd7/pytodoist/api.py#L415-L427
|
def _get(self, end_point, params=None, **kwargs):
"""Send a HTTP GET request to a Todoist API end-point.
:param end_point: The Todoist API end-point.
:type end_point: str
:param params: The required request parameters.
:type params: dict
:param kwargs: Any optional parameters.
:type kwargs: dict
:return: The HTTP response to the request.
:rtype: :class:`requests.Response`
"""
return self._request(requests.get, end_point, params, **kwargs)
|
[
"def",
"_get",
"(",
"self",
",",
"end_point",
",",
"params",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_request",
"(",
"requests",
".",
"get",
",",
"end_point",
",",
"params",
",",
"*",
"*",
"kwargs",
")"
] |
Send an HTTP GET request to a Todoist API end-point.
:param end_point: The Todoist API end-point.
:type end_point: str
:param params: The required request parameters.
:type params: dict
:param kwargs: Any optional parameters.
:type kwargs: dict
:return: The HTTP response to the request.
:rtype: :class:`requests.Response`
|
[
"Send",
"a",
"HTTP",
"GET",
"request",
"to",
"a",
"Todoist",
"API",
"end",
"-",
"point",
"."
] |
python
|
train
|
mcs07/ChemDataExtractor
|
chemdataextractor/cli/dict.py
|
https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/dict.py#L277-L290
|
def prepare_jochem(ctx, jochem, output, csoutput):
"""Process and filter jochem file to produce list of names for dictionary."""
click.echo('chemdataextractor.dict.prepare_jochem')
for i, line in enumerate(jochem):
print('JC%s' % i)
if line.startswith('TM '):
if line.endswith('\t@match=ci\n'):
for tokens in _make_tokens(line[3:-11]):
output.write(' '.join(tokens))
output.write('\n')
else:
for tokens in _make_tokens(line[3:-1]):
csoutput.write(' '.join(tokens))
csoutput.write('\n')
|
[
"def",
"prepare_jochem",
"(",
"ctx",
",",
"jochem",
",",
"output",
",",
"csoutput",
")",
":",
"click",
".",
"echo",
"(",
"'chemdataextractor.dict.prepare_jochem'",
")",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"jochem",
")",
":",
"print",
"(",
"'JC%s'",
"%",
"i",
")",
"if",
"line",
".",
"startswith",
"(",
"'TM '",
")",
":",
"if",
"line",
".",
"endswith",
"(",
"'\t@match=ci\\n'",
")",
":",
"for",
"tokens",
"in",
"_make_tokens",
"(",
"line",
"[",
"3",
":",
"-",
"11",
"]",
")",
":",
"output",
".",
"write",
"(",
"' '",
".",
"join",
"(",
"tokens",
")",
")",
"output",
".",
"write",
"(",
"'\\n'",
")",
"else",
":",
"for",
"tokens",
"in",
"_make_tokens",
"(",
"line",
"[",
"3",
":",
"-",
"1",
"]",
")",
":",
"csoutput",
".",
"write",
"(",
"' '",
".",
"join",
"(",
"tokens",
")",
")",
"csoutput",
".",
"write",
"(",
"'\\n'",
")"
] |
Process and filter jochem file to produce list of names for dictionary.
|
[
"Process",
"and",
"filter",
"jochem",
"file",
"to",
"produce",
"list",
"of",
"names",
"for",
"dictionary",
"."
] |
python
|
train
|
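The slice arithmetic in the record above strips the 'TM ' prefix (3 characters) and the trailing '\t@match=ci\n' marker (11 characters). A quick standalone check:

line = 'TM benzene\t@match=ci\n'
print(repr(line[3:-11]))   # 'benzene'
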
AndrewIngram/django-extra-views
|
extra_views/advanced.py
|
https://github.com/AndrewIngram/django-extra-views/blob/188e1bf1f15a44d9a599028d020083af9fb43ea7/extra_views/advanced.py#L61-L68
|
def forms_valid(self, form, inlines):
"""
If the form and formsets are valid, save the associated models.
"""
response = self.form_valid(form)
for formset in inlines:
formset.save()
return response
|
[
"def",
"forms_valid",
"(",
"self",
",",
"form",
",",
"inlines",
")",
":",
"response",
"=",
"self",
".",
"form_valid",
"(",
"form",
")",
"for",
"formset",
"in",
"inlines",
":",
"formset",
".",
"save",
"(",
")",
"return",
"response"
] |
If the form and formsets are valid, save the associated models.
|
[
"If",
"the",
"form",
"and",
"formsets",
"are",
"valid",
"save",
"the",
"associated",
"models",
"."
] |
python
|
valid
|
titusz/epubcheck
|
src/epubcheck/utils.py
|
https://github.com/titusz/epubcheck/blob/7adde81543d3ae7385ab7062adb76e1414d49c2e/src/epubcheck/utils.py#L13-L22
|
def java_version():
"""Call java and return version information.
:return unicode: Java version string
"""
result = subprocess.check_output(
[c.JAVA, '-version'], stderr=subprocess.STDOUT
)
first_line = result.splitlines()[0]
return first_line.decode()
|
[
"def",
"java_version",
"(",
")",
":",
"result",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"c",
".",
"JAVA",
",",
"'-version'",
"]",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"first_line",
"=",
"result",
".",
"splitlines",
"(",
")",
"[",
"0",
"]",
"return",
"first_line",
".",
"decode",
"(",
")"
] |
Call java and return version information.
:return unicode: Java version string
|
[
"Call",
"java",
"and",
"return",
"version",
"information",
"."
] |
python
|
train
|
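One detail worth calling out in the record above: `java -version` writes to stderr, which is why the helper redirects stderr into the captured output. A standalone equivalent (requires a java binary on PATH):

import subprocess

out = subprocess.check_output(['java', '-version'], stderr=subprocess.STDOUT)
print(out.splitlines()[0].decode())   # e.g. 'openjdk version "11.0.2" ...'
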
admiralobvious/vyper
|
vyper/vyper.py
|
https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L235-L237
|
def unmarshall_key(self, key, cls):
"""Takes a single key and unmarshalls it into a class."""
return setattr(cls, key, self.get(key))
|
[
"def",
"unmarshall_key",
"(",
"self",
",",
"key",
",",
"cls",
")",
":",
"return",
"setattr",
"(",
"cls",
",",
"key",
",",
"self",
".",
"get",
"(",
"key",
")",
")"
] |
Takes a single key and unmarshalls it into a class.
|
[
"Takes",
"a",
"single",
"key",
"and",
"unmarshalls",
"it",
"into",
"a",
"class",
"."
] |
python
|
train
|
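Note that setattr() always returns None, so the method in the record above mutates `cls` and its return value carries no information; callers should rely on the side effect. A quick demonstration:

class Settings:
    pass

print(setattr(Settings, 'port', 8080))  # None -- same shape as the method above
print(Settings.port)                    # 8080 -- the attribute was still set
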
Opentrons/opentrons
|
api/src/opentrons/legacy_api/containers/placeable.py
|
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/legacy_api/containers/placeable.py#L556-L566
|
def transpose(self, rows):
"""
Transposes the grid to allow for cols
"""
res = OrderedDict()
for row, cols in rows.items():
for col, cell in cols.items():
if col not in res:
res[col] = OrderedDict()
res[col][row] = cell
return res
|
[
"def",
"transpose",
"(",
"self",
",",
"rows",
")",
":",
"res",
"=",
"OrderedDict",
"(",
")",
"for",
"row",
",",
"cols",
"in",
"rows",
".",
"items",
"(",
")",
":",
"for",
"col",
",",
"cell",
"in",
"cols",
".",
"items",
"(",
")",
":",
"if",
"col",
"not",
"in",
"res",
":",
"res",
"[",
"col",
"]",
"=",
"OrderedDict",
"(",
")",
"res",
"[",
"col",
"]",
"[",
"row",
"]",
"=",
"cell",
"return",
"res"
] |
Transposes the grid to allow for cols
|
[
"Transposes",
"the",
"grid",
"to",
"allow",
"for",
"cols"
] |
python
|
train
|
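A worked example of the row/column flip in the record above, using small literal data; setdefault is used here as shorthand for the `if col not in res` branch:

from collections import OrderedDict

rows = OrderedDict([
    ('A', OrderedDict([('1', 'a1'), ('2', 'a2')])),
    ('B', OrderedDict([('1', 'b1'), ('2', 'b2')])),
])

res = OrderedDict()
for row, cols in rows.items():
    for col, cell in cols.items():
        res.setdefault(col, OrderedDict())[row] = cell

print(res['1'])   # OrderedDict([('A', 'a1'), ('B', 'b1')])
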
rootpy/rootpy
|
rootpy/extern/byteplay2/__init__.py
|
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/extern/byteplay2/__init__.py#L439-L643
|
def _compute_stacksize(self):
"""Get a code list, compute its maximal stack usage."""
# This is done by scanning the code, and computing for each opcode
# the stack state at the opcode.
code = self.code
# A mapping from labels to their positions in the code list
label_pos = dict((op, pos)
for pos, (op, arg) in enumerate(code)
if isinstance(op, Label))
# sf_targets are the targets of SETUP_FINALLY opcodes. They are recorded
# because they have special stack behaviour. If an exception was raised
# in the block pushed by a SETUP_FINALLY opcode, the block is popped
# and 3 objects are pushed. On return or continue, the block is popped
# and 2 objects are pushed. If nothing happened, the block is popped by
# a POP_BLOCK opcode and 1 object is pushed by a (LOAD_CONST, None)
# operation.
#
# Our solution is to record the stack state of SETUP_FINALLY targets
# as having 3 objects pushed, which is the maximum. However, to make
# stack recording consistent, the get_next_stacks function will always
# yield the stack state of the target as if 1 object was pushed, but
# this will be corrected in the actual stack recording.
sf_targets = set(label_pos[arg]
for op, arg in code
if op == SETUP_FINALLY)
# What we compute - for each opcode, its stack state, as an n-tuple.
# n is the number of blocks pushed. For each block, we record the number
# of objects pushed.
stacks = [None] * len(code)
def get_next_stacks(pos, curstack):
"""Get a code position and the stack state before the operation
was done, and yield pairs (pos, curstack) for the next positions
to be explored - those are the positions to which you can get
from the given (pos, curstack).
If the given position was already explored, nothing will be yielded.
"""
op, arg = code[pos]
if isinstance(op, Label):
# We should check if we already reached a node only if it is
# a label.
if pos in sf_targets:
curstack = curstack[:-1] + (curstack[-1] + 2,)
if stacks[pos] is None:
stacks[pos] = curstack
else:
if stacks[pos] != curstack:
raise ValueError("Inconsistent code")
return
def newstack(n):
# Return a new stack, modified by adding n elements to the last
# block
if curstack[-1] + n < 0:
raise ValueError("Popped a non-existing element")
return curstack[:-1] + (curstack[-1]+n,)
if not isopcode(op):
# label or SetLineno - just continue to next line
yield pos+1, curstack
elif op in (STOP_CODE, RETURN_VALUE, RAISE_VARARGS):
# No place in particular to continue to
pass
elif op == MAKE_CLOSURE and python_version == '2.4':
# This is only relevant in Python 2.4 - in Python 2.5 the stack
# effect of MAKE_CLOSURE can be calculated from the arg.
# In Python 2.4, it depends on the number of freevars of TOS,
# which should be a code object.
if pos == 0:
raise ValueError("MAKE_CLOSURE can't be the first opcode")
lastop, lastarg = code[pos-1]
if lastop != LOAD_CONST:
raise ValueError(
"MAKE_CLOSURE should come after a LOAD_CONST op")
try:
nextrapops = len(lastarg.freevars)
except AttributeError:
try:
nextrapops = len(lastarg.co_freevars)
except AttributeError:
raise ValueError(
"MAKE_CLOSURE preceding const should "
"be a code or a Code object")
yield pos+1, newstack(-arg-nextrapops)
elif op not in hasflow:
# Simple change of stack
pop, push = getse(op, arg)
yield pos+1, newstack(push - pop)
elif op in (JUMP_FORWARD, JUMP_ABSOLUTE):
# One possibility for a jump
yield label_pos[arg], curstack
elif python_version < '2.7' and op in (JUMP_IF_FALSE, JUMP_IF_TRUE):
# Two possibilities for a jump
yield label_pos[arg], curstack
yield pos+1, curstack
elif python_version >= '2.7' and op in (POP_JUMP_IF_FALSE, POP_JUMP_IF_TRUE):
# Two possibilities for a jump
yield label_pos[arg], newstack(-1)
yield pos+1, newstack(-1)
elif python_version >= '2.7' and op in (JUMP_IF_TRUE_OR_POP, JUMP_IF_FALSE_OR_POP):
# Two possibilities for a jump
yield label_pos[arg], curstack
yield pos+1, newstack(-1)
elif op == FOR_ITER:
# FOR_ITER pushes next(TOS) on success, and pops TOS and jumps
# on failure
yield label_pos[arg], newstack(-1)
yield pos+1, newstack(1)
elif op == BREAK_LOOP:
# BREAK_LOOP jumps to a place specified on block creation, so
# it is ignored here
pass
elif op == CONTINUE_LOOP:
# CONTINUE_LOOP jumps to the beginning of a loop which should
# already have been discovered, but we verify anyway.
# It pops a block.
if python_version == '2.6':
pos, stack = label_pos[arg], curstack[:-1]
if stacks[pos] != stack: #this could be a loop with a 'with' inside
yield pos, stack[:-1] + (stack[-1]-1,)
else:
yield pos, stack
else:
yield label_pos[arg], curstack[:-1]
elif op == SETUP_LOOP:
# We continue with a new block.
# On break, we jump to the label and return to current stack
# state.
yield label_pos[arg], curstack
yield pos+1, curstack + (0,)
elif op == SETUP_EXCEPT:
# We continue with a new block.
# On exception, we jump to the label with 3 extra objects on
# stack
yield label_pos[arg], newstack(3)
yield pos+1, curstack + (0,)
elif op == SETUP_FINALLY:
# We continue with a new block.
# On exception, we jump to the label with 3 extra objects on
# stack, but to keep stack recording consistent, we behave as
# if we add only 1 object. Extra 2 will be added to the actual
# recording.
yield label_pos[arg], newstack(1)
yield pos+1, curstack + (0,)
elif python_version == '2.7' and op == SETUP_WITH:
yield label_pos[arg], curstack
yield pos+1, newstack(-1) + (1,)
elif op == POP_BLOCK:
# Just pop the block
yield pos+1, curstack[:-1]
elif op == END_FINALLY:
# Since stack recording of SETUP_FINALLY targets is of 3 pushed
# objects (as when an exception is raised), we pop 3 objects.
yield pos+1, newstack(-3)
elif op == WITH_CLEANUP:
# Since WITH_CLEANUP is always found after SETUP_FINALLY
# targets, and the stack recording is that of a raised
# exception, we can simply pop 1 object and let END_FINALLY
# pop the remaining 3.
if python_version == '2.7':
yield pos+1, newstack(2)
else:
yield pos+1, newstack(-1)
else:
assert False, "Unhandled opcode: %r" % op
# Now comes the calculation: open_positions holds positions which are
# yet to be explored. In each step we take one open position, and
# explore it by adding the positions to which you can get from it, to
# open_positions. On the way, we update maxsize.
# open_positions is a list of tuples: (pos, stack state)
maxsize = 0
open_positions = [(0, (0,))]
while open_positions:
pos, curstack = open_positions.pop()
maxsize = max(maxsize, sum(curstack))
open_positions.extend(get_next_stacks(pos, curstack))
return maxsize
|
[
"def",
"_compute_stacksize",
"(",
"self",
")",
":",
"# This is done by scanning the code, and computing for each opcode",
"# the stack state at the opcode.",
"code",
"=",
"self",
".",
"code",
"# A mapping from labels to their positions in the code list",
"label_pos",
"=",
"dict",
"(",
"(",
"op",
",",
"pos",
")",
"for",
"pos",
",",
"(",
"op",
",",
"arg",
")",
"in",
"enumerate",
"(",
"code",
")",
"if",
"isinstance",
"(",
"op",
",",
"Label",
")",
")",
"# sf_targets are the targets of SETUP_FINALLY opcodes. They are recorded",
"# because they have special stack behaviour. If an exception was raised",
"# in the block pushed by a SETUP_FINALLY opcode, the block is popped",
"# and 3 objects are pushed. On return or continue, the block is popped",
"# and 2 objects are pushed. If nothing happened, the block is popped by",
"# a POP_BLOCK opcode and 1 object is pushed by a (LOAD_CONST, None)",
"# operation.",
"#",
"# Our solution is to record the stack state of SETUP_FINALLY targets",
"# as having 3 objects pushed, which is the maximum. However, to make",
"# stack recording consistent, the get_next_stacks function will always",
"# yield the stack state of the target as if 1 object was pushed, but",
"# this will be corrected in the actual stack recording.",
"sf_targets",
"=",
"set",
"(",
"label_pos",
"[",
"arg",
"]",
"for",
"op",
",",
"arg",
"in",
"code",
"if",
"op",
"==",
"SETUP_FINALLY",
")",
"# What we compute - for each opcode, its stack state, as an n-tuple.",
"# n is the number of blocks pushed. For each block, we record the number",
"# of objects pushed.",
"stacks",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"code",
")",
"def",
"get_next_stacks",
"(",
"pos",
",",
"curstack",
")",
":",
"\"\"\"Get a code position and the stack state before the operation\n was done, and yield pairs (pos, curstack) for the next positions\n to be explored - those are the positions to which you can get\n from the given (pos, curstack).\n\n If the given position was already explored, nothing will be yielded.\n \"\"\"",
"op",
",",
"arg",
"=",
"code",
"[",
"pos",
"]",
"if",
"isinstance",
"(",
"op",
",",
"Label",
")",
":",
"# We should check if we already reached a node only if it is",
"# a label.",
"if",
"pos",
"in",
"sf_targets",
":",
"curstack",
"=",
"curstack",
"[",
":",
"-",
"1",
"]",
"+",
"(",
"curstack",
"[",
"-",
"1",
"]",
"+",
"2",
",",
")",
"if",
"stacks",
"[",
"pos",
"]",
"is",
"None",
":",
"stacks",
"[",
"pos",
"]",
"=",
"curstack",
"else",
":",
"if",
"stacks",
"[",
"pos",
"]",
"!=",
"curstack",
":",
"raise",
"ValueError",
"(",
"\"Inconsistent code\"",
")",
"return",
"def",
"newstack",
"(",
"n",
")",
":",
"# Return a new stack, modified by adding n elements to the last",
"# block",
"if",
"curstack",
"[",
"-",
"1",
"]",
"+",
"n",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Popped a non-existing element\"",
")",
"return",
"curstack",
"[",
":",
"-",
"1",
"]",
"+",
"(",
"curstack",
"[",
"-",
"1",
"]",
"+",
"n",
",",
")",
"if",
"not",
"isopcode",
"(",
"op",
")",
":",
"# label or SetLineno - just continue to next line",
"yield",
"pos",
"+",
"1",
",",
"curstack",
"elif",
"op",
"in",
"(",
"STOP_CODE",
",",
"RETURN_VALUE",
",",
"RAISE_VARARGS",
")",
":",
"# No place in particular to continue to",
"pass",
"elif",
"op",
"==",
"MAKE_CLOSURE",
"and",
"python_version",
"==",
"'2.4'",
":",
"# This is only relevant in Python 2.4 - in Python 2.5 the stack",
"# effect of MAKE_CLOSURE can be calculated from the arg.",
"# In Python 2.4, it depends on the number of freevars of TOS,",
"# which should be a code object.",
"if",
"pos",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"MAKE_CLOSURE can't be the first opcode\"",
")",
"lastop",
",",
"lastarg",
"=",
"code",
"[",
"pos",
"-",
"1",
"]",
"if",
"lastop",
"!=",
"LOAD_CONST",
":",
"raise",
"ValueError",
"(",
"\"MAKE_CLOSURE should come after a LOAD_CONST op\"",
")",
"try",
":",
"nextrapops",
"=",
"len",
"(",
"lastarg",
".",
"freevars",
")",
"except",
"AttributeError",
":",
"try",
":",
"nextrapops",
"=",
"len",
"(",
"lastarg",
".",
"co_freevars",
")",
"except",
"AttributeError",
":",
"raise",
"ValueError",
"(",
"\"MAKE_CLOSURE preceding const should \"",
"\"be a code or a Code object\"",
")",
"yield",
"pos",
"+",
"1",
",",
"newstack",
"(",
"-",
"arg",
"-",
"nextrapops",
")",
"elif",
"op",
"not",
"in",
"hasflow",
":",
"# Simple change of stack",
"pop",
",",
"push",
"=",
"getse",
"(",
"op",
",",
"arg",
")",
"yield",
"pos",
"+",
"1",
",",
"newstack",
"(",
"push",
"-",
"pop",
")",
"elif",
"op",
"in",
"(",
"JUMP_FORWARD",
",",
"JUMP_ABSOLUTE",
")",
":",
"# One possibility for a jump",
"yield",
"label_pos",
"[",
"arg",
"]",
",",
"curstack",
"elif",
"python_version",
"<",
"'2.7'",
"and",
"op",
"in",
"(",
"JUMP_IF_FALSE",
",",
"JUMP_IF_TRUE",
")",
":",
"# Two possibilities for a jump",
"yield",
"label_pos",
"[",
"arg",
"]",
",",
"curstack",
"yield",
"pos",
"+",
"1",
",",
"curstack",
"elif",
"python_version",
">=",
"'2.7'",
"and",
"op",
"in",
"(",
"POP_JUMP_IF_FALSE",
",",
"POP_JUMP_IF_TRUE",
")",
":",
"# Two possibilities for a jump",
"yield",
"label_pos",
"[",
"arg",
"]",
",",
"newstack",
"(",
"-",
"1",
")",
"yield",
"pos",
"+",
"1",
",",
"newstack",
"(",
"-",
"1",
")",
"elif",
"python_version",
">=",
"'2.7'",
"and",
"op",
"in",
"(",
"JUMP_IF_TRUE_OR_POP",
",",
"JUMP_IF_FALSE_OR_POP",
")",
":",
"# Two possibilities for a jump",
"yield",
"label_pos",
"[",
"arg",
"]",
",",
"curstack",
"yield",
"pos",
"+",
"1",
",",
"newstack",
"(",
"-",
"1",
")",
"elif",
"op",
"==",
"FOR_ITER",
":",
"# FOR_ITER pushes next(TOS) on success, and pops TOS and jumps",
"# on failure",
"yield",
"label_pos",
"[",
"arg",
"]",
",",
"newstack",
"(",
"-",
"1",
")",
"yield",
"pos",
"+",
"1",
",",
"newstack",
"(",
"1",
")",
"elif",
"op",
"==",
"BREAK_LOOP",
":",
"# BREAK_LOOP jumps to a place specified on block creation, so",
"# it is ignored here",
"pass",
"elif",
"op",
"==",
"CONTINUE_LOOP",
":",
"# CONTINUE_LOOP jumps to the beginning of a loop which should",
"# already ave been discovered, but we verify anyway.",
"# It pops a block.",
"if",
"python_version",
"==",
"'2.6'",
":",
"pos",
",",
"stack",
"=",
"label_pos",
"[",
"arg",
"]",
",",
"curstack",
"[",
":",
"-",
"1",
"]",
"if",
"stacks",
"[",
"pos",
"]",
"!=",
"stack",
":",
"#this could be a loop with a 'with' inside",
"yield",
"pos",
",",
"stack",
"[",
":",
"-",
"1",
"]",
"+",
"(",
"stack",
"[",
"-",
"1",
"]",
"-",
"1",
",",
")",
"else",
":",
"yield",
"pos",
",",
"stack",
"else",
":",
"yield",
"label_pos",
"[",
"arg",
"]",
",",
"curstack",
"[",
":",
"-",
"1",
"]",
"elif",
"op",
"==",
"SETUP_LOOP",
":",
"# We continue with a new block.",
"# On break, we jump to the label and return to current stack",
"# state.",
"yield",
"label_pos",
"[",
"arg",
"]",
",",
"curstack",
"yield",
"pos",
"+",
"1",
",",
"curstack",
"+",
"(",
"0",
",",
")",
"elif",
"op",
"==",
"SETUP_EXCEPT",
":",
"# We continue with a new block.",
"# On exception, we jump to the label with 3 extra objects on",
"# stack",
"yield",
"label_pos",
"[",
"arg",
"]",
",",
"newstack",
"(",
"3",
")",
"yield",
"pos",
"+",
"1",
",",
"curstack",
"+",
"(",
"0",
",",
")",
"elif",
"op",
"==",
"SETUP_FINALLY",
":",
"# We continue with a new block.",
"# On exception, we jump to the label with 3 extra objects on",
"# stack, but to keep stack recording consistent, we behave as",
"# if we add only 1 object. Extra 2 will be added to the actual",
"# recording.",
"yield",
"label_pos",
"[",
"arg",
"]",
",",
"newstack",
"(",
"1",
")",
"yield",
"pos",
"+",
"1",
",",
"curstack",
"+",
"(",
"0",
",",
")",
"elif",
"python_version",
"==",
"'2.7'",
"and",
"op",
"==",
"SETUP_WITH",
":",
"yield",
"label_pos",
"[",
"arg",
"]",
",",
"curstack",
"yield",
"pos",
"+",
"1",
",",
"newstack",
"(",
"-",
"1",
")",
"+",
"(",
"1",
",",
")",
"elif",
"op",
"==",
"POP_BLOCK",
":",
"# Just pop the block",
"yield",
"pos",
"+",
"1",
",",
"curstack",
"[",
":",
"-",
"1",
"]",
"elif",
"op",
"==",
"END_FINALLY",
":",
"# Since stack recording of SETUP_FINALLY targets is of 3 pushed",
"# objects (as when an exception is raised), we pop 3 objects.",
"yield",
"pos",
"+",
"1",
",",
"newstack",
"(",
"-",
"3",
")",
"elif",
"op",
"==",
"WITH_CLEANUP",
":",
"# Since WITH_CLEANUP is always found after SETUP_FINALLY",
"# targets, and the stack recording is that of a raised",
"# exception, we can simply pop 1 object and let END_FINALLY",
"# pop the remaining 3.",
"if",
"python_version",
"==",
"'2.7'",
":",
"yield",
"pos",
"+",
"1",
",",
"newstack",
"(",
"2",
")",
"else",
":",
"yield",
"pos",
"+",
"1",
",",
"newstack",
"(",
"-",
"1",
")",
"else",
":",
"assert",
"False",
",",
"\"Unhandled opcode: %r\"",
"%",
"op",
"# Now comes the calculation: open_positions holds positions which are",
"# yet to be explored. In each step we take one open position, and",
"# explore it by adding the positions to which you can get from it, to",
"# open_positions. On the way, we update maxsize.",
"# open_positions is a list of tuples: (pos, stack state)",
"maxsize",
"=",
"0",
"open_positions",
"=",
"[",
"(",
"0",
",",
"(",
"0",
",",
")",
")",
"]",
"while",
"open_positions",
":",
"pos",
",",
"curstack",
"=",
"open_positions",
".",
"pop",
"(",
")",
"maxsize",
"=",
"max",
"(",
"maxsize",
",",
"sum",
"(",
"curstack",
")",
")",
"open_positions",
".",
"extend",
"(",
"get_next_stacks",
"(",
"pos",
",",
"curstack",
")",
")",
"return",
"maxsize"
] |
Get a code list, compute its maximal stack usage.
|
[
"Get",
"a",
"code",
"list",
"compute",
"its",
"maximal",
"stack",
"usage",
"."
] |
python
|
train
|
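Stripped of the opcode details, the traversal in the record above is a classic worklist algorithm: pop an unexplored (position, stack state) pair, record it, push its successors, and track the maximum. A toy reduction with a hand-built successor graph (the graph itself is an assumption for illustration):

# successors[pos] -> list of (next_pos, stack_depth_at_next_pos)
successors = {0: [(1, 1)], 1: [(2, 2), (3, 0)], 2: [], 3: []}

maxsize = 0
seen = {}
open_positions = [(0, 0)]            # (position, stack depth) start state
while open_positions:
    pos, depth = open_positions.pop()
    if pos in seen:                  # already explored this position
        continue
    seen[pos] = depth
    maxsize = max(maxsize, depth)
    open_positions.extend(successors[pos])
print(maxsize)                       # 2
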
sentinel-hub/eo-learn
|
core/eolearn/core/utilities.py
|
https://github.com/sentinel-hub/eo-learn/blob/b8c390b9f553c561612fe9eb64e720611633a035/core/eolearn/core/utilities.py#L204-L212
|
def _parse_names_set(feature_names):
"""Helping function of `_parse_feature_names` that parses a set of feature names."""
feature_collection = OrderedDict()
for feature_name in feature_names:
if isinstance(feature_name, str):
feature_collection[feature_name] = ...
else:
raise ValueError('Failed to parse {}, expected string'.format(feature_name))
return feature_collection
|
[
"def",
"_parse_names_set",
"(",
"feature_names",
")",
":",
"feature_collection",
"=",
"OrderedDict",
"(",
")",
"for",
"feature_name",
"in",
"feature_names",
":",
"if",
"isinstance",
"(",
"feature_name",
",",
"str",
")",
":",
"feature_collection",
"[",
"feature_name",
"]",
"=",
"...",
"else",
":",
"raise",
"ValueError",
"(",
"'Failed to parse {}, expected string'",
".",
"format",
"(",
"feature_name",
")",
")",
"return",
"feature_collection"
] |
Helper function of `_parse_feature_names` that parses a set of feature names.
|
[
"Helping",
"function",
"of",
"_parse_feature_names",
"that",
"parses",
"a",
"set",
"of",
"feature",
"names",
"."
] |
python
|
train
|
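The bare `...` in the record above is Python's Ellipsis singleton, used as an "all sub-features" sentinel value. It behaves like any other singleton:

from collections import OrderedDict

feature_collection = OrderedDict()
for name in ['BANDS', 'NDVI']:
    feature_collection[name] = ...              # Ellipsis as a sentinel value

print(feature_collection['NDVI'] is Ellipsis)   # True
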
fastai/fastai
|
fastai/datasets.py
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/datasets.py#L199-L204
|
def datapath4file(filename, ext:str='.tgz', archive=True):
"Return data path to `filename`, checking locally first then in the config file."
local_path = URLs.LOCAL_PATH/'data'/filename
if local_path.exists() or local_path.with_suffix(ext).exists(): return local_path
elif archive: return Config.data_archive_path() / filename
else: return Config.data_path() / filename
|
[
"def",
"datapath4file",
"(",
"filename",
",",
"ext",
":",
"str",
"=",
"'.tgz'",
",",
"archive",
"=",
"True",
")",
":",
"local_path",
"=",
"URLs",
".",
"LOCAL_PATH",
"/",
"'data'",
"/",
"filename",
"if",
"local_path",
".",
"exists",
"(",
")",
"or",
"local_path",
".",
"with_suffix",
"(",
"ext",
")",
".",
"exists",
"(",
")",
":",
"return",
"local_path",
"elif",
"archive",
":",
"return",
"Config",
".",
"data_archive_path",
"(",
")",
"/",
"filename",
"else",
":",
"return",
"Config",
".",
"data_path",
"(",
")",
"/",
"filename"
] |
Return data path to `filename`, checking locally first then in the config file.
|
[
"Return",
"data",
"path",
"to",
"filename",
"checking",
"locally",
"first",
"then",
"in",
"the",
"config",
"file",
"."
] |
python
|
train
|
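A usage sketch for the record above; the filename is illustrative. The resolution order is: a local ./data copy wins, otherwise the configured archive or data path is returned:

from fastai.datasets import datapath4file

path = datapath4file('mnist_sample')   # ./data/mnist_sample if it exists,
print(path)                            # else under the configured archive path
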
KE-works/pykechain
|
pykechain/client.py
|
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/client.py#L1208-L1277
|
def create_property(self, model, name, description=None, property_type=PropertyType.CHAR_VALUE, default_value=None,
unit=None, options=None):
"""Create a new property model under a given model.
Use the :class:`enums.PropertyType` to select which property type to create to ensure that you
provide the correct values to the KE-chain backend. The default is a `PropertyType.CHAR_VALUE` which is a
single line text in KE-chain.
:param model: parent model
:type model: :class:`models.Part`
:param name: property model name
:type name: basestring
:param description: property model description (optional)
:type description: basestring or None
:param property_type: choose one of the :class:`enums.PropertyType`, defaults to `PropertyType.CHAR_VALUE`.
:type property_type: basestring or None
:param default_value: (optional) default value used for part instances when creating a model.
:type default_value: any
:param unit: (optional) unit of the property
:type unit: basestring or None
:param options: (optional) property options (eg. validators or 'single selectlist choices')
:type options: basestring or None
:return: a :class:`models.Property` with category `MODEL`
:raises IllegalArgumentError: When the provided arguments are incorrect
:raises APIError: if the `Property` model could not be created
"""
if model.category != Category.MODEL:
raise IllegalArgumentError("The model should be of category MODEL")
if not property_type.endswith('_VALUE'):
warnings.warn("Please use the `PropertyType` enumeration to ensure providing correct "
"values to the backend.", UserWarning)
property_type = '{}_VALUE'.format(property_type.upper())
if property_type not in PropertyType.values():
raise IllegalArgumentError("Please provide a valid propertytype, please use one of `enums.PropertyType`. "
"Got: '{}'".format(property_type))
# because the references value only accepts a single 'model_id' in the default value, we need to convert this
# to a single value from the list of values.
if property_type in (PropertyType.REFERENCE_VALUE, PropertyType.REFERENCES_VALUE) and \
isinstance(default_value, (list, tuple)) and default_value:
default_value = default_value[0]
data = {
"name": name,
"part": model.id,
"description": description or '',
"property_type": property_type.upper(),
"value": default_value,
"unit": unit or '',
"options": options or {}
}
# # We add options after the fact only if they are available, otherwise the options will be set to null in the
# # request and that can't be handled by KE-chain.
# if options:
# data['options'] = options
response = self._request('POST', self._build_url('properties'),
json=data)
if response.status_code != requests.codes.created:
raise APIError("Could not create property")
prop = Property.create(response.json()['results'][0], client=self)
model.properties.append(prop)
return prop
|
[
"def",
"create_property",
"(",
"self",
",",
"model",
",",
"name",
",",
"description",
"=",
"None",
",",
"property_type",
"=",
"PropertyType",
".",
"CHAR_VALUE",
",",
"default_value",
"=",
"None",
",",
"unit",
"=",
"None",
",",
"options",
"=",
"None",
")",
":",
"if",
"model",
".",
"category",
"!=",
"Category",
".",
"MODEL",
":",
"raise",
"IllegalArgumentError",
"(",
"\"The model should be of category MODEL\"",
")",
"if",
"not",
"property_type",
".",
"endswith",
"(",
"'_VALUE'",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Please use the `PropertyType` enumeration to ensure providing correct \"",
"\"values to the backend.\"",
",",
"UserWarning",
")",
"property_type",
"=",
"'{}_VALUE'",
".",
"format",
"(",
"property_type",
".",
"upper",
"(",
")",
")",
"if",
"property_type",
"not",
"in",
"PropertyType",
".",
"values",
"(",
")",
":",
"raise",
"IllegalArgumentError",
"(",
"\"Please provide a valid propertytype, please use one of `enums.PropertyType`. \"",
"\"Got: '{}'\"",
".",
"format",
"(",
"property_type",
")",
")",
"# because the references value only accepts a single 'model_id' in the default value, we need to convert this",
"# to a single value from the list of values.",
"if",
"property_type",
"in",
"(",
"PropertyType",
".",
"REFERENCE_VALUE",
",",
"PropertyType",
".",
"REFERENCES_VALUE",
")",
"and",
"isinstance",
"(",
"default_value",
",",
"(",
"list",
",",
"tuple",
")",
")",
"and",
"default_value",
":",
"default_value",
"=",
"default_value",
"[",
"0",
"]",
"data",
"=",
"{",
"\"name\"",
":",
"name",
",",
"\"part\"",
":",
"model",
".",
"id",
",",
"\"description\"",
":",
"description",
"or",
"''",
",",
"\"property_type\"",
":",
"property_type",
".",
"upper",
"(",
")",
",",
"\"value\"",
":",
"default_value",
",",
"\"unit\"",
":",
"unit",
"or",
"''",
",",
"\"options\"",
":",
"options",
"or",
"{",
"}",
"}",
"# # We add options after the fact only if they are available, otherwise the options will be set to null in the",
"# # request and that can't be handled by KE-chain.",
"# if options:",
"# data['options'] = options",
"response",
"=",
"self",
".",
"_request",
"(",
"'POST'",
",",
"self",
".",
"_build_url",
"(",
"'properties'",
")",
",",
"json",
"=",
"data",
")",
"if",
"response",
".",
"status_code",
"!=",
"requests",
".",
"codes",
".",
"created",
":",
"raise",
"APIError",
"(",
"\"Could not create property\"",
")",
"prop",
"=",
"Property",
".",
"create",
"(",
"response",
".",
"json",
"(",
")",
"[",
"'results'",
"]",
"[",
"0",
"]",
",",
"client",
"=",
"self",
")",
"model",
".",
"properties",
".",
"append",
"(",
"prop",
")",
"return",
"prop"
] |
Create a new property model under a given model.
Use the :class:`enums.PropertyType` to select which property type to create to ensure that you
provide the correct values to the KE-chain backend. The default is a `PropertyType.CHAR_VALUE` which is a
single line text in KE-chain.
:param model: parent model
:type model: :class:`models.Part`
:param name: property model name
:type name: basestring
:param description: property model description (optional)
:type description: basestring or None
:param property_type: choose one of the :class:`enums.PropertyType`, defaults to `PropertyType.CHAR_VALUE`.
:type property_type: basestring or None
:param default_value: (optional) default value used for part instances when creating a model.
:type default_value: any
:param unit: (optional) unit of the property
:type unit: basestring or None
:param options: (optional) property options (eg. validators or 'single selectlist choices')
:type options: basestring or None
:return: a :class:`models.Property` with category `MODEL`
:raises IllegalArgumentError: When the provided arguments are incorrect
:raises APIError: if the `Property` model could not be created
|
[
"Create",
"a",
"new",
"property",
"model",
"under",
"a",
"given",
"model",
"."
] |
python
|
train
|
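A hypothetical call sketch for the record above; the URL, token, and model lookup are placeholders, not values from the record:

from pykechain import Client
from pykechain.enums import PropertyType

client = Client(url='https://example.kechain.com')   # placeholder URL
client.login(token='YOUR_TOKEN')                     # placeholder credentials
bike_model = client.model('Bike')                    # a Part of category MODEL

prop = client.create_property(model=bike_model, name='Gears',
                              property_type=PropertyType.INT_VALUE,
                              default_value=18)
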
clalancette/pycdlib
|
pycdlib/dr.py
|
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/dr.py#L584-L607
|
def new_dotdot(self, vd, parent, seqnum, rock_ridge, log_block_size,
rr_relocated_parent, xa, file_mode):
# type: (headervd.PrimaryOrSupplementaryVD, DirectoryRecord, int, str, int, bool, bool, int) -> None
'''
Create a new 'dotdot' Directory Record.
Parameters:
vd - The Volume Descriptor this record is part of.
parent - The parent of this directory record.
seqnum - The sequence number for this directory record.
rock_ridge - Whether to make this a Rock Ridge directory record.
log_block_size - The logical block size to use.
rr_relocated_parent - True if this is a Rock Ridge relocated parent.
xa - True if this is an Extended Attribute record.
file_mode - The POSIX file mode to set for this directory.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record already initialized')
self._new(vd, b'\x01', parent, seqnum, True, log_block_size, xa)
if rock_ridge:
self._rr_new(rock_ridge, b'', b'', False, False, rr_relocated_parent, file_mode)
|
[
"def",
"new_dotdot",
"(",
"self",
",",
"vd",
",",
"parent",
",",
"seqnum",
",",
"rock_ridge",
",",
"log_block_size",
",",
"rr_relocated_parent",
",",
"xa",
",",
"file_mode",
")",
":",
"# type: (headervd.PrimaryOrSupplementaryVD, DirectoryRecord, int, str, int, bool, bool, int) -> None",
"if",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'Directory Record already initialized'",
")",
"self",
".",
"_new",
"(",
"vd",
",",
"b'\\x01'",
",",
"parent",
",",
"seqnum",
",",
"True",
",",
"log_block_size",
",",
"xa",
")",
"if",
"rock_ridge",
":",
"self",
".",
"_rr_new",
"(",
"rock_ridge",
",",
"b''",
",",
"b''",
",",
"False",
",",
"False",
",",
"rr_relocated_parent",
",",
"file_mode",
")"
] |
Create a new 'dotdot' Directory Record.
Parameters:
vd - The Volume Descriptor this record is part of.
parent - The parent of this directory record.
seqnum - The sequence number for this directory record.
rock_ridge - Whether to make this a Rock Ridge directory record.
log_block_size - The logical block size to use.
rr_relocated_parent - True if this is a Rock Ridge relocated parent.
xa - True if this is an Extended Attribute record.
file_mode - The POSIX file mode to set for this directory.
Returns:
Nothing.
|
[
"Create",
"a",
"new",
"dotdot",
"Directory",
"Record",
"."
] |
python
|
train
|
kervi/kervi-devices
|
kervi/devices/gpio/MCP230XX.py
|
https://github.com/kervi/kervi-devices/blob/c6aaddc6da1d0bce0ea2b0c6eb8393ba10aefa56/kervi/devices/gpio/MCP230XX.py#L106-L114
|
def _input_pins(self, pins):
"""Read multiple pins specified in the given list and return list of pin values
GPIO.HIGH/True if the pin is pulled high, or GPIO.LOW/False if pulled low.
"""
[self._validate_channel(pin) for pin in pins]
# Get GPIO state.
gpio = self.i2c.read_list(self.GPIO, self.gpio_bytes)
# Return True if pin's bit is set.
return [(gpio[int(pin/8)] & 1 << (int(pin%8))) > 0 for pin in pins]
|
[
"def",
"_input_pins",
"(",
"self",
",",
"pins",
")",
":",
"[",
"self",
".",
"_validate_channel",
"(",
"pin",
")",
"for",
"pin",
"in",
"pins",
"]",
"# Get GPIO state.",
"gpio",
"=",
"self",
".",
"i2c",
".",
"read_list",
"(",
"self",
".",
"GPIO",
",",
"self",
".",
"gpio_bytes",
")",
"# Return True if pin's bit is set.",
"return",
"[",
"(",
"gpio",
"[",
"int",
"(",
"pin",
"/",
"8",
")",
"]",
"&",
"1",
"<<",
"(",
"int",
"(",
"pin",
"%",
"8",
")",
")",
")",
">",
"0",
"for",
"pin",
"in",
"pins",
"]"
] |
Read multiple pins specified in the given list and return list of pin values
GPIO.HIGH/True if the pin is pulled high, or GPIO.LOW/False if pulled low.
|
[
"Read",
"multiple",
"pins",
"specified",
"in",
"the",
"given",
"list",
"and",
"return",
"list",
"of",
"pin",
"values",
"GPIO",
".",
"HIGH",
"/",
"True",
"if",
"the",
"pin",
"is",
"pulled",
"high",
"or",
"GPIO",
".",
"LOW",
"/",
"False",
"if",
"pulled",
"low",
"."
] |
python
|
train
|
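A worked example of the byte/bit arithmetic in the record above: pin 11 lives in byte 1 (11 // 8) at bit 3 (11 % 8):

gpio = [0b00000000, 0b00001000]   # two GPIO banks as read over I2C
pin = 11
value = (gpio[pin // 8] & (1 << (pin % 8))) > 0
print(value)                      # True -- bit 3 of byte 1 is set
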
f3at/feat
|
src/feat/common/manhole.py
|
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/common/manhole.py#L85-L91
|
def help(self):
'''Prints exposed methods and their docstrings.'''
cmds = self.get_exposed_cmds()
t = text_helper.Table(fields=['command', 'doc'],
lengths=[50, 85])
return t.render((reflect.formatted_function_name(x), x.__doc__, )
for x in cmds.values())
|
[
"def",
"help",
"(",
"self",
")",
":",
"cmds",
"=",
"self",
".",
"get_exposed_cmds",
"(",
")",
"t",
"=",
"text_helper",
".",
"Table",
"(",
"fields",
"=",
"[",
"'command'",
",",
"'doc'",
"]",
",",
"lengths",
"=",
"[",
"50",
",",
"85",
"]",
")",
"return",
"t",
".",
"render",
"(",
"(",
"reflect",
".",
"formatted_function_name",
"(",
"x",
")",
",",
"x",
".",
"__doc__",
",",
")",
"for",
"x",
"in",
"cmds",
".",
"values",
"(",
")",
")"
] |
Prints exposed methods and their docstrings.
|
[
"Prints",
"exposed",
"methods",
"and",
"their",
"docstrings",
"."
] |
python
|
train
|
fhamborg/news-please
|
newsplease/helper_classes/heuristics.py
|
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/heuristics.py#L36-L52
|
def meta_contains_article_keyword(self, response, site_dict):
"""
Determines whether the response's meta data contains the keyword
'article'
:param obj response: The scrapy response
:param dict site_dict: The site object from the JSON-File
:return bool: Determines whether the response's meta data contains the
keyword 'article'
"""
contains_meta = response.xpath('//meta') \
.re('(= ?["\'][^"\']*article[^"\']*["\'])')
if not contains_meta:
return False
return True
|
[
"def",
"meta_contains_article_keyword",
"(",
"self",
",",
"response",
",",
"site_dict",
")",
":",
"contains_meta",
"=",
"response",
".",
"xpath",
"(",
"'//meta'",
")",
".",
"re",
"(",
"'(= ?[\"\\'][^\"\\']*article[^\"\\']*[\"\\'])'",
")",
"if",
"not",
"contains_meta",
":",
"return",
"False",
"return",
"True"
] |
Determines whether the response's meta data contains the keyword
'article'
:param obj response: The scrapy response
:param dict site_dict: The site object from the JSON-File
:return bool: Determines whether the response's meta data contains the
keyword 'article'
|
[
"Determines",
"wether",
"the",
"response",
"s",
"meta",
"data",
"contains",
"the",
"keyword",
"article"
] |
python
|
train
|
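The XPath/regex pair in the record above reduces to: does any attribute assignment inside a <meta> tag contain 'article'? The same pattern can be checked standalone:

import re

meta = '<meta property="og:type" content="article">'
print(bool(re.search(r'(= ?["\'][^"\']*article[^"\']*["\'])', meta)))   # True
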
lrq3000/pyFileFixity
|
pyFileFixity/lib/profilers/visual/pycallgraph.py
|
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pycallgraph.py#L372-L397
|
def get_gdf(stop=True):
"""Returns a string containing a GDF file. Setting stop to True will cause
the trace to stop.
"""
ret = ['nodedef>name VARCHAR, label VARCHAR, hits INTEGER, ' + \
'calls_frac DOUBLE, total_time_frac DOUBLE, ' + \
'total_time DOUBLE, color VARCHAR, width DOUBLE']
for func, hits in func_count.items():
calls_frac, total_time_frac, total_time = _frac_calculation(func, hits)
col = settings['node_colour'](calls_frac, total_time_frac)
color = ','.join([str(round(float(c) * 255)) for c in col.split()])
ret.append('%s,%s,%s,%s,%s,%s,\'%s\',%s' % (func, func, hits, \
calls_frac, total_time_frac, total_time, color, \
math.log(hits * 10)))
ret.append('edgedef>node1 VARCHAR, node2 VARCHAR, color VARCHAR')
for fr_key, fr_val in call_dict.items():
if fr_key == '':
continue
for to_key, to_val in fr_val.items():
calls_frac, total_time_frac, total_time = \
_frac_calculation(to_key, to_val)
col = settings['edge_colour'](calls_frac, total_time_frac)
color = ','.join([str(round(float(c) * 255)) for c in col.split()])
ret.append('%s,%s,\'%s\'' % (fr_key, to_key, color))
ret = '\n'.join(ret)
return ret
|
[
"def",
"get_gdf",
"(",
"stop",
"=",
"True",
")",
":",
"ret",
"=",
"[",
"'nodedef>name VARCHAR, label VARCHAR, hits INTEGER, '",
"+",
"'calls_frac DOUBLE, total_time_frac DOUBLE, '",
"+",
"'total_time DOUBLE, color VARCHAR, width DOUBLE'",
"]",
"for",
"func",
",",
"hits",
"in",
"func_count",
".",
"items",
"(",
")",
":",
"calls_frac",
",",
"total_time_frac",
",",
"total_time",
"=",
"_frac_calculation",
"(",
"func",
",",
"hits",
")",
"col",
"=",
"settings",
"[",
"'node_colour'",
"]",
"(",
"calls_frac",
",",
"total_time_frac",
")",
"color",
"=",
"','",
".",
"join",
"(",
"[",
"str",
"(",
"round",
"(",
"float",
"(",
"c",
")",
"*",
"255",
")",
")",
"for",
"c",
"in",
"col",
".",
"split",
"(",
")",
"]",
")",
"ret",
".",
"append",
"(",
"'%s,%s,%s,%s,%s,%s,\\'%s\\',%s'",
"%",
"(",
"func",
",",
"func",
",",
"hits",
",",
"calls_frac",
",",
"total_time_frac",
",",
"total_time",
",",
"color",
",",
"math",
".",
"log",
"(",
"hits",
"*",
"10",
")",
")",
")",
"ret",
".",
"append",
"(",
"'edgedef>node1 VARCHAR, node2 VARCHAR, color VARCHAR'",
")",
"for",
"fr_key",
",",
"fr_val",
"in",
"call_dict",
".",
"items",
"(",
")",
":",
"if",
"fr_key",
"==",
"''",
":",
"continue",
"for",
"to_key",
",",
"to_val",
"in",
"fr_val",
".",
"items",
"(",
")",
":",
"calls_frac",
",",
"total_time_frac",
",",
"total_time",
"=",
"_frac_calculation",
"(",
"to_key",
",",
"to_val",
")",
"col",
"=",
"settings",
"[",
"'edge_colour'",
"]",
"(",
"calls_frac",
",",
"total_time_frac",
")",
"color",
"=",
"','",
".",
"join",
"(",
"[",
"str",
"(",
"round",
"(",
"float",
"(",
"c",
")",
"*",
"255",
")",
")",
"for",
"c",
"in",
"col",
".",
"split",
"(",
")",
"]",
")",
"ret",
".",
"append",
"(",
"'%s,%s,\\'%s\\''",
"%",
"(",
"fr_key",
",",
"to_key",
",",
"color",
")",
")",
"ret",
"=",
"'\\n'",
".",
"join",
"(",
"ret",
")",
"return",
"ret"
] |
Returns a string containing a GDF file. Setting stop to True will cause
the trace to stop.
|
[
"Returns",
"a",
"string",
"containing",
"a",
"GDF",
"file",
".",
"Setting",
"stop",
"to",
"True",
"will",
"cause",
"the",
"trace",
"to",
"stop",
"."
] |
python
|
train
|
panosl/django-currencies
|
currencies/management/commands/_yahoofinance.py
|
https://github.com/panosl/django-currencies/blob/8d4c6c202ad7c4cc06263ab2c1b1f969bbe99acd/currencies/management/commands/_yahoofinance.py#L102-L109
|
def get_bulkrates(self):
"""Get & format the rates dict"""
try:
resp = get(self.bulk_url)
resp.raise_for_status()
except exceptions.RequestException as e:
raise RuntimeError(e)
return resp.json()
|
[
"def",
"get_bulkrates",
"(",
"self",
")",
":",
"try",
":",
"resp",
"=",
"get",
"(",
"self",
".",
"bulk_url",
")",
"resp",
".",
"raise_for_status",
"(",
")",
"except",
"exceptions",
".",
"RequestException",
"as",
"e",
":",
"raise",
"RuntimeError",
"(",
"e",
")",
"return",
"resp",
".",
"json",
"(",
")"
] |
Get & format the rates dict
|
[
"Get",
"&",
"format",
"the",
"rates",
"dict"
] |
python
|
train
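A minimal, self-contained sketch of the same fetch-and-raise pattern shown in get_bulkrates(); the URL below is a placeholder, not the real bulk-rates endpoint:

import requests

def fetch_json(url):
    # Fetch a URL and return its decoded JSON body, collapsing transport
    # and HTTP errors into a single exception type, as get_bulkrates() does.
    try:
        resp = requests.get(url, timeout=10)
        resp.raise_for_status()  # turn HTTP 4xx/5xx into an exception
    except requests.exceptions.RequestException as e:
        raise RuntimeError(e)
    return resp.json()

rates = fetch_json('https://example.com/rates.json')  # hypothetical URL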
|
Diviyan-Kalainathan/CausalDiscoveryToolbox
|
cdt/causality/pairwise/RCC.py
|
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/causality/pairwise/RCC.py#L97-L114
|
def fit(self, x, y):
"""Train the model.
Args:
x (pd.DataFrame): CEPC format dataframe containing the pairs
y (pd.DataFrame or np.ndarray): labels associated to the pairs
"""
train = np.vstack((np.array([self.featurize_row(row.iloc[0],
row.iloc[1]) for idx, row in x.iterrows()]),
np.array([self.featurize_row(row.iloc[1],
row.iloc[0]) for idx, row in x.iterrows()])))
labels = np.vstack((y, -y)).ravel()
verbose = 1 if self.verbose else 0
self.clf = CLF(verbose=verbose,
min_samples_leaf=self.L,
n_estimators=self.E,
max_depth=self.max_depth,
n_jobs=self.n_jobs).fit(train, labels)
|
[
"def",
"fit",
"(",
"self",
",",
"x",
",",
"y",
")",
":",
"train",
"=",
"np",
".",
"vstack",
"(",
"(",
"np",
".",
"array",
"(",
"[",
"self",
".",
"featurize_row",
"(",
"row",
".",
"iloc",
"[",
"0",
"]",
",",
"row",
".",
"iloc",
"[",
"1",
"]",
")",
"for",
"idx",
",",
"row",
"in",
"x",
".",
"iterrows",
"(",
")",
"]",
")",
",",
"np",
".",
"array",
"(",
"[",
"self",
".",
"featurize_row",
"(",
"row",
".",
"iloc",
"[",
"1",
"]",
",",
"row",
".",
"iloc",
"[",
"0",
"]",
")",
"for",
"idx",
",",
"row",
"in",
"x",
".",
"iterrows",
"(",
")",
"]",
")",
")",
")",
"labels",
"=",
"np",
".",
"vstack",
"(",
"(",
"y",
",",
"-",
"y",
")",
")",
".",
"ravel",
"(",
")",
"verbose",
"=",
"1",
"if",
"self",
".",
"verbose",
"else",
"0",
"self",
".",
"clf",
"=",
"CLF",
"(",
"verbose",
"=",
"verbose",
",",
"min_samples_leaf",
"=",
"self",
".",
"L",
",",
"n_estimators",
"=",
"self",
".",
"E",
",",
"max_depth",
"=",
"self",
".",
"max_depth",
",",
"n_jobs",
"=",
"self",
".",
"n_jobs",
")",
".",
"fit",
"(",
"train",
",",
"labels",
")"
] |
Train the model.
Args:
x (pd.DataFrame): CEPC format dataframe containing the pairs
y (pd.DataFrame or np.ndarray): labels associated to the pairs
|
[
"Train",
"the",
"model",
"."
] |
python
|
valid
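The core trick in fit() is pair symmetrization: each pair is featurized in both orders and the flipped copies get negated labels, so the classifier learns direction. A standalone numpy sketch, where featurize() is a stand-in for RCC's real kernel-mean featurizer:

import numpy as np

def featurize(a, b):
    # Any fixed-length embedding of an ordered pair works for the sketch.
    return np.array([np.mean(a), np.mean(b), np.corrcoef(a, b)[0, 1]])

rng = np.random.default_rng(0)
pairs = [(rng.normal(size=50), rng.normal(size=50)) for _ in range(4)]
y = np.array([1, -1, 1, -1])  # causal-direction labels

# Stack each pair in both orders; negate labels for the flipped copies.
train = np.vstack([np.array([featurize(a, b) for a, b in pairs]),
                   np.array([featurize(b, a) for a, b in pairs])])
labels = np.vstack((y, -y)).ravel()
print(train.shape, labels)  # (8, 3) [ 1 -1  1 -1 -1  1 -1  1]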
|
agoragames/chai
|
chai/stub.py
|
https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/stub.py#L271-L277
|
def spy(self):
'''
Add a spy to this stub. Return the spy.
'''
spy = Spy(self)
self._expectations.append(spy)
return spy
|
[
"def",
"spy",
"(",
"self",
")",
":",
"spy",
"=",
"Spy",
"(",
"self",
")",
"self",
".",
"_expectations",
".",
"append",
"(",
"spy",
")",
"return",
"spy"
] |
Add a spy to this stub. Return the spy.
|
[
"Add",
"a",
"spy",
"to",
"this",
"stub",
".",
"Return",
"the",
"spy",
"."
] |
python
|
train
|
google/grr
|
grr/server/grr_response_server/aff4.py
|
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4.py#L3010-L3025
|
def Read(self, length):
"""Read a block of data from the file."""
result = b""
# The total available size in the file
length = int(length)
length = min(length, self.size - self.offset)
while length > 0:
data = self._ReadPartial(length)
if not data:
break
length -= len(data)
result += data
return result
|
[
"def",
"Read",
"(",
"self",
",",
"length",
")",
":",
"result",
"=",
"b\"\"",
"# The total available size in the file",
"length",
"=",
"int",
"(",
"length",
")",
"length",
"=",
"min",
"(",
"length",
",",
"self",
".",
"size",
"-",
"self",
".",
"offset",
")",
"while",
"length",
">",
"0",
":",
"data",
"=",
"self",
".",
"_ReadPartial",
"(",
"length",
")",
"if",
"not",
"data",
":",
"break",
"length",
"-=",
"len",
"(",
"data",
")",
"result",
"+=",
"data",
"return",
"result"
] |
Read a block of data from the file.
|
[
"Read",
"a",
"block",
"of",
"data",
"from",
"the",
"file",
"."
] |
python
|
train
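Read() loops because _ReadPartial may return fewer bytes than asked for; partial reads are accumulated until the request is satisfied or the stream ends. The same pattern over an in-memory stream, as a self-contained sketch:

import io

class ChunkedReader:
    # Serves at most `chunk` bytes per partial read, like a remote blob.
    def __init__(self, data, chunk=4):
        self._stream = io.BytesIO(data)
        self._chunk = chunk
        self.size = len(data)
        self.offset = 0

    def _read_partial(self, length):
        data = self._stream.read(min(length, self._chunk))
        self.offset += len(data)
        return data

    def read(self, length):
        result = b""
        length = min(int(length), self.size - self.offset)
        while length > 0:
            data = self._read_partial(length)
            if not data:
                break  # premature EOF: return what was gathered
            length -= len(data)
            result += data
        return result

print(ChunkedReader(b"hello world").read(100))  # b'hello world'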
|
swevm/scaleio-py
|
scaleiopy/api/scaleio/provisioning/volume.py
|
https://github.com/swevm/scaleio-py/blob/d043a0137cb925987fd5c895a3210968ce1d9028/scaleiopy/api/scaleio/provisioning/volume.py#L49-L61
|
def is_valid_volsize(self,volsize):
"""
Convenience method that rounds input to valid ScaleIO Volume size (8GB increments)
:param volsize: Size in MB
:rtype int: Valid ScaleIO Volume size rounded to nearest 8GB increment above or equal to volsize
"""
if type(volsize) is int:
size_temp = divmod(volsize, 8192)
if size_temp[1] > 0: # If not on 8GB boundary
return int((1 + size_temp[0]) * 8192) # Always round to next 8GB increment
else:
return int(volsize)
|
[
"def",
"is_valid_volsize",
"(",
"self",
",",
"volsize",
")",
":",
"if",
"type",
"(",
"volsize",
")",
"is",
"int",
":",
"size_temp",
"=",
"divmod",
"(",
"volsize",
",",
"8192",
")",
"if",
"size_temp",
"[",
"1",
"]",
">",
"0",
":",
"# If not on 8GB boundary",
"return",
"int",
"(",
"(",
"1",
"+",
"size_temp",
"[",
"0",
"]",
")",
"*",
"8192",
")",
"# Always round to next 8GB increment",
"else",
":",
"return",
"int",
"(",
"volsize",
")"
] |
Convenience method that rounds input to valid ScaleIO Volume size (8GB increments)
:param volsize: Size in MB
:rtype int: Valid ScaleIO Volume size rounded to nearest 8GB increment above or equal to volsize
|
[
"Convenience",
"method",
"that",
"round",
"input",
"to",
"valid",
"ScaleIO",
"Volume",
"size",
"(",
"8GB",
"increments",
")",
":",
"param",
"volsize",
":",
"Size",
"in",
"MB",
":",
"rtype",
"int",
":",
"Valid",
"ScaleIO",
"Volume",
"size",
"rounded",
"to",
"nearest",
"8GB",
"increment",
"above",
"or",
"equal",
"to",
"volsize"
] |
python
|
train
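The rounding rule in isolation: sizes are given in MB and must land on an 8 GB (8192 MB) boundary, always rounding up:

def round_to_8gb(volsize_mb):
    # divmod splits the size into whole 8 GB increments plus a remainder;
    # any remainder pushes the result up to the next increment.
    whole, remainder = divmod(volsize_mb, 8192)
    return (whole + 1) * 8192 if remainder else volsize_mb

print(round_to_8gb(8192))   # 8192  (already on a boundary)
print(round_to_8gb(8193))   # 16384 (rounded up)
print(round_to_8gb(20000))  # 24576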
|
a1ezzz/wasp-general
|
wasp_general/command/template_command.py
|
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/command/template_command.py#L84-L97
|
def result_template(self, *command_tokens, **command_env):
""" Generate template result. command_tokens and command_env arguments are used for template
detailing
:param command_tokens: same as command_tokens in :meth:`.WCommandProto.match` and \
:meth:`.WCommandProto.exec` methods (so as :meth:`.WCommand._exec`)
:param command_env: same as command_env in :meth:`.WCommandProto.match` and \
:meth:`.WCommandProto.exec` methods (so as :meth:`.WCommand._exec`)
:return: WCommandResultTemplate
"""
result = WCommandResultTemplate(self.template())
result.update_context(**self.template_context())
return result
|
[
"def",
"result_template",
"(",
"self",
",",
"*",
"command_tokens",
",",
"*",
"*",
"command_env",
")",
":",
"result",
"=",
"WCommandResultTemplate",
"(",
"self",
".",
"template",
"(",
")",
")",
"result",
".",
"update_context",
"(",
"*",
"*",
"self",
".",
"template_context",
"(",
")",
")",
"return",
"result"
] |
Generate template result. command_tokens and command_env arguments are used for template
detailing
:param command_tokens: same as command_tokens in :meth:`.WCommandProto.match` and \
:meth:`.WCommandProto.exec` methods (so as :meth:`.WCommand._exec`)
:param command_env: same as command_env in :meth:`.WCommandProto.match` and \
:meth:`.WCommandProto.exec` methods (so as :meth:`.WCommand._exec`)
:return: WCommandResultTemplate
|
[
"Generate",
"template",
"result",
".",
"command_tokens",
"and",
"command_env",
"arguments",
"are",
"used",
"for",
"template",
"detailing"
] |
python
|
train
|
teepark/junction
|
junction/hub.py
|
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/hub.py#L265-L302
|
def send_rpc(self, service, routing_id, method, args=None, kwargs=None,
broadcast=False):
'''Send out an RPC request
:param service: the service name (the routing top level)
:type service: anything hash-able
:param routing_id:
The id used for routing within the registered handlers of the
service.
:type routing_id: int
:param method: the method name to call
:type method: string
:param args:
The positional arguments to send along with the request. If the
first argument is a generator, the request will be sent in chunks
:ref:`(more info) <chunked-messages>`.
:type args: tuple
:param kwargs: keyword arguments to send along with the request
:type kwargs: dict
:param broadcast:
if ``True``, send to every peer with a matching subscription
:type broadcast: bool
:returns:
a :class:`RPC <junction.futures.RPC>` object representing the
RPC and its future response.
:raises:
:class:`Unroutable <junction.errors.Unroutable>` if no peers are
registered to receive the message
'''
rpc = self._dispatcher.send_rpc(service, routing_id, method,
args or (), kwargs or {}, not broadcast)
if not rpc:
raise errors.Unroutable()
return rpc
|
[
"def",
"send_rpc",
"(",
"self",
",",
"service",
",",
"routing_id",
",",
"method",
",",
"args",
"=",
"None",
",",
"kwargs",
"=",
"None",
",",
"broadcast",
"=",
"False",
")",
":",
"rpc",
"=",
"self",
".",
"_dispatcher",
".",
"send_rpc",
"(",
"service",
",",
"routing_id",
",",
"method",
",",
"args",
"or",
"(",
")",
",",
"kwargs",
"or",
"{",
"}",
",",
"not",
"broadcast",
")",
"if",
"not",
"rpc",
":",
"raise",
"errors",
".",
"Unroutable",
"(",
")",
"return",
"rpc"
] |
Send out an RPC request
:param service: the service name (the routing top level)
:type service: anything hash-able
:param routing_id:
The id used for routing within the registered handlers of the
service.
:type routing_id: int
:param method: the method name to call
:type method: string
:param args:
The positional arguments to send along with the request. If the
first argument is a generator, the request will be sent in chunks
:ref:`(more info) <chunked-messages>`.
:type args: tuple
:param kwargs: keyword arguments to send along with the request
:type kwargs: dict
:param broadcast:
if ``True``, send to every peer with a matching subscription
:type broadcast: bool
:returns:
a :class:`RPC <junction.futures.RPC>` object representing the
RPC and its future response.
:raises:
:class:`Unroutable <junction.errors.Unroutable>` if no peers are
registered to receive the message
|
[
"Send",
"out",
"an",
"RPC",
"request"
] |
python
|
train
|
codemaniac/graphgit
|
graphgit/core.py
|
https://github.com/codemaniac/graphgit/blob/fd25d990ea1a32f328db07e4f57feafe03bd20a1/graphgit/core.py#L18-L96
|
def graph_repo(repo_url, output_loc, format='graphml'):
""" generates a graph for a git repository """
log = logging.getLogger("graphgit")
# repo type
local_repo = os.path.isabs(repo_url)
# repo name
repo_name = repo_url[repo_url.rfind('/')+1:repo_url.rfind('.git')] \
if not local_repo else repo_url[repo_url.rfind(os.sep)+1:]
log.info ("Processing git repository: %s" % repo_name)
# repo location
repo_loc = os.path.join(constants.REPO_DOWNLOAD_LOCATION, repo_name) \
if not local_repo else repo_url
# initialize repo
repo = None
gitt = git.Git()
try:
# check if repo is already cloned
# if local repo exists, assign
repo = git.Repo(repo_loc, odbt=git.GitCmdObjectDB)
log.info( "Repository already cloned... Going ahead and using it..." )
# TODO: check if repo is dirty and if so, update
except git.exc.NoSuchPathError:
# local repo doesn't exist. clone
try:
if local_repo:
raise Exception
log.info( "Cloning repository... this might take some time, please wait !" )
gitt.clone(repo_url, repo_loc)
log.info( "Git clone completed..." )
repo = git.Repo(repo_loc, odbt=git.GitCmdObjectDB)
except:
log.error( "Could not obtain repository: %s !" % repo_url )
sys.exit(1)
if repo is None:
log.error( "Could not obtain repository: %s !" % repo_url )
sys.exit(1)
# create a graph for the repo
G = nx.DiGraph()
# root node
G.add_node(repo_name, type=constants.NODE_TYPE_VALS['REPOSITORY'])
# branches & commits
for branch in repo.branches:
log.debug ("Processing branch %s" % branch)
G.add_node(branch, type=constants.NODE_TYPE_VALS['BRANCH'])
G.add_edge(repo_name, branch,
label=constants.EDGE_LABEL_VALS['REPOSITORY_BRANCH'])
for commit in repo.iter_commits(branch):
try:
author = safe_str(commit.author)
ts = commit.committed_date
sha = safe_str(commit)
log.debug ("%s> %s --[commit]--> %s" % (branch, author, sha))
G.add_node(author, type=constants.NODE_TYPE_VALS['PERSON'])
G.add_node(sha, ts=ts,
type=constants.NODE_TYPE_VALS['COMMIT'])
G.add_edge(author, sha,
label=constants.EDGE_LABEL_VALS['PERSON_COMMIT'])
G.add_edge(branch, sha,
label=constants.EDGE_LABEL_VALS['BRANCH_COMMIT'])
except LookupError:
log.warning('Could not process %s !' % commit)
continue
log.info( "Graph built ! saving..." )
# save graph
output_file_name = '%s.%s' % (repo_name, format)
output_file_loc = os.path.join(output_loc, output_file_name)
if format == 'graphml':
nx.write_graphml(G, output_file_loc, encoding='utf-8')
elif format == 'gexf':
nx.write_gexf(G, output_file_loc, encoding='utf-8')
else:
log.error( "Invalid output format: %s !" % format )
sys.exit(1)
log.info( "Saved to %s !" % output_file_loc )
|
[
"def",
"graph_repo",
"(",
"repo_url",
",",
"output_loc",
",",
"format",
"=",
"'graphml'",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"\"graphgit\"",
")",
"# repo type",
"local_repo",
"=",
"os",
".",
"path",
".",
"isabs",
"(",
"repo_url",
")",
"# repo name",
"repo_name",
"=",
"repo_url",
"[",
"repo_url",
".",
"rfind",
"(",
"'/'",
")",
"+",
"1",
":",
"repo_url",
".",
"rfind",
"(",
"'.git'",
")",
"]",
"if",
"not",
"local_repo",
"else",
"repo_url",
"[",
"repo_url",
".",
"rfind",
"(",
"os",
".",
"sep",
")",
"+",
"1",
":",
"]",
"log",
".",
"info",
"(",
"\"Processing git repository: %s\"",
"%",
"repo_name",
")",
"# repo location",
"repo_loc",
"=",
"os",
".",
"path",
".",
"join",
"(",
"constants",
".",
"REPO_DOWNLOAD_LOCATION",
",",
"repo_name",
")",
"if",
"not",
"local_repo",
"else",
"repo_url",
"# initialize repo",
"repo",
"=",
"None",
"gitt",
"=",
"git",
".",
"Git",
"(",
")",
"try",
":",
"# check if repo is already cloned",
"# if local repo exists, assign",
"repo",
"=",
"git",
".",
"Repo",
"(",
"repo_loc",
",",
"odbt",
"=",
"git",
".",
"GitCmdObjectDB",
")",
"log",
".",
"info",
"(",
"\"Repository already cloned... Going ahead and using it...\"",
")",
"# TODO: check if repo is dirty and if so, update",
"except",
"git",
".",
"exc",
".",
"NoSuchPathError",
":",
"# local repo doesn't exist. clone",
"try",
":",
"if",
"local_repo",
":",
"raise",
"Exception",
"log",
".",
"info",
"(",
"\"Cloning repository... this might take some time, please wait !\"",
")",
"gitt",
".",
"clone",
"(",
"repo_url",
",",
"repo_loc",
")",
"log",
".",
"info",
"(",
"\"Git clone completed...\"",
")",
"repo",
"=",
"git",
".",
"Repo",
"(",
"repo_loc",
",",
"odbt",
"=",
"git",
".",
"GitCmdObjectDB",
")",
"except",
":",
"log",
".",
"error",
"(",
"\"Could not obtain repository: %s !\"",
"%",
"repo_url",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"repo",
"is",
"None",
":",
"log",
".",
"error",
"(",
"\"Could not obtain repository: %s !\"",
"%",
"repo_url",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# create a graph for the repo",
"G",
"=",
"nx",
".",
"DiGraph",
"(",
")",
"# root node",
"G",
".",
"add_node",
"(",
"repo_name",
",",
"type",
"=",
"constants",
".",
"NODE_TYPE_VALS",
"[",
"'REPOSITORY'",
"]",
")",
"# branches & commits",
"for",
"branch",
"in",
"repo",
".",
"branches",
":",
"log",
".",
"debug",
"(",
"\"Processing branch %s\"",
"%",
"branch",
")",
"G",
".",
"add_node",
"(",
"branch",
",",
"type",
"=",
"constants",
".",
"NODE_TYPE_VALS",
"[",
"'BRANCH'",
"]",
")",
"G",
".",
"add_edge",
"(",
"repo_name",
",",
"branch",
",",
"label",
"=",
"constants",
".",
"EDGE_LABEL_VALS",
"[",
"'REPOSITORY_BRANCH'",
"]",
")",
"for",
"commit",
"in",
"repo",
".",
"iter_commits",
"(",
"branch",
")",
":",
"try",
":",
"author",
"=",
"safe_str",
"(",
"commit",
".",
"author",
")",
"ts",
"=",
"commit",
".",
"committed_date",
"sha",
"=",
"safe_str",
"(",
"commit",
")",
"log",
".",
"debug",
"(",
"\"%s> %s --[commit]--> %s\"",
"%",
"(",
"branch",
",",
"author",
",",
"sha",
")",
")",
"G",
".",
"add_node",
"(",
"author",
",",
"type",
"=",
"constants",
".",
"NODE_TYPE_VALS",
"[",
"'PERSON'",
"]",
")",
"G",
".",
"add_node",
"(",
"sha",
",",
"ts",
"=",
"ts",
",",
"type",
"=",
"constants",
".",
"NODE_TYPE_VALS",
"[",
"'COMMIT'",
"]",
")",
"G",
".",
"add_edge",
"(",
"author",
",",
"sha",
",",
"label",
"=",
"constants",
".",
"EDGE_LABEL_VALS",
"[",
"'PERSON_COMMIT'",
"]",
")",
"G",
".",
"add_edge",
"(",
"branch",
",",
"sha",
",",
"label",
"=",
"constants",
".",
"EDGE_LABEL_VALS",
"[",
"'BRANCH_COMMIT'",
"]",
")",
"except",
"LookupError",
":",
"log",
".",
"warning",
"(",
"'Could not process %s !'",
"%",
"commit",
")",
"continue",
"log",
".",
"info",
"(",
"\"Graph built ! saving...\"",
")",
"# save graph",
"output_file_name",
"=",
"'%s.%s'",
"%",
"(",
"repo_name",
",",
"format",
")",
"output_file_loc",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_loc",
",",
"output_file_name",
")",
"if",
"format",
"==",
"'graphml'",
":",
"nx",
".",
"write_graphml",
"(",
"G",
",",
"output_file_loc",
",",
"encoding",
"=",
"'utf-8'",
")",
"elif",
"format",
"==",
"'gexf'",
":",
"nx",
".",
"write_gexf",
"(",
"G",
",",
"output_file_loc",
",",
"encoding",
"=",
"'utf-8'",
")",
"else",
":",
"log",
".",
"error",
"(",
"\"Invalid output format: %s !\"",
"%",
"format",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"log",
".",
"info",
"(",
"\"Saved to %s !\"",
"%",
"output_file_loc",
")"
] |
generates a graph for a git repository
|
[
"generates",
"a",
"graph",
"for",
"a",
"git",
"repository"
] |
python
|
train
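The repo-name slicing used in graph_repo(), runnable on its own; like the original, it assumes a remote URL ends in '.git':

import os

def repo_name(repo_url):
    # Local absolute path: take the final path component.
    if os.path.isabs(repo_url):
        return repo_url[repo_url.rfind(os.sep) + 1:]
    # Remote URL: slice between the last '/' and the '.git' suffix.
    return repo_url[repo_url.rfind('/') + 1:repo_url.rfind('.git')]

print(repo_name('https://github.com/codemaniac/graphgit.git'))  # graphgit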
|
saltstack/salt
|
salt/modules/status.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/status.py#L1254-L1431
|
def netdev():
'''
.. versionchanged:: 2016.3.2
Return the network device stats for this minion
.. versionchanged:: 2016.11.4
Added support for AIX
CLI Example:
.. code-block:: bash
salt '*' status.netdev
'''
def linux_netdev():
'''
linux specific implementation of netdev
'''
ret = {}
try:
with salt.utils.files.fopen('/proc/net/dev', 'r') as fp_:
stats = salt.utils.stringutils.to_unicode(fp_.read())
except IOError:
pass
else:
for line in stats.splitlines():
if not line:
continue
if line.find(':') < 0:
continue
comps = line.split()
# Fix lines like eth0:9999..'
comps[0] = line.split(':')[0].strip()
# Support lines both like eth0:999 and eth0: 9999
comps.insert(1, line.split(':')[1].strip().split()[0])
ret[comps[0]] = {'iface': comps[0],
'rx_bytes': _number(comps[2]),
'rx_compressed': _number(comps[8]),
'rx_drop': _number(comps[5]),
'rx_errs': _number(comps[4]),
'rx_fifo': _number(comps[6]),
'rx_frame': _number(comps[7]),
'rx_multicast': _number(comps[9]),
'rx_packets': _number(comps[3]),
'tx_bytes': _number(comps[10]),
'tx_carrier': _number(comps[16]),
'tx_colls': _number(comps[15]),
'tx_compressed': _number(comps[17]),
'tx_drop': _number(comps[13]),
'tx_errs': _number(comps[12]),
'tx_fifo': _number(comps[14]),
'tx_packets': _number(comps[11])}
return ret
def freebsd_netdev():
'''
freebsd specific implementation of netdev
'''
_dict_tree = lambda: collections.defaultdict(_dict_tree)
ret = _dict_tree()
netstat = __salt__['cmd.run']('netstat -i -n -4 -b -d').splitlines()
netstat += __salt__['cmd.run']('netstat -i -n -6 -b -d').splitlines()[1:]
header = netstat[0].split()
for line in netstat[1:]:
comps = line.split()
for i in range(4, 13): # The columns we want
ret[comps[0]][comps[2]][comps[3]][header[i]] = _number(comps[i])
return ret
def sunos_netdev():
'''
sunos specific implementation of netdev
'''
ret = {}
##NOTE: we cannot use hwaddr_interfaces here, so we grab both ip4 and ip6
for dev in __grains__['ip4_interfaces'].keys() + __grains__['ip6_interfaces']:
# fetch device info
netstat_ipv4 = __salt__['cmd.run']('netstat -i -I {dev} -n -f inet'.format(dev=dev)).splitlines()
netstat_ipv6 = __salt__['cmd.run']('netstat -i -I {dev} -n -f inet6'.format(dev=dev)).splitlines()
# prepare data
netstat_ipv4[0] = netstat_ipv4[0].split()
netstat_ipv4[1] = netstat_ipv4[1].split()
netstat_ipv6[0] = netstat_ipv6[0].split()
netstat_ipv6[1] = netstat_ipv6[1].split()
# add data
ret[dev] = {}
for i in range(len(netstat_ipv4[0])-1):
if netstat_ipv4[0][i] == 'Name':
continue
if netstat_ipv4[0][i] in ['Address', 'Net/Dest']:
ret[dev]['IPv4 {field}'.format(field=netstat_ipv4[0][i])] = netstat_ipv4[1][i]
else:
ret[dev][netstat_ipv4[0][i]] = _number(netstat_ipv4[1][i])
for i in range(len(netstat_ipv6[0])-1):
if netstat_ipv6[0][i] == 'Name':
continue
if netstat_ipv6[0][i] in ['Address', 'Net/Dest']:
ret[dev]['IPv6 {field}'.format(field=netstat_ipv6[0][i])] = netstat_ipv6[1][i]
else:
ret[dev][netstat_ipv6[0][i]] = _number(netstat_ipv6[1][i])
return ret
def aix_netdev():
'''
AIX specific implementation of netdev
'''
ret = {}
fields = []
procn = None
for dev in __grains__['ip4_interfaces'].keys() + __grains__['ip6_interfaces'].keys():
# fetch device info
#root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -i -n -I en0 -f inet6
#Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll
#en0 1500 link#3 e2.eb.32.42.84.c 10029668 0 446490 0 0
#en0 1500 172.29.128 172.29.149.95 10029668 0 446490 0 0
#root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -i -n -I en0 -f inet6
#Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll
#en0 1500 link#3 e2.eb.32.42.84.c 10029731 0 446499 0 0
netstat_ipv4 = __salt__['cmd.run']('netstat -i -n -I {dev} -f inet'.format(dev=dev)).splitlines()
netstat_ipv6 = __salt__['cmd.run']('netstat -i -n -I {dev} -f inet6'.format(dev=dev)).splitlines()
# add data
ret[dev] = []
for line in netstat_ipv4:
if line.startswith('Name'):
fields = line.split()
continue
comps = line.split()
if len(comps) < 3:
raise CommandExecutionError('Insufficient data returned by command to process \'{0}\''.format(line))
if comps[2].startswith('link'):
continue
procn = len(ret[dev])
ret[dev].append({})
ret[dev][procn]['ipv4'] = {}
for i in range(1, len(fields)):
if len(comps) > i:
ret[dev][procn]['ipv4'][fields[i]] = comps[i]
for line in netstat_ipv6:
if line.startswith('Name'):
fields = line.split()
continue
comps = line.split()
if len(comps) < 3:
raise CommandExecutionError('Insufficient data returned by command to process \'{0}\''.format(line))
if comps[2].startswith('link'):
continue
procn = len(ret[dev])
ret[dev].append({})
ret[dev][procn]['ipv6'] = {}
for i in range(1, len(fields)):
if len(comps) > i:
ret[dev][procn]['ipv6'][fields[i]] = comps[i]
return ret
# dict that returns a function that does the right thing per platform
get_version = {
'Linux': linux_netdev,
'FreeBSD': freebsd_netdev,
'SunOS': sunos_netdev,
'AIX': aix_netdev,
}
errmsg = 'This method is unsupported on the current operating system!'
return get_version.get(__grains__['kernel'], lambda: errmsg)()
|
[
"def",
"netdev",
"(",
")",
":",
"def",
"linux_netdev",
"(",
")",
":",
"'''\n linux specific implementation of netdev\n '''",
"ret",
"=",
"{",
"}",
"try",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"'/proc/net/dev'",
",",
"'r'",
")",
"as",
"fp_",
":",
"stats",
"=",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_unicode",
"(",
"fp_",
".",
"read",
"(",
")",
")",
"except",
"IOError",
":",
"pass",
"else",
":",
"for",
"line",
"in",
"stats",
".",
"splitlines",
"(",
")",
":",
"if",
"not",
"line",
":",
"continue",
"if",
"line",
".",
"find",
"(",
"':'",
")",
"<",
"0",
":",
"continue",
"comps",
"=",
"line",
".",
"split",
"(",
")",
"# Fix lines like eth0:9999..'",
"comps",
"[",
"0",
"]",
"=",
"line",
".",
"split",
"(",
"':'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"# Support lines both like eth0:999 and eth0: 9999",
"comps",
".",
"insert",
"(",
"1",
",",
"line",
".",
"split",
"(",
"':'",
")",
"[",
"1",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"[",
"0",
"]",
")",
"ret",
"[",
"comps",
"[",
"0",
"]",
"]",
"=",
"{",
"'iface'",
":",
"comps",
"[",
"0",
"]",
",",
"'rx_bytes'",
":",
"_number",
"(",
"comps",
"[",
"2",
"]",
")",
",",
"'rx_compressed'",
":",
"_number",
"(",
"comps",
"[",
"8",
"]",
")",
",",
"'rx_drop'",
":",
"_number",
"(",
"comps",
"[",
"5",
"]",
")",
",",
"'rx_errs'",
":",
"_number",
"(",
"comps",
"[",
"4",
"]",
")",
",",
"'rx_fifo'",
":",
"_number",
"(",
"comps",
"[",
"6",
"]",
")",
",",
"'rx_frame'",
":",
"_number",
"(",
"comps",
"[",
"7",
"]",
")",
",",
"'rx_multicast'",
":",
"_number",
"(",
"comps",
"[",
"9",
"]",
")",
",",
"'rx_packets'",
":",
"_number",
"(",
"comps",
"[",
"3",
"]",
")",
",",
"'tx_bytes'",
":",
"_number",
"(",
"comps",
"[",
"10",
"]",
")",
",",
"'tx_carrier'",
":",
"_number",
"(",
"comps",
"[",
"16",
"]",
")",
",",
"'tx_colls'",
":",
"_number",
"(",
"comps",
"[",
"15",
"]",
")",
",",
"'tx_compressed'",
":",
"_number",
"(",
"comps",
"[",
"17",
"]",
")",
",",
"'tx_drop'",
":",
"_number",
"(",
"comps",
"[",
"13",
"]",
")",
",",
"'tx_errs'",
":",
"_number",
"(",
"comps",
"[",
"12",
"]",
")",
",",
"'tx_fifo'",
":",
"_number",
"(",
"comps",
"[",
"14",
"]",
")",
",",
"'tx_packets'",
":",
"_number",
"(",
"comps",
"[",
"11",
"]",
")",
"}",
"return",
"ret",
"def",
"freebsd_netdev",
"(",
")",
":",
"'''\n freebsd specific implementation of netdev\n '''",
"_dict_tree",
"=",
"lambda",
":",
"collections",
".",
"defaultdict",
"(",
"_dict_tree",
")",
"ret",
"=",
"_dict_tree",
"(",
")",
"netstat",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'netstat -i -n -4 -b -d'",
")",
".",
"splitlines",
"(",
")",
"netstat",
"+=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'netstat -i -n -6 -b -d'",
")",
".",
"splitlines",
"(",
")",
"[",
"1",
":",
"]",
"header",
"=",
"netstat",
"[",
"0",
"]",
".",
"split",
"(",
")",
"for",
"line",
"in",
"netstat",
"[",
"1",
":",
"]",
":",
"comps",
"=",
"line",
".",
"split",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"4",
",",
"13",
")",
":",
"# The columns we want",
"ret",
"[",
"comps",
"[",
"0",
"]",
"]",
"[",
"comps",
"[",
"2",
"]",
"]",
"[",
"comps",
"[",
"3",
"]",
"]",
"[",
"header",
"[",
"i",
"]",
"]",
"=",
"_number",
"(",
"comps",
"[",
"i",
"]",
")",
"return",
"ret",
"def",
"sunos_netdev",
"(",
")",
":",
"'''\n sunos specific implementation of netdev\n '''",
"ret",
"=",
"{",
"}",
"##NOTE: we cannot use hwaddr_interfaces here, so we grab both ip4 and ip6",
"for",
"dev",
"in",
"__grains__",
"[",
"'ip4_interfaces'",
"]",
".",
"keys",
"(",
")",
"+",
"__grains__",
"[",
"'ip6_interfaces'",
"]",
":",
"# fetch device info",
"netstat_ipv4",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'netstat -i -I {dev} -n -f inet'",
".",
"format",
"(",
"dev",
"=",
"dev",
")",
")",
".",
"splitlines",
"(",
")",
"netstat_ipv6",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'netstat -i -I {dev} -n -f inet6'",
".",
"format",
"(",
"dev",
"=",
"dev",
")",
")",
".",
"splitlines",
"(",
")",
"# prepare data",
"netstat_ipv4",
"[",
"0",
"]",
"=",
"netstat_ipv4",
"[",
"0",
"]",
".",
"split",
"(",
")",
"netstat_ipv4",
"[",
"1",
"]",
"=",
"netstat_ipv4",
"[",
"1",
"]",
".",
"split",
"(",
")",
"netstat_ipv6",
"[",
"0",
"]",
"=",
"netstat_ipv6",
"[",
"0",
"]",
".",
"split",
"(",
")",
"netstat_ipv6",
"[",
"1",
"]",
"=",
"netstat_ipv6",
"[",
"1",
"]",
".",
"split",
"(",
")",
"# add data",
"ret",
"[",
"dev",
"]",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"netstat_ipv4",
"[",
"0",
"]",
")",
"-",
"1",
")",
":",
"if",
"netstat_ipv4",
"[",
"0",
"]",
"[",
"i",
"]",
"==",
"'Name'",
":",
"continue",
"if",
"netstat_ipv4",
"[",
"0",
"]",
"[",
"i",
"]",
"in",
"[",
"'Address'",
",",
"'Net/Dest'",
"]",
":",
"ret",
"[",
"dev",
"]",
"[",
"'IPv4 {field}'",
".",
"format",
"(",
"field",
"=",
"netstat_ipv4",
"[",
"0",
"]",
"[",
"i",
"]",
")",
"]",
"=",
"netstat_ipv4",
"[",
"1",
"]",
"[",
"i",
"]",
"else",
":",
"ret",
"[",
"dev",
"]",
"[",
"netstat_ipv4",
"[",
"0",
"]",
"[",
"i",
"]",
"]",
"=",
"_number",
"(",
"netstat_ipv4",
"[",
"1",
"]",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"netstat_ipv6",
"[",
"0",
"]",
")",
"-",
"1",
")",
":",
"if",
"netstat_ipv6",
"[",
"0",
"]",
"[",
"i",
"]",
"==",
"'Name'",
":",
"continue",
"if",
"netstat_ipv6",
"[",
"0",
"]",
"[",
"i",
"]",
"in",
"[",
"'Address'",
",",
"'Net/Dest'",
"]",
":",
"ret",
"[",
"dev",
"]",
"[",
"'IPv6 {field}'",
".",
"format",
"(",
"field",
"=",
"netstat_ipv6",
"[",
"0",
"]",
"[",
"i",
"]",
")",
"]",
"=",
"netstat_ipv6",
"[",
"1",
"]",
"[",
"i",
"]",
"else",
":",
"ret",
"[",
"dev",
"]",
"[",
"netstat_ipv6",
"[",
"0",
"]",
"[",
"i",
"]",
"]",
"=",
"_number",
"(",
"netstat_ipv6",
"[",
"1",
"]",
"[",
"i",
"]",
")",
"return",
"ret",
"def",
"aix_netdev",
"(",
")",
":",
"'''\n AIX specific implementation of netdev\n '''",
"ret",
"=",
"{",
"}",
"fields",
"=",
"[",
"]",
"procn",
"=",
"None",
"for",
"dev",
"in",
"__grains__",
"[",
"'ip4_interfaces'",
"]",
".",
"keys",
"(",
")",
"+",
"__grains__",
"[",
"'ip6_interfaces'",
"]",
".",
"keys",
"(",
")",
":",
"# fetch device info",
"#root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -i -n -I en0 -f inet6",
"#Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll",
"#en0 1500 link#3 e2.eb.32.42.84.c 10029668 0 446490 0 0",
"#en0 1500 172.29.128 172.29.149.95 10029668 0 446490 0 0",
"#root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -i -n -I en0 -f inet6",
"#Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll",
"#en0 1500 link#3 e2.eb.32.42.84.c 10029731 0 446499 0 0",
"netstat_ipv4",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'netstat -i -n -I {dev} -f inet'",
".",
"format",
"(",
"dev",
"=",
"dev",
")",
")",
".",
"splitlines",
"(",
")",
"netstat_ipv6",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'netstat -i -n -I {dev} -f inet6'",
".",
"format",
"(",
"dev",
"=",
"dev",
")",
")",
".",
"splitlines",
"(",
")",
"# add data",
"ret",
"[",
"dev",
"]",
"=",
"[",
"]",
"for",
"line",
"in",
"netstat_ipv4",
":",
"if",
"line",
".",
"startswith",
"(",
"'Name'",
")",
":",
"fields",
"=",
"line",
".",
"split",
"(",
")",
"continue",
"comps",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"comps",
")",
"<",
"3",
":",
"raise",
"CommandExecutionError",
"(",
"'Insufficient data returned by command to process \\'{0}\\''",
".",
"format",
"(",
"line",
")",
")",
"if",
"comps",
"[",
"2",
"]",
".",
"startswith",
"(",
"'link'",
")",
":",
"continue",
"procn",
"=",
"len",
"(",
"ret",
"[",
"dev",
"]",
")",
"ret",
"[",
"dev",
"]",
".",
"append",
"(",
"{",
"}",
")",
"ret",
"[",
"dev",
"]",
"[",
"procn",
"]",
"[",
"'ipv4'",
"]",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"fields",
")",
")",
":",
"if",
"len",
"(",
"comps",
")",
">",
"i",
":",
"ret",
"[",
"dev",
"]",
"[",
"procn",
"]",
"[",
"'ipv4'",
"]",
"[",
"fields",
"[",
"i",
"]",
"]",
"=",
"comps",
"[",
"i",
"]",
"for",
"line",
"in",
"netstat_ipv6",
":",
"if",
"line",
".",
"startswith",
"(",
"'Name'",
")",
":",
"fields",
"=",
"line",
".",
"split",
"(",
")",
"continue",
"comps",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"comps",
")",
"<",
"3",
":",
"raise",
"CommandExecutionError",
"(",
"'Insufficient data returned by command to process \\'{0}\\''",
".",
"format",
"(",
"line",
")",
")",
"if",
"comps",
"[",
"2",
"]",
".",
"startswith",
"(",
"'link'",
")",
":",
"continue",
"procn",
"=",
"len",
"(",
"ret",
"[",
"dev",
"]",
")",
"ret",
"[",
"dev",
"]",
".",
"append",
"(",
"{",
"}",
")",
"ret",
"[",
"dev",
"]",
"[",
"procn",
"]",
"[",
"'ipv6'",
"]",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"fields",
")",
")",
":",
"if",
"len",
"(",
"comps",
")",
">",
"i",
":",
"ret",
"[",
"dev",
"]",
"[",
"procn",
"]",
"[",
"'ipv6'",
"]",
"[",
"fields",
"[",
"i",
"]",
"]",
"=",
"comps",
"[",
"i",
"]",
"return",
"ret",
"# dict that returns a function that does the right thing per platform",
"get_version",
"=",
"{",
"'Linux'",
":",
"linux_netdev",
",",
"'FreeBSD'",
":",
"freebsd_netdev",
",",
"'SunOS'",
":",
"sunos_netdev",
",",
"'AIX'",
":",
"aix_netdev",
",",
"}",
"errmsg",
"=",
"'This method is unsupported on the current operating system!'",
"return",
"get_version",
".",
"get",
"(",
"__grains__",
"[",
"'kernel'",
"]",
",",
"lambda",
":",
"errmsg",
")",
"(",
")"
] |
.. versionchanged:: 2016.3.2
Return the network device stats for this minion
.. versionchanged:: 2016.11.4
Added support for AIX
CLI Example:
.. code-block:: bash
salt '*' status.netdev
|
[
"..",
"versionchanged",
"::",
"2016",
".",
"3",
".",
"2",
"Return",
"the",
"network",
"device",
"stats",
"for",
"this",
"minion"
] |
python
|
train
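The platform dispatch at the end of netdev(), reduced to its core: a dict maps the kernel grain to a callable, and dict.get() supplies a fallback lambda so the lookup itself never raises:

def linux_impl():
    return {'eth0': {'rx_bytes': 0}}  # stand-in for the real parser

handlers = {'Linux': linux_impl}
errmsg = 'This method is unsupported on the current operating system!'

for kernel in ('Linux', 'Plan9'):
    # Unknown keys fall through to the lambda, which returns the message.
    print(handlers.get(kernel, lambda: errmsg)())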
|
openstates/billy
|
billy/models/bills.py
|
https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/models/bills.py#L291-L300
|
def is_probably_a_voice_vote(self):
'''Guess whether this vote is a "voice vote".'''
if '+voice_vote' in self:
return True
if '+vote_type' in self:
if self['+vote_type'] == 'Voice':
return True
if 'voice vote' in self['motion'].lower():
return True
return False
|
[
"def",
"is_probably_a_voice_vote",
"(",
"self",
")",
":",
"if",
"'+voice_vote'",
"in",
"self",
":",
"return",
"True",
"if",
"'+vote_type'",
"in",
"self",
":",
"if",
"self",
"[",
"'+vote_type'",
"]",
"==",
"'Voice'",
":",
"return",
"True",
"if",
"'voice vote'",
"in",
"self",
"[",
"'motion'",
"]",
".",
"lower",
"(",
")",
":",
"return",
"True",
"return",
"False"
] |
Guess whether this vote is a "voice vote".
|
[
"Guess",
"whether",
"this",
"vote",
"is",
"a",
"voice",
"vote",
"."
] |
python
|
train
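The same heuristic as a standalone function over plain dicts; in billy the vote object is itself dict-like, which is why `'+voice_vote' in self` works. dict.get() collapses the original's two nested checks:

def is_probably_a_voice_vote(vote):
    if '+voice_vote' in vote:
        return True
    if vote.get('+vote_type') == 'Voice':
        return True
    if 'voice vote' in vote.get('motion', '').lower():
        return True
    return False

print(is_probably_a_voice_vote({'motion': 'Passed by Voice Vote'}))  # True
print(is_probably_a_voice_vote({'motion': 'Roll call'}))             # False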
|
xiaocong/uiautomator
|
uiautomator/__init__.py
|
https://github.com/xiaocong/uiautomator/blob/9a0c892ffd056713f91aa2153d1533c5b0553a1c/uiautomator/__init__.py#L767-L788
|
def press(self):
'''
press key via name or key code. Supported key name includes:
home, back, left, right, up, down, center, menu, search, enter,
delete(or del), recent(recent apps), volume_up, volume_down,
volume_mute, camera, power.
Usage:
d.press.back() # press back key
d.press.menu() # press menu key
d.press(89) # press keycode
'''
@param_to_property(
key=["home", "back", "left", "right", "up", "down", "center",
"menu", "search", "enter", "delete", "del", "recent",
"volume_up", "volume_down", "volume_mute", "camera", "power"]
)
def _press(key, meta=None):
if isinstance(key, int):
return self.server.jsonrpc.pressKeyCode(key, meta) if meta else self.server.jsonrpc.pressKeyCode(key)
else:
return self.server.jsonrpc.pressKey(str(key))
return _press
|
[
"def",
"press",
"(",
"self",
")",
":",
"@",
"param_to_property",
"(",
"key",
"=",
"[",
"\"home\"",
",",
"\"back\"",
",",
"\"left\"",
",",
"\"right\"",
",",
"\"up\"",
",",
"\"down\"",
",",
"\"center\"",
",",
"\"menu\"",
",",
"\"search\"",
",",
"\"enter\"",
",",
"\"delete\"",
",",
"\"del\"",
",",
"\"recent\"",
",",
"\"volume_up\"",
",",
"\"volume_down\"",
",",
"\"volume_mute\"",
",",
"\"camera\"",
",",
"\"power\"",
"]",
")",
"def",
"_press",
"(",
"key",
",",
"meta",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"int",
")",
":",
"return",
"self",
".",
"server",
".",
"jsonrpc",
".",
"pressKeyCode",
"(",
"key",
",",
"meta",
")",
"if",
"meta",
"else",
"self",
".",
"server",
".",
"jsonrpc",
".",
"pressKeyCode",
"(",
"key",
")",
"else",
":",
"return",
"self",
".",
"server",
".",
"jsonrpc",
".",
"pressKey",
"(",
"str",
"(",
"key",
")",
")",
"return",
"_press"
] |
press key via name or key code. Supported key name includes:
home, back, left, right, up, down, center, menu, search, enter,
delete(or del), recent(recent apps), volume_up, volume_down,
volume_mute, camera, power.
Usage:
d.press.back() # press back key
d.press.menu() # press menu key
d.press(89) # press keycode
|
[
"press",
"key",
"via",
"name",
"or",
"key",
"code",
".",
"Supported",
"key",
"name",
"includes",
":",
"home",
"back",
"left",
"right",
"up",
"down",
"center",
"menu",
"search",
"enter",
"delete",
"(",
"or",
"del",
")",
"recent",
"(",
"recent",
"apps",
")",
"volume_up",
"volume_down",
"volume_mute",
"camera",
"power",
".",
"Usage",
":",
"d",
".",
"press",
".",
"back",
"()",
"#",
"press",
"back",
"key",
"d",
".",
"press",
".",
"menu",
"()",
"#",
"press",
"home",
"key",
"d",
".",
"press",
"(",
"89",
")",
"#",
"press",
"keycode"
] |
python
|
train
|
alfred82santa/dirty-loader
|
dirty_loader/__init__.py
|
https://github.com/alfred82santa/dirty-loader/blob/0d7895e3c84a0c197d804ce31305c5cba4c512e4/dirty_loader/__init__.py#L81-L99
|
def load_class(self, classname):
"""
Loads a class looking for it in each module registered.
:param classname: Class name you want to load.
:type classname: str
:return: Class object
:rtype: type
"""
module_list = self._get_module_list()
for module in module_list:
try:
return import_class(classname, module.__name__)
except (AttributeError, ImportError):
pass
raise ImportError("Class '{0}' could not be loaded.".format(classname))
|
[
"def",
"load_class",
"(",
"self",
",",
"classname",
")",
":",
"module_list",
"=",
"self",
".",
"_get_module_list",
"(",
")",
"for",
"module",
"in",
"module_list",
":",
"try",
":",
"return",
"import_class",
"(",
"classname",
",",
"module",
".",
"__name__",
")",
"except",
"(",
"AttributeError",
",",
"ImportError",
")",
":",
"pass",
"raise",
"ImportError",
"(",
"\"Class '{0}' could not be loaded.\"",
".",
"format",
"(",
"classname",
")",
")"
] |
Loads a class looking for it in each module registered.
:param classname: Class name you want to load.
:type classname: str
:return: Class object
:rtype: type
|
[
"Loads",
"a",
"class",
"looking",
"for",
"it",
"in",
"each",
"module",
"registered",
"."
] |
python
|
train
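import_class itself is not shown in this snippet; a plausible minimal version, assuming it resolves a class name inside an already-importable module, could look like the sketch below (the real dirty-loader helper may handle more cases):

import importlib

def import_class(classname, module_name):
    module = importlib.import_module(module_name)
    # AttributeError here is what load_class() above catches and skips.
    return getattr(module, classname)

print(import_class('OrderedDict', 'collections'))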
|
taskcluster/taskcluster-client.py
|
taskcluster/aio/ec2manager.py
|
https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/ec2manager.py#L36-L47
|
async def listWorkerTypes(self, *args, **kwargs):
"""
See the list of worker types which are known to be managed
This method is only for debugging the ec2-manager
This method gives output: ``v1/list-worker-types.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["listWorkerTypes"], *args, **kwargs)
|
[
"async",
"def",
"listWorkerTypes",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"await",
"self",
".",
"_makeApiCall",
"(",
"self",
".",
"funcinfo",
"[",
"\"listWorkerTypes\"",
"]",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
See the list of worker types which are known to be managed
This method is only for debugging the ec2-manager
This method gives output: ``v1/list-worker-types.json#``
This method is ``experimental``
|
[
"See",
"the",
"list",
"of",
"worker",
"types",
"which",
"are",
"known",
"to",
"be",
"managed"
] |
python
|
train
|
sbg/sevenbridges-python
|
sevenbridges/meta/transformer.py
|
https://github.com/sbg/sevenbridges-python/blob/f62640d1018d959f0b686f2dbe5e183085336607/sevenbridges/meta/transformer.py#L38-L51
|
def to_task(task):
"""Serializes task to id string
:param task: object to serialize
:return: string id
"""
from sevenbridges.models.task import Task
if not task:
raise SbgError('Task is required!')
elif isinstance(task, Task):
return task.id
elif isinstance(task, six.string_types):
return task
else:
raise SbgError('Invalid task parameter!')
|
[
"def",
"to_task",
"(",
"task",
")",
":",
"from",
"sevenbridges",
".",
"models",
".",
"task",
"import",
"Task",
"if",
"not",
"task",
":",
"raise",
"SbgError",
"(",
"'Task is required!'",
")",
"elif",
"isinstance",
"(",
"task",
",",
"Task",
")",
":",
"return",
"task",
".",
"id",
"elif",
"isinstance",
"(",
"task",
",",
"six",
".",
"string_types",
")",
":",
"return",
"task",
"else",
":",
"raise",
"SbgError",
"(",
"'Invalid task parameter!'",
")"
] |
Serializes task to id string
:param task: object to serialize
:return: string id
|
[
"Serializes",
"task",
"to",
"id",
"string",
":",
"param",
"task",
":",
"object",
"to",
"serialize",
":",
"return",
":",
"string",
"id"
] |
python
|
train
|
rwl/pylon
|
pylon/solver.py
|
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/solver.py#L726-L732
|
def _consfcn(self, x):
""" Evaluates nonlinear constraints and their Jacobian for OPF.
"""
h, g = self._gh(x)
dh, dg = self._dgh(x)
return h, g, dh, dg
|
[
"def",
"_consfcn",
"(",
"self",
",",
"x",
")",
":",
"h",
",",
"g",
"=",
"self",
".",
"_gh",
"(",
"x",
")",
"dh",
",",
"dg",
"=",
"self",
".",
"_dgh",
"(",
"x",
")",
"return",
"h",
",",
"g",
",",
"dh",
",",
"dg"
] |
Evaluates nonlinear constraints and their Jacobian for OPF.
|
[
"Evaluates",
"nonlinear",
"constraints",
"and",
"their",
"Jacobian",
"for",
"OPF",
"."
] |
python
|
train
|
aholkner/bacon
|
native/Vendor/FreeType/src/tools/docmaker/tohtml.py
|
https://github.com/aholkner/bacon/blob/edf3810dcb211942d392a8637945871399b0650d/native/Vendor/FreeType/src/tools/docmaker/tohtml.py#L311-L320
|
def make_html_items( self, items ):
""" convert a field's content into some valid HTML """
lines = []
for item in items:
if item.lines:
lines.append( self.make_html_code( item.lines ) )
else:
lines.append( self.make_html_para( item.words ) )
return string.join( lines, '\n' )
|
[
"def",
"make_html_items",
"(",
"self",
",",
"items",
")",
":",
"lines",
"=",
"[",
"]",
"for",
"item",
"in",
"items",
":",
"if",
"item",
".",
"lines",
":",
"lines",
".",
"append",
"(",
"self",
".",
"make_html_code",
"(",
"item",
".",
"lines",
")",
")",
"else",
":",
"lines",
".",
"append",
"(",
"self",
".",
"make_html_para",
"(",
"item",
".",
"words",
")",
")",
"return",
"string",
".",
"join",
"(",
"lines",
",",
"'\\n'",
")"
] |
convert a field's content into some valid HTML
|
[
"convert",
"a",
"field",
"s",
"content",
"into",
"some",
"valid",
"HTML"
] |
python
|
test
|
hermanschaaf/mafan
|
mafan/hanzidentifier/hanzidentifier.py
|
https://github.com/hermanschaaf/mafan/blob/373ddf299aeb2bd8413bf921c71768af7a8170ea/mafan/hanzidentifier/hanzidentifier.py#L38-L60
|
def identify(text):
"""Identify whether a string is simplified or traditional Chinese.
Returns:
None: if there are no recognized Chinese characters.
EITHER: if the test is inconclusive.
TRAD: if the text is traditional.
SIMP: if the text is simplified.
BOTH: the text has characters recognized as being solely traditional
and other characters recognized as being solely simplified.
"""
filtered_text = set(list(text)).intersection(ALL_CHARS)
if len(filtered_text) == 0:
return None
if filtered_text.issubset(SHARED_CHARS):
return EITHER
if filtered_text.issubset(TRAD_CHARS):
return TRAD
if filtered_text.issubset(SIMP_CHARS):
return SIMP
if filtered_text.difference(TRAD_CHARS).issubset(SIMP_CHARS):
return BOTH
|
[
"def",
"identify",
"(",
"text",
")",
":",
"filtered_text",
"=",
"set",
"(",
"list",
"(",
"text",
")",
")",
".",
"intersection",
"(",
"ALL_CHARS",
")",
"if",
"len",
"(",
"filtered_text",
")",
"is",
"0",
":",
"return",
"None",
"if",
"filtered_text",
".",
"issubset",
"(",
"SHARED_CHARS",
")",
":",
"return",
"EITHER",
"if",
"filtered_text",
".",
"issubset",
"(",
"TRAD_CHARS",
")",
":",
"return",
"TRAD",
"if",
"filtered_text",
".",
"issubset",
"(",
"SIMP_CHARS",
")",
":",
"return",
"SIMP",
"if",
"filtered_text",
".",
"difference",
"(",
"TRAD_CHARS",
")",
".",
"issubset",
"(",
"SIMP_CHARS",
")",
":",
"return",
"BOTH"
] |
Identify whether a string is simplified or traditional Chinese.
Returns:
None: if there are no recognized Chinese characters.
EITHER: if the test is inconclusive.
TRAD: if the text is traditional.
SIMP: if the text is simplified.
BOTH: the text has characters recognized as being solely traditional
and other characters recognized as being solely simplified.
|
[
"Identify",
"whether",
"a",
"string",
"is",
"simplified",
"or",
"traditional",
"Chinese",
"."
] |
python
|
train
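The subset logic of identify() with toy character sets; the real module derives TRAD_CHARS/SIMP_CHARS from conversion tables, so the single letters here are stand-ins for illustration only:

SHARED = set('ab')              # characters valid in both scripts
TRAD = SHARED | set('T')        # plus traditional-only characters
SIMP = SHARED | set('S')        # plus simplified-only characters
ALL = TRAD | SIMP

def classify(text):
    chars = set(text) & ALL
    if not chars:
        return None
    if chars <= SHARED:
        return 'EITHER'
    if chars <= TRAD:
        return 'TRAD'
    if chars <= SIMP:
        return 'SIMP'
    if (chars - TRAD) <= SIMP:
        return 'BOTH'

print(classify('ab'), classify('aT'), classify('aS'), classify('TS'))
# EITHER TRAD SIMP BOTH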
|
exa-analytics/exa
|
exa/core/container.py
|
https://github.com/exa-analytics/exa/blob/40fb3c22b531d460dbc51e603de75b856cc28f0d/exa/core/container.py#L183-L213
|
def info(self):
"""
Display information about the container's data objects (note that info
on metadata and visualization objects is also provided).
Note:
Sizes are reported in bytes.
"""
names = []
types = []
sizes = []
names.append('WIDGET')
types.append('-')
s = 0
sizes.append(s)
names.append('METADATA')
types.append('-')
s = 0
for obj in self._rel().values():
s += getsizeof(obj)
sizes.append(s)
for name, obj in self._data().items():
names.append(name[1:] if name.startswith('_') else name)
types.append('.'.join((obj.__module__, obj.__class__.__name__)))
if isinstance(obj, pd.Series):
sizes.append(obj.memory_usage())
else:
sizes.append(obj.memory_usage().sum())
inf = pd.DataFrame.from_dict({'object': names, 'type': types, 'size': sizes})
inf.set_index('object', inplace=True)
return inf.sort_index()
|
[
"def",
"info",
"(",
"self",
")",
":",
"names",
"=",
"[",
"]",
"types",
"=",
"[",
"]",
"sizes",
"=",
"[",
"]",
"names",
".",
"append",
"(",
"'WIDGET'",
")",
"types",
".",
"append",
"(",
"'-'",
")",
"s",
"=",
"0",
"sizes",
".",
"append",
"(",
"s",
")",
"names",
".",
"append",
"(",
"'METADATA'",
")",
"types",
".",
"append",
"(",
"'-'",
")",
"s",
"=",
"0",
"for",
"obj",
"in",
"self",
".",
"_rel",
"(",
")",
".",
"values",
"(",
")",
":",
"s",
"+=",
"getsizeof",
"(",
"obj",
")",
"sizes",
".",
"append",
"(",
"s",
")",
"for",
"name",
",",
"obj",
"in",
"self",
".",
"_data",
"(",
")",
".",
"items",
"(",
")",
":",
"names",
".",
"append",
"(",
"name",
"[",
"1",
":",
"]",
"if",
"name",
".",
"startswith",
"(",
"'_'",
")",
"else",
"name",
")",
"types",
".",
"append",
"(",
"'.'",
".",
"join",
"(",
"(",
"obj",
".",
"__module__",
",",
"obj",
".",
"__class__",
".",
"__name__",
")",
")",
")",
"if",
"isinstance",
"(",
"obj",
",",
"pd",
".",
"Series",
")",
":",
"sizes",
".",
"append",
"(",
"obj",
".",
"memory_usage",
"(",
")",
")",
"else",
":",
"sizes",
".",
"append",
"(",
"obj",
".",
"memory_usage",
"(",
")",
".",
"sum",
"(",
")",
")",
"inf",
"=",
"pd",
".",
"DataFrame",
".",
"from_dict",
"(",
"{",
"'object'",
":",
"names",
",",
"'type'",
":",
"types",
",",
"'size'",
":",
"sizes",
"}",
")",
"inf",
".",
"set_index",
"(",
"'object'",
",",
"inplace",
"=",
"True",
")",
"return",
"inf",
".",
"sort_index",
"(",
")"
] |
Display information about the container's data objects (note that info
on metadata and visualization objects is also provided).
Note:
Sizes are reported in bytes.
|
[
"Display",
"information",
"about",
"the",
"container",
"s",
"data",
"objects",
"(",
"note",
"that",
"info",
"on",
"metadata",
"and",
"visualization",
"objects",
"is",
"also",
"provided",
")",
"."
] |
python
|
train
|
Kozea/cairocffi
|
cairocffi/surfaces.py
|
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L525-L536
|
def supports_mime_type(self, mime_type):
""" Return whether surface supports :obj:`mime_type`.
:param mime_type: The MIME type of the image data.
:type mime_type: ASCII string
*New in cairo 1.12.*
"""
mime_type = ffi.new('char[]', mime_type.encode('utf8'))
return bool(cairo.cairo_surface_supports_mime_type(
self._pointer, mime_type))
|
[
"def",
"supports_mime_type",
"(",
"self",
",",
"mime_type",
")",
":",
"mime_type",
"=",
"ffi",
".",
"new",
"(",
"'char[]'",
",",
"mime_type",
".",
"encode",
"(",
"'utf8'",
")",
")",
"return",
"bool",
"(",
"cairo",
".",
"cairo_surface_supports_mime_type",
"(",
"self",
".",
"_pointer",
",",
"mime_type",
")",
")"
] |
Return whether surface supports :obj:`mime_type`.
:param mime_type: The MIME type of the image data.
:type mime_type: ASCII string
*New in cairo 1.12.*
|
[
"Return",
"whether",
"surface",
"supports",
":",
"obj",
":",
"mime_type",
"."
] |
python
|
train
|
googleapis/google-cloud-python
|
api_core/google/api_core/operation.py
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/api_core/google/api_core/operation.py#L116-L146
|
def _set_result_from_operation(self):
"""Set the result or exception from the operation if it is complete."""
# This must be done in a lock to prevent the polling thread
# and main thread from both executing the completion logic
# at the same time.
with self._completion_lock:
# If the operation isn't complete or if the result has already been
# set, do not call set_result/set_exception again.
# Note: self._result_set is set to True in set_result and
# set_exception, in case those methods are invoked directly.
if not self._operation.done or self._result_set:
return
if self._operation.HasField("response"):
response = protobuf_helpers.from_any_pb(
self._result_type, self._operation.response
)
self.set_result(response)
elif self._operation.HasField("error"):
exception = exceptions.GoogleAPICallError(
self._operation.error.message,
errors=(self._operation.error,),
response=self._operation,
)
self.set_exception(exception)
else:
exception = exceptions.GoogleAPICallError(
"Unexpected state: Long-running operation had neither "
"response nor error set."
)
self.set_exception(exception)
|
[
"def",
"_set_result_from_operation",
"(",
"self",
")",
":",
"# This must be done in a lock to prevent the polling thread",
"# and main thread from both executing the completion logic",
"# at the same time.",
"with",
"self",
".",
"_completion_lock",
":",
"# If the operation isn't complete or if the result has already been",
"# set, do not call set_result/set_exception again.",
"# Note: self._result_set is set to True in set_result and",
"# set_exception, in case those methods are invoked directly.",
"if",
"not",
"self",
".",
"_operation",
".",
"done",
"or",
"self",
".",
"_result_set",
":",
"return",
"if",
"self",
".",
"_operation",
".",
"HasField",
"(",
"\"response\"",
")",
":",
"response",
"=",
"protobuf_helpers",
".",
"from_any_pb",
"(",
"self",
".",
"_result_type",
",",
"self",
".",
"_operation",
".",
"response",
")",
"self",
".",
"set_result",
"(",
"response",
")",
"elif",
"self",
".",
"_operation",
".",
"HasField",
"(",
"\"error\"",
")",
":",
"exception",
"=",
"exceptions",
".",
"GoogleAPICallError",
"(",
"self",
".",
"_operation",
".",
"error",
".",
"message",
",",
"errors",
"=",
"(",
"self",
".",
"_operation",
".",
"error",
",",
")",
",",
"response",
"=",
"self",
".",
"_operation",
",",
")",
"self",
".",
"set_exception",
"(",
"exception",
")",
"else",
":",
"exception",
"=",
"exceptions",
".",
"GoogleAPICallError",
"(",
"\"Unexpected state: Long-running operation had neither \"",
"\"response nor error set.\"",
")",
"self",
".",
"set_exception",
"(",
"exception",
")"
] |
Set the result or exception from the operation if it is complete.
|
[
"Set",
"the",
"result",
"or",
"exception",
"from",
"the",
"operation",
"if",
"it",
"is",
"complete",
"."
] |
python
|
train
|
mdgoldberg/sportsref
|
sportsref/nfl/teams.py
|
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L322-L334
|
def off_scheme(self, year):
"""Returns the name of the offensive scheme the team ran in the given
year.
:year: Int representing the season year.
:returns: A string representing the offensive scheme.
"""
scheme_text = self._year_info_pq(year, 'Offensive Scheme').text()
m = re.search(r'Offensive Scheme[:\s]*(.+)\s*', scheme_text, re.I)
if m:
return m.group(1)
else:
return None
|
[
"def",
"off_scheme",
"(",
"self",
",",
"year",
")",
":",
"scheme_text",
"=",
"self",
".",
"_year_info_pq",
"(",
"year",
",",
"'Offensive Scheme'",
")",
".",
"text",
"(",
")",
"m",
"=",
"re",
".",
"search",
"(",
"r'Offensive Scheme[:\\s]*(.+)\\s*'",
",",
"scheme_text",
",",
"re",
".",
"I",
")",
"if",
"m",
":",
"return",
"m",
".",
"group",
"(",
"1",
")",
"else",
":",
"return",
"None"
] |
Returns the name of the offensive scheme the team ran in the given
year.
:year: Int representing the season year.
:returns: A string representing the offensive scheme.
|
[
"Returns",
"the",
"name",
"of",
"the",
"offensive",
"scheme",
"the",
"team",
"ran",
"in",
"the",
"given",
"year",
"."
] |
python
|
test
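The scheme-extraction regex of off_scheme() in isolation; the sample string mimics what the scraped team page might contain (an assumption, not verified page output):

import re

scheme_text = 'Offensive Scheme: West Coast'
m = re.search(r'Offensive Scheme[:\s]*(.+)\s*', scheme_text, re.I)
print(m.group(1) if m else None)  # West Coast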
|
fracpete/python-weka-wrapper3
|
python/weka/datagenerators.py
|
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/datagenerators.py#L167-L177
|
def make_copy(cls, generator):
"""
Creates a copy of the generator.
:param generator: the generator to copy
:type generator: DataGenerator
:return: the copy of the generator
:rtype: DataGenerator
"""
return from_commandline(
to_commandline(generator), classname=classes.get_classname(DataGenerator()))
|
[
"def",
"make_copy",
"(",
"cls",
",",
"generator",
")",
":",
"return",
"from_commandline",
"(",
"to_commandline",
"(",
"generator",
")",
",",
"classname",
"=",
"classes",
".",
"get_classname",
"(",
"DataGenerator",
"(",
")",
")",
")"
] |
Creates a copy of the generator.
:param generator: the generator to copy
:type generator: DataGenerator
:return: the copy of the generator
:rtype: DataGenerator
|
[
"Creates",
"a",
"copy",
"of",
"the",
"generator",
"."
] |
python
|
train
|
sdispater/poetry
|
poetry/repositories/pypi_repository.py
|
https://github.com/sdispater/poetry/blob/2d27acd76c165dd49f11934520a7973de7a3762a/poetry/repositories/pypi_repository.py#L230-L242
|
def get_package_info(self, name): # type: (str) -> dict
"""
Return the package information given its name.
The information is returned from the cache if it exists
or retrieved from the remote server.
"""
if self._disable_cache:
return self._get_package_info(name)
return self._cache.store("packages").remember_forever(
name, lambda: self._get_package_info(name)
)
|
[
"def",
"get_package_info",
"(",
"self",
",",
"name",
")",
":",
"# type: (str) -> dict",
"if",
"self",
".",
"_disable_cache",
":",
"return",
"self",
".",
"_get_package_info",
"(",
"name",
")",
"return",
"self",
".",
"_cache",
".",
"store",
"(",
"\"packages\"",
")",
".",
"remember_forever",
"(",
"name",
",",
"lambda",
":",
"self",
".",
"_get_package_info",
"(",
"name",
")",
")"
] |
Return the package information given its name.
The information is returned from the cache if it exists
or retrieved from the remote server.
|
[
"Return",
"the",
"package",
"information",
"given",
"its",
"name",
"."
] |
python
|
train
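The cache-or-compute shape of get_package_info(), sketched with a plain dict standing in for poetry's persistent store:

class Repo:
    def __init__(self, disable_cache=False):
        self._disable_cache = disable_cache
        self._cache = {}

    def _fetch(self, name):
        print('fetching', name)  # stands in for the network round trip
        return {'name': name}

    def get_package_info(self, name):
        if self._disable_cache:
            return self._fetch(name)
        if name not in self._cache:  # "remember forever"
            self._cache[name] = self._fetch(name)
        return self._cache[name]

r = Repo()
r.get_package_info('requests')  # prints "fetching requests"
r.get_package_info('requests')  # served from cache, no fetch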
|
horazont/aioxmpp
|
aioxmpp/disco/service.py
|
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/disco/service.py#L644-L746
|
def query_info(self, jid, *,
node=None, require_fresh=False, timeout=None,
no_cache=False):
"""
Query the features and identities of the specified entity.
:param jid: The entity to query.
:type jid: :class:`aioxmpp.JID`
:param node: The node to query.
:type node: :class:`str` or :data:`None`
:param require_fresh: Boolean flag to discard previous caches.
:type require_fresh: :class:`bool`
:param timeout: Optional timeout for the response.
:type timeout: :class:`float`
:param no_cache: Boolean flag to forbid caching of the request.
:type no_cache: :class:`bool`
:rtype: :class:`.xso.InfoQuery`
:return: Service discovery information of the `node` at `jid`.
The requests are cached. This means that only one request is ever fired
for a given target (identified by the `jid` and the `node`). The
request is re-used for all subsequent requests to that identity.
If `require_fresh` is set to true, the above does not hold and a fresh
request is always created. The new request is the request which will be
used as alias for subsequent requests to the same identity.
The visible effects of this are twofold:
* Caching: Results of requests are implicitly cached
* Aliasing: Two concurrent requests will be aliased to one request to
save computing resources
Both can be turned off by using `require_fresh`. In general, you should
not need to use `require_fresh`, as all requests are implicitly
cancelled whenever the underlying session gets destroyed.
`no_cache` can be set to true to prevent future requests to be aliased
to this request, i.e. the request is not stored in the internal request
cache. This does not affect `require_fresh`, i.e. if a cached result is
available, it is used.
The `timeout` can be used to restrict the time to wait for a
response. If the timeout triggers, :class:`TimeoutError` is raised.
If :meth:`~.Client.send` raises an
exception, all queries which were running simultaneously for the same
target re-raise that exception. The result is not cached though. If a
new query is sent at a later point for the same target, a new query is
actually sent, independent of the value chosen for `require_fresh`.
.. versionchanged:: 0.9
The `no_cache` argument was added.
"""
key = jid, node
if not require_fresh:
try:
request = self._info_pending[key]
except KeyError:
pass
else:
try:
return (yield from request)
except asyncio.CancelledError:
pass
request = asyncio.ensure_future(
self.send_and_decode_info_query(jid, node)
)
request.add_done_callback(
functools.partial(
self._handle_info_received,
jid,
node
)
)
if not no_cache:
self._info_pending[key] = request
try:
if timeout is not None:
try:
result = yield from asyncio.wait_for(
request,
timeout=timeout)
except asyncio.TimeoutError:
raise TimeoutError()
else:
result = yield from request
except: # NOQA
if request.done():
try:
pending = self._info_pending[key]
except KeyError:
pass
else:
if pending is request:
del self._info_pending[key]
raise
return result
|
[
"def",
"query_info",
"(",
"self",
",",
"jid",
",",
"*",
",",
"node",
"=",
"None",
",",
"require_fresh",
"=",
"False",
",",
"timeout",
"=",
"None",
",",
"no_cache",
"=",
"False",
")",
":",
"key",
"=",
"jid",
",",
"node",
"if",
"not",
"require_fresh",
":",
"try",
":",
"request",
"=",
"self",
".",
"_info_pending",
"[",
"key",
"]",
"except",
"KeyError",
":",
"pass",
"else",
":",
"try",
":",
"return",
"(",
"yield",
"from",
"request",
")",
"except",
"asyncio",
".",
"CancelledError",
":",
"pass",
"request",
"=",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"send_and_decode_info_query",
"(",
"jid",
",",
"node",
")",
")",
"request",
".",
"add_done_callback",
"(",
"functools",
".",
"partial",
"(",
"self",
".",
"_handle_info_received",
",",
"jid",
",",
"node",
")",
")",
"if",
"not",
"no_cache",
":",
"self",
".",
"_info_pending",
"[",
"key",
"]",
"=",
"request",
"try",
":",
"if",
"timeout",
"is",
"not",
"None",
":",
"try",
":",
"result",
"=",
"yield",
"from",
"asyncio",
".",
"wait_for",
"(",
"request",
",",
"timeout",
"=",
"timeout",
")",
"except",
"asyncio",
".",
"TimeoutError",
":",
"raise",
"TimeoutError",
"(",
")",
"else",
":",
"result",
"=",
"yield",
"from",
"request",
"except",
":",
"# NOQA",
"if",
"request",
".",
"done",
"(",
")",
":",
"try",
":",
"pending",
"=",
"self",
".",
"_info_pending",
"[",
"key",
"]",
"except",
"KeyError",
":",
"pass",
"else",
":",
"if",
"pending",
"is",
"request",
":",
"del",
"self",
".",
"_info_pending",
"[",
"key",
"]",
"raise",
"return",
"result"
] |
Query the features and identities of the specified entity.
:param jid: The entity to query.
:type jid: :class:`aioxmpp.JID`
:param node: The node to query.
:type node: :class:`str` or :data:`None`
:param require_fresh: Boolean flag to discard previous caches.
:type require_fresh: :class:`bool`
:param timeout: Optional timeout for the response.
:type timeout: :class:`float`
:param no_cache: Boolean flag to forbid caching of the request.
:type no_cache: :class:`bool`
:rtype: :class:`.xso.InfoQuery`
:return: Service discovery information of the `node` at `jid`.
The requests are cached. This means that only one request is ever fired
for a given target (identified by the `jid` and the `node`). The
request is re-used for all subsequent requests to that identity.
If `require_fresh` is set to true, the above does not hold and a fresh
request is always created. The new request is the request which will be
used as alias for subsequent requests to the same identity.
The visible effects of this are twofold:
* Caching: Results of requests are implicitly cached
* Aliasing: Two concurrent requests will be aliased to one request to
save computing resources
Both can be turned off by using `require_fresh`. In general, you should
not need to use `require_fresh`, as all requests are implicitly
cancelled whenever the underlying session gets destroyed.
`no_cache` can be set to true to prevent future requests to be aliased
to this request, i.e. the request is not stored in the internal request
cache. This does not affect `require_fresh`, i.e. if a cached result is
available, it is used.
The `timeout` can be used to restrict the time to wait for a
response. If the timeout triggers, :class:`TimeoutError` is raised.
If :meth:`~.Client.send` raises an
exception, all queries which were running simultaneously for the same
target re-raise that exception. The result is not cached though. If a
new query is sent at a later point for the same target, a new query is
actually sent, independent of the value chosen for `require_fresh`.
.. versionchanged:: 0.9
The `no_cache` argument was added.
|
[
"Query",
"the",
"features",
"and",
"identities",
"of",
"the",
"specified",
"entity",
"."
] |
python
|
train
|
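The caching-plus-aliasing contract described in the `query_info` docstring above can be sketched independently of XMPP. In this minimal asyncio version, `coalesced_fetch` and `_pending` are illustrative names, not aioxmpp API; as in `query_info`, completed futures double as the cache and failed requests are evicted:

```python
import asyncio

_pending = {}  # key -> asyncio Future; completed futures double as the cache


async def coalesced_fetch(key, fetch):
    if key in _pending:
        try:
            return await _pending[key]   # alias onto the in-flight or cached request
        except asyncio.CancelledError:
            pass                         # a cancelled request falls through to a fresh fetch
    task = asyncio.ensure_future(fetch(key))
    _pending[key] = task
    try:
        return await task
    except Exception:
        if _pending.get(key) is task:
            del _pending[key]            # failures are not cached, mirroring query_info
        raise


async def demo():
    calls = []

    async def fetch(key):
        calls.append(key)
        return key.upper()

    results = await asyncio.gather(coalesced_fetch("a", fetch),
                                   coalesced_fetch("a", fetch))
    assert results == ["A", "A"] and calls == ["a"]  # two callers, one request

asyncio.run(demo())
```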
twisted/vertex
|
vertex/q2q.py
|
https://github.com/twisted/vertex/blob/feb591aa1b9a3b2b8fdcf53e4962dad2a0bc38ca/vertex/q2q.py#L754-L903
|
def verifyCertificateAllowed(self,
ourAddress,
theirAddress):
"""
Check that the cert currently in use by this transport is valid to
claim that the connection offers authorization for this host speaking
for C{ourAddress}, to a host speaking for C{theirAddress}. The remote
host (the one claiming to use theirAddress) may have a certificate
which is issued for the domain for theirAddress or the full address
given in theirAddress.
This method runs B{after} cryptographic verification of the validity of
certificates, although it does not perform any cryptographic checks
itself. It depends on SSL connection handshaking - *and* the
particular certificate lookup logic which prevents spoofed Issuer
fields, to work properly. However, all it checks is the X509 names
present in the certificates matching with the application-level
security claims being made by our peer.
An example of successful verification, because both parties have
properly signed certificates for their usage from the domain they
have been issued::
our current certificate:
issuer: divmod.com
subject: [email protected]
their current certificate:
issuer: twistedmatrix.com
subject: [email protected]
Arguments to verifyCertificateAllowed:
ourAddress: [email protected]
theirAddress: [email protected]
Result of verifyCertificateAllowed: None
An example of rejected verification, because domain certificates are
always B{self}-signed in Q2Q; verisign is not a trusted certificate
authority for the entire internet as with some other TLS
implementations::
our current certificate:
issuer: divmod.com
subject: divmod.com
their current certificate:
issuer: verisign.com
subject: twistedmatrix.com
Arguments to verifyCertificateAllowed:
ourAddress: divmod.com
theirAddress: twistedmatrix.com
Result of verifyCertificateAllowed: exception VerifyError raised
Another example of successful verification, because we assume our
current certificate is under the control of this side of the
connection, so *any* claimed subject is considered acceptable::
our current certificate:
issuer: divmod.com
subject: divmod.com
their current certificate:
issuer: divmod.com
subject: [email protected]
Arguments to verifyCertificateAllowed:
ourAddress: divmod.com
theirAddress: [email protected]
Result of verifyCertificateAllowed: None
Another example of successful verification, because the user is
claiming to be anonymous; there is also a somewhat looser
cryptographic check applied to signatures for anonymous
connections::
our current certificate:
issuer: divmod.com
subject: divmod.com
their current certificate:
issuer: @
subject: @
Arguments to verifyCertificateAllowed:
ourAddress: divmod.com
theirAddress: @
Result of verifyCertificateAllowed: None
Accept anonymous connections with caution.
@param ourAddress: a L{Q2QAddress} representing the address that we are
supposed to have authority for, requested by our peer.
@param theirAddress: a L{Q2QAddress} representing the address that our
network peer claims to be communicating on behalf of. For example, if
our peer is foobar.com they may claim to be operating on behalf of any
user @foobar.com.
@raise: L{VerifyError} if the certificates do not match the
claimed addresses.
"""
# XXX TODO: Somehow, it's got to be possible for a single cluster to
# internally claim to be agents of any other host when issuing a
# CONNECT; in other words, we always implicitly trust ourselves. Also,
# we might want to issue anonymous CONNECTs over unencrypted
# connections.
# IOW: *we* can sign a certificate to be whoever, but the *peer* can
# only sign the certificate to be the peer.
# The easiest way to make this work is to issue ourselves a wildcard
# certificate.
if not self.authorized:
if theirAddress.domain == '':
# XXX TODO: document this rule, anonymous connections are
# allowed to not be authorized because they are not making any
# claims about who they are
# XXX also TODO: make it so that anonymous connections are
# disabled by default for most protocols
return True
raise VerifyError("No official negotiation has taken place.")
peerCert = Certificate.peerFromTransport(self.transport)
ourCert = self.hostCertificate
ourClaimedDomain = ourAddress.domainAddress()
theirClaimedDomain = theirAddress.domainAddress()
# Sanity check #1: did we pick the right certificate on our end?
if not ourClaimedDomain.claimedAsIssuerOf(ourCert):
raise VerifyError(
"Something has gone horribly wrong: local domain mismatch "
"claim: %s actual: %s" % (ourClaimedDomain,
ourCert.getIssuer()))
if theirClaimedDomain.claimedAsIssuerOf(peerCert):
# Their domain issued their certificate.
if (theirAddress.claimedAsSubjectOf(peerCert) or
theirClaimedDomain.claimedAsSubjectOf(peerCert)):
return
elif ourClaimedDomain.claimedAsIssuerOf(peerCert):
# *our* domain can spoof *anything*
return
elif ourAddress.claimedAsIssuerOf(peerCert):
# Neither our domain nor their domain signed this. Did *we*?
# (Useful in peer-to-peer persistent transactions where we don't
# want the server involved: [email protected] can sign
# [email protected]'s certificate).
return
raise VerifyError(
"Us: %s Them: %s "
"TheyClaimWeAre: %s TheyClaimTheyAre: %s" %
(ourCert, peerCert,
ourAddress, theirAddress))
|
[
"def",
"verifyCertificateAllowed",
"(",
"self",
",",
"ourAddress",
",",
"theirAddress",
")",
":",
"# XXX TODO: Somehow, it's got to be possible for a single cluster to",
"# internally claim to be agents of any other host when issuing a",
"# CONNECT; in other words, we always implicitly trust ourselves. Also,",
"# we might want to issue anonymous CONNECTs over unencrypted",
"# connections.",
"# IOW: *we* can sign a certificate to be whoever, but the *peer* can",
"# only sign the certificate to be the peer.",
"# The easiest way to make this work is to issue ourselves a wildcard",
"# certificate.",
"if",
"not",
"self",
".",
"authorized",
":",
"if",
"theirAddress",
".",
"domain",
"==",
"''",
":",
"# XXX TODO: document this rule, anonymous connections are",
"# allowed to not be authorized because they are not making any",
"# claims about who they are",
"# XXX also TODO: make it so that anonymous connections are",
"# disabled by default for most protocols",
"return",
"True",
"raise",
"VerifyError",
"(",
"\"No official negotiation has taken place.\"",
")",
"peerCert",
"=",
"Certificate",
".",
"peerFromTransport",
"(",
"self",
".",
"transport",
")",
"ourCert",
"=",
"self",
".",
"hostCertificate",
"ourClaimedDomain",
"=",
"ourAddress",
".",
"domainAddress",
"(",
")",
"theirClaimedDomain",
"=",
"theirAddress",
".",
"domainAddress",
"(",
")",
"# Sanity check #1: did we pick the right certificate on our end?",
"if",
"not",
"ourClaimedDomain",
".",
"claimedAsIssuerOf",
"(",
"ourCert",
")",
":",
"raise",
"VerifyError",
"(",
"\"Something has gone horribly wrong: local domain mismatch \"",
"\"claim: %s actual: %s\"",
"%",
"(",
"ourClaimedDomain",
",",
"ourCert",
".",
"getIssuer",
"(",
")",
")",
")",
"if",
"theirClaimedDomain",
".",
"claimedAsIssuerOf",
"(",
"peerCert",
")",
":",
"# Their domain issued their certificate.",
"if",
"(",
"theirAddress",
".",
"claimedAsSubjectOf",
"(",
"peerCert",
")",
"or",
"theirClaimedDomain",
".",
"claimedAsSubjectOf",
"(",
"peerCert",
")",
")",
":",
"return",
"elif",
"ourClaimedDomain",
".",
"claimedAsIssuerOf",
"(",
"peerCert",
")",
":",
"# *our* domain can spoof *anything*",
"return",
"elif",
"ourAddress",
".",
"claimedAsIssuerOf",
"(",
"peerCert",
")",
":",
"# Neither our domain nor their domain signed this. Did *we*?",
"# (Useful in peer-to-peer persistent transactions where we don't",
"# want the server involved: [email protected] can sign",
"# [email protected]'s certificate).",
"return",
"raise",
"VerifyError",
"(",
"\"Us: %s Them: %s \"",
"\"TheyClaimWeAre: %s TheyClaimTheyAre: %s\"",
"%",
"(",
"ourCert",
",",
"peerCert",
",",
"ourAddress",
",",
"theirAddress",
")",
")"
] |
Check that the cert currently in use by this transport is valid to
claim that the connection offers authorization for this host speaking
for C{ourAddress}, to a host speaking for C{theirAddress}. The remote
host (the one claiming to use theirAddress) may have a certificate
which is issued for the domain for theirAddress or the full address
given in theirAddress.
This method runs B{after} cryptographic verification of the validity of
certificates, although it does not perform any cryptographic checks
itself. It depends on SSL connection handshaking - *and* the
particular certificate lookup logic which prevents spoofed Issuer
fields, to work properly. However, all it checks is the X509 names
present in the certificates matching with the application-level
security claims being made by our peer.
An example of successful verification, because both parties have
properly signed certificates for their usage from the domain they
have been issued::
our current certificate:
issuer: divmod.com
subject: [email protected]
their current certificate:
issuer: twistedmatrix.com
subject: [email protected]
Arguments to verifyCertificateAllowed:
ourAddress: [email protected]
theirAddress: [email protected]
Result of verifyCertificateAllowed: None
An example of rejected verification, because domain certificates are
always B{self}-signed in Q2Q; verisign is not a trusted certificate
authority for the entire internet as with some other TLS
implementations::
our current certificate:
issuer: divmod.com
subject: divmod.com
their current certificate:
issuer: verisign.com
subject: twistedmatrix.com
Arguments to verifyCertificateAllowed:
ourAddress: divmod.com
theirAddress: twistedmatrix.com
Result of verifyCertificateAllowed: exception VerifyError raised
Another example of successful verification, because we assume our
current certificate is under the control of this side of the
connection, so *any* claimed subject is considered acceptable::
our current certificate:
issuer: divmod.com
subject: divmod.com
their current certificate:
issuer: divmod.com
subject: [email protected]
Arguments to verifyCertificateAllowed:
ourAddress: divmod.com
theirAddress: [email protected]
Result of verifyCertificateAllowed: None
Another example of successful verification, because the user is
claiming to be anonymous; there is also a somewhat looser
cryptographic check applied to signatures for anonymous
connections::
our current certificate:
issuer: divmod.com
subject: divmod.com
their current certificate:
issuer: @
subject: @
Arguments to verifyCertificateAllowed:
ourAddress: divmod.com
theirAddress: @
Result of verifyCertificateAllowed: None
Accept anonymous connections with caution.
@param ourAddress: a L{Q2QAddress} representing the address that we are
supposed to have authority for, requested by our peer.
@param theirAddress: a L{Q2QAddress} representing the address that our
network peer claims to be communicating on behalf of. For example, if
our peer is foobar.com they may claim to be operating on behalf of any
user @foobar.com.
@raise: L{VerifyError} if the certificates do not match the
claimed addresses.
|
[
"Check",
"that",
"the",
"cert",
"currently",
"in",
"use",
"by",
"this",
"transport",
"is",
"valid",
"to",
"claim",
"that",
"the",
"connection",
"offers",
"authorization",
"for",
"this",
"host",
"speaking",
"for",
"C",
"{",
"ourAddress",
"}",
"to",
"a",
"host",
"speaking",
"for",
"C",
"{",
"theirAddress",
"}",
".",
"The",
"remote",
"host",
"(",
"the",
"one",
"claiming",
"to",
"use",
"theirAddress",
")",
"may",
"have",
"a",
"certificate",
"which",
"is",
"issued",
"for",
"the",
"domain",
"for",
"theirAddress",
"or",
"the",
"full",
"address",
"given",
"in",
"theirAddress",
"."
] |
python
|
train
|
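The acceptance rules tabulated in the `verifyCertificateAllowed` docstring above reduce to a small decision function. A simplified sketch over plain strings (the real method works on X509 certificate objects and additionally honors peer certificates issued by `ourAddress` itself):

```python
def certificate_allowed(our_domain, their_domain, their_address,
                        peer_issuer, peer_subject):
    # Rule 1: their domain vouches for the claimed address (or for itself).
    if peer_issuer == their_domain and peer_subject in (their_address, their_domain):
        return True
    # Rule 2: anything issued by our own domain is implicitly trusted.
    if peer_issuer == our_domain:
        return True
    # Everything else (e.g. a third-party CA like verisign.com) is rejected.
    return False


# Matches the docstring examples above:
assert certificate_allowed("divmod.com", "twistedmatrix.com",
                           "exarkun@twistedmatrix.com",
                           "twistedmatrix.com", "exarkun@twistedmatrix.com")
assert not certificate_allowed("divmod.com", "twistedmatrix.com",
                               "twistedmatrix.com",
                               "verisign.com", "twistedmatrix.com")
```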
chaoss/grimoirelab-elk
|
grimoire_elk/enriched/remo.py
|
https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/remo.py#L70-L82
|
def get_identities(self, item):
"""Return the identities from an item"""
item = item['data']
if 'owner' in item:
owner = self.get_sh_identity(item['owner'])
yield owner
if 'user' in item:
user = self.get_sh_identity(item['user'])
yield user
if 'mentor' in item:
mentor = self.get_sh_identity(item['mentor'])
yield mentor
|
[
"def",
"get_identities",
"(",
"self",
",",
"item",
")",
":",
"item",
"=",
"item",
"[",
"'data'",
"]",
"if",
"'owner'",
"in",
"item",
":",
"owner",
"=",
"self",
".",
"get_sh_identity",
"(",
"item",
"[",
"'owner'",
"]",
")",
"yield",
"owner",
"if",
"'user'",
"in",
"item",
":",
"user",
"=",
"self",
".",
"get_sh_identity",
"(",
"item",
"[",
"'user'",
"]",
")",
"yield",
"user",
"if",
"'mentor'",
"in",
"item",
":",
"mentor",
"=",
"self",
".",
"get_sh_identity",
"(",
"item",
"[",
"'mentor'",
"]",
")",
"yield",
"mentor"
] |
Return the identities from an item
|
[
"Return",
"the",
"identities",
"from",
"an",
"item"
] |
python
|
train
|
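A self-contained sketch of the role-key pattern in `get_identities` above, with `get_sh_identity` stubbed out as a hypothetical helper that wraps the raw field:

```python
def get_identities(item, get_sh_identity=lambda field: {"username": field}):
    # Same shape as the enricher method above: probe each role key, yield when present.
    data = item["data"]
    for role in ("owner", "user", "mentor"):
        if role in data:
            yield get_sh_identity(data[role])


item = {"data": {"owner": "alice", "mentor": "bob"}}
assert list(get_identities(item)) == [{"username": "alice"}, {"username": "bob"}]
```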
coyo8/parinx
|
parinx/parser.py
|
https://github.com/coyo8/parinx/blob/6493798ceba8089345d970f71be4a896eb6b081d/parinx/parser.py#L48-L59
|
def parse_request_headers(headers):
"""
Convert headers into a human-readable format
:param headers:
:return:
"""
request_header_keys = set(headers.keys(lower=True))
request_meta_keys = set(XHEADERS_TO_ARGS_DICT.keys())
data_header_keys = request_header_keys.intersection(request_meta_keys)
return dict(([XHEADERS_TO_ARGS_DICT[key],
headers.get(key, None)] for key in data_header_keys))
|
[
"def",
"parse_request_headers",
"(",
"headers",
")",
":",
"request_header_keys",
"=",
"set",
"(",
"headers",
".",
"keys",
"(",
"lower",
"=",
"True",
")",
")",
"request_meta_keys",
"=",
"set",
"(",
"XHEADERS_TO_ARGS_DICT",
".",
"keys",
"(",
")",
")",
"data_header_keys",
"=",
"request_header_keys",
".",
"intersection",
"(",
"request_meta_keys",
")",
"return",
"dict",
"(",
"(",
"[",
"XHEADERS_TO_ARGS_DICT",
"[",
"key",
"]",
",",
"headers",
".",
"get",
"(",
"key",
",",
"None",
")",
"]",
"for",
"key",
"in",
"data_header_keys",
")",
")"
] |
Convert headers into a human-readable format
:param headers:
:return:
|
[
"convert",
"headers",
"in",
"human",
"readable",
"format"
] |
python
|
train
|
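A quick sketch of the header-to-argument mapping above, using a plain dict in place of the framework headers object (whose `keys(lower=True)` lower-cases keys); the `XHEADERS_TO_ARGS_DICT` contents here are invented for illustration:

```python
XHEADERS_TO_ARGS_DICT = {"x-auth-user": "user", "x-auth-key": "key"}  # illustrative


def parse_request_headers(headers):
    # Plain-dict stand-in: assume keys already arrive lower-cased.
    request_header_keys = set(k.lower() for k in headers)
    data_header_keys = request_header_keys & set(XHEADERS_TO_ARGS_DICT)
    return {XHEADERS_TO_ARGS_DICT[key]: headers.get(key) for key in data_header_keys}


assert parse_request_headers({"x-auth-user": "alice", "accept": "*/*"}) == {"user": "alice"}
```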
emory-libraries/eulxml
|
eulxml/forms/xmlobject.py
|
https://github.com/emory-libraries/eulxml/blob/17d71c7d98c0cebda9932b7f13e72093805e1fe2/eulxml/forms/xmlobject.py#L44-L57
|
def _parse_field_list(fieldnames, include_parents=False):
"""
Parse a list of field names, possibly including dot-separated subform
fields, into an internal ParsedFieldList object representing the base
fields and subform listed.
:param fieldnames: a list of field names as strings. dot-separated names
are interpreted as subform fields.
:param include_parents: optional boolean, defaults to False. if True,
subform fields implicitly include their parent fields in the parsed
list.
"""
field_parts = (name.split('.') for name in fieldnames)
return _collect_fields(field_parts, include_parents)
|
[
"def",
"_parse_field_list",
"(",
"fieldnames",
",",
"include_parents",
"=",
"False",
")",
":",
"field_parts",
"=",
"(",
"name",
".",
"split",
"(",
"'.'",
")",
"for",
"name",
"in",
"fieldnames",
")",
"return",
"_collect_fields",
"(",
"field_parts",
",",
"include_parents",
")"
] |
Parse a list of field names, possibly including dot-separated subform
fields, into an internal ParsedFieldList object representing the base
fields and subform listed.
:param fieldnames: a list of field names as strings. dot-separated names
are interpreted as subform fields.
:param include_parents: optional boolean, defaults to False. if True,
subform fields implicitly include their parent fields in the parsed
list.
|
[
"Parse",
"a",
"list",
"of",
"field",
"names",
"possibly",
"including",
"dot",
"-",
"separated",
"subform",
"fields",
"into",
"an",
"internal",
"ParsedFieldList",
"object",
"representing",
"the",
"base",
"fields",
"and",
"subform",
"listed",
"."
] |
python
|
train
|
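The dot-splitting step in `_parse_field_list` above is the whole trick; `_collect_fields` (not shown in this entry) then folds the parts into base fields and nested subform fields. A sketch of the intermediate shape:

```python
fieldnames = ["title", "author.first", "author.last"]
field_parts = [name.split(".") for name in fieldnames]
assert field_parts == [["title"], ["author", "first"], ["author", "last"]]
# Single-element parts are base fields; multi-element parts name a subform
# ("author") and a field inside it ("first", "last").
```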
coursera-dl/coursera-dl
|
coursera/credentials.py
|
https://github.com/coursera-dl/coursera-dl/blob/9b434bcf3c4011bf3181429fe674633ae5fb7d4d/coursera/credentials.py#L37-L110
|
def get_config_paths(config_name): # pragma: no test
"""
Return a list of config file paths to try in order, given config file
name and possibly a user-specified path.
For Windows platforms, there are several paths that can be tried to
retrieve the netrc file. There is, however, no "standard way" of doing
things.
A brief recap of the situation (all file paths are written in Unix
convention):
1. By default, Windows does not define a $HOME path. However, some
people might define one manually, and many command-line tools imported
from Unix will search the $HOME environment variable first. This
includes MSYSGit tools (bash, ssh, ...) and Emacs.
2. Windows defines two 'user paths': $USERPROFILE, and the
concatenation of the two variables $HOMEDRIVE and $HOMEPATH. Both of
these paths point by default to the same location, e.g.
C:\\Users\\Username
3. $USERPROFILE cannot be changed, however $HOMEDRIVE and $HOMEPATH
can be changed. They are originally intended to be the equivalent of
the $HOME path, but there are many known issues with them
4. As for the name of the file itself, most of the tools ported from
Unix will use the standard '.dotfile' scheme, but some of these will
instead use "_dotfile". Of the latter, the two notable exceptions are
vim, which will first try '_vimrc' before '.vimrc' (but it will try
both) and git, which will require the user to name its netrc file
'_netrc'.
Relevant links:
http://markmail.org/message/i33ldu4xl5aterrr
http://markmail.org/message/wbzs4gmtvkbewgxi
http://stackoverflow.com/questions/6031214/
Because the whole thing is a mess, I suggest we try various sensible
defaults until we succeed or have depleted all possibilities.
"""
if platform.system() != 'Windows':
return [None]
# Now, we only treat the case of Windows
env_vars = [["HOME"],
["HOMEDRIVE", "HOMEPATH"],
["USERPROFILE"],
["SYSTEMDRIVE"]]
env_dirs = []
for var_list in env_vars:
var_values = [_getenv_or_empty(var) for var in var_list]
directory = ''.join(var_values)
if not directory:
logging.debug('Environment var(s) %s not defined, skipping',
var_list)
else:
env_dirs.append(directory)
additional_dirs = ["C:", ""]
all_dirs = env_dirs + additional_dirs
leading_chars = [".", "_"]
res = [''.join([directory, os.sep, lc, config_name])
for directory in all_dirs
for lc in leading_chars]
return res
|
[
"def",
"get_config_paths",
"(",
"config_name",
")",
":",
"# pragma: no test",
"if",
"platform",
".",
"system",
"(",
")",
"!=",
"'Windows'",
":",
"return",
"[",
"None",
"]",
"# Now, we only treat the case of Windows",
"env_vars",
"=",
"[",
"[",
"\"HOME\"",
"]",
",",
"[",
"\"HOMEDRIVE\"",
",",
"\"HOMEPATH\"",
"]",
",",
"[",
"\"USERPROFILE\"",
"]",
",",
"[",
"\"SYSTEMDRIVE\"",
"]",
"]",
"env_dirs",
"=",
"[",
"]",
"for",
"var_list",
"in",
"env_vars",
":",
"var_values",
"=",
"[",
"_getenv_or_empty",
"(",
"var",
")",
"for",
"var",
"in",
"var_list",
"]",
"directory",
"=",
"''",
".",
"join",
"(",
"var_values",
")",
"if",
"not",
"directory",
":",
"logging",
".",
"debug",
"(",
"'Environment var(s) %s not defined, skipping'",
",",
"var_list",
")",
"else",
":",
"env_dirs",
".",
"append",
"(",
"directory",
")",
"additional_dirs",
"=",
"[",
"\"C:\"",
",",
"\"\"",
"]",
"all_dirs",
"=",
"env_dirs",
"+",
"additional_dirs",
"leading_chars",
"=",
"[",
"\".\"",
",",
"\"_\"",
"]",
"res",
"=",
"[",
"''",
".",
"join",
"(",
"[",
"directory",
",",
"os",
".",
"sep",
",",
"lc",
",",
"config_name",
"]",
")",
"for",
"directory",
"in",
"all_dirs",
"for",
"lc",
"in",
"leading_chars",
"]",
"return",
"res"
] |
Return a list of config file paths to try in order, given config file
name and possibly a user-specified path.
For Windows platforms, there are several paths that can be tried to
retrieve the netrc file. There is, however, no "standard way" of doing
things.
A brief recap of the situation (all file paths are written in Unix
convention):
1. By default, Windows does not define a $HOME path. However, some
people might define one manually, and many command-line tools imported
from Unix will search the $HOME environment variable first. This
includes MSYSGit tools (bash, ssh, ...) and Emacs.
2. Windows defines two 'user paths': $USERPROFILE, and the
concatenation of the two variables $HOMEDRIVE and $HOMEPATH. Both of
these paths point by default to the same location, e.g.
C:\\Users\\Username
3. $USERPROFILE cannot be changed, however $HOMEDRIVE and $HOMEPATH
can be changed. They are originally intended to be the equivalent of
the $HOME path, but there are many known issues with them
4. As for the name of the file itself, most of the tools ported from
Unix will use the standard '.dotfile' scheme, but some of these will
instead use "_dotfile". Of the latter, the two notable exceptions are
vim, which will first try '_vimrc' before '.vimrc' (but it will try
both) and git, which will require the user to name its netrc file
'_netrc'.
Relevant links:
http://markmail.org/message/i33ldu4xl5aterrr
http://markmail.org/message/wbzs4gmtvkbewgxi
http://stackoverflow.com/questions/6031214/
Because the whole thing is a mess, I suggest we try various sensible
defaults until we succeed or have depleted all possibilities.
|
[
"Return",
"a",
"list",
"of",
"config",
"files",
"paths",
"to",
"try",
"in",
"order",
"given",
"config",
"file",
"name",
"and",
"possibly",
"a",
"user",
"-",
"specified",
"path",
"."
] |
python
|
train
|
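The candidate list built by `get_config_paths` above is just a cross product of directories and leading characters. A sketch with hard-wired directories standing in for the environment lookups:

```python
import os

all_dirs = [r"C:\Users\Username", "C:", ""]   # stand-ins for the env-derived dirs
leading_chars = [".", "_"]
config_name = "netrc"

candidates = ["".join([directory, os.sep, lc, config_name])
              for directory in all_dirs
              for lc in leading_chars]
# On Windows this yields, in order:
#   C:\Users\Username\.netrc, C:\Users\Username\_netrc,
#   C:\.netrc, C:\_netrc, \.netrc, \_netrc
assert len(candidates) == len(all_dirs) * len(leading_chars)
```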
MacHu-GWU/single_file_module-project
|
sfm/fingerprint.py
|
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/fingerprint.py#L161-L171
|
def of_text(self, text, encoding="utf-8"):
"""
Use default hash method to return hash value of a piece of string
default setting uses 'utf-8' encoding.
:type text: text_type
:param text: a text object
"""
m = self.hash_algo()
m.update(text.encode(encoding))
return self.digest(m)
|
[
"def",
"of_text",
"(",
"self",
",",
"text",
",",
"encoding",
"=",
"\"utf-8\"",
")",
":",
"m",
"=",
"self",
".",
"hash_algo",
"(",
")",
"m",
".",
"update",
"(",
"text",
".",
"encode",
"(",
"encoding",
")",
")",
"return",
"self",
".",
"digest",
"(",
"m",
")"
] |
Use default hash method to return hash value of a piece of string
default setting uses 'utf-8' encoding.
:type text: text_type
:param text: a text object
|
[
"Use",
"default",
"hash",
"method",
"to",
"return",
"hash",
"value",
"of",
"a",
"piece",
"of",
"string",
"default",
"setting",
"use",
"utf",
"-",
"8",
"encoding",
"."
] |
python
|
train
|
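Functionally, `of_text` above is encode-then-hash. An equivalent standalone sketch using hashlib directly; md5 and `hexdigest` are assumptions here, since the entry does not show the class's default `hash_algo` or its `digest` formatting:

```python
import hashlib


def fingerprint_of_text(text, encoding="utf-8", hash_algo=hashlib.md5):
    m = hash_algo()
    m.update(text.encode(encoding))  # same flow as of_text: encode, feed, digest
    return m.hexdigest()


assert fingerprint_of_text("hello") == hashlib.md5(b"hello").hexdigest()
```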
ArtoLabs/SimpleSteem
|
simplesteem/simplesteem.py
|
https://github.com/ArtoLabs/SimpleSteem/blob/ce8be0ae81f8878b460bc156693f1957f7dd34a3/simplesteem/simplesteem.py#L625-L638
|
def unvote_witness(self, witness, account=None):
''' Uses the steem_instance method to
unvote a witness.
'''
if not account:
account = self.mainaccount
try:
self.steem_instance().disapprove_witness(witness, account=account)
except Exception as e:
self.msg.error_message("COULD NOT UNVOTE "
+ witness + " AS WITNESS: " + str(e))
return False
else:
return True
|
[
"def",
"unvote_witness",
"(",
"self",
",",
"witness",
",",
"account",
"=",
"None",
")",
":",
"if",
"not",
"account",
":",
"account",
"=",
"self",
".",
"mainaccount",
"try",
":",
"self",
".",
"steem_instance",
"(",
")",
".",
"disapprove_witness",
"(",
"witness",
",",
"account",
"=",
"account",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"msg",
".",
"error_message",
"(",
"\"COULD NOT UNVOTE \"",
"+",
"witness",
"+",
"\" AS WITNESS: \"",
"+",
"e",
")",
"return",
"False",
"else",
":",
"return",
"True"
] |
Uses the steem_instance method to
unvote a witness.
|
[
"Uses",
"the",
"steem_instance",
"method",
"to",
"unvote",
"a",
"witness",
"."
] |
python
|
train
|
inasafe/inasafe
|
safe/gui/tools/wizard/step_fc05_functions2.py
|
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_fc05_functions2.py#L100-L122
|
def on_tblFunctions2_itemSelectionChanged(self):
"""Choose selected hazard x exposure constraints combination.
.. note:: This is an automatic Qt slot
executed when the category selection changes.
"""
functions = self.selected_functions_2()
if not functions:
self.lblAvailableFunctions2.clear()
else:
text = self.tr('Available functions:') + ' ' + ', '.join(
[f['name'] for f in functions])
self.lblAvailableFunctions2.setText(text)
self.parent.pbnNext.setEnabled(True)
# Put a dot in the selected cell - note there is no way
# to center an icon without using a custom ItemDelegate
selection = self.tblFunctions2.selectedItems()
selItem = (len(selection) == 1) and selection[0] or None
for row in range(self.tblFunctions2.rowCount()):
for column in range(self.tblFunctions2.columnCount()):
item = self.tblFunctions2.item(row, column)
item.setText((item == selItem) and '•' or '')
|
[
"def",
"on_tblFunctions2_itemSelectionChanged",
"(",
"self",
")",
":",
"functions",
"=",
"self",
".",
"selected_functions_2",
"(",
")",
"if",
"not",
"functions",
":",
"self",
".",
"lblAvailableFunctions2",
".",
"clear",
"(",
")",
"else",
":",
"text",
"=",
"self",
".",
"tr",
"(",
"'Available functions:'",
")",
"+",
"' '",
"+",
"', '",
".",
"join",
"(",
"[",
"f",
"[",
"'name'",
"]",
"for",
"f",
"in",
"functions",
"]",
")",
"self",
".",
"lblAvailableFunctions2",
".",
"setText",
"(",
"text",
")",
"self",
".",
"parent",
".",
"pbnNext",
".",
"setEnabled",
"(",
"True",
")",
"# Put a dot to the selected cell - note there is no way",
"# to center an icon without using a custom ItemDelegate",
"selection",
"=",
"self",
".",
"tblFunctions2",
".",
"selectedItems",
"(",
")",
"selItem",
"=",
"(",
"len",
"(",
"selection",
")",
"==",
"1",
")",
"and",
"selection",
"[",
"0",
"]",
"or",
"None",
"for",
"row",
"in",
"range",
"(",
"self",
".",
"tblFunctions2",
".",
"rowCount",
"(",
")",
")",
":",
"for",
"column",
"in",
"range",
"(",
"self",
".",
"tblFunctions2",
".",
"columnCount",
"(",
")",
")",
":",
"item",
"=",
"self",
".",
"tblFunctions2",
".",
"item",
"(",
"row",
",",
"column",
")",
"item",
".",
"setText",
"(",
"(",
"item",
"==",
"selItem",
")",
"and",
"'•' o",
" '",
")",
""
] |
Choose selected hazard x exposure constraints combination.
.. note:: This is an automatic Qt slot
executed when the category selection changes.
|
[
"Choose",
"selected",
"hazard",
"x",
"exposure",
"constraints",
"combination",
"."
] |
python
|
train
|
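The comment in the slot above notes that an icon cannot be centered without a custom item delegate; per-item text alignment, by contrast, is built in. A hedged sketch (PyQt5 import shown; the actual project may use a different Qt binding):

```python
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QTableWidgetItem

item = QTableWidgetItem(u"\u2022")        # the same bullet marker used above
item.setTextAlignment(Qt.AlignCenter)     # text, unlike icons, centers per item
```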
HPENetworking/PYHPEIMC
|
pyhpeimc/plat/vlanm.py
|
https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/pyhpeimc/plat/vlanm.py#L455-L502
|
def create_dev_vlan(vlanid, vlan_name, auth, url, devid=None, devip=None):
"""
function takes the devid, vlanid, and vlan_name of a specific device and 802.1q VLAN tag
and issues a RESTful call to add the specified VLAN to the target device. VLAN Name
MUST be valid on the target device.
:param vlanid:int or str value of target 802.1q VLAN
:param vlan_name: str value of the target 802.1q VLAN name. MUST be valid name on target device.
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:param devid: str requires devid of the target device
:param devip: str of ipv4 address of the target device
:return: int HTTP response code (201 if successfully created, 409 on conflict) or str error message
:rtype: int or str
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.vlanm import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> create_dev_vlan = create_dev_vlan('350', '200', 'test vlan', auth.creds, auth.url)
"""
if devip is not None:
devid = get_dev_details(devip, auth, url)['id']
create_dev_vlan_url = "/imcrs/vlan?devId=" + str(devid)
f_url = url + create_dev_vlan_url
payload = '''{"vlanId":"%s", "vlanName":"%s"}''' % (str(vlanid), vlan_name)
try:
    response = requests.post(f_url, data=payload, auth=auth, headers=HEADERS)
    if response.status_code == 201:
        print('Vlan Created')
        return 201
    elif response.status_code == 409:
        print('''Unable to create VLAN.\nVLAN Already Exists\nDevice does not support VLAN
        function''')
        return 409
    return response.status_code
except requests.exceptions.RequestException as error:
    return "Error:\n" + str(error) + " create_dev_vlan: An Error has occurred"
|
[
"def",
"create_dev_vlan",
"(",
"vlanid",
",",
"vlan_name",
",",
"auth",
",",
"url",
",",
"devid",
"=",
"None",
",",
"devip",
"=",
"None",
")",
":",
"if",
"devip",
"is",
"not",
"None",
":",
"devid",
"=",
"get_dev_details",
"(",
"devip",
",",
"auth",
",",
"url",
")",
"[",
"'id'",
"]",
"create_dev_vlan_url",
"=",
"\"/imcrs/vlan?devId=\"",
"+",
"str",
"(",
"devid",
")",
"f_url",
"=",
"url",
"+",
"create_dev_vlan_url",
"payload",
"=",
"'''{\"vlanId\":\"%s\", \"vlanName\":\"%s\"}'''",
"%",
"(",
"str",
"(",
"vlanid",
")",
",",
"vlan_name",
")",
"response",
"=",
"requests",
".",
"post",
"(",
"f_url",
",",
"data",
"=",
"payload",
",",
"auth",
"=",
"auth",
",",
"headers",
"=",
"HEADERS",
")",
"try",
":",
"if",
"response",
".",
"status_code",
"==",
"201",
":",
"print",
"(",
"'Vlan Created'",
")",
"return",
"201",
"elif",
"response",
".",
"status_code",
"==",
"409",
":",
"print",
"(",
"'''Unable to create VLAN.\\nVLAN Already Exists\\nDevice does not support VLAN\n function'''",
")",
"return",
"409",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
"as",
"error",
":",
"return",
"\"Error:\\n\"",
"+",
"str",
"(",
"error",
")",
"+",
"\" create_dev_vlan: An Error has occured\""
] |
function takes the devid, vlanid, and vlan_name of a specific device and 802.1q VLAN tag
and issues a RESTful call to add the specified VLAN to the target device. VLAN Name
MUST be valid on the target device.
:param vlanid:int or str value of target 802.1q VLAN
:param vlan_name: str value of the target 802.1q VLAN name. MUST be valid name on target device.
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:param devid: str requires devid of the target device
:param devip: str of ipv4 address of the target device
:return: int HTTP response code (201 if successfully created, 409 on conflict) or str error message
:rtype: int or str
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.vlanm import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> create_dev_vlan = create_dev_vlan('350', '200', 'test vlan', auth.creds, auth.url)
|
[
"function",
"takes",
"devid",
"and",
"vlanid",
"vlan_name",
"of",
"specific",
"device",
"and",
"802",
".",
"1q",
"VLAN",
"tag",
"and",
"issues",
"a",
"RESTFUL",
"call",
"to",
"add",
"the",
"specified",
"VLAN",
"from",
"the",
"target",
"device",
".",
"VLAN",
"Name",
"MUST",
"be",
"valid",
"on",
"target",
"device",
"."
] |
python
|
train
|
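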
estnltk/estnltk
|
estnltk/mw_verbs/basic_verbchain_detection.py
|
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/basic_verbchain_detection.py#L1134-L1282
|
def _expandVerbChainsBySubcat( clauseTokens, clauseID, foundChains, verbSubcat, \
skipQuestionable=False, \
breakOnPunctuation=True ):
'''
Method that attempts to expand predicate phrases ending in (non-'olema') verbs,
appending new infinitive verbs to their end according to government relations,
e.g. "kutsub" + "langetama"
"püütakse" + "keelustada" "või" "takistada"
"ei julgenud" + "arvata",
"ei hakka" + "tülitama";
The input 'clauseTokens' is a list containing all the words of one clause (word
analyses produced by pyvabamorf); the input 'verbSubcat' contains data about
verb-to-infinitive-verb government relations;
As a result, the existing list of verb chains (foundChains) is extended,
lengthening the phrases in it that end with a verb, whenever possible;
'''
global _breakerJaNingEgaVoi, _breakerKomaLopus, _breakerPunktuats
verb = WordTemplate({POSTAG:'V'})
verbInf1 = WordTemplate({POSTAG:'V', FORM:'^(da|ma|maks|mas|mast|mata)$'})
verbOlema = WordTemplate({POSTAG:'V', ROOT:'^(ole)$'})
sonaMitte = WordTemplate({ROOT:'^mitte$',POSTAG:'D'})
# J22dvustame s6nad, mis kuuluvad juba mingi tuvastatud verbifraasi koosseisu
annotatedWords = []
for verbObj in foundChains:
if (len(verbObj[PATTERN])==1 and re.match('^(ei|ära|ega)$', verbObj[PATTERN][0])):
# V2lja j22vad yksikuna esinevad ei/ära/ega, kuna need tõenäoliselt ei sega
continue
annotatedWords.extend( verbObj[PHRASE] )
# Leiame, millised verbid on veel vabad (st v6ivad potentsiaalselt liituda)
freeVerbsWIDs = [t[WORD_ID] for t in clauseTokens if verbInf1.matches(t) and t[WORD_ID] not in annotatedWords]
for verbObj in foundChains:
# Leiame, kas fraas kuulub antud osalausesse ning on laiendatav
if _isVerbExpansible(verbObj, clauseTokens, clauseID):
# Leiame viimasele s6nale vastava token'i, selle lemma ja vormitunnuse
lastToken = [token for token in clauseTokens if token[WORD_ID] == verbObj[PHRASE][-1]]
lastIndex = [i for i in range(len(clauseTokens)) if clauseTokens[i][WORD_ID] == verbObj[PHRASE][-1]]
lastToken = lastToken[0]
lastIndex = lastIndex[0]
mainVerb = [(analysis[ROOT], analysis[FORM]) for analysis in verb.matchingAnalyses(lastToken)]
mainVerbLemma = mainVerb[0][0]
mainVerbForm = mainVerb[0][1]
positivePhrase = (verbObj[POLARITY] == 'POS')
egaPhrase = (verbObj[PATTERN][0] == 'ega')
# Teeme kindlaks, kas verbi lemma on ylesm2rgitud rektsiooniseoste leksikoni
if mainVerbLemma in verbSubcat:
subcatForms = verbSubcat[ mainVerbLemma ]
for subcatForm in subcatForms:
foundSubcatChain = []
addingCompleted = False
# Kui on tegu vaieldava rektsiooniseosega: j2tame vahele v6i, kui vaieldavad
# on lubatud, eemaldame vaieldavuse m2rgi
if re.match("^.+\?$", subcatForm):
if skipQuestionable:
continue
else:
subcatForm = subcatForm.replace('?', '')
#
# 1) Otsime sobivat verbi v6i verbifraasi s6na tagant, osalause teisest poolest
#
j = lastIndex + 1
while (j < len(clauseTokens)):
token = clauseTokens[j]
tokenWID = token[WORD_ID]
# Katkestame kui:
# *) satume juba m2rgendatud s6nale;
# *) satume punktuatsioonile;
if tokenWID in annotatedWords:
break
if breakOnPunctuation and _breakerPunktuats.matches(token):
break
# Lisame kui:
# *) satume konjunktsioonile;
# *) satume sobivas vormis verbile;
if _breakerJaNingEgaVoi.matches(token):
foundSubcatChain.append(('&', token))
if verb.matches(token):
tokenForms = [analysis[FORM] for analysis in verb.matchingAnalyses(token)]
if subcatForm in tokenForms:
foundSubcatChain.append( (subcatForm, token) )
# Katkestame kui:
# *) satume komale (kuna koma v6ib kinnituda sobiva s6na l6ppu);
if _breakerKomaLopus.matches(token):
break
j += 1
#
# Kui osalause teisest poolest midagi ei leidnud, vaatame
# osalause esimest poolt:
#
# 2) Otsime sobivat verbi v6i verbifraasi vahetult s6na algusest
# (seda vaid siis, kui tegemist pole nö 'ega'-verbifraasiga -
# nondele midagi eelneda ei saagi);
# ( NB! 'ega' fraaside puhul tuleks tegelikult ka tagasi vaadata,
# aga ainult verbi ja 'ega' vahele, ja mitte mingil juhul
# 'ega'-st ettepoole );
#
if not _suitableVerbExpansion( foundSubcatChain ) and not egaPhrase:
minWid = min( verbObj[PHRASE] )
j = lastIndex - 1
while (j > -1):
token = clauseTokens[j]
tokenWID = token[WORD_ID]
# Katkestame kui:
# *) satume juba m2rgendatud s6nale (mis pole sellest fraasist);
# *) satume komale v6i muule punktuatsioonile;
# *) satume s6nale, mis on k6ige esimesest fraasiliikmest tagapool kui 2 s6na;
if tokenWID in annotatedWords and tokenWID not in verbObj[PHRASE]:
break
if _breakerKomaLopus.matches(token) or (breakOnPunctuation and _breakerPunktuats.matches(token)):
break
if token[WORD_ID]+1 < minWid:
break
# Lisame kui:
# *) satume konjunktsioonile;
# *) satume sobivas vormis verbile;
if _breakerJaNingEgaVoi.matches(token):
foundSubcatChain.append(('&', token))
if verb.matches(token):
tokenForms = [analysis[FORM] for analysis in verb.matchingAnalyses(token)]
if subcatForm in tokenForms:
foundSubcatChain.append( (subcatForm, token) )
j -= 1
suitablePhrase = _suitableVerbExpansion( foundSubcatChain )
if suitablePhrase:
#
# Kui sobiv fraasikandidaat leidus, teostamine liitmise
#
for token in suitablePhrase:
tokenWID = token[WORD_ID]
verbObj[PHRASE].append( tokenWID )
annotatedWords.append( tokenWID )
if _breakerJaNingEgaVoi.matches(token):
verbObj[PATTERN].append('&')
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( token, _breakerJaNingEgaVoi ) )
elif len(suitablePhrase) == 1 and verbOlema.matches(token):
verbObj[PATTERN].append('ole')
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( token, verbOlema ) )
freeVerbsWIDs.remove( tokenWID )
else:
verbObj[PATTERN].append('verb')
analysisIDs = [i for i in range(len(token[ANALYSIS])) if subcatForm == token[ANALYSIS][i][FORM]]
assert len(analysisIDs) > 0
verbObj[ANALYSIS_IDS].append( analysisIDs )
freeVerbsWIDs.remove( tokenWID )
if not freeVerbsWIDs:
verbObj[OTHER_VERBS] = False
addingCompleted = True
if addingCompleted:
break
|
[
"def",
"_expandVerbChainsBySubcat",
"(",
"clauseTokens",
",",
"clauseID",
",",
"foundChains",
",",
"verbSubcat",
",",
"skipQuestionable",
"=",
"False",
",",
"breakOnPunctuation",
"=",
"True",
")",
":",
"global",
"_breakerJaNingEgaVoi",
",",
"_breakerKomaLopus",
",",
"_breakerPunktuats",
"verb",
"=",
"WordTemplate",
"(",
"{",
"POSTAG",
":",
"'V'",
"}",
")",
"verbInf1",
"=",
"WordTemplate",
"(",
"{",
"POSTAG",
":",
"'V'",
",",
"FORM",
":",
"'^(da|ma|maks|mas|mast|mata)$'",
"}",
")",
"verbOlema",
"=",
"WordTemplate",
"(",
"{",
"POSTAG",
":",
"'V'",
",",
"ROOT",
":",
"'^(ole)$'",
"}",
")",
"sonaMitte",
"=",
"WordTemplate",
"(",
"{",
"ROOT",
":",
"'^mitte$'",
",",
"POSTAG",
":",
"'D'",
"}",
")",
"# J22dvustame s6nad, mis kuuluvad juba mingi tuvastatud verbifraasi koosseisu\r",
"annotatedWords",
"=",
"[",
"]",
"for",
"verbObj",
"in",
"foundChains",
":",
"if",
"(",
"len",
"(",
"verbObj",
"[",
"PATTERN",
"]",
")",
"==",
"1",
"and",
"re",
".",
"match",
"(",
"'^(ei|ära|ega)$',",
" ",
"erbObj[",
"P",
"ATTERN]",
"[",
"0",
"]",
")",
")",
":",
"\r",
"# V2lja j22vad yksikuna esinevad ei/ära/ega, kuna need tõenäoliselt ei sega\r",
"continue",
"annotatedWords",
".",
"extend",
"(",
"verbObj",
"[",
"PHRASE",
"]",
")",
"# Leiame, millised verbid on veel vabad (st v6ivad potentsiaalselt liituda)\r",
"freeVerbsWIDs",
"=",
"[",
"t",
"[",
"WORD_ID",
"]",
"for",
"t",
"in",
"clauseTokens",
"if",
"verbInf1",
".",
"matches",
"(",
"t",
")",
"and",
"t",
"[",
"WORD_ID",
"]",
"not",
"in",
"annotatedWords",
"]",
"for",
"verbObj",
"in",
"foundChains",
":",
"# Leiame, kas fraas kuulub antud osalausesse ning on laiendatav\r",
"if",
"_isVerbExpansible",
"(",
"verbObj",
",",
"clauseTokens",
",",
"clauseID",
")",
":",
"# Leiame viimasele s6nale vastava token'i, selle lemma ja vormitunnuse\r",
"lastToken",
"=",
"[",
"token",
"for",
"token",
"in",
"clauseTokens",
"if",
"token",
"[",
"WORD_ID",
"]",
"==",
"verbObj",
"[",
"PHRASE",
"]",
"[",
"-",
"1",
"]",
"]",
"lastIndex",
"=",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"clauseTokens",
")",
")",
"if",
"clauseTokens",
"[",
"i",
"]",
"[",
"WORD_ID",
"]",
"==",
"verbObj",
"[",
"PHRASE",
"]",
"[",
"-",
"1",
"]",
"]",
"lastToken",
"=",
"lastToken",
"[",
"0",
"]",
"lastIndex",
"=",
"lastIndex",
"[",
"0",
"]",
"mainVerb",
"=",
"[",
"(",
"analysis",
"[",
"ROOT",
"]",
",",
"analysis",
"[",
"FORM",
"]",
")",
"for",
"analysis",
"in",
"verb",
".",
"matchingAnalyses",
"(",
"lastToken",
")",
"]",
"mainVerbLemma",
"=",
"mainVerb",
"[",
"0",
"]",
"[",
"0",
"]",
"mainVerbForm",
"=",
"mainVerb",
"[",
"0",
"]",
"[",
"1",
"]",
"positivePhrase",
"=",
"(",
"verbObj",
"[",
"POLARITY",
"]",
"==",
"'POS'",
")",
"egaPhrase",
"=",
"(",
"verbObj",
"[",
"PATTERN",
"]",
"[",
"0",
"]",
"==",
"'ega'",
")",
"# Teeme kindlaks, kas verbi lemma on ylesm2rgitud rektsiooniseoste leksikoni\r",
"if",
"mainVerbLemma",
"in",
"verbSubcat",
":",
"subcatForms",
"=",
"verbSubcat",
"[",
"mainVerbLemma",
"]",
"for",
"subcatForm",
"in",
"subcatForms",
":",
"foundSubcatChain",
"=",
"[",
"]",
"addingCompleted",
"=",
"False",
"# Kui on tegu vaieldava rektsiooniseosega: j2tame vahele v6i, kui vaieldavad\r",
"# on lubatud, eemaldame vaieldavuse m2rgi \r",
"if",
"re",
".",
"match",
"(",
"\"^.+\\?$\"",
",",
"subcatForm",
")",
":",
"if",
"skipQuestionable",
":",
"continue",
"else",
":",
"subcatForm",
"=",
"subcatForm",
".",
"replace",
"(",
"'?'",
",",
"''",
")",
"#\r",
"# 1) Otsime sobivat verbi v6i verbifraasi s6na tagant, osalause teisest poolest\r",
"#\r",
"j",
"=",
"lastIndex",
"+",
"1",
"while",
"(",
"j",
"<",
"len",
"(",
"clauseTokens",
")",
")",
":",
"token",
"=",
"clauseTokens",
"[",
"j",
"]",
"tokenWID",
"=",
"token",
"[",
"WORD_ID",
"]",
"# Katkestame kui:\r",
"# *) satume juba m2rgendatud s6nale;\r",
"# *) satume punktuatsioonile;\r",
"if",
"tokenWID",
"in",
"annotatedWords",
":",
"break",
"if",
"breakOnPunctuation",
"and",
"_breakerPunktuats",
".",
"matches",
"(",
"token",
")",
":",
"break",
"# Lisame kui:\r",
"# *) satume konjunktsioonile;\r",
"# *) satume sobivas vormis verbile;\r",
"if",
"_breakerJaNingEgaVoi",
".",
"matches",
"(",
"token",
")",
":",
"foundSubcatChain",
".",
"append",
"(",
"(",
"'&'",
",",
"token",
")",
")",
"if",
"verb",
".",
"matches",
"(",
"token",
")",
":",
"tokenForms",
"=",
"[",
"analysis",
"[",
"FORM",
"]",
"for",
"analysis",
"in",
"verb",
".",
"matchingAnalyses",
"(",
"token",
")",
"]",
"if",
"subcatForm",
"in",
"tokenForms",
":",
"foundSubcatChain",
".",
"append",
"(",
"(",
"subcatForm",
",",
"token",
")",
")",
"# Katkestame kui:\r",
"# *) satume komale (kuna koma v6ib kinnituda sobiva s6na l6ppu);\r",
"if",
"_breakerKomaLopus",
".",
"matches",
"(",
"token",
")",
":",
"break",
"j",
"+=",
"1",
"#\r",
"# Kui osalause teisest poolest midagi ei leidnud, vaatame \r",
"# osalause esimest poolt:\r",
"#\r",
"# 2) Otsime sobivat verbi v6i verbifraasi vahetult s6na algusest\r",
"# (seda vaid siis, kui tegemist pole nö 'ega'-verbifraasiga - \r",
"# nondele midagi eelneda ei saagi);\r",
"# ( NB! 'ega' fraaside puhul tuleks tegelikult ka tagasi vaadata,\r",
"# aga ainult verbi ja 'ega' vahele, ja mitte mingil juhul \r",
"# 'ega'-st ettepoole );\r",
"#\r",
"if",
"not",
"_suitableVerbExpansion",
"(",
"foundSubcatChain",
")",
"and",
"not",
"egaPhrase",
":",
"minWid",
"=",
"min",
"(",
"verbObj",
"[",
"PHRASE",
"]",
")",
"j",
"=",
"lastIndex",
"-",
"1",
"while",
"(",
"j",
">",
"-",
"1",
")",
":",
"token",
"=",
"clauseTokens",
"[",
"j",
"]",
"tokenWID",
"=",
"token",
"[",
"WORD_ID",
"]",
"# Katkestame kui:\r",
"# *) satume juba m2rgendatud s6nale (mis pole sellest fraasist);\r",
"# *) satume komale v6i muule punktuatsioonile;\r",
"# *) satume s6nale, mis on k6ige esimesest fraasiliikmest tagapool kui 2 s6na;\r",
"if",
"tokenWID",
"in",
"annotatedWords",
"and",
"tokenWID",
"not",
"in",
"verbObj",
"[",
"PHRASE",
"]",
":",
"break",
"if",
"_breakerKomaLopus",
".",
"matches",
"(",
"token",
")",
"or",
"(",
"breakOnPunctuation",
"and",
"_breakerPunktuats",
".",
"matches",
"(",
"token",
")",
")",
":",
"break",
"if",
"token",
"[",
"WORD_ID",
"]",
"+",
"1",
"<",
"minWid",
":",
"break",
"# Lisame kui:\r",
"# *) satume konjunktsioonile;\r",
"# *) satume sobivas vormis verbile;\r",
"if",
"_breakerJaNingEgaVoi",
".",
"matches",
"(",
"token",
")",
":",
"foundSubcatChain",
".",
"append",
"(",
"(",
"'&'",
",",
"token",
")",
")",
"if",
"verb",
".",
"matches",
"(",
"token",
")",
":",
"tokenForms",
"=",
"[",
"analysis",
"[",
"FORM",
"]",
"for",
"analysis",
"in",
"verb",
".",
"matchingAnalyses",
"(",
"token",
")",
"]",
"if",
"subcatForm",
"in",
"tokenForms",
":",
"foundSubcatChain",
".",
"append",
"(",
"(",
"subcatForm",
",",
"token",
")",
")",
"j",
"-=",
"1",
"suitablePhrase",
"=",
"_suitableVerbExpansion",
"(",
"foundSubcatChain",
")",
"if",
"suitablePhrase",
":",
"#\r",
"# Kui sobiv fraasikandidaat leidus, teostamine liitmise\r",
"#\r",
"for",
"token",
"in",
"suitablePhrase",
":",
"tokenWID",
"=",
"token",
"[",
"WORD_ID",
"]",
"verbObj",
"[",
"PHRASE",
"]",
".",
"append",
"(",
"tokenWID",
")",
"annotatedWords",
".",
"append",
"(",
"tokenWID",
")",
"if",
"_breakerJaNingEgaVoi",
".",
"matches",
"(",
"token",
")",
":",
"verbObj",
"[",
"PATTERN",
"]",
".",
"append",
"(",
"'&'",
")",
"verbObj",
"[",
"ANALYSIS_IDS",
"]",
".",
"append",
"(",
"_getMatchingAnalysisIDs",
"(",
"token",
",",
"_breakerJaNingEgaVoi",
")",
")",
"elif",
"len",
"(",
"suitablePhrase",
")",
"==",
"1",
"and",
"verbOlema",
".",
"matches",
"(",
"token",
")",
":",
"verbObj",
"[",
"PATTERN",
"]",
".",
"append",
"(",
"'ole'",
")",
"verbObj",
"[",
"ANALYSIS_IDS",
"]",
".",
"append",
"(",
"_getMatchingAnalysisIDs",
"(",
"token",
",",
"verbOlema",
")",
")",
"freeVerbsWIDs",
".",
"remove",
"(",
"tokenWID",
")",
"else",
":",
"verbObj",
"[",
"PATTERN",
"]",
".",
"append",
"(",
"'verb'",
")",
"analysisIDs",
"=",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"token",
"[",
"ANALYSIS",
"]",
")",
")",
"if",
"subcatForm",
"==",
"token",
"[",
"ANALYSIS",
"]",
"[",
"i",
"]",
"[",
"FORM",
"]",
"]",
"assert",
"len",
"(",
"analysisIDs",
")",
">",
"0",
"verbObj",
"[",
"ANALYSIS_IDS",
"]",
".",
"append",
"(",
"analysisIDs",
")",
"freeVerbsWIDs",
".",
"remove",
"(",
"tokenWID",
")",
"if",
"not",
"freeVerbsWIDs",
":",
"verbObj",
"[",
"OTHER_VERBS",
"]",
"=",
"False",
"addingCompleted",
"=",
"True",
"if",
"addingCompleted",
":",
"break"
] |
Method that attempts to expand predicate phrases ending in (non-'olema') verbs,
appending new infinitive verbs to their end according to government relations,
e.g. "kutsub" + "langetama"
"püütakse" + "keelustada" "või" "takistada"
"ei julgenud" + "arvata",
"ei hakka" + "tülitama";
The input 'clauseTokens' is a list containing all the words of one clause (word
analyses produced by pyvabamorf); the input 'verbSubcat' contains data about
verb-to-infinitive-verb government relations;
As a result, the existing list of verb chains (foundChains) is extended,
lengthening the phrases in it that end with a verb, whenever possible;
|
[
"Meetod",
"mis",
"proovib",
"laiendada",
"(",
"mitte",
"-",
"olema",
")",
"verbidega",
"l6ppevaid",
"predikaadifraase",
"lisades",
"nende",
"lõppu",
"rektsiooniseoste",
"järgi",
"uusi",
"infiniitverbe",
"nt",
"kutsub",
"+",
"langetama",
"püütakse",
"+",
"keelustada",
"või",
"takistada",
"ei",
"julgenud",
"+",
"arvata",
"ei",
"hakka",
"+",
"tülitama",
";",
"Sisend",
"clauseTokens",
"on",
"list",
"mis",
"sisaldab",
"yhe",
"osalause",
"k6iki",
"s6nu",
"(",
"pyvabamorfi",
"poolt",
"tehtud",
"s6na",
"-",
"analyyse",
")",
";",
"Sisend",
"verbSubcat",
"sisaldab",
"andmeid",
"verb",
"-",
"infiniitverb",
"rektsiooniseoste",
"kohta",
";",
"Tulemusena",
"t2iendatakse",
"olemasolevat",
"verbijadade",
"listi",
"(",
"foundChains",
")",
"pikendades",
"seal",
"olevaid",
"verbiga",
"lõppevaid",
"fraase",
"millal",
"võimalik",
";"
] |
python
|
train
|
ray-project/ray
|
python/ray/tune/ray_trial_executor.py
|
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/ray_trial_executor.py#L499-L514
|
def _checkpoint_and_erase(self, trial):
"""Checkpoints the model and erases old checkpoints
if needed.
Parameters
----------
trial : trial to save
"""
with warn_if_slow("save_to_disk"):
trial._checkpoint.value = ray.get(trial.runner.save.remote())
if len(trial.history) >= trial.keep_checkpoints_num:
ray.get(trial.runner.delete_checkpoint.remote(trial.history[-1]))
trial.history.pop()
trial.history.insert(0, trial._checkpoint.value)
|
[
"def",
"_checkpoint_and_erase",
"(",
"self",
",",
"trial",
")",
":",
"with",
"warn_if_slow",
"(",
"\"save_to_disk\"",
")",
":",
"trial",
".",
"_checkpoint",
".",
"value",
"=",
"ray",
".",
"get",
"(",
"trial",
".",
"runner",
".",
"save",
".",
"remote",
"(",
")",
")",
"if",
"len",
"(",
"trial",
".",
"history",
")",
">=",
"trial",
".",
"keep_checkpoints_num",
":",
"ray",
".",
"get",
"(",
"trial",
".",
"runner",
".",
"delete_checkpoint",
".",
"remote",
"(",
"trial",
".",
"history",
"[",
"-",
"1",
"]",
")",
")",
"trial",
".",
"history",
".",
"pop",
"(",
")",
"trial",
".",
"history",
".",
"insert",
"(",
"0",
",",
"trial",
".",
"_checkpoint",
".",
"value",
")"
] |
Checkpoints the model and erases old checkpoints
if needed.
Parameters
----------
trial : trial to save
|
[
"Checkpoints",
"the",
"model",
"and",
"erases",
"old",
"checkpoints",
"if",
"needed",
".",
"Parameters",
"----------",
"trial",
":",
"trial",
"to",
"save"
] |
python
|
train
|
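The eviction policy in `_checkpoint_and_erase` above (newest at the front, oldest dropped from the tail once `keep_checkpoints_num` is reached) can be checked in isolation. A sketch with a plain list and a callback standing in for the remote save/delete calls:

```python
def checkpoint_and_erase(history, new_checkpoint, keep, delete):
    if len(history) >= keep:
        delete(history[-1])     # evict the oldest checkpoint, kept at the tail
        history.pop()
    history.insert(0, new_checkpoint)


deleted = []
hist = ["b", "a"]
checkpoint_and_erase(hist, "c", keep=2, delete=deleted.append)
assert hist == ["c", "b"] and deleted == ["a"]
```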
alephdata/memorious
|
memorious/operations/extract.py
|
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/extract.py#L78-L102
|
def extract(context, data):
"""Extract a compressed file"""
with context.http.rehash(data) as result:
file_path = result.file_path
content_type = result.content_type
extract_dir = random_filename(context.work_path)
if content_type in ZIP_MIME_TYPES:
extracted_files = extract_zip(file_path, extract_dir)
elif content_type in TAR_MIME_TYPES:
extracted_files = extract_tar(file_path, extract_dir, context)
elif content_type in SEVENZIP_MIME_TYPES:
extracted_files = extract_7zip(file_path, extract_dir, context)
else:
context.log.warning(
"Unsupported archive content type: %s", content_type
)
return
extracted_content_hashes = {}
for path in extracted_files:
relative_path = os.path.relpath(path, extract_dir)
content_hash = context.store_file(path)
extracted_content_hashes[relative_path] = content_hash
data['content_hash'] = content_hash
data['file_name'] = relative_path
context.emit(data=data.copy())
|
[
"def",
"extract",
"(",
"context",
",",
"data",
")",
":",
"with",
"context",
".",
"http",
".",
"rehash",
"(",
"data",
")",
"as",
"result",
":",
"file_path",
"=",
"result",
".",
"file_path",
"content_type",
"=",
"result",
".",
"content_type",
"extract_dir",
"=",
"random_filename",
"(",
"context",
".",
"work_path",
")",
"if",
"content_type",
"in",
"ZIP_MIME_TYPES",
":",
"extracted_files",
"=",
"extract_zip",
"(",
"file_path",
",",
"extract_dir",
")",
"elif",
"content_type",
"in",
"TAR_MIME_TYPES",
":",
"extracted_files",
"=",
"extract_tar",
"(",
"file_path",
",",
"extract_dir",
",",
"context",
")",
"elif",
"content_type",
"in",
"SEVENZIP_MIME_TYPES",
":",
"extracted_files",
"=",
"extract_7zip",
"(",
"file_path",
",",
"extract_dir",
",",
"context",
")",
"else",
":",
"context",
".",
"log",
".",
"warning",
"(",
"\"Unsupported archive content type: %s\"",
",",
"content_type",
")",
"return",
"extracted_content_hashes",
"=",
"{",
"}",
"for",
"path",
"in",
"extracted_files",
":",
"relative_path",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"path",
",",
"extract_dir",
")",
"content_hash",
"=",
"context",
".",
"store_file",
"(",
"path",
")",
"extracted_content_hashes",
"[",
"relative_path",
"]",
"=",
"content_hash",
"data",
"[",
"'content_hash'",
"]",
"=",
"content_hash",
"data",
"[",
"'file_name'",
"]",
"=",
"relative_path",
"context",
".",
"emit",
"(",
"data",
"=",
"data",
".",
"copy",
"(",
")",
")"
] |
Extract a compressed file
|
[
"Extract",
"a",
"compressed",
"file"
] |
python
|
train
|
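The if/elif chain in `extract` above is a MIME-type dispatch; writing it as a mapping makes the supported set explicit. A sketch with stub extractors (the names and MIME strings here are illustrative, not the memorious constants):

```python
EXTRACTORS = {
    "application/zip": lambda path, dest: ["a.txt"],    # stub for extract_zip
    "application/x-tar": lambda path, dest: ["b.txt"],  # stub for extract_tar
}


def extract_archive(path, dest, content_type):
    extractor = EXTRACTORS.get(content_type)
    if extractor is None:
        raise ValueError("Unsupported archive content type: %s" % content_type)
    return extractor(path, dest)


assert extract_archive("f.zip", "/tmp/out", "application/zip") == ["a.txt"]
```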
mongodb/mongo-python-driver
|
pymongo/message.py
|
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/message.py#L311-L349
|
def get_message(self, set_slave_ok, sock_info, use_cmd=False):
"""Get a query message, possibly setting the slaveOk bit."""
if set_slave_ok:
# Set the slaveOk bit.
flags = self.flags | 4
else:
flags = self.flags
ns = self.namespace()
spec = self.spec
if use_cmd:
spec = self.as_command(sock_info)[0]
if sock_info.op_msg_enabled:
request_id, msg, size, _ = _op_msg(
0, spec, self.db, self.read_preference,
set_slave_ok, False, self.codec_options,
ctx=sock_info.compression_context)
return request_id, msg, size
ns = _UJOIN % (self.db, "$cmd")
ntoreturn = -1 # All DB commands return 1 document
else:
# OP_QUERY treats ntoreturn of -1 and 1 the same, return
# one document and close the cursor. We have to use 2 for
# batch size if 1 is specified.
ntoreturn = self.batch_size == 1 and 2 or self.batch_size
if self.limit:
if ntoreturn:
ntoreturn = min(self.limit, ntoreturn)
else:
ntoreturn = self.limit
if sock_info.is_mongos:
spec = _maybe_add_read_preference(spec,
self.read_preference)
return query(flags, ns, self.ntoskip, ntoreturn,
spec, None if use_cmd else self.fields,
self.codec_options, ctx=sock_info.compression_context)
|
[
"def",
"get_message",
"(",
"self",
",",
"set_slave_ok",
",",
"sock_info",
",",
"use_cmd",
"=",
"False",
")",
":",
"if",
"set_slave_ok",
":",
"# Set the slaveOk bit.",
"flags",
"=",
"self",
".",
"flags",
"|",
"4",
"else",
":",
"flags",
"=",
"self",
".",
"flags",
"ns",
"=",
"self",
".",
"namespace",
"(",
")",
"spec",
"=",
"self",
".",
"spec",
"if",
"use_cmd",
":",
"spec",
"=",
"self",
".",
"as_command",
"(",
"sock_info",
")",
"[",
"0",
"]",
"if",
"sock_info",
".",
"op_msg_enabled",
":",
"request_id",
",",
"msg",
",",
"size",
",",
"_",
"=",
"_op_msg",
"(",
"0",
",",
"spec",
",",
"self",
".",
"db",
",",
"self",
".",
"read_preference",
",",
"set_slave_ok",
",",
"False",
",",
"self",
".",
"codec_options",
",",
"ctx",
"=",
"sock_info",
".",
"compression_context",
")",
"return",
"request_id",
",",
"msg",
",",
"size",
"ns",
"=",
"_UJOIN",
"%",
"(",
"self",
".",
"db",
",",
"\"$cmd\"",
")",
"ntoreturn",
"=",
"-",
"1",
"# All DB commands return 1 document",
"else",
":",
"# OP_QUERY treats ntoreturn of -1 and 1 the same, return",
"# one document and close the cursor. We have to use 2 for",
"# batch size if 1 is specified.",
"ntoreturn",
"=",
"self",
".",
"batch_size",
"==",
"1",
"and",
"2",
"or",
"self",
".",
"batch_size",
"if",
"self",
".",
"limit",
":",
"if",
"ntoreturn",
":",
"ntoreturn",
"=",
"min",
"(",
"self",
".",
"limit",
",",
"ntoreturn",
")",
"else",
":",
"ntoreturn",
"=",
"self",
".",
"limit",
"if",
"sock_info",
".",
"is_mongos",
":",
"spec",
"=",
"_maybe_add_read_preference",
"(",
"spec",
",",
"self",
".",
"read_preference",
")",
"return",
"query",
"(",
"flags",
",",
"ns",
",",
"self",
".",
"ntoskip",
",",
"ntoreturn",
",",
"spec",
",",
"None",
"if",
"use_cmd",
"else",
"self",
".",
"fields",
",",
"self",
".",
"codec_options",
",",
"ctx",
"=",
"sock_info",
".",
"compression_context",
")"
] |
Get a query message, possibly setting the slaveOk bit.
|
[
"Get",
"a",
"query",
"message",
"possibly",
"setting",
"the",
"slaveOk",
"bit",
"."
] |
python
|
train
|
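The slaveOk handling above is plain bit-flag arithmetic: `flags | 4` sets bit 2 of the OP_QUERY flags word. A tiny illustration of that idiom; the flag names besides slaveOk are the conventional OP_QUERY bit assignments, stated here from memory rather than from this record.

# OP_QUERY flag bits; slaveOk is value 4, as in `self.flags | 4` above.
TAILABLE_CURSOR = 1 << 1
SLAVE_OK = 1 << 2
NO_CURSOR_TIMEOUT = 1 << 4

flags = 0
flags |= SLAVE_OK                    # set the slaveOk bit
assert flags & SLAVE_OK              # bit is now set
assert not flags & TAILABLE_CURSOR   # other bits untouched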
chrislit/abydos
|
abydos/phonetic/_soundex.py
|
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/phonetic/_soundex.py#L190-L255
|
def soundex(word, max_length=4, var='American', reverse=False, zero_pad=True):
"""Return the Soundex code for a word.
This is a wrapper for :py:meth:`Soundex.encode`.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 4)
var : str
The variant of the algorithm to employ (defaults to ``American``):
- ``American`` follows the American Soundex algorithm, as described
at :cite:`US:2007` and in :cite:`Knuth:1998`; this is also called
Miracode
- ``special`` follows the rules from the 1880-1910 US Census
retrospective re-analysis, in which h & w are not treated as
blocking consonants but as vowels. Cf. :cite:`Repici:2013`.
- ``Census`` follows the rules laid out in GIL 55 :cite:`US:1997`
by the US Census, including coding prefixed and unprefixed
versions of some names
reverse : bool
Reverse the word before computing the selected Soundex (defaults to
False); This results in "Reverse Soundex", which is useful for blocking
in cases where the initial elements may be in error.
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length string
Returns
-------
str
The Soundex value
Examples
--------
>>> soundex("Christopher")
'C623'
>>> soundex("Niall")
'N400'
>>> soundex('Smith')
'S530'
>>> soundex('Schmidt')
'S530'
>>> soundex('Christopher', max_length=-1)
'C623160000000000000000000000000000000000000000000000000000000000'
>>> soundex('Christopher', max_length=-1, zero_pad=False)
'C62316'
>>> soundex('Christopher', reverse=True)
'R132'
>>> soundex('Ashcroft')
'A261'
>>> soundex('Asicroft')
'A226'
>>> soundex('Ashcroft', var='special')
'A226'
>>> soundex('Asicroft', var='special')
'A226'
"""
return Soundex().encode(word, max_length, var, reverse, zero_pad)
|
[
"def",
"soundex",
"(",
"word",
",",
"max_length",
"=",
"4",
",",
"var",
"=",
"'American'",
",",
"reverse",
"=",
"False",
",",
"zero_pad",
"=",
"True",
")",
":",
"return",
"Soundex",
"(",
")",
".",
"encode",
"(",
"word",
",",
"max_length",
",",
"var",
",",
"reverse",
",",
"zero_pad",
")"
] |
Return the Soundex code for a word.
This is a wrapper for :py:meth:`Soundex.encode`.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 4)
var : str
The variant of the algorithm to employ (defaults to ``American``):
- ``American`` follows the American Soundex algorithm, as described
at :cite:`US:2007` and in :cite:`Knuth:1998`; this is also called
Miracode
- ``special`` follows the rules from the 1880-1910 US Census
retrospective re-analysis, in which h & w are not treated as
blocking consonants but as vowels. Cf. :cite:`Repici:2013`.
- ``Census`` follows the rules laid out in GIL 55 :cite:`US:1997`
by the US Census, including coding prefixed and unprefixed
versions of some names
reverse : bool
Reverse the word before computing the selected Soundex (defaults to
False); This results in "Reverse Soundex", which is useful for blocking
in cases where the initial elements may be in error.
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length string
Returns
-------
str
The Soundex value
Examples
--------
>>> soundex("Christopher")
'C623'
>>> soundex("Niall")
'N400'
>>> soundex('Smith')
'S530'
>>> soundex('Schmidt')
'S530'
>>> soundex('Christopher', max_length=-1)
'C623160000000000000000000000000000000000000000000000000000000000'
>>> soundex('Christopher', max_length=-1, zero_pad=False)
'C62316'
>>> soundex('Christopher', reverse=True)
'R132'
>>> soundex('Ashcroft')
'A261'
>>> soundex('Asicroft')
'A226'
>>> soundex('Ashcroft', var='special')
'A226'
>>> soundex('Asicroft', var='special')
'A226'
|
[
"Return",
"the",
"Soundex",
"code",
"for",
"a",
"word",
"."
] |
python
|
valid
|
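The wrapper above only delegates to Soundex.encode, but its doctest values pin down the American variant's behaviour (h and w are transparent, so Ashcroft encodes as A261). As a cross-check, here is an independent minimal sketch of classic American Soundex that reproduces those doctests; it is not the library's implementation, and it does not reproduce the max_length=-1 padding behaviour shown above.

def simple_soundex(word, max_length=4, zero_pad=True):
    """Minimal American Soundex sketch: h/w transparent, vowels coded 0 then dropped."""
    codes = {c: d for d, letters in enumerate(
        ("aeiouy", "bfpv", "cgjkqsxz", "dt", "l", "mn", "r"))
        for c in letters}
    word = "".join(c for c in word.lower() if c.isalpha())
    if not word:
        return ""
    digits = []
    last = None
    for c in word:
        if c in "hw":          # transparent: do not break runs of equal codes
            continue
        d = codes[c]
        if d != last:
            digits.append(d)
        last = d
    out = word[0].upper() + "".join(str(d) for d in digits[1:] if d != 0)
    if max_length > 0:
        out = out[:max_length]
        if zero_pad:
            out = out.ljust(max_length, "0")
    return out

assert simple_soundex("Christopher") == "C623"
assert simple_soundex("Niall") == "N400"
assert simple_soundex("Smith") == simple_soundex("Schmidt") == "S530"
assert simple_soundex("Ashcroft") == "A261"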
zhmcclient/python-zhmcclient
|
zhmcclient/_metrics.py
|
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_metrics.py#L307-L333
|
def get_metrics(self):
"""
Retrieve the current metric values for this :term:`Metrics Context`
resource from the HMC.
The metric values are returned by this method as a string in the
`MetricsResponse` format described with the 'Get Metrics' operation in
the :term:`HMC API` book.
The :class:`~zhmcclient.MetricsResponse` class can be used to process
the `MetricsResponse` string returned by this method, and provides
structured access to the metrics values.
Returns:
:term:`string`:
The current metric values, in the `MetricsResponse` string format.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
metrics_response = self.manager.session.get(self.uri)
return metrics_response
|
[
"def",
"get_metrics",
"(",
"self",
")",
":",
"metrics_response",
"=",
"self",
".",
"manager",
".",
"session",
".",
"get",
"(",
"self",
".",
"uri",
")",
"return",
"metrics_response"
] |
Retrieve the current metric values for this :term:`Metrics Context`
resource from the HMC.
The metric values are returned by this method as a string in the
`MetricsResponse` format described with the 'Get Metrics' operation in
the :term:`HMC API` book.
The :class:`~zhmcclient.MetricsResponse` class can be used to process
the `MetricsResponse` string returned by this method, and provides
structured access to the metrics values.
Returns:
:term:`string`:
The current metric values, in the `MetricsResponse` string format.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
|
[
"Retrieve",
"the",
"current",
"metric",
"values",
"for",
"this",
":",
"term",
":",
"Metrics",
"Context",
"resource",
"from",
"the",
"HMC",
"."
] |
python
|
train
|
saltstack/salt
|
salt/states/rsync.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/rsync.py#L56-L77
|
def _get_changes(rsync_out):
'''
Get changes from the rsync successful output.
'''
copied = list()
deleted = list()
for line in rsync_out.split("\n\n")[0].split("\n")[1:]:
if line.startswith("deleting "):
deleted.append(line.split(" ", 1)[-1])
else:
copied.append(line)
ret = {
'copied': os.linesep.join(sorted(copied)) or "N/A",
'deleted': os.linesep.join(sorted(deleted)) or "N/A",
}
# Return whether anything really changed
ret['changed'] = not ((ret['copied'] == 'N/A') and (ret['deleted'] == 'N/A'))
return ret
|
[
"def",
"_get_changes",
"(",
"rsync_out",
")",
":",
"copied",
"=",
"list",
"(",
")",
"deleted",
"=",
"list",
"(",
")",
"for",
"line",
"in",
"rsync_out",
".",
"split",
"(",
"\"\\n\\n\"",
")",
"[",
"0",
"]",
".",
"split",
"(",
"\"\\n\"",
")",
"[",
"1",
":",
"]",
":",
"if",
"line",
".",
"startswith",
"(",
"\"deleting \"",
")",
":",
"deleted",
".",
"append",
"(",
"line",
".",
"split",
"(",
"\" \"",
",",
"1",
")",
"[",
"-",
"1",
"]",
")",
"else",
":",
"copied",
".",
"append",
"(",
"line",
")",
"ret",
"=",
"{",
"'copied'",
":",
"os",
".",
"linesep",
".",
"join",
"(",
"sorted",
"(",
"copied",
")",
")",
"or",
"\"N/A\"",
",",
"'deleted'",
":",
"os",
".",
"linesep",
".",
"join",
"(",
"sorted",
"(",
"deleted",
")",
")",
"or",
"\"N/A\"",
",",
"}",
"# Return whether anything really changed",
"ret",
"[",
"'changed'",
"]",
"=",
"not",
"(",
"(",
"ret",
"[",
"'copied'",
"]",
"==",
"'N/A'",
")",
"and",
"(",
"ret",
"[",
"'deleted'",
"]",
"==",
"'N/A'",
")",
")",
"return",
"ret"
] |
Get changes from the rsync successful output.
|
[
"Get",
"changes",
"from",
"the",
"rsync",
"successful",
"output",
"."
] |
python
|
train
|
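The parser above keeps the first blank-line-separated block of rsync output, drops its banner line, and classifies the rest by the "deleting " prefix. Running the same slicing on a fabricated rsync-style sample (the sample text is invented for illustration, not captured output) shows what lands where.

sample = "\n".join([
    "sending incremental file list",   # banner line, skipped by the [1:] slice
    "deleting old/stale.conf",
    "etc/app.conf",
    "",                                # blank line: the summary block starts here
    "sent 1234 bytes  received 56 bytes",
])

body = sample.split("\n\n")[0].split("\n")[1:]
deleted = sorted(l.split(" ", 1)[-1] for l in body if l.startswith("deleting "))
copied = sorted(l for l in body if not l.startswith("deleting "))

assert deleted == ["old/stale.conf"]
assert copied == ["etc/app.conf"]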
jaredLunde/vital-tools
|
vital/debug/__init__.py
|
https://github.com/jaredLunde/vital-tools/blob/ea924c9bbb6ec22aa66f8095f018b1ee0099ac04/vital/debug/__init__.py#L879-L882
|
def randtld(self):
""" -> a random #str tld via :mod:tlds """
self.tlds = tuple(tlds.tlds) if not self.tlds else self.tlds
return self.random.choice(self.tlds)
|
[
"def",
"randtld",
"(",
"self",
")",
":",
"self",
".",
"tlds",
"=",
"tuple",
"(",
"tlds",
".",
"tlds",
")",
"if",
"not",
"self",
".",
"tlds",
"else",
"self",
".",
"tlds",
"return",
"self",
".",
"random",
".",
"choice",
"(",
"self",
".",
"tlds",
")"
] |
-> a random #str tld via :mod:tlds
|
[
"-",
">",
"a",
"random",
"#str",
"tld",
"via",
":",
"mod",
":",
"tlds"
] |
python
|
train
|
sendgrid/sendgrid-python
|
sendgrid/helpers/mail/mail.py
|
https://github.com/sendgrid/sendgrid-python/blob/266c2abde7a35dfcce263e06bedc6a0bbdebeac9/sendgrid/helpers/mail/mail.py#L627-L645
|
def dynamic_template_data(self, value):
"""Data for a transactional template
:param value: Data for a transactional template
:type value: DynamicTemplateData, a JSON-serializeable structure
"""
if not isinstance(value, DynamicTemplateData):
value = DynamicTemplateData(value)
try:
personalization = self._personalizations[value.personalization]
has_internal_personalization = True
except IndexError:
personalization = Personalization()
has_internal_personalization = False
personalization.dynamic_template_data = value.dynamic_template_data
if not has_internal_personalization:
self.add_personalization(
personalization, index=value.personalization)
|
[
"def",
"dynamic_template_data",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"DynamicTemplateData",
")",
":",
"value",
"=",
"DynamicTemplateData",
"(",
"value",
")",
"try",
":",
"personalization",
"=",
"self",
".",
"_personalizations",
"[",
"value",
".",
"personalization",
"]",
"has_internal_personalization",
"=",
"True",
"except",
"IndexError",
":",
"personalization",
"=",
"Personalization",
"(",
")",
"has_internal_personalization",
"=",
"False",
"personalization",
".",
"dynamic_template_data",
"=",
"value",
".",
"dynamic_template_data",
"if",
"not",
"has_internal_personalization",
":",
"self",
".",
"add_personalization",
"(",
"personalization",
",",
"index",
"=",
"value",
".",
"personalization",
")"
] |
Data for a transactional template
:param value: Data for a transactional template
:type value: DynamicTemplateData, a JSON-serializeable structure
|
[
"Data",
"for",
"a",
"transactional",
"template"
] |
python
|
train
|
pymupdf/PyMuPDF
|
fitz/fitz.py
|
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L403-L409
|
def abs_unit(self):
"""Return unit vector of a point with positive coordinates."""
s = self.x * self.x + self.y * self.y
if s < 1e-5:
return Point(0,0)
s = math.sqrt(s)
return Point(abs(self.x) / s, abs(self.y) / s)
|
[
"def",
"abs_unit",
"(",
"self",
")",
":",
"s",
"=",
"self",
".",
"x",
"*",
"self",
".",
"x",
"+",
"self",
".",
"y",
"*",
"self",
".",
"y",
"if",
"s",
"<",
"1e-5",
":",
"return",
"Point",
"(",
"0",
",",
"0",
")",
"s",
"=",
"math",
".",
"sqrt",
"(",
"s",
")",
"return",
"Point",
"(",
"abs",
"(",
"self",
".",
"x",
")",
"/",
"s",
",",
"abs",
"(",
"self",
".",
"y",
")",
"/",
"s",
")"
] |
Return unit vector of a point with positive coordinates.
|
[
"Return",
"unit",
"vector",
"of",
"a",
"point",
"with",
"positive",
"coordinates",
"."
] |
python
|
train
|
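abs_unit guards against near-zero vectors before normalising (note the guard compares the squared norm against the threshold, as in the snippet). The same idea on bare floats with math.hypot in place of the explicit sqrt; a standalone sketch, not PyMuPDF's Point class.

import math

def abs_unit(x, y, eps=1e-5):
    """Unit vector with non-negative components, or (0, 0) near the origin."""
    if x * x + y * y < eps:          # same squared-norm guard as above
        return (0.0, 0.0)
    s = math.hypot(x, y)             # sqrt(x*x + y*y)
    return (abs(x) / s, abs(y) / s)

ux, uy = abs_unit(-3.0, 4.0)
assert (ux, uy) == (0.6, 0.8)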
klahnakoski/pyLibrary
|
mo_math/vendor/strangman/stats.py
|
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_math/vendor/strangman/stats.py#L1509-L1542
|
def F_oneway(*lists):
"""
Performs a 1-way ANOVA, returning an F-value and probability given
any number of groups. From Heiman, pp.394-7.
Usage: F_oneway(*lists) where *lists is any number of lists, one per
treatment group
Returns: F value, one-tailed p-value
"""
a = len(lists) # ANOVA on 'a' groups, each in it's own list
means = [0] * a
vars = [0] * a
ns = [0] * a
alldata = []
tmp = lists
means = map(mean, tmp)
vars = map(var, tmp)
ns = map(len, lists)
for i in range(len(lists)):
alldata = alldata + lists[i]
bign = len(alldata)
sstot = ss(alldata) - (square_of_sums(alldata) / float(bign))
ssbn = 0
for list in lists:
ssbn = ssbn + square_of_sums(list) / float(len(list))
ssbn = ssbn - (square_of_sums(alldata) / float(bign))
sswn = sstot - ssbn
dfbn = a - 1
dfwn = bign - a
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
f = msb / msw
prob = fprob(dfbn, dfwn, f)
return f, prob
|
[
"def",
"F_oneway",
"(",
"*",
"lists",
")",
":",
"a",
"=",
"len",
"(",
"lists",
")",
"# ANOVA on 'a' groups, each in it's own list",
"means",
"=",
"[",
"0",
"]",
"*",
"a",
"vars",
"=",
"[",
"0",
"]",
"*",
"a",
"ns",
"=",
"[",
"0",
"]",
"*",
"a",
"alldata",
"=",
"[",
"]",
"tmp",
"=",
"lists",
"means",
"=",
"map",
"(",
"mean",
",",
"tmp",
")",
"vars",
"=",
"map",
"(",
"var",
",",
"tmp",
")",
"ns",
"=",
"map",
"(",
"len",
",",
"lists",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"lists",
")",
")",
":",
"alldata",
"=",
"alldata",
"+",
"lists",
"[",
"i",
"]",
"bign",
"=",
"len",
"(",
"alldata",
")",
"sstot",
"=",
"ss",
"(",
"alldata",
")",
"-",
"(",
"square_of_sums",
"(",
"alldata",
")",
"/",
"float",
"(",
"bign",
")",
")",
"ssbn",
"=",
"0",
"for",
"list",
"in",
"lists",
":",
"ssbn",
"=",
"ssbn",
"+",
"square_of_sums",
"(",
"list",
")",
"/",
"float",
"(",
"len",
"(",
"list",
")",
")",
"ssbn",
"=",
"ssbn",
"-",
"(",
"square_of_sums",
"(",
"alldata",
")",
"/",
"float",
"(",
"bign",
")",
")",
"sswn",
"=",
"sstot",
"-",
"ssbn",
"dfbn",
"=",
"a",
"-",
"1",
"dfwn",
"=",
"bign",
"-",
"a",
"msb",
"=",
"ssbn",
"/",
"float",
"(",
"dfbn",
")",
"msw",
"=",
"sswn",
"/",
"float",
"(",
"dfwn",
")",
"f",
"=",
"msb",
"/",
"msw",
"prob",
"=",
"fprob",
"(",
"dfbn",
",",
"dfwn",
",",
"f",
")",
"return",
"f",
",",
"prob"
] |
Performs a 1-way ANOVA, returning an F-value and probability given
any number of groups. From Heiman, pp.394-7.
Usage: F_oneway(*lists) where *lists is any number of lists, one per
treatment group
Returns: F value, one-tailed p-value
|
[
"Performs",
"a",
"1",
"-",
"way",
"ANOVA",
"returning",
"an",
"F",
"-",
"value",
"and",
"probability",
"given",
"any",
"number",
"of",
"groups",
".",
"From",
"Heiman",
"pp",
".",
"394",
"-",
"7",
"."
] |
python
|
train
|
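F_oneway above is the textbook between/within sum-of-squares decomposition; note the means/vars/ns assignments at the top are never used afterwards (and under Python 3 the map() calls would return lazy iterators anyway). A compact re-derivation of the same F statistic, with the expected value worked out by hand for one small input; the p-value step via fprob is omitted here.

def f_oneway(*groups):
    """One-way ANOVA F statistic via the SS decomposition (no p-value)."""
    alldata = [x for g in groups for x in g]
    bign = len(alldata)
    grand = sum(alldata) / bign
    ss_between = sum(len(g) * (sum(g) / len(g) - grand) ** 2 for g in groups)
    ss_within = sum((x - sum(g) / len(g)) ** 2 for g in groups for x in g)
    df_between = len(groups) - 1
    df_within = bign - len(groups)
    return (ss_between / df_between) / (ss_within / df_within)

# By hand: SSb = 13.5 (df 1), SSw = 4.0 (df 4), so F = 13.5 / 1.0.
assert abs(f_oneway([1, 2, 3], [4, 5, 6]) - 13.5) < 1e-12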
pypa/pipenv
|
pipenv/vendor/plette/models/scripts.py
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/plette/models/scripts.py#L48-L79
|
def cmdify(self, extra_args=None):
"""Encode into a cmd-executable string.
This re-implements CreateProcess's quoting logic to turn a list of
arguments into one single string for the shell to interpret.
* All double quotes are escaped with a backslash.
* Existing backslashes before a quote are doubled, so they are all
escaped properly.
* Backslashes elsewhere are left as-is; cmd will interpret them
literally.
The result is then quoted into a pair of double quotes to be grouped.
An argument is intentionally not quoted if it does not contain
whitespaces. This is done to be compatible with Windows built-in
commands that don't work well with quotes, e.g. everything with `echo`,
and DOS-style (forward slash) switches.
The intended use of this function is to pre-process an argument list
before passing it into ``subprocess.Popen(..., shell=True)``.
See also: https://docs.python.org/3/library/subprocess.html
"""
parts = list(self._parts)
if extra_args:
parts.extend(extra_args)
return " ".join(
arg if not next(re.finditer(r'\s', arg), None)
else '"{0}"'.format(re.sub(r'(\\*)"', r'\1\1\\"', arg))
for arg in parts
)
|
[
"def",
"cmdify",
"(",
"self",
",",
"extra_args",
"=",
"None",
")",
":",
"parts",
"=",
"list",
"(",
"self",
".",
"_parts",
")",
"if",
"extra_args",
":",
"parts",
".",
"extend",
"(",
"extra_args",
")",
"return",
"\" \"",
".",
"join",
"(",
"arg",
"if",
"not",
"next",
"(",
"re",
".",
"finditer",
"(",
"r'\\s'",
",",
"arg",
")",
",",
"None",
")",
"else",
"'\"{0}\"'",
".",
"format",
"(",
"re",
".",
"sub",
"(",
"r'(\\\\*)\"'",
",",
"r'\\1\\1\\\\\"'",
",",
"arg",
")",
")",
"for",
"arg",
"in",
"parts",
")"
] |
Encode into a cmd-executable string.
This re-implements CreateProcess's quoting logic to turn a list of
arguments into one single string for the shell to interpret.
* All double quotes are escaped with a backslash.
* Existing backslashes before a quote are doubled, so they are all
escaped properly.
* Backslashes elsewhere are left as-is; cmd will interpret them
literally.
The result is then quoted into a pair of double quotes to be grouped.
An argument is intentionally not quoted if it does not contain
whitespaces. This is done to be compatible with Windows built-in
commands that don't work well with quotes, e.g. everything with `echo`,
and DOS-style (forward slash) switches.
The intended use of this function is to pre-process an argument list
before passing it into ``subprocess.Popen(..., shell=True)``.
See also: https://docs.python.org/3/library/subprocess.html
|
[
"Encode",
"into",
"a",
"cmd",
"-",
"executable",
"string",
"."
] |
python
|
train
|
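The quoting rule above only wraps arguments containing whitespace, doubling any backslashes that precede a double quote before escaping the quote itself. Running that exact substitution on a few fabricated arguments shows the effect.

import re

def cmdify(parts):
    # The same join expression as in the snippet above.
    return " ".join(
        arg if not next(re.finditer(r'\s', arg), None)
        else '"{0}"'.format(re.sub(r'(\\*)"', r'\1\1\\"', arg))
        for arg in parts
    )

out = cmdify(['echo', 'hello world', 'C:\\Program Files\\app', 'say "hi"'])
assert out == 'echo "hello world" "C:\\Program Files\\app" "say \\"hi\\""'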
gabstopper/smc-python
|
smc/api/session.py
|
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/api/session.py#L572-L603
|
def logout(self):
"""
Logout session from SMC
:return: None
"""
if not self.session:
self.manager._deregister(self)
return
try:
r = self.session.put(self.entry_points.get('logout'))
if r.status_code == 204:
logger.info('Logged out admin: %s of domain: %s successfully',
self.name, self.domain)
else:
logger.error('Logout status was unexpected. Received response '
'with status code: %s', (r.status_code))
except requests.exceptions.SSLError as e:
logger.error('SSL exception thrown during logout: %s', e)
except requests.exceptions.ConnectionError as e:
logger.error('Connection error on logout: %s', e)
finally:
self.entry_points.clear()
self.manager._deregister(self)
self._session = None
try:
delattr(self, 'current_user')
except AttributeError:
pass
logger.debug('Call counters: %s' % counters)
|
[
"def",
"logout",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"session",
":",
"self",
".",
"manager",
".",
"_deregister",
"(",
"self",
")",
"return",
"try",
":",
"r",
"=",
"self",
".",
"session",
".",
"put",
"(",
"self",
".",
"entry_points",
".",
"get",
"(",
"'logout'",
")",
")",
"if",
"r",
".",
"status_code",
"==",
"204",
":",
"logger",
".",
"info",
"(",
"'Logged out admin: %s of domain: %s successfully'",
",",
"self",
".",
"name",
",",
"self",
".",
"domain",
")",
"else",
":",
"logger",
".",
"error",
"(",
"'Logout status was unexpected. Received response '",
"'with status code: %s'",
",",
"(",
"r",
".",
"status_code",
")",
")",
"except",
"requests",
".",
"exceptions",
".",
"SSLError",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'SSL exception thrown during logout: %s'",
",",
"e",
")",
"except",
"requests",
".",
"exceptions",
".",
"ConnectionError",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'Connection error on logout: %s'",
",",
"e",
")",
"finally",
":",
"self",
".",
"entry_points",
".",
"clear",
"(",
")",
"self",
".",
"manager",
".",
"_deregister",
"(",
"self",
")",
"self",
".",
"_session",
"=",
"None",
"try",
":",
"delattr",
"(",
"self",
",",
"'current_user'",
")",
"except",
"AttributeError",
":",
"pass",
"logger",
".",
"debug",
"(",
"'Call counters: %s'",
"%",
"counters",
")"
] |
Logout session from SMC
:return: None
|
[
"Logout",
"session",
"from",
"SMC",
":",
"return",
":",
"None"
] |
python
|
train
|
robertdfrench/psychic-disco
|
psychic_disco/api_gateway_config.py
|
https://github.com/robertdfrench/psychic-disco/blob/3cf167b44c64d64606691fc186be7d9ef8e8e938/psychic_disco/api_gateway_config.py#L27-L32
|
def fetch_method(api_id, resource_id, verb):
""" Fetch extra metadata for this particular method """
return console.get_method(
restApiId=api_id,
resourceId=resource_id,
httpMethod=verb)
|
[
"def",
"fetch_method",
"(",
"api_id",
",",
"resource_id",
",",
"verb",
")",
":",
"return",
"console",
".",
"get_method",
"(",
"restApiId",
"=",
"api_id",
",",
"resourceId",
"=",
"resource_id",
",",
"httpMethod",
"=",
"verb",
")"
] |
Fetch extra metadata for this particular method
|
[
"Fetch",
"extra",
"metadata",
"for",
"this",
"particular",
"method"
] |
python
|
train
|
bcbio/bcbio-nextgen
|
bcbio/ngsalign/hisat2.py
|
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/hisat2.py#L127-L146
|
def get_splicejunction_file(align_dir, data):
"""
locate the splice junction file from hisat2. hisat2 outputs a novel
splicesites file to go along with the provided file, if available.
this combines the two together and outputs a combined file of all
of the known and novel splice junctions
"""
samplename = dd.get_sample_name(data)
align_dir = os.path.dirname(dd.get_work_bam(data))
knownfile = get_known_splicesites_file(align_dir, data)
novelfile = os.path.join(align_dir, "%s-novelsplicesites.bed" % samplename)
bed_files = [x for x in [knownfile, novelfile] if file_exists(x)]
splicejunction = bed.concat(bed_files)
splicejunctionfile = os.path.join(align_dir,
"%s-splicejunctions.bed" % samplename)
if splicejunction:
splicejunction.saveas(splicejunctionfile)
return splicejunctionfile
else:
return None
|
[
"def",
"get_splicejunction_file",
"(",
"align_dir",
",",
"data",
")",
":",
"samplename",
"=",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
"align_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"dd",
".",
"get_work_bam",
"(",
"data",
")",
")",
"knownfile",
"=",
"get_known_splicesites_file",
"(",
"align_dir",
",",
"data",
")",
"novelfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"align_dir",
",",
"\"%s-novelsplicesites.bed\"",
"%",
"samplename",
")",
"bed_files",
"=",
"[",
"x",
"for",
"x",
"in",
"[",
"knownfile",
",",
"novelfile",
"]",
"if",
"file_exists",
"(",
"x",
")",
"]",
"splicejunction",
"=",
"bed",
".",
"concat",
"(",
"bed_files",
")",
"splicejunctionfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"align_dir",
",",
"\"%s-splicejunctions.bed\"",
"%",
"samplename",
")",
"if",
"splicejunction",
":",
"splicejunction",
".",
"saveas",
"(",
"splicejunctionfile",
")",
"return",
"splicejunctionfile",
"else",
":",
"return",
"None"
] |
locate the splice junction file from hisat2. hisat2 outputs a novel
splicesites file to go along with the provided file, if available.
this combines the two together and outputs a combined file of all
of the known and novel splice junctions
|
[
"locate",
"the",
"splice",
"junction",
"file",
"from",
"hisat2",
".",
"hisat2",
"outputs",
"a",
"novel",
"splicesites",
"file",
"to",
"go",
"along",
"with",
"the",
"provided",
"file",
"if",
"available",
".",
"this",
"combines",
"the",
"two",
"together",
"and",
"outputs",
"a",
"combined",
"file",
"of",
"all",
"of",
"the",
"known",
"and",
"novel",
"splice",
"junctions"
] |
python
|
train
|
Erotemic/ubelt
|
setup.py
|
https://github.com/Erotemic/ubelt/blob/db802f3ad8abba025db74b54f86e6892b8927325/setup.py#L125-L182
|
def parse_requirements(fname='requirements.txt'):
"""
Parse the package dependencies listed in a requirements file but strips
specific versioning information.
TODO:
perhaps use https://github.com/davidfischer/requirements-parser instead
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
from os.path import dirname, join, exists
import re
require_fpath = join(dirname(__file__), fname)
def parse_line(line):
"""
Parse information from a line in a requirements text file
"""
info = {}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
else:
# Remove versioning from the package
pat = '(' + '|'.join(['>=', '==', '>']) + ')'
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ';' in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip, rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest # NOQA
info['version'] = (op, version)
return info
# This breaks on pip install, so check that it exists.
if exists(require_fpath):
with open(require_fpath, 'r') as f:
packages = []
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
info = parse_line(line)
package = info['package']
if not sys.version.startswith('3.4'):
# apparently package_deps are broken in 3.4
platform_deps = info.get('platform_deps')
if platform_deps is not None:
package += ';' + platform_deps
packages.append(package)
return packages
return []
|
[
"def",
"parse_requirements",
"(",
"fname",
"=",
"'requirements.txt'",
")",
":",
"from",
"os",
".",
"path",
"import",
"dirname",
",",
"join",
",",
"exists",
"import",
"re",
"require_fpath",
"=",
"join",
"(",
"dirname",
"(",
"__file__",
")",
",",
"fname",
")",
"def",
"parse_line",
"(",
"line",
")",
":",
"\"\"\"\n Parse information from a line in a requirements text file\n \"\"\"",
"info",
"=",
"{",
"}",
"if",
"line",
".",
"startswith",
"(",
"'-e '",
")",
":",
"info",
"[",
"'package'",
"]",
"=",
"line",
".",
"split",
"(",
"'#egg='",
")",
"[",
"1",
"]",
"else",
":",
"# Remove versioning from the package",
"pat",
"=",
"'('",
"+",
"'|'",
".",
"join",
"(",
"[",
"'>='",
",",
"'=='",
",",
"'>'",
"]",
")",
"+",
"')'",
"parts",
"=",
"re",
".",
"split",
"(",
"pat",
",",
"line",
",",
"maxsplit",
"=",
"1",
")",
"parts",
"=",
"[",
"p",
".",
"strip",
"(",
")",
"for",
"p",
"in",
"parts",
"]",
"info",
"[",
"'package'",
"]",
"=",
"parts",
"[",
"0",
"]",
"if",
"len",
"(",
"parts",
")",
">",
"1",
":",
"op",
",",
"rest",
"=",
"parts",
"[",
"1",
":",
"]",
"if",
"';'",
"in",
"rest",
":",
"# Handle platform specific dependencies",
"# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies",
"version",
",",
"platform_deps",
"=",
"map",
"(",
"str",
".",
"strip",
",",
"rest",
".",
"split",
"(",
"';'",
")",
")",
"info",
"[",
"'platform_deps'",
"]",
"=",
"platform_deps",
"else",
":",
"version",
"=",
"rest",
"# NOQA",
"info",
"[",
"'version'",
"]",
"=",
"(",
"op",
",",
"version",
")",
"return",
"info",
"# This breaks on pip install, so check that it exists.",
"if",
"exists",
"(",
"require_fpath",
")",
":",
"with",
"open",
"(",
"require_fpath",
",",
"'r'",
")",
"as",
"f",
":",
"packages",
"=",
"[",
"]",
"for",
"line",
"in",
"f",
".",
"readlines",
"(",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
"and",
"not",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"info",
"=",
"parse_line",
"(",
"line",
")",
"package",
"=",
"info",
"[",
"'package'",
"]",
"if",
"not",
"sys",
".",
"version",
".",
"startswith",
"(",
"'3.4'",
")",
":",
"# apparently package_deps are broken in 3.4",
"platform_deps",
"=",
"info",
".",
"get",
"(",
"'platform_deps'",
")",
"if",
"platform_deps",
"is",
"not",
"None",
":",
"package",
"+=",
"';'",
"+",
"platform_deps",
"packages",
".",
"append",
"(",
"package",
")",
"return",
"packages",
"return",
"[",
"]"
] |
Parse the package dependencies listed in a requirements file but strips
specific versioning information.
TODO:
perhaps use https://github.com/davidfischer/requirements-parser instead
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
|
[
"Parse",
"the",
"package",
"dependencies",
"listed",
"in",
"a",
"requirements",
"file",
"but",
"strips",
"specific",
"versioning",
"information",
"."
] |
python
|
valid
|
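The version-stripping core of parse_line is a single re.split on the first comparison operator, and the alternation order matters: '>=' must precede '>' (as in the original) or 'numpy>=1.9.0' would split at the bare '>'. A standalone check of that behaviour, with str.partition standing in for the ';' platform-marker handling.

import re

pat = '(' + '|'.join(['>=', '==', '>']) + ')'   # '>=' must come before '>'

def parse_line(line):
    parts = [p.strip() for p in re.split(pat, line, maxsplit=1)]
    info = {'package': parts[0]}
    if len(parts) > 1:
        op, rest = parts[1:]
        version, _, platform_deps = (s.strip() for s in rest.partition(';'))
        info['version'] = (op, version)
        if platform_deps:
            info['platform_deps'] = platform_deps
    return info

assert parse_line('ubelt') == {'package': 'ubelt'}
assert parse_line('numpy>=1.9.0') == {'package': 'numpy', 'version': ('>=', '1.9.0')}
assert parse_line("futures==3.2;python_version<'3.0'") == {
    'package': 'futures', 'version': ('==', '3.2'),
    'platform_deps': "python_version<'3.0'"}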
bheinzerling/pyrouge
|
pyrouge/Rouge155.py
|
https://github.com/bheinzerling/pyrouge/blob/afeb37dd2608f1399e2fb24a4ee2fe10a2a18603/pyrouge/Rouge155.py#L536-L553
|
def __create_dir_property(self, dir_name, docstring):
"""
Generate getter and setter for a directory property.
"""
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p)
|
[
"def",
"__create_dir_property",
"(",
"self",
",",
"dir_name",
",",
"docstring",
")",
":",
"property_name",
"=",
"\"{}_dir\"",
".",
"format",
"(",
"dir_name",
")",
"private_name",
"=",
"\"_\"",
"+",
"property_name",
"setattr",
"(",
"self",
",",
"private_name",
",",
"None",
")",
"def",
"fget",
"(",
"self",
")",
":",
"return",
"getattr",
"(",
"self",
",",
"private_name",
")",
"def",
"fset",
"(",
"self",
",",
"path",
")",
":",
"verify_dir",
"(",
"path",
",",
"dir_name",
")",
"setattr",
"(",
"self",
",",
"private_name",
",",
"path",
")",
"p",
"=",
"property",
"(",
"fget",
"=",
"fget",
",",
"fset",
"=",
"fset",
",",
"doc",
"=",
"docstring",
")",
"setattr",
"(",
"self",
".",
"__class__",
",",
"property_name",
",",
"p",
")"
] |
Generate getter and setter for a directory property.
|
[
"Generate",
"getter",
"and",
"setter",
"for",
"a",
"directory",
"property",
"."
] |
python
|
train
|
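__create_dir_property attaches the property to self.__class__, so every instance of the class gains the validated '<dir_name>_dir' attribute. A minimal standalone sketch of the same getter/setter factory, with a hypothetical existence check standing in for verify_dir.

import os

def add_dir_property(cls, dir_name, docstring):
    """Attach a validated '<dir_name>_dir' property to cls."""
    private_name = '_%s_dir' % dir_name

    def fget(self):
        return getattr(self, private_name, None)

    def fset(self, path):
        if not os.path.isdir(path):                 # stand-in for verify_dir
            raise ValueError('%s is not a directory' % path)
        setattr(self, private_name, path)

    setattr(cls, '%s_dir' % dir_name, property(fget, fset, doc=docstring))

class Config(object):
    pass

add_dir_property(Config, 'model', 'Directory holding model files.')
cfg = Config()
cfg.model_dir = os.getcwd()        # goes through the validating setter
assert cfg.model_dir == os.getcwd()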
Falkonry/falkonry-python-client
|
falkonryclient/helper/utils.py
|
https://github.com/Falkonry/falkonry-python-client/blob/0aeb2b00293ee94944f1634e9667401b03da29c1/falkonryclient/helper/utils.py#L18-L66
|
def exception_handler(exceptionObj):
"""Function that takes exception Object(<Byte>,<str>) as a parameter and returns the error message<str>"""
try:
if isinstance(exceptionObj, Exception) and hasattr(exceptionObj, 'args'):
if not (hasattr(exceptionObj, 'message' or hasattr(exceptionObj, 'msg'))):
if len(exceptionObj.args) >= 1:
if type(exceptionObj.args[0]) == type(b''):
ob = json.loads(exceptionObj.args[0].decode('utf-8'))
if type(ob) == type({}) and ob['message']:
return ob['message']
else:
try:
if type(exceptionObj.args[0]) == type('') and exceptionObj.args[0][0] == 'b':
ob = json.loads(exceptionObj.args[0][2:-1])
else:
ob = json.loads(exceptionObj.args[0])
if type(ob) == type({}) and ob['message']:
try:
return exception_handler(ob['message'])
except Exception as e:
return ob['message']
elif type(ob) == type({}) and ob['msg']:
try:
return exception_handler(ob['message'])
except Exception as e:
return ob['msg']
return str(json.loads(exceptionObj.args[0]))
except Exception as e:
return str(exceptionObj.args[0])
elif hasattr(exceptionObj, 'msg'):
return exceptionObj.msg
elif hasattr(exceptionObj, 'message'):
return exceptionObj.message
elif type(exceptionObj) == type(''):
try:
ob = json.loads(exceptionObj)
if type(ob) == type({}):
if ob['message']:
return ob['message']
elif ob['msg']:
return ob['msg']
else:
return ob
except Exception as e:
return exceptionObj
except Exception as e:
return e
|
[
"def",
"exception_handler",
"(",
"exceptionObj",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"exceptionObj",
",",
"Exception",
")",
"and",
"hasattr",
"(",
"exceptionObj",
",",
"'args'",
")",
":",
"if",
"not",
"(",
"hasattr",
"(",
"exceptionObj",
",",
"'message'",
"or",
"hasattr",
"(",
"exceptionObj",
",",
"'msg'",
")",
")",
")",
":",
"if",
"len",
"(",
"exceptionObj",
".",
"args",
")",
">=",
"1",
":",
"if",
"type",
"(",
"exceptionObj",
".",
"args",
"[",
"0",
"]",
")",
"==",
"type",
"(",
"b''",
")",
":",
"ob",
"=",
"json",
".",
"loads",
"(",
"exceptionObj",
".",
"args",
"[",
"0",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"if",
"type",
"(",
"ob",
")",
"==",
"type",
"(",
"{",
"}",
")",
"and",
"ob",
"[",
"'message'",
"]",
":",
"return",
"ob",
"[",
"'message'",
"]",
"else",
":",
"try",
":",
"if",
"type",
"(",
"exceptionObj",
".",
"args",
"[",
"0",
"]",
")",
"==",
"type",
"(",
"''",
")",
"and",
"exceptionObj",
".",
"args",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"'b'",
":",
"ob",
"=",
"json",
".",
"loads",
"(",
"exceptionObj",
".",
"args",
"[",
"0",
"]",
"[",
"2",
":",
"-",
"1",
"]",
")",
"else",
":",
"ob",
"=",
"json",
".",
"loads",
"(",
"exceptionObj",
".",
"args",
"[",
"0",
"]",
")",
"if",
"type",
"(",
"ob",
")",
"==",
"type",
"(",
"{",
"}",
")",
"and",
"ob",
"[",
"'message'",
"]",
":",
"try",
":",
"return",
"exception_handler",
"(",
"ob",
"[",
"'message'",
"]",
")",
"except",
"Exception",
"as",
"e",
":",
"return",
"ob",
"[",
"'message'",
"]",
"elif",
"type",
"(",
"ob",
")",
"==",
"type",
"(",
"{",
"}",
")",
"and",
"ob",
"[",
"'msg'",
"]",
":",
"try",
":",
"return",
"exception_handler",
"(",
"ob",
"[",
"'message'",
"]",
")",
"except",
"Exception",
"as",
"e",
":",
"return",
"ob",
"[",
"'msg'",
"]",
"return",
"str",
"(",
"json",
".",
"loads",
"(",
"exceptionObj",
".",
"args",
"[",
"0",
"]",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"return",
"str",
"(",
"exceptionObj",
".",
"args",
"[",
"0",
"]",
")",
"elif",
"hasattr",
"(",
"exceptionObj",
",",
"'msg'",
")",
":",
"return",
"exceptionObj",
".",
"msg",
"elif",
"hasattr",
"(",
"exceptionObj",
",",
"'message'",
")",
":",
"return",
"exceptionObj",
".",
"message",
"elif",
"type",
"(",
"exceptionObj",
")",
"==",
"type",
"(",
"''",
")",
":",
"try",
":",
"ob",
"=",
"json",
".",
"loads",
"(",
"exceptionObj",
")",
"if",
"type",
"(",
"ob",
")",
"==",
"type",
"(",
"{",
"}",
")",
":",
"if",
"ob",
"[",
"'message'",
"]",
":",
"return",
"ob",
"[",
"'message'",
"]",
"elif",
"ob",
"[",
"'msg'",
"]",
":",
"return",
"ob",
"[",
"'msg'",
"]",
"else",
":",
"return",
"ob",
"except",
"Exception",
"as",
"e",
":",
"return",
"exceptionObj",
"except",
"Exception",
"as",
"e",
":",
"return",
"e"
] |
Function that takes exception Object(<Byte>,<str>) as a parameter and returns the error message<str>
|
[
"Function",
"that",
"takes",
"exception",
"Object",
"(",
"<Byte",
">",
"<str",
">",
")",
"as",
"a",
"parameter",
"and",
"returns",
"the",
"error",
"message<str",
">"
] |
python
|
train
|
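Underneath the nesting, exception_handler peels layers (Exception args, byte strings, JSON strings, dicts) until it reaches a 'message' or 'msg' field. Note that the guard `hasattr(exceptionObj, 'message' or hasattr(exceptionObj, 'msg'))` appears to have a misplaced parenthesis in the source: the `or` sits inside the hasattr call, so only 'message' is ever tested there. A simplified sketch of the intended unwrapping, not a drop-in replacement:

import json

def error_message(obj):
    """Best-effort unwrap of a nested error payload to a human-readable string."""
    if isinstance(obj, Exception):
        return error_message(obj.args[0] if obj.args else str(obj))
    if isinstance(obj, bytes):
        obj = obj.decode('utf-8', errors='replace')
    if isinstance(obj, str):
        try:
            return error_message(json.loads(obj))   # peel a JSON layer if possible
        except ValueError:
            return obj
    if isinstance(obj, dict):
        for key in ('message', 'msg'):
            if key in obj:
                return error_message(obj[key])
    return str(obj)

assert error_message(Exception(b'{"message": "boom"}')) == 'boom'
assert error_message('plain text') == 'plain text'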
ThePlasmaRailgun/py-rolldice
|
rolldice/rolldice.py
|
https://github.com/ThePlasmaRailgun/py-rolldice/blob/dc46d1d3e765592e76c52fd812b4f3b7425db552/rolldice/rolldice.py#L179-L187
|
def _eval_unaryop(self, node):
"""
Evaluate a unary operator node (ie. -2, +3)
Currently just supports positive and negative
:param node: Node to eval
:return: Result of node
"""
return self.operators[type(node.op)](self._eval(node.operand))
|
[
"def",
"_eval_unaryop",
"(",
"self",
",",
"node",
")",
":",
"return",
"self",
".",
"operators",
"[",
"type",
"(",
"node",
".",
"op",
")",
"]",
"(",
"self",
".",
"_eval",
"(",
"node",
".",
"operand",
")",
")"
] |
Evaluate a unary operator node (ie. -2, +3)
Currently just supports positive and negative
:param node: Node to eval
:return: Result of node
|
[
"Evaluate",
"a",
"unary",
"operator",
"node",
"(",
"ie",
".",
"-",
"2",
"+",
"3",
")",
"Currently",
"just",
"supports",
"positive",
"and",
"negative"
] |
python
|
train
|
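_eval_unaryop is one dispatch arm of an operator-table AST evaluator. A self-contained sketch of the whole pattern for +, -, * and the unary operators; the operator table mirrors what the snippet implies, and the remaining node handling is an assumption.

import ast
import operator

OPERATORS = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    ast.USub: operator.neg,   # unary minus
    ast.UAdd: operator.pos,   # unary plus
}

def safe_eval(expr):
    def _eval(node):
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant):          # numeric literals (Python 3.8+)
            return node.value
        if isinstance(node, ast.BinOp):
            return OPERATORS[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp):           # the case shown in the record
            return OPERATORS[type(node.op)](_eval(node.operand))
        raise ValueError('unsupported node: %r' % node)
    return _eval(ast.parse(expr, mode='eval'))

assert safe_eval('-2 + 3 * 4') == 10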
lepture/flask-oauthlib
|
flask_oauthlib/provider/oauth2.py
|
https://github.com/lepture/flask-oauthlib/blob/9e6f152a5bb360e7496210da21561c3e6d41b0e1/flask_oauthlib/provider/oauth2.py#L479-L515
|
def confirm_authorization_request(self):
"""When consumer confirm the authorization."""
server = self.server
scope = request.values.get('scope') or ''
scopes = scope.split()
credentials = dict(
client_id=request.values.get('client_id'),
redirect_uri=request.values.get('redirect_uri', None),
response_type=request.values.get('response_type', None),
state=request.values.get('state', None)
)
log.debug('Fetched credentials from request %r.', credentials)
redirect_uri = credentials.get('redirect_uri')
log.debug('Found redirect_uri %s.', redirect_uri)
uri, http_method, body, headers = extract_params()
try:
ret = server.create_authorization_response(
uri, http_method, body, headers, scopes, credentials)
log.debug('Authorization successful.')
return create_response(*ret)
except oauth2.FatalClientError as e:
log.debug('Fatal client error %r', e, exc_info=True)
return self._on_exception(e, e.in_uri(self.error_uri))
except oauth2.OAuth2Error as e:
log.debug('OAuth2Error: %r', e, exc_info=True)
# on auth error, we should preserve state if it's present according to RFC 6749
state = request.values.get('state')
if state and not e.state:
e.state = state # set e.state so e.in_uri() can add the state query parameter to redirect uri
return self._on_exception(e, e.in_uri(redirect_uri or self.error_uri))
except Exception as e:
log.exception(e)
return self._on_exception(e, add_params_to_uri(
self.error_uri, {'error': str(e)}
))
|
[
"def",
"confirm_authorization_request",
"(",
"self",
")",
":",
"server",
"=",
"self",
".",
"server",
"scope",
"=",
"request",
".",
"values",
".",
"get",
"(",
"'scope'",
")",
"or",
"''",
"scopes",
"=",
"scope",
".",
"split",
"(",
")",
"credentials",
"=",
"dict",
"(",
"client_id",
"=",
"request",
".",
"values",
".",
"get",
"(",
"'client_id'",
")",
",",
"redirect_uri",
"=",
"request",
".",
"values",
".",
"get",
"(",
"'redirect_uri'",
",",
"None",
")",
",",
"response_type",
"=",
"request",
".",
"values",
".",
"get",
"(",
"'response_type'",
",",
"None",
")",
",",
"state",
"=",
"request",
".",
"values",
".",
"get",
"(",
"'state'",
",",
"None",
")",
")",
"log",
".",
"debug",
"(",
"'Fetched credentials from request %r.'",
",",
"credentials",
")",
"redirect_uri",
"=",
"credentials",
".",
"get",
"(",
"'redirect_uri'",
")",
"log",
".",
"debug",
"(",
"'Found redirect_uri %s.'",
",",
"redirect_uri",
")",
"uri",
",",
"http_method",
",",
"body",
",",
"headers",
"=",
"extract_params",
"(",
")",
"try",
":",
"ret",
"=",
"server",
".",
"create_authorization_response",
"(",
"uri",
",",
"http_method",
",",
"body",
",",
"headers",
",",
"scopes",
",",
"credentials",
")",
"log",
".",
"debug",
"(",
"'Authorization successful.'",
")",
"return",
"create_response",
"(",
"*",
"ret",
")",
"except",
"oauth2",
".",
"FatalClientError",
"as",
"e",
":",
"log",
".",
"debug",
"(",
"'Fatal client error %r'",
",",
"e",
",",
"exc_info",
"=",
"True",
")",
"return",
"self",
".",
"_on_exception",
"(",
"e",
",",
"e",
".",
"in_uri",
"(",
"self",
".",
"error_uri",
")",
")",
"except",
"oauth2",
".",
"OAuth2Error",
"as",
"e",
":",
"log",
".",
"debug",
"(",
"'OAuth2Error: %r'",
",",
"e",
",",
"exc_info",
"=",
"True",
")",
"# on auth error, we should preserve state if it's present according to RFC 6749",
"state",
"=",
"request",
".",
"values",
".",
"get",
"(",
"'state'",
")",
"if",
"state",
"and",
"not",
"e",
".",
"state",
":",
"e",
".",
"state",
"=",
"state",
"# set e.state so e.in_uri() can add the state query parameter to redirect uri",
"return",
"self",
".",
"_on_exception",
"(",
"e",
",",
"e",
".",
"in_uri",
"(",
"redirect_uri",
"or",
"self",
".",
"error_uri",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"exception",
"(",
"e",
")",
"return",
"self",
".",
"_on_exception",
"(",
"e",
",",
"add_params_to_uri",
"(",
"self",
".",
"error_uri",
",",
"{",
"'error'",
":",
"str",
"(",
"e",
")",
"}",
")",
")"
] |
When consumer confirm the authorization.
|
[
"When",
"consumer",
"confirm",
"the",
"authorization",
"."
] |
python
|
test
|
NuGrid/NuGridPy
|
nugridpy/ascii_table.py
|
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/ascii_table.py#L248-L346
|
def _readFile(self, sldir, fileName, sep):
'''
Private method that reads in the header and column data.
'''
if sldir.endswith(os.sep):
fileName = str(sldir)+str(fileName)
else:
fileName = str(sldir)+os.sep+str(fileName)
fileLines=[] #list of lines in the file
header=[] #list of Header lines
dataCols=[] #Dictionary of data column names
data=[] #List of Data lists
cols=[] #List of column names
f=open(fileName,'r')
fileLines=f.readlines()
i=0
if self.datatype != 'trajectory':
while i<len(fileLines):
if fileLines[i].startswith(self.header_char):
tmp=fileLines[i].lstrip(self.header_char)
header.append(tmp.strip())
else:
break
i+=1
cols=fileLines[i].split(sep)
tmp=[]
tmp1=[]
for j in range(len(cols)):
tmp1=cols[j].strip()
if tmp1 !='':
tmp.append(tmp1)
cols=tmp
i+=1
else:
header={}
while fileLines[i].startswith('#') or '=' in fileLines[i]:
if fileLines[i].startswith('#') and cols==[]:
cols=fileLines[i].strip('#')
cols=cols.strip()
cols=cols.split()
elif fileLines[i].startswith('#'):
tmp1=fileLines[i].strip('#')
tmp1=tmp1.strip()
self.headerLines.append(tmp1)
elif not fileLines[i].startswith('#'):
tmp=fileLines[i].split('=')
tmp[0]=tmp[0].strip()
tmp[1]=tmp[1].strip()
if header=={}:
header={str(tmp[0]):str(tmp[1])}
else:
header[str(tmp[0])]=str(tmp[1])
i+=1
while i<len(fileLines):
if fileLines[i].startswith('#'):
i=i+1
else:
tmp=fileLines[i].split()
for j in range(len(tmp)):
tmp[j]=tmp[j].strip()
data.append(tmp)
i+=1
tmp=[]
tmp1=[]
for j in range(len(data)):
for k in range(len(data[j])):
tmp1=data[j][k].strip()
if tmp1 !='':
tmp.append(tmp1)
data[j]=tmp
tmp=[]
tmp=[]
for j in range(len(cols)):
for k in range(len(data)):
try:
a=float(data[k][j])
tmp.append(a)
except ValueError:
tmp.append(data[k][j])
#else:
# tmp.append(float(data[k][j])) # previously tmp.append(float(data[k][j]))
tmp=array(tmp)
if j == 0:
dataCols={cols[j]:tmp}
else:
dataCols[cols[j]]=tmp
tmp=[]
return header,dataCols
|
[
"def",
"_readFile",
"(",
"self",
",",
"sldir",
",",
"fileName",
",",
"sep",
")",
":",
"if",
"sldir",
".",
"endswith",
"(",
"os",
".",
"sep",
")",
":",
"fileName",
"=",
"str",
"(",
"sldir",
")",
"+",
"str",
"(",
"fileName",
")",
"else",
":",
"fileName",
"=",
"str",
"(",
"sldir",
")",
"+",
"os",
".",
"sep",
"+",
"str",
"(",
"fileName",
")",
"fileLines",
"=",
"[",
"]",
"#list of lines in the file",
"header",
"=",
"[",
"]",
"#list of Header lines",
"dataCols",
"=",
"[",
"]",
"#Dictionary of data column names",
"data",
"=",
"[",
"]",
"#List of Data lists",
"cols",
"=",
"[",
"]",
"#List of column names",
"f",
"=",
"open",
"(",
"fileName",
",",
"'r'",
")",
"fileLines",
"=",
"f",
".",
"readlines",
"(",
")",
"i",
"=",
"0",
"if",
"self",
".",
"datatype",
"!=",
"'trajectory'",
":",
"while",
"i",
"<",
"len",
"(",
"fileLines",
")",
":",
"if",
"fileLines",
"[",
"i",
"]",
".",
"startswith",
"(",
"self",
".",
"header_char",
")",
":",
"tmp",
"=",
"fileLines",
"[",
"i",
"]",
".",
"lstrip",
"(",
"self",
".",
"header_char",
")",
"header",
".",
"append",
"(",
"tmp",
".",
"strip",
"(",
")",
")",
"else",
":",
"break",
"i",
"+=",
"1",
"cols",
"=",
"fileLines",
"[",
"i",
"]",
".",
"split",
"(",
"sep",
")",
"tmp",
"=",
"[",
"]",
"tmp1",
"=",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"cols",
")",
")",
":",
"tmp1",
"=",
"cols",
"[",
"j",
"]",
".",
"strip",
"(",
")",
"if",
"tmp1",
"!=",
"''",
":",
"tmp",
".",
"append",
"(",
"tmp1",
")",
"cols",
"=",
"tmp",
"i",
"+=",
"1",
"else",
":",
"header",
"=",
"{",
"}",
"while",
"fileLines",
"[",
"i",
"]",
".",
"startswith",
"(",
"'#'",
")",
"or",
"'='",
"in",
"fileLines",
"[",
"i",
"]",
":",
"if",
"fileLines",
"[",
"i",
"]",
".",
"startswith",
"(",
"'#'",
")",
"and",
"cols",
"==",
"[",
"]",
":",
"cols",
"=",
"fileLines",
"[",
"i",
"]",
".",
"strip",
"(",
"'#'",
")",
"cols",
"=",
"cols",
".",
"strip",
"(",
")",
"cols",
"=",
"cols",
".",
"split",
"(",
")",
"elif",
"fileLines",
"[",
"i",
"]",
".",
"startswith",
"(",
"'#'",
")",
":",
"tmp1",
"=",
"fileLines",
"[",
"i",
"]",
".",
"strip",
"(",
"'#'",
")",
"tmp1",
"=",
"tmp1",
".",
"strip",
"(",
")",
"self",
".",
"headerLines",
".",
"append",
"(",
"tmp1",
")",
"elif",
"not",
"fileLines",
"[",
"i",
"]",
".",
"startswith",
"(",
"'#'",
")",
":",
"tmp",
"=",
"fileLines",
"[",
"i",
"]",
".",
"split",
"(",
"'='",
")",
"tmp",
"[",
"0",
"]",
"=",
"tmp",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"tmp",
"[",
"1",
"]",
"=",
"tmp",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"if",
"header",
"==",
"{",
"}",
":",
"header",
"=",
"{",
"str",
"(",
"tmp",
"[",
"0",
"]",
")",
":",
"str",
"(",
"tmp",
"[",
"1",
"]",
")",
"}",
"else",
":",
"header",
"[",
"str",
"(",
"tmp",
"[",
"0",
"]",
")",
"]",
"=",
"str",
"(",
"tmp",
"[",
"1",
"]",
")",
"i",
"+=",
"1",
"while",
"i",
"<",
"len",
"(",
"fileLines",
")",
":",
"if",
"fileLines",
"[",
"i",
"]",
".",
"startswith",
"(",
"'#'",
")",
":",
"i",
"=",
"i",
"+",
"1",
"else",
":",
"tmp",
"=",
"fileLines",
"[",
"i",
"]",
".",
"split",
"(",
")",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"tmp",
")",
")",
":",
"tmp",
"[",
"j",
"]",
"=",
"tmp",
"[",
"j",
"]",
".",
"strip",
"(",
")",
"data",
".",
"append",
"(",
"tmp",
")",
"i",
"+=",
"1",
"tmp",
"=",
"[",
"]",
"tmp1",
"=",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"data",
")",
")",
":",
"for",
"k",
"in",
"range",
"(",
"len",
"(",
"data",
"[",
"j",
"]",
")",
")",
":",
"tmp1",
"=",
"data",
"[",
"j",
"]",
"[",
"k",
"]",
".",
"strip",
"(",
")",
"if",
"tmp1",
"!=",
"''",
":",
"tmp",
".",
"append",
"(",
"tmp1",
")",
"data",
"[",
"j",
"]",
"=",
"tmp",
"tmp",
"=",
"[",
"]",
"tmp",
"=",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"cols",
")",
")",
":",
"for",
"k",
"in",
"range",
"(",
"len",
"(",
"data",
")",
")",
":",
"try",
":",
"a",
"=",
"float",
"(",
"data",
"[",
"k",
"]",
"[",
"j",
"]",
")",
"tmp",
".",
"append",
"(",
"a",
")",
"except",
"ValueError",
":",
"tmp",
".",
"append",
"(",
"data",
"[",
"k",
"]",
"[",
"j",
"]",
")",
"#else:",
"# tmp.append(float(data[k][j])) # previously tmp.append(float(data[k][j]))",
"tmp",
"=",
"array",
"(",
"tmp",
")",
"if",
"j",
"==",
"0",
":",
"dataCols",
"=",
"{",
"cols",
"[",
"j",
"]",
":",
"tmp",
"}",
"else",
":",
"dataCols",
"[",
"cols",
"[",
"j",
"]",
"]",
"=",
"tmp",
"tmp",
"=",
"[",
"]",
"return",
"header",
",",
"dataCols"
] |
Private method that reads in the header and column data.
|
[
"Private",
"method",
"that",
"reads",
"in",
"the",
"header",
"and",
"column",
"data",
"."
] |
python
|
train
|
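The column-coercion step near the end of _readFile is a float-or-leave-it try/except applied token by token; the same idea in isolation:

def coerce(token):
    """float when parseable, the raw string otherwise (as in the try/except above)."""
    try:
        return float(token)
    except ValueError:
        return token

row = 'h1 1.0e-3 not-a-number 42'.split()
assert [coerce(t) for t in row] == ['h1', 0.001, 'not-a-number', 42.0]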
JnyJny/Geometry
|
Geometry/ellipse.py
|
https://github.com/JnyJny/Geometry/blob/3500f815fa56c535b36d1b6fd0afe69ce5d055be/Geometry/ellipse.py#L209-L220
|
def b(self):
'''
Positive antipodal point on the minor axis, Point class.
'''
b = Point(self.center)
if self.xAxisIsMinor:
b.x += self.minorRadius
else:
b.y += self.minorRadius
return b
|
[
"def",
"b",
"(",
"self",
")",
":",
"b",
"=",
"Point",
"(",
"self",
".",
"center",
")",
"if",
"self",
".",
"xAxisIsMinor",
":",
"b",
".",
"x",
"+=",
"self",
".",
"minorRadius",
"else",
":",
"b",
".",
"y",
"+=",
"self",
".",
"minorRadius",
"return",
"b"
] |
Positive antipodal point on the minor axis, Point class.
|
[
"Positive",
"antipodal",
"point",
"on",
"the",
"minor",
"axis",
"Point",
"class",
"."
] |
python
|
train
|
postmanlabs/httpbin
|
httpbin/core.py
|
https://github.com/postmanlabs/httpbin/blob/f8ec666b4d1b654e4ff6aedd356f510dcac09f83/httpbin/core.py#L1423-L1450
|
def random_bytes(n):
"""Returns n random bytes generated with given seed
---
tags:
- Dynamic data
parameters:
- in: path
name: n
type: int
produces:
- application/octet-stream
responses:
200:
description: Bytes.
"""
n = min(n, 100 * 1024) # set 100KB limit
params = CaseInsensitiveDict(request.args.items())
if "seed" in params:
random.seed(int(params["seed"]))
response = make_response()
# Note: can't just use os.urandom here because it ignores the seed
response.data = bytearray(random.randint(0, 255) for i in range(n))
response.content_type = "application/octet-stream"
return response
|
[
"def",
"random_bytes",
"(",
"n",
")",
":",
"n",
"=",
"min",
"(",
"n",
",",
"100",
"*",
"1024",
")",
"# set 100KB limit",
"params",
"=",
"CaseInsensitiveDict",
"(",
"request",
".",
"args",
".",
"items",
"(",
")",
")",
"if",
"\"seed\"",
"in",
"params",
":",
"random",
".",
"seed",
"(",
"int",
"(",
"params",
"[",
"\"seed\"",
"]",
")",
")",
"response",
"=",
"make_response",
"(",
")",
"# Note: can't just use os.urandom here because it ignores the seed",
"response",
".",
"data",
"=",
"bytearray",
"(",
"random",
".",
"randint",
"(",
"0",
",",
"255",
")",
"for",
"i",
"in",
"range",
"(",
"n",
")",
")",
"response",
".",
"content_type",
"=",
"\"application/octet-stream\"",
"return",
"response"
] |
Returns n random bytes generated with given seed
---
tags:
- Dynamic data
parameters:
- in: path
name: n
type: int
produces:
- application/octet-stream
responses:
200:
description: Bytes.
|
[
"Returns",
"n",
"random",
"bytes",
"generated",
"with",
"given",
"seed",
"---",
"tags",
":",
"-",
"Dynamic",
"data",
"parameters",
":",
"-",
"in",
":",
"path",
"name",
":",
"n",
"type",
":",
"int",
"produces",
":",
"-",
"application",
"/",
"octet",
"-",
"stream",
"responses",
":",
"200",
":",
"description",
":",
"Bytes",
"."
] |
python
|
train
|
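The handler above has to seed the random module because, as its comment notes, os.urandom ignores seeds. The reproducibility property in isolation, using a private random.Random instance so the sketch does not mutate global RNG state the way the endpoint does:

import random

def seeded_bytes(n, seed):
    rng = random.Random(seed)                     # private RNG, same algorithm
    return bytes(rng.randint(0, 255) for _ in range(min(n, 100 * 1024)))

assert seeded_bytes(16, 42) == seeded_bytes(16, 42)   # deterministic per seed
assert len(seeded_bytes(8, 0)) == 8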
Neurita/boyle
|
boyle/databuffer.py
|
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/databuffer.py#L364-L449
|
def put_df_as_ndarray(self, key, df, range_values, loop_multiindex=False,
unstack=False, fill_value=0, fill_method=None):
"""Returns a PyTables HDF Array from df in the shape given by its index columns range values.
:param key: string object
:param df: pandas DataFrame
:param range_values: dict or array-like
Must contain for each index column of df an entry with all the values
within the range of the column.
:param loop_multiindex: bool
Will loop through the first index in a multiindex dataframe, extract a
dataframe only for one value, complete and fill the missing values and
store in the HDF.
If this is True, it will not use unstack.
This is as fast as unstacking.
:param unstack: bool
Unstack means that this will use the first index name to
unfold the DataFrame, and will create a group with as many datasets
as valus has this first index.
Use this if you think the filled dataframe won't fit in your RAM memory.
If set to False, this will transform the dataframe in memory first
and only then save it.
:param fill_value: scalar or 'nearest', default 0
Value to use for missing values. Defaults to 0, but can be any
"compatible" value, e.g., NaN.
The 'nearest' mode will fill the missing value with the nearest value in
the column.
:param fill_method: {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed DataFrame
'pad' / 'ffill': propagate last valid observation forward to next valid
'backfill' / 'bfill': use NEXT valid observation to fill gap
:return: PyTables data node
"""
idx_colnames = df.index.names
#idx_colranges = [range_values[x] for x in idx_colnames]
#dataset group name if not given
if key is None:
key = idx_colnames[0]
if loop_multiindex:
idx_values = df.index.get_level_values(0).unique()
for idx in idx_values:
vals, _ = self._fill_missing_values(df.xs((idx,), level=idx_colnames[0]),
range_values,
fill_value=fill_value,
fill_method=fill_method)
ds_name = str(idx) + '_' + '_'.join(vals.columns)
self._push_dfblock(key, vals, ds_name, range_values)
return self._handle.get_node('/' + str(key))
#separate the dataframe into blocks, only with the first index
else:
if unstack:
df = df.unstack(idx_colnames[0])
for idx in df:
vals, _ = self._fill_missing_values(df[idx], range_values,
fill_value=fill_value,
fill_method=fill_method)
vals = np.nan_to_num(vals)
ds_name = '_'.join([str(x) for x in vals.name])
self._push_dfblock(key, vals, ds_name, range_values)
return self._handle.get_node('/' + str(key))
#not separate the data
vals, _ = self._fill_missing_values(df, range_values,
fill_value=fill_value,
fill_method=fill_method)
ds_name = self._array_dsname
return self._push_dfblock(key, vals, ds_name, range_values)
|
[
"def",
"put_df_as_ndarray",
"(",
"self",
",",
"key",
",",
"df",
",",
"range_values",
",",
"loop_multiindex",
"=",
"False",
",",
"unstack",
"=",
"False",
",",
"fill_value",
"=",
"0",
",",
"fill_method",
"=",
"None",
")",
":",
"idx_colnames",
"=",
"df",
".",
"index",
".",
"names",
"#idx_colranges = [range_values[x] for x in idx_colnames]",
"#dataset group name if not given",
"if",
"key",
"is",
"None",
":",
"key",
"=",
"idx_colnames",
"[",
"0",
"]",
"if",
"loop_multiindex",
":",
"idx_values",
"=",
"df",
".",
"index",
".",
"get_level_values",
"(",
"0",
")",
".",
"unique",
"(",
")",
"for",
"idx",
"in",
"idx_values",
":",
"vals",
",",
"_",
"=",
"self",
".",
"_fill_missing_values",
"(",
"df",
".",
"xs",
"(",
"(",
"idx",
",",
")",
",",
"level",
"=",
"idx_colnames",
"[",
"0",
"]",
")",
",",
"range_values",
",",
"fill_value",
"=",
"fill_value",
",",
"fill_method",
"=",
"fill_method",
")",
"ds_name",
"=",
"str",
"(",
"idx",
")",
"+",
"'_'",
"+",
"'_'",
".",
"join",
"(",
"vals",
".",
"columns",
")",
"self",
".",
"_push_dfblock",
"(",
"key",
",",
"vals",
",",
"ds_name",
",",
"range_values",
")",
"return",
"self",
".",
"_handle",
".",
"get_node",
"(",
"'/'",
"+",
"str",
"(",
"key",
")",
")",
"#separate the dataframe into blocks, only with the first index",
"else",
":",
"if",
"unstack",
":",
"df",
"=",
"df",
".",
"unstack",
"(",
"idx_colnames",
"[",
"0",
"]",
")",
"for",
"idx",
"in",
"df",
":",
"vals",
",",
"_",
"=",
"self",
".",
"_fill_missing_values",
"(",
"df",
"[",
"idx",
"]",
",",
"range_values",
",",
"fill_value",
"=",
"fill_value",
",",
"fill_method",
"=",
"fill_method",
")",
"vals",
"=",
"np",
".",
"nan_to_num",
"(",
"vals",
")",
"ds_name",
"=",
"'_'",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"vals",
".",
"name",
"]",
")",
"self",
".",
"_push_dfblock",
"(",
"key",
",",
"vals",
",",
"ds_name",
",",
"range_values",
")",
"return",
"self",
".",
"_handle",
".",
"get_node",
"(",
"'/'",
"+",
"str",
"(",
"key",
")",
")",
"#not separate the data",
"vals",
",",
"_",
"=",
"self",
".",
"_fill_missing_values",
"(",
"df",
",",
"range_values",
",",
"fill_value",
"=",
"fill_value",
",",
"fill_method",
"=",
"fill_method",
")",
"ds_name",
"=",
"self",
".",
"_array_dsname",
"return",
"self",
".",
"_push_dfblock",
"(",
"key",
",",
"vals",
",",
"ds_name",
",",
"range_values",
")"
] |
Returns a PyTables HDF Array from df in the shape given by its index columns range values.
:param key: string object
:param df: pandas DataFrame
:param range_values: dict or array-like
Must contain for each index column of df an entry with all the values
within the range of the column.
:param loop_multiindex: bool
Will loop through the first index in a multiindex dataframe, extract a
dataframe only for one value, complete and fill the missing values and
store in the HDF.
If this is True, it will not use unstack.
This is as fast as unstacking.
:param unstack: bool
Unstack means that this will use the first index name to
unfold the DataFrame, and will create a group with as many datasets
as valus has this first index.
Use this if you think the filled dataframe won't fit in your RAM memory.
If set to False, this will transform the dataframe in memory first
and only then save it.
:param fill_value: scalar or 'nearest', default 0
Value to use for missing values. Defaults to 0, but can be any
"compatible" value, e.g., NaN.
The 'nearest' mode will fill the missing value with the nearest value in
the column.
:param fill_method: {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed DataFrame
'pad' / 'ffill': propagate last valid observation forward to next valid
'backfill' / 'bfill': use NEXT valid observation to fill gap
:return: PyTables data node
|
[
"Returns",
"a",
"PyTables",
"HDF",
"Array",
"from",
"df",
"in",
"the",
"shape",
"given",
"by",
"its",
"index",
"columns",
"range",
"values",
"."
] |
python
|
valid
|
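_fill_missing_values itself is not shown, but the docstring describes completing the DataFrame over the cartesian product of range_values before storing. The core of that completion in plain pandas, as a sketch under that assumption:

import pandas as pd

df = pd.DataFrame(
    {'value': [1.0, 2.0]},
    index=pd.MultiIndex.from_tuples([(0, 'a'), (1, 'b')], names=['i', 'j']),
)
range_values = {'i': [0, 1], 'j': ['a', 'b']}

# Build the full index from the per-column ranges, then fill the holes.
full_index = pd.MultiIndex.from_product(
    [range_values[name] for name in df.index.names], names=df.index.names)
filled = df.reindex(full_index, fill_value=0)

assert filled.shape == (4, 1)                  # 2 x 2 cartesian product
assert filled.loc[(0, 'b'), 'value'] == 0      # missing cell got the fill value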
daler/metaseq
|
metaseq/results_table.py
|
https://github.com/daler/metaseq/blob/fa875d1f72317aa7ef95cb128b739956b16eef9f/metaseq/results_table.py#L690-L758
|
def genes_with_peak(self, peaks, transform_func=None, split=False,
intersect_kwargs=None, id_attribute='ID', *args,
**kwargs):
"""
Returns a boolean index of genes that have a peak nearby.
Parameters
----------
peaks : string or pybedtools.BedTool
If string, then assume it's a filename to a BED/GFF/GTF file of
intervals; otherwise use the pybedtools.BedTool object directly.
transform_func : callable
This function will be applied to each gene object returned by
self.features(). Additional args and kwargs are passed to
`transform_func`. For example, if you're looking for peaks within
1kb upstream of TSSs, then pybedtools.featurefuncs.TSS would be
a useful `transform_func`, and you could supply additional kwargs
of `upstream=1000` and `downstream=0`.
This function can return iterables of features, too. For example,
you might want to look for peaks falling within the exons of
a gene. In this case, `transform_func` should return an iterable
of pybedtools.Interval objects. The only requirement is that the
`name` field of any feature matches the index of the dataframe.
intersect_kwargs : dict
kwargs passed to pybedtools.BedTool.intersect.
id_attribute : str
The attribute in the GTF or GFF file that contains the id of the
gene. For meaningful results to be returned, a gene's ID must also be
found in the index of the dataframe.
For GFF files, typically you'd use `id_attribute="ID"`. For GTF
files, you'd typically use `id_attribute="gene_id"`.
"""
def _transform_func(x):
"""
In order to support transform funcs that return a single feature or
an iterable of features, we need to wrap it
"""
result = transform_func(x)
if isinstance(result, pybedtools.Interval):
result = [result]
for i in result:
if i:
yield i
intersect_kwargs = intersect_kwargs or {}
if not self._cached_features:
self._cached_features = pybedtools\
.BedTool(self.features())\
.saveas()
if transform_func:
if split:
features = self._cached_features\
.split(_transform_func, *args, **kwargs)
else:
features = self._cached_features\
.each(transform_func, *args, **kwargs)
else:
features = self._cached_features
hits = list(set([i[id_attribute] for i in features.intersect(
peaks, **intersect_kwargs)]))
return self.data.index.isin(hits)
|
[
"def",
"genes_with_peak",
"(",
"self",
",",
"peaks",
",",
"transform_func",
"=",
"None",
",",
"split",
"=",
"False",
",",
"intersect_kwargs",
"=",
"None",
",",
"id_attribute",
"=",
"'ID'",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"_transform_func",
"(",
"x",
")",
":",
"\"\"\"\n In order to support transform funcs that return a single feature or\n an iterable of features, we need to wrap it\n \"\"\"",
"result",
"=",
"transform_func",
"(",
"x",
")",
"if",
"isinstance",
"(",
"result",
",",
"pybedtools",
".",
"Interval",
")",
":",
"result",
"=",
"[",
"result",
"]",
"for",
"i",
"in",
"result",
":",
"if",
"i",
":",
"yield",
"result",
"intersect_kwargs",
"=",
"intersect_kwargs",
"or",
"{",
"}",
"if",
"not",
"self",
".",
"_cached_features",
":",
"self",
".",
"_cached_features",
"=",
"pybedtools",
".",
"BedTool",
"(",
"self",
".",
"features",
"(",
")",
")",
".",
"saveas",
"(",
")",
"if",
"transform_func",
":",
"if",
"split",
":",
"features",
"=",
"self",
".",
"_cached_features",
".",
"split",
"(",
"_transform_func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"features",
"=",
"self",
".",
"_cached_features",
".",
"each",
"(",
"transform_func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"features",
"=",
"self",
".",
"_cached_features",
"hits",
"=",
"list",
"(",
"set",
"(",
"[",
"i",
"[",
"id_attribute",
"]",
"for",
"i",
"in",
"features",
".",
"intersect",
"(",
"peaks",
",",
"*",
"*",
"intersect_kwargs",
")",
"]",
")",
")",
"return",
"self",
".",
"data",
".",
"index",
".",
"isin",
"(",
"hits",
")"
] |
Returns a boolean index of genes that have a peak nearby.
Parameters
----------
peaks : string or pybedtools.BedTool
If string, then assume it's a filename to a BED/GFF/GTF file of
intervals; otherwise use the pybedtools.BedTool object directly.
transform_func : callable
This function will be applied to each gene object returned by
self.features(). Additional args and kwargs are passed to
`transform_func`. For example, if you're looking for peaks within
1kb upstream of TSSs, then pybedtools.featurefuncs.TSS would be
a useful `transform_func`, and you could supply additional kwargs
of `upstream=1000` and `downstream=0`.
This function can return iterables of features, too. For example,
you might want to look for peaks falling within the exons of
a gene. In this case, `transform_func` should return an iterable
of pybedtools.Interval objects. The only requirement is that the
`name` field of any feature matches the index of the dataframe.
intersect_kwargs : dict
kwargs passed to pybedtools.BedTool.intersect.
id_attribute : str
The attribute in the GTF or GFF file that contains the id of the
gene. For meaningful results to be returned, a gene's ID must also be
found in the index of the dataframe.
For GFF files, typically you'd use `id_attribute="ID"`. For GTF
files, you'd typically use `id_attribute="gene_id"`.
|
[
"Returns",
"a",
"boolean",
"index",
"of",
"genes",
"that",
"have",
"a",
"peak",
"nearby",
"."
] |
python
|
train
|
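Following the genes_with_peak docstring above, a hedged usage sketch; rt stands in for a results-table object exposing the method, and 'peaks.bed' is a hypothetical file:

from pybedtools.featurefuncs import TSS

# Look for peaks within 1 kb upstream of each TSS, as the docstring suggests.
has_peak = rt.genes_with_peak(
    'peaks.bed',
    transform_func=TSS,
    upstream=1000,
    downstream=0,
    id_attribute='gene_id',  # GTF-style id attribute
)
with_peaks = rt.data[has_peak]  # boolean index into the underlying frame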
Kronuz/pyScss
|
scss/compiler.py
|
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/compiler.py#L490-L542
|
def _at_option(self, calculator, rule, scope, block):
"""
Implements @option
"""
# TODO This only actually supports "style" (which only really makes
# sense as the first thing in a single input file) or "warn_unused"
# (which only makes sense at file level /at best/). Explore either
# replacing this with a better mechanism or dropping it entirely.
# Note also that all rules share the same underlying legacy option
# dict, so the rules aren't even lexically scoped like you might think,
# and @importing a file can change the compiler! That seems totally
# wrong.
for option in block.argument.split(','):
key, colon, value = option.partition(':')
key = key.strip().lower().replace('-', '_')
value = value.strip().lower()
if value in ('1', 'true', 't', 'yes', 'y', 'on'):
value = True
elif value in ('0', 'false', 'f', 'no', 'n', 'off', 'undefined'):
value = False
elif not colon:
value = True
if key == 'compress':
warn_deprecated(
rule,
"The 'compress' @option is deprecated. "
"Please use 'style' instead."
)
key = 'style'
value = 'compressed' if value else 'legacy'
if key in ('short_colors', 'reverse_colors'):
warn_deprecated(
rule,
"The '{0}' @option no longer has any effect."
.format(key),
)
return
elif key == 'style':
try:
OutputStyle[value]
except KeyError:
raise SassError("No such output style: {0}".format(value))
elif key in ('warn_unused', 'control_scoping'):
# TODO deprecate control_scoping? or add it to compiler?
if not isinstance(value, bool):
raise SassError("The '{0}' @option requires a bool, not {1!r}".format(key, value))
else:
raise SassError("Unknown @option: {0}".format(key))
rule.legacy_compiler_options[key] = value
|
[
"def",
"_at_option",
"(",
"self",
",",
"calculator",
",",
"rule",
",",
"scope",
",",
"block",
")",
":",
"# TODO This only actually supports \"style\" (which only really makes",
"# sense as the first thing in a single input file) or \"warn_unused\"",
"# (which only makes sense at file level /at best/). Explore either",
"# replacing this with a better mechanism or dropping it entirely.",
"# Note also that all rules share the same underlying legacy option",
"# dict, so the rules aren't even lexically scoped like you might think,",
"# and @importing a file can change the compiler! That seems totally",
"# wrong.",
"for",
"option",
"in",
"block",
".",
"argument",
".",
"split",
"(",
"','",
")",
":",
"key",
",",
"colon",
",",
"value",
"=",
"option",
".",
"partition",
"(",
"':'",
")",
"key",
"=",
"key",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
"value",
"=",
"value",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"if",
"value",
"in",
"(",
"'1'",
",",
"'true'",
",",
"'t'",
",",
"'yes'",
",",
"'y'",
",",
"'on'",
")",
":",
"value",
"=",
"True",
"elif",
"value",
"in",
"(",
"'0'",
",",
"'false'",
",",
"'f'",
",",
"'no'",
",",
"'n'",
",",
"'off'",
",",
"'undefined'",
")",
":",
"value",
"=",
"False",
"elif",
"not",
"colon",
":",
"value",
"=",
"True",
"if",
"key",
"==",
"'compress'",
":",
"warn_deprecated",
"(",
"rule",
",",
"\"The 'compress' @option is deprecated. \"",
"\"Please use 'style' instead.\"",
")",
"key",
"=",
"'style'",
"value",
"=",
"'compressed'",
"if",
"value",
"else",
"'legacy'",
"if",
"key",
"in",
"(",
"'short_colors'",
",",
"'reverse_colors'",
")",
":",
"warn_deprecated",
"(",
"rule",
",",
"\"The '{0}' @option no longer has any effect.\"",
".",
"format",
"(",
"key",
")",
",",
")",
"return",
"elif",
"key",
"==",
"'style'",
":",
"try",
":",
"OutputStyle",
"[",
"value",
"]",
"except",
"KeyError",
":",
"raise",
"SassError",
"(",
"\"No such output style: {0}\"",
".",
"format",
"(",
"value",
")",
")",
"elif",
"key",
"in",
"(",
"'warn_unused'",
",",
"'control_scoping'",
")",
":",
"# TODO deprecate control_scoping? or add it to compiler?",
"if",
"not",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"raise",
"SassError",
"(",
"\"The '{0}' @option requires a bool, not {1!r}\"",
".",
"format",
"(",
"key",
",",
"value",
")",
")",
"else",
":",
"raise",
"SassError",
"(",
"\"Unknown @option: {0}\"",
".",
"format",
"(",
"key",
")",
")",
"rule",
".",
"legacy_compiler_options",
"[",
"key",
"]",
"=",
"value"
] |
Implements @option
|
[
"Implements"
] |
python
|
train
|
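The @option handling above reduces to partitioning comma-separated 'key: value' pairs and coercing common truthy/falsy strings; a standalone sketch of just that coercion (the function name is illustrative, not pyScss API):

def parse_options(argument):
    options = {}
    for option in argument.split(','):
        key, colon, value = option.partition(':')
        key = key.strip().lower().replace('-', '_')
        value = value.strip().lower()
        if value in ('1', 'true', 't', 'yes', 'y', 'on'):
            value = True
        elif value in ('0', 'false', 'f', 'no', 'n', 'off', 'undefined'):
            value = False
        elif not colon:
            value = True  # a bare key with no colon acts as a flag
        options[key] = value
    return options

print(parse_options('style: compressed, warn-unused: yes'))
# {'style': 'compressed', 'warn_unused': True}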
spacetelescope/drizzlepac
|
drizzlepac/processInput.py
|
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/processInput.py#L639-L648
|
def buildFileList(input, output=None, ivmlist=None,
wcskey=None, updatewcs=True, **workinplace):
"""
Builds a file list which has undergone various instrument-specific
checks for input to MultiDrizzle, including splitting STIS associations.
"""
newfilelist, ivmlist, output, oldasndict, filelist = \
buildFileListOrig(input=input, output=output, ivmlist=ivmlist,
wcskey=wcskey, updatewcs=updatewcs, **workinplace)
return newfilelist, ivmlist, output, oldasndict
|
[
"def",
"buildFileList",
"(",
"input",
",",
"output",
"=",
"None",
",",
"ivmlist",
"=",
"None",
",",
"wcskey",
"=",
"None",
",",
"updatewcs",
"=",
"True",
",",
"*",
"*",
"workinplace",
")",
":",
"newfilelist",
",",
"ivmlist",
",",
"output",
",",
"oldasndict",
",",
"filelist",
"=",
"buildFileListOrig",
"(",
"input",
"=",
"input",
",",
"output",
"=",
"output",
",",
"ivmlist",
"=",
"ivmlist",
",",
"wcskey",
"=",
"wcskey",
",",
"updatewcs",
"=",
"updatewcs",
",",
"*",
"*",
"workinplace",
")",
"return",
"newfilelist",
",",
"ivmlist",
",",
"output",
",",
"oldasndict"
] |
Builds a file list which has undergone various instrument-specific
checks for input to MultiDrizzle, including splitting STIS associations.
|
[
"Builds",
"a",
"file",
"list",
"which",
"has",
"undergone",
"various",
"instrument",
"-",
"specific",
"checks",
"for",
"input",
"to",
"MultiDrizzle",
"including",
"splitting",
"STIS",
"associations",
"."
] |
python
|
train
|
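A minimal usage sketch for the wrapper above; the wildcard input and output name are made up, and drizzlepac must be installed:

from drizzlepac import processInput

newfilelist, ivmlist, output, oldasndict = processInput.buildFileList(
    '*flt.fits', output='final_drz.fits', updatewcs=True)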
andy-z/ged4py
|
ged4py/model.py
|
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/model.py#L69-L95
|
def sub_tag(self, path, follow=True):
"""Returns direct sub-record with given tag name or None.
Path can be a simple tag name, in which case the first direct
sub-record of this record with the matching tag is returned. Path
can also consist of several tags separated by slashes, in that case
sub-records are searched recursively.
If `follow` is True then pointer records are resolved and pointed
record is used instead of the pointer record; this also works for all
intermediate records in a path.
:param str path: tag names separated by slashes.
:param boolean follow: If True then resolve pointers.
:return: `Record` instance or `None` if sub-record with a given
tag does not exist.
"""
tags = path.split('/')
rec = self
for tag in tags:
recs = [x for x in (rec.sub_records or []) if x.tag == tag]
if not recs:
return None
rec = recs[0]
if follow and isinstance(rec, Pointer):
rec = rec.ref
return rec
|
[
"def",
"sub_tag",
"(",
"self",
",",
"path",
",",
"follow",
"=",
"True",
")",
":",
"tags",
"=",
"path",
".",
"split",
"(",
"'/'",
")",
"rec",
"=",
"self",
"for",
"tag",
"in",
"tags",
":",
"recs",
"=",
"[",
"x",
"for",
"x",
"in",
"(",
"rec",
".",
"sub_records",
"or",
"[",
"]",
")",
"if",
"x",
".",
"tag",
"==",
"tag",
"]",
"if",
"not",
"recs",
":",
"return",
"None",
"rec",
"=",
"recs",
"[",
"0",
"]",
"if",
"follow",
"and",
"isinstance",
"(",
"rec",
",",
"Pointer",
")",
":",
"rec",
"=",
"rec",
".",
"ref",
"return",
"rec"
] |
Returns direct sub-record with given tag name or None.
Path can be a simple tag name, in which case the first direct
sub-record of this record with the matching tag is returned. Path
can also consist of several tags separated by slashes, in that case
sub-records are searched recursively.
If `follow` is True then pointer records are resolved and pointed
record is used instead of the pointer record; this also works for all
intermediate records in a path.
:param str path: tag names separated by slashes.
:param boolean follow: If True then resolve pointers.
:return: `Record` instance or `None` if sub-record with a given
tag does not exist.
|
[
"Returns",
"direct",
"sub",
"-",
"record",
"with",
"given",
"tag",
"name",
"or",
"None",
"."
] |
python
|
train
|
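A short usage sketch for sub_tag above; indi stands in for a ged4py record, and BIRT/DATE are standard GEDCOM tags:

# Walks INDI -> BIRT -> DATE, resolving pointer records along the way.
birth_date = indi.sub_tag('BIRT/DATE')
if birth_date is not None:
    print(birth_date.value)

# follow=False returns the intermediate Pointer record unresolved.
famc = indi.sub_tag('FAMC', follow=False)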
Alignak-monitoring/alignak
|
alignak/external_command.py
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L1773-L1785
|
def del_all_svc_downtimes(self, service):
"""Delete all service downtime
Format of the line that triggers function call::
DEL_ALL_SVC_DOWNTIMES;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
"""
for downtime in service.downtimes:
self.del_svc_downtime(downtime)
self.send_an_element(service.get_update_status_brok())
|
[
"def",
"del_all_svc_downtimes",
"(",
"self",
",",
"service",
")",
":",
"for",
"downtime",
"in",
"service",
".",
"downtimes",
":",
"self",
".",
"del_svc_downtime",
"(",
"downtime",
")",
"self",
".",
"send_an_element",
"(",
"service",
".",
"get_update_status_brok",
"(",
")",
")"
] |
Delete all service downtime
Format of the line that triggers function call::
DEL_ALL_SVC_DOWNTIMES;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
|
[
"Delete",
"all",
"service",
"downtime",
"Format",
"of",
"the",
"line",
"that",
"triggers",
"function",
"call",
"::"
] |
python
|
train
|
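Per the docstring above, the method is driven by a Nagios-style external command line; a sketch of the wire format and a rough equivalent direct call (the manager and registry names are assumptions, not verified Alignak API):

# As written to the command pipe/file:
# [1571308800] DEL_ALL_SVC_DOWNTIMES;webserver01;HTTP

# Roughly what the command manager does once the service is resolved:
service = services.find_srv_by_name_and_hostname('webserver01', 'HTTP')
command_manager.del_all_svc_downtimes(service)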
googledatalab/pydatalab
|
google/datalab/contrib/pipeline/_pipeline.py
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/pipeline/_pipeline.py#L201-L236
|
def _get_operator_param_name_and_values(operator_class_name, task_details):
""" Internal helper gets the name of the python parameter for the Airflow operator class. In
some cases, we do not expose the airflow parameter name in its native form, but choose to
expose a name that's more standard for Datalab, or one that's more friendly. For example,
Airflow's BigQueryOperator uses 'bql' for the query string, but we want %%bq users in Datalab
to use 'query'. Hence, a few substitutions that are specific to the Airflow operator need to
be made.
Similarly, the parameter value could come from the notebook's context. All that happens
here.
Returns:
Dict containing _only_ the keys and values that are required in Airflow operator definition.
This requires substituting existing keys in the dictionary with their Airflow equivalents (
i.e. by adding new keys, and removing the existing ones).
"""
# We make a clone and then remove 'type' and 'up_stream' since these aren't needed for the
# operator's parameters.
operator_task_details = task_details.copy()
if 'type' in operator_task_details.keys():
del operator_task_details['type']
if 'up_stream' in operator_task_details.keys():
del operator_task_details['up_stream']
# We special-case certain operators if we do some translation of the parameter names. This is
# usually the case when we use syntactic sugar to expose the functionality.
# TODO(rajivpb): It should be possible to make this a lookup from the modules mapping via
# getattr() or equivalent. Avoid hard-coding these class-names here.
if (operator_class_name == 'BigQueryOperator'):
return PipelineGenerator._get_bq_execute_params(operator_task_details)
if (operator_class_name == 'BigQueryToCloudStorageOperator'):
return PipelineGenerator._get_bq_extract_params(operator_task_details)
if (operator_class_name == 'GoogleCloudStorageToBigQueryOperator'):
return PipelineGenerator._get_bq_load_params(operator_task_details)
return operator_task_details
|
[
"def",
"_get_operator_param_name_and_values",
"(",
"operator_class_name",
",",
"task_details",
")",
":",
"# We make a clone and then remove 'type' and 'up_stream' since these aren't needed for the",
"# the operator's parameters.",
"operator_task_details",
"=",
"task_details",
".",
"copy",
"(",
")",
"if",
"'type'",
"in",
"operator_task_details",
".",
"keys",
"(",
")",
":",
"del",
"operator_task_details",
"[",
"'type'",
"]",
"if",
"'up_stream'",
"in",
"operator_task_details",
".",
"keys",
"(",
")",
":",
"del",
"operator_task_details",
"[",
"'up_stream'",
"]",
"# We special-case certain operators if we do some translation of the parameter names. This is",
"# usually the case when we use syntactic sugar to expose the functionality.",
"# TODO(rajivpb): It should be possible to make this a lookup from the modules mapping via",
"# getattr() or equivalent. Avoid hard-coding these class-names here.",
"if",
"(",
"operator_class_name",
"==",
"'BigQueryOperator'",
")",
":",
"return",
"PipelineGenerator",
".",
"_get_bq_execute_params",
"(",
"operator_task_details",
")",
"if",
"(",
"operator_class_name",
"==",
"'BigQueryToCloudStorageOperator'",
")",
":",
"return",
"PipelineGenerator",
".",
"_get_bq_extract_params",
"(",
"operator_task_details",
")",
"if",
"(",
"operator_class_name",
"==",
"'GoogleCloudStorageToBigQueryOperator'",
")",
":",
"return",
"PipelineGenerator",
".",
"_get_bq_load_params",
"(",
"operator_task_details",
")",
"return",
"operator_task_details"
] |
Internal helper gets the name of the python parameter for the Airflow operator class. In
some cases, we do not expose the airflow parameter name in its native form, but choose to
expose a name that's more standard for Datalab, or one that's more friendly. For example,
Airflow's BigQueryOperator uses 'bql' for the query string, but we want %%bq users in Datalab
to use 'query'. Hence, a few substitutions that are specific to the Airflow operator need to
be made.
Similarly, the parameter value could come from the notebook's context. All that happens
here.
Returns:
Dict containing _only_ the keys and values that are required in Airflow operator definition.
This requires substituting existing keys in the dictionary with their Airflow equivalents (
i.e. by adding new keys, and removing the existing ones).
|
[
"Internal",
"helper",
"gets",
"the",
"name",
"of",
"the",
"python",
"parameter",
"for",
"the",
"Airflow",
"operator",
"class",
".",
"In",
"some",
"cases",
"we",
"do",
"not",
"expose",
"the",
"airflow",
"parameter",
"name",
"in",
"its",
"native",
"form",
"but",
"choose",
"to",
"expose",
"a",
"name",
"that",
"s",
"more",
"standard",
"for",
"Datalab",
"or",
"one",
"that",
"s",
"more",
"friendly",
".",
"For",
"example",
"Airflow",
"s",
"BigQueryOperator",
"uses",
"bql",
"for",
"the",
"query",
"string",
"but",
"we",
"want",
"%%bq",
"users",
"in",
"Datalab",
"to",
"use",
"query",
".",
"Hence",
"a",
"few",
"substitutions",
"that",
"are",
"specific",
"to",
"the",
"Airflow",
"operator",
"need",
"to",
"be",
"made",
"."
] |
python
|
train
|
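The key translation described above is a plain dict rewrite; a minimal standalone sketch of the 'query' -> 'bql' substitution the docstring mentions (the helper name and mapping are illustrative, not the Datalab API):

def translate_bq_execute_params(task_details):
    params = dict(task_details)
    params.pop('type', None)       # pipeline-only keys are dropped
    params.pop('up_stream', None)
    if 'query' in params:
        # Datalab exposes 'query'; Airflow's BigQueryOperator expects 'bql'.
        params['bql'] = params.pop('query')
    return params

print(translate_bq_execute_params(
    {'type': 'bq.execute', 'query': 'SELECT 1', 'up_stream': ['start']}))
# {'bql': 'SELECT 1'}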
cloud-custodian/cloud-custodian
|
c7n/cli.py
|
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/cli.py#L178-L190
|
def _schema_options(p):
""" Add options specific to schema subcommand. """
p.add_argument(
'resource', metavar='selector', nargs='?',
default=None).completer = _schema_tab_completer
p.add_argument(
'--summary', action="store_true",
help="Summarize counts of available resources, actions and filters")
p.add_argument('--json', action="store_true", help=argparse.SUPPRESS)
p.add_argument("-v", "--verbose", action="count", help="Verbose logging")
p.add_argument("-q", "--quiet", action="count", help=argparse.SUPPRESS)
p.add_argument("--debug", default=False, help=argparse.SUPPRESS)
|
[
"def",
"_schema_options",
"(",
"p",
")",
":",
"p",
".",
"add_argument",
"(",
"'resource'",
",",
"metavar",
"=",
"'selector'",
",",
"nargs",
"=",
"'?'",
",",
"default",
"=",
"None",
")",
".",
"completer",
"=",
"_schema_tab_completer",
"p",
".",
"add_argument",
"(",
"'--summary'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Summarize counts of available resources, actions and filters\"",
")",
"p",
".",
"add_argument",
"(",
"'--json'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"argparse",
".",
"SUPPRESS",
")",
"p",
".",
"add_argument",
"(",
"\"-v\"",
",",
"\"--verbose\"",
",",
"action",
"=",
"\"count\"",
",",
"help",
"=",
"\"Verbose logging\"",
")",
"p",
".",
"add_argument",
"(",
"\"-q\"",
",",
"\"--quiet\"",
",",
"action",
"=",
"\"count\"",
",",
"help",
"=",
"argparse",
".",
"SUPPRESS",
")",
"p",
".",
"add_argument",
"(",
"\"--debug\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"argparse",
".",
"SUPPRESS",
")"
] |
Add options specific to schema subcommand.
|
[
"Add",
"options",
"specific",
"to",
"schema",
"subcommand",
"."
] |
python
|
train
|
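A sketch of how a helper like _schema_options is typically wired into an argparse subcommand (the parser construction is illustrative and assumes c7n is installed):

import argparse
from c7n.cli import _schema_options

parser = argparse.ArgumentParser(prog='custodian')
subs = parser.add_subparsers(dest='command')
schema = subs.add_parser('schema', help='Interact with the resource schema')
_schema_options(schema)

args = parser.parse_args(['schema', 'aws.ec2', '--summary'])
print(args.resource, args.summary)  # aws.ec2 True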