_id (string, 2–7 chars) | title (string, 1–88 chars) | partition (string, 3 classes) | text (string, 75–19.8k chars) | language (string, 1 class) | meta_information (dict)
---|---|---|---|---|---
q278100
|
MetricMarkdown.generateMarkdown
|
test
|
def generateMarkdown(self):
"""
Look up each of the metrics and then output in Markdown
"""
self.generateMetricDefinitions()
self.generateFieldDefinitions()
self.generateDashboardDefinitions()
self.outputMarkdown()
|
python
|
{
"resource": ""
}
|
q278101
|
ParserBase.parse
|
test
|
def parse(self, text):
    """Attempt to parse source code."""
    self.original_text = text
    try:
        return getattr(self, self.entry_point)(text)
    except DeadEnd as exc:
        raise ParserError(self.most_consumed, "Failed to parse input") from exc
|
python
|
{
"resource": ""
}
|
q278102
|
ParserBase._attempting
|
test
|
def _attempting(self, text):
"""Keeps track of the furthest point in the source code the parser has reached to this point."""
consumed = len(self.original_text) - len(text)
self.most_consumed = max(consumed, self.most_consumed)
|
python
|
{
"resource": ""
}
|
q278103
|
MeasurementGet.add_arguments
|
test
|
def add_arguments(self):
"""
Add specific command line arguments for this command
"""
# Call our parent to add the default arguments
ApiCli.add_arguments(self)
# Command specific arguments
self.parser.add_argument('-f', '--format', dest='format', action='store', required=False,
choices=['csv', 'json', 'raw', 'xml'], help='Output format. Default is raw')
self.parser.add_argument('-n', '--name', dest='metric_name', action='store', required=True,
metavar="metric_name", help='Metric identifier')
self.parser.add_argument('-g', '--aggregate', dest='aggregate', action='store', required=False,
choices=['sum', 'avg', 'max', 'min'], help='Metric default aggregate')
self.parser.add_argument('-r', '--sample', dest='sample', action='store', type=int, metavar="sample",
help='Downsample rate in seconds')
self.parser.add_argument('-s', '--source', dest='source', action='store', metavar="source", required=True,
help='Source of measurement')
self.parser.add_argument('-b', '--start', dest='start', action='store', required=True, metavar="start",
help='Start of time range as ISO 8601 string or epoch seconds')
self.parser.add_argument('-d', '--end', dest='end', action='store', metavar="end", required=False,
help='End of time range as ISO 8601 string or epoch seconds')
self.parser.add_argument('-o', '--date-format', dest='date_format', action='store', metavar="format",
required=False,
help='Date format for CSV, JSON, and XML output (see Python date.strftime). ' +
'Default format is %%s')
|
python
|
{
"resource": ""
}
|
q278104
|
MeasurementGet.parse_time_date
|
test
|
def parse_time_date(self, s):
    """
    Attempt to parse the passed-in string into a valid datetime.
    If we get a parse error, assume the string is an epoch time
    and convert it to a datetime.
    """
    try:
        ret = parser.parse(str(s))
    except ValueError:
        try:
            ret = datetime.fromtimestamp(int(s))
        except (ValueError, TypeError):
            ret = None
    return ret
|
python
|
{
"resource": ""
}
|
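A minimal usage sketch of the fallback logic in `parse_time_date`, rewritten standalone under the assumption that the snippet's `parser` is `dateutil.parser`:

```python
from datetime import datetime
from dateutil import parser  # assumed: the snippet's `parser` import

def parse_time_date(s):
    """Standalone sketch of the ISO-8601 / epoch-seconds fallback above."""
    try:
        return parser.parse(str(s))
    except ValueError:
        try:
            return datetime.fromtimestamp(int(s))
        except (ValueError, TypeError):
            return None

print(parse_time_date("2015-12-13T10:00:00"))  # parsed as an ISO 8601 date
print(parse_time_date(1450000000))  # epoch fallback if dateutil rejects the bare number
print(parse_time_date(None))        # None: neither interpretation applies
```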
q278105
|
MeasurementGet.output_csv
|
test
|
def output_csv(self, text):
"""
Output results in CSV format
"""
payload = json.loads(text)
# Print CSV header
print("{0},{1},{2},{3},{4}".format('timestamp', 'metric', 'aggregate', 'source', 'value'))
metric_name = self._metric_name
# Loop through the aggregates one row per timestamp, and 1 or more source/value pairs
for r in payload['result']['aggregates']['key']:
timestamp = self._format_timestamp(r[0][0])
for s in r[1]:
print('{0},"{1}","{2}","{3}",{4}'.format(timestamp, metric_name, self.aggregate, s[0], s[1]))
|
python
|
{
"resource": ""
}
|
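The nested indexing above (`r[0][0]`, `r[1]`, `s[0]`, `s[1]`) implies a payload shaped roughly as below; this sample is inferred from the loop structure, not taken from the API documentation:

```python
import json

# Hypothetical payload: one row per timestamp, each carrying (source, value) pairs.
sample = {
    "result": {
        "aggregates": {
            "key": [
                [[1450000000], [["host-a", 42.0], ["host-b", 17.5]]],
                [[1450000060], [["host-a", 43.1]]],
            ]
        }
    }
}
for r in json.loads(json.dumps(sample))['result']['aggregates']['key']:
    timestamp, pairs = r[0][0], r[1]
    for source, value in pairs:
        print('{0},"{1}",{2}'.format(timestamp, source, value))
```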
q278106
|
MeasurementGet.output_json
|
test
|
def output_json(self, text):
"""
Output results in structured JSON format
"""
payload = json.loads(text)
data = []
metric_name = self._metric_name
for r in payload['result']['aggregates']['key']:
timestamp = self._format_timestamp(r[0][0])
for s in r[1]:
data.append({
"timestamp": timestamp,
"metric": metric_name,
"aggregate": self.aggregate,
"source": s[0],
"value": s[1],
})
payload = {"data": data}
out = json.dumps(payload, indent=self._indent, separators=(',', ': '))
print(self.colorize_json(out))
|
python
|
{
"resource": ""
}
|
q278107
|
MeasurementGet.output_raw
|
test
|
def output_raw(self, text):
"""
Output results in raw JSON format
"""
payload = json.loads(text)
out = json.dumps(payload, sort_keys=True, indent=self._indent, separators=(',', ': '))
print(self.colorize_json(out))
|
python
|
{
"resource": ""
}
|
q278108
|
MeasurementGet.output_xml
|
test
|
def output_xml(self, text):
"""
Output results in XML format
"""
# Create the main document nodes
document = Element('results')
comment = Comment('Generated by TrueSight Pulse measurement-get CLI')
document.append(comment)
aggregates = SubElement(document, 'aggregates')
aggregate = SubElement(aggregates, 'aggregate')
measurements = SubElement(aggregate, 'measurements')
# Parse the JSON result so we can translate to XML
payload = json.loads(text)
# Currently we only support a single metric; if we move to the batch API
# we can handle multiple
metric_name = self._metric_name
# Loop through the aggregates one row per timestamp, and 1 or more source/value pairs
for r in payload['result']['aggregates']['key']:
timestamp = self._format_timestamp(r[0][0])
for s in r[1]:
# Each timestamp, metric, source, values is placed in a measure tag
measure_node = SubElement(measurements, 'measure')
source = s[0]
value = str(s[1])
ts_node = SubElement(measure_node, 'timestamp')
ts_node.text = str(timestamp)
metric_node = SubElement(measure_node, 'metric')
metric_node.text = metric_name
aggregate_node = SubElement(measure_node, 'aggregate')
aggregate_node.text = self.aggregate
source_node = SubElement(measure_node, 'source')
source_node.text = source
value_node = SubElement(measure_node, 'value')
value_node.text = value
rough_string = ElementTree.tostring(document, 'utf-8')
reparse = minidom.parseString(rough_string)
output = reparse.toprettyxml(indent=" ")
print(self.colorize_xml(output))
|
python
|
{
"resource": ""
}
|
q278109
|
trimmed_pred_default
|
test
|
def trimmed_pred_default(node, parent):
"""The default predicate used in Node.trimmed."""
return isinstance(node, ParseNode) and (node.is_empty or node.is_type(ParseNodeType.terminal))
|
python
|
{
"resource": ""
}
|
q278110
|
pprint
|
test
|
def pprint(root, depth=0, space_unit=" ", *, source_len=0, file=None):
"""Pretting print a parse tree."""
spacing = space_unit * depth
if isinstance(root, str):
print("{0}terminal@(?): {1}".format(spacing, root), file=file)
else:
if root.position is None:
position = -1
elif root.position < 0:
position = source_len + root.position
else:
position = root.position
if root.is_value:
print("{0}{1}@({2}:{3}):\t{4}".format(spacing, root.node_type, position, root.consumed, root.svalue), file=file)
else:
print("{0}{1}@({2}:{3}):".format(spacing, root.node_type, position, root.consumed), file=file)
for child in root.children:
pprint(child, depth + 1, source_len=source_len, file=file)
|
python
|
{
"resource": ""
}
|
q278111
|
repetition
|
test
|
def repetition(extractor, bounds, *, ignore_whitespace=False):
"""Returns a partial of _get_repetition that accepts only a text argument."""
return partial(_get_repetition, extractor, bounds=bounds, ignore_whitespace=ignore_whitespace)
|
python
|
{
"resource": ""
}
|
q278112
|
_get_terminal
|
test
|
def _get_terminal(value, text):
"""Checks the beginning of text for a value. If it is found, a terminal ParseNode is returned
filled out appropriately for the value it found. DeadEnd is raised if the value does not match.
"""
if text and text.startswith(value):
return ParseNode(ParseNodeType.terminal,
children=[value],
consumed=len(value),
position=-len(text))
else:
raise DeadEnd()
|
python
|
{
"resource": ""
}
|
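A short sketch of `_get_terminal`'s two outcomes, assuming `ParseNode`, `ParseNodeType`, and `DeadEnd` come from the same (unnamed) module as these snippets:

```python
node = _get_terminal("def", "def foo(): pass")
assert node.consumed == 3     # matched the three bytes of "def"
assert node.position == -15   # negative offset: -len(remaining text)

try:
    _get_terminal("class", "def foo(): pass")
except DeadEnd:
    print("no match at this position")
```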
q278113
|
_get_repetition
|
test
|
def _get_repetition(extractor, text, *, bounds=(0, None), ignore_whitespace=False):
"""Tries to pull text with extractor repeatedly.
Bounds is a 2-tuple of (lbound, ubound) where lbound is a number and ubound is a number or None.
If the ubound is None, this method will execute extractor on text until extractor raises DeadEnd.
Otherwise, extractor will be called until it raises DeadEnd, or it has extracted ubound times.
If the number of children extracted is >= lbound, then a ParseNode with type repetition is
returned. Otherwise, DeadEnd is raised.
Bounds are interpreted inclusively, as [lbound, ubound]
This method is used to implement:
- option (0, 1)
- zero_or_more (0, None)
- one_or_more (1, None)
- exact_repeat (n, n)
"""
minr, maxr = bounds
children = []
while maxr is None or len(children) < maxr:
ignored_ws, use_text = _split_ignored(text, ignore_whitespace)
try:
child = _call_extractor(extractor, use_text)
child.add_ignored(ignored_ws)
except DeadEnd:
break
if child.is_empty:
break
children.append(child)
text = text[child.consumed:]
if len(children) >= minr:
return ParseNode(ParseNodeType.repetition,
children=children)
else:
raise DeadEnd()
|
python
|
{
"resource": ""
}
|
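To show how these pieces compose, here is a hedged sketch of the shorthands the `_get_repetition` docstring names (`option`, `zero_or_more`, `one_or_more`), assuming the combinators above are importable:

```python
# Sketch of the wrappers described in the _get_repetition docstring.
def option(extractor):
    return repetition(extractor, (0, 1))

def zero_or_more(extractor):
    return repetition(extractor, (0, None))

def one_or_more(extractor):
    return repetition(extractor, (1, None))

ones = one_or_more("1")   # a plain string is treated as a terminal
node = ones("111abc")     # matches three "1" terminals, stops at "abc"
assert len(node.children) == 3
```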
q278114
|
_get_exclusion
|
test
|
def _get_exclusion(extractor, exclusion, text):
"""Returns extractor's result if exclusion does not match.
If exclusion raises DeadEnd (meaning it did not match) then the result of extractor(text) is
returned. Otherwise, if exclusion does not raise DeadEnd it means it did match, and we then
raise DeadEnd.
"""
try:
_call_extractor(exclusion, text)
exclusion_matches = True
except DeadEnd:
exclusion_matches = False
if exclusion_matches:
raise DeadEnd()
else:
return _call_extractor(extractor, text)
|
python
|
{
"resource": ""
}
|
q278115
|
_count_leading_whitespace
|
test
|
def _count_leading_whitespace(text):
    """Returns the number of characters at the beginning of text that are whitespace."""
    for idx, char in enumerate(text):
        if not char.isspace():
            return idx
    return len(text)
|
python
|
{
"resource": ""
}
|
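Edge cases for `_count_leading_whitespace` (with the rewrite above, the empty string now yields 0):

```python
assert _count_leading_whitespace("  x") == 2   # two leading spaces
assert _count_leading_whitespace("x") == 0     # no leading whitespace
assert _count_leading_whitespace("   ") == 3   # all whitespace
assert _count_leading_whitespace("") == 0      # empty input
```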
q278116
|
_call_extractor
|
test
|
def _call_extractor(extractor, text):
"""This method calls an extractor on some text.
If extractor is just a string, it is passed as the first value to _get_terminal. Otherwise it is
treated as a callable and text is passed directly to it.
This makes it so you can have a shorthand of terminal(val) <-> val.
"""
if isinstance(extractor, str):
return _get_terminal(extractor, text)
else:
return extractor(text)
|
python
|
{
"resource": ""
}
|
q278117
|
ParseNode.position
|
test
|
def position(self):
"""Gets the position of the text the ParseNode processed. If the ParseNode does not have its
own position, it looks to its first child for its position.
'Value Nodes' (terminals) must have their own position, otherwise this method will throw an
exception when it tries to get the position property of the string child.
"""
pos = self._position
if pos is None and self.children:
ch1 = self.children[0]
if isinstance(ch1, ParseNode):
pos = ch1.position
return pos
|
python
|
{
"resource": ""
}
|
q278118
|
ParseNode.is_empty
|
test
|
def is_empty(self):
"""Returns True if this node has no children, or if all of its children are ParseNode instances
and are empty.
"""
return all(isinstance(c, ParseNode) and c.is_empty for c in self.children)
|
python
|
{
"resource": ""
}
|
q278119
|
ParseNode.add_ignored
|
test
|
def add_ignored(self, ignored):
"""Add ignored text to the node. This will add the length of the ignored text to the node's
consumed property.
"""
if ignored:
if self.ignored:
self.ignored = ignored + self.ignored
else:
self.ignored = ignored
self.consumed += len(ignored)
|
python
|
{
"resource": ""
}
|
q278120
|
ParseNode.is_type
|
test
|
def is_type(self, value):
"""Returns True if node_type == value.
If value is a tuple, node_type is checked against each member and True is returned if any of
them match.
"""
if isinstance(value, tuple):
for opt in value:
if self.node_type == opt:
return True
return False
else:
return self.node_type == value
|
python
|
{
"resource": ""
}
|
q278121
|
ParseNode.flattened
|
test
|
def flattened(self, pred=flattened_pred_default):
"""Flattens nodes by hoisting children up to ancestor nodes.
A node is hoisted if pred(node) returns True.
"""
if self.is_value:
return self
new_children = []
for child in self.children:
if child.is_empty:
continue
new_child = child.flattened(pred)
if pred(new_child, self):
new_children.extend(new_child.children)
else:
new_children.append(new_child)
return ParseNode(self.node_type,
children=new_children,
consumed=self.consumed,
position=self.position,
ignored=self.ignored)
|
python
|
{
"resource": ""
}
|
q278122
|
ParseNode.trimmed
|
test
|
def trimmed(self, pred=trimmed_pred_default):
"""Trim a ParseTree.
A node is trimmed if pred(node) returns True.
"""
new_children = []
for child in self.children:
if isinstance(child, ParseNode):
new_child = child.trimmed(pred)
else:
new_child = child
if not pred(new_child, self):
new_children.append(new_child)
return ParseNode(self.node_type,
children=new_children,
consumed=self.consumed,
position=self.position,
ignored=self.ignored)
|
python
|
{
"resource": ""
}
|
q278123
|
ParseNode.merged
|
test
|
def merged(self, other):
"""Returns a new ParseNode whose type is this node's type, and whose children are all the
children from this node and the other whose length is not 0.
"""
children = [c for c in itertools.chain(self.children, other.children) if len(c) > 0]
# NOTE: Only terminals should have ignored text attached to them, and terminals shouldn't be
# merged (probably), so it shouldn't be necessary to copy ignored -- it should always
# be None. But, we'll go ahead and copy it over anyway, recognizing that other's
# ignored text will be lost.
return ParseNode(self.node_type,
children=children,
consumed=self.consumed + other.consumed,
ignored=self.ignored)
|
python
|
{
"resource": ""
}
|
q278124
|
ParseNode.retyped
|
test
|
def retyped(self, new_type):
"""Returns a new node with the same contents as self, but with a new node_type."""
return ParseNode(new_type,
children=list(self.children),
consumed=self.consumed,
position=self.position,
ignored=self.ignored)
|
python
|
{
"resource": ""
}
|
q278125
|
ParseNode.compressed
|
test
|
def compressed(self, new_type=None, *, include_ignored=False):
"""Turns the node into a value node, whose single string child is the concatenation of all its
children.
"""
values = []
consumed = 0
ignored = None
for i, child in enumerate(self.children):
consumed += child.consumed
if i == 0 and not include_ignored:
ignored = child.ignored
if child.is_value:
if include_ignored:
values.append("{0}{1}".format(child.ignored or "", child.value))
else:
values.append(child.value)
else:
values.append(child.compressed(include_ignored=include_ignored).value)
return ParseNode(new_type or self.node_type,
children=["".join(values)],
consumed=consumed,
ignored=ignored,
position=self.position)
|
python
|
{
"resource": ""
}
|
q278126
|
Cursor.position
|
test
|
def position(self) -> Position:
"""The current position of the cursor."""
return Position(self._index, self._lineno, self._col_offset)
|
python
|
{
"resource": ""
}
|
q278127
|
Cursor.max_readed_position
|
test
|
def max_readed_position(self) -> Position:
"""The index of the deepest character readed."""
return Position(self._maxindex, self._maxline, self._maxcol)
|
python
|
{
"resource": ""
}
|
q278128
|
Cursor.step_next_char
|
test
|
def step_next_char(self):
"""Puts the cursor on the next character."""
self._index += 1
self._col_offset += 1
if self._index > self._maxindex:
self._maxindex = self._index
self._maxcol = self._col_offset
self._maxline = self._lineno
|
python
|
{
"resource": ""
}
|
q278129
|
Cursor.step_next_line
|
test
|
def step_next_line(self):
"""Sets cursor as beginning of next line."""
self._eol.append(self.position)
self._lineno += 1
self._col_offset = 0
|
python
|
{
"resource": ""
}
|
q278130
|
Cursor.step_prev_line
|
test
|
def step_prev_line(self):
"""Sets cursor as end of previous line."""
#TODO(bps): raise explicit error for unregistered eol
#assert self._eol[-1].index == self._index
if len(self._eol) > 0:
self.position = self._eol.pop()
|
python
|
{
"resource": ""
}
|
q278131
|
Stream.last_readed_line
|
test
|
def last_readed_line(self) -> str:
"""Usefull string to compute error message."""
mpos = self._cursor.max_readed_position
mindex = mpos.index
# search last \n
prevline = mindex - 1 if mindex == self.eos_index else mindex
while prevline >= 0 and self._content[prevline] != '\n':
prevline -= 1
# search next \n
nextline = mindex
while nextline < self.eos_index and self._content[nextline] != '\n':
nextline += 1
last_line = self._content[prevline + 1:nextline]
return last_line
|
python
|
{
"resource": ""
}
|
q278132
|
Stream.incpos
|
test
|
def incpos(self, length: int=1) -> int:
"""Increment the cursor to the next character."""
if length < 0:
raise ValueError("length must be positive")
i = 0
while (i < length):
if self._cursor.index < self._len:
if self.peek_char == '\n':
self._cursor.step_next_line()
self._cursor.step_next_char()
i += 1
return self._cursor.index
|
python
|
{
"resource": ""
}
|
q278133
|
Stream.save_context
|
test
|
def save_context(self) -> bool:
"""Save current position."""
self._contexts.append(self._cursor.position)
return True
|
python
|
{
"resource": ""
}
|
q278134
|
Stream.restore_context
|
test
|
def restore_context(self) -> bool:
"""Rollback to previous saved position."""
self._cursor.position = self._contexts.pop()
return False
|
python
|
{
"resource": ""
}
|
q278135
|
to_fmt
|
test
|
def to_fmt(self, with_from=False) -> fmt.indentable:
"""
Return a Fmt representation of Translator for pretty-printing
"""
txt = fmt.sep("\n", [
fmt.sep(
" ",
[
self._type_source,
"to",
self._type_target,
'=',
self._fun.to_fmt()
]
),
self._notify.get_content(with_from)
])
return txt
|
python
|
{
"resource": ""
}
|
q278136
|
Scope.set_name
|
test
|
def set_name(self, name: str):
""" You could set the name after construction """
self.name = name
# update internal names
lsig = self._hsig.values()
self._hsig = {}
for s in lsig:
self._hsig[s.internal_name()] = s
|
python
|
{
"resource": ""
}
|
q278137
|
Scope.count_vars
|
test
|
def count_vars(self) -> int:
""" Count var define by this scope """
n = 0
for s in self._hsig.values():
if hasattr(s, 'is_var') and s.is_var:
n += 1
return n
|
python
|
{
"resource": ""
}
|
q278138
|
Scope.count_funs
|
test
|
def count_funs(self) -> int:
""" Count function define by this scope """
n = 0
for s in self._hsig.values():
if hasattr(s, 'is_fun') and s.is_fun:
n += 1
return n
|
python
|
{
"resource": ""
}
|
q278139
|
Scope.__update_count
|
test
|
def __update_count(self):
""" Update internal counters """
self._ntypes = self.count_types()
self._nvars = self.count_vars()
self._nfuns = self.count_funs()
|
python
|
{
"resource": ""
}
|
q278140
|
Scope.update
|
test
|
def update(self, sig: list or Scope) -> Scope:
""" Update the Set with values of another Set """
values = sig
if hasattr(sig, 'values'):
values = sig.values()
for s in values:
if self.is_namespace:
s.set_parent(self)
if isinstance(s, Scope):
s.state = StateScope.EMBEDDED
self._hsig[s.internal_name()] = s
self.__update_count()
return self
|
python
|
{
"resource": ""
}
|
q278141
|
Scope.union
|
test
|
def union(self, sig: Scope) -> Scope:
""" Create a new Set produce by the union of 2 Set """
new = Scope(sig=self._hsig.values(), state=self.state)
new |= sig
return new
|
python
|
{
"resource": ""
}
|
q278142
|
Scope.intersection_update
|
test
|
def intersection_update(self, oset: Scope) -> Scope:
""" Update Set with common values of another Set """
keys = list(self._hsig.keys())
for k in keys:
if k not in oset:
del self._hsig[k]
else:
self._hsig[k] = oset.get(k)
return self
|
python
|
{
"resource": ""
}
|
q278143
|
Scope.intersection
|
test
|
def intersection(self, sig: Scope) -> Scope:
""" Create a new Set produce by the intersection of 2 Set """
new = Scope(sig=self._hsig.values(), state=self.state)
new &= sig
return new
|
python
|
{
"resource": ""
}
|
q278144
|
Scope.difference_update
|
test
|
def difference_update(self, oset: Scope) -> Scope:
""" Remove values common with another Set """
keys = list(self._hsig.keys())
for k in keys:
if k in oset:
del self._hsig[k]
return self
|
python
|
{
"resource": ""
}
|
q278145
|
Scope.difference
|
test
|
def difference(self, sig: Scope) -> Scope:
""" Create a new Set produce by a Set subtracted by another Set """
new = Scope(sig=self._hsig.values(), state=self.state)
new -= sig
return new
|
python
|
{
"resource": ""
}
|
q278146
|
Scope.symmetric_difference_update
|
test
|
def symmetric_difference_update(self, oset: Scope) -> Scope:
""" Remove common values
and Update specific values from another Set
"""
skey = set()
keys = list(self._hsig.keys())
for k in keys:
if k in oset:
skey.add(k)
for k in oset._hsig.keys():
if k not in skey:
self._hsig[k] = oset.get(k)
for k in skey:
del self._hsig[k]
return self
|
python
|
{
"resource": ""
}
|
q278147
|
Scope.symmetric_difference
|
test
|
def symmetric_difference(self, sig: Scope) -> Scope:
""" Create a new Set with values present in only one Set """
new = Scope(sig=self._hsig.values(), state=self.state)
new ^= sig
return new
|
python
|
{
"resource": ""
}
|
q278148
|
Scope.add
|
test
|
def add(self, it: Signature) -> bool:
""" Add it to the Set """
if isinstance(it, Scope):
it.state = StateScope.EMBEDDED
txt = it.internal_name()
it.set_parent(self)
if self.is_namespace:
txt = it.internal_name()
if txt == "":
txt = '_' + str(len(self._hsig))
if txt in self._hsig:
raise KeyError("Already exists %s" % txt)
self._hsig[txt] = it
self.__update_count()
return True
|
python
|
{
"resource": ""
}
|
q278149
|
Scope.remove
|
test
|
def remove(self, it: Signature) -> bool:
""" Remove it but raise KeyError if not found """
txt = it.internal_name()
if txt not in self._hsig:
raise KeyError(it.show_name() + ' not in Set')
sig = self._hsig[txt]
if isinstance(sig, Scope):
sig.state = StateScope.LINKED
del self._hsig[txt]
return True
|
python
|
{
"resource": ""
}
|
q278150
|
Scope.discard
|
test
|
def discard(self, it: Signature) -> bool:
""" Remove it only if present """
txt = it.internal_name()
if txt in self._hsig:
sig = self._hsig[txt]
if isinstance(sig, Scope):
sig.state = StateScope.LINKED
del self._hsig[txt]
return True
return False
|
python
|
{
"resource": ""
}
|
q278151
|
Scope.values
|
test
|
def values(self) -> [Signature]:
""" Retrieve all values """
if self.state == StateScope.EMBEDDED and self.parent is not None:
return list(self._hsig.values()) + list(self.parent().values())
else:
return self._hsig.values()
|
python
|
{
"resource": ""
}
|
q278152
|
Scope.first
|
test
|
def first(self) -> Signature:
""" Retrieve the first Signature ordered by mangling descendant """
k = sorted(self._hsig.keys())
return self._hsig[k[0]]
|
python
|
{
"resource": ""
}
|
q278153
|
Scope.last
|
test
|
def last(self) -> Signature:
""" Retrieve the last Signature ordered by mangling descendant """
k = sorted(self._hsig.keys())
return self._hsig[k[-1]]
|
python
|
{
"resource": ""
}
|
q278154
|
Scope.get
|
test
|
def get(self, key: str, default=None) -> Signature:
""" Get a signature instance by its internal_name """
item = default
if key in self._hsig:
item = self._hsig[key]
return item
|
python
|
{
"resource": ""
}
|
q278155
|
Scope.get_by_symbol_name
|
test
|
def get_by_symbol_name(self, name: str) -> Scope:
""" Retrieve a Set of all signature by symbol name """
lst = []
for s in self.values():
if s.name == name:
# create an EvalCtx only when necessary
lst.append(EvalCtx.from_sig(s))
# include parent
# TODO: see all case of local redefinition for
# global overloads
# possible algos... take all with different internal_name
if len(lst) == 0:
p = self.get_parent()
if p is not None:
return p.get_by_symbol_name(name)
rscope = Scope(sig=lst, state=StateScope.LINKED, is_namespace=False)
# inherit type/translation from parent
rscope.set_parent(self)
return rscope
|
python
|
{
"resource": ""
}
|
q278156
|
Scope.getsig_by_symbol_name
|
test
|
def getsig_by_symbol_name(self, name: str) -> Signature:
""" Retrieve the unique Signature of a symbol.
Fail if the Signature is not unique
"""
subscope = self.get_by_symbol_name(name)
if len(subscope) != 1:
raise KeyError("%s have multiple candidates in scope" % name)
v = list(subscope.values())
return v[0]
|
python
|
{
"resource": ""
}
|
q278157
|
Scope.get_all_polymorphic_return
|
test
|
def get_all_polymorphic_return(self) -> Scope:
""" For now, polymorphic return types are handled by a symbol artefact.
--> possibly multi-polymorphic, but with different constraints attached!
"""
lst = []
for s in self.values():
if hasattr(s, 'tret') and s.tret.is_polymorphic:
# encapsulate s into a EvalCtx for meta-var resolution
lst.append(EvalCtx.from_sig(s))
rscope = Scope(sig=lst, state=StateScope.LINKED, is_namespace=False)
# inherit type/translation from parent
rscope.set_parent(self)
return rscope
|
python
|
{
"resource": ""
}
|
q278158
|
Scope.callInjector
|
test
|
def callInjector(self, old: Node, trans: Translator) -> Node:
""" If don't have injector call from parent """
if self.astTranslatorInjector is None:
if self.parent is not None:
# TODO: think if we forward for all StateScope
# forward to parent scope
return self.parent().callInjector(old, trans)
else:
raise TypeError("Must define an Translator Injector")
return self.astTranslatorInjector(old, trans)
|
python
|
{
"resource": ""
}
|
q278159
|
normalize
|
test
|
def normalize(ast: Node) -> Node:
"""
Normalize an AST's nodes.
All builtin containers are replaced by referencable subclasses
"""
res = ast
typemap = {DictNode, ListNode, TupleNode}
if type(ast) is dict:
res = DictNode(ast)
elif type(ast) is list:
res = ListNode(ast)
elif type(ast) is tuple:
res = TupleNode(ast)
# in-depth change
if hasattr(res, 'items'):
for k, v in res.items():
res[k] = normalize(v)
elif hasattr(res, '__getitem__'):
for idx, v in zip(range(len(res)), res):
res[idx] = normalize(v)
if type(res) not in typemap and hasattr(res, '__dict__'):
subattr = vars(res)
for k, v in subattr.items():
setattr(res, k, normalize(v))
return res
|
python
|
{
"resource": ""
}
|
q278160
|
Node.set
|
test
|
def set(self, othernode):
"""allow to completly mutate the node into any subclasses of Node"""
self.__class__ = othernode.__class__
self.clean()
if len(othernode) > 0:
for k, v in othernode.items():
self[k] = v
for k, v in vars(othernode).items():
setattr(self, k, v)
|
python
|
{
"resource": ""
}
|
q278161
|
ListNodeItem.rvalues
|
test
|
def rvalues(self):
"""
Yield data values in reverse order.
"""
tmp = self
while tmp is not None:
yield tmp.data
tmp = tmp.prev
|
python
|
{
"resource": ""
}
|
q278162
|
_hit_ok
|
test
|
def _hit_ok(hit, min_hit_charge, max_hit_charge):
''' Check if the given hit is within the limits.
'''
# Omit hits with charge < min_hit_charge
if hit['charge'] < min_hit_charge:
return False
# Omit hits with charge > max_hit_charge
if max_hit_charge != 0 and hit['charge'] > max_hit_charge:
return False
return True
|
python
|
{
"resource": ""
}
|
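Two illustrative calls; note that `max_hit_charge == 0` disables the upper bound:

```python
hit = {'charge': 5}
assert _hit_ok(hit, min_hit_charge=1, max_hit_charge=10)     # within [1, 10]
assert _hit_ok(hit, min_hit_charge=1, max_hit_charge=0)      # 0 disables the upper limit
assert not _hit_ok(hit, min_hit_charge=6, max_hit_charge=0)  # below the minimum
```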
q278163
|
EvalCtx.get_compute_sig
|
test
|
def get_compute_sig(self) -> Signature:
"""
Compute a signature using resolution.
TODO: discuss the relevance of a final generation for a signature
"""
tret = []
tparams = []
for t in self.tret.components:
if t in self.resolution and self.resolution[t] is not None:
tret.append(self.resolution[t]().show_name())
else:
tret.append(t)
if hasattr(self, 'tparams'):
for p in self.tparams:
tp = []
for t in p.components:
if t in self.resolution and self.resolution[t] is not None:
tp.append(self.resolution[t]().show_name())
else:
tp.append(t)
tparams.append(" ".join(tp))
if self.variadic:
if self._variadic_types is None:
raise ValueError("Can't compute the sig "
+ "with unresolved variadic argument"
)
for p in self._variadic_types:
tp = []
for t in p.components:
if (t in self.resolution
and self.resolution[t] is not None
):
tp.append(self.resolution[t]().show_name())
else:
tp.append(t)
tparams.append(" ".join(tp))
ret = Fun(self.name, " ".join(tret), tparams)
# transform as-is into our internal Signature (Val, Var, whatever)
ret.__class__ = self._sig.__class__
return ret
|
python
|
{
"resource": ""
}
|
q278164
|
EvalCtx.resolve
|
test
|
def resolve(self):
"""
Process the signature and find definition for type.
"""
# collect types for resolution
t2resolv = []
if hasattr(self._sig, 'tret'):
t2resolv.append(self._sig.tret)
if hasattr(self._sig, 'tparams') and self._sig.tparams is not None:
for p in self._sig.tparams:
t2resolv.append(p)
if self._translate_to is not None:
t2resolv.append(self._translate_to.target)
if self._variadic_types is not None:
for t in self._variadic_types:
t2resolv.append(t)
for t in t2resolv:
for c in t.components:
if c not in self.resolution or self.resolution[c] is None:
# try to find what is c
parent = self.get_parent()
if parent is not None:
sc = parent.get_by_symbol_name(c)
if len(sc) == 1:
sc = list(sc.values())[0]
# unwrap EvalCtx around Type
if isinstance(sc, EvalCtx):
sc = sc._sig
rtyp = weakref.ref(sc)
self.resolution[c] = rtyp
continue
# unresolved
self.resolution[c] = None
|
python
|
{
"resource": ""
}
|
q278165
|
EvalCtx.get_resolved_names
|
test
|
def get_resolved_names(self, type_name: TypeName) -> list:
"""
Use self.resolution to substitute type_name.
Allows instantiating polymorphic types like ?1, ?toto
"""
if not isinstance(type_name, TypeName):
raise Exception("Take a TypeName as parameter not a %s"
% type(type_name))
rnames = []
for name in type_name.components:
if name not in self.resolution:
raise Exception("Unknown type %s in a EvalCtx" % name)
rname = self.resolution[name]
if rname is not None:
rname = rname().show_name()
else:
rname = name
rnames.append(rname)
return rnames
|
python
|
{
"resource": ""
}
|
q278166
|
EvalCtx.set_resolved_name
|
test
|
def set_resolved_name(self, ref: dict, type_name2solve: TypeName,
type_name_ref: TypeName):
"""
Warning: needs rethinking once global polymorphic types exist
"""
if self.resolution[type_name2solve.value] is None:
self.resolution[type_name2solve.value] = ref[type_name_ref.value]
|
python
|
{
"resource": ""
}
|
q278167
|
S3Saver._delete_local
|
test
|
def _delete_local(self, filename):
"""Deletes the specified file from the local filesystem."""
if os.path.exists(filename):
os.remove(filename)
|
python
|
{
"resource": ""
}
|
q278168
|
S3Saver._delete_s3
|
test
|
def _delete_s3(self, filename, bucket_name):
"""Deletes the specified file from the given S3 bucket."""
conn = S3Connection(self.access_key_id, self.access_key_secret)
bucket = conn.get_bucket(bucket_name)
if type(filename).__name__ == 'Key':
filename = '/' + filename.name
path = self._get_s3_path(filename)
k = Key(bucket)
k.key = path
try:
bucket.delete_key(k)
except S3ResponseError:
pass
|
python
|
{
"resource": ""
}
|
q278169
|
S3Saver.delete
|
test
|
def delete(self, filename, storage_type=None, bucket_name=None):
"""Deletes the specified file, either locally or from S3, depending on the file's storage type."""
if not (storage_type and bucket_name):
self._delete_local(filename)
else:
if storage_type != 's3':
raise ValueError('Storage type "%s" is invalid, the only supported storage type (apart from default local storage) is s3.' % storage_type)
self._delete_s3(filename, bucket_name)
|
python
|
{
"resource": ""
}
|
q278170
|
S3Saver._save_local
|
test
|
def _save_local(self, temp_file, filename, obj):
"""Saves the specified file to the local file system."""
path = self._get_path(filename)
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path), self.permission | 0o111)
fd = open(path, 'wb')
# Thanks to:
# http://stackoverflow.com/a/3253276/2066849
temp_file.seek(0)
t = temp_file.read(1048576)
while t:
fd.write(t)
t = temp_file.read(1048576)
fd.close()
if self.filesize_field:
setattr(obj, self.filesize_field, os.path.getsize(path))
return filename
|
python
|
{
"resource": ""
}
|
q278171
|
S3Saver._save_s3
|
test
|
def _save_s3(self, temp_file, filename, obj):
"""Saves the specified file to the configured S3 bucket."""
conn = S3Connection(self.access_key_id, self.access_key_secret)
bucket = conn.get_bucket(self.bucket_name)
path = self._get_s3_path(filename)
k = bucket.new_key(path)
k.set_contents_from_string(temp_file.getvalue())
k.set_acl(self.acl)
if self.filesize_field:
setattr(obj, self.filesize_field, k.size)
return filename
|
python
|
{
"resource": ""
}
|
q278172
|
S3Saver.save
|
test
|
def save(self, temp_file, filename, obj):
"""Saves the specified file to either S3 or the local filesystem, depending on the currently enabled storage type."""
if not (self.storage_type and self.bucket_name):
ret = self._save_local(temp_file, filename, obj)
else:
if self.storage_type != 's3':
raise ValueError('Storage type "%s" is invalid, the only supported storage type (apart from default local storage) is s3.' % self.storage_type)
ret = self._save_s3(temp_file, filename, obj)
if self.field_name:
setattr(obj, self.field_name, ret)
if self.storage_type == 's3':
if self.storage_type_field:
setattr(obj, self.storage_type_field, self.storage_type)
if self.bucket_name_field:
setattr(obj, self.bucket_name_field, self.bucket_name)
else:
if self.storage_type_field:
setattr(obj, self.storage_type_field, '')
if self.bucket_name_field:
setattr(obj, self.bucket_name_field, '')
return ret
|
python
|
{
"resource": ""
}
|
q278173
|
S3Saver._find_by_path_s3
|
test
|
def _find_by_path_s3(self, path, bucket_name):
"""Finds files by licking an S3 bucket's contents by prefix."""
conn = S3Connection(self.access_key_id, self.access_key_secret)
bucket = conn.get_bucket(bucket_name)
s3_path = self._get_s3_path(path)
return bucket.list(prefix=s3_path)
|
python
|
{
"resource": ""
}
|
q278174
|
enum
|
test
|
def enum(*sequential, **named):
"""
Build an enum statement
"""
#: build enums from parameter
enums = dict(zip(sequential, range(len(sequential))), **named)
enums['map'] = copy.copy(enums)
#: build reverse mapping
enums['rmap'] = {}
for key, value in enums.items():
if type(value) is int:
enums['rmap'][value] = key
return type('Enum', (), enums)
|
python
|
{
"resource": ""
}
|
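Usage sketch for the `enum` helper (it needs `copy` imported); `map` holds the forward mapping and `rmap` the reverse mapping over the int values:

```python
Color = enum('RED', 'GREEN', BLUE=10)
assert Color.RED == 0 and Color.GREEN == 1 and Color.BLUE == 10
assert Color.map['RED'] == 0       # forward mapping
assert Color.rmap[10] == 'BLUE'    # reverse mapping (int values only)
```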
q278175
|
checktypes
|
test
|
def checktypes(func):
"""Decorator to verify arguments and return types."""
sig = inspect.signature(func)
types = {}
for param in sig.parameters.values():
# Iterate through function's parameters and build the list of
# arguments types
param_type = param.annotation
if param_type is param.empty or not inspect.isclass(param_type):
# Missing annotation or not a type, skip it
continue
types[param.name] = param_type
# If the argument has a type specified, let's check that its
# default value (if present) conforms with the type.
if (param.default is not param.empty and
not isinstance(param.default, param_type)):
raise ValueError(
"{func}: wrong type of a default value for {arg!r}".format(
func=func.__qualname__, arg=param.name)
)
def check_type(sig, arg_name, arg_type, arg_value):
# Internal function that encapsulates arguments type checking
if not isinstance(arg_value, arg_type):
raise ValueError("{func}: wrong type of {arg!r} argument, "
"{exp!r} expected, got {got!r}".
format(func=func.__qualname__, arg=arg_name,
exp=arg_type.__name__,
got=type(arg_value).__name__))
@functools.wraps(func)
def wrapper(*args, **kwargs):
# Let's bind the arguments
ba = sig.bind(*args, **kwargs)
for arg_name, arg in ba.arguments.items():
# And iterate through the bound arguments
try:
type_ = types[arg_name]
except KeyError:
continue
else:
# OK, we have a type for the argument, lets get the
# corresponding parameter description from the signature object
param = sig.parameters[arg_name]
if param.kind == param.VAR_POSITIONAL:
# If this parameter is a variable-argument parameter,
# then we need to check each of its values
for value in arg:
check_type(sig, arg_name, type_, value)
elif param.kind == param.VAR_KEYWORD:
# If this parameter is a variable-keyword-argument
# parameter:
for subname, value in arg.items():
check_type(sig, arg_name + ':' + subname, type_, value)
else:
# And, finally, if this parameter a regular one:
check_type(sig, arg_name, type_, arg)
result = func(*ba.args, **ba.kwargs)
# The last bit - let's check that the result is correct
return_type = sig.return_annotation
if (return_type is not sig.empty and
isinstance(return_type, type) and
not isinstance(result, return_type)):
raise ValueError(
'{func}: wrong return type, {exp} expected, got {got}'.format(
func=func.__qualname__, exp=return_type.__name__,
got=type(result).__name__)
)
return result
return wrapper
|
python
|
{
"resource": ""
}
|
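A usage sketch for `checktypes` (the decorator itself relies on `inspect` and `functools`); the annotations drive the runtime checks:

```python
@checktypes
def scale(x: int, factor: int = 2) -> int:
    return x * factor

assert scale(3) == 6
try:
    scale("3")
except ValueError as exc:
    print(exc)  # scale: wrong type of 'x' argument, 'int' expected, got 'str'
```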
q278176
|
set_one
|
test
|
def set_one(chainmap, thing_name, callobject):
""" Add a mapping with key thing_name for callobject in chainmap with
namespace handling.
"""
namespaces = reversed(thing_name.split("."))
lstname = []
for name in namespaces:
lstname.insert(0, name)
strname = '.'.join(lstname)
chainmap[strname] = callobject
|
python
|
{
"resource": ""
}
|
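What `set_one` actually registers: every dotted suffix of the name maps to the same object, so lookups can use any trailing fragment:

```python
cm = {}                                    # any mapping works, e.g. a ChainMap
set_one(cm, 'mymod.MyParser.num', print)   # any callable will do
assert sorted(cm) == ['MyParser.num', 'mymod.MyParser.num', 'num']
assert cm['num'] is cm['mymod.MyParser.num']
```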
q278177
|
add_method
|
test
|
def add_method(cls):
"""Attach a method to a class."""
def wrapper(f):
#if hasattr(cls, f.__name__):
# raise AttributeError("{} already has a '{}' attribute".format(
# cls.__name__, f.__name__))
setattr(cls, f.__name__, f)
return f
return wrapper
|
python
|
{
"resource": ""
}
|
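Usage sketch for `add_method`:

```python
class Greeter:
    pass

@add_method(Greeter)
def hello(self):
    return "hi"

assert Greeter().hello() == "hi"   # the function is now attached as a method
```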
q278178
|
hook
|
test
|
def hook(cls, hookname=None, erase=False):
"""Attach a method to a parsing class and register it as a parser hook.
The method is registered with its name unless hookname is provided.
"""
if not hasattr(cls, '_hooks'):
raise TypeError(
"%s doesn't seem to be a BasicParser subclass" % cls.__name__)
class_hook_list = cls._hooks
class_rule_list = cls._rules
def wrapper(f):
nonlocal hookname
add_method(cls)(f)
if hookname is None:
hookname = f.__name__
if not erase and (hookname in class_hook_list or hookname in class_rule_list):
raise TypeError("%s is already define has rule or hook" % hookname)
if '.' not in hookname:
hookname = '.'.join([cls.__module__, cls.__name__, hookname])
set_one(class_hook_list, hookname, f)
return f
return wrapper
|
python
|
{
"resource": ""
}
|
q278179
|
rule
|
test
|
def rule(cls, rulename=None, erase=False):
"""Attach a method to a parsing class and register it as a parser rule.
The method is registered with its name unless rulename is provided.
"""
if not hasattr(cls, '_rules'):
raise TypeError(
"%s doesn't seem to be a BasicParser subclass" % cls.__name__)
class_hook_list = cls._hooks
class_rule_list = cls._rules
def wrapper(f):
nonlocal rulename
add_method(cls)(f)
if rulename is None:
rulename = f.__name__
if not erase and (rulename in class_hook_list or rulename in class_rule_list):
raise TypeError("%s is already define has rule or hook" % rulename)
if '.' not in rulename:
rulename = cls.__module__ + '.' + cls.__name__ + '.' + rulename
set_one(class_rule_list, rulename, f)
return f
return wrapper
|
python
|
{
"resource": ""
}
|
q278180
|
directive
|
test
|
def directive(directname=None):
"""Attach a class to a parsing class and register it as a parser directive.
The class is registered with its name unless directname is provided.
"""
global _directives
class_dir_list = _directives
def wrapper(f):
nonlocal directname
if directname is None:
directname = f.__name__
f.ns_name = directname
set_one(class_dir_list, directname, f)
return f
return wrapper
|
python
|
{
"resource": ""
}
|
q278181
|
decorator
|
test
|
def decorator(directname=None):
    """
    Attach a class to a parsing decorator and register it to the global
    decorator list.
    The class is registered with its name unless directname is provided
    """
    global _decorators
    class_deco_list = _decorators
    def wrapper(f):
        nonlocal directname
        if directname is None:
            directname = f.__name__
        f.ns_name = directname
        set_one(class_deco_list, directname, f)
        return f
    return wrapper
|
python
|
{
"resource": ""
}
|
q278182
|
bind
|
test
|
def bind(self, dst: str, src: Node) -> bool:
"""Allow to alias a node to another name.
Useful to bind a node to _ as return of Rule::
R = [
__scope__:L [item:I #add_item(L, I)]* #bind('_', L)
]
It's also the default behaviour of ':>'
"""
for m in self.rule_nodes.maps:
for k, v in m.items():
if k == dst:
m[k] = src
return True
raise Exception('%s not found' % dst)
|
python
|
{
"resource": ""
}
|
q278183
|
read_eol
|
test
|
def read_eol(self) -> bool:
"""Return True if the parser can consume an EOL byte sequence."""
if self.read_eof():
return False
self._stream.save_context()
self.read_char('\r')
if self.read_char('\n'):
return self._stream.validate_context()
return self._stream.restore_context()
|
python
|
{
"resource": ""
}
|
q278184
|
BasicParser.push_rule_nodes
|
test
|
def push_rule_nodes(self) -> bool:
"""Push context variable to store rule nodes."""
if self.rule_nodes is None:
self.rule_nodes = collections.ChainMap()
self.tag_cache = collections.ChainMap()
self.id_cache = collections.ChainMap()
else:
self.rule_nodes = self.rule_nodes.new_child()
self.tag_cache = self.tag_cache.new_child()
self.id_cache = self.id_cache.new_child()
return True
|
python
|
{
"resource": ""
}
|
q278185
|
BasicParser.pop_rule_nodes
|
test
|
def pop_rule_nodes(self) -> bool:
"""Pop context variable that store rule nodes"""
self.rule_nodes = self.rule_nodes.parents
self.tag_cache = self.tag_cache.parents
self.id_cache = self.id_cache.parents
return True
|
python
|
{
"resource": ""
}
|
q278186
|
BasicParser.value
|
test
|
def value(self, n: Node) -> str:
"""Return the text value of the node"""
id_n = id(n)
idcache = self.id_cache
if id_n not in idcache:
return ""
name = idcache[id_n]
tag_cache = self.tag_cache
if name not in tag_cache:
raise Exception("Incoherent tag cache")
tag = tag_cache[name]
k = "%d:%d" % (tag._begin, tag._end)
valcache = self._streams[-1].value_cache
if k not in valcache:
valcache[k] = str(tag)
return valcache[k]
|
python
|
{
"resource": ""
}
|
q278187
|
BasicParser.parsed_stream
|
test
|
def parsed_stream(self, content: str, name: str=None):
"""Push a new Stream into the parser.
All subsequent called functions will parse this new stream,
until the 'popStream' function is called.
"""
self._streams.append(Stream(content, name))
|
python
|
{
"resource": ""
}
|
q278188
|
BasicParser.begin_tag
|
test
|
def begin_tag(self, name: str) -> Node:
"""Save the current index under the given name."""
# Check if we could attach tag cache to current rule_nodes scope
self.tag_cache[name] = Tag(self._stream, self._stream.index)
return True
|
python
|
{
"resource": ""
}
|
q278189
|
BasicParser.end_tag
|
test
|
def end_tag(self, name: str) -> Node:
"""Extract the string between saved and current index."""
self.tag_cache[name].set_end(self._stream.index)
return True
|
python
|
{
"resource": ""
}
|
q278190
|
BasicParser.set_rules
|
test
|
def set_rules(cls, rules: dict) -> bool:
"""
Merge internal rules set with the given rules
"""
cls._rules = cls._rules.new_child()
for rule_name, rule_pt in rules.items():
if '.' not in rule_name:
rule_name = cls.__module__ \
+ '.' + cls.__name__ \
+ '.' + rule_name
meta.set_one(cls._rules, rule_name, rule_pt)
return True
|
python
|
{
"resource": ""
}
|
q278191
|
BasicParser.set_hooks
|
test
|
def set_hooks(cls, hooks: dict) -> bool:
"""
Merge internal hooks set with the given hooks
"""
cls._hooks = cls._hooks.new_child()
for hook_name, hook_pt in hooks.items():
if '.' not in hook_name:
hook_name = cls.__module__ \
+ '.' + cls.__name__ \
+ '.' + hook_name
meta.set_one(cls._hooks, hook_name, hook_pt)
return True
|
python
|
{
"resource": ""
}
|
q278192
|
BasicParser.set_directives
|
test
|
def set_directives(cls, directives: dict) -> bool:
"""
Merge the internal directives set with the given directives.
For directives to work, attach them only in the dsl.Parser class
"""
meta._directives = meta._directives.new_child()
for dir_name, dir_pt in directives.items():
meta.set_one(meta._directives, dir_name, dir_pt)
dir_pt.ns_name = dir_name
return True
|
python
|
{
"resource": ""
}
|
q278193
|
BasicParser.eval_rule
|
test
|
def eval_rule(self, name: str) -> Node:
"""Evaluate a rule by name."""
# context created by caller
n = Node()
id_n = id(n)
self.rule_nodes['_'] = n
self.id_cache[id_n] = '_'
# TODO: other behavior for empty rules?
if name not in self.__class__._rules:
self.diagnostic.notify(
error.Severity.ERROR,
"Unknown rule : %s" % name,
error.LocationInfo.from_stream(self._stream, is_error=True)
)
raise self.diagnostic
self._lastRule = name
rule_to_eval = self.__class__._rules[name]
# TODO: add packrat cache here, same rule - same pos == same res
res = rule_to_eval(self)
if res:
res = self.rule_nodes['_']
return res
|
python
|
{
"resource": ""
}
|
q278194
|
BasicParser.eval_hook
|
test
|
def eval_hook(self, name: str, ctx: list) -> Node:
"""Evaluate the hook by its name"""
if name not in self.__class__._hooks:
# TODO: don't always throw an error; could return True by default
self.diagnostic.notify(
error.Severity.ERROR,
"Unknown hook : %s" % name,
error.LocationInfo.from_stream(self._stream, is_error=True)
)
raise self.diagnostic
self._lastRule = '#' + name
res = self.__class__._hooks[name](self, *ctx)
if type(res) is not bool:
raise TypeError("Your hook %r didn't return a bool value" % name)
return res
|
python
|
{
"resource": ""
}
|
q278195
|
BasicParser.peek_text
|
test
|
def peek_text(self, text: str) -> bool:
"""Same as readText but doesn't consume the stream."""
start = self._stream.index
stop = start + len(text)
if stop > self._stream.eos_index:
return False
return self._stream[self._stream.index:stop] == text
|
python
|
{
"resource": ""
}
|
q278196
|
BasicParser.one_char
|
test
|
def one_char(self) -> bool:
"""Read one byte in stream"""
if self.read_eof():
return False
self._stream.incpos()
return True
|
python
|
{
"resource": ""
}
|
q278197
|
BasicParser.read_char
|
test
|
def read_char(self, c: str) -> bool:
"""
Consume the head byte if it equals c, increment the current index and
return True; otherwise return False. It uses peek_char and is the same
as a quoted terminal in BNF.
"""
if self.read_eof():
return False
self._stream.save_context()
if c == self._stream.peek_char:
self._stream.incpos()
return self._stream.validate_context()
return self._stream.restore_context()
|
python
|
{
"resource": ""
}
|
q278198
|
BasicParser.read_until_eof
|
test
|
def read_until_eof(self) -> bool:
"""Consume all the stream. Same as EOF in BNF."""
if self.read_eof():
return True
# TODO: read ALL
self._stream.save_context()
while not self.read_eof():
self._stream.incpos()
return self._stream.validate_context()
|
python
|
{
"resource": ""
}
|
q278199
|
BasicParser.ignore_blanks
|
test
|
def ignore_blanks(self) -> bool:
    """Consume whitespace characters."""
    self._stream.save_context()
    while (not self.read_eof()
           and self._stream.peek_char in " \t\v\f\r\n"):
        self._stream.incpos()
    return self._stream.validate_context()
|
python
|
{
"resource": ""
}
|