Columns: desc (docstring) | decl (function declaration) | bodies (function body)
'Turn a token into a label. (Internal)'
def classify(self, type, value, context):
    if type == token.NAME:
        self.used_names.add(value)
        ilabel = self.grammar.keywords.get(value)
        if ilabel is not None:
            return ilabel
    ilabel = self.grammar.tokens.get(type)
    if ilabel is None:
        raise ParseError('bad token', type, value, context)
    return ilabel
|
'Shift a token. (Internal)'
def shift(self, type, value, newstate, context):
    dfa, state, node = self.stack[-1]
    newnode = (type, value, context, None)
    newnode = self.convert(self.grammar, newnode)
    if newnode is not None:
        node[-1].append(newnode)
    self.stack[-1] = (dfa, newstate, node)
|
'Push a nonterminal. (Internal)'
def push(self, type, newdfa, newstate, context):
    dfa, state, node = self.stack[-1]
    newnode = (type, None, context, [])
    self.stack[-1] = (dfa, newstate, node)
    self.stack.append((newdfa, 0, newnode))
|
'Pop a nonterminal. (Internal)'
def pop(self):
    popdfa, popstate, popnode = self.stack.pop()
    newnode = self.convert(self.grammar, popnode)
    if newnode is not None:
        if self.stack:
            dfa, state, node = self.stack[-1]
            node[-1].append(newnode)
        else:
            self.rootnode = newnode
            self.rootnode.used_names = self.used_names
|
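'Example (added): a minimal standalone sketch, not from the source above, of the stack discipline shift/push/pop maintain. Each stack entry is a (dfa, state, node) triple; shift appends a leaf to the node under construction, push opens a new child node, and pop attaches the finished node to its parent. All names here are toy stand-ins for the real Parser machinery.'
# Toy stack; the real parser stores grammar DFAs where this uses None.
stack = [(None, 0, ('root', []))]

def toy_shift(leaf, newstate):
    dfa, state, node = stack[-1]
    node[-1].append(leaf)              # attach the leaf to the current node
    stack[-1] = (dfa, newstate, node)  # advance the DFA state

def toy_push(symbol, newstate):
    dfa, state, node = stack[-1]
    stack[-1] = (dfa, newstate, node)  # state to resume after the pop
    stack.append((None, 0, (symbol, [])))

def toy_pop():
    _, _, done = stack.pop()
    if stack:
        stack[-1][-1][-1].append(done)  # attach finished node to its parent

toy_push('expr', 1)
toy_shift(('NUMBER', '1'), 1)
toy_pop()
print stack[-1][-1]   # ('root', [('expr', [('NUMBER', '1')])])
|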
'Dump the grammar tables to a pickle file.'
def dump(self, filename):
    f = open(filename, 'wb')
    pickle.dump(self.__dict__, f, 2)
    f.close()
|
'Load the grammar tables from a pickle file.'
def load(self, filename):
    f = open(filename, 'rb')
    d = pickle.load(f)
    f.close()
    self.__dict__.update(d)
|
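'Example (added): dump()/load() simply pickle the instance __dict__, so the grammar tables round-trip through a file. A hedged sketch; `g` is assumed to be a populated Grammar instance and the filename is hypothetical.'
g.dump('grammar.pickle')    # writes g.__dict__ with pickle protocol 2
g2 = g.__class__()
g2.load('grammar.pickle')   # g2 now carries the same tables as g
assert g2.symbol2number == g.symbol2number
|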
'Copy the grammar.'
def copy(self):
    new = self.__class__()
    for dict_attr in ('symbol2number', 'number2symbol', 'dfas',
                      'keywords', 'tokens', 'symbol2label'):
        setattr(new, dict_attr, getattr(self, dict_attr).copy())
    new.labels = self.labels[:]
    new.states = self.states[:]
    new.start = self.start
    return new
|
'Dump the grammar tables to standard output, for debugging.'
def report(self):
    from pprint import pprint
    print 's2n'
    pprint(self.symbol2number)
    print 'n2s'
    pprint(self.number2symbol)
    print 'states'
    pprint(self.states)
    print 'dfas'
    pprint(self.dfas)
    print 'labels'
    pprint(self.labels)
    print 'start', self.start
|
'Parse a series of tokens and return the syntax tree.'
def parse_tokens(self, tokens, debug=False):
    p = parse.Parser(self.grammar, self.convert)
    p.setup()
    lineno = 1
    column = 0
    type = value = start = end = line_text = None
    prefix = u''
    for quintuple in tokens:
        type, value, start, end, line_text = quintuple
        if start != (lineno, column):
            assert (lineno, column) <= start, ((lineno, column), start)
            s_lineno, s_column = start
            if lineno < s_lineno:
                prefix += '\n' * (s_lineno - lineno)
                lineno = s_lineno
                column = 0
            if column < s_column:
                prefix += line_text[column:s_column]
                column = s_column
        if type in (tokenize.COMMENT, tokenize.NL):
            prefix += value
            lineno, column = end
            if value.endswith('\n'):
                lineno += 1
                column = 0
            continue
        if type == token.OP:
            type = grammar.opmap[value]
        if debug:
            self.logger.debug('%s %r (prefix=%r)',
                              token.tok_name[type], value, prefix)
        if p.addtoken(type, value, (prefix, start)):
            if debug:
                self.logger.debug('Stop.')
            break
        prefix = ''
        lineno, column = end
        if value.endswith('\n'):
            lineno += 1
            column = 0
    else:
        # We never broke out of the loop: EOF came before the grammar accepted.
        raise parse.ParseError('incomplete input', type, value, (prefix, start))
    return p.rootnode
|
'Parse a stream and return the syntax tree.'
def parse_stream_raw(self, stream, debug=False):
    tokens = tokenize.generate_tokens(stream.readline)
    return self.parse_tokens(tokens, debug)
|
'Parse a stream and return the syntax tree.'
def parse_stream(self, stream, debug=False):
    return self.parse_stream_raw(stream, debug)
|
'Parse a file and return the syntax tree.'
def parse_file(self, filename, encoding=None, debug=False):
    stream = codecs.open(filename, 'r', encoding)
    try:
        return self.parse_stream(stream, debug)
    finally:
        stream.close()
|
'Parse a string and return the syntax tree.'
def parse_string(self, text, debug=False):
    tokens = tokenize.generate_tokens(StringIO.StringIO(text).readline)
    return self.parse_tokens(tokens, debug)
|
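'Example (added): a hedged usage sketch of these parse_* entry points through lib2to3\'s public Driver; parse_string() tokenizes the text and feeds parse_tokens() above.'
from lib2to3 import pygram, pytree
from lib2to3.pgen2 import driver

d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
tree = d.parse_string('x = 1\n')
print unicode(tree)   # reproduces the source exactly: 'x = 1\n'
|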
'Load the grammar tables from the text files written by pgen.'
def run(self, graminit_h, graminit_c):
    self.parse_graminit_h(graminit_h)
    self.parse_graminit_c(graminit_c)
    self.finish_off()
|
'Parse the .h file written by pgen. (Internal)
This file is a sequence of #define statements defining the
nonterminals of the grammar as numbers. We build two tables
mapping the numbers to names and back.'
def parse_graminit_h(self, filename):
    try:
        f = open(filename)
    except IOError as err:
        print "Can't open %s: %s" % (filename, err)
        return False
    self.symbol2number = {}
    self.number2symbol = {}
    lineno = 0
    for line in f:
        lineno += 1
        mo = re.match(r'^#define\s+(\w+)\s+(\d+)$', line)
        if not mo and line.strip():
            print "%s(%s): can't parse %s" % (filename, lineno, line.strip())
        else:
            symbol, number = mo.groups()
            number = int(number)
            assert symbol not in self.symbol2number
            assert number not in self.number2symbol
            self.symbol2number[symbol] = number
            self.number2symbol[number] = symbol
    return True
|
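'Example (added): an illustration, not from the source above, of the #define format this method consumes. Input lines such as "#define single_input 256" and "#define file_input 257" yield symbol2number == {\'single_input\': 256, \'file_input\': 257} plus the inverse number2symbol mapping.'
import re
mo = re.match(r'^#define\s+(\w+)\s+(\d+)$', '#define file_input 257')
print mo.groups()   # ('file_input', '257')
|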
'Parse the .c file written by pgen. (Internal)
The file looks as follows. The first two lines are always this:
    #include "pgenheaders.h"
    #include "grammar.h"
After that come four blocks:
1) one or more state definitions
2) a table defining dfas
3) a table defining labels
4) a struct defining the grammar
A state definition has the following form:
- one or more arc arrays, each of the form:
    static arc arcs_<n>_<m>[<k>] = {
        {<i>, <j>},
- followed by a state array, of the form:
    static state states_<s>[<t>] = {
        {<k>, arcs_<n>_<m>},'
def parse_graminit_c(self, filename):
    try:
        f = open(filename)
    except IOError as err:
        print "Can't open %s: %s" % (filename, err)
        return False
    lineno = 0
    # The two #include lines.
    lineno, line = lineno + 1, f.next()
    assert line == '#include "pgenheaders.h"\n', (lineno, line)
    lineno, line = lineno + 1, f.next()
    assert line == '#include "grammar.h"\n', (lineno, line)
    # Parse the state definitions.
    lineno, line = lineno + 1, f.next()
    allarcs = {}
    states = []
    while line.startswith('static arc '):
        while line.startswith('static arc '):
            mo = re.match(r'static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$', line)
            assert mo, (lineno, line)
            n, m, k = map(int, mo.groups())
            arcs = []
            for _ in range(k):
                lineno, line = lineno + 1, f.next()
                mo = re.match(r'\s+{(\d+), (\d+)},$', line)
                assert mo, (lineno, line)
                i, j = map(int, mo.groups())
                arcs.append((i, j))
            lineno, line = lineno + 1, f.next()
            assert line == '};\n', (lineno, line)
            allarcs[(n, m)] = arcs
            lineno, line = lineno + 1, f.next()
        mo = re.match(r'static state states_(\d+)\[(\d+)\] = {$', line)
        assert mo, (lineno, line)
        s, t = map(int, mo.groups())
        assert s == len(states), (lineno, line)
        state = []
        for _ in range(t):
            lineno, line = lineno + 1, f.next()
            mo = re.match(r'\s+{(\d+), arcs_(\d+)_(\d+)},$', line)
            assert mo, (lineno, line)
            k, n, m = map(int, mo.groups())
            arcs = allarcs[(n, m)]
            assert k == len(arcs), (lineno, line)
            state.append(arcs)
        states.append(state)
        lineno, line = lineno + 1, f.next()
        assert line == '};\n', (lineno, line)
        lineno, line = lineno + 1, f.next()
    self.states = states
    # Parse the dfa table.
    dfas = {}
    mo = re.match(r'static dfa dfas\[(\d+)\] = {$', line)
    assert mo, (lineno, line)
    ndfas = int(mo.group(1))
    for i in range(ndfas):
        lineno, line = lineno + 1, f.next()
        mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$', line)
        assert mo, (lineno, line)
        symbol = mo.group(2)
        number, x, y, z = map(int, mo.group(1, 3, 4, 5))
        assert self.symbol2number[symbol] == number, (lineno, line)
        assert self.number2symbol[number] == symbol, (lineno, line)
        assert x == 0, (lineno, line)
        state = states[z]
        assert y == len(state), (lineno, line)
        lineno, line = lineno + 1, f.next()
        mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line)
        assert mo, (lineno, line)
        # Decode the first-set bitmap into a dict keyed by label index.
        first = {}
        rawbitset = eval(mo.group(1))
        for i, c in enumerate(rawbitset):
            byte = ord(c)
            for j in range(8):
                if byte & (1 << j):
                    first[i * 8 + j] = 1
        dfas[number] = (state, first)
    lineno, line = lineno + 1, f.next()
    assert line == '};\n', (lineno, line)
    self.dfas = dfas
    # Parse the label table.
    labels = []
    lineno, line = lineno + 1, f.next()
    mo = re.match(r'static label labels\[(\d+)\] = {$', line)
    assert mo, (lineno, line)
    nlabels = int(mo.group(1))
    for i in range(nlabels):
        lineno, line = lineno + 1, f.next()
        mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line)
        assert mo, (lineno, line)
        x, y = mo.groups()
        x = int(x)
        if y == '0':
            y = None
        else:
            y = eval(y)
        labels.append((x, y))
    lineno, line = lineno + 1, f.next()
    assert line == '};\n', (lineno, line)
    self.labels = labels
    # Parse the grammar struct.
    lineno, line = lineno + 1, f.next()
    assert line == 'grammar _PyParser_Grammar = {\n', (lineno, line)
    lineno, line = lineno + 1, f.next()
    mo = re.match(r'\s+(\d+),$', line)
    assert mo, (lineno, line)
    ndfas = int(mo.group(1))
    assert ndfas == len(self.dfas)
    lineno, line = lineno + 1, f.next()
    assert line == '\tdfas,\n', (lineno, line)
    lineno, line = lineno + 1, f.next()
    mo = re.match(r'\s+{(\d+), labels},$', line)
    assert mo, (lineno, line)
    nlabels = int(mo.group(1))
    assert nlabels == len(self.labels), (lineno, line)
    lineno, line = lineno + 1, f.next()
    mo = re.match(r'\s+(\d+)$', line)
    assert mo, (lineno, line)
    start = int(mo.group(1))
    assert start in self.number2symbol, (lineno, line)
    self.start = start
    lineno, line = lineno + 1, f.next()
    assert line == '};\n', (lineno, line)
    try:
        lineno, line = lineno + 1, f.next()
    except StopIteration:
        pass
    else:
        assert 0, (lineno, line)
|
'Create additional useful structures. (Internal)'
def finish_off(self):
    self.keywords = {}   # map from keyword strings to keyword labels
    self.tokens = {}     # map from token numbers to token labels
    for ilabel, (type, value) in enumerate(self.labels):
        if type == token.NAME and value is not None:
            self.keywords[value] = ilabel
        elif value is None:
            self.tokens[type] = ilabel
|
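'Example (added): a toy illustration of the split finish_off() performs. NAME labels that carry a value become keyword labels; labels with no value index plain token types.'
import token
labels = [(token.NAME, None), (token.NAME, 'if'), (token.NUMBER, None)]
keywords, tokens = {}, {}
for ilabel, (type, value) in enumerate(labels):
    if type == token.NAME and value is not None:
        keywords[value] = ilabel
    elif value is None:
        tokens[type] = ilabel
print keywords   # {'if': 1}
print tokens     # {1: 0, 2: 2}, i.e. NAME -> label 0, NUMBER -> label 2
|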
'Help the next test'
def _Call(self, name, args=None, prefix=None):
    children = []
    if isinstance(args, list):
        for arg in args:
            children.append(arg)
            children.append(Comma())
        children.pop()   # drop the trailing comma
    return Call(Name(name), children, prefix)
|
'Set up a test source tree and output destination tree.'
def setup_test_source_trees(self):
    self.temp_dir = tempfile.mkdtemp()
    self.py2_src_dir = os.path.join(self.temp_dir, 'python2_project')
    self.py3_dest_dir = os.path.join(self.temp_dir, 'python3_project')
    os.mkdir(self.py2_src_dir)
    os.mkdir(self.py3_dest_dir)
    self.setup_files = []
    open(os.path.join(self.py2_src_dir, '__init__.py'), 'w').close()
    self.setup_files.append('__init__.py')
    shutil.copy(PY2_TEST_MODULE, self.py2_src_dir)
    self.setup_files.append(os.path.basename(PY2_TEST_MODULE))
    self.trivial_py2_file = os.path.join(self.py2_src_dir, 'trivial.py')
    self.init_py2_file = os.path.join(self.py2_src_dir, '__init__.py')
    with open(self.trivial_py2_file, 'w') as trivial:
        trivial.write("print 'I need a simple conversion.'")
    self.setup_files.append('trivial.py')
|
'2to3 a single directory with a new output dir and suffix.'
def test_filename_changing_on_output_single_dir(self):
    self.setup_test_source_trees()
    out = StringIO.StringIO()
    err = StringIO.StringIO()
    suffix = 'TEST'
    ret = self.run_2to3_capture(
        ['-n', '--add-suffix', suffix, '--write-unchanged-files',
         '--no-diffs', '--output-dir', self.py3_dest_dir, self.py2_src_dir],
        StringIO.StringIO(''), out, err)
    self.assertEqual(ret, 0)
    stderr = err.getvalue()
    self.assertIn(' implies -w.', stderr)
    self.assertIn(
        'Output in %r will mirror the input directory %r layout' % (
            self.py3_dest_dir, self.py2_src_dir), stderr)
    self.assertEqual(
        set(name + suffix for name in self.setup_files),
        set(os.listdir(self.py3_dest_dir)))
    for name in self.setup_files:
        self.assertIn('Writing converted %s to %s' % (
            os.path.join(self.py2_src_dir, name),
            os.path.join(self.py3_dest_dir, name + suffix)), stderr)
    sep = re.escape(os.sep)
    self.assertRegexpMatches(
        stderr, 'No changes to .*/__init__\\.py'.replace('/', sep))
    self.assertNotRegex(
        stderr, 'No changes to .*/trivial\\.py'.replace('/', sep))
|
'2to3 two files in one directory with a new output dir.'
def test_filename_changing_on_output_two_files(self):
    self.setup_test_source_trees()
    err = StringIO.StringIO()
    py2_files = [self.trivial_py2_file, self.init_py2_file]
    expected_files = set(os.path.basename(name) for name in py2_files)
    ret = self.run_2to3_capture(
        ['-n', '-w', '--write-unchanged-files', '--no-diffs',
         '--output-dir', self.py3_dest_dir] + py2_files,
        StringIO.StringIO(''), StringIO.StringIO(), err)
    self.assertEqual(ret, 0)
    stderr = err.getvalue()
    self.assertIn(
        'Output in %r will mirror the input directory %r layout' % (
            self.py3_dest_dir, self.py2_src_dir), stderr)
    self.assertEqual(expected_files, set(os.listdir(self.py3_dest_dir)))
|
'2to3 a single file with a new output dir.'
def test_filename_changing_on_output_single_file(self):
    self.setup_test_source_trees()
    err = StringIO.StringIO()
    ret = self.run_2to3_capture(
        ['-n', '-w', '--no-diffs', '--output-dir',
         self.py3_dest_dir, self.trivial_py2_file],
        StringIO.StringIO(''), StringIO.StringIO(), err)
    self.assertEqual(ret, 0)
    stderr = err.getvalue()
    self.assertIn(
        'Output in %r will mirror the input directory %r layout' % (
            self.py3_dest_dir, self.py2_src_dir), stderr)
    self.assertEqual(
        set([os.path.basename(self.trivial_py2_file)]),
        set(os.listdir(self.py3_dest_dir)))
|
'Reduces a fixer\'s pattern tree to a linear path and adds it
to the matcher (a common Aho-Corasick automaton). The fixer is
appended to the matching states and called when they are
reached.'
def add_fixer(self, fixer):
    self.fixers.append(fixer)
    tree = reduce_tree(fixer.pattern_tree)
    linear = tree.get_linear_subpattern()
    match_nodes = self.add(linear, start=self.root)
    for match_node in match_nodes:
        match_node.fixers.append(fixer)
|
'Recursively adds a linear pattern to the AC automaton'
def add(self, pattern, start):
    if not pattern:
        return [start]
    if isinstance(pattern[0], tuple):
        # Alternatives: recurse into each branch.
        match_nodes = []
        for alternative in pattern[0]:
            end_nodes = self.add(alternative, start=start)
            for end in end_nodes:
                match_nodes.extend(self.add(pattern[1:], end))
        return match_nodes
    else:
        if pattern[0] not in start.transition_table:
            # Transition did not exist; create it.
            next_node = BMNode()
            start.transition_table[pattern[0]] = next_node
        else:
            # Transition exists already; follow it.
            next_node = start.transition_table[pattern[0]]
        if pattern[1:]:
            end_nodes = self.add(pattern[1:], start=next_node)
        else:
            end_nodes = [next_node]
        return end_nodes
|
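'Example (added): a toy sketch, under stated assumptions, of the trie insertion add() performs in the linear (non-alternative) case; ToyNode stands in for BMNode.'
class ToyNode(object):
    def __init__(self):
        self.transition_table = {}
        self.fixers = []

def toy_add(pattern, start):
    for tok in pattern:   # one trie edge per pattern token
        start = start.transition_table.setdefault(tok, ToyNode())
    return [start]

root = ToyNode()
ends = toy_add(['(', 'print'], root)   # a bottom-up path, leaf first
ends[0].fixers.append('fix_print')     # fixer fires when the path matches
|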
'The main interface with the bottom matcher. The tree is
traversed from the bottom using the constructed
automaton. Nodes are only checked once as the tree is
retraversed. When the automaton fails, we give it one more
shot (in case the above tree matches as a whole with the
rejected leaf), then we break for the next leaf. There is the
special case of multiple arguments (see code comments) where we
recheck the nodes.
Args:
    The leaves of the AST tree to be matched
Returns:
    A dictionary of node matches with fixers as the keys'
def run(self, leaves):
    current_ac_node = self.root
    results = defaultdict(list)
    for leaf in leaves:
        current_ast_node = leaf
        while current_ast_node:
            current_ast_node.was_checked = True
            for child in current_ast_node.children:
                # Multiple statements; recheck this node later.
                if isinstance(child, pytree.Leaf) and child.value == u';':
                    current_ast_node.was_checked = False
                    break
            if current_ast_node.type == 1:
                # Type 1 is NAME; match on the token value.
                node_token = current_ast_node.value
            else:
                node_token = current_ast_node.type
            if node_token in current_ac_node.transition_table:
                # Token matches; follow the transition.
                current_ac_node = current_ac_node.transition_table[node_token]
                for fixer in current_ac_node.fixers:
                    if not fixer in results:
                        results[fixer] = []
                    results[fixer].append(current_ast_node)
            else:
                # Matching failed; reset the automaton.
                current_ac_node = self.root
                if (current_ast_node.parent is not None
                        and current_ast_node.parent.was_checked):
                    # The rest of the tree upwards has been checked; next leaf.
                    break
                # Recheck the rejected node once from the root.
                if node_token in current_ac_node.transition_table:
                    current_ac_node = current_ac_node.transition_table[node_token]
                    for fixer in current_ac_node.fixers:
                        if not fixer in results.keys():
                            results[fixer] = []
                        results[fixer].append(current_ast_node)
            current_ast_node = current_ast_node.parent
    return results
|
'Prints a graphviz diagram of the BM automaton (for debugging)'
def print_ac(self):
    print 'digraph g{'
    def print_node(node):
        for subnode_key in node.transition_table.keys():
            subnode = node.transition_table[subnode_key]
            print '%d -> %d [label=%s] //%s' % (
                node.id, subnode.id, type_repr(subnode_key), str(subnode.fixers))
            if subnode_key == 1:
                print subnode.content
            print_node(subnode)
    print_node(self.root)
    print '}'
|
'Args:
    fixers: A list of fixers to import.
    options: A dict with RefactoringTool configuration.
    explicit: A list of fixers to run even if they are explicit.
    nobackups: If true no backup \'.bak\' files will be created for those
        files that are being refactored.
    show_diffs: Should diffs of the refactoring be printed to stdout?
    input_base_dir: The base directory for all input files. This class
        will strip this path prefix off of filenames before substituting
        it with output_dir. Only meaningful if output_dir is supplied.
        All files processed by refactor() must start with this path.
    output_dir: If supplied, all converted files will be written into
        this directory tree instead of input_base_dir.
    append_suffix: If supplied, all files output by this tool will have
        this appended to their filename. Useful for changing .py to
        .py3 for example by passing append_suffix=\'3\'.'
def __init__(self, fixers, options, explicit, nobackups, show_diffs,
             input_base_dir='', output_dir='', append_suffix=''):
    self.nobackups = nobackups
    self.show_diffs = show_diffs
    if input_base_dir and not input_base_dir.endswith(os.sep):
        input_base_dir += os.sep
    self._input_base_dir = input_base_dir
    self._output_dir = output_dir
    self._append_suffix = append_suffix
    super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)
|
'Initializer.
Creates an attribute for each grammar symbol (nonterminal),
whose value is the symbol\'s type (an int >= 256).'
def __init__(self, grammar):
    for name, symbol in grammar.symbol2number.iteritems():
        setattr(self, name, symbol)
|
'Transform for the basic import case. Replaces the old
import name with a comma separated list of its
replacements.'
def transform_import(self, node, results):
    import_mod = results.get('module')
    pref = import_mod.prefix
    names = []
    for name in MAPPING[import_mod.value][:-1]:
        names.extend([Name(name[0], prefix=pref), Comma()])
    names.append(Name(MAPPING[import_mod.value][-1][0], prefix=pref))
    import_mod.replace(names)
|
'Transform for imports of specific module elements. Replaces
the module to be imported from with the appropriate new
module.'
def transform_member(self, node, results):
    mod_member = results.get('mod_member')
    pref = mod_member.prefix
    member = results.get('member')
    if member:
        if isinstance(member, list):
            member = member[0]
        new_name = None
        for change in MAPPING[mod_member.value]:
            if member.value in change[1]:
                new_name = change[0]
                break
        if new_name:
            mod_member.replace(Name(new_name, prefix=pref))
        else:
            self.cannot_convert(node, 'This is an invalid module element')
    else:
        modules = []
        mod_dict = {}
        members = results['members']
        for member in members:
            if member.type == syms.import_as_name:
                as_name = member.children[2].value
                member_name = member.children[0].value
            else:
                member_name = member.value
                as_name = None
            if member_name != u',':
                for change in MAPPING[mod_member.value]:
                    if member_name in change[1]:
                        if change[0] not in mod_dict:
                            modules.append(change[0])
                        mod_dict.setdefault(change[0], []).append(member)
        new_nodes = []
        indentation = find_indentation(node)
        first = True
        def handle_name(name, prefix):
            if name.type == syms.import_as_name:
                kids = [Name(name.children[0].value, prefix=prefix),
                        name.children[1].clone(),
                        name.children[2].clone()]
                return [Node(syms.import_as_name, kids)]
            return [Name(name.value, prefix=prefix)]
        for module in modules:
            elts = mod_dict[module]
            names = []
            for elt in elts[:-1]:
                names.extend(handle_name(elt, pref))
                names.append(Comma())
            names.extend(handle_name(elts[-1], pref))
            new = FromImport(module, names)
            if not first or node.parent.prefix.endswith(indentation):
                new.prefix = indentation
            new_nodes.append(new)
            first = False
        if new_nodes:
            nodes = []
            for new_node in new_nodes[:-1]:
                nodes.extend([new_node, Newline()])
            nodes.append(new_nodes[-1])
            node.replace(nodes)
        else:
            self.cannot_convert(node, 'All module elements are invalid')
|
'Transform for calls to module members in code.'
def transform_dot(self, node, results):
    module_dot = results.get('bare_with_attr')
    member = results.get('member')
    new_name = None
    if isinstance(member, list):
        member = member[0]
    for change in MAPPING[module_dot.value]:
        if member.value in change[1]:
            new_name = change[0]
            break
    if new_name:
        module_dot.replace(Name(new_name, prefix=module_dot.prefix))
    else:
        self.cannot_convert(node, 'This is an invalid module element')
|
'Initializer.
Args:
    fixer_names: a list of fixers to import
    options: a dict with configuration.
    explicit: a list of fixers to run even if they are explicit.'
def __init__(self, fixer_names, options=None, explicit=None):
    self.fixers = fixer_names
    self.explicit = explicit or []
    self.options = self._default_options.copy()
    if options is not None:
        self.options.update(options)
    if self.options['print_function']:
        self.grammar = pygram.python_grammar_no_print_statement
    else:
        self.grammar = pygram.python_grammar
    self.write_unchanged_files = self.options.get('write_unchanged_files')
    self.errors = []
    self.logger = logging.getLogger('RefactoringTool')
    self.fixer_log = []
    self.wrote = False
    self.driver = driver.Driver(self.grammar,
                                convert=pytree.convert,
                                logger=self.logger)
    self.pre_order, self.post_order = self.get_fixers()
    self.files = []   # list of files being refactored
    self.BM = bm.BottomMatcher()
    self.bmi_pre_order = []    # fixers incompatible with the bottom matcher
    self.bmi_post_order = []
    for fixer in chain(self.post_order, self.pre_order):
        if fixer.BM_compatible:
            self.BM.add_fixer(fixer)
        elif fixer in self.pre_order:
            self.bmi_pre_order.append(fixer)
        elif fixer in self.post_order:
            self.bmi_post_order.append(fixer)
    self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)
    self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)
|
'Inspects the options to load the requested patterns and handlers.
Returns:
    (pre_order, post_order), where pre_order is the list of fixers that
    want a pre-order AST traversal, and post_order is the list that want
    post-order traversal.'
def get_fixers(self):
    pre_order_fixers = []
    post_order_fixers = []
    for fix_mod_path in self.fixers:
        mod = __import__(fix_mod_path, {}, {}, ['*'])
        fix_name = fix_mod_path.rsplit('.', 1)[-1]
        if fix_name.startswith(self.FILE_PREFIX):
            fix_name = fix_name[len(self.FILE_PREFIX):]
        parts = fix_name.split('_')
        class_name = self.CLASS_PREFIX + ''.join([p.title() for p in parts])
        try:
            fix_class = getattr(mod, class_name)
        except AttributeError:
            raise FixerError("Can't find %s.%s" % (fix_name, class_name))
        fixer = fix_class(self.options, self.fixer_log)
        if (fixer.explicit and self.explicit is not True and
                fix_mod_path not in self.explicit):
            self.log_message('Skipping optional fixer: %s', fix_name)
            continue
        self.log_debug('Adding transformation: %s', fix_name)
        if fixer.order == 'pre':
            pre_order_fixers.append(fixer)
        elif fixer.order == 'post':
            post_order_fixers.append(fixer)
        else:
            raise FixerError('Illegal fixer order: %r' % fixer.order)
    key_func = operator.attrgetter('run_order')
    pre_order_fixers.sort(key=key_func)
    post_order_fixers.sort(key=key_func)
    return (pre_order_fixers, post_order_fixers)
|
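'Example (added): the naming convention get_fixers() relies on, shown standalone: module lib2to3.fixes.fix_print maps to class FixPrint. FILE_PREFIX and CLASS_PREFIX mirror the class attributes used above.'
FILE_PREFIX, CLASS_PREFIX = 'fix_', 'Fix'
fix_mod_path = 'lib2to3.fixes.fix_print'
fix_name = fix_mod_path.rsplit('.', 1)[-1]
if fix_name.startswith(FILE_PREFIX):
    fix_name = fix_name[len(FILE_PREFIX):]
print CLASS_PREFIX + ''.join(p.title() for p in fix_name.split('_'))   # FixPrint
|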
'Called when an error occurs.'
def log_error(self, msg, *args, **kwds):
    raise
|
'Hook to log a message.'
def log_message(self, msg, *args):
    if args:
        msg = msg % args
    self.logger.info(msg)
|
'Called with the old version, new version, and filename of a
refactored file.'
def print_output(self, old_text, new_text, filename, equal):
    pass
|
'Refactor a list of files and directories.'
def refactor(self, items, write=False, doctests_only=False):
    for dir_or_file in items:
        if os.path.isdir(dir_or_file):
            self.refactor_dir(dir_or_file, write, doctests_only)
        else:
            self.refactor_file(dir_or_file, write, doctests_only)
|
'Descends down a directory and refactors every Python file found.
Python files are assumed to have a .py extension.
Files and subdirectories starting with \'.\' are skipped.'
def refactor_dir(self, dir_name, write=False, doctests_only=False):
    py_ext = os.extsep + 'py'
    for dirpath, dirnames, filenames in os.walk(dir_name):
        self.log_debug('Descending into %s', dirpath)
        dirnames.sort()
        filenames.sort()
        for name in filenames:
            if (not name.startswith('.') and
                    os.path.splitext(name)[1] == py_ext):
                fullname = os.path.join(dirpath, name)
                self.refactor_file(fullname, write, doctests_only)
        # Modify dirnames in-place to remove subdirs with leading dots.
        dirnames[:] = [dn for dn in dirnames if not dn.startswith('.')]
|
'Do our best to decode a Python source file correctly.'
def _read_python_source(self, filename):
    try:
        f = open(filename, 'rb')
    except IOError as err:
        self.log_error("Can't open %s: %s", filename, err)
        return (None, None)
    try:
        encoding = tokenize.detect_encoding(f.readline)[0]
    finally:
        f.close()
    with _open_with_encoding(filename, 'r', encoding=encoding) as f:
        return (_from_system_newlines(f.read()), encoding)
|
'Refactors a file.'
def refactor_file(self, filename, write=False, doctests_only=False):
    input, encoding = self._read_python_source(filename)
    if input is None:
        # Reading the file failed.
        return
    input += u'\n'   # silence certain parse errors
    if doctests_only:
        self.log_debug('Refactoring doctests in %s', filename)
        output = self.refactor_docstring(input, filename)
        if self.write_unchanged_files or output != input:
            self.processed_file(output, filename, input, write, encoding)
        else:
            self.log_debug('No doctest changes in %s', filename)
    else:
        tree = self.refactor_string(input, filename)
        if self.write_unchanged_files or (tree and tree.was_changed):
            # The [:-1] strips off the \n added earlier.
            self.processed_file(unicode(tree)[:-1], filename,
                                write=write, encoding=encoding)
        else:
            self.log_debug('No changes in %s', filename)
|
'Refactor a given input string.
Args:
    data: a string holding the code to be refactored.
    name: a human-readable name for use in error/log messages.
Returns:
    An AST corresponding to the refactored input stream; None if
    there were errors during the parse.'
def refactor_string(self, data, name):
    features = _detect_future_features(data)
    if 'print_function' in features:
        self.driver.grammar = pygram.python_grammar_no_print_statement
    try:
        tree = self.driver.parse_string(data)
    except Exception as err:
        self.log_error("Can't parse %s: %s: %s",
                       name, err.__class__.__name__, err)
        return
    finally:
        self.driver.grammar = self.grammar
    tree.future_features = features
    self.log_debug('Refactoring %s', name)
    self.refactor_tree(tree, name)
    return tree
|
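'Example (added): a hedged usage sketch refactoring a string with one stock fixer; the fixer list and sample code are illustrative, not from the source above.'
from lib2to3.refactor import RefactoringTool

rt = RefactoringTool(['lib2to3.fixes.fix_print'])
tree = rt.refactor_string(u"print 'hi'\n", '<example>')
print unicode(tree)   # expected: print('hi')
|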
'Refactors a parse tree (modifying the tree in place).
For compatible patterns the bottom matcher module is
used. Otherwise the tree is traversed node-to-node for
matches.
Args:
    tree: a pytree.Node instance representing the root of the tree
        to be refactored.
    name: a human-readable name for this tree.
Returns:
    True if the tree was modified, False otherwise.'
def refactor_tree(self, tree, name):
    for fixer in chain(self.pre_order, self.post_order):
        fixer.start_tree(tree, name)
    # Use traditional matching for the incompatible fixers.
    self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())
    self.traverse_by(self.bmi_post_order_heads, tree.post_order())
    # Obtain a set of candidate nodes.
    match_set = self.BM.run(tree.leaves())
    while any(match_set.values()):
        for fixer in self.BM.fixers:
            if fixer in match_set and match_set[fixer]:
                # Sort by depth; apply fixers from bottom of the AST to top.
                match_set[fixer].sort(key=pytree.Base.depth, reverse=True)
                if fixer.keep_line_order:
                    # Some fixers (e.g. fix_imports) must be applied in the
                    # original file's line order.
                    match_set[fixer].sort(key=pytree.Base.get_lineno)
                for node in list(match_set[fixer]):
                    if node in match_set[fixer]:
                        match_set[fixer].remove(node)
                    try:
                        find_root(node)
                    except ValueError:
                        # This node has been cut off from the tree by a
                        # previous transformation; skip it.
                        continue
                    if node.fixers_applied and fixer in node.fixers_applied:
                        # Do not apply the same fixer again.
                        continue
                    results = fixer.match(node)
                    if results:
                        new = fixer.transform(node, results)
                        if new is not None:
                            node.replace(new)
                            # Mark the whole new subtree so this fixer is
                            # not applied to it again.
                            for node in new.post_order():
                                if not node.fixers_applied:
                                    node.fixers_applied = []
                                node.fixers_applied.append(fixer)
                            # Update the match set with matches in the
                            # newly added code.
                            new_matches = self.BM.run(new.leaves())
                            for fxr in new_matches:
                                if not fxr in match_set:
                                    match_set[fxr] = []
                                match_set[fxr].extend(new_matches[fxr])
    for fixer in chain(self.pre_order, self.post_order):
        fixer.finish_tree(tree, name)
    return tree.was_changed
|
'Traverse an AST, applying a set of fixers to each node.
This is a helper method for refactor_tree().
Args:
    fixers: a list of fixer instances.
    traversal: a generator that yields AST nodes.
Returns:
    None'
def traverse_by(self, fixers, traversal):
    if not fixers:
        return
    for node in traversal:
        for fixer in fixers[node.type]:
            results = fixer.match(node)
            if results:
                new = fixer.transform(node, results)
                if new is not None:
                    node.replace(new)
                    node = new
|
'Called when a file has been refactored and there may be changes.'
def processed_file(self, new_text, filename, old_text=None, write=False,
                   encoding=None):
    self.files.append(filename)
    if old_text is None:
        old_text = self._read_python_source(filename)[0]
        if old_text is None:
            return
    equal = old_text == new_text
    self.print_output(old_text, new_text, filename, equal)
    if equal:
        self.log_debug('No changes to %s', filename)
        if not self.write_unchanged_files:
            return
    if write:
        self.write_file(new_text, filename, old_text, encoding)
    else:
        self.log_debug('Not writing changes to %s', filename)
|
'Writes a string to a file.
It first shows a unified diff between the old text and the new text, and
then rewrites the file; the latter is only done if the write option is
set.'
def write_file(self, new_text, filename, old_text, encoding=None):
    try:
        f = _open_with_encoding(filename, 'w', encoding=encoding)
    except os.error as err:
        self.log_error("Can't create %s: %s", filename, err)
        return
    try:
        f.write(_to_system_newlines(new_text))
    except os.error as err:
        self.log_error("Can't write %s: %s", filename, err)
    finally:
        f.close()
    self.log_debug('Wrote changes to %s', filename)
    self.wrote = True
|
'Refactors a docstring, looking for doctests.
This returns a modified version of the input string. It looks
for doctests, which start with a ">>>" prompt, and may be
continued with "..." prompts, as long as the "..." is indented
the same as the ">>>".
(Unfortunately we can\'t use the doctest module\'s parser,
since, like most parsers, it is not geared towards preserving
the original source.)'
def refactor_docstring(self, input, filename):
    result = []
    block = None
    block_lineno = None
    indent = None
    lineno = 0
    for line in input.splitlines(True):
        lineno += 1
        if line.lstrip().startswith(self.PS1):
            if block is not None:
                result.extend(self.refactor_doctest(block, block_lineno,
                                                    indent, filename))
            block_lineno = lineno
            block = [line]
            i = line.find(self.PS1)
            indent = line[:i]
        elif (indent is not None and
              (line.startswith(indent + self.PS2) or
               line == indent + self.PS2.rstrip() + u'\n')):
            block.append(line)
        else:
            if block is not None:
                result.extend(self.refactor_doctest(block, block_lineno,
                                                    indent, filename))
            block = None
            indent = None
            result.append(line)
    if block is not None:
        result.extend(self.refactor_doctest(block, block_lineno,
                                            indent, filename))
    return u''.join(result)
|
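'Example (added): a small illustration of the prompt detection used above; the PS1/PS2 values assumed here are the usual doctest prompts.'
PS1, PS2 = '>>> ', '... '
line = "    >>> print 'x'\n"
indent = line[:line.find(PS1)]
print repr(indent)                    # '    '
print line.startswith(indent + PS2)   # False: this line starts a new block
|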
'Refactors one doctest.
A doctest is given as a block of lines, the first of which starts
with ">>>" (possibly indented), while the remaining lines start
with "..." (identically indented).'
def refactor_doctest(self, block, lineno, indent, filename):
    try:
        tree = self.parse_block(block, lineno, indent)
    except Exception as err:
        if self.logger.isEnabledFor(logging.DEBUG):
            for line in block:
                self.log_debug('Source: %s', line.rstrip(u'\n'))
        self.log_error("Can't parse docstring in %s line %s: %s: %s",
                       filename, lineno, err.__class__.__name__, err)
        return block
    if self.refactor_tree(tree, filename):
        new = unicode(tree).splitlines(True)
        # Undo the line number adjustment made by wrap_toks() below.
        clipped, new = new[:lineno - 1], new[lineno - 1:]
        assert clipped == [u'\n'] * (lineno - 1), clipped
        if not new[-1].endswith(u'\n'):
            new[-1] += u'\n'
        block = [indent + self.PS1 + new.pop(0)]
        if new:
            block += [indent + self.PS2 + line for line in new]
    return block
|
'Parses a block into a tree.
This is necessary to get correct line number / offset information
in the parser diagnostics and embedded into the parse tree.'
def parse_block(self, block, lineno, indent):
    tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
    tree.future_features = frozenset()
    return tree
|
'Wraps a tokenize stream to systematically modify start/end.'
def wrap_toks(self, block, lineno, indent):
    tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)
    for type, value, (line0, col0), (line1, col1), line_text in tokens:
        line0 += lineno - 1
        line1 += lineno - 1
        # The columns are left alone; updating them would also require
        # updating line_text, and it would still break for tokens that
        # span lines.
        yield type, value, (line0, col0), (line1, col1), line_text
|
'Generates lines as expected by tokenize from a list of lines.
This strips the first len(indent + self.PS1) characters off each line.'
def gen_lines(self, block, indent):
    prefix1 = indent + self.PS1
    prefix2 = indent + self.PS2
    prefix = prefix1
    for line in block:
        if line.startswith(prefix):
            yield line[len(prefix):]
        elif line == prefix.rstrip() + u'\n':
            yield u'\n'
        else:
            raise AssertionError('line=%r, prefix=%r' % (line, prefix))
        prefix = prefix2
    while True:
        yield ''
|
'Constructor that prevents Base from being instantiated.'
def __new__(cls, *args, **kwds):
    assert cls is not Base, 'Cannot instantiate Base'
    return object.__new__(cls)
|
'Compare two nodes for equality.
This calls the method _eq().'
def __eq__(self, other):
    if self.__class__ is not other.__class__:
        return NotImplemented
    return self._eq(other)
|
'Compare two nodes for inequality.
This calls the method _eq().'
def __ne__(self, other):
    if self.__class__ is not other.__class__:
        return NotImplemented
    return not self._eq(other)
|
'Compare two nodes for equality.
This is called by __eq__ and __ne__. It is only called if the two nodes
have the same type. This must be implemented by the concrete subclass.
Nodes should be considered equal if they have the same structure,
ignoring the prefix string and other context information.'
def _eq(self, other):
    raise NotImplementedError
|
'Return a cloned (deep) copy of self.
This must be implemented by the concrete subclass.'
def clone(self):
    raise NotImplementedError
|
'Return a post-order iterator for the tree.
This must be implemented by the concrete subclass.'
def post_order(self):
    raise NotImplementedError
|
'Return a pre-order iterator for the tree.
This must be implemented by the concrete subclass.'
def pre_order(self):
    raise NotImplementedError
|
'Set the prefix for the node (see Leaf class).
DEPRECATED; use the prefix property directly.'
def set_prefix(self, prefix):
    warnings.warn('set_prefix() is deprecated; use the prefix property',
                  DeprecationWarning, stacklevel=2)
    self.prefix = prefix
|
'Return the prefix for the node (see Leaf class).
DEPRECATED; use the prefix property directly.'
def get_prefix(self):
    warnings.warn('get_prefix() is deprecated; use the prefix property',
                  DeprecationWarning, stacklevel=2)
    return self.prefix
|
'Replace this node with a new one in the parent.'
def replace(self, new):
    assert self.parent is not None, str(self)
    assert new is not None
    if not isinstance(new, list):
        new = [new]
    l_children = []
    found = False
    for ch in self.parent.children:
        if ch is self:
            assert not found, (self.parent.children, self, new)
            if new is not None:
                l_children.extend(new)
            found = True
        else:
            l_children.append(ch)
    assert found, (self.children, self, new)
    self.parent.changed()
    self.parent.children = l_children
    for x in new:
        x.parent = self.parent
    self.parent = None
|
'Return the line number which generated the invocant node.'
def get_lineno(self):
    node = self
    while not isinstance(node, Leaf):
        if not node.children:
            return
        node = node.children[0]
    return node.lineno
|
'Remove the node from the tree. Returns the position of the node in its
parent\'s children before it was removed.'
def remove(self):
    if self.parent:
        for i, node in enumerate(self.parent.children):
            if node is self:
                self.parent.changed()
                del self.parent.children[i]
                self.parent = None
                return i
|
'The node immediately following the invocant in their parent\'s children
list. If the invocant does not have a next sibling, it is None.'
@property
def next_sibling(self):
    if self.parent is None:
        return None
    # Can't use index(); we need to test by identity.
    for i, child in enumerate(self.parent.children):
        if child is self:
            try:
                return self.parent.children[i + 1]
            except IndexError:
                return None
|
'The node immediately preceding the invocant in their parent\'s children
list. If the invocant does not have a previous sibling, it is None.'
@property
def prev_sibling(self):
    if self.parent is None:
        return None
    # Can't use index(); we need to test by identity.
    for i, child in enumerate(self.parent.children):
        if child is self:
            if i == 0:
                return None
            return self.parent.children[i - 1]
|
'Return the string immediately following the invocant node. This is
effectively equivalent to node.next_sibling.prefix.'
def get_suffix(self):
    next_sib = self.next_sibling
    if next_sib is None:
        return u''
    return next_sib.prefix
|
'Initializer.
Takes a type constant (a symbol number >= 256), a sequence of
child nodes, and an optional context keyword argument.
As a side effect, the parent pointers of the children are updated.'
def __init__(self, type, children, context=None, prefix=None,
             fixers_applied=None):
    assert type >= 256, type
    self.type = type
    self.children = list(children)
    for ch in self.children:
        assert ch.parent is None, repr(ch)
        ch.parent = self
    if prefix is not None:
        self.prefix = prefix
    if fixers_applied:
        self.fixers_applied = fixers_applied[:]
    else:
        self.fixers_applied = None
|
'Return a canonical string representation.'
def __repr__(self):
    return '%s(%s, %r)' % (self.__class__.__name__,
                           type_repr(self.type), self.children)
|
'Return a pretty string representation.
This reproduces the input source exactly.'
def __unicode__(self):
    return u''.join(map(unicode, self.children))
|
'Compare two nodes for equality.'
def _eq(self, other):
    return (self.type, self.children) == (other.type, other.children)
|
'Return a cloned (deep) copy of self.'
def clone(self):
    return Node(self.type, [ch.clone() for ch in self.children],
                fixers_applied=self.fixers_applied)
|
'Return a post-order iterator for the tree.'
def post_order(self):
    for child in self.children:
        for node in child.post_order():
            yield node
    yield self
|
'Return a pre-order iterator for the tree.'
def pre_order(self):
    yield self
    for child in self.children:
        for node in child.pre_order():
            yield node
|
'The whitespace and comments preceding this node in the input.'
def _prefix_getter(self):
    if not self.children:
        return ''
    return self.children[0].prefix
|
'Equivalent to \'node.children[i] = child\'. This method also sets the
child\'s parent attribute appropriately.'
def set_child(self, i, child):
    child.parent = self
    self.children[i].parent = None
    self.children[i] = child
    self.changed()
|
'Equivalent to \'node.children.insert(i, child)\'. This method also sets
the child\'s parent attribute appropriately.'
def insert_child(self, i, child):
    child.parent = self
    self.children.insert(i, child)
    self.changed()
|
'Equivalent to \'node.children.append(child)\'. This method also sets the
child\'s parent attribute appropriately.'
def append_child(self, child):
    child.parent = self
    self.children.append(child)
    self.changed()
|
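'Example (added): a hedged sketch of hand-building a tree with these mutators; the resulting node shape is illustrative only, not valid Python syntax.'
from lib2to3 import pygram, pytree
from lib2to3.pgen2 import token

syms = pygram.python_symbols
node = pytree.Node(syms.atom, [])
node.append_child(pytree.Leaf(token.NAME, 'x'))
node.insert_child(0, pytree.Leaf(token.LPAR, '('))
print unicode(node)   # (x
|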
'Initializer.
Takes a type constant (a token number < 256), a string value, and an
optional context keyword argument.'
def __init__(self, type, value, context=None, prefix=None,
             fixers_applied=[]):
    assert 0 <= type < 256, type
    if context is not None:
        self._prefix, (self.lineno, self.column) = context
    self.type = type
    self.value = value
    if prefix is not None:
        self._prefix = prefix
    self.fixers_applied = fixers_applied[:]
|
'Return a canonical string representation.'
def __repr__(self):
    return '%s(%r, %r)' % (self.__class__.__name__, self.type, self.value)
|
'Return a pretty string representation.
This reproduces the input source exactly.'
def __unicode__(self):
    return self.prefix + unicode(self.value)
|
'Compare two nodes for equality.'
def _eq(self, other):
    return (self.type, self.value) == (other.type, other.value)
|
'Return a cloned (deep) copy of self.'
def clone(self):
    return Leaf(self.type, self.value,
                (self.prefix, (self.lineno, self.column)),
                fixers_applied=self.fixers_applied)
|
'Return a post-order iterator for the tree.'
def post_order(self):
    yield self
|
'Return a pre-order iterator for the tree.'
def pre_order(self):
    yield self
|
'The whitespace and comments preceding this token in the input.'
def _prefix_getter(self):
    return self._prefix
|
'Constructor that prevents BasePattern from being instantiated.'
def __new__(cls, *args, **kwds):
    assert cls is not BasePattern, 'Cannot instantiate BasePattern'
    return object.__new__(cls)
|
'A subclass can define this as a hook for optimizations.
Returns either self or another node with the same effect.'
def optimize(self):
    return self
|
'Does this pattern exactly match a node?
Returns True if it matches, False if not.
If results is not None, it must be a dict which will be
updated with the nodes matching named subpatterns.
Default implementation for non-wildcard patterns.'
def match(self, node, results=None):
    if self.type is not None and node.type != self.type:
        return False
    if self.content is not None:
        r = None
        if results is not None:
            r = {}
        if not self._submatch(node, r):
            return False
        if r:
            results.update(r)
    if results is not None and self.name:
        results[self.name] = node
    return True
|
'Does this pattern exactly match a sequence of nodes?
Default implementation for non-wildcard patterns.'
def match_seq(self, nodes, results=None):
    if len(nodes) != 1:
        return False
    return self.match(nodes[0], results)
|
'Generator yielding all matches for this pattern.
Default implementation for non-wildcard patterns.'
def generate_matches(self, nodes):
    r = {}
    if nodes and self.match(nodes[0], r):
        yield 1, r
|
'Initializer. Takes optional type, content, and name.
The type, if given must be a token type (< 256). If not given,
this matches any *leaf* node; the content may still be required.
The content, if given, must be a string.
If a name is given, the matching node is stored in the results
dict under that key.'
def __init__(self, type=None, content=None, name=None):
    if type is not None:
        assert 0 <= type < 256, type
    if content is not None:
        assert isinstance(content, basestring), repr(content)
    self.type = type
    self.content = content
    self.name = name
|
'Override match() to insist on a leaf node.'
def match(self, node, results=None):
    if not isinstance(node, Leaf):
        return False
    return BasePattern.match(self, node, results)
|
'Match the pattern\'s content to the node\'s children.
This assumes the node type matches and self.content is not None.
Returns True if it matches, False if not.
If results is not None, it must be a dict which will be
updated with the nodes matching named subpatterns.
When returning False, the results dict may still be updated.'
def _submatch(self, node, results=None):
    return self.content == node.value
|
'Initializer. Takes optional type, content, and name.
The type, if given, must be a symbol type (>= 256). If the
type is None this matches *any* single node (leaf or not),
except if content is not None, in which it only matches
non-leaf nodes that also match the content pattern.
The content, if not None, must be a sequence of Patterns that
must match the node\'s children exactly. If the content is
given, the type must not be None.
If a name is given, the matching node is stored in the results
dict under that key.'
def __init__(self, type=None, content=None, name=None):
    if type is not None:
        assert type >= 256, type
    if content is not None:
        assert not isinstance(content, basestring), repr(content)
        content = list(content)
        for i, item in enumerate(content):
            assert isinstance(item, BasePattern), (i, item)
            if isinstance(item, WildcardPattern):
                self.wildcards = True
    self.type = type
    self.content = content
    self.name = name
|
'Match the pattern\'s content to the node\'s children.
This assumes the node type matches and self.content is not None.
Returns True if it matches, False if not.
If results is not None, it must be a dict which will be
updated with the nodes matching named subpatterns.
When returning False, the results dict may still be updated.'
def _submatch(self, node, results=None):
    if self.wildcards:
        for c, r in generate_matches(self.content, node.children):
            if c == len(node.children):
                if results is not None:
                    results.update(r)
                return True
        return False
    if len(self.content) != len(node.children):
        return False
    for subpattern, child in zip(self.content, node.children):
        if not subpattern.match(child, results):
            return False
    return True
|
'Initializer.
Args:
    content: optional sequence of subsequences of patterns;
        if absent, matches one node;
        if present, each subsequence is an alternative [*]
    min: optional minimum number of times to match, default 0
    max: optional maximum number of times to match, default HUGE
    name: optional name assigned to this match
[*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
    equivalent to (a b c | d e | f g h); if content is None,
    this is equivalent to \'.\' in regular expression terms.
The min and max parameters work as follows:
    min=0, max=maxint: .*
    min=1, max=maxint: .+
    min=0, max=1: .?
    min=1, max=1: .
If content is not None, replace the dot with the parenthesized
list of alternatives, e.g. (a b c | d e | f g h)*'
def __init__(self, content=None, min=0, max=HUGE, name=None):
    assert 0 <= min <= max <= HUGE, (min, max)
    if content is not None:
        content = tuple(map(tuple, content))
        assert len(content), repr(content)
        for alt in content:
            assert len(alt), repr(alt)
    self.content = content
    self.min = min
    self.max = max
    self.name = name
|
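'Example (added): hedged instances of the regex correspondences listed in the docstring, using LeafPattern subpatterns as the a/b placeholders.'
from lib2to3.pytree import LeafPattern, WildcardPattern, HUGE
from lib2to3.pgen2 import token

a = LeafPattern(token.NAME)
b = LeafPattern(token.EQUAL)
star = WildcardPattern(None, min=0, max=HUGE)        # like .*
opt = WildcardPattern([[a, b]], min=0, max=1)        # like (a b)?
alts = WildcardPattern([[a], [b]], min=1, max=HUGE)  # like (a | b)+
|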
'Optimize certain stacked wildcard patterns.'
def optimize(self):
    subpattern = None
    if (self.content is not None and
            len(self.content) == 1 and len(self.content[0]) == 1):
        subpattern = self.content[0][0]
    if self.min == 1 and self.max == 1:
        if self.content is None:
            return NodePattern(name=self.name)
        if subpattern is not None and self.name == subpattern.name:
            return subpattern.optimize()
    if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and
            subpattern.min <= 1 and self.name == subpattern.name):
        return WildcardPattern(subpattern.content,
                               self.min * subpattern.min,
                               self.max * subpattern.max,
                               subpattern.name)
    return self
|
'Does this pattern exactly match a node?'
def match(self, node, results=None):
    return self.match_seq([node], results)
|
'Does this pattern exactly match a sequence of nodes?'
def match_seq(self, nodes, results=None):
    for c, r in self.generate_matches(nodes):
        if c == len(nodes):
            if results is not None:
                results.update(r)
                if self.name:
                    results[self.name] = list(nodes)
            return True
    return False
|
'Generator yielding matches for a sequence of nodes.
Args:
    nodes: sequence of nodes
Yields:
    (count, results) tuples where:
    count: the match comprises nodes[:count];
    results: dict containing named submatches.'
def generate_matches(self, nodes):
    if self.content is None:
        # Shortcut for the special case described in __init__.
        for count in xrange(self.min, 1 + min(len(nodes), self.max)):
            r = {}
            if self.name:
                r[self.name] = nodes[:count]
            yield count, r
    elif self.name == 'bare_name':
        yield self._bare_name_matches(nodes)
    else:
        # Silence stderr on CPython while the recursive matcher runs;
        # hitting the recursion limit otherwise produces noisy messages
        # about ignored RuntimeErrors.
        if hasattr(sys, 'getrefcount'):
            save_stderr = sys.stderr
            sys.stderr = StringIO()
        try:
            for count, r in self._recursive_matches(nodes, 0):
                if self.name:
                    r[self.name] = nodes[:count]
                yield count, r
        except RuntimeError:
            # Fall back to the iterative matching scheme if the recursive
            # scheme hits the recursion limit.
            for count, r in self._iterative_matches(nodes):
                if self.name:
                    r[self.name] = nodes[:count]
                yield count, r
        finally:
            if hasattr(sys, 'getrefcount'):
                sys.stderr = save_stderr
|
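'Example (added): a hedged end-to-end sketch; PatternCompiler is lib2to3\'s textual front end to these pattern classes. The pattern text and sample code are illustrative.'
from lib2to3 import pygram, pytree
from lib2to3.patcomp import PatternCompiler
from lib2to3.pgen2 import driver

pattern = PatternCompiler().compile_pattern(
    "power< 'print' trailer< '(' any* ')' > >")
d = driver.Driver(pygram.python_grammar_no_print_statement,
                  convert=pytree.convert)
tree = d.parse_string("print('x')\n")
for node in tree.pre_order():
    if pattern.match(node, {}):
        print 'matched:', unicode(node)
|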
'Helper to iteratively yield the matches.'
def _iterative_matches(self, nodes):
    nodelen = len(nodes)
    if 0 >= self.min:
        yield 0, {}
    results = []
    # Generate matches that use just one alternative from self.content.
    for alt in self.content:
        for c, r in generate_matches(alt, nodes):
            yield c, r
            results.append((c, r))
    # For each match, iterate down the remaining nodes.
    while results:
        new_results = []
        for c0, r0 in results:
            if c0 < nodelen and c0 <= self.max:
                for alt in self.content:
                    for c1, r1 in generate_matches(alt, nodes[c0:]):
                        if c1 > 0:
                            r = {}
                            r.update(r0)
                            r.update(r1)
                            yield c0 + c1, r
                            new_results.append((c0 + c1, r))
        results = new_results
|