'Parse the .c file written by pgen. (Internal)

The file looks as follows. The first two lines are always this:

    #include "pgenheaders.h"
    #include "grammar.h"

After that come four blocks:

1) one or more state definitions
2) a table defining dfas
3) a table defining labels
4) a struct defining the grammar

A state definition has the following form:
- one or more arc arrays, each of the form:
      static arc arcs_<n>_<m>[<k>] = {
          {<i>, <j>},
- followed by a state array, of the form:
      static state states_<s>[<t>] = {
          {<k>, arcs_<n>_<m>},'
def parse_graminit_c(self, filename):
    try:
        f = open(filename)
    except IOError as err:
        print("Can't open %s: %s" % (filename, err))
        return False
    lineno = 0

    # Expect the two #include lines.
    lineno, line = lineno + 1, f.next()
    assert line == '#include "pgenheaders.h"\n', (lineno, line)
    lineno, line = lineno + 1, f.next()
    assert line == '#include "grammar.h"\n', (lineno, line)

    # Parse the state definitions.
    lineno, line = lineno + 1, f.next()
    allarcs = {}
    states = []
    while line.startswith('static arc '):
        while line.startswith('static arc '):
            mo = re.match(r'static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$', line)
            assert mo, (lineno, line)
            n, m, k = map(int, mo.groups())
            arcs = []
            for _ in range(k):
                lineno, line = lineno + 1, f.next()
                mo = re.match(r'\s+{(\d+), (\d+)},$', line)
                assert mo, (lineno, line)
                i, j = map(int, mo.groups())
                arcs.append((i, j))
            lineno, line = lineno + 1, f.next()
            assert line == '};\n', (lineno, line)
            allarcs[(n, m)] = arcs
            lineno, line = lineno + 1, f.next()
        mo = re.match(r'static state states_(\d+)\[(\d+)\] = {$', line)
        assert mo, (lineno, line)
        s, t = map(int, mo.groups())
        assert s == len(states), (lineno, line)
        state = []
        for _ in range(t):
            lineno, line = lineno + 1, f.next()
            mo = re.match(r'\s+{(\d+), arcs_(\d+)_(\d+)},$', line)
            assert mo, (lineno, line)
            k, n, m = map(int, mo.groups())
            arcs = allarcs[(n, m)]
            assert k == len(arcs), (lineno, line)
            state.append(arcs)
        states.append(state)
        lineno, line = lineno + 1, f.next()
        assert line == '};\n', (lineno, line)
        lineno, line = lineno + 1, f.next()
    self.states = states

    # Parse the dfas.
    dfas = {}
    mo = re.match(r'static dfa dfas\[(\d+)\] = {$', line)
    assert mo, (lineno, line)
    ndfas = int(mo.group(1))
    for i in range(ndfas):
        lineno, line = lineno + 1, f.next()
        mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$',
                      line)
        assert mo, (lineno, line)
        symbol = mo.group(2)
        number, x, y, z = map(int, mo.group(1, 3, 4, 5))
        assert self.symbol2number[symbol] == number, (lineno, line)
        assert self.number2symbol[number] == symbol, (lineno, line)
        assert x == 0, (lineno, line)
        state = states[z]
        assert y == len(state), (lineno, line)
        lineno, line = lineno + 1, f.next()
        mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line)
        assert mo, (lineno, line)
        first = {}
        rawbitset = eval(mo.group(1))
        for i, c in enumerate(rawbitset):
            byte = ord(c)
            for j in range(8):
                if byte & (1 << j):
                    first[i*8 + j] = 1
        dfas[number] = (state, first)
    lineno, line = lineno + 1, f.next()
    assert line == '};\n', (lineno, line)
    self.dfas = dfas

    # Parse the labels.
    labels = []
    lineno, line = lineno + 1, f.next()
    mo = re.match(r'static label labels\[(\d+)\] = {$', line)
    assert mo, (lineno, line)
    nlabels = int(mo.group(1))
    for i in range(nlabels):
        lineno, line = lineno + 1, f.next()
        mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line)
        assert mo, (lineno, line)
        x, y = mo.groups()
        x = int(x)
        if y == '0':
            y = None
        else:
            y = eval(y)
        labels.append((x, y))
    lineno, line = lineno + 1, f.next()
    assert line == '};\n', (lineno, line)
    self.labels = labels

    # Parse the grammar struct.
    lineno, line = lineno + 1, f.next()
    assert line == 'grammar _PyParser_Grammar = {\n', (lineno, line)
    lineno, line = lineno + 1, f.next()
    mo = re.match(r'\s+(\d+),$', line)
    assert mo, (lineno, line)
    ndfas = int(mo.group(1))
    assert ndfas == len(self.dfas)
    lineno, line = lineno + 1, f.next()
    assert line == '\tdfas,\n', (lineno, line)
    lineno, line = lineno + 1, f.next()
    mo = re.match(r'\s+{(\d+), labels},$', line)
    assert mo, (lineno, line)
    nlabels = int(mo.group(1))
    assert nlabels == len(self.labels), (lineno, line)
    lineno, line = lineno + 1, f.next()
    mo = re.match(r'\s+(\d+)$', line)
    assert mo, (lineno, line)
    start = int(mo.group(1))
    assert start in self.number2symbol, (lineno, line)
    self.start = start
    lineno, line = lineno + 1, f.next()
    assert line == '};\n', (lineno, line)
    try:
        lineno, line = lineno + 1, f.next()
    except StopIteration:
        pass
    else:
        assert 0, (lineno, line)
'Create additional useful structures. (Internal).'
def finish_off(self):
    self.keywords = {}
    self.tokens = {}
    for ilabel, (type, value) in enumerate(self.labels):
        if type == token.NAME and value is not None:
            self.keywords[value] = ilabel
        elif value is None:
            self.tokens[type] = ilabel
'Help the next test'
def _Call(self, name, args=None, prefix=None):
    children = []
    if isinstance(args, list):
        for arg in args:
            children.append(arg)
            children.append(Comma())
        children.pop()
    return Call(Name(name), children, prefix)
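For context, a small usage sketch of the helper above. It assumes the standard lib2to3.fixer_util helpers (Name, Comma, Call), which is where these names come from, and only illustrates the shape of the tree being built:

    from lib2to3.fixer_util import Call, Comma, Name

    # _Call('f', [Name('a'), Name('b', prefix=' ')]) builds the same tree as:
    node = Call(Name('f'), [Name('a'), Comma(), Name('b', prefix=' ')])
    print(str(node))  # f(a, b)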
'Reduces a fixer\'s pattern tree to a linear path and adds it to the matcher (a common Aho-Corasick automaton). The fixer is appended to the matching states and called when they are reached.'
def add_fixer(self, fixer):
    self.fixers.append(fixer)
    tree = reduce_tree(fixer.pattern_tree)
    linear = tree.get_linear_subpattern()
    match_nodes = self.add(linear, start=self.root)
    for match_node in match_nodes:
        match_node.fixers.append(fixer)
'Recursively adds a linear pattern to the AC automaton'
def add(self, pattern, start):
    if not pattern:
        return [start]
    if isinstance(pattern[0], tuple):
        # Alternatives: add each one from the current start node,
        # then continue the rest of the pattern from every end node.
        match_nodes = []
        for alternative in pattern[0]:
            end_nodes = self.add(alternative, start=start)
            for end in end_nodes:
                match_nodes.extend(self.add(pattern[1:], end))
        return match_nodes
    else:
        # Single token: follow an existing transition or create one.
        if pattern[0] not in start.transition_table:
            next_node = BMNode()
            start.transition_table[pattern[0]] = next_node
        else:
            next_node = start.transition_table[pattern[0]]
        if pattern[1:]:
            end_nodes = self.add(pattern[1:], start=next_node)
        else:
            end_nodes = [next_node]
        return end_nodes
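To make the insertion logic easier to follow, here is a minimal standalone sketch of the same idea using plain dicts in place of BMNode; every name here is illustrative, not part of the original code:

    def insert(table, pattern):
        # Add a linear pattern to a dict-of-dicts trie; return its end states.
        if not pattern:
            return [table]
        head, rest = pattern[0], pattern[1:]
        if isinstance(head, tuple):  # alternatives, as in add() above
            ends = []
            for alt in head:
                for end in insert(table, list(alt)):
                    ends.extend(insert(end, rest))
            return ends
        nxt = table.setdefault(head, {})
        return insert(nxt, rest) if rest else [nxt]

    root = {}
    insert(root, ['name', ('trailer1', 'trailer2')])
    print(sorted(root['name'].keys()))  # ['trailer1', 'trailer2']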
'The main interface with the bottom matcher. The tree is traversed from the bottom using the constructed automaton. Nodes are only checked once as the tree is retraversed. When the automaton fails, we give it one more shot (in case the above tree matches as a whole with the rejected leaf), then we break for the next leaf. There is the special case of multiple arguments (see code comments) where we recheck the nodes. Args: leaves: the leaves of the AST tree to be matched. Returns: A dictionary of node matches with fixers as the keys.'
def run(self, leaves):
    current_ac_node = self.root
    results = defaultdict(list)
    for leaf in leaves:
        current_ast_node = leaf
        while current_ast_node:
            current_ast_node.was_checked = True
            for child in current_ast_node.children:
                # Multiple statements, recheck the nodes.
                if isinstance(child, pytree.Leaf) and child.value == u';':
                    current_ast_node.was_checked = False
                    break
            if current_ast_node.type == 1:
                # Name token: match on the value.
                node_token = current_ast_node.value
            else:
                node_token = current_ast_node.type
            if node_token in current_ac_node.transition_table:
                # The matching continues.
                current_ac_node = current_ac_node.transition_table[node_token]
                # Add the current node to each active fixer's match set.
                for fixer in current_ac_node.fixers:
                    if not fixer in results:
                        results[fixer] = []
                    results[fixer].append(current_ast_node)
            else:
                # Matching failed; give it one more shot from the root.
                current_ac_node = self.root
                if (current_ast_node.parent is not None
                        and current_ast_node.parent.was_checked):
                    # This subtree has already been checked; next leaf.
                    break
                if node_token in current_ac_node.transition_table:
                    current_ac_node = current_ac_node.transition_table[node_token]
                    for fixer in current_ac_node.fixers:
                        if not fixer in results.keys():
                            results[fixer] = []
                        results[fixer].append(current_ast_node)
            current_ast_node = current_ast_node.parent
    return results
'Prints a graphviz diagram of the BM automaton (for debugging).'
def print_ac(self):
    print 'digraph g{'

    def print_node(node):
        for subnode_key in node.transition_table.keys():
            subnode = node.transition_table[subnode_key]
            print ('%d -> %d [label=%s] //%s' %
                   (node.id, subnode.id,
                    type_repr(subnode_key), str(subnode.fixers)))
            if subnode_key == 1:
                print subnode.content
            print_node(subnode)

    print_node(self.root)
    print '}'
'Initializer. Creates an attribute for each grammar symbol (nonterminal), whose value is the symbol\'s type (an int >= 256).'
def __init__(self, grammar):
    for name, symbol in grammar.symbol2number.iteritems():
        setattr(self, name, symbol)
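A hedged usage note: in lib2to3 this initializer is what builds pygram.python_symbols, so grammar symbols can be read off as attributes (assuming lib2to3 is importable; symbol numbers start at 256, with token types below that):

    from lib2to3 import pygram

    syms = pygram.python_symbols
    print(syms.simple_stmt >= 256)  # True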
'Transform for the basic import case. Replaces the old import name with a comma separated list of its replacements.'
def transform_import(self, node, results):
    import_mod = results.get('module')
    pref = import_mod.prefix
    names = []
    # Build a comma-separated list of the replacement module names.
    for name in MAPPING[import_mod.value][:-1]:
        names.extend([Name(name[0], prefix=pref), Comma()])
    names.append(Name(MAPPING[import_mod.value][-1][0], prefix=pref))
    import_mod.replace(names)
'Transform for imports of specific module elements. Replaces the module to be imported from with the appropriate new module.'
def transform_member(self, node, results):
    mod_member = results.get('mod_member')
    pref = mod_member.prefix
    member = results.get('member')

    # Simple case: only a single member is being imported.
    if member:
        # This may be a list of length one, or just a node.
        if isinstance(member, list):
            member = member[0]
        new_name = None
        for change in MAPPING[mod_member.value]:
            if member.value in change[1]:
                new_name = change[0]
                break
        if new_name:
            mod_member.replace(Name(new_name, prefix=pref))
        else:
            self.cannot_convert(node, 'This is an invalid module element')

    # Multiple members are being imported.
    else:
        # A list of replacement modules plus a dict mapping each
        # module to its members; order matters.
        modules = []
        mod_dict = {}
        members = results['members']
        for member in members:
            # We only care about the actual members.
            if member.type == syms.import_as_name:
                as_name = member.children[2].value
                member_name = member.children[0].value
            else:
                member_name = member.value
                as_name = None
            if member_name != u',':
                for change in MAPPING[mod_member.value]:
                    if member_name in change[1]:
                        if change[0] not in mod_dict:
                            modules.append(change[0])
                        mod_dict.setdefault(change[0], []).append(member)

        new_nodes = []
        indentation = find_indentation(node)
        first = True

        def handle_name(name, prefix):
            if name.type == syms.import_as_name:
                kids = [Name(name.children[0].value, prefix=prefix),
                        name.children[1].clone(),
                        name.children[2].clone()]
                return [Node(syms.import_as_name, kids)]
            return [Name(name.value, prefix=prefix)]

        for module in modules:
            elts = mod_dict[module]
            names = []
            for elt in elts[:-1]:
                names.extend(handle_name(elt, pref))
                names.append(Comma())
            names.extend(handle_name(elts[-1], pref))
            new = FromImport(module, names)
            if not first or node.parent.prefix.endswith(indentation):
                new.prefix = indentation
            new_nodes.append(new)
            first = False
        if new_nodes:
            nodes = []
            for new_node in new_nodes[:-1]:
                nodes.extend([new_node, Newline()])
            nodes.append(new_nodes[-1])
            node.replace(nodes)
        else:
            self.cannot_convert(node, 'All module elements are invalid')
'Transform for calls to module members in code.'
def transform_dot(self, node, results):
    module_dot = results.get('bare_with_attr')
    member = results.get('member')
    new_name = None
    if isinstance(member, list):
        member = member[0]
    for change in MAPPING[module_dot.value]:
        if member.value in change[1]:
            new_name = change[0]
            break
    if new_name:
        module_dot.replace(Name(new_name, prefix=module_dot.prefix))
    else:
        self.cannot_convert(node, 'This is an invalid module element')
'Initializer. Args: fixer_names: a list of fixers to import. options: a dict with configuration. explicit: a list of fixers to run even if they are explicit.'
def __init__(self, fixer_names, options=None, explicit=None):
    self.fixers = fixer_names
    self.explicit = explicit or []
    self.options = self._default_options.copy()
    if options is not None:
        self.options.update(options)
    if self.options['print_function']:
        self.grammar = pygram.python_grammar_no_print_statement
    else:
        self.grammar = pygram.python_grammar
    self.errors = []
    self.logger = logging.getLogger('RefactoringTool')
    self.fixer_log = []
    self.wrote = False
    self.driver = driver.Driver(self.grammar,
                                convert=pytree.convert,
                                logger=self.logger)
    self.pre_order, self.post_order = self.get_fixers()

    self.files = []  # List of files being refactored

    self.BM = bm.BottomMatcher()
    self.bmi_pre_order = []   # Bottom Matcher incompatible fixers
    self.bmi_post_order = []

    for fixer in chain(self.post_order, self.pre_order):
        if fixer.BM_compatible:
            self.BM.add_fixer(fixer)
        elif fixer in self.pre_order:
            self.bmi_pre_order.append(fixer)
        elif fixer in self.post_order:
            self.bmi_post_order.append(fixer)

    self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)
    self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)
'Inspects the options to load the requested patterns and handlers. Returns: (pre_order, post_order), where pre_order is the list of fixers that want a pre-order AST traversal, and post_order is the list that want post-order traversal.'
def get_fixers(self):
    pre_order_fixers = []
    post_order_fixers = []
    for fix_mod_path in self.fixers:
        mod = __import__(fix_mod_path, {}, {}, ['*'])
        fix_name = fix_mod_path.rsplit('.', 1)[-1]
        if fix_name.startswith(self.FILE_PREFIX):
            fix_name = fix_name[len(self.FILE_PREFIX):]
        parts = fix_name.split('_')
        class_name = self.CLASS_PREFIX + ''.join([p.title() for p in parts])
        try:
            fix_class = getattr(mod, class_name)
        except AttributeError:
            raise FixerError("Can't find %s.%s" % (fix_name, class_name))
        fixer = fix_class(self.options, self.fixer_log)
        if (fixer.explicit and self.explicit is not True and
                fix_mod_path not in self.explicit):
            self.log_message('Skipping implicit fixer: %s', fix_name)
            continue
        self.log_debug('Adding transformation: %s', fix_name)
        if fixer.order == 'pre':
            pre_order_fixers.append(fixer)
        elif fixer.order == 'post':
            post_order_fixers.append(fixer)
        else:
            raise FixerError('Illegal fixer order: %r' % fixer.order)
    key_func = operator.attrgetter('run_order')
    pre_order_fixers.sort(key=key_func)
    post_order_fixers.sort(key=key_func)
    return (pre_order_fixers, post_order_fixers)
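The module-to-class naming convention this relies on is easiest to see in isolation. A sketch with a hypothetical fixer path (FILE_PREFIX and CLASS_PREFIX are 'fix_' and 'Fix' on RefactoringTool):

    FILE_PREFIX, CLASS_PREFIX = 'fix_', 'Fix'
    fix_mod_path = 'myfixes.fix_foo_bar'          # hypothetical module
    fix_name = fix_mod_path.rsplit('.', 1)[-1]    # 'fix_foo_bar'
    if fix_name.startswith(FILE_PREFIX):
        fix_name = fix_name[len(FILE_PREFIX):]    # 'foo_bar'
    class_name = CLASS_PREFIX + ''.join(p.title() for p in fix_name.split('_'))
    print(class_name)                             # 'FixFooBar'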
'Called when an error occurs.'
def log_error(self, msg, *args, **kwds):
raise
'Hook to log a message.'
def log_message(self, msg, *args):
    if args:
        msg = msg % args
    self.logger.info(msg)
'Called with the old version, new version, and filename of a refactored file.'
def print_output(self, old_text, new_text, filename, equal):
pass
'Refactor a list of files and directories.'
def refactor(self, items, write=False, doctests_only=False):
    for dir_or_file in items:
        if os.path.isdir(dir_or_file):
            self.refactor_dir(dir_or_file, write, doctests_only)
        else:
            self.refactor_file(dir_or_file, write, doctests_only)
'Descends into a directory and refactors every Python file found. Python files are assumed to have a .py extension. Files and subdirectories starting with \'.\' are skipped.'
def refactor_dir(self, dir_name, write=False, doctests_only=False):
    py_ext = os.extsep + 'py'
    for dirpath, dirnames, filenames in os.walk(dir_name):
        self.log_debug('Descending into %s', dirpath)
        dirnames.sort()
        filenames.sort()
        for name in filenames:
            if (not name.startswith('.')
                    and os.path.splitext(name)[1] == py_ext):
                fullname = os.path.join(dirpath, name)
                self.refactor_file(fullname, write, doctests_only)
        # Modify dirnames in-place so os.walk skips dot-directories.
        dirnames[:] = [dn for dn in dirnames if not dn.startswith('.')]
'Do our best to decode a Python source file correctly.'
def _read_python_source(self, filename):
    try:
        f = open(filename, 'rb')
    except IOError as err:
        self.log_error("Can't open %s: %s", filename, err)
        return None, None
    try:
        encoding = tokenize.detect_encoding(f.readline)[0]
    finally:
        f.close()
    with _open_with_encoding(filename, 'r', encoding=encoding) as f:
        return _from_system_newlines(f.read()), encoding
'Refactors a file.'
def refactor_file(self, filename, write=False, doctests_only=False):
    input, encoding = self._read_python_source(filename)
    if input is None:
        # Reading the file failed.
        return
    input += u'\n'  # Silence certain parse errors
    if doctests_only:
        self.log_debug('Refactoring doctests in %s', filename)
        output = self.refactor_docstring(input, filename)
        if output != input:
            self.processed_file(output, filename, input, write, encoding)
        else:
            self.log_debug('No doctest changes in %s', filename)
    else:
        tree = self.refactor_string(input, filename)
        if tree and tree.was_changed:
            # The [:-1] is to take off the \n we added earlier.
            self.processed_file(unicode(tree)[:-1], filename,
                                write=write, encoding=encoding)
        else:
            self.log_debug('No changes in %s', filename)
'Refactor a given input string. Args: data: a string holding the code to be refactored. name: a human-readable name for use in error/log messages. Returns: An AST corresponding to the refactored input stream; None if there were errors during the parse.'
def refactor_string(self, data, name):
    features = _detect_future_features(data)
    if 'print_function' in features:
        self.driver.grammar = pygram.python_grammar_no_print_statement
    try:
        tree = self.driver.parse_string(data)
    except Exception as err:
        self.log_error("Can't parse %s: %s: %s",
                       name, err.__class__.__name__, err)
        return
    finally:
        self.driver.grammar = self.grammar
    tree.future_features = features
    self.log_debug('Refactoring %s', name)
    self.refactor_tree(tree, name)
    return tree
'Refactors a parse tree (modifying the tree in place). For compatible patterns the bottom matcher module is used. Otherwise the tree is traversed node-to-node for matches. Args: tree: a pytree.Node instance representing the root of the tree to be refactored. name: a human-readable name for this tree. Returns: True if the tree was modified, False otherwise.'
def refactor_tree(self, tree, name):
    for fixer in chain(self.pre_order, self.post_order):
        fixer.start_tree(tree, name)

    # Use traditional matching for the BM-incompatible fixers.
    self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())
    self.traverse_by(self.bmi_post_order_heads, tree.post_order())

    # Obtain a set of candidate nodes.
    match_set = self.BM.run(tree.leaves())

    while any(match_set.values()):
        for fixer in self.BM.fixers:
            if fixer in match_set and match_set[fixer]:
                # Sort by depth; apply fixers from the bottom of
                # the AST to the top.
                match_set[fixer].sort(key=pytree.Base.depth, reverse=True)

                if fixer.keep_line_order:
                    # Some fixers (e.g. fix_imports) must be applied
                    # in the original file's line order.
                    match_set[fixer].sort(key=pytree.Base.get_lineno)

                for node in list(match_set[fixer]):
                    if node in match_set[fixer]:
                        match_set[fixer].remove(node)
                    try:
                        find_root(node)
                    except AssertionError:
                        # This node has been cut off from the tree
                        # by a previous transformation; skip it.
                        continue
                    if node.fixers_applied and fixer in node.fixers_applied:
                        # Do not apply the same fixer again.
                        continue
                    results = fixer.match(node)
                    if results:
                        new = fixer.transform(node, results)
                        if new is not None:
                            node.replace(new)
                            # Mark the replacement so the fixer is not
                            # applied to it or any subnode again.
                            for node in new.post_order():
                                if not node.fixers_applied:
                                    node.fixers_applied = []
                                node.fixers_applied.append(fixer)

                            # Update the match set for the added code.
                            new_matches = self.BM.run(new.leaves())
                            for fxr in new_matches:
                                if not fxr in match_set:
                                    match_set[fxr] = []
                                match_set[fxr].extend(new_matches[fxr])

    for fixer in chain(self.pre_order, self.post_order):
        fixer.finish_tree(tree, name)
    return tree.was_changed
'Traverse an AST, applying a set of fixers to each node. This is a helper method for refactor_tree(). Args: fixers: a list of fixer instances. traversal: a generator that yields AST nodes. Returns: None'
def traverse_by(self, fixers, traversal):
    if not fixers:
        return
    for node in traversal:
        for fixer in fixers[node.type]:
            results = fixer.match(node)
            if results:
                new = fixer.transform(node, results)
                if new is not None:
                    node.replace(new)
                    node = new
'Called when a file has been refactored, and there are changes.'
def processed_file(self, new_text, filename, old_text=None, write=False, encoding=None):
    self.files.append(filename)
    if old_text is None:
        old_text = self._read_python_source(filename)[0]
        if old_text is None:
            return
    equal = old_text == new_text
    self.print_output(old_text, new_text, filename, equal)
    if equal:
        self.log_debug('No changes to %s', filename)
        return
    if write:
        self.write_file(new_text, filename, old_text, encoding)
    else:
        self.log_debug('Not writing changes to %s', filename)
'Writes a string to a file. It first shows a unified diff between the old text and the new text, and then rewrites the file; the latter is only done if the write option is set.'
def write_file(self, new_text, filename, old_text, encoding=None):
    try:
        f = _open_with_encoding(filename, 'w', encoding=encoding)
    except os.error as err:
        self.log_error("Can't create %s: %s", filename, err)
        return
    try:
        f.write(_to_system_newlines(new_text))
    except os.error as err:
        self.log_error("Can't write %s: %s", filename, err)
    finally:
        f.close()
    self.log_debug('Wrote changes to %s', filename)
    self.wrote = True
'Refactors a docstring, looking for doctests. This returns a modified version of the input string. It looks for doctests, which start with a ">>>" prompt, and may be continued with "..." prompts, as long as the "..." is indented the same as the ">>>". (Unfortunately we can\'t use the doctest module\'s parser, since, like most parsers, it is not geared towards preserving the original source.)'
def refactor_docstring(self, input, filename):
    result = []
    block = None
    block_lineno = None
    indent = None
    lineno = 0
    for line in input.splitlines(True):
        lineno += 1
        if line.lstrip().startswith(self.PS1):
            if block is not None:
                result.extend(self.refactor_doctest(block, block_lineno,
                                                    indent, filename))
            block_lineno = lineno
            block = [line]
            i = line.find(self.PS1)
            indent = line[:i]
        elif (indent is not None and
              (line.startswith(indent + self.PS2) or
               line == indent + self.PS2.rstrip() + u'\n')):
            block.append(line)
        else:
            if block is not None:
                result.extend(self.refactor_doctest(block, block_lineno,
                                                    indent, filename))
            block = None
            indent = None
            result.append(line)
    if block is not None:
        result.extend(self.refactor_doctest(block, block_lineno,
                                            indent, filename))
    return u''.join(result)
'Refactors one doctest. A doctest is given as a block of lines, the first of which starts with ">>>" (possibly indented), while the remaining lines start with "..." (identically indented).'
def refactor_doctest(self, block, lineno, indent, filename):
    try:
        tree = self.parse_block(block, lineno, indent)
    except Exception as err:
        if self.logger.isEnabledFor(logging.DEBUG):
            for line in block:
                self.log_debug('Source: %s', line.rstrip(u'\n'))
        self.log_error("Can't parse docstring in %s line %s: %s: %s",
                       filename, lineno, err.__class__.__name__, err)
        return block
    if self.refactor_tree(tree, filename):
        new = unicode(tree).splitlines(True)
        # Strip the blank lines that wrap_toks() effectively prepended.
        clipped, new = new[:lineno-1], new[lineno-1:]
        assert clipped == [u'\n'] * (lineno-1), clipped
        if not new[-1].endswith(u'\n'):
            new[-1] += u'\n'
        block = [indent + self.PS1 + new.pop(0)]
        if new:
            block += [indent + self.PS2 + line for line in new]
    return block
'Parses a block into a tree. This is necessary to get correct line number / offset information in the parser diagnostics and embedded into the parse tree.'
def parse_block(self, block, lineno, indent):
    tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
    tree.future_features = frozenset()
    return tree
'Wraps a tokenize stream to systematically modify start/end.'
def wrap_toks(self, block, lineno, indent):
    tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)
    for type, value, (line0, col0), (line1, col1), line_text in tokens:
        # Offset the token's line numbers to match the original file.
        line0 += lineno - 1
        line1 += lineno - 1
        yield type, value, (line0, col0), (line1, col1), line_text
'Generates lines as expected by tokenize from a list of lines. This strips the first len(indent + self.PS1) characters off each line.'
def gen_lines(self, block, indent):
    prefix1 = indent + self.PS1
    prefix2 = indent + self.PS2
    prefix = prefix1
    for line in block:
        if line.startswith(prefix):
            yield line[len(prefix):]
        elif line == prefix.rstrip() + u'\n':
            yield u'\n'
        else:
            raise AssertionError('line=%r, prefix=%r' % (line, prefix))
        prefix = prefix2
    while True:
        yield ''
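A standalone illustration of the stripping gen_lines() performs; PS1/PS2 and the indent are the usual doctest values, and this sketch assumes nothing beyond them:

    PS1, PS2, indent = '>>> ', '... ', '    '
    block = ['    >>> x = 1\n', '    ... y = 2\n']
    prefix = indent + PS1
    for line in block:
        print(repr(line[len(prefix):]))
        prefix = indent + PS2
    # 'x = 1\n'
    # 'y = 2\n'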
'Constructor that prevents Base from being instantiated.'
def __new__(cls, *args, **kwds):
    assert cls is not Base, 'Cannot instantiate Base'
    return object.__new__(cls)
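A quick demonstration of the guard (hedged: this relies on lib2to3.pytree being importable and on assertions being enabled, i.e. not running with -O):

    from lib2to3.pytree import Base, Leaf

    try:
        Base()
    except AssertionError as err:
        print(err)                          # Cannot instantiate Base
    print(isinstance(Leaf(1, 'x'), Base))   # True: subclasses instantiate fine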
'Compare two nodes for equality. This calls the method _eq().'
def __eq__(self, other):
    if self.__class__ is not other.__class__:
        return NotImplemented
    return self._eq(other)
'Compare two nodes for inequality. This calls the method _eq().'
def __ne__(self, other):
    if self.__class__ is not other.__class__:
        return NotImplemented
    return not self._eq(other)
'Compare two nodes for equality. This is called by __eq__ and __ne__. It is only called if the two nodes have the same type. This must be implemented by the concrete subclass. Nodes should be considered equal if they have the same structure, ignoring the prefix string and other context information.'
def _eq(self, other):
raise NotImplementedError
'Return a cloned (deep) copy of self. This must be implemented by the concrete subclass.'
def clone(self):
raise NotImplementedError
'Return a post-order iterator for the tree. This must be implemented by the concrete subclass.'
def post_order(self):
raise NotImplementedError
'Return a pre-order iterator for the tree. This must be implemented by the concrete subclass.'
def pre_order(self):
raise NotImplementedError
'Set the prefix for the node (see Leaf class). DEPRECATED; use the prefix property directly.'
def set_prefix(self, prefix):
    warnings.warn('set_prefix() is deprecated; use the prefix property',
                  DeprecationWarning, stacklevel=2)
    self.prefix = prefix
'Return the prefix for the node (see Leaf class). DEPRECATED; use the prefix property directly.'
def get_prefix(self):
    warnings.warn('get_prefix() is deprecated; use the prefix property',
                  DeprecationWarning, stacklevel=2)
    return self.prefix
'Replace this node with a new one in the parent.'
def replace(self, new):
    assert self.parent is not None, str(self)
    assert new is not None
    if not isinstance(new, list):
        new = [new]
    l_children = []
    found = False
    for ch in self.parent.children:
        if ch is self:
            assert not found, (self.parent.children, self, new)
            if new is not None:
                l_children.extend(new)
            found = True
        else:
            l_children.append(ch)
    assert found, (self.children, self, new)
    self.parent.changed()
    self.parent.children = l_children
    for x in new:
        x.parent = self.parent
    self.parent = None
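A minimal usage sketch, assuming lib2to3.pytree is importable; 256 here is simply the smallest legal Node type:

    from lib2to3.pytree import Node, Leaf

    old = Leaf(1, 'x')
    parent = Node(256, [old])
    new = Leaf(1, 'y')
    old.replace(new)
    print(parent.children == [new])  # True
    print(new.parent is parent)      # True
    print(old.parent)                # None: old is detached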
'Return the line number which generated the invocant node.'
def get_lineno(self):
    node = self
    while not isinstance(node, Leaf):
        if not node.children:
            return
        node = node.children[0]
    return node.lineno
'Remove the node from the tree. Returns the position of the node in its parent\'s children before it was removed.'
def remove(self):
    if self.parent:
        for i, node in enumerate(self.parent.children):
            if node is self:
                self.parent.changed()
                del self.parent.children[i]
                self.parent = None
                return i
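And the companion sketch for remove(), under the same assumption that lib2to3.pytree is importable:

    from lib2to3.pytree import Node, Leaf

    a, b = Leaf(1, 'a'), Leaf(1, 'b')
    parent = Node(256, [a, b])
    print(b.remove())       # 1: the index b occupied before removal
    print(parent.children)  # [Leaf(1, 'a')]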
'The node immediately following the invocant in their parent\'s children list. If the invocant does not have a next sibling, it is None.'
@property
def next_sibling(self):
    if self.parent is None:
        return None
    # Can't use index(); we need to test by identity.
    for i, child in enumerate(self.parent.children):
        if child is self:
            try:
                return self.parent.children[i+1]
            except IndexError:
                return None
'The node immediately preceding the invocant in their parent\'s children list. If the invocant does not have a previous sibling, it is None.'
@property
def prev_sibling(self):
    if self.parent is None:
        return None
    # Can't use index(); we need to test by identity.
    for i, child in enumerate(self.parent.children):
        if child is self:
            if i == 0:
                return None
            return self.parent.children[i-1]
'Return the string immediately following the invocant node. This is effectively equivalent to node.next_sibling.prefix.'
def get_suffix(self):
    next_sib = self.next_sibling
    if next_sib is None:
        return u''
    return next_sib.prefix
'Initializer. Takes a type constant (a symbol number >= 256), a sequence of child nodes, and an optional context keyword argument. As a side effect, the parent pointers of the children are updated.'
def __init__(self, type, children, context=None, prefix=None, fixers_applied=None):
    assert type >= 256, type
    self.type = type
    self.children = list(children)
    for ch in self.children:
        assert ch.parent is None, repr(ch)
        ch.parent = self
    if prefix is not None:
        self.prefix = prefix
    if fixers_applied:
        self.fixers_applied = fixers_applied[:]
    else:
        self.fixers_applied = None
'Return a canonical string representation.'
def __repr__(self):
return ('%s(%s, %r)' % (self.__class__.__name__, type_repr(self.type), self.children))
'Return a pretty string representation. This reproduces the input source exactly.'
def __unicode__(self):
return u''.join(map(unicode, self.children))
'Compare two nodes for equality.'
def _eq(self, other):
return ((self.type, self.children) == (other.type, other.children))
'Return a cloned (deep) copy of self.'
def clone(self):
return Node(self.type, [ch.clone() for ch in self.children], fixers_applied=self.fixers_applied)
'Return a post-order iterator for the tree.'
def post_order(self):
    for child in self.children:
        for node in child.post_order():
            yield node
    yield self
'Return a pre-order iterator for the tree.'
def pre_order(self):
    yield self
    for child in self.children:
        for node in child.pre_order():
            yield node
'The whitespace and comments preceding this node in the input.'
def _prefix_getter(self):
    if not self.children:
        return ''
    return self.children[0].prefix
'Equivalent to \'node.children[i] = child\'. This method also sets the child\'s parent attribute appropriately.'
def set_child(self, i, child):
    child.parent = self
    self.children[i].parent = None
    self.children[i] = child
    self.changed()
'Equivalent to \'node.children.insert(i, child)\'. This method also sets the child\'s parent attribute appropriately.'
def insert_child(self, i, child):
    child.parent = self
    self.children.insert(i, child)
    self.changed()
'Equivalent to \'node.children.append(child)\'. This method also sets the child\'s parent attribute appropriately.'
def append_child(self, child):
    child.parent = self
    self.children.append(child)
    self.changed()
'Initializer. Takes a type constant (a token number < 256), a string value, and an optional context keyword argument.'
def __init__(self, type, value, context=None, prefix=None, fixers_applied=[]):
    assert 0 <= type < 256, type
    if context is not None:
        self._prefix, (self.lineno, self.column) = context
    self.type = type
    self.value = value
    if prefix is not None:
        self._prefix = prefix
    # The default [] is safe here because it is copied, never mutated.
    self.fixers_applied = fixers_applied[:]
'Return a canonical string representation.'
def __repr__(self):
return ('%s(%r, %r)' % (self.__class__.__name__, self.type, self.value))
'Return a pretty string representation. This reproduces the input source exactly.'
def __unicode__(self):
return (self.prefix + unicode(self.value))
'Compare two nodes for equality.'
def _eq(self, other):
return ((self.type, self.value) == (other.type, other.value))
'Return a cloned (deep) copy of self.'
def clone(self):
return Leaf(self.type, self.value, (self.prefix, (self.lineno, self.column)), fixers_applied=self.fixers_applied)
'Return a post-order iterator for the tree.'
def post_order(self):
(yield self)
'Return a pre-order iterator for the tree.'
def pre_order(self):
(yield self)
'The whitespace and comments preceding this token in the input.'
def _prefix_getter(self):
return self._prefix
'Constructor that prevents BasePattern from being instantiated.'
def __new__(cls, *args, **kwds):
    assert cls is not BasePattern, 'Cannot instantiate BasePattern'
    return object.__new__(cls)
'A subclass can define this as a hook for optimizations. Returns either self or another node with the same effect.'
def optimize(self):
return self
'Does this pattern exactly match a node? Returns True if it matches, False if not. If results is not None, it must be a dict which will be updated with the nodes matching named subpatterns. Default implementation for non-wildcard patterns.'
def match(self, node, results=None):
    if self.type is not None and node.type != self.type:
        return False
    if self.content is not None:
        r = None
        if results is not None:
            r = {}
        if not self._submatch(node, r):
            return False
        if r:
            results.update(r)
    if results is not None and self.name:
        results[self.name] = node
    return True
'Does this pattern exactly match a sequence of nodes? Default implementation for non-wildcard patterns.'
def match_seq(self, nodes, results=None):
    if len(nodes) != 1:
        return False
    return self.match(nodes[0], results)
'Generator yielding all matches for this pattern. Default implementation for non-wildcard patterns.'
def generate_matches(self, nodes):
    r = {}
    if nodes and self.match(nodes[0], r):
        yield 1, r
'Initializer. Takes optional type, content, and name. The type, if given must be a token type (< 256). If not given, this matches any *leaf* node; the content may still be required. The content, if given, must be a string. If a name is given, the matching node is stored in the results dict under that key.'
def __init__(self, type=None, content=None, name=None):
    if type is not None:
        assert 0 <= type < 256, type
    if content is not None:
        assert isinstance(content, basestring), repr(content)
    self.type = type
    self.content = content
    self.name = name
'Override match() to insist on a leaf node.'
def match(self, node, results=None):
    if not isinstance(node, Leaf):
        return False
    return BasePattern.match(self, node, results)
'Match the pattern\'s content to the node\'s children. This assumes the node type matches and self.content is not None. Returns True if it matches, False if not. If results is not None, it must be a dict which will be updated with the nodes matching named subpatterns. When returning False, the results dict may still be updated.'
def _submatch(self, node, results=None):
return (self.content == node.value)
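To see the match()/_submatch()/results protocol end to end, a hedged sketch assuming lib2to3.pytree is importable (token type 1 is NAME):

    from lib2to3.pytree import Leaf, LeafPattern

    pat = LeafPattern(type=1, content='x', name='target')
    results = {}
    print(pat.match(Leaf(1, 'x'), results))  # True
    print(results['target'])                 # Leaf(1, 'x')
    print(pat.match(Leaf(1, 'y')))           # False: content mismatch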
'Initializer. Takes optional type, content, and name. The type, if given, must be a symbol type (>= 256). If the type is None this matches *any* single node (leaf or not), except if content is not None, in which it only matches non-leaf nodes that also match the content pattern. The content, if not None, must be a sequence of Patterns that must match the node\'s children exactly. If the content is given, the type must not be None. If a name is given, the matching node is stored in the results dict under that key.'
def __init__(self, type=None, content=None, name=None):
    if type is not None:
        assert type >= 256, type
    if content is not None:
        assert not isinstance(content, basestring), repr(content)
        content = list(content)
        for i, item in enumerate(content):
            assert isinstance(item, BasePattern), (i, item)
            if isinstance(item, WildcardPattern):
                self.wildcards = True
    self.type = type
    self.content = content
    self.name = name
'Match the pattern\'s content to the node\'s children. This assumes the node type matches and self.content is not None. Returns True if it matches, False if not. If results is not None, it must be a dict which will be updated with the nodes matching named subpatterns. When returning False, the results dict may still be updated.'
def _submatch(self, node, results=None):
    if self.wildcards:
        for c, r in generate_matches(self.content, node.children):
            if c == len(node.children):
                if results is not None:
                    results.update(r)
                return True
        return False
    if len(self.content) != len(node.children):
        return False
    for subpattern, child in zip(self.content, node.children):
        if not subpattern.match(child, results):
            return False
    return True
'Initializer.

Args:
    content: optional sequence of subsequences of patterns; if absent, matches one node; if present, each subsequence is an alternative [*]
    min: optional minimum number of times to match, default 0
    max: optional maximum number of times to match, default HUGE
    name: optional name assigned to this match

[*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is equivalent to (a b c | d e | f g h); if content is None, this is equivalent to \'.\' in regular expression terms. The min and max parameters work as follows:
    min=0, max=maxint: .*
    min=1, max=maxint: .+
    min=0, max=1: .?
    min=1, max=1: .
If content is not None, replace the dot with the parenthesized list of alternatives, e.g. (a b c | d e | f g h)*'
def __init__(self, content=None, min=0, max=HUGE, name=None):
    assert 0 <= min <= max <= HUGE, (min, max)
    if content is not None:
        content = tuple(map(tuple, content))  # Protect against alterations
        assert len(content), repr(content)
        for alt in content:
            assert len(alt), repr(alt)
    self.content = content
    self.min = min
    self.max = max
    self.name = name
'Optimize certain stacked wildcard patterns.'
def optimize(self):
    subpattern = None
    if (self.content is not None and
            len(self.content) == 1 and len(self.content[0]) == 1):
        subpattern = self.content[0][0]
    if self.min == 1 and self.max == 1:
        if self.content is None:
            return NodePattern(name=self.name)
        if subpattern is not None and self.name == subpattern.name:
            return subpattern.optimize()
    if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and
            subpattern.min <= 1 and self.name == subpattern.name):
        return WildcardPattern(subpattern.content,
                               self.min * subpattern.min,
                               self.max * subpattern.max,
                               subpattern.name)
    return self
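A hedged check of the stacking rule above, assuming lib2to3.pytree is importable: a '?' wrapped around a '.*' collapses into a single wildcard with multiplied min/max:

    from lib2to3.pytree import WildcardPattern, HUGE

    inner = WildcardPattern()                         # '.*'
    outer = WildcardPattern([[inner]], min=0, max=1)  # '(.*)?'
    opt = outer.optimize()
    print(opt.content is None)  # True: collapsed to a bare wildcard
    print(opt.min)              # 0
    print(opt.max == HUGE)      # True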
'Does this pattern exactly match a node?'
def match(self, node, results=None):
return self.match_seq([node], results)
'Does this pattern exactly match a sequence of nodes?'
def match_seq(self, nodes, results=None):
    for c, r in self.generate_matches(nodes):
        if c == len(nodes):
            if results is not None:
                results.update(r)
                if self.name:
                    results[self.name] = list(nodes)
            return True
    return False
'Generator yielding matches for a sequence of nodes. Args: nodes: sequence of nodes Yields: (count, results) tuples where: count: the match comprises nodes[:count]; results: dict containing named submatches.'
def generate_matches(self, nodes):
    if self.content is None:
        # Shortcut for the special case described in __init__'s docstring.
        for count in xrange(self.min, 1 + min(len(nodes), self.max)):
            r = {}
            if self.name:
                r[self.name] = nodes[:count]
            yield count, r
    elif self.name == 'bare_name':
        yield self._bare_name_matches(nodes)
    else:
        if hasattr(sys, 'getrefcount'):
            # CPython prints ugly messages about ignored RuntimeErrors
            # when the recursion limit is hit; hide them.
            save_stderr = sys.stderr
            sys.stderr = StringIO()
        try:
            for count, r in self._recursive_matches(nodes, 0):
                if self.name:
                    r[self.name] = nodes[:count]
                yield count, r
        except RuntimeError:
            # The recursive scheme hit the recursion limit; fall back
            # to the iterative matching scheme instead.
            for count, r in self._iterative_matches(nodes):
                if self.name:
                    r[self.name] = nodes[:count]
                yield count, r
        finally:
            if hasattr(sys, 'getrefcount'):
                sys.stderr = save_stderr
'Helper to iteratively yield the matches.'
def _iterative_matches(self, nodes):
    nodelen = len(nodes)
    if 0 >= self.min:
        yield 0, {}

    results = []
    # Generate matches that use just one alternative from self.content.
    for alt in self.content:
        for c, r in generate_matches(alt, nodes):
            yield c, r
            results.append((c, r))

    # For each match, iterate down the remaining nodes.
    while results:
        new_results = []
        for c0, r0 in results:
            # Stop if the entire set of nodes has been matched.
            if c0 < nodelen and c0 <= self.max:
                for alt in self.content:
                    for c1, r1 in generate_matches(alt, nodes[c0:]):
                        if c1 > 0:
                            r = {}
                            r.update(r0)
                            r.update(r1)
                            yield c0 + c1, r
                            new_results.append((c0 + c1, r))
        results = new_results
'Special optimized matcher for bare_name.'
def _bare_name_matches(self, nodes):
    count = 0
    r = {}
    done = False
    max = len(nodes)
    while not done and count < max:
        done = True
        for leaf in self.content:
            if leaf[0].match(nodes[count], r):
                count += 1
                done = False
                break
    r[self.name] = nodes[:count]
    return count, r
'Helper to recursively yield the matches.'
def _recursive_matches(self, nodes, count):
    assert self.content is not None
    if count >= self.min:
        yield 0, {}
    if count < self.max:
        for alt in self.content:
            for c0, r0 in generate_matches(alt, nodes):
                for c1, r1 in self._recursive_matches(nodes[c0:], count + 1):
                    r = {}
                    r.update(r0)
                    r.update(r1)
                    yield c0 + c1, r
'Initializer. The argument is either a pattern or None. If it is None, this only matches an empty sequence (effectively \'$\' in regex lingo). If it is not None, this matches whenever the argument pattern doesn\'t have any matches.'
def __init__(self, content=None):
    if content is not None:
        assert isinstance(content, BasePattern), repr(content)
    self.content = content
'Initializer. Subclass may override. Args: options: a dict containing the options passed to RefactoringTool that could be used to customize the fixer through the command line. log: a list to append warnings and other messages to.'
def __init__(self, options, log):
    self.options = options
    self.log = log
    self.compile_pattern()
'Compiles self.PATTERN into self.pattern. Subclass may override if it doesn\'t want to use self.{pattern,PATTERN} in .match().'
def compile_pattern(self):
    if self.PATTERN is not None:
        PC = PatternCompiler()
        self.pattern, self.pattern_tree = PC.compile_pattern(self.PATTERN,
                                                             with_tree=True)
'Set the filename, and a logger derived from it. The main refactoring tool should call this.'
def set_filename(self, filename):
    self.filename = filename
    self.logger = logging.getLogger(filename)
'Returns match for a given parse tree node. Should return a true or false object (not necessarily a bool). It may return a non-empty dict of matching sub-nodes as returned by a matching pattern. Subclass may override.'
def match(self, node):
    results = {'node': node}
    return self.pattern.match(node, results) and results
'Returns the transformation for a given parse tree node. Args: node: the root of the parse tree that matched the fixer. results: a dict mapping symbolic names to part of the match. Returns: None, or a node that is a modified copy of the argument node. The node argument may also be modified in-place to effect the same change. Subclass *must* override.'
def transform(self, node, results):
raise NotImplementedError()
'Return a string suitable for use as an identifier. The new name is guaranteed not to conflict with other identifiers.'
def new_name(self, template=u'xxx_todo_changeme'):
    name = template
    while name in self.used_names:
        name = template + unicode(self.numbers.next())
    self.used_names.add(name)
    return name
'Warn the user that a given chunk of code is not valid Python 3, but that it cannot be converted automatically. First argument is the top-level node for the code in question. Optional second argument is why it can\'t be converted.'
def cannot_convert(self, node, reason=None):
    lineno = node.get_lineno()
    for_output = node.clone()
    for_output.prefix = u''
    msg = 'Line %d: could not convert: %s'
    self.log_message(msg % (lineno, for_output))
    if reason:
        self.log_message(reason)
'Used for warning the user about possible uncertainty in the translation. First argument is the top-level node for the code in question. Optional second argument is why it can\'t be converted.'
def warning(self, node, reason):
    lineno = node.get_lineno()
    self.log_message('Line %d: %s' % (lineno, reason))
'Some fixers need to maintain tree-wide state. This method is called once, at the start of tree fix-up. tree - the root node of the tree to be processed. filename - the name of the file the tree came from.'
def start_tree(self, tree, filename):
    self.used_names = tree.used_names
    self.set_filename(filename)
    self.numbers = itertools.count(1)
    self.first_log = True
'Some fixers need to maintain tree-wide state. This method is called once, at the conclusion of tree fix-up. tree - the root node of the tree to be processed. filename - the name of the file the tree came from.'
def finish_tree(self, tree, filename):
pass
'Construct a SequenceMatcher.

Optional arg isjunk is None (the default), or a one-argument function that takes a sequence element and returns true iff the element is junk. None is equivalent to passing "lambda x: 0", i.e. no elements are considered to be junk. For example, pass
    lambda x: x in " \t"
if you\'re comparing lines as sequences of characters, and don\'t want to synch up on blanks or hard tabs.

Optional arg a is the first of two sequences to be compared. By default, an empty string. The elements of a must be hashable. See also .set_seqs() and .set_seq1().

Optional arg b is the second of two sequences to be compared. By default, an empty string. The elements of b must be hashable. See also .set_seqs() and .set_seq2().

Optional arg autojunk should be set to False to disable the "automatic junk heuristic" that treats popular elements as junk (see module documentation for more information).'
def __init__(self, isjunk=None, a='', b='', autojunk=True):
    self.isjunk = isjunk
    self.a = self.b = None
    self.autojunk = autojunk
    self.set_seqs(a, b)
'Set the two sequences to be compared.

>>> s = SequenceMatcher()
>>> s.set_seqs("abcd", "bcde")
>>> s.ratio()
0.75
'
def set_seqs(self, a, b):
    self.set_seq1(a)
    self.set_seq2(b)
'Set the first sequence to be compared. The second sequence to be compared is not changed.

>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0

SequenceMatcher computes and caches detailed information about the second sequence, so if you want to compare one sequence S against many sequences, use .set_seq2(S) once and call .set_seq1(x) repeatedly for each of the other sequences. See also set_seqs() and set_seq2().'
def set_seq1(self, a):
    if a is self.a:
        return
    self.a = a
    self.matching_blocks = self.opcodes = None
'Set the second sequence to be compared. The first sequence to be compared is not changed.

>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq2("abcd")
>>> s.ratio()
1.0

SequenceMatcher computes and caches detailed information about the second sequence, so if you want to compare one sequence S against many sequences, use .set_seq2(S) once and call .set_seq1(x) repeatedly for each of the other sequences. See also set_seqs() and set_seq1().'
def set_seq2(self, b):
    if b is self.b:
        return
    self.b = b
    self.matching_blocks = self.opcodes = None
    self.fullbcount = None
    self.__chain_b()
'Find longest matching block in a[alo:ahi] and b[blo:bhi].

If isjunk is not defined:

Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
    alo <= i <= i+k <= ahi
    blo <= j <= j+k <= bhi
and for all (i\',j\',k\') meeting those conditions,
    k >= k\'
    i <= i\'
    and if i == i\', j <= j\'

In other words, of all maximal matching blocks, return one that starts earliest in a, and of all those maximal matching blocks that start earliest in a, return the one that starts earliest in b.

>>> s = SequenceMatcher(None, " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=0, b=4, size=5)

If isjunk is defined, first the longest matching block is determined as above, but with the additional restriction that no junk element appears in the block. Then that block is extended as far as possible by matching (only) junk elements on both sides. So the resulting block never matches on junk except as identical junk happens to be adjacent to an "interesting" match.

Here\'s the same example as before, but considering blanks to be junk. That prevents " abcd" from matching the " abcd" at the tail end of the second sequence directly. Instead only the "abcd" can match, and matches the leftmost "abcd" in the second sequence:

>>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=1, b=0, size=4)

If no blocks match, return (alo, blo, 0).

>>> s = SequenceMatcher(None, "ab", "c")
>>> s.find_longest_match(0, 2, 0, 1)
Match(a=0, b=0, size=0)'
def find_longest_match(self, alo, ahi, blo, bhi):
    a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.isbjunk
    besti, bestj, bestsize = alo, blo, 0
    # Find the longest junk-free match.  During an iteration of the
    # loop, j2len[j] = length of the longest junk-free match ending
    # with a[i-1] and b[j].
    j2len = {}
    nothing = []
    for i in xrange(alo, ahi):
        # Look at all instances of a[i] in b; note that because
        # b2j has no junk keys, the loop is skipped if a[i] is junk.
        j2lenget = j2len.get
        newj2len = {}
        for j in b2j.get(a[i], nothing):
            # a[i] matches b[j]
            if j < blo:
                continue
            if j >= bhi:
                break
            k = newj2len[j] = j2lenget(j-1, 0) + 1
            if k > bestsize:
                besti, bestj, bestsize = i-k+1, j-k+1, k
        j2len = newj2len

    # Extend the best by non-junk elements on each end.
    while (besti > alo and bestj > blo and
           not isbjunk(b[bestj-1]) and a[besti-1] == b[bestj-1]):
        besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
    while (besti+bestsize < ahi and bestj+bestsize < bhi and
           not isbjunk(b[bestj+bestsize]) and
           a[besti+bestsize] == b[bestj+bestsize]):
        bestsize += 1

    # Now that we have a wholly interesting match (albeit possibly
    # empty!), we may as well suck up the matching junk on each side.
    while (besti > alo and bestj > blo and
           isbjunk(b[bestj-1]) and a[besti-1] == b[bestj-1]):
        besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
    while (besti+bestsize < ahi and bestj+bestsize < bhi and
           isbjunk(b[bestj+bestsize]) and
           a[besti+bestsize] == b[bestj+bestsize]):
        bestsize = bestsize + 1

    return Match(besti, bestj, bestsize)
'Return list of triples describing matching subsequences.

Each triple is of the form (i, j, n), and means that a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in i and in j. New in Python 2.5, it\'s also guaranteed that if (i, j, n) and (i\', j\', n\') are adjacent triples in the list, and the second is not the last triple in the list, then i+n != i\' or j+n != j\'. IOW, adjacent triples never describe adjacent equal blocks.

The last triple is a dummy, (len(a), len(b), 0), and is the only triple with n==0.

>>> s = SequenceMatcher(None, "abxcd", "abcd")
>>> s.get_matching_blocks()
[Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]'
def get_matching_blocks(self):
    if self.matching_blocks is not None:
        return self.matching_blocks
    la, lb = len(self.a), len(self.b)

    # This is most naturally expressed recursively, but to avoid
    # blowing the recursion limit on extreme inputs we keep a queue
    # of regions still to be examined and sort the matches at the end.
    queue = [(0, la, 0, lb)]
    matching_blocks = []
    while queue:
        alo, ahi, blo, bhi = queue.pop()
        i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
        # a[alo:i] vs b[blo:j] unknown
        # a[i:i+k] same as b[j:j+k]
        # a[i+k:ahi] vs b[j+k:bhi] unknown
        if k:   # if k is 0, there was no matching block
            matching_blocks.append(x)
            if alo < i and blo < j:
                queue.append((alo, i, blo, j))
            if i+k < ahi and j+k < bhi:
                queue.append((i+k, ahi, j+k, bhi))
    matching_blocks.sort()

    # Collapse adjacent equal blocks.
    i1 = j1 = k1 = 0
    non_adjacent = []
    for i2, j2, k2 in matching_blocks:
        if i1 + k1 == i2 and j1 + k1 == j2:
            # Adjacent: just lengthen the first block.
            k1 += k2
        else:
            # Not adjacent; remember the first block (k1==0 means it is
            # the dummy we started with) and start a new one.
            if k1:
                non_adjacent.append((i1, j1, k1))
            i1, j1, k1 = i2, j2, k2
    if k1:
        non_adjacent.append((i1, j1, k1))

    non_adjacent.append((la, lb, 0))

    self.matching_blocks = non_adjacent
    return map(Match._make, self.matching_blocks)
'Return list of 5-tuples describing how to turn a into b.

Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the tuple preceding it, and likewise for j1 == the previous j2.

The tags are strings, with these meanings:
    \'replace\':  a[i1:i2] should be replaced by b[j1:j2]
    \'delete\':   a[i1:i2] should be deleted. Note that j1==j2 in this case.
    \'insert\':   b[j1:j2] should be inserted at a[i1:i1]. Note that i1==i2 in this case.
    \'equal\':    a[i1:i2] == b[j1:j2]

>>> a = "qabxcd"
>>> b = "abycdf"
>>> s = SequenceMatcher(None, a, b)
>>> for tag, i1, i2, j1, j2 in s.get_opcodes():
...    print ("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
...           (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))
 delete a[0:1] (q) b[0:0] ()
  equal a[1:3] (ab) b[0:2] (ab)
replace a[3:4] (x) b[2:3] (y)
  equal a[4:6] (cd) b[3:5] (cd)
 insert a[6:6] () b[5:6] (f)'
def get_opcodes(self):
    if self.opcodes is not None:
        return self.opcodes
    i = j = 0
    self.opcodes = answer = []
    for ai, bj, size in self.get_matching_blocks():
        # Invariant: we have pumped out correct diffs to change
        # a[:i] into b[:j], and the next matching block is
        # a[ai:ai+size] == b[bj:bj+size].  So we need to pump out a
        # diff to change a[i:ai] into b[j:bj], pump out the matching
        # block, and move (i, j) beyond the match.
        tag = ''
        if i < ai and j < bj:
            tag = 'replace'
        elif i < ai:
            tag = 'delete'
        elif j < bj:
            tag = 'insert'
        if tag:
            answer.append((tag, i, ai, j, bj))
        i, j = ai+size, bj+size
        # The list of matching blocks is terminated by a dummy with
        # size 0, which contributes no 'equal' opcode.
        if size:
            answer.append(('equal', ai, i, bj, j))
    return answer
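A worked example of consuming these opcodes: replaying them against a reconstructs b exactly, using only the standard difflib module:

    from difflib import SequenceMatcher

    a, b = 'qabxcd', 'abycdf'
    out = []
    for tag, i1, i2, j1, j2 in SequenceMatcher(None, a, b).get_opcodes():
        if tag == 'equal':
            out.append(a[i1:i2])
        elif tag in ('replace', 'insert'):
            out.append(b[j1:j2])
        # 'delete' contributes nothing
    print(''.join(out) == b)  # True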