Here are all the `return` statements in `middle()`:
```python
all_statements(middle_tree(), ast.Return)

# Likewise, all statements and function definitions involving `if` nodes:
all_statements_and_functions(middle_tree(), ast.If)
```
We can randomly pick an element:
```python
import random

random_node = random.choice(all_statements(middle_tree()))
astor.to_source(random_node)
```
## Mutating Statements

The main part of mutation, however, is to actually mutate the code of the program under test. To this end, we introduce a `StatementMutator` class – a subclass of `NodeTransformer`, described in the [official Python `ast` reference](http://docs.python.org/3/library/ast). The constructor provides various keyword arguments to configure the mutator.
```python
from ast import NodeTransformer
import copy

class StatementMutator(NodeTransformer):
    """Mutate statements in an AST for automated repair."""

    def __init__(self,
                 suspiciousness_func:
                     Optional[Callable[[Tuple[Callable, int]], float]] = None,
                 source: Optional[List[ast.AST]] = None,
                 log: bool = False) -> None:
        """
        Constructor.
        `suspiciousness_func` is a function that takes a location
        (function, line_number) and returns a suspiciousness value
        between 0 and 1.0. If not given, all locations get the same
        suspiciousness of 1.0.
        `source` is a list of statements to choose from.
        """
        super().__init__()
        self.log = log

        if suspiciousness_func is None:
            def suspiciousness_func(location: Tuple[Callable, int]) -> float:
                return 1.0
        assert suspiciousness_func is not None

        self.suspiciousness_func: Callable = suspiciousness_func

        if source is None:
            source = []
        self.source = source

        if self.log > 1:
            for i, node in enumerate(self.source):
                print(f"Source for repairs #{i}:")
                print_content(astor.to_source(node), '.py')
                print()
                print()

        self.mutations = 0
```
### Choosing Suspicious Statements to Mutate

We start with deciding which AST nodes to mutate. The method `node_suspiciousness()` returns the suspiciousness for a given node, by invoking the suspiciousness function `suspiciousness_func` given during initialization.
```python
import warnings

class StatementMutator(StatementMutator):
    def node_suspiciousness(self, stmt: ast.AST, func_name: str) -> float:
        if not hasattr(stmt, 'lineno'):
            warnings.warn(f"{self.format_node(stmt)}: Expected line number")
            return 0.0

        suspiciousness = self.suspiciousness_func((func_name, stmt.lineno))
        if suspiciousness is None:  # not executed
            return 0.0

        return suspiciousness

    def format_node(self, node: ast.AST) -> str:
        ...
```
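To illustrate, here is a minimal sketch of a hand-written suspiciousness function – a stand-in for what a statistical debugger would provide. The function name `toy_suspiciousness` and the choice of line 7 are made-up assumptions for this example:

```python
# A hypothetical suspiciousness function: treat line 7 as highly
# suspicious and all other lines as mildly suspicious.
def toy_suspiciousness(location: Tuple[str, int]) -> float:
    func_name, lineno = location
    return 1.0 if lineno == 7 else 0.1

mutator = StatementMutator(suspiciousness_func=toy_suspiciousness)
```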
The method `node_to_be_mutated()` picks a node (statement) to be mutated. It determines the suspiciousness of all statements, and invokes `random.choices()`, using the suspiciousness as weight. Unsuspicious statements (with zero weight) will not be chosen.
```python
class StatementMutator(StatementMutator):
    def node_to_be_mutated(self, tree: ast.AST) -> ast.AST:
        statements = all_statements_and_functions(tree)
        assert len(statements) > 0, "No statements"

        weights = [self.node_suspiciousness(stmt, func_name)
                   for stmt, func_name in statements]
        stmts = [stmt for stmt, func_name in statements]

        if self.log > 1:
            print("Weights:")
            for i, stmt in enumerate(statements):
                node, func_name = stmt
                print(f"{weights[i]:.2} {self.format_node(node)}")

        if sum(weights) == 0.0:
            # No suspicious line
            return random.choice(stmts)
        else:
            return random.choices(stmts, weights=weights)[0]
```
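As a quick illustration of the weighting (not part of the original code), statements with zero suspiciousness are never picked by `random.choices()`; the strings below are mere stand-ins for AST nodes:

```python
import random

stmts = ['return y', 'return x', 'return z']   # stand-ins for statements
weights = [0.8, 0.0, 0.2]
picks = [random.choices(stmts, weights=weights)[0] for _ in range(1000)]
assert 'return x' not in picks  # weight 0.0 => never chosen
```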
### Choosing a Mutation Method

The method `visit()` is invoked on all nodes. For nodes marked with a `mutate_me` attribute, it randomly chooses a mutation method (`choose_op()`) and then invokes it on the node. According to the rules of `NodeTransformer`, the mutation method can return

* a new node or a list of nodes, replacing the current node;
* `None`, deleting it; or
* the node itself, keeping things as they are.
```python
import re

RE_SPACE = re.compile(r'[ \t\n]+')

class StatementMutator(StatementMutator):
    def choose_op(self) -> Callable:
        return random.choice([self.insert, self.swap, self.delete])

    def visit(self, node: ast.AST) -> ast.AST:
        super().visit(node)  # Visits (and transforms?) children

        if not node.mutate_me:  # type: ignore
            return node

        op = self.choose_op()
        new_node = op(node)
        self.mutations += 1

        if self.log:
            print(f"{node.lineno:4}:{op.__name__ + ':':7} "
                  f"{self.format_node(node)} "
                  f"becomes {self.format_node(new_node)}")

        return new_node
```
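A quick sketch of `choose_op()` in isolation: each call returns one of the three bound mutator methods with equal probability.

```python
mutator = StatementMutator()
print([mutator.choose_op().__name__ for i in range(5)])
# e.g. ['swap', 'delete', 'insert', 'insert', 'swap']
```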
### Swapping Statements

Our first mutator is `swap()`, which replaces the current node `NODE` by a random node found in `source` (using a newly defined `choose_statement()`).

As a rule of thumb, we avoid inserting entire subtrees with all attached statements; we keep only the first line of a node. If the new node has the form

```python
if P:
    BODY
```

we thus only insert

```python
if P:
    pass
```

since the statements in `BODY` have a later chance to get inserted. The same holds for all constructs that have a `BODY`, i.e. `while`, `for`, `try`, `with`, and more.
```python
class StatementMutator(StatementMutator):
    def choose_statement(self) -> ast.AST:
        return copy.deepcopy(random.choice(self.source))

class StatementMutator(StatementMutator):
    def swap(self, node: ast.AST) -> ast.AST:
        """Replace `node` with a random node from `source`"""
        new_node = self.choose_statement()

        if isinstance(new_node, ast.stmt):
            # The source `if P: X` is added as `if P: pass`
            if hasattr(new_node, 'body'):
                new_node.body = [ast.Pass()]  # type: ignore
            if hasattr(new_node, 'orelse'):
                new_node.orelse = []  # type: ignore
            if hasattr(new_node, 'finalbody'):
                new_node.finalbody = []  # type: ignore

        # ast.copy_location(new_node, node)
        return new_node
```
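Here is a minimal sketch of `swap()` applied directly to a single statement, assuming `middle_tree()` and `all_statements()` from above; the output varies with the random choice:

```python
mutator = StatementMutator(source=all_statements(middle_tree()))
tree = middle_tree()
first_stmt = tree.body[0].body[0]  # first statement of middle()
swapped = mutator.swap(first_stmt)
print(astor.to_source(swapped).strip())  # e.g. 'm = y' or 'if y < x: pass'
```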
### Inserting Statements

Our next mutator is `insert()`, which randomly chooses some node from `source` and inserts it after the current node `NODE`. (If `NODE` is a `return` statement, then we insert the new node _before_ `NODE`.)

If the statement to be inserted has the form

```python
if P:
    BODY
```

we only insert the "header" of the `if`, resulting in

```python
if P:
    NODE
```

Again, this applies to all constructs that have a `BODY`, i.e., `while`, `for`, `try`, `with`, and more.
```python
class StatementMutator(StatementMutator):
    def insert(self, node: ast.AST) -> Union[ast.AST, List[ast.AST]]:
        """Insert a random node from `source` after `node`"""
        new_node = self.choose_statement()

        if isinstance(new_node, ast.stmt) and hasattr(new_node, 'body'):
            # Inserting `if P: X` as `if P:`
            new_node.body = [node]  # type: ignore
            if hasattr(new_node, 'orelse'):
                new_node.orelse = []  # type: ignore
            if hasattr(new_node, 'finalbody'):
                new_node.finalbody = []  # type: ignore
            # ast.copy_location(new_node, node)
            return new_node

        # Only insert before `return`, not after it
        if isinstance(node, ast.Return):
            if isinstance(new_node, ast.Return):
                return new_node
            else:
                return [new_node, node]

        return [node, new_node]
```
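And a similar sketch for `insert()`: note that the result can be a list of two statements, which `NodeTransformer` splices into the parent's body. Again, this assumes the helpers from above and produces random output:

```python
mutator = StatementMutator(source=all_statements(middle_tree()))
result = mutator.insert(middle_tree().body[0].body[0])
if isinstance(result, list):
    # Two statements: the original plus the inserted one
    print("; ".join(astor.to_source(stmt).strip() for stmt in result))
else:
    print(astor.to_source(result).strip())
```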
### Deleting Statements

Our last mutator is `delete()`, which deletes the current node `NODE`. The standard case is to replace `NODE` by a `pass` statement.

If the statement to be deleted has the form

```python
if P:
    BODY
```

we only delete the "header" of the `if`, resulting in

```python
BODY
```

Again, this applies to all constructs that have a `BODY`, i.e., `while`, `for`, `try`, `with`, and more. If the statement to be deleted has multiple branches, a random branch is chosen (e.g., the `else` branch of an `if` statement).
```python
class StatementMutator(StatementMutator):
    def delete(self, node: ast.AST) -> None:
        """Delete `node`."""

        branches = [attr for attr in ['body', 'orelse', 'finalbody']
                    if hasattr(node, attr) and getattr(node, attr)]
        if branches:
            # Replace `if P: S` by `S`
            branch = random.choice(branches)
            new_node = getattr(node, branch)
            return new_node

        if isinstance(node, ast.stmt):
            # Avoid empty bodies; make this a `pass` statement
            new_node = ast.Pass()
            ast.copy_location(new_node, node)
            return new_node

        return None  # Just delete

from bookutils import quiz

quiz("Why are statements replaced by `pass` rather than deleted?",
     [
         "Because `if P: pass` is valid Python, while `if P:` is not",
         "Because in Python, bodies for `if`, `while`, etc. cannot be empty",
         "Because a `pass` node makes a target for future mutations",
         "Because it causes the tests to pass"
     ], '[3 ^ n for n in range(3)]')
```
Indeed, Python's `compile()` will fail if any of the bodies is an empty list. Also, it leaves us with a statement that can be evolved further.

### Helpers

For logging purposes, we introduce a helper function `format_node()` that returns a short string representation of the node.
```python
class StatementMutator(StatementMutator):
    NODE_MAX_LENGTH = 20

    def format_node(self, node: ast.AST) -> str:
        """Return a string representation for `node`."""
        if node is None:
            return "None"

        if isinstance(node, list):
            return "; ".join(self.format_node(elem) for elem in node)

        s = RE_SPACE.sub(' ', astor.to_source(node)).strip()
        if len(s) > self.NODE_MAX_LENGTH - len("..."):
            s = s[:self.NODE_MAX_LENGTH] + "..."
        return repr(s)
```
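For instance, applied to the (long) `middle()` function definition, `format_node()` would yield an abbreviation such as the one sketched below:

```python
mutator = StatementMutator()
print(mutator.format_node(middle_tree().body[0]))
# e.g. "'def middle(x, y, z):...'"
```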
### All Together

Let us now create the main entry point, which is `mutate()`. It picks the node to be mutated and marks it with a `mutate_me` attribute. By calling `visit()`, it then sets off the `NodeTransformer` transformation.
```python
class StatementMutator(StatementMutator):
    def mutate(self, tree: ast.AST) -> ast.AST:
        """Mutate the given AST `tree` in place. Return mutated tree."""
        assert isinstance(tree, ast.AST)

        tree = copy.deepcopy(tree)

        if not self.source:
            self.source = all_statements(tree)

        for node in ast.walk(tree):
            node.mutate_me = False  # type: ignore

        node = self.node_to_be_mutated(tree)
        node.mutate_me = True  # type: ignore

        self.mutations = 0

        tree = self.visit(tree)

        if self.mutations == 0:
            warnings.warn("No mutations found")

        ast.fix_missing_locations(tree)
        return tree
```
Here are a number of transformations applied by `StatementMutator`:
```python
mutator = StatementMutator(log=True)
for i in range(10):
    new_tree = mutator.mutate(middle_tree())
```
This is the effect of the last mutator applied on `middle`:
```python
print_content(astor.to_source(new_tree), '.py')
```
## Fitness

Now that we can apply random mutations to code, let us find out how good these mutations are. Given our test suites for `middle`, we can check for a given code candidate how many of the previously passing test cases it passes, and how many of the failing test cases it passes. The more tests pass, the higher the _fitness_ of the candidate.

Not all passing tests have the same value, though. We want to prevent _regressions_ – that is, a fix that breaks a previously passing test. The values of `WEIGHT_PASSING` and `WEIGHT_FAILING` set the relative weight (or importance) of passing vs. failing tests; we see that keeping passing tests passing is far more important than fixing failing tests.
```python
WEIGHT_PASSING = 0.99
WEIGHT_FAILING = 0.01

def middle_fitness(tree: ast.AST) -> float:
    """Compute fitness of a `middle()` candidate given in `tree`"""
    original_middle = middle

    try:
        code = compile(tree, '<fitness>', 'exec')
    except ValueError:
        return 0  # Compilation error

    exec(code, globals())

    passing_passed = 0
    failing_passed = 0

    # Test how many of the passing runs pass
    for x, y, z in MIDDLE_PASSING_TESTCASES:
        try:
            middle_test(x, y, z)
            passing_passed += 1
        except AssertionError:
            pass

    passing_ratio = passing_passed / len(MIDDLE_PASSING_TESTCASES)

    # Test how many of the failing runs pass
    for x, y, z in MIDDLE_FAILING_TESTCASES:
        try:
            middle_test(x, y, z)
            failing_passed += 1
        except AssertionError:
            pass

    failing_ratio = failing_passed / len(MIDDLE_FAILING_TESTCASES)

    fitness = (WEIGHT_PASSING * passing_ratio +
               WEIGHT_FAILING * failing_ratio)

    globals()['middle'] = original_middle
    return fitness
```
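As a worked example of the weighting: a candidate that passes all previously passing tests but none of the failing ones scores 0.99 × 1.0 + 0.01 × 0.0 = 0.99; only a candidate passing both sets reaches 1.0. The arithmetic below is plain illustration, not part of the repair code:

```python
# Worked example of the fitness weighting:
assert WEIGHT_PASSING * 1.0 + WEIGHT_FAILING * 0.0 == 0.99  # no regression, no fix
assert WEIGHT_PASSING * 1.0 + WEIGHT_FAILING * 1.0 == 1.0   # full repair
assert WEIGHT_PASSING * 0.0 + WEIGHT_FAILING * 1.0 == 0.01  # "fix" that breaks everything
```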
Our faulty `middle()` program has a fitness of `WEIGHT_PASSING` (99%), because it passes all the passing tests (but none of the failing ones).
```python
middle_fitness(middle_tree())
```
Our "sort of fixed" version of `middle()` gets a much lower fitness:
```python
middle_fitness(ast.parse("def middle(x, y, z): return x"))
```
In the [chapter on statistical debugging](StatisticalDebugger.ipynb), we also defined a fixed version of `middle()`. This gets a fitness of 1.0, passing all tests. (We won't use this fixed version for automated repairs.)
```python
from StatisticalDebugger import middle_fixed

middle_fixed_source = \
    inspect.getsource(middle_fixed).replace('middle_fixed', 'middle').strip()

middle_fitness(ast.parse(middle_fixed_source))
```
## Population

We now set up a _population_ of fix candidates to evolve over time. A higher population size yields more candidates to check, but also needs more time for testing; a lower population size yields fewer candidates, but allows for more evolution steps. We choose a population size of 40 (from \cite{LeGoues2012}).
```python
POPULATION_SIZE = 40
middle_mutator = StatementMutator()

MIDDLE_POPULATION = [middle_tree()] + \
    [middle_mutator.mutate(middle_tree()) for i in range(POPULATION_SIZE - 1)]
```
We sort the fix candidates according to their fitness. This actually runs all tests on all candidates.
```python
MIDDLE_POPULATION.sort(key=middle_fitness, reverse=True)
```
The candidate with the highest fitness is still our original (faulty) `middle()` code:
```python
print(astor.to_source(MIDDLE_POPULATION[0]),
      middle_fitness(MIDDLE_POPULATION[0]))
```
At the other end of the spectrum, the candidate with the lowest fitness has some vital functionality removed:
```python
print(astor.to_source(MIDDLE_POPULATION[-1]),
      middle_fitness(MIDDLE_POPULATION[-1]))
```
## Evolution

To evolve our population of candidates, we fill up the population with mutations created from the population, using a `StatementMutator` as described above to create these mutations. Then we reduce the population to its original size, keeping the fittest candidates.
```python
def evolve_middle() -> None:
    global MIDDLE_POPULATION

    source = all_statements(middle_tree())
    mutator = StatementMutator(source=source)

    n = len(MIDDLE_POPULATION)

    offspring: List[ast.AST] = []
    while len(offspring) < n:
        parent = random.choice(MIDDLE_POPULATION)
        offspring.append(mutator.mutate(parent))

    MIDDLE_POPULATION += offspring
    MIDDLE_POPULATION.sort(key=middle_fitness, reverse=True)
    MIDDLE_POPULATION = MIDDLE_POPULATION[:n]
```
This is what happens when evolving our population for the first time; the original source is still our best candidate.
```python
evolve_middle()

tree = MIDDLE_POPULATION[0]
print(astor.to_source(tree), middle_fitness(tree))

# docassert
assert middle_fitness(tree) < 1.0
```
However, nothing keeps us from evolving for a few generations more...
```python
for i in range(50):
    evolve_middle()
    best_middle_tree = MIDDLE_POPULATION[0]
    fitness = middle_fitness(best_middle_tree)
    print(f"\rIteration {i:2}: fitness = {fitness} ", end="")
    if fitness >= 1.0:
        break

# docassert
assert middle_fitness(best_middle_tree) >= 1.0
```
Success! We find a candidate that actually passes all tests, including the failing ones. Here is the candidate:
```python
print_content(astor.to_source(best_middle_tree), '.py', start_line_number=1)
```
... and yes, it passes all tests:
```python
original_middle = middle

code = compile(best_middle_tree, '<string>', 'exec')
exec(code, globals())

for x, y, z in MIDDLE_PASSING_TESTCASES + MIDDLE_FAILING_TESTCASES:
    middle_test(x, y, z)

middle = original_middle
```
As the code is already validated by hundreds of test cases, it is very valuable for the programmer. Even if the programmer decides not to use the code as is, the location gives very strong hints on which code to examine and where to apply a fix. However, a closer look at our fix candidate shows that there is some amount of redundancy – that is, superfluous statements.
quiz("Some of the lines in our fix candidate are redundant. " "Which are these?", [ "Line 3: `if x < y:`", "Line 4: `if x < z:`", "Line 5: `return y`", "Line 13: `return z`" ], '[eval(chr(100 - x)) for x in [48, 50]]')
## Simplifying

As demonstrated in the chapter on [reducing failure-inducing inputs](DeltaDebugger.ipynb), we can use delta debugging on code to get rid of these superfluous statements. The trick for simplification is to have the test function (`test_middle_lines()`) declare a fitness of 1.0 as a "failure". Delta debugging will then simplify the input as long as the "failure" (and hence the maximum fitness obtained) persists.
```python
from DeltaDebugger import DeltaDebugger

middle_lines = astor.to_source(best_middle_tree).strip().split('\n')

def test_middle_lines(lines: List[str]) -> None:
    source = "\n".join(lines)
    tree = ast.parse(source)
    assert middle_fitness(tree) < 1.0  # "Fail" only while fitness is 1.0

with DeltaDebugger() as dd:
    test_middle_lines(middle_lines)

reduced_lines = dd.min_args()['lines']
reduced_source = "\n".join(reduced_lines)

repaired_source = astor.to_source(ast.parse(reduced_source))  # normalize
print_content(repaired_source, '.py')

# docassert
assert len(reduced_lines) < len(middle_lines)
```
Success! Delta Debugging has eliminated the superfluous statements. We can present the difference to the original as a patch:
```python
original_source = astor.to_source(ast.parse(middle_source))  # normalize

from ChangeDebugger import diff, print_patch  # minor dependency

for patch in diff(original_source, repaired_source):
    print_patch(patch)
```
We can present this patch to the programmer, who will then immediately know what to fix in the `middle()` code.

## Crossover

So far, we have only applied one kind of genetic operator – mutation. There is a second one, though, also inspired by natural selection.

The *crossover* operation mutates two strands of genes, as illustrated in the following picture. We have two parents (red and blue), each as a sequence of genes. To create "crossed" children, we pick a _crossover point_ and exchange the strands at this very point:

![](https://upload.wikimedia.org/wikipedia/commons/thumb/5/56/OnePointCrossover.svg/500px-OnePointCrossover.svg.png)

We implement a `CrossoverOperator` class that implements such an operation on two randomly chosen statement lists of two programs. It is used as

```python
crossover = CrossoverOperator()
crossover.crossover(tree_p1, tree_p2)
```

where `tree_p1` and `tree_p2` are two ASTs that are changed in place.

### Excursion: Implementing Crossover

#### Crossing Statement Lists

Applied on programs, a crossover mutation takes two parents and "crosses" a list of statements. As an example, if our "parents" `p1()` and `p2()` are defined as follows:
```python
def p1():  # type: ignore
    a = 1
    b = 2
    c = 3

def p2():  # type: ignore
    x = 1
    y = 2
    z = 3
```
Then a crossover operation would produce one child with a body

```python
a = 1
y = 2
z = 3
```

and another child with a body

```python
x = 1
b = 2
c = 3
```

We can easily implement this in a `CrossoverOperator` class in a method `cross_bodies()`.
```python
class CrossoverOperator:
    """A class for performing statement crossover of Python programs"""

    def __init__(self, log: bool = False):
        """Constructor. If `log` is set, turn on logging."""
        self.log = log

    def cross_bodies(self, body_1: List[ast.AST], body_2: List[ast.AST]) -> \
            Tuple[List[ast.AST], List[ast.AST]]:
        """Crossover the statement lists `body_1` x `body_2`. Return new lists."""
        assert isinstance(body_1, list)
        assert isinstance(body_2, list)

        crossover_point_1 = len(body_1) // 2
        crossover_point_2 = len(body_2) // 2
        return (body_1[:crossover_point_1] + body_2[crossover_point_2:],
                body_2[:crossover_point_2] + body_1[crossover_point_1:])
```
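A quick check of the crossover-point arithmetic, using plain strings as stand-ins for statement lists (the method is typed for AST nodes, but the slicing works on any lists):

```python
crosser = CrossoverOperator()
print(crosser.cross_bodies(['a = 1', 'b = 2', 'c = 3'],
                           ['x = 1', 'y = 2', 'z = 3']))
# (['a = 1', 'y = 2', 'z = 3'], ['x = 1', 'b = 2', 'c = 3'])
```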
Here's our `CrossoverOperator` applied on `p1` and `p2`:
```python
tree_p1: ast.Module = ast.parse(inspect.getsource(p1))
tree_p2: ast.Module = ast.parse(inspect.getsource(p2))

body_p1 = tree_p1.body[0].body  # type: ignore
body_p2 = tree_p2.body[0].body  # type: ignore
body_p1

crosser = CrossoverOperator()
tree_p1.body[0].body, tree_p2.body[0].body = \
    crosser.cross_bodies(body_p1, body_p2)  # type: ignore

print_content(astor.to_source(tree_p1), '.py')
print_content(astor.to_source(tree_p2), '.py')
```
#### Applying Crossover on Programs

Applying the crossover operation on arbitrary programs is a bit more complex, though. We first have to _find_ lists of statements that we actually can cross over. The `can_cross()` method returns True if we have a list of statements that we can cross. Python modules and classes are excluded, because changing the ordering of definitions will not have much impact on the program functionality, other than introducing errors due to dependencies.
```python
class CrossoverOperator(CrossoverOperator):
    # In modules and class defs, the ordering of elements does not matter (much)
    SKIP_LIST = {ast.Module, ast.ClassDef}

    def can_cross(self, tree: ast.AST, body_attr: str = 'body') -> bool:
        if any(isinstance(tree, cls) for cls in self.SKIP_LIST):
            return False

        body = getattr(tree, body_attr, [])
        return body and len(body) >= 2
```
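A short sketch of what `can_cross()` accepts and rejects, assuming `tree_p1` from above (a module containing a function with a three-statement body):

```python
crosser = CrossoverOperator()
print(crosser.can_cross(tree_p1))          # False: ast.Module is in SKIP_LIST
print(crosser.can_cross(tree_p1.body[0]))  # True: function body has >= 2 statements
```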
Here comes our method `crossover_attr()`, which searches for crossover possibilities. It takes two ASTs `t1` and `t2` and an attribute (typically `'body'`) and retrieves the attribute lists $l_1$ (from `t1.<attr>`) and $l_2$ (from `t2.<attr>`).

If $l_1$ and $l_2$ can be crossed, it crosses them, and is done. Otherwise:

* If there is a pair of elements $e_1 \in l_1$ and $e_2 \in l_2$ that has the same name – say, functions of the same name – it applies itself to $e_1$ and $e_2$.
* Otherwise, it creates random pairs of elements $e_1 \in l_1$ and $e_2 \in l_2$ and applies itself on these very pairs.

`crossover_attr()` changes `t1` and `t2` in place and returns True if a crossover was found; it returns False otherwise.
```python
class CrossoverOperator(CrossoverOperator):
    def crossover_attr(self, t1: ast.AST, t2: ast.AST, body_attr: str) -> bool:
        """
        Crossover the bodies `body_attr` of two trees `t1` and `t2`.
        Return True if successful.
        """
        assert isinstance(t1, ast.AST)
        assert isinstance(t2, ast.AST)
        assert isinstance(body_attr, str)

        if not getattr(t1, body_attr, None) or not getattr(t2, body_attr, None):
            return False

        if self.crossover_branches(t1, t2):
            return True

        if self.log > 1:
            print(f"Checking {t1}.{body_attr} x {t2}.{body_attr}")

        body_1 = getattr(t1, body_attr)
        body_2 = getattr(t2, body_attr)

        # If both trees have the attribute, we can cross their bodies
        if self.can_cross(t1, body_attr) and self.can_cross(t2, body_attr):
            if self.log:
                print(f"Crossing {t1}.{body_attr} x {t2}.{body_attr}")

            new_body_1, new_body_2 = self.cross_bodies(body_1, body_2)
            setattr(t1, body_attr, new_body_1)
            setattr(t2, body_attr, new_body_2)
            return True

        # Strategy 1: Find matches in class/function of same name
        for child_1 in body_1:
            if hasattr(child_1, 'name'):
                for child_2 in body_2:
                    if (hasattr(child_2, 'name') and
                            child_1.name == child_2.name):
                        if self.crossover_attr(child_1, child_2, body_attr):
                            return True

        # Strategy 2: Find matches anywhere
        for child_1 in random.sample(body_1, len(body_1)):
            for child_2 in random.sample(body_2, len(body_2)):
                if self.crossover_attr(child_1, child_2, body_attr):
                    return True

        return False
```
We have a special case for `if` nodes, where we can cross their body and `else` branches. (In Python, `for` and `while` also have `else` branches, but swapping these with loop bodies is likely to create havoc.)
```python
class CrossoverOperator(CrossoverOperator):
    def crossover_branches(self, t1: ast.AST, t2: ast.AST) -> bool:
        """
        Special case:
        `t1` = `if P: S1 else: S2` x `t2` = `if P': S1' else: S2'`
        becomes
        `t1` = `if P: S2' else: S1'` and `t2` = `if P': S2 else: S1`
        Returns True if successful.
        """
        assert isinstance(t1, ast.AST)
        assert isinstance(t2, ast.AST)

        if (hasattr(t1, 'body') and hasattr(t1, 'orelse') and
                hasattr(t2, 'body') and hasattr(t2, 'orelse')):

            t1 = cast(ast.If, t1)  # keep mypy happy
            t2 = cast(ast.If, t2)

            if self.log:
                print(f"Crossing branches {t1} x {t2}")

            t1.body, t1.orelse, t2.body, t2.orelse = \
                t2.orelse, t2.body, t1.orelse, t1.body
            return True

        return False
```
The method `crossover()` is the main entry point. It checks for the special `if` case as described above; if not, it searches for possible crossover points. It raises `CrossoverError` if not successful.
```python
class CrossoverOperator(CrossoverOperator):
    def crossover(self, t1: ast.AST, t2: ast.AST) -> Tuple[ast.AST, ast.AST]:
        """Do a crossover of ASTs `t1` and `t2`.
        Raises `CrossoverError` if no crossover is found."""
        assert isinstance(t1, ast.AST)
        assert isinstance(t2, ast.AST)

        for body_attr in ['body', 'orelse', 'finalbody']:
            if self.crossover_attr(t1, t2, body_attr):
                return t1, t2

        raise CrossoverError("No crossover found")

class CrossoverError(ValueError):
    pass
```
### End of Excursion

### Crossover in Action

Let us put our `CrossoverOperator` in action. Here is a test case for crossover, involving more deeply nested structures:
```python
def p1():  # type: ignore
    if True:
        print(1)
        print(2)
        print(3)

def p2():  # type: ignore
    if True:
        print(a)
        print(b)
    else:
        print(c)
        print(d)
```
We invoke the `crossover()` method with two ASTs from `p1` and `p2`:
```python
crossover = CrossoverOperator()

tree_p1 = ast.parse(inspect.getsource(p1))
tree_p2 = ast.parse(inspect.getsource(p2))

crossover.crossover(tree_p1, tree_p2);
```
Here is the crossed offspring, mixing statement lists of `p1` and `p2`:
```python
print_content(astor.to_source(tree_p1), '.py')
print_content(astor.to_source(tree_p2), '.py')
```
Here is our special case for `if` nodes in action, crossing our `middle()` tree with `p2`.
```python
middle_t1, middle_t2 = crossover.crossover(middle_tree(),
                                           ast.parse(inspect.getsource(p2)))
```
We see how the resulting offspring encompasses elements of both sources:
```python
print_content(astor.to_source(middle_t1), '.py')
print_content(astor.to_source(middle_t2), '.py')
```
## A Repairer Class

So far, we have applied all our techniques on the `middle()` program only. Let us now create a `Repairer` class that applies automatic program repair on arbitrary Python programs. The idea is that you can apply it on some statistical debugger, for which you have gathered passing and failing test cases, and then invoke its `repair()` method to find a "best" fix candidate:

```python
debugger = OchiaiDebugger()
with debugger:
    ...  # run a passing test
with debugger:
    ...  # run a failing test

repairer = Repairer(debugger)
repairer.repair()
```

### Excursion: Implementing Repairer

The main argument to the `Repairer` constructor is the `debugger` to get information from. On top of that, it also allows customizing the classes used for mutation, crossover, and reduction. Setting `targets` defines the set of functions to repair; setting `sources` defines the set of sources to take repairs from. The constructor then sets up the environment for running tests and repairing, as described below.
```python
from StackInspector import StackInspector  # minor dependency

class Repairer(StackInspector):
    """A class for automatic repair of Python programs"""

    def __init__(self, debugger: RankingDebugger, *,
                 targets: Optional[List[Any]] = None,
                 sources: Optional[List[Any]] = None,
                 log: Union[bool, int] = False,
                 mutator_class: Type = StatementMutator,
                 crossover_class: Type = CrossoverOperator,
                 reducer_class: Type = DeltaDebugger,
                 globals: Optional[Dict[str, Any]] = None):
        """Constructor.
        `debugger`: a `RankingDebugger` to take tests and coverage from.
        `targets`: a list of functions/modules to be repaired.
            (default: the covered functions in `debugger`, except tests)
        `sources`: a list of functions/modules to take repairs from.
            (default: same as `targets`)
        `globals`: if given, a `globals()` dict for executing targets
            (default: `globals()` of caller)"""
        assert isinstance(debugger, RankingDebugger)
        self.debugger = debugger
        self.log = log

        if targets is None:
            targets = self.default_functions()
        if not targets:
            raise ValueError("No targets to repair")

        if sources is None:
            sources = self.default_functions()
        if not sources:
            raise ValueError("No sources to take repairs from")

        if self.debugger.function() is None:
            raise ValueError("Multiple entry points observed")

        self.target_tree: ast.AST = self.parse(targets)
        self.source_tree: ast.AST = self.parse(sources)

        self.log_tree("Target code to be repaired:", self.target_tree)
        if ast.dump(self.target_tree) != ast.dump(self.source_tree):
            self.log_tree("Source code to take repairs from:",
                          self.source_tree)

        self.fitness_cache: Dict[str, float] = {}

        self.mutator: StatementMutator = \
            mutator_class(
                source=all_statements(self.source_tree),
                suspiciousness_func=self.debugger.suspiciousness,
                log=(self.log >= 3))
        self.crossover: CrossoverOperator = crossover_class(log=(self.log >= 3))
        self.reducer: DeltaDebugger = reducer_class(log=(self.log >= 3))

        if globals is None:
            globals = self.caller_globals()  # see below

        self.globals = globals
```
When we access or execute functions, we do so in the caller's environment, not ours. The `caller_globals()` method from `StackInspector` acts as a replacement for `globals()`.

#### Helper Functions

The constructor uses a number of helper functions to create its environment.
```python
class Repairer(Repairer):
    def getsource(self, item: Union[str, Any]) -> str:
        """Get the source for `item`. Can also be a string."""
        if isinstance(item, str):
            item = self.globals[item]
        return inspect.getsource(item)

class Repairer(Repairer):
    def default_functions(self) -> List[Callable]:
        """Return the set of functions to be repaired.
        Functions whose names start or end in `test` are excluded."""
        def is_test(name: str) -> bool:
            return name.startswith('test') or name.endswith('test')

        return [func for func in self.debugger.covered_functions()
                if not is_test(func.__name__)]

class Repairer(Repairer):
    def log_tree(self, description: str, tree: Any) -> None:
        """Print out `tree` as source code prefixed by `description`."""
        if self.log:
            print(description)
            print_content(astor.to_source(tree), '.py')
            print()
            print()

class Repairer(Repairer):
    def parse(self, items: List[Any]) -> ast.AST:
        """Read in a list of items into a single tree"""
        tree = ast.parse("")
        for item in items:
            if isinstance(item, str):
                item = self.globals[item]

            item_lines, item_first_lineno = inspect.getsourcelines(item)

            try:
                item_tree = ast.parse("".join(item_lines))
            except IndentationError:
                # inner function or likewise
                warnings.warn(f"Can't parse {item.__name__}")
                continue

            ast.increment_lineno(item_tree, item_first_lineno - 1)
            tree.body += item_tree.body

        return tree
```
#### Running Tests

Now that we have set up the environment for `Repairer`, we can implement one step of automatic repair after the other. The method `run_test_set()` runs the given `test_set` (`DifferenceDebugger.PASS` or `DifferenceDebugger.FAIL`), returning the number of passed tests. If `validate` is set, it checks whether the outcomes are as expected.
```python
class Repairer(Repairer):
    def run_test_set(self, test_set: str, validate: bool = False) -> int:
        """
        Run given `test_set`
        (`DifferenceDebugger.PASS` or `DifferenceDebugger.FAIL`).
        If `validate` is set, check expectations.
        Return number of passed tests.
        """
        passed = 0
        collectors = self.debugger.collectors[test_set]
        function = self.debugger.function()
        assert function is not None
        # FIXME: function may have been redefined

        for c in collectors:
            if self.log >= 4:
                print(f"Testing {c.id()}...", end="")

            try:
                function(**c.args())
            except Exception as err:
                if self.log >= 4:
                    print(f"failed ({err.__class__.__name__})")

                if validate and test_set == self.debugger.PASS:
                    raise err.__class__(
                        f"{c.id()} should have passed, but failed")
                continue

            passed += 1
            if self.log >= 4:
                print("passed")

            if validate and test_set == self.debugger.FAIL:
                raise FailureNotReproducedError(
                    f"{c.id()} should have failed, but passed")

        return passed

class FailureNotReproducedError(ValueError):
    pass
```
Here is how we use `run_test_set()`:
```python
repairer = Repairer(middle_debugger)
assert repairer.run_test_set(middle_debugger.PASS) == \
    len(MIDDLE_PASSING_TESTCASES)
assert repairer.run_test_set(middle_debugger.FAIL) == 0
```
The method `run_tests()` runs passing and failing tests, weighing the passed test cases to obtain the overall fitness.
```python
class Repairer(Repairer):
    def weight(self, test_set: str) -> float:
        """
        Return the weight of `test_set`
        (`DifferenceDebugger.PASS` or `DifferenceDebugger.FAIL`).
        """
        return {
            self.debugger.PASS: WEIGHT_PASSING,
            self.debugger.FAIL: WEIGHT_FAILING
        }[test_set]

    def run_tests(self, validate: bool = False) -> float:
        """Run passing and failing tests, returning weighted fitness."""
        fitness = 0.0

        for test_set in [self.debugger.PASS, self.debugger.FAIL]:
            passed = self.run_test_set(test_set, validate=validate)
            ratio = passed / len(self.debugger.collectors[test_set])
            fitness += self.weight(test_set) * ratio

        return fitness
```
The method `validate()` ensures the observed tests can be adequately reproduced.
```python
class Repairer(Repairer):
    def validate(self) -> None:
        fitness = self.run_tests(validate=True)
        assert fitness == self.weight(self.debugger.PASS)

repairer = Repairer(middle_debugger)
repairer.validate()
```
#### (Re)defining Functions

Our `run_tests()` method above does not yet redefine the function to be repaired. This is done by the `fitness()` function, which compiles and defines the given repair candidate `tree` before testing it. It caches and returns the fitness.
```python
class Repairer(Repairer):
    def fitness(self, tree: ast.AST) -> float:
        """Test `tree`, returning its fitness"""
        key = cast(str, ast.dump(tree))
        if key in self.fitness_cache:
            return self.fitness_cache[key]

        # Save defs
        original_defs: Dict[str, Any] = {}
        for name in self.toplevel_defs(tree):
            if name in self.globals:
                original_defs[name] = self.globals[name]
            else:
                warnings.warn(f"Couldn't find definition of {repr(name)}")

        assert original_defs, f"Couldn't find any definition"

        if self.log >= 3:
            print("Repair candidate:")
            print_content(astor.to_source(tree), '.py')
            print()

        # Create new definition
        try:
            code = compile(tree, '<Repairer>', 'exec')
        except ValueError:  # Compilation error
            code = None

        if code is None:
            if self.log >= 3:
                print(f"Fitness = 0.0 (compilation error)")

            fitness = 0.0
            return fitness

        # Execute new code, defining new functions in `self.globals`
        exec(code, self.globals)

        # Set new definitions in the namespace (`__globals__`)
        # of the function we will be calling.
        function = self.debugger.function()
        assert function is not None
        assert hasattr(function, '__globals__')

        for name in original_defs:
            function.__globals__[name] = self.globals[name]  # type: ignore

        fitness = self.run_tests(validate=False)

        # Restore definitions
        for name in original_defs:
            function.__globals__[name] = original_defs[name]  # type: ignore
            self.globals[name] = original_defs[name]

        if self.log >= 3:
            print(f"Fitness = {fitness}")

        self.fitness_cache[key] = fitness
        return fitness
```
The helper function `toplevel_defs()` helps save and restore the environment before and after redefining the function under repair.
```python
class Repairer(Repairer):
    def toplevel_defs(self, tree: ast.AST) -> List[str]:
        """Return a list of names of defined functions and classes in `tree`"""
        visitor = DefinitionVisitor()
        visitor.visit(tree)
        assert hasattr(visitor, 'definitions')
        return visitor.definitions

class DefinitionVisitor(NodeVisitor):
    def __init__(self) -> None:
        self.definitions: List[str] = []

    def add_definition(self, node: Union[ast.ClassDef,
                                         ast.FunctionDef,
                                         ast.AsyncFunctionDef]) -> None:
        self.definitions.append(node.name)

    def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
        self.add_definition(node)

    def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
        self.add_definition(node)

    def visit_ClassDef(self, node: ast.ClassDef) -> None:
        self.add_definition(node)
```
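A minimal sketch of `DefinitionVisitor` at work, collecting top-level definition names from a small made-up module:

```python
visitor = DefinitionVisitor()
visitor.visit(ast.parse("def f(): pass\nclass C: pass"))
print(visitor.definitions)  # ['f', 'C']
```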
Here's an example for `fitness()`:
```python
repairer = Repairer(middle_debugger, log=1)

good_fitness = repairer.fitness(middle_tree())
good_fitness

# docassert
assert good_fitness >= 0.99, "fitness() failed"

bad_middle_tree = ast.parse("def middle(x, y, z): return x")
bad_fitness = repairer.fitness(bad_middle_tree)
bad_fitness

# docassert
assert bad_fitness < 0.5, "fitness() failed"
```
#### Repairing

Now for the actual `repair()` method, which creates a `population` and then evolves it until the fitness is 1.0 or the given number of iterations is spent.
```python
import traceback

class Repairer(Repairer):
    def initial_population(self, size: int) -> List[ast.AST]:
        """Return an initial population of size `size`"""
        return [self.target_tree] + \
            [self.mutator.mutate(copy.deepcopy(self.target_tree))
             for i in range(size - 1)]

    def repair(self, population_size: int = POPULATION_SIZE,
               iterations: int = 100) -> Tuple[ast.AST, float]:
        """
        Repair the function we collected test runs from.
        Use a population size of `population_size` and
        at most `iterations` iterations.
        Returns a pair (`ast`, `fitness`) where
        `ast` is the AST of the repaired function, and
        `fitness` is its fitness (between 0 and 1.0).
        """
        self.validate()

        population = self.initial_population(population_size)

        last_key = ast.dump(self.target_tree)

        for iteration in range(iterations):
            population = self.evolve(population)

            best_tree = population[0]
            fitness = self.fitness(best_tree)

            if self.log:
                print(f"Evolving population: "
                      f"iteration{iteration:4}/{iterations} "
                      f"fitness = {fitness:.5} \r", end="")

            if self.log >= 2:
                best_key = ast.dump(best_tree)
                if best_key != last_key:
                    print()
                    print()
                    self.log_tree(f"New best code (fitness = {fitness}):",
                                  best_tree)
                    last_key = best_key

            if fitness >= 1.0:
                break

        if self.log:
            print()

        if self.log and self.log < 2:
            self.log_tree(f"Best code (fitness = {fitness}):", best_tree)

        best_tree = self.reduce(best_tree)
        fitness = self.fitness(best_tree)

        self.log_tree(f"Reduced code (fitness = {fitness}):", best_tree)

        return best_tree, fitness
```
#### Evolving

The evolution of our population takes place in the `evolve()` method. In contrast to the `evolve_middle()` function above, we use crossover to create the offspring, which we still mutate afterwards.
```python
class Repairer(Repairer):
    def evolve(self, population: List[ast.AST]) -> List[ast.AST]:
        """Evolve the candidate population by mutating and crossover."""
        n = len(population)

        # Create offspring as crossover of parents
        offspring: List[ast.AST] = []
        while len(offspring) < n:
            parent_1 = copy.deepcopy(random.choice(population))
            parent_2 = copy.deepcopy(random.choice(population))
            try:
                self.crossover.crossover(parent_1, parent_2)
            except CrossoverError:
                pass  # Just keep parents
            offspring += [parent_1, parent_2]

        # Mutate offspring
        offspring = [self.mutator.mutate(tree) for tree in offspring]

        # Add it to population
        population += offspring

        # Keep the fitter part of the population
        population.sort(key=self.fitness_key, reverse=True)
        population = population[:n]

        return population
```
A second difference is that we sort not only by fitness, but also by tree size – with equal fitness, a smaller tree will thus be favored. This helps keep fixes and patches small.
```python
class Repairer(Repairer):
    def fitness_key(self, tree: ast.AST) -> Tuple[float, int]:
        """Key to be used for sorting the population"""
        tree_size = len([node for node in ast.walk(tree)])
        return (self.fitness(tree), -tree_size)
```
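To illustrate the ordering: since `fitness_key()` returns `(fitness, -tree_size)` and the population is sorted with `reverse=True`, higher fitness wins first, and among equals, the smaller tree. The tuples below are plain stand-ins for such keys:

```python
# Plain tuples standing in for (fitness, -tree_size) keys:
keys = [(0.99, -25), (1.0, -30), (0.99, -12)]
print(sorted(keys, reverse=True))
# [(1.0, -30), (0.99, -12), (0.99, -25)]  -- smaller tree ranks higher at equal fitness
```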
#### Simplifying

The last step in repairing is simplifying the code. As demonstrated in the chapter on [reducing failure-inducing inputs](DeltaDebugger.ipynb), we can use delta debugging on code to get rid of superfluous statements. To this end, we convert the tree to lines, run delta debugging on them, and then convert the result back to a tree.
```python
class Repairer(Repairer):
    def reduce(self, tree: ast.AST) -> ast.AST:
        """Simplify `tree` using delta debugging."""
        original_fitness = self.fitness(tree)
        source_lines = astor.to_source(tree).split('\n')

        with self.reducer:
            self.test_reduce(source_lines, original_fitness)

        reduced_lines = self.reducer.min_args()['source_lines']
        reduced_source = "\n".join(reduced_lines)

        return ast.parse(reduced_source)
```
As discussed above, we simplify the code by having the test function (`test_reduce()`) declare reaching the maximum fitness obtained so far as a "failure". Delta debugging will then simplify the input as long as the "failure" (and hence the maximum fitness obtained) persists.
```python
class Repairer(Repairer):
    def test_reduce(self, source_lines: List[str],
                    original_fitness: float) -> None:
        """Test function for delta debugging."""
        try:
            source = "\n".join(source_lines)
            tree = ast.parse(source)
            fitness = self.fitness(tree)
            assert fitness < original_fitness

        except AssertionError:
            raise
        except SyntaxError:
            raise
        except IndentationError:
            raise
        except Exception:
            # traceback.print_exc()  # Uncomment to see internal errors
            raise
```
### End of Excursion

### Repairer in Action

Let us go and apply `Repairer` in practice. We initialize it with `middle_debugger`, which has (still) collected the passing and failing runs for `middle_test()`. We also set `log` for some diagnostics along the way.
```python
repairer = Repairer(middle_debugger, log=True)
```
We now invoke `repair()` to evolve our population. After a few iterations, we find a best tree with perfect fitness.
```python
best_tree, fitness = repairer.repair()

print_content(astor.to_source(best_tree), '.py')
fitness

# docassert
assert fitness >= 1.0
```
Again, we have a perfect solution. Here, we did not even need to simplify the code in the last iteration, as our `fitness_key()` function favors smaller implementations.

## Removing HTML Markup

Let us apply `Repairer` on our other ongoing example, namely `remove_html_markup()`.
```python
def remove_html_markup(s):  # type: ignore
    tag = False
    quote = False
    out = ""

    for c in s:
        if c == '<' and not quote:
            tag = True
        elif c == '>' and not quote:
            tag = False
        elif c == '"' or c == "'" and tag:
            quote = not quote
        elif not tag:
            out = out + c

    return out

def remove_html_markup_tree() -> ast.AST:
    return ast.parse(inspect.getsource(remove_html_markup))
```
To run `Repairer` on `remove_html_markup()`, we need a test and a test suite. `remove_html_markup_test()` raises an exception if applying `remove_html_markup()` on the given `html` string does not yield the `plain` string.
```python
def remove_html_markup_test(html: str, plain: str) -> None:
    outcome = remove_html_markup(html)
    assert outcome == plain, \
        f"Got {repr(outcome)}, expected {repr(plain)}"
```
Now for the test suite. We use a simple fuzzing scheme to create dozens of passing and failing test cases in `REMOVE_HTML_PASSING_TESTCASES` and `REMOVE_HTML_FAILING_TESTCASES`, respectively.

### Excursion: Creating HTML Test Cases
```python
def random_string(length: int = 5, start: int = ord(' '),
                  end: int = ord('~')) -> str:
    return "".join(chr(random.randrange(start, end + 1))
                   for i in range(length))

random_string()

def random_id(length: int = 2) -> str:
    return random_string(start=ord('a'), end=ord('z'))

random_id()

def random_plain() -> str:
    return random_string().replace('<', '').replace('>', '')

def random_string_noquotes() -> str:
    return random_string().replace('"', '').replace("'", '')

def random_html(depth: int = 0) -> Tuple[str, str]:
    prefix = random_plain()
    tag = random_id()

    if depth > 0:
        html, plain = random_html(depth - 1)
    else:
        html = plain = random_plain()

    attr = random_id()
    value = '"' + random_string_noquotes() + '"'
    postfix = random_plain()

    return f'{prefix}<{tag} {attr}={value}>{html}</{tag}>{postfix}', \
        prefix + plain + postfix

random_html()

def remove_html_testcase(expected: bool = True) -> Tuple[str, str]:
    while True:
        html, plain = random_html()
        outcome = (remove_html_markup(html) == plain)
        if outcome == expected:
            return html, plain

REMOVE_HTML_TESTS = 100
REMOVE_HTML_PASSING_TESTCASES = \
    [remove_html_testcase(True) for i in range(REMOVE_HTML_TESTS)]
REMOVE_HTML_FAILING_TESTCASES = \
    [remove_html_testcase(False) for i in range(REMOVE_HTML_TESTS)]
```
### End of Excursion

Here is a passing test case:
```python
REMOVE_HTML_PASSING_TESTCASES[0]

html, plain = REMOVE_HTML_PASSING_TESTCASES[0]
remove_html_markup_test(html, plain)
```
Here is a failing test case (containing a double quote in the plain text):
```python
REMOVE_HTML_FAILING_TESTCASES[0]

with ExpectError():
    html, plain = REMOVE_HTML_FAILING_TESTCASES[0]
    remove_html_markup_test(html, plain)
```
We run our tests, collecting the outcomes in `html_debugger`.
```python
html_debugger = OchiaiDebugger()

for html, plain in (REMOVE_HTML_PASSING_TESTCASES +
                    REMOVE_HTML_FAILING_TESTCASES):
    with html_debugger:
        remove_html_markup_test(html, plain)
```
The suspiciousness distribution will not be of much help here – pretty much all lines in `remove_html_markup()` have the same suspiciousness.
```python
html_debugger
```
Let us create our repairer and run it.
```python
html_repairer = Repairer(html_debugger, log=True)

best_tree, fitness = html_repairer.repair(iterations=20)

# docassert
assert fitness < 1.0
```
We see that the "best" code is still our original code, with no changes. And we can set `iterations` to 50, 100, 200... – our `Repairer` won't be able to repair it.
quiz("Why couldn't `Repairer()` repair `remove_html_markup()`?", [ "The population is too small!", "The suspiciousness is too evenly distributed!", "We need more test cases!", "We need more iterations!", "There is no statement in the source with a correct condition!", "The population is too big!", ], '5242880 >> 20')
You can explore all of the hypotheses above by changing the appropriate parameters, but you won't be able to change the outcome. The problem is that, unlike `middle()`, there is no statement (or combination thereof) in `remove_html_markup()` that could be used to make the failure go away. For this, we need to mutate another aspect of the code, which we will explore in the next section.

## Mutating Conditions

The `Repairer` class is very configurable. The individual steps in automated repair can all be replaced by providing your own classes in the keyword arguments of its `__init__()` constructor:

* To change fault localization, pass a different `debugger` that is a subclass of `RankingDebugger`.
* To change the mutation operator, set `mutator_class` to a subclass of `StatementMutator`.
* To change the crossover operator, set `crossover_class` to a subclass of `CrossoverOperator`.
* To change the reduction algorithm, set `reducer_class` to a subclass of `Reducer`.

In this section, we will explore how to extend the mutation operator such that it can mutate _conditions_ for control constructs such as `if`, `while`, or `for`. To this end, we introduce a new class `ConditionMutator` subclassing `StatementMutator`.

### Collecting Conditions

Let us start with a few simple supporting functions. The function `all_conditions()` retrieves all control conditions from an AST.
```python
def all_conditions(trees: Union[ast.AST, List[ast.AST]],
                   tp: Optional[Type] = None) -> List[ast.expr]:
    """
    Return all conditions from the AST (or AST list) `trees`.
    If `tp` is given, return only elements of that type.
    """
    if not isinstance(trees, list):
        assert isinstance(trees, ast.AST)
        trees = [trees]

    visitor = ConditionVisitor()
    for tree in trees:
        visitor.visit(tree)
    conditions = visitor.conditions
    if tp is not None:
        conditions = [c for c in conditions if isinstance(c, tp)]

    return conditions
```
`all_conditions()` uses a `ConditionVisitor` class to walk the tree and collect the conditions:
```python
class ConditionVisitor(NodeVisitor):
    def __init__(self) -> None:
        self.conditions: List[ast.expr] = []
        self.conditions_seen: Set[str] = set()
        super().__init__()

    def add_conditions(self, node: ast.AST, attr: str) -> None:
        elems = getattr(node, attr, [])
        if not isinstance(elems, list):
            elems = [elems]

        elems = cast(List[ast.expr], elems)

        for elem in elems:
            elem_str = astor.to_source(elem)
            if elem_str not in self.conditions_seen:
                self.conditions.append(elem)
                self.conditions_seen.add(elem_str)

    def visit_BoolOp(self, node: ast.BoolOp) -> ast.AST:
        self.add_conditions(node, 'values')
        return super().generic_visit(node)

    def visit_UnaryOp(self, node: ast.UnaryOp) -> ast.AST:
        if isinstance(node.op, ast.Not):
            self.add_conditions(node, 'operand')
        return super().generic_visit(node)

    def generic_visit(self, node: ast.AST) -> ast.AST:
        if hasattr(node, 'test'):
            self.add_conditions(node, 'test')
        return super().generic_visit(node)
```
Here are all the conditions in `remove_html_markup()`. This is some material to construct new conditions from.
```python
[astor.to_source(cond).strip()
 for cond in all_conditions(remove_html_markup_tree())]
```
### Mutating Conditions

Here comes our `ConditionMutator` class. We subclass from `StatementMutator` and set an attribute `self.conditions` containing all the conditions in the source. The method `choose_condition()` randomly picks a condition.
```python
class ConditionMutator(StatementMutator):
    """Mutate conditions in an AST"""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Constructor. Arguments are as with `StatementMutator` constructor."""
        super().__init__(*args, **kwargs)
        self.conditions = all_conditions(self.source)
        if self.log:
            print("Found conditions",
                  [astor.to_source(cond).strip()
                   for cond in self.conditions])

    def choose_condition(self) -> ast.expr:
        """Return a random condition from source."""
        return copy.deepcopy(random.choice(self.conditions))
```
The actual mutation takes place in the `swap()` method. If the node to be replaced has a `test` attribute (i.e. a controlling predicate), then we pick a random condition `cond` from the source and randomly choose from:

* **set**: We change `test` to `cond`.
* **not**: We invert `test`.
* **and**: We replace `test` by `cond and test`.
* **or**: We replace `test` by `cond or test`.

Over time, this might lead to operators propagating across the population.
```python
class ConditionMutator(ConditionMutator):
    def choose_bool_op(self) -> str:
        return random.choice(['set', 'not', 'and', 'or'])

    def swap(self, node: ast.AST) -> ast.AST:
        """Replace `node` condition by a condition from `source`"""
        if not hasattr(node, 'test'):
            return super().swap(node)

        node = cast(ast.If, node)

        cond = self.choose_condition()
        new_test = None

        choice = self.choose_bool_op()

        if choice == 'set':
            new_test = cond
        elif choice == 'not':
            new_test = ast.UnaryOp(op=ast.Not(), operand=node.test)
        elif choice == 'and':
            new_test = ast.BoolOp(op=ast.And(), values=[cond, node.test])
        elif choice == 'or':
            new_test = ast.BoolOp(op=ast.Or(), values=[cond, node.test])
        else:
            raise ValueError("Unknown boolean operand")

        if new_test:
            # ast.copy_location(new_test, node)
            node.test = new_test

        return node
```
We can use the mutator just like `StatementMutator`, except that some of the mutations will also include new conditions:
```python
mutator = ConditionMutator(source=all_statements(remove_html_markup_tree()),
                           log=True)

for i in range(10):
    new_tree = mutator.mutate(remove_html_markup_tree())
```
Let us put our new mutator into action, again in a `Repairer()`. To activate it, all we need to do is pass it as the `mutator_class` keyword argument.
```python
condition_repairer = Repairer(html_debugger,
                              mutator_class=ConditionMutator,
                              log=2)
```
We might need more iterations for this one. Let us see...
```python
best_tree, fitness = condition_repairer.repair(iterations=200)

repaired_source = astor.to_source(best_tree)
print_content(repaired_source, '.py')

# docassert
assert fitness >= 1.0
```
Success again! We have automatically repaired `remove_html_markup()` – the resulting code passes all tests, including those that were previously failing. Again, we can present the fix as a patch:
```python
original_source = astor.to_source(remove_html_markup_tree())

for patch in diff(original_source, repaired_source):
    print_patch(patch)
```
However, looking at the patch, one may come up with doubts.
quiz("Is this actually the best solution?", [ "Yes, sure, of course. Why?", "Err - what happened to single quotes?" ], 1 << 1)
Indeed – our solution does not seem to handle single quotes anymore. Why is that so?
quiz("Why aren't single quotes handled in the solution?", [ "Because they're not important. " "I mean, y'know, who uses 'em anyway?", "Because they are not part of our tests? " "Let me look up how they are constructed..." ], 1 << 1)
Correct! Our test cases do not include single quotes – at least not in the interior of HTML tags – and thus, automatic repair did not care to preserve their handling. How can we fix this? An easy way is to include an appropriate test case in our set – a test case that passes with the original `remove_html_markup()`, yet fails with the "repaired" `remove_html_markup()` as shown above.
```python
with html_debugger:
    remove_html_markup_test("<foo quote='>abc'>me</foo>", "me")
```
Let us repeat the repair with the extended test set:
```python
best_tree, fitness = condition_repairer.repair(iterations=200)
```
_____no_output_____
MIT
notebooks/Repairer.ipynb
HGUISEL/debuggingbook
Here is the final tree:
print_content(astor.to_source(best_tree), '.py')
_____no_output_____
MIT
notebooks/Repairer.ipynb
HGUISEL/debuggingbook
And here is its fitness:
fitness

# docassert
assert fitness >= 1.0
_____no_output_____
MIT
notebooks/Repairer.ipynb
HGUISEL/debuggingbook
The revised candidate now passes _all_ tests (including the tricky quote test we added last). Its condition now properly checks for `tag` _and_ both quotes. (The `tag` inside the parentheses is still redundant, but so be it.)

From this example, we can learn a few lessons about the possibilities and risks of automated repair:

* First, automatic repair is highly dependent on the quality of the checking tests. The risk is that the repair may overspecialize towards the tests.
* Second, when based on "plastic surgery", automated repair is highly dependent on the sources that program fragments are chosen from. If there is a hint of a solution somewhere in the code, there is a chance that automated repair will pick it up.
* Third, automatic repair is a deeply heuristic approach. Its behavior will vary widely with any change to the parameters (and the underlying random number generators).
* Fourth, automatic repair can take a long time. The examples we have in this chapter take less than a minute to compute, and neither Python nor our implementation is exactly fast. But as the search space grows, automated repair will take much longer.

On the other hand, even an incomplete automated repair candidate can be much better than nothing at all – it may provide all the essential ingredients (such as the location or the involved variables) for a successful fix. When users of automated repair techniques are aware of its limitations and its assumptions, there is lots of potential in automated repair. Enjoy!

## Limitations

The `Repairer` class is tested on our example programs, but not much more. Things that do not work include:

* Functions with inner functions are not repaired.

## Synopsis

This chapter provides tools and techniques for automated repair of program code. The `Repairer` class takes a `RankingDebugger` debugger as input (such as `OchiaiDebugger` from the [chapter on statistical debugging](StatisticalDebugger.ipynb)). A typical setup looks like this:

```python
from debuggingbook.StatisticalDebugger import OchiaiDebugger

debugger = OchiaiDebugger()
for inputs in TESTCASES:
    with debugger:
        test_foo(inputs)
...

repairer = Repairer(debugger)
```

Here, `test_foo()` is a function that raises an exception if the tested function `foo()` fails. If `foo()` passes, `test_foo()` should not raise an exception.

The `repair()` method of a `Repairer` searches for a repair of the code covered in the debugger (except for methods whose name starts or ends in `test`, such that `foo()`, not `test_foo()`, is repaired). `repair()` returns the best fix candidate as a pair `(tree, fitness)` where `tree` is a [Python abstract syntax tree](http://docs.python.org/3/library/ast) (AST) of the fix candidate, and `fitness` is the fitness of the candidate (a value between 0 and 1). A `fitness` of 1.0 means that the candidate passed all tests. A typical usage looks like this:

```python
import astor

tree, fitness = repairer.repair()
print(astor.to_source(tree), fitness)
```

Here is a complete example for the `middle()` program. This is the original source code of `middle()`:
# ignore
print_content(middle_source, '.py')
_____no_output_____
MIT
notebooks/Repairer.ipynb
HGUISEL/debuggingbook
We set up a function `middle_test()` that tests it. The `middle_debugger` collects test cases and outcomes:
middle_debugger = OchiaiDebugger()

for x, y, z in MIDDLE_PASSING_TESTCASES + MIDDLE_FAILING_TESTCASES:
    with middle_debugger:
        middle_test(x, y, z)
_____no_output_____
MIT
notebooks/Repairer.ipynb
HGUISEL/debuggingbook
The repairer is instantiated with the debugger used (`middle_debugger`):
middle_repairer = Repairer(middle_debugger)
_____no_output_____
MIT
notebooks/Repairer.ipynb
HGUISEL/debuggingbook
The `repair()` method of the repairer attempts to repair the function invoked by the test (`middle()`).
tree, fitness = middle_repairer.repair()
_____no_output_____
MIT
notebooks/Repairer.ipynb
HGUISEL/debuggingbook
The returned AST `tree` can be output via `astor.to_source()`:
print(astor.to_source(tree))
_____no_output_____
MIT
notebooks/Repairer.ipynb
HGUISEL/debuggingbook
The `fitness` value shows how well the repaired program fits the tests. A fitness value of 1.0 indicates that the repaired program satisfies all tests.
fitness

# docassert
assert fitness >= 1.0
_____no_output_____
MIT
notebooks/Repairer.ipynb
HGUISEL/debuggingbook
Hence, the above program is indeed a perfect repair in the sense that all previously failing tests now pass – our repair was successful.

Here are the classes defined in this chapter. A `Repairer` repairs a program, using a `StatementMutator` and a `CrossoverOperator` to evolve a population of candidates.
# ignore
from ClassDiagram import display_class_hierarchy

# ignore
display_class_hierarchy([Repairer, ConditionMutator, CrossoverOperator],
                        abstract_classes=[
                            NodeVisitor,
                            NodeTransformer
                        ],
                        public_methods=[
                            Repairer.__init__,
                            Repairer.repair,
                            StatementMutator.__init__,
                            StatementMutator.mutate,
                            ConditionMutator.__init__,
                            CrossoverOperator.__init__,
                            CrossoverOperator.crossover,
                        ],
                        project='debuggingbook')
_____no_output_____
MIT
notebooks/Repairer.ipynb
HGUISEL/debuggingbook
## Lessons Learned

* Automated repair based on genetic optimization uses five ingredients:
    1. A _test suite_ to determine passing and failing tests
    2. _Defect localization_ (typically obtained from [statistical debugging](StatisticalDebugger.ipynb) with the test suite) to determine potential locations to be fixed
    3. _Random code mutations_ and _crossover operations_ to create and evolve a population of fix candidates
    4. A _fitness function_ and a _selection strategy_ to determine the part of the population that should be evolved further
    5. A _reducer_ such as [delta debugging](DeltaDebugger.ipynb) to simplify the final candidate with the highest fitness.
* The result of automated repair is a _fix candidate_ with the highest fitness for the given tests.
* A _fix candidate_ is not guaranteed to be correct or optimal, but gives important hints on how to fix the program.
* All of the above ingredients offer plenty of settings and alternatives to experiment with.

## Background

The seminal work in automated repair is [GenProg](https://squareslab.github.io/genprog-code/) \cite{LeGoues2012}, which heavily inspired our `Repairer` implementation. Major differences between GenProg and `Repairer` include:

* GenProg includes its own defect localization (which is also dynamically updated), whereas `Repairer` builds on earlier statistical debugging.
* GenProg can apply multiple mutations on programs (or none at all), whereas `Repairer` applies exactly one mutation.
* The `StatementMutator` used by `Repairer` includes various special cases for program structures (`if`, `for`, `while`...), whereas GenProg operates on statements only.
* GenProg has been tested on large production programs.

While GenProg is _the_ seminal work in the area (and arguably the most important software engineering research contribution of the 2010s), there have been a number of important extensions of automated repair. These include:

* *AutoFix* \cite{Pei2014} leverages _program contracts_ (pre- and postconditions) to generate tests and assertions automatically. Not only do such [assertions](Assertions.ipynb) help in fault localization, they also allow for much better validation of fix candidates.
* *SemFix* \cite{Nguyen2013} and its successor *[Angelix](http://angelix.io)* \cite{Mechtaev2016} introduce automated program repair based on _symbolic analysis_ rather than genetic optimization. This allows leveraging program semantics, which GenProg does not consider.

To learn more about automated program repair, see [program-repair.org](http://program-repair.org), the community page dedicated to research in program repair.

## Exercises

### Exercise 1: Automated Repair Parameters

Automated repair is influenced by a large number of design choices – the size of the population, the number of iterations, the genetic optimization strategy, and more. How do changes to these design choices affect its effectiveness?

* Consider the constants defined in this chapter (such as `POPULATION_SIZE` or `WEIGHT_PASSING` vs. `WEIGHT_FAILING`). How do changes affect the effectiveness of automated repair?
* As an effectiveness metric, consider the number of iterations it takes to produce a fix candidate.
* Since genetic optimization is a random algorithm, you need to determine effectiveness as an average over a large number of runs (say, 100). A possible measurement harness is sketched below.
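A minimal measurement harness for Exercise 1 might look as follows. This is a sketch under assumptions: `make_repairer` is a hypothetical factory returning a fresh `Repairer` for the program under test (for example, `lambda: Repairer(middle_debugger)`), and we rely only on the `repair(iterations=...)` API shown in this chapter.

```python
import random

def iterations_to_fix(make_repairer, max_iterations: int = 100) -> int:
    """Smallest iteration budget for which `repair()` reaches fitness 1.0."""
    for budget in range(1, max_iterations + 1):
        tree, fitness = make_repairer().repair(iterations=budget)
        if fitness >= 1.0:
            return budget
    return max_iterations  # no perfect fix found within the budget

def average_iterations(make_repairer, runs: int = 100) -> float:
    """Average the iteration count over many randomized runs."""
    random.seed(0)  # fix the seed once, so the experiment is reproducible
    return sum(iterations_to_fix(make_repairer) for _ in range(runs)) / runs
```

Comparing `average_iterations()` across different settings of `POPULATION_SIZE` or `WEIGHT_PASSING` vs. `WEIGHT_FAILING` then gives a rough effectiveness measure.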
### Exercise 2: Elitism

[_Elitism_](https://en.wikipedia.org/wiki/Genetic_algorithm#Elitism) (also known as _elitist selection_) is a variant of genetic selection in which a small fraction of the fittest candidates of the last population are included unchanged in the offspring.

* Implement elitist selection by subclassing the `evolve()` method. Experiment with various fractions (5%, 10%, 25%) of "elites" and see how this improves results.

### Exercise 3: Evolving Values

Following the steps of `ConditionMutator`, implement a `ValueMutator` class that replaces one constant value by another one found in the source (say, `0` by `1` or `True` by `False`).

For validation, consider the following failure in the `square_root()` function from the [chapter on assertions](Assertions.ipynb):
from Assertions import square_root  # minor dependency

with ExpectError():
    square_root_of_zero = square_root(0)
_____no_output_____
MIT
notebooks/Repairer.ipynb
HGUISEL/debuggingbook
Can your `ValueMutator` automatically fix this failure?

**Solution.** Your solution will be effective if it also includes named constants such as `None`. A hedged sketch of such a mutator follows below; the fixed `square_root()` appears after it.
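Here is one possible sketch of a `ValueMutator`, following the pattern of `ConditionMutator` above. It relies only on `self.source` (the list of statements to draw from, as documented in `StatementMutator`); it is an illustration, not the book's reference solution.

```python
class ValueMutator(StatementMutator):
    """Mutate constant values in an AST (sketch)."""

    def choose_value(self) -> ast.Constant:
        """Return (a copy of) a random constant found in the source."""
        constants = [node for stmt in self.source
                     for node in ast.walk(stmt)
                     if isinstance(node, ast.Constant)]
        if not constants:
            return ast.Constant(value=0)  # fallback: no constants in source
        return copy.deepcopy(random.choice(constants))

    def swap(self, node: ast.AST) -> ast.AST:
        """Replace a constant in `node` by a constant from the source."""
        constants = [n for n in ast.walk(node)
                     if isinstance(n, ast.Constant)]
        if not constants:
            return super().swap(node)  # no constant here: default behavior

        target = random.choice(constants)
        target.value = self.choose_value().value
        return node
```

Since Python 3.8, named constants such as `None`, `True`, and `False` are represented as `ast.Constant` nodes, so this sketch would also cover the `None`-to-`0` replacement needed here.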
import math

def square_root_fixed(x):  # type: ignore
    assert x >= 0  # precondition

    approx = 0  # <-- FIX: Change `None` to 0
    guess = x / 2
    while approx != guess:
        approx = guess
        guess = (approx + x / approx) / 2

    assert math.isclose(approx * approx, x)
    return approx

square_root_fixed(0)
_____no_output_____
MIT
notebooks/Repairer.ipynb
HGUISEL/debuggingbook
# Dynamics 365 Business Central Trouble Shooting Guide (TSG) - Web services

This notebook contains Kusto queries that can help get to the root cause of an issue with web services for an environment. Each section in the notebook contains links to relevant documentation from the performance tuning guide [aka.ms/bcperformance](aka.ms/bcperformance) and the telemetry documentation in [aka.ms/bctelemetry](aka.ms/bctelemetry), as well as Kusto queries that help dive into a specific area.

**NB!** Some of the signal used in this notebook is only available in newer versions of Business Central, so check the version of your environment if some sections do not return any data. The signal documentation states in which version a given signal was introduced.

**NB!** Telemetry for SOAP endpoints does not emit an HTTP status code, so the sections that query for different values of HTTP status will not show results for these requests.

## 1. Connect to Application Insights

First you need to set the notebook kernel to Python 3, load the KQLmagic module (did you install it? see the install cell below), and connect to your Application Insights resource (get the app id and API key from the API access page in the Application Insights portal).
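If the KQLmagic module is not installed in the notebook's Python environment yet, it can typically be installed from within the notebook first. This install cell is an added convenience step, not part of the original guide; it assumes `pip` is available in the kernel.

```python
# Install the KQLmagic module into the notebook kernel (one-time setup)
!pip install Kqlmagic --upgrade
```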
# load the KQLmagic module
%reload_ext Kqlmagic

# Connect to the Application Insights API
%kql appinsights://appid='<add app id from the Application Insights portal>';appkey='<add API key from the Application Insights portal>'
_____no_output_____
MIT
samples/AppInsights/TroubleShootingGuides/Web-services-TSG-version.ipynb
dmc-dk/BCTech
## 2. Define filters

This workbook is designed for troubleshooting a single environment. Please provide values for `aadTenantId` and `environmentName`:
aadTenantId = "<Add AAD tenant id here>"
environmentName = "<add environment name here>"
_____no_output_____
MIT
samples/AppInsights/TroubleShootingGuides/Web-services-TSG-version.ipynb
dmc-dk/BCTech
## Analyze web service usage

Now you can run Kusto queries to look for possible root causes for issues about web services. Either click **Run All** above to run all sections, or scroll down to the type of analysis you want to do and manually run queries.

### Web service requests overview

Performance tuning guide: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-developer#writing-efficient-web-services

Web service telemetry docs: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace

KQL sample: https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/RawData/WebServiceCalls.kql
%%kql
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
traces
| where 1==1
    and customDimensions.aadTenantId == _aadTenantId
    and customDimensions.environmentName == _environmentName
    and customDimensions.eventId == 'RT0008'
    and timestamp > ago(7d)
| extend category = tostring( customDimensions.category )
| summarize request_count=count() by category, bin(timestamp, 1d)
| render timechart title= 'Number of web service requests by category'


%%kql
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
traces
| where 1==1
    and customDimensions.aadTenantId == _aadTenantId
    and customDimensions.environmentName == _environmentName
    and customDimensions.eventId == 'RT0008'
    and timestamp > ago(7d)
| extend category = tostring( customDimensions.category )
       , executionTimeInMS = toreal(totimespan(customDimensions.serverExecutionTime))/10000 // the datatype for executionTime is timespan
| summarize count=count() by executionTime_ms = bin(executionTimeInMS, 100), category
| order by category, executionTime_ms asc
| render columnchart with (ycolumns = count, series = category, title= 'Execution time (in milliseconds) of web service requests by category' )
_____no_output_____
MIT
samples/AppInsights/TroubleShootingGuides/Web-services-TSG-version.ipynb
dmc-dk/BCTech
### Web service throttling

Operational Limits for Business Central Online:
* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/operational-limits-online#query-limits

Telemetry docs:
* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace
%%kql
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
traces
| where 1==1
    and customDimensions.aadTenantId == _aadTenantId
    and customDimensions.environmentName == _environmentName
    and customDimensions.eventId == 'RT0008'
    and timestamp > ago(7d)
| extend httpStatusCode = tostring( customDimensions.httpStatusCode )
| summarize count() by bin(timestamp, 1d), httpStatusCode
| render timechart title= 'Number of web service requests by http status code'
_____no_output_____
MIT
samples/AppInsights/TroubleShootingGuides/Web-services-TSG-version.ipynb
dmc-dk/BCTech
### Web service requests (Access denied)

The user who made the request doesn't have proper permissions. For more information, see
* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/webservices/web-services-authentication
* https://docs.microsoft.com/en-us/dynamics365/business-central/ui-define-granular-permissions

Telemetry docs:
* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace
%%kql
//
// Top 10 endpoint requests with access denied
//
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
traces
| where 1==1
    and customDimensions.aadTenantId == _aadTenantId
    and customDimensions.environmentName == _environmentName
    and customDimensions.eventId == 'RT0008'
    and timestamp > ago(7d)
    and customDimensions.httpStatusCode == '401'
| limit 10
_____no_output_____
MIT
samples/AppInsights/TroubleShootingGuides/Web-services-TSG-version.ipynb
dmc-dk/BCTech
### Web service requests (Not found)

The given endpoint was not valid. See
* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/webservices/publish-web-service

Telemetry docs:
* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace
%%kql
//
// Top 10 non-valid endpoints called
//
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
traces
| where 1==1
    and customDimensions.aadTenantId == _aadTenantId
    and customDimensions.environmentName == _environmentName
    and customDimensions.eventId == 'RT0008'
    and timestamp > ago(7d)
    and customDimensions.httpStatusCode == '404'
| summarize number_of_requests=count() by endpoint = tostring( customDimensions.endpoint ), alObjectName = tostring( customDimensions.alObjectName ), alObjectId = tostring( customDimensions.alObjectId )
| order by number_of_requests desc
| limit 10
_____no_output_____
MIT
samples/AppInsights/TroubleShootingGuides/Web-services-TSG-version.ipynb
dmc-dk/BCTech
### Web service requests (Request timed out)

The request took longer to complete than the threshold configured for the service. See
* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/operational-limits-online#ODataServices

Telemetry docs:
* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace

Performance tuning guide (you need to tune these endpoints to make them go faster):
* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-developer#writing-efficient-web-services
%%kql
//
// Top 10 endpoints that time out
//
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
traces
| where 1==1
    and customDimensions.aadTenantId == _aadTenantId
    and customDimensions.environmentName == _environmentName
    and customDimensions.eventId == 'RT0008'
    and timestamp > ago(7d)
    and customDimensions.httpStatusCode == '408'
| summarize number_of_requests=count() by endpoint = tostring( customDimensions.endpoint ), alObjectName = tostring( customDimensions.alObjectName ), alObjectId = tostring( customDimensions.alObjectId )
| order by number_of_requests desc
| limit 10
_____no_output_____
MIT
samples/AppInsights/TroubleShootingGuides/Web-services-TSG-version.ipynb
dmc-dk/BCTech
### Web service requests (Too Many Requests)

The request exceeded the maximum number of simultaneous requests allowed on the service. See
* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/operational-limits-online#ODataServices

Telemetry docs:
* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace

Performance tuning guide (you need to make your web service client back off and retry; a minimal client-side sketch follows below):
* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-developer#writing-efficient-web-services
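As an illustration of the back-off-and-retry advice above, here is a minimal client-side sketch in Python. It is not part of the original TSG: the use of the `requests` library, the endpoint URL, and the exact status codes to retry on (426 as queried below; 429 on newer versions) are assumptions.

```python
import time
import requests  # assumption: a generic HTTP client library

def call_with_retry(url: str, auth=None, max_retries: int = 5,
                    base_delay: float = 1.0):
    """GET `url`, backing off exponentially while the service throttles us."""
    for attempt in range(max_retries):
        response = requests.get(url, auth=auth)
        if response.status_code not in (426, 429):  # not throttled: done
            return response
        time.sleep(base_delay * (2 ** attempt))  # exponential backoff
    return response  # still throttled after max_retries; let the caller decide
```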
%%kql
//
// Top 10 endpoints that get throttled
//
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
traces
| where 1==1
    and customDimensions.aadTenantId == _aadTenantId
    and customDimensions.environmentName == _environmentName
    and customDimensions.eventId == 'RT0008'
    and timestamp > ago(7d)
    and customDimensions.httpStatusCode == '426'
| summarize number_of_requests=count() by endpoint = tostring( customDimensions.endpoint ), alObjectName = tostring( customDimensions.alObjectName ), alObjectId = tostring( customDimensions.alObjectId )
| order by number_of_requests desc
| limit 10
_____no_output_____
MIT
samples/AppInsights/TroubleShootingGuides/Web-services-TSG-version.ipynb
dmc-dk/BCTech
# Data Science Academy - Python Fundamentals - Chapter 8

Download: http://github.com/dsacademybr
# Python language version
from platform import python_version
print('Python version used in this Jupyter Notebook:', python_version())
_____no_output_____
MIT
Data Science Academy/Cap08/Notebooks/DSA-Python-Cap08-07-StatsModels.ipynb
srgbastos/Artificial-Intelligence
## Statsmodels Linear Regression Models
# For plotting graphs
from pylab import *
%matplotlib inline

import numpy as np
import pandas as pd
import statsmodels as st
import sys
import warnings

if not sys.warnoptions:
    warnings.simplefilter("ignore")
    warnings.simplefilter(action='ignore', category=FutureWarning)
    warnings.filterwarnings("ignore", category=FutureWarning)

import statsmodels.api as sm
from statsmodels.sandbox.regression.predstd import wls_prediction_std

np.random.seed(9876789)

np.__version__

pd.__version__

st.__version__

# Creating artificial data
nsample = 100
x = np.linspace(0, 10, 100)
X = np.column_stack((x, x**2))
beta = np.array([1, 0.1, 10])
e = np.random.normal(size=nsample)

X = sm.add_constant(X)
y = np.dot(X, beta) + e

model = sm.OLS(y, X)
results = model.fit()
print(results.summary())

print('Parameters: ', results.params)
print('R2: ', results.rsquared)

nsample = 50
sig = 0.5
x = np.linspace(0, 20, nsample)
X = np.column_stack((x, np.sin(x), (x-5)**2, np.ones(nsample)))
beta = [0.5, 0.5, -0.02, 5.]

y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)

res = sm.OLS(y, X).fit()
print(res.summary())

print('Parameters: ', res.params)
print('Standard errors: ', res.bse)
print('Predicted values: ', res.predict())

prstd, iv_l, iv_u = wls_prediction_std(res)

fig, ax = plt.subplots(figsize=(8,6))
ax.plot(x, y, 'o', label="data")
ax.plot(x, y_true, 'b-', label="True")
ax.plot(x, res.fittedvalues, 'r--.', label="OLS")
ax.plot(x, iv_u, 'r--')
ax.plot(x, iv_l, 'r--')
ax.legend(loc='best')
_____no_output_____
MIT
Data Science Academy/Cap08/Notebooks/DSA-Python-Cap08-07-StatsModels.ipynb
srgbastos/Artificial-Intelligence