The helper function `toplevel_defs()` helps save and restore the environment before and after redefining the function under repair.
```python
class Repairer(Repairer):
    def toplevel_defs(self, tree: ast.AST) -> List[str]:
        """Return a list of names of defined functions and classes in `tree`"""
        visitor = DefinitionVisitor()
        visitor.visit(tree)
        assert hasattr(visitor, 'definitions')
        return visitor.definitions

class DefinitionVisitor(NodeVisitor):
    def __init__(self) -> None:
        self.definitions: List[str] = []

    def add_definition(self, node: Union[ast.ClassDef,
                                         ast.FunctionDef,
                                         ast.AsyncFunctionDef]) -> None:
        self.definitions.append(node.name)

    def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
        self.add_definition(node)

    def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
        self.add_definition(node)

    def visit_ClassDef(self, node: ast.ClassDef) -> None:
        self.add_definition(node)
```
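As a quick check, `DefinitionVisitor` collects the top-level names from a small module (a minimal illustrative snippet, not from the chapter; nested definitions are not descended into, since `visit_FunctionDef()` does not recurse):

```python
tree = ast.parse("def f(): pass\nclass C: pass")
visitor = DefinitionVisitor()
visitor.visit(tree)
visitor.definitions  # ['f', 'C']
```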
Here's an example for `fitness()`:
```python
repairer = Repairer(middle_debugger, log=1)

good_fitness = repairer.fitness(middle_tree())
good_fitness

# ignore
assert good_fitness >= 0.99, "fitness() failed"

bad_middle_tree = ast.parse("def middle(x, y, z): return x")
bad_fitness = repairer.fitness(bad_middle_tree)
bad_fitness

# ignore
assert bad_fitness < 0.5, "fitness() failed"
```
## Repairing

Now for the actual `repair()` method, which creates a `population` and then evolves it until the fitness is 1.0 or the given number of iterations is spent.
```python
import traceback

class Repairer(Repairer):
    def initial_population(self, size: int) -> List[ast.AST]:
        """Return an initial population of size `size`"""
        return [self.target_tree] + \
            [self.mutator.mutate(copy.deepcopy(self.target_tree))
             for i in range(size - 1)]

    def repair(self, population_size: int = POPULATION_SIZE,
               iterations: int = 100) -> Tuple[ast.AST, float]:
        """
        Repair the function we collected test runs from.
        Use a population size of `population_size` and
        at most `iterations` iterations.
        Returns a pair (`ast`, `fitness`) where
        `ast` is the AST of the repaired function, and
        `fitness` is its fitness (between 0 and 1.0)
        """
        self.validate()

        population = self.initial_population(population_size)

        last_key = ast.dump(self.target_tree)

        for iteration in range(iterations):
            population = self.evolve(population)

            best_tree = population[0]
            fitness = self.fitness(best_tree)

            if self.log:
                print(f"Evolving population: "
                      f"iteration{iteration:4}/{iterations} "
                      f"fitness = {fitness:.5} \r", end="")

            if self.log >= 2:
                best_key = ast.dump(best_tree)
                if best_key != last_key:
                    print()
                    print()
                    self.log_tree(f"New best code (fitness = {fitness}):",
                                  best_tree)
                    last_key = best_key

            if fitness >= 1.0:
                break

        if self.log:
            print()

        if self.log and self.log < 2:
            self.log_tree(f"Best code (fitness = {fitness}):", best_tree)

        best_tree = self.reduce(best_tree)
        fitness = self.fitness(best_tree)

        self.log_tree(f"Reduced code (fitness = {fitness}):", best_tree)

        return best_tree, fitness
```
## Evolving

The evolution of our population takes place in the `evolve()` method. In contrast to the `evolve_middle()` function above, we use crossover to create the offspring, which we still mutate afterwards.
```python
class Repairer(Repairer):
    def evolve(self, population: List[ast.AST]) -> List[ast.AST]:
        """Evolve the candidate population by mutating and crossover."""
        n = len(population)

        # Create offspring as crossover of parents
        offspring: List[ast.AST] = []
        while len(offspring) < n:
            parent_1 = copy.deepcopy(random.choice(population))
            parent_2 = copy.deepcopy(random.choice(population))
            try:
                self.crossover.crossover(parent_1, parent_2)
            except CrossoverError:
                pass  # Just keep parents
            offspring += [parent_1, parent_2]

        # Mutate offspring
        offspring = [self.mutator.mutate(tree) for tree in offspring]

        # Add it to population
        population += offspring

        # Keep the fitter part of the population
        population.sort(key=self.fitness_key, reverse=True)
        population = population[:n]

        return population
```
A second difference is that we not only sort by fitness, but also by tree size – with equal fitness, a smaller tree will thus be favored. This helps keep fixes and patches small.
```python
class Repairer(Repairer):
    def fitness_key(self, tree: ast.AST) -> Tuple[float, int]:
        """Key to be used for sorting the population"""
        tree_size = len([node for node in ast.walk(tree)])
        return (self.fitness(tree), -tree_size)
```
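To see why this key favors smaller trees at equal fitness, here is a minimal illustration with plain tuples (the names and numbers are made up):

```python
# Two hypothetical candidates with equal fitness, but sizes 40 and 12
candidates = [("large", (0.9, -40)), ("small", (0.9, -12))]
candidates.sort(key=lambda c: c[1], reverse=True)
[name for name, key in candidates]  # ['small', 'large']: the smaller tree comes first
```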
## Simplifying

The last step in repairing is simplifying the code. As demonstrated in the chapter on [reducing failure-inducing inputs](DeltaDebugger.ipynb), we can use delta debugging on code to get rid of superfluous statements. To this end, we convert the tree to lines, run delta debugging on them, and then convert it back to a tree.
```python
class Repairer(Repairer):
    def reduce(self, tree: ast.AST) -> ast.AST:
        """Simplify `tree` using delta debugging."""
        original_fitness = self.fitness(tree)
        source_lines = astor.to_source(tree).split('\n')

        with self.reducer:
            self.test_reduce(source_lines, original_fitness)

        reduced_lines = self.reducer.min_args()['source_lines']
        reduced_source = "\n".join(reduced_lines)

        return ast.parse(reduced_source)
```
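As a refresher on the `DeltaDebugger` interface used above (a `with` block plus `min_args()`), here is a minimal sketch; `mystery()` is a hypothetical function, and the exact import path assumes the pip-installed `debuggingbook` package:

```python
from debuggingbook.DeltaDebugger import DeltaDebugger  # inside the book: from DeltaDebugger import ...

def mystery(lines):
    assert "bad" not in lines  # "fails" whenever 'bad' is present

with DeltaDebugger() as dd:
    mystery(lines=["good", "bad", "ok"])

dd.min_args()  # minimal failure-inducing arguments, e.g. {'lines': ['bad']}
```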
As discussed above, we simplify the code by having the test function (`test_reduce()`) declare reaching the maximum fitness obtained so far as a "failure". Delta debugging will then simplify the input as long as the "failure" (and hence the maximum fitness obtained) persists.
```python
class Repairer(Repairer):
    def test_reduce(self, source_lines: List[str], original_fitness: float) -> None:
        """Test function for delta debugging."""
        try:
            source = "\n".join(source_lines)
            tree = ast.parse(source)
            fitness = self.fitness(tree)
            assert fitness < original_fitness

        except AssertionError:
            raise

        except SyntaxError:
            raise

        except IndentationError:
            raise

        except Exception:
            # traceback.print_exc()  # Uncomment to see internal errors
            raise
```
End of Excursion

## Repairer in Action

Let us go and apply `Repairer` in practice. We initialize it with `middle_debugger`, which has (still) collected the passing and failing runs for `middle_test()`. We also set `log` for some diagnostics along the way.
```python
repairer = Repairer(middle_debugger, log=True)
```
We now invoke `repair()` to evolve our population. After a few iterations, we find a best tree with perfect fitness.
```python
best_tree, fitness = repairer.repair()
print_content(astor.to_source(best_tree), '.py')
fitness
```
Again, we have a perfect solution. Here, we did not even need to simplify the code in the last iteration, as our `fitness_key()` function favors smaller implementations.

## Removing HTML Markup

Let us apply `Repairer` on our other ongoing example, namely `remove_html_markup()`.
```python
def remove_html_markup(s):  # type: ignore
    tag = False
    quote = False
    out = ""

    for c in s:
        if c == '<' and not quote:
            tag = True
        elif c == '>' and not quote:
            tag = False
        elif c == '"' or c == "'" and tag:
            quote = not quote
        elif not tag:
            out = out + c

    return out

def remove_html_markup_tree() -> ast.AST:
    return ast.parse(inspect.getsource(remove_html_markup))
```
To run `Repairer` on `remove_html_markup()`, we need a test and a test suite. `remove_html_markup_test()` raises an exception if applying `remove_html_markup()` on the given `html` string does not yield the `plain` string.
```python
def remove_html_markup_test(html: str, plain: str) -> None:
    outcome = remove_html_markup(html)
    assert outcome == plain, \
        f"Got {repr(outcome)}, expected {repr(plain)}"
```
Now for the test suite. We use a simple fuzzing scheme to create dozens of passing and failing test cases in `REMOVE_HTML_PASSING_TESTCASES` and `REMOVE_HTML_FAILING_TESTCASES`, respectively.

## Excursion: Creating HTML Test Cases
```python
def random_string(length: int = 5, start: int = ord(' '), end: int = ord('~')) -> str:
    return "".join(chr(random.randrange(start, end + 1)) for i in range(length))

random_string()

def random_id(length: int = 2) -> str:
    return random_string(start=ord('a'), end=ord('z'))

random_id()

def random_plain() -> str:
    return random_string().replace('<', '').replace('>', '')

def random_string_noquotes() -> str:
    return random_string().replace('"', '').replace("'", '')

def random_html(depth: int = 0) -> Tuple[str, str]:
    prefix = random_plain()
    tag = random_id()

    if depth > 0:
        html, plain = random_html(depth - 1)
    else:
        html = plain = random_plain()

    attr = random_id()
    value = '"' + random_string_noquotes() + '"'
    postfix = random_plain()

    return f'{prefix}<{tag} {attr}={value}>{html}</{tag}>{postfix}', \
        prefix + plain + postfix

random_html()

def remove_html_testcase(expected: bool = True) -> Tuple[str, str]:
    while True:
        html, plain = random_html()
        outcome = (remove_html_markup(html) == plain)
        if outcome == expected:
            return html, plain

REMOVE_HTML_TESTS = 100
REMOVE_HTML_PASSING_TESTCASES = \
    [remove_html_testcase(True) for i in range(REMOVE_HTML_TESTS)]
REMOVE_HTML_FAILING_TESTCASES = \
    [remove_html_testcase(False) for i in range(REMOVE_HTML_TESTS)]
```
End of Excursion

Here is a passing test case:
```python
REMOVE_HTML_PASSING_TESTCASES[0]

html, plain = REMOVE_HTML_PASSING_TESTCASES[0]
remove_html_markup_test(html, plain)
```
Here is a failing test case (containing a double quote in the plain text):
```python
REMOVE_HTML_FAILING_TESTCASES[0]

with ExpectError():
    html, plain = REMOVE_HTML_FAILING_TESTCASES[0]
    remove_html_markup_test(html, plain)
```
We run our tests, collecting the outcomes in `html_debugger`.
```python
html_debugger = OchiaiDebugger()

for html, plain in (REMOVE_HTML_PASSING_TESTCASES +
                    REMOVE_HTML_FAILING_TESTCASES):
    with html_debugger:
        remove_html_markup_test(html, plain)
```
The suspiciousness distribution will not be of much help here – pretty much all lines in `remove_html_markup()` have the same suspiciousness.
```python
html_debugger
```
Let us create our repairer and run it.
```python
html_repairer = Repairer(html_debugger, log=True)
best_tree, fitness = html_repairer.repair(iterations=20)
```
We see that the "best" code is still our original code, with no changes. And we can set `iterations` to 50, 100, 200... – our `Repairer` won't be able to repair it.
quiz("Why couldn't `Repairer()` repair `remove_html_markup()`?", [ "The population is too small!", "The suspiciousness is too evenly distributed!", "We need more test cases!", "We need more iterations!", "There is no statement in the source with a correct condition!", "The population is too big!", ], '5242880 >> 20')
You can explore all of the hypotheses above by changing the appropriate parameters, but you won't be able to change the outcome. The problem is that, unlike `middle()`, there is no statement (or combination thereof) in `remove_html_markup()` that could be used to make the failure go away. For this, we need to mutate another aspect of the code, which we will explore in the next section.

## Mutating Conditions

The `Repairer` class is very configurable. The individual steps in automated repair can all be replaced by providing your own classes in the keyword arguments of its `__init__()` constructor:

* To change fault localization, pass a different `debugger` that is a subclass of `RankingDebugger`.
* To change the mutation operator, set `mutator_class` to a subclass of `StatementMutator`.
* To change the crossover operator, set `crossover_class` to a subclass of `CrossoverOperator`.
* To change the reduction algorithm, set `reducer_class` to a subclass of `Reducer`.

In this section, we will explore how to extend the mutation operator such that it can mutate _conditions_ for control constructs such as `if`, `while`, or `for`. To this end, we introduce a new class `ConditionMutator` subclassing `StatementMutator`.

### Collecting Conditions

Let us start with a few simple supporting functions. The function `all_conditions()` retrieves all control conditions from an AST.
```python
def all_conditions(trees: Union[ast.AST, List[ast.AST]],
                   tp: Optional[Type] = None) -> List[ast.expr]:
    """
    Return all conditions from the AST (or AST list) `trees`.
    If `tp` is given, return only elements of that type.
    """
    if not isinstance(trees, list):
        assert isinstance(trees, ast.AST)
        trees = [trees]

    visitor = ConditionVisitor()
    for tree in trees:
        visitor.visit(tree)
    conditions = visitor.conditions
    if tp is not None:
        conditions = [c for c in conditions if isinstance(c, tp)]

    return conditions
```
`all_conditions()` uses a `ConditionVisitor` class to walk the tree and collect the conditions:
```python
class ConditionVisitor(NodeVisitor):
    def __init__(self) -> None:
        self.conditions: List[ast.expr] = []
        self.conditions_seen: Set[str] = set()
        super().__init__()

    def add_conditions(self, node: ast.AST, attr: str) -> None:
        elems = getattr(node, attr, [])
        if not isinstance(elems, list):
            elems = [elems]

        elems = cast(List[ast.expr], elems)

        for elem in elems:
            elem_str = astor.to_source(elem)
            if elem_str not in self.conditions_seen:
                self.conditions.append(elem)
                self.conditions_seen.add(elem_str)

    def visit_BoolOp(self, node: ast.BoolOp) -> ast.AST:
        self.add_conditions(node, 'values')
        return super().generic_visit(node)

    def visit_UnaryOp(self, node: ast.UnaryOp) -> ast.AST:
        if isinstance(node.op, ast.Not):
            self.add_conditions(node, 'operand')
        return super().generic_visit(node)

    def generic_visit(self, node: ast.AST) -> ast.AST:
        if hasattr(node, 'test'):
            self.add_conditions(node, 'test')
        return super().generic_visit(node)
```
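As a quick sanity check (an illustrative snippet, not from the chapter), note how compound conditions are decomposed into their parts:

```python
tree = ast.parse("if x > 0 and not done: pass")
[astor.to_source(c).strip() for c in all_conditions(tree)]
# ['x > 0 and not done', 'x > 0', 'not done', 'done']
```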
Here are all the conditions in `remove_html_markup()`. This is some material to construct new conditions from.
```python
[astor.to_source(cond).strip()
 for cond in all_conditions(remove_html_markup_tree())]
```
### Mutating Conditions

Here comes our `ConditionMutator` class. We subclass from `StatementMutator` and set an attribute `self.conditions` containing all the conditions in the source. The method `choose_condition()` randomly picks a condition.
```python
class ConditionMutator(StatementMutator):
    """Mutate conditions in an AST"""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Constructor. Arguments are as with `StatementMutator` constructor."""
        super().__init__(*args, **kwargs)
        self.conditions = all_conditions(self.source)
        if self.log:
            print("Found conditions",
                  [astor.to_source(cond).strip()
                   for cond in self.conditions])

    def choose_condition(self) -> ast.expr:
        """Return a random condition from source."""
        return copy.deepcopy(random.choice(self.conditions))
```
The actual mutation takes place in the `swap()` method. If the node to be replaced has a `test` attribute (i.e. a controlling predicate), then we pick a random condition `cond` from the source and randomly choose from:

* **set**: We change `test` to `cond`.
* **not**: We invert `test`.
* **and**: We replace `test` by `cond and test`.
* **or**: We replace `test` by `cond or test`.

Over time, this might lead to operators propagating across the population.
```python
class ConditionMutator(ConditionMutator):
    def choose_bool_op(self) -> str:
        return random.choice(['set', 'not', 'and', 'or'])

    def swap(self, node: ast.AST) -> ast.AST:
        """Replace `node` condition by a condition from `source`"""
        if not hasattr(node, 'test'):
            return super().swap(node)

        node = cast(ast.If, node)

        cond = self.choose_condition()
        new_test = None

        choice = self.choose_bool_op()

        if choice == 'set':
            new_test = cond
        elif choice == 'not':
            new_test = ast.UnaryOp(op=ast.Not(), operand=node.test)
        elif choice == 'and':
            new_test = ast.BoolOp(op=ast.And(), values=[cond, node.test])
        elif choice == 'or':
            new_test = ast.BoolOp(op=ast.Or(), values=[cond, node.test])
        else:
            raise ValueError("Unknown boolean operand")

        if new_test:
            # ast.copy_location(new_test, node)
            node.test = new_test

        return node
```
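To illustrate what the `'and'` choice constructs, here is a minimal sketch on a hand-parsed `if` statement (an illustrative snippet, not from the chapter):

```python
tree = ast.parse("if tag: pass")
if_node = tree.body[0]
cond = ast.parse("quote", mode='eval').body  # a condition we might have picked
if_node.test = ast.BoolOp(op=ast.And(), values=[cond, if_node.test])
print(astor.to_source(tree))  # if quote and tag: ...
```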
We can use the mutator just like `StatementMutator`, except that some of the mutations will also include new conditions:
```python
mutator = ConditionMutator(source=all_statements(remove_html_markup_tree()),
                           log=True)

for i in range(10):
    new_tree = mutator.mutate(remove_html_markup_tree())
```
Let us put our new mutator into action, again in a `Repairer()`. To activate it, all we need to do is pass it as the `mutator_class` keyword argument.
```python
condition_repairer = Repairer(html_debugger,
                              mutator_class=ConditionMutator,
                              log=2)
```
We might need more iterations for this one. Let us see...
```python
best_tree, fitness = condition_repairer.repair(iterations=200)

repaired_source = astor.to_source(best_tree)
print_content(repaired_source, '.py')
```
Success again! We have automatically repaired `remove_html_markup()` – the resulting code passes all tests, including those that were previously failing. Again, we can present the fix as a patch:
```python
original_source = astor.to_source(remove_html_markup_tree())

for patch in diff(original_source, repaired_source):
    print_patch(patch)
```
However, looking at the patch, one may have doubts.
quiz("Is this actually the best solution?", [ "Yes, sure, of course. Why?", "Err - what happened to single quotes?" ], 1 << 1)
Indeed – our solution does not seem to handle single quotes anymore. Why is that so?
quiz("Why aren't single quotes handled in the solution?", [ "Because they're not important. I mean, who uses 'em anyway?", "Because they are not part of our tests? " "Let me look up how they are constructed..." ], 1 << 1)
Correct! Our test cases do not include single quotes – at least not in the interior of HTML tags – and thus, automatic repair did not care to preserve their handling. How can we fix this? An easy way is to include an appropriate test case in our set – a test case that passes with the original `remove_html_markup()`, yet fails with the "repaired" `remove_html_markup()` as shown above.
```python
with html_debugger:
    remove_html_markup_test("<foo quote='>abc'>me</foo>", "me")
```
Let us repeat the repair with the extended test set:
```python
best_tree, fitness = condition_repairer.repair(iterations=200)
```
Here is the final tree:
```python
print_content(astor.to_source(best_tree), '.py')
```
And here is its fitness:
```python
fitness
```
The revised candidate now passes _all_ tests (including the tricky quote test we added last). Its condition now properly checks for `tag` _and_ both quotes. (The `tag` inside the parentheses is still redundant, but so be it.)

From this example, we can learn a few lessons about the possibilities and risks of automated repair:

* First, automatic repair is highly dependent on the quality of the checking tests. The risk is that the repair may overspecialize towards the tests.
* Second, automated repair is highly dependent on the sources that program fragments are chosen from. If there is a hint of a solution somewhere in the code, there is a chance that automated repair will pick it up.
* Third, automatic repair is a deeply heuristic approach. Its behavior will vary widely with any change to the parameters (and the underlying random number generators).
* Fourth, automatic repair can take a long time. The examples we have in this chapter take less than a minute to compute, and neither Python nor our implementation is exactly fast. But as the search space grows, automated repair will take much longer.

On the other hand, even an incomplete automated repair candidate can be much better than nothing at all – it may provide all the essential ingredients (such as the location or the involved variables) for a successful fix. When users of automated repair techniques are aware of its limitations and its assumptions, there is lots of potential in automated repair. Enjoy!

## Limitations

The `Repairer` class is hardly tested. Things that do not work include:

* Functions with inner functions are not repaired.

## Synopsis

This chapter provides tools and techniques for automated repair of program code. The `Repairer()` class takes a `RankingDebugger` debugger as input (such as `OchiaiDebugger` from [the chapter on statistical debugging](StatisticalDebugger.ipynb)). A typical setup looks like this:

```python
from debuggingbook.StatisticalDebugger import OchiaiDebugger

debugger = OchiaiDebugger()
for inputs in TESTCASES:
    with debugger:
        test_foo(inputs)
...

repairer = Repairer(debugger)
```

Here, `test_foo()` is a function that raises an exception if the tested function `foo()` fails. If `foo()` passes, `test_foo()` should not raise an exception.

The `repair()` method of a `Repairer` searches for a repair of the code covered in the debugger (except for methods starting or ending in `test`, such that `foo()`, not `test_foo()` is repaired). `repair()` returns the best fix candidate as a pair `(tree, fitness)` where `tree` is a [Python abstract syntax tree](http://docs.python.org/3/library/ast) (AST) of the fix candidate, and `fitness` is the fitness of the candidate (a value between 0 and 1). A `fitness` of 1.0 means that the candidate passed all tests. A typical usage looks like this:

```python
import astor

tree, fitness = repairer.repair()
print(astor.to_source(tree), fitness)
```

Here is a complete example for the `middle()` program. This is the original source code of `middle()`:
```python
# ignore
print_content(middle_source, '.py')
```
We set up a function `middle_test()` that tests it. The `middle_debugger` collects testcases and outcomes:
```python
middle_debugger = OchiaiDebugger()

for x, y, z in MIDDLE_PASSING_TESTCASES + MIDDLE_FAILING_TESTCASES:
    with middle_debugger:
        middle_test(x, y, z)
```
The repairer attempts to repair the invoked function (`middle()`). The returned AST `tree` can be output via `astor.to_source()`:
```python
middle_repairer = Repairer(middle_debugger)
tree, fitness = middle_repairer.repair()
print(astor.to_source(tree), fitness)
```
Here are the classes defined in this chapter. A `Repairer` repairs a program, using a `StatementMutator` and a `CrossoverOperator` to evolve a population of candidates.
```python
# ignore
from ClassDiagram import display_class_hierarchy

# ignore
display_class_hierarchy([Repairer, ConditionMutator, CrossoverOperator],
                        abstract_classes=[
                            NodeVisitor,
                            NodeTransformer
                        ],
                        public_methods=[
                            Repairer.__init__,
                            Repairer.repair,
                            StatementMutator.__init__,
                            StatementMutator.mutate,
                            ConditionMutator.__init__,
                            CrossoverOperator.__init__,
                            CrossoverOperator.crossover,
                        ],
                        project='debuggingbook')
```
## Lessons Learned

* Automated repair based on genetic optimization uses five ingredients:
    1. A _test suite_ to determine passing and failing tests
    2. _Defect localization_ (typically obtained from [statistical debugging](StatisticalDebugger.ipynb) with the test suite) to determine potential locations to be fixed
    3. _Random code mutations_ and _crossover operations_ to create and evolve a population of inputs
    4. A _fitness function_ and a _selection strategy_ to determine the part of the population that should be evolved further
    5. A _reducer_ such as [delta debugging](DeltaDebugger.ipynb) to simplify the final candidate with the highest fitness.
* The result of automated repair is a _fix candidate_ with the highest fitness for the given tests.
* A _fix candidate_ is not guaranteed to be correct or optimal, but gives important hints on how to fix the program.
* All of the above ingredients offer plenty of settings and alternatives to experiment with.

## Background

The seminal work in automated repair is [GenProg](https://squareslab.github.io/genprog-code/) \cite{LeGoues2012}, which heavily inspired our `Repairer` implementation. Major differences between GenProg and `Repairer` include:

* GenProg includes its own defect localization (which is also dynamically updated), whereas `Repairer` builds on earlier statistical debugging.
* GenProg can apply multiple mutations on programs (or none at all), whereas `Repairer` applies exactly one mutation.
* The `StatementMutator` used by `Repairer` includes various special cases for program structures (`if`, `for`, `while`...), whereas GenProg operates on statements only.
* GenProg has been tested on large production programs.

While GenProg is _the_ seminal work in the area (and arguably the most important software engineering research contribution of the 2010s), there have been a number of important extensions of automated repair. These include:

* *AutoFix* \cite{Pei2014} leverages _program contracts_ (pre- and postconditions) to generate tests and assertions automatically. Not only do such [assertions](Assertions.ipynb) help in fault localization, they also allow for much better validation of fix candidates.
* *SemFix* \cite{Nguyen2013} presents automated program repair based on _symbolic analysis_ rather than genetic optimization. This allows it to leverage program semantics, which GenProg does not consider.

To learn more about automated program repair, see [program-repair.org](http://program-repair.org), the community page dedicated to research in program repair.

## Exercises

### Exercise 1: Automated Repair Parameters

Automated repair is influenced by a large number of design choices – the size of the population, the number of iterations, the genetic optimization strategy, and more. How do changes to these design choices affect its effectiveness?

* Consider the constants defined in this chapter (such as `POPULATION_SIZE` or `WEIGHT_PASSING` vs. `WEIGHT_FAILING`). How do changes affect the effectiveness of automated repair?
* As an effectiveness metric, consider the number of iterations it takes to produce a fix candidate.
* Since genetic optimization is a random algorithm, you need to determine effectiveness averages over a large number of runs (say, 100). A rough harness for this is sketched below.
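Here is a minimal sketch of such an experiment; `make_repairer` is a hypothetical factory returning a fresh `Repairer` for each run (populations are stateful per instance):

```python
def success_rate(make_repairer, runs: int = 100, **repair_args) -> float:
    """Fraction of `runs` in which repair reaches fitness 1.0 (sketch)."""
    successes = 0
    for _ in range(runs):
        _, fitness = make_repairer().repair(**repair_args)
        successes += (fitness >= 1.0)
    return successes / runs

# e.g. success_rate(lambda: Repairer(middle_debugger), runs=100, iterations=50)
```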
### Exercise 2: Elitism

[_Elitism_](https://en.wikipedia.org/wiki/Genetic_algorithm#Elitism) (also known as _elitist selection_) is a variant of genetic selection in which a small fraction of the fittest candidates of the last population are included unchanged in the offspring.

* Implement elitist selection by subclassing the `evolve()` method. Experiment with various fractions (5%, 10%, 25%) of "elites" and see how this improves results.

### Exercise 3: Evolving Values

Following the steps of `ConditionMutator`, implement a `ValueMutator` class that replaces one constant value by another one found in the source (say, `0` by `1` or `True` by `False`).

For validation, consider the following failure in the `square_root()` function from [the chapter on assertions](Assertions.ipynb):
```python
from Assertions import square_root  # minor dependency

with ExpectError():
    square_root_of_zero = square_root(0)
```
Can your `ValueMutator` automatically fix this failure?

**Solution.** Your solution will be effective if it also includes named constants such as `None`.
```python
import math

def square_root_fixed(x):  # type: ignore
    assert x >= 0  # precondition

    approx = 0  # <-- FIX: Change `None` to 0
    guess = x / 2
    while approx != guess:
        approx = guess
        guess = (approx + x / approx) / 2

    assert math.isclose(approx * approx, x)
    return approx

square_root_fixed(0)
```
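A `ValueMutator` along the lines of `ConditionMutator` could look like the following sketch; the helper `all_constants()` and the `swap()` strategy are our own assumptions, not from the chapter (and `ast.Constant` requires Python 3.8+):

```python
def all_constants(trees: Union[ast.AST, List[ast.AST]]) -> List[Any]:
    """Return all constant values occurring in `trees` (sketch)."""
    if not isinstance(trees, list):
        trees = [trees]
    values: List[Any] = []
    for tree in trees:
        for node in ast.walk(tree):
            if isinstance(node, ast.Constant) and node.value not in values:
                values.append(node.value)
    return values

class ValueMutator(StatementMutator):
    """Mutate constant values in an AST (exercise sketch)."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.values = all_constants(self.source)

    def swap(self, node: ast.AST) -> ast.AST:
        """Replace one constant in `node` by another one from the source."""
        constants = [n for n in ast.walk(node) if isinstance(n, ast.Constant)]
        if not constants:
            return super().swap(node)

        target = random.choice(constants)
        target.value = random.choice(self.values)
        return node
```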
# BLERSSI Seldon Serving

## Clone Cisco Kubeflow Starter Pack repository
BRANCH_NAME="master" #Provide git branch name "master" or "dev" ! git clone -b $BRANCH_NAME https://github.com/CiscoAI/cisco-kubeflow-starter-pack.git
```
Cloning into 'cisco-kubeflow-starter-pack'...
remote: Enumerating objects: 63, done.
remote: Counting objects: 100% (63/63), done.
remote: Compressing objects: 100% (44/44), done.
remote: Total 4630 (delta 16), reused 44 (delta 11), pack-reused 4567
Receiving objects: 100% (4630/4630), 17.61 MiB | 48.72 MiB/s, done.
Resolving deltas: 100% (1745/1745), done.
```
## Install the required packages
```python
! pip install pandas sklearn seldon_core dill alibi==0.3.2 --user
```
```
Successfully built sklearn dill Flask-OpenTracing opentracing jaeger-client threadloop thrift wasabi
Successfully installed Flask-1.1.2 Flask-OpenTracing-1.1.0 Flask-cors-3.0.8 Pillow-7.2.0 PyWavelets-1.1.1 alibi-0.3.2 azure-common-1.1.25 azure-storage-blob-2.1.0 azure-storage-common-2.1.0 beautifulsoup4-4.9.1 blis-0.4.1 catalogue-1.0.0 click-7.1.2 configparser-5.0.0 cymem-2.0.3 dill-0.3.2 flatbuffers-1.12 grpcio-opentracing-1.1.4 gunicorn-20.0.4 imageio-2.9.0 itsdangerous-1.1.0 jaeger-client-4.1.0 joblib-0.16.0 minio-5.0.10 murmurhash-1.0.2 networkx-2.4 opentracing-2.2.0 pandas-1.0.5 plac-1.1.3 preshed-3.0.2 redis-3.5.3 scikit-image-0.17.2 scikit-learn-0.23.1 seldon-core-1.2.1 sklearn-0.0 soupsieve-2.0.1 spacy-2.3.2 srsly-1.0.2 thinc-7.4.1 threadloop-1.0.2 threadpoolctl-2.1.0 thrift-0.13.0 tifffile-2020.7.22 tqdm-4.48.0 wasabi-0.7.1
WARNING: You are using pip version 20.0.2; however, version 20.1.1 is available.
You should consider upgrading via the '/usr/bin/python3 -m pip install --upgrade pip' command.
```
## Restart Notebook kernel
```python
from IPython.display import display_html
display_html("<script>Jupyter.notebook.kernel.restart()</script>", raw=True)
```
## Import Libraries
```python
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import pandas as pd
import numpy as np
import shutil
import yaml
import random
import re
import os
import dill
import logging
import requests
import json
from time import sleep
from sklearn.preprocessing import OneHotEncoder
from alibi.explainers import AnchorTabular
from kubernetes import client as k8s_client
from kubernetes import config as k8s_config
from kubernetes.client.rest import ApiException

k8s_config.load_incluster_config()
api_client = k8s_client.CoreV1Api()
custom_api = k8s_client.CustomObjectsApi()
```
## Get Namespace

Get the current k8s namespace.
```python
def is_running_in_k8s():
    return os.path.isdir('/var/run/secrets/kubernetes.io/')

def get_current_k8s_namespace():
    with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace', 'r') as f:
        return f.readline()

def get_default_target_namespace():
    if not is_running_in_k8s():
        return 'default'
    return get_current_k8s_namespace()

namespace = get_default_target_namespace()
print(namespace)
```
```
anonymous
```
## Check GPU availability
```python
gpus = len(tf.config.experimental.list_physical_devices('GPU'))
if gpus == 0:
    print("Model will be trained using CPU")
elif gpus > 0:
    print("Num GPUs Available: ", gpus)
    print("Model will be trained using GPU")
```
```
Model will be trained using CPU
```
## Declare Variables
path="cisco-kubeflow-starter-pack/apps/networking/ble-localization/onprem" BLE_RSSI = pd.read_csv(os.path.join(path, "data/iBeacon_RSSI_Labeled.csv")) #Labeled dataset # Configure model options TF_DATA_DIR = os.getenv("TF_DATA_DIR", "/tmp/data/") TF_MODEL_DIR = os.getenv("TF_MODEL_DIR", "blerssi/") TF_EXPORT_DIR = os.getenv("TF_EXPORT_DIR", "blerssi/") TF_MODEL_TYPE = os.getenv("TF_MODEL_TYPE", "DNN") TF_TRAIN_STEPS = int(os.getenv("TF_TRAIN_STEPS", 5000)) TF_BATCH_SIZE = int(os.getenv("TF_BATCH_SIZE", 128)) TF_LEARNING_RATE = float(os.getenv("TF_LEARNING_RATE", 0.001)) # Feature columns COLUMNS = list(BLE_RSSI.columns) FEATURES = COLUMNS[2:] def make_feature_cols(): input_columns = [tf.feature_column.numeric_column(k) for k in FEATURES] return input_columns
## BLERSSI Input Dataset

Attribute information:

* **location**: The location of receiving RSSIs from iBeacons b3001 to b3013; symbolic values showing the column and row of the location on the map (e.g., A01 stands for column A, row 1).
* **date**: Datetime in the format 'd-m-yyyy hh:mm:ss'.
* **b3001 - b3013**: RSSI readings corresponding to the iBeacons; numeric, integers only.
```python
BLE_RSSI.head(10)
```
## Definition of Serving Input Receiver Function
```python
feature_columns = make_feature_cols()
inputs = {}
for feat in feature_columns:
    inputs[feat.name] = tf.placeholder(shape=[None], dtype=feat.dtype)
serving_input_receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(inputs)
```
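For reference, the receiver above declares one float placeholder per beacon, so a serving request carries 13 named inputs. A sketch of such a payload, with made-up values already scaled by -200 as in the preprocessing below:

```python
# Illustrative serving payload: one value per beacon b3001..b3013 (made-up data)
sample_instance = {f"b{3001 + i}": [0.35] for i in range(13)}
```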
## Train and Save BLE RSSI Model
```python
# Feature columns
COLUMNS = list(BLE_RSSI.columns)
FEATURES = COLUMNS[2:]
LABEL = [COLUMNS[0]]

b3001 = tf.feature_column.numeric_column(key='b3001', dtype=tf.float64)
b3002 = tf.feature_column.numeric_column(key='b3002', dtype=tf.float64)
b3003 = tf.feature_column.numeric_column(key='b3003', dtype=tf.float64)
b3004 = tf.feature_column.numeric_column(key='b3004', dtype=tf.float64)
b3005 = tf.feature_column.numeric_column(key='b3005', dtype=tf.float64)
b3006 = tf.feature_column.numeric_column(key='b3006', dtype=tf.float64)
b3007 = tf.feature_column.numeric_column(key='b3007', dtype=tf.float64)
b3008 = tf.feature_column.numeric_column(key='b3008', dtype=tf.float64)
b3009 = tf.feature_column.numeric_column(key='b3009', dtype=tf.float64)
b3010 = tf.feature_column.numeric_column(key='b3010', dtype=tf.float64)
b3011 = tf.feature_column.numeric_column(key='b3011', dtype=tf.float64)
b3012 = tf.feature_column.numeric_column(key='b3012', dtype=tf.float64)
b3013 = tf.feature_column.numeric_column(key='b3013', dtype=tf.float64)
feature_columns = [b3001, b3002, b3003, b3004, b3005, b3006, b3007,
                   b3008, b3009, b3010, b3011, b3012, b3013]

df_full = pd.read_csv(os.path.join(path, "data/iBeacon_RSSI_Labeled.csv"))  # Labeled dataset

# Input data preprocessing
df_full = df_full.drop(['date'], axis=1)
df_full[FEATURES] = (df_full[FEATURES]) / (-200)

# Output data preprocessing
dict = {'O02': 0, 'P01': 1, 'P02': 2, 'R01': 3, 'R02': 4, 'S01': 5, 'S02': 6, 'T01': 7,
        'U02': 8, 'U01': 9, 'J03': 10, 'K03': 11, 'L03': 12, 'M03': 13, 'N03': 14, 'O03': 15,
        'P03': 16, 'Q03': 17, 'R03': 18, 'S03': 19, 'T03': 20, 'U03': 21, 'U04': 22, 'T04': 23,
        'S04': 24, 'R04': 25, 'Q04': 26, 'P04': 27, 'O04': 28, 'N04': 29, 'M04': 30, 'L04': 31,
        'K04': 32, 'J04': 33, 'I04': 34, 'I05': 35, 'J05': 36, 'K05': 37, 'L05': 38, 'M05': 39,
        'N05': 40, 'O05': 41, 'P05': 42, 'Q05': 43, 'R05': 44, 'S05': 45, 'T05': 46, 'U05': 47,
        'S06': 48, 'R06': 49, 'Q06': 50, 'P06': 51, 'O06': 52, 'N06': 53, 'M06': 54, 'L06': 55,
        'K06': 56, 'J06': 57, 'I06': 58, 'F08': 59, 'J02': 60, 'J07': 61, 'I07': 62, 'I10': 63,
        'J10': 64, 'D15': 65, 'E15': 66, 'G15': 67, 'J15': 68, 'L15': 69, 'R15': 70, 'T15': 71,
        'W15': 72, 'I08': 73, 'I03': 74, 'J08': 75, 'I01': 76, 'I02': 77, 'J01': 78, 'K01': 79,
        'K02': 80, 'L01': 81, 'L02': 82, 'M01': 83, 'M02': 84, 'N01': 85, 'N02': 86, 'O01': 87,
        'I09': 88, 'D14': 89, 'D13': 90, 'K07': 91, 'K08': 92, 'N15': 93, 'P15': 94, 'I15': 95,
        'S15': 96, 'U15': 97, 'V15': 98, 'S07': 99, 'S08': 100, 'L09': 101, 'L08': 102,
        'Q02': 103, 'Q01': 104}
df_full['location'] = df_full['location'].map(dict)
df_train = df_full.sample(frac=0.8, random_state=200)
df_valid = df_full.drop(df_train.index)

location_counts = BLE_RSSI.location.value_counts()
x1 = np.asarray(df_train[FEATURES])
y1 = np.asarray(df_train['location'])
x2 = np.asarray(df_valid[FEATURES])
y2 = np.asarray(df_valid['location'])

def formatFeatures(features):
    formattedFeatures = {}
    numColumns = features.shape[1]
    for i in range(0, numColumns):
        formattedFeatures["b" + str(3001 + i)] = features[:, i]
    return formattedFeatures

trainingFeatures = formatFeatures(x1)
trainingCategories = y1
testFeatures = formatFeatures(x2)
testCategories = y2

# Train input function
def train_input_fn():
    dataset = tf.data.Dataset.from_tensor_slices((trainingFeatures, y1))
    dataset = dataset.repeat(1000).batch(TF_BATCH_SIZE)
    return dataset

# Test input function
def eval_input_fn():
    dataset = tf.data.Dataset.from_tensor_slices((testFeatures, y2))
    return dataset.repeat(1000).batch(TF_BATCH_SIZE)

# Provide list of GPUs that should be used to train the model
distribution = tf.distribute.experimental.ParameterServerStrategy()
print('Number of devices: {}'.format(distribution.num_replicas_in_sync))

# Configuration of training model
config = tf.estimator.RunConfig(train_distribute=distribution,
                                model_dir=TF_MODEL_DIR,
                                save_summary_steps=100,
                                save_checkpoints_steps=100)

# Build 3-layer DNN classifier
model = tf.estimator.DNNClassifier(hidden_units=[13, 65, 110],
                                   feature_columns=feature_columns,
                                   model_dir=TF_MODEL_DIR,
                                   n_classes=105,
                                   config=config)

export_final = tf.estimator.FinalExporter(TF_EXPORT_DIR,
                                          serving_input_receiver_fn=serving_input_receiver_fn)

train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
                                    max_steps=TF_TRAIN_STEPS)

eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn,
                                  steps=100,
                                  exporters=export_final,
                                  throttle_secs=1,
                                  start_delay_secs=1)

# Train and evaluate the model
tf.estimator.train_and_evaluate(model, train_spec, eval_spec)
```
```
INFO:tensorflow:ParameterServerStrategy with compute_devices = ('/device:CPU:0',), variable_device = '/device:CPU:0'
Number of devices: 1
INFO:tensorflow:Initializing RunConfig with distribution strategies.
INFO:tensorflow:Not using Distribute Coordinator.
INFO:tensorflow:Using config: {'_model_dir': 'blerssi/', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': 100, '_save_checkpoints_secs': None, ...}
INFO:tensorflow:Running training and evaluation locally (non-distributed).
INFO:tensorflow:Start train and evaluate loop. The evaluate will happen after every checkpoint. Checkpoint frequency is determined based on RunConfig arguments: save_checkpoints_steps 100 or save_checkpoints_secs None.
INFO:tensorflow:Calling model_fn.
INFO:tensorflow:Done calling model_fn.
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Graph was finalized.
INFO:tensorflow:Running local_init_op.
INFO:tensorflow:Done running local_init_op.
INFO:tensorflow:Saving checkpoints for 0 into blerssi/model.ckpt.
INFO:tensorflow:loss = 594.5514, step = 0
INFO:tensorflow:Saving checkpoints for 100 into blerssi/model.ckpt.
INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:16Z
INFO:tensorflow:Evaluation [10/100] ... [100/100]
INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:16
INFO:tensorflow:Saving dict for global step 100: accuracy = 0.1478125, average_loss = 3.0438955, global_step = 100, loss = 389.61862
INFO:tensorflow:loss = 371.56686, step = 100 (1.342 sec)
INFO:tensorflow:Saving dict for global step 200: accuracy = 0.16195312, average_loss = 2.8027809, global_step = 200, loss = 358.75595
INFO:tensorflow:loss = 342.82297, step = 200 (1.257 sec)
INFO:tensorflow:Saving dict for global step 300: accuracy = 0.14773437, average_loss = 2.9204443, global_step = 300, loss = 373.81686
INFO:tensorflow:loss = 350.08023, step = 300 (1.217 sec)
INFO:tensorflow:Saving dict for global step 400: accuracy = 0.17234375, average_loss = 2.7928438, global_step = 400, loss = 357.484
INFO:tensorflow:loss = 330.38446, step = 400 (1.226 sec)
INFO:tensorflow:Saving dict for global step 500: accuracy = 0.16554688, average_loss = 2.8326373, global_step = 500, loss = 362.57758
INFO:tensorflow:loss = 309.4389, step = 500 (1.370 sec)
INFO:tensorflow:Saving dict for global step 600: accuracy = 0.16882813, average_loss = 2.8005483, global_step = 600, loss = 358.47018
INFO:tensorflow:loss = 317.98203, step = 600 (1.214 sec)
INFO:tensorflow:Saving checkpoints for 700 into blerssi/model.ckpt.
```
INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:23Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-700 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:24 INFO:tensorflow:Saving dict for global step 700: accuracy = 0.18289062, average_loss = 2.8308408, global_step = 700, loss = 362.34763 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 700: blerssi/model.ckpt-700 INFO:tensorflow:global_step/sec: 81.834 INFO:tensorflow:loss = 326.9156, step = 700 (1.222 sec) INFO:tensorflow:Saving checkpoints for 800 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:24Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-800 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:25 INFO:tensorflow:Saving dict for global step 800: accuracy = 0.15484375, average_loss = 2.9795644, global_step = 800, loss = 381.38425 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 800: blerssi/model.ckpt-800 INFO:tensorflow:global_step/sec: 82.8517 INFO:tensorflow:loss = 326.4861, step = 800 (1.206 sec) INFO:tensorflow:Saving checkpoints for 900 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:25Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-900 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:26 INFO:tensorflow:Saving dict for global step 900: accuracy = 0.19359376, average_loss = 2.8420234, global_step = 900, loss = 363.779 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 900: blerssi/model.ckpt-900 INFO:tensorflow:global_step/sec: 81.6693 INFO:tensorflow:loss = 288.16187, step = 900 (1.227 sec) INFO:tensorflow:Saving checkpoints for 1000 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:27Z INFO:tensorflow:Graph was finalized. 
INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-1000 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:27 INFO:tensorflow:Saving dict for global step 1000: accuracy = 0.18296875, average_loss = 2.8350174, global_step = 1000, loss = 362.88223 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 1000: blerssi/model.ckpt-1000 INFO:tensorflow:global_step/sec: 80.4219 INFO:tensorflow:loss = 305.5712, step = 1000 (1.243 sec) INFO:tensorflow:Saving checkpoints for 1100 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:28Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-1100 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:29 INFO:tensorflow:Saving dict for global step 1100: accuracy = 0.18640625, average_loss = 2.840476, global_step = 1100, loss = 363.58093 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 1100: blerssi/model.ckpt-1100 INFO:tensorflow:global_step/sec: 83.3038 INFO:tensorflow:loss = 309.84857, step = 1100 (1.199 sec) INFO:tensorflow:Saving checkpoints for 1200 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:29Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-1200 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:30 INFO:tensorflow:Saving dict for global step 1200: accuracy = 0.20078126, average_loss = 2.8759577, global_step = 1200, loss = 368.1226 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 1200: blerssi/model.ckpt-1200 INFO:tensorflow:global_step/sec: 80.6584 INFO:tensorflow:loss = 304.45615, step = 1200 (1.241 sec) INFO:tensorflow:Saving checkpoints for 1300 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:30Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-1300 INFO:tensorflow:Running local_init_op. 
INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:31 INFO:tensorflow:Saving dict for global step 1300: accuracy = 0.19710937, average_loss = 2.8712735, global_step = 1300, loss = 367.523 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 1300: blerssi/model.ckpt-1300 INFO:tensorflow:global_step/sec: 68.2263 INFO:tensorflow:loss = 305.92072, step = 1300 (1.466 sec) INFO:tensorflow:Saving checkpoints for 1400 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:32Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-1400 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:32 INFO:tensorflow:Saving dict for global step 1400: accuracy = 0.15476562, average_loss = 2.8461099, global_step = 1400, loss = 364.30206 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 1400: blerssi/model.ckpt-1400 INFO:tensorflow:global_step/sec: 83.3139 INFO:tensorflow:loss = 295.6084, step = 1400 (1.200 sec) INFO:tensorflow:Saving checkpoints for 1500 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:33Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-1500 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:34 INFO:tensorflow:Saving dict for global step 1500: accuracy = 0.21453124, average_loss = 2.8982835, global_step = 1500, loss = 370.9803 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 1500: blerssi/model.ckpt-1500 INFO:tensorflow:global_step/sec: 81.5076 INFO:tensorflow:loss = 307.38174, step = 1500 (1.227 sec) INFO:tensorflow:Saving checkpoints for 1600 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:34Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-1600 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. 
INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:35 INFO:tensorflow:Saving dict for global step 1600: accuracy = 0.200625, average_loss = 2.9850085, global_step = 1600, loss = 382.0811 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 1600: blerssi/model.ckpt-1600 INFO:tensorflow:global_step/sec: 80.8971 INFO:tensorflow:loss = 290.9291, step = 1600 (1.236 sec) INFO:tensorflow:Saving checkpoints for 1700 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:36Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-1700 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:36 INFO:tensorflow:Saving dict for global step 1700: accuracy = 0.19359376, average_loss = 2.8472593, global_step = 1700, loss = 364.4492 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 1700: blerssi/model.ckpt-1700 INFO:tensorflow:global_step/sec: 77.9863 INFO:tensorflow:loss = 302.74707, step = 1700 (1.282 sec) INFO:tensorflow:Saving checkpoints for 1800 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:37Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-1800 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:37 INFO:tensorflow:Saving dict for global step 1800: accuracy = 0.18640625, average_loss = 2.8873806, global_step = 1800, loss = 369.58472 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 1800: blerssi/model.ckpt-1800 INFO:tensorflow:global_step/sec: 81.522 INFO:tensorflow:loss = 311.20178, step = 1800 (1.227 sec) INFO:tensorflow:Saving checkpoints for 1900 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:38Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-1900 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. 
INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:39 INFO:tensorflow:Saving dict for global step 1900: accuracy = 0.1934375, average_loss = 2.8413296, global_step = 1900, loss = 363.6902 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 1900: blerssi/model.ckpt-1900 INFO:tensorflow:global_step/sec: 82.8489 INFO:tensorflow:loss = 294.47797, step = 1900 (1.206 sec) INFO:tensorflow:Saving checkpoints for 2000 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:39Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-2000 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:40 INFO:tensorflow:Saving dict for global step 2000: accuracy = 0.21117188, average_loss = 2.896705, global_step = 2000, loss = 370.77823 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 2000: blerssi/model.ckpt-2000 INFO:tensorflow:global_step/sec: 81.545 INFO:tensorflow:loss = 300.31937, step = 2000 (1.226 sec) INFO:tensorflow:Saving checkpoints for 2100 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:40Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-2100 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:41 INFO:tensorflow:Saving dict for global step 2100: accuracy = 0.21125, average_loss = 2.9106176, global_step = 2100, loss = 372.55905 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 2100: blerssi/model.ckpt-2100 INFO:tensorflow:global_step/sec: 72.1539 INFO:tensorflow:loss = 285.34515, step = 2100 (1.387 sec) INFO:tensorflow:Saving checkpoints for 2200 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:42Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-2200 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. 
INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:42 INFO:tensorflow:Saving dict for global step 2200: accuracy = 0.17585938, average_loss = 2.9142356, global_step = 2200, loss = 373.02216 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 2200: blerssi/model.ckpt-2200 INFO:tensorflow:global_step/sec: 82.1222 INFO:tensorflow:loss = 301.6997, step = 2200 (1.217 sec) INFO:tensorflow:Saving checkpoints for 2300 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:43Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-2300 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:44 INFO:tensorflow:Saving dict for global step 2300: accuracy = 0.218125, average_loss = 2.878163, global_step = 2300, loss = 368.40488 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 2300: blerssi/model.ckpt-2300 INFO:tensorflow:global_step/sec: 82.9431 INFO:tensorflow:loss = 308.0114, step = 2300 (1.205 sec) INFO:tensorflow:Saving checkpoints for 2400 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:44Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-2400 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:45 INFO:tensorflow:Saving dict for global step 2400: accuracy = 0.21820313, average_loss = 2.900616, global_step = 2400, loss = 371.27884 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 2400: blerssi/model.ckpt-2400 INFO:tensorflow:global_step/sec: 83.301 INFO:tensorflow:loss = 288.26395, step = 2400 (1.201 sec) INFO:tensorflow:Saving checkpoints for 2500 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:45Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-2500 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. 
INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:46 INFO:tensorflow:Saving dict for global step 2500: accuracy = 0.20414062, average_loss = 3.027789, global_step = 2500, loss = 387.557 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 2500: blerssi/model.ckpt-2500 INFO:tensorflow:global_step/sec: 82.5175 INFO:tensorflow:loss = 289.87027, step = 2500 (1.212 sec) INFO:tensorflow:Saving checkpoints for 2600 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:47Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-2600 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:47 INFO:tensorflow:Saving dict for global step 2600: accuracy = 0.19703124, average_loss = 2.8862774, global_step = 2600, loss = 369.4435 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 2600: blerssi/model.ckpt-2600 INFO:tensorflow:global_step/sec: 85.1553 INFO:tensorflow:loss = 304.54187, step = 2600 (1.175 sec) INFO:tensorflow:Saving checkpoints for 2700 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:48Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-2700 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:48 INFO:tensorflow:Saving dict for global step 2700: accuracy = 0.22179687, average_loss = 2.8361683, global_step = 2700, loss = 363.02954 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 2700: blerssi/model.ckpt-2700 INFO:tensorflow:global_step/sec: 82.4599 INFO:tensorflow:loss = 286.2304, step = 2700 (1.212 sec) INFO:tensorflow:Saving checkpoints for 2800 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:49Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-2800 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. 
INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:50 INFO:tensorflow:Saving dict for global step 2800: accuracy = 0.22179687, average_loss = 2.822359, global_step = 2800, loss = 361.26196 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 2800: blerssi/model.ckpt-2800 INFO:tensorflow:global_step/sec: 82.122 INFO:tensorflow:loss = 292.93854, step = 2800 (1.218 sec) INFO:tensorflow:Saving checkpoints for 2900 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:51Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-2900 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:51 INFO:tensorflow:Saving dict for global step 2900: accuracy = 0.2075, average_loss = 2.9061038, global_step = 2900, loss = 371.9813 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 2900: blerssi/model.ckpt-2900 INFO:tensorflow:global_step/sec: 70.3029 INFO:tensorflow:loss = 291.2099, step = 2900 (1.422 sec) INFO:tensorflow:Saving checkpoints for 3000 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:52Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-3000 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:52 INFO:tensorflow:Saving dict for global step 3000: accuracy = 0.20398438, average_loss = 2.9259422, global_step = 3000, loss = 374.5206 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 3000: blerssi/model.ckpt-3000 INFO:tensorflow:global_step/sec: 80.3511 INFO:tensorflow:loss = 291.8711, step = 3000 (1.244 sec) INFO:tensorflow:Saving checkpoints for 3100 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:53Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-3100 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. 
INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:54 INFO:tensorflow:Saving dict for global step 3100: accuracy = 0.22523437, average_loss = 2.8671799, global_step = 3100, loss = 366.99902 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 3100: blerssi/model.ckpt-3100 INFO:tensorflow:global_step/sec: 82.5228 INFO:tensorflow:loss = 270.925, step = 3100 (1.212 sec) INFO:tensorflow:Saving checkpoints for 3200 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:54Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-3200 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:55 INFO:tensorflow:Saving dict for global step 3200: accuracy = 0.22523437, average_loss = 2.894812, global_step = 3200, loss = 370.53595 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 3200: blerssi/model.ckpt-3200 INFO:tensorflow:global_step/sec: 79.6329 INFO:tensorflow:loss = 294.95923, step = 3200 (1.256 sec) INFO:tensorflow:Saving checkpoints for 3300 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:55Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-3300 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:56 INFO:tensorflow:Saving dict for global step 3300: accuracy = 0.2215625, average_loss = 2.9023647, global_step = 3300, loss = 371.5027 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 3300: blerssi/model.ckpt-3300 INFO:tensorflow:global_step/sec: 82.8779 INFO:tensorflow:loss = 299.6723, step = 3300 (1.207 sec) INFO:tensorflow:Saving checkpoints for 3400 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:57Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-3400 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. 
INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:57 INFO:tensorflow:Saving dict for global step 3400: accuracy = 0.2075, average_loss = 2.8652325, global_step = 3400, loss = 366.74976 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 3400: blerssi/model.ckpt-3400 INFO:tensorflow:global_step/sec: 85.4935 INFO:tensorflow:loss = 278.42737, step = 3400 (1.170 sec) INFO:tensorflow:Saving checkpoints for 3500 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:58Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-3500 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:04:58 INFO:tensorflow:Saving dict for global step 3500: accuracy = 0.23226562, average_loss = 2.896808, global_step = 3500, loss = 370.7914 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 3500: blerssi/model.ckpt-3500 INFO:tensorflow:global_step/sec: 83.4493 INFO:tensorflow:loss = 278.02283, step = 3500 (1.198 sec) INFO:tensorflow:Saving checkpoints for 3600 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:04:59Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-3600 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:05:00 INFO:tensorflow:Saving dict for global step 3600: accuracy = 0.21460937, average_loss = 2.924043, global_step = 3600, loss = 374.2775 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 3600: blerssi/model.ckpt-3600 INFO:tensorflow:global_step/sec: 81.4535 INFO:tensorflow:loss = 279.70343, step = 3600 (1.227 sec) INFO:tensorflow:Saving checkpoints for 3700 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:05:00Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-3700 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. 
INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:05:01 INFO:tensorflow:Saving dict for global step 3700: accuracy = 0.1934375, average_loss = 2.9265563, global_step = 3700, loss = 374.5992 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 3700: blerssi/model.ckpt-3700 INFO:tensorflow:global_step/sec: 69.8044 INFO:tensorflow:loss = 289.2801, step = 3700 (1.432 sec) INFO:tensorflow:Saving checkpoints for 3800 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:05:02Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-3800 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:05:02 INFO:tensorflow:Saving dict for global step 3800: accuracy = 0.2146875, average_loss = 2.8516412, global_step = 3800, loss = 365.01007 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 3800: blerssi/model.ckpt-3800 INFO:tensorflow:global_step/sec: 86.0293 INFO:tensorflow:loss = 288.4405, step = 3800 (1.162 sec) INFO:tensorflow:Saving checkpoints for 3900 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:05:03Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-3900 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:05:03 INFO:tensorflow:Saving dict for global step 3900: accuracy = 0.23226562, average_loss = 2.8733413, global_step = 3900, loss = 367.7877 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 3900: blerssi/model.ckpt-3900 INFO:tensorflow:global_step/sec: 81.837 INFO:tensorflow:loss = 274.29977, step = 3900 (1.225 sec) INFO:tensorflow:Saving checkpoints for 4000 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:05:04Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-4000 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. 
INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:05:05 INFO:tensorflow:Saving dict for global step 4000: accuracy = 0.23929687, average_loss = 2.8829916, global_step = 4000, loss = 369.02292 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 4000: blerssi/model.ckpt-4000 INFO:tensorflow:global_step/sec: 79.7979 INFO:tensorflow:loss = 280.6007, step = 4000 (1.251 sec) INFO:tensorflow:Saving checkpoints for 4100 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:05:05Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-4100 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:05:06 INFO:tensorflow:Saving dict for global step 4100: accuracy = 0.20398438, average_loss = 2.924492, global_step = 4100, loss = 374.33496 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 4100: blerssi/model.ckpt-4100 INFO:tensorflow:global_step/sec: 82.9037 INFO:tensorflow:loss = 292.06015, step = 4100 (1.207 sec) INFO:tensorflow:Saving checkpoints for 4200 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:05:07Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-4200 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:05:07 INFO:tensorflow:Saving dict for global step 4200: accuracy = 0.23578125, average_loss = 2.846016, global_step = 4200, loss = 364.29004 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 4200: blerssi/model.ckpt-4200 INFO:tensorflow:global_step/sec: 83.7255 INFO:tensorflow:loss = 272.29013, step = 4200 (1.194 sec) INFO:tensorflow:Saving checkpoints for 4300 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:05:08Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-4300 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. 
INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:05:08 INFO:tensorflow:Saving dict for global step 4300: accuracy = 0.239375, average_loss = 2.8495471, global_step = 4300, loss = 364.74203 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 4300: blerssi/model.ckpt-4300 INFO:tensorflow:global_step/sec: 83.2944 INFO:tensorflow:loss = 300.6521, step = 4300 (1.200 sec) INFO:tensorflow:Saving checkpoints for 4400 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:05:09Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-4400 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:05:10 INFO:tensorflow:Saving dict for global step 4400: accuracy = 0.22507812, average_loss = 2.8738248, global_step = 4400, loss = 367.84958 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 4400: blerssi/model.ckpt-4400 INFO:tensorflow:global_step/sec: 82.7437 INFO:tensorflow:loss = 297.72165, step = 4400 (1.209 sec) INFO:tensorflow:Saving checkpoints for 4500 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:05:10Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-4500 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:05:11 INFO:tensorflow:Saving dict for global step 4500: accuracy = 0.21453124, average_loss = 3.0025408, global_step = 4500, loss = 384.32523 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 4500: blerssi/model.ckpt-4500 INFO:tensorflow:global_step/sec: 67.978 INFO:tensorflow:loss = 287.15585, step = 4500 (1.471 sec) INFO:tensorflow:Saving checkpoints for 4600 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:05:12Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-4600 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. 
INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:05:12 INFO:tensorflow:Saving dict for global step 4600: accuracy = 0.23585938, average_loss = 2.8682337, global_step = 4600, loss = 367.1339 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 4600: blerssi/model.ckpt-4600 INFO:tensorflow:global_step/sec: 84.4059 INFO:tensorflow:loss = 273.37143, step = 4600 (1.185 sec) INFO:tensorflow:Saving checkpoints for 4700 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:05:13Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-4700 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:05:13 INFO:tensorflow:Saving dict for global step 4700: accuracy = 0.26742187, average_loss = 2.9070816, global_step = 4700, loss = 372.10645 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 4700: blerssi/model.ckpt-4700 INFO:tensorflow:global_step/sec: 82.6464 INFO:tensorflow:loss = 280.7273, step = 4700 (1.210 sec) INFO:tensorflow:Saving checkpoints for 4800 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:05:14Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-4800 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:05:15 INFO:tensorflow:Saving dict for global step 4800: accuracy = 0.22867188, average_loss = 2.9323123, global_step = 4800, loss = 375.33597 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 4800: blerssi/model.ckpt-4800 INFO:tensorflow:global_step/sec: 84.4072 INFO:tensorflow:loss = 282.58746, step = 4800 (1.185 sec) INFO:tensorflow:Saving checkpoints for 4900 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:05:15Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-4900 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. 
INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:05:16 INFO:tensorflow:Saving dict for global step 4900: accuracy = 0.22859375, average_loss = 2.9506714, global_step = 4900, loss = 377.68594 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 4900: blerssi/model.ckpt-4900 INFO:tensorflow:global_step/sec: 83.8655 INFO:tensorflow:loss = 276.4771, step = 4900 (1.192 sec) INFO:tensorflow:Saving checkpoints for 5000 into blerssi/model.ckpt. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2020-07-27T12:05:16Z INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-5000 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Evaluation [10/100] INFO:tensorflow:Evaluation [20/100] INFO:tensorflow:Evaluation [30/100] INFO:tensorflow:Evaluation [40/100] INFO:tensorflow:Evaluation [50/100] INFO:tensorflow:Evaluation [60/100] INFO:tensorflow:Evaluation [70/100] INFO:tensorflow:Evaluation [80/100] INFO:tensorflow:Evaluation [90/100] INFO:tensorflow:Evaluation [100/100] INFO:tensorflow:Finished evaluation at 2020-07-27-12:05:17 INFO:tensorflow:Saving dict for global step 5000: accuracy = 0.24632813, average_loss = 2.9123015, global_step = 5000, loss = 372.7746 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 5000: blerssi/model.ckpt-5000 INFO:tensorflow:Performing the final export in the end of training. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/saved_model/signature_def_utils_impl.py:201: build_tensor_info (from tensorflow.python.saved_model.utils_impl) is deprecated and will be removed in a future version. Instructions for updating: This function will only be available through the v1 compatibility library as tf.compat.v1.saved_model.utils.build_tensor_info or tf.compat.v1.saved_model.build_tensor_info. 
INFO:tensorflow:Signatures INCLUDED in export for Classify: None INFO:tensorflow:Signatures INCLUDED in export for Regress: None INFO:tensorflow:Signatures INCLUDED in export for Predict: ['predict'] INFO:tensorflow:Signatures INCLUDED in export for Train: None INFO:tensorflow:Signatures INCLUDED in export for Eval: None INFO:tensorflow:Signatures EXCLUDED from export because they cannot be be served via TensorFlow Serving APIs: INFO:tensorflow:'serving_default' : Classification input must be a single string Tensor; got {'b3001': <tf.Tensor 'Placeholder:0' shape=(?,) dtype=float32>, 'b3002': <tf.Tensor 'Placeholder_1:0' shape=(?,) dtype=float32>, 'b3003': <tf.Tensor 'Placeholder_2:0' shape=(?,) dtype=float32>, 'b3004': <tf.Tensor 'Placeholder_3:0' shape=(?,) dtype=float32>, 'b3005': <tf.Tensor 'Placeholder_4:0' shape=(?,) dtype=float32>, 'b3006': <tf.Tensor 'Placeholder_5:0' shape=(?,) dtype=float32>, 'b3007': <tf.Tensor 'Placeholder_6:0' shape=(?,) dtype=float32>, 'b3008': <tf.Tensor 'Placeholder_7:0' shape=(?,) dtype=float32>, 'b3009': <tf.Tensor 'Placeholder_8:0' shape=(?,) dtype=float32>, 'b3010': <tf.Tensor 'Placeholder_9:0' shape=(?,) dtype=float32>, 'b3011': <tf.Tensor 'Placeholder_10:0' shape=(?,) dtype=float32>, 'b3012': <tf.Tensor 'Placeholder_11:0' shape=(?,) dtype=float32>, 'b3013': <tf.Tensor 'Placeholder_12:0' shape=(?,) dtype=float32>} INFO:tensorflow:'classification' : Classification input must be a single string Tensor; got {'b3001': <tf.Tensor 'Placeholder:0' shape=(?,) dtype=float32>, 'b3002': <tf.Tensor 'Placeholder_1:0' shape=(?,) dtype=float32>, 'b3003': <tf.Tensor 'Placeholder_2:0' shape=(?,) dtype=float32>, 'b3004': <tf.Tensor 'Placeholder_3:0' shape=(?,) dtype=float32>, 'b3005': <tf.Tensor 'Placeholder_4:0' shape=(?,) dtype=float32>, 'b3006': <tf.Tensor 'Placeholder_5:0' shape=(?,) dtype=float32>, 'b3007': <tf.Tensor 'Placeholder_6:0' shape=(?,) dtype=float32>, 'b3008': <tf.Tensor 'Placeholder_7:0' shape=(?,) dtype=float32>, 'b3009': <tf.Tensor 'Placeholder_8:0' shape=(?,) dtype=float32>, 'b3010': <tf.Tensor 'Placeholder_9:0' shape=(?,) dtype=float32>, 'b3011': <tf.Tensor 'Placeholder_10:0' shape=(?,) dtype=float32>, 'b3012': <tf.Tensor 'Placeholder_11:0' shape=(?,) dtype=float32>, 'b3013': <tf.Tensor 'Placeholder_12:0' shape=(?,) dtype=float32>} WARNING:tensorflow:Export includes no default signature! INFO:tensorflow:Restoring parameters from blerssi/model.ckpt-5000 INFO:tensorflow:Assets added to graph. INFO:tensorflow:No assets to write. INFO:tensorflow:SavedModel written to: blerssi/export/blerssi/temp-b'1595851517'/saved_model.pb INFO:tensorflow:Loss for final step: 260.41766.
Apache-2.0
apps/networking/ble-localization/onprem/seldon/blerssi-seldon.ipynb
Karthik-Git-Sudo786/cisco-kubeflow-starter-pack
Define predict function
MODEL_EXPORT_PATH = os.path.join(TF_MODEL_DIR, "export", TF_EXPORT_DIR)

def predict(request):
    """
    Define custom predict function to be used by local prediction
    and the explainer. Set the anchor_tabular predict function so it
    always returns the predicted class.
    """
    # Get the model export path
    for dir in os.listdir(MODEL_EXPORT_PATH):
        if re.match('[0-9]', dir):
            exported_path = os.path.join(MODEL_EXPORT_PATH, dir)
            break
    else:
        raise Exception("Model path not found")

    # Prepare model input data
    feature_cols = ["b3001", "b3002", "b3003", "b3004", "b3005", "b3006", "b3007",
                    "b3008", "b3009", "b3010", "b3011", "b3012", "b3013"]
    input = {'b3001': [], 'b3002': [], 'b3003': [], 'b3004': [], 'b3005': [],
             'b3006': [], 'b3007': [], 'b3008': [], 'b3009': [], 'b3010': [],
             'b3011': [], 'b3012': [], 'b3013': []}
    X = request
    if np.ndim(X) != 2:
        for i in range(len(X)):
            input[feature_cols[i]].append(X[i])
    else:
        for i in range(len(X)):
            for j in range(len(X[i])):
                input[feature_cols[j]].append(X[i][j])

    # Open a session to predict
    with tf.Session() as sess:
        tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], exported_path)
        predictor = tf.contrib.predictor.from_saved_model(exported_path, signature_def_key='predict')
        output_dict = predictor(input)
        sess.close()
    output = {}
    output["predictions"] = {"probabilities": output_dict["probabilities"].tolist()}
    return np.asarray(output['predictions']["probabilities"])
_____no_output_____
Apache-2.0
apps/networking/ble-localization/onprem/seldon/blerssi-seldon.ipynb
Karthik-Git-Sudo786/cisco-kubeflow-starter-pack
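A quick smoke test helps confirm the wrapper works before wiring it into the explainer. This is a sketch: the all-zeros row is a placeholder input, not a meaningful observation.

```python
import numpy as np

# Call the wrapper on a dummy 13-feature row; the values are placeholders —
# we only check that a probability vector comes back with the right shape.
dummy = np.zeros((1, 13), dtype=np.float32)
print(predict(dummy).shape)
```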
Initialize and fit

To initialize the explainer, we provide the predict function and a list of feature names so that the resulting anchors are easy to understand.
feature_cols=["b3001", "b3002", "b3003", "b3004", "b3005", "b3006", "b3007", "b3008", "b3009", "b3010", "b3011", "b3012", "b3013"] explainer = AnchorTabular(predict, feature_cols)
WARNING:tensorflow:From <ipython-input-8-69054218b064>:31: load (from tensorflow.python.saved_model.loader_impl) is deprecated and will be removed in a future version. Instructions for updating: This function will only be available through the v1 compatibility library as tf.compat.v1.saved_model.loader.load or tf.compat.v1.saved_model.load. There will be a new function for importing SavedModels in Tensorflow 2.0. INFO:tensorflow:Restoring parameters from blerssi/export/blerssi/1595851517/variables/variables WARNING:tensorflow: The TensorFlow contrib module will not be included in TensorFlow 2.0. For more information, please see: * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md * https://github.com/tensorflow/addons * https://github.com/tensorflow/io (for I/O related ops) If you depend on functionality not listed there, please file an issue. INFO:tensorflow:Restoring parameters from blerssi/export/blerssi/1595851517/variables/variables
Apache-2.0
apps/networking/ble-localization/onprem/seldon/blerssi-seldon.ipynb
Karthik-Git-Sudo786/cisco-kubeflow-starter-pack
Discretize the ordinal features into quartiles. `disc_perc` is a list of percentiles used for binning.
explainer.fit(x1, disc_perc=(25, 50, 75))
_____no_output_____
Apache-2.0
apps/networking/ble-localization/onprem/seldon/blerssi-seldon.ipynb
Karthik-Git-Sudo786/cisco-kubeflow-starter-pack
Save Explainer file

Save the explainer file with a `.dill` extension. It will be used when creating the SeldonDeployment.
EXPLAINER_PATH="explainer" if not os.path.exists(EXPLAINER_PATH): os.mkdir(EXPLAINER_PATH) with open("%s/explainer.dill"%EXPLAINER_PATH, 'wb') as f: dill.dump(explainer,f)
_____no_output_____
Apache-2.0
apps/networking/ble-localization/onprem/seldon/blerssi-seldon.ipynb
Karthik-Git-Sudo786/cisco-kubeflow-starter-pack
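As a sanity check — a sketch, using only the `dill` module already imported above — the saved file should deserialize back into an `AnchorTabular` object:

```python
# Load the explainer back to verify the file round-trips cleanly.
with open("%s/explainer.dill" % EXPLAINER_PATH, "rb") as f:
    restored = dill.load(f)
print(type(restored))
```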
Create a gateway

Create a gateway called `kubeflow-gateway` in the user namespace (`anonymous` by default).
gateway=f"""apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: name: kubeflow-gateway namespace: {namespace} spec: selector: istio: ingressgateway servers: - hosts: - '*' port: name: http number: 80 protocol: HTTP """ gateway_spec=yaml.safe_load(gateway) custom_api.create_namespaced_custom_object(group="networking.istio.io", version="v1alpha3", namespace=namespace, plural="gateways", body=gateway_spec)
_____no_output_____
Apache-2.0
apps/networking/ble-localization/onprem/seldon/blerssi-seldon.ipynb
Karthik-Git-Sudo786/cisco-kubeflow-starter-pack
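Optionally, read the object back to confirm it was created — a minimal sketch using the same `custom_api` client:

```python
# Fetch the gateway we just created; a clean response means it exists.
gw = custom_api.get_namespaced_custom_object(
    group="networking.istio.io", version="v1alpha3",
    namespace=namespace, plural="gateways",
    name=gateway_spec["metadata"]["name"])
print("Gateway %s is present" % gw["metadata"]["name"])
```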
Adding a new inference server

The list of available inference servers in Seldon Core is maintained in the **seldon-config** configmap, which lives in the same namespace as your Seldon Core operator. In particular, the **predictor_servers** key holds the JSON config for each inference server. Refer to the [Seldon Core documentation](https://docs.seldon.io/projects/seldon-core/en/v1.1.0/servers/custom.html) for more information.
api_client.patch_namespaced_config_map(name="seldon-config", namespace="kubeflow",pretty=True, body={"data":{"predictor_servers":'{"MLFLOW_SERVER":{"grpc":{"defaultImageVersion":"1.2.1","image":"seldonio/mlflowserver_grpc"},"rest":{"defaultImageVersion":"1.2.1","image":"seldonio/mlflowserver_rest"}},"SKLEARN_SERVER":{"grpc":{"defaultImageVersion":"1.2.1","image":"seldonio/sklearnserver_grpc"},"rest":{"defaultImageVersion":"1.2.1","image":"seldonio/sklearnserver_rest"}},"TENSORFLOW_SERVER":{"grpc":{"defaultImageVersion":"1.2.1","image":"seldonio/tfserving-proxy_grpc"},"rest":{"defaultImageVersion":"1.2.1","image":"seldonio/tfserving-proxy_rest"},"tensorflow":true,"tfImage":"tensorflow/serving:2.1.0"},"XGBOOST_SERVER":{"grpc":{"defaultImageVersion":"1.2.1","image":"seldonio/xgboostserver_grpc"},"rest":{"defaultImageVersion":"1.2.1","image":"seldonio/xgboostserver_rest"}}, "CUSTOM_INFERENCE_SERVER":{"rest":{"defaultImageVersion":"1.0","image":"samba07/blerssi-seldon"}}}'}})
_____no_output_____
Apache-2.0
apps/networking/ble-localization/onprem/seldon/blerssi-seldon.ipynb
Karthik-Git-Sudo786/cisco-kubeflow-starter-pack
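A quick read-back — a sketch, assuming `api_client` is the same CoreV1Api instance used above — confirms the custom server entry landed in the configmap:

```python
import json

# Read seldon-config back and show the CUSTOM_INFERENCE_SERVER entry.
cm = api_client.read_namespaced_config_map(name="seldon-config", namespace="kubeflow")
servers = json.loads(cm.data["predictor_servers"])
print(json.dumps(servers.get("CUSTOM_INFERENCE_SERVER"), indent=2))
```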
Seldon Serving Deployment

Create a **SeldonDeployment** with the blerssi model.
pvcname = !(echo $HOSTNAME | sed 's/.\{2\}$//')
pvc = "workspace-" + pvcname[0]

seldon_deploy = f"""apiVersion: machinelearning.seldon.io/v1alpha2
kind: SeldonDeployment
metadata:
  name: blerssi
  namespace: {namespace}
spec:
  name: blerssi
  predictors:
  - graph:
      children: []
      implementation: CUSTOM_INFERENCE_SERVER
      modelUri: pvc://{pvc}/{MODEL_EXPORT_PATH}
      name: blerssi
    explainer:
      containerSpec:
        image: seldonio/alibiexplainer:1.2.2-dev
        name: explainer
      type: AnchorTabular
      modelUri: pvc://{pvc}/{EXPLAINER_PATH}
    name: default
    replicas: 1
"""
seldon_deploy_spec = yaml.safe_load(seldon_deploy)
custom_api.create_namespaced_custom_object(group="machinelearning.seldon.io", version="v1alpha2",
                                           namespace=namespace, plural="seldondeployments",
                                           body=seldon_deploy_spec)
_____no_output_____
Apache-2.0
apps/networking/ble-localization/onprem/seldon/blerssi-seldon.ipynb
Karthik-Git-Sudo786/cisco-kubeflow-starter-pack
Wait for the deployment state to become Available
status = False
while True:
    seldon_status = custom_api.get_namespaced_custom_object_status(
        group="machinelearning.seldon.io", version="v1alpha2",
        namespace=namespace, plural="seldondeployments",
        name=seldon_deploy_spec["metadata"]["name"])
    if seldon_status["status"]["state"] == "Available":
        status = True
        print("Status: %s" % seldon_status["status"]["state"])
    if status:
        break
    print("Status: %s" % seldon_status["status"]["state"])
    sleep(30)
Status: Creating Status: Creating Status: Available
Apache-2.0
apps/networking/ble-localization/onprem/seldon/blerssi-seldon.ipynb
Karthik-Git-Sudo786/cisco-kubeflow-starter-pack
Run a Prediction
CLUSTER = 'ucs'  # where your cluster is running: 'gcp' or 'ucs'

%%bash -s "$CLUSTER" --out NODE_IP
if [ $1 = "ucs" ]
then
    echo "$(kubectl get node -o=jsonpath='{.items[0].status.addresses[0].address}')"
else
    echo "$(kubectl get node -o=jsonpath='{.items[0].status.addresses[1].address}')"
fi

%%bash --out INGRESS_PORT
INGRESS_GATEWAY="istio-ingressgateway"
echo "$(kubectl -n istio-system get service $INGRESS_GATEWAY -o jsonpath='{.spec.ports[1].nodePort}')"
_____no_output_____
Apache-2.0
apps/networking/ble-localization/onprem/seldon/blerssi-seldon.ipynb
Karthik-Git-Sudo786/cisco-kubeflow-starter-pack
Data for prediction
df_full = pd.read_csv(os.path.join(path, 'data/iBeacon_RSSI_Unlabeled_truncated.csv'))  # Unlabeled dataset

# Input data preprocessing
df_full = df_full.drop(['date'], axis=1)
df_full = df_full.drop(['location'], axis=1)
df_full[FEATURES] = (df_full[FEATURES]) / (-200)
input_data = df_full.to_numpy()[:1]
input_data

headers = {"Content-Type": "application/json"}

def inference_predict(X):
    data = {"data": {"ndarray": X.tolist()}}
    url = f"http://{NODE_IP.strip()}:{INGRESS_PORT.strip()}/seldon/{namespace}/%s/api/v1.0/predictions" % seldon_deploy_spec["metadata"]["name"]
    response = requests.post(url, data=json.dumps(data), headers=headers)
    probabilities = response.json()['data']['ndarray']
    for prob in probabilities:
        cls_id = np.argmax(prob)
        print("Probability: %s" % prob[cls_id])
        print("Class-id: %s" % cls_id)

def explain(X):
    if np.ndim(X) == 2:
        data = {"data": {"ndarray": X.tolist()}}
    else:
        data = {"data": {"ndarray": [X.tolist()]}}
    url = f"http://{NODE_IP.strip()}:{INGRESS_PORT.strip()}/seldon/{namespace}/%s-explainer/default/api/v1.0/explain" % seldon_deploy_spec["metadata"]["name"]
    response = requests.post(url, data=json.dumps(data), headers=headers)
    print('Anchor: %s' % (' AND '.join(response.json()['names'])))
    print('Coverage: %.2f' % response.json()['coverage'])

inference_predict(input_data)
Probability: 0.6692667603492737 Class-id: 14
Apache-2.0
apps/networking/ble-localization/onprem/seldon/blerssi-seldon.ipynb
Karthik-Git-Sudo786/cisco-kubeflow-starter-pack
Explain the model's prediction
explain(input_data)
Anchor: b3009 <= 1.00 AND 0.40 < b3004 <= 1.00 AND 0.39 < b3002 <= 1.00 AND b3012 <= 1.00 AND b3011 <= 1.00 AND b3013 <= 1.00 AND b3006 <= 1.00 AND b3003 <= 1.00 AND b3010 <= 1.00 AND b3005 <= 1.00 AND b3001 <= 1.00 AND b3007 <= 1.00 AND b3008 <= 1.00 Coverage: 0.48
Apache-2.0
apps/networking/ble-localization/onprem/seldon/blerssi-seldon.ipynb
Karthik-Git-Sudo786/cisco-kubeflow-starter-pack
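The anchor explanation from alibi typically also carries a `precision` score alongside `names` and `coverage`. A variant of `explain()` could surface it — this is a sketch, and the `precision` key is an assumption about the response payload, not something shown in the output above:

```python
def explain_with_precision(X):
    data = {"data": {"ndarray": [X.tolist()] if np.ndim(X) == 1 else X.tolist()}}
    url = f"http://{NODE_IP.strip()}:{INGRESS_PORT.strip()}/seldon/{namespace}/%s-explainer/default/api/v1.0/explain" % seldon_deploy_spec["metadata"]["name"]
    r = requests.post(url, data=json.dumps(data), headers=headers).json()
    print('Anchor: %s' % ' AND '.join(r['names']))
    print('Precision: %.2f' % r.get('precision', float('nan')))  # assumed key
    print('Coverage: %.2f' % r['coverage'])
```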
Clean Up

Delete the gateway
custom_api.delete_namespaced_custom_object(group="networking.istio.io", version="v1alpha3", namespace=namespace, plural="gateways", name=gateway_spec["metadata"]["name"],body=k8s_client.V1DeleteOptions())
_____no_output_____
Apache-2.0
apps/networking/ble-localization/onprem/seldon/blerssi-seldon.ipynb
Karthik-Git-Sudo786/cisco-kubeflow-starter-pack
Delete Seldon Serving Deployment
custom_api.delete_namespaced_custom_object(group="machinelearning.seldon.io", version="v1alpha2", namespace=namespace, plural="seldondeployments", name=seldon_deploy_spec["metadata"]["name"], body=k8s_client.V1DeleteOptions())
_____no_output_____
Apache-2.0
apps/networking/ble-localization/onprem/seldon/blerssi-seldon.ipynb
Karthik-Git-Sudo786/cisco-kubeflow-starter-pack
Delete model and explainer folders from notebook
!rm -rf $EXPLAINER_PATH
!rm -rf $TF_MODEL_DIR
_____no_output_____
Apache-2.0
apps/networking/ble-localization/onprem/seldon/blerssi-seldon.ipynb
Karthik-Git-Sudo786/cisco-kubeflow-starter-pack
MobileCoin Example Wallet

This is an example python client that interacts with `mobilecoind` to manage a MobileCoin wallet. You must start the `mobilecoind` daemon in order to run a wallet; see the mobilecoind README for more information. To run this notebook, make sure you have the requirements installed and that you have compiled the grpc protos:

```
cd mobilecoind/clients/python/jupyter
./install.sh
jupyter notebook
```
from mobilecoin import Client
_____no_output_____
Apache-2.0
mobilecoind/clients/python/jupyter/wallet.ipynb
MCrank/mobilecoin
Start the Mob Client

The client talks to your local `mobilecoind`. See mobilecoind/README.md for information on how to set it up.
client = Client("localhost:4444", ssl=False)
_____no_output_____
Apache-2.0
mobilecoind/clients/python/jupyter/wallet.ipynb
MCrank/mobilecoin
Input Root Entropy for Account

Note: The root entropy is sensitive material. It is used as the seed to create your account keys. Anyone with your root entropy can steal your MobileCoin.
entropy = "4ec2c081e764f4189afba528956c05804a448f55f24cc3d04c9ef7e807a93bcd" credentials_response = client.get_account_key(bytes.fromhex(entropy))
_____no_output_____
Apache-2.0
mobilecoind/clients/python/jupyter/wallet.ipynb
MCrank/mobilecoin
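If you need fresh entropy for a new account, 32 random bytes from a cryptographically secure source will do. A minimal sketch using only the standard library:

```python
import secrets

# Generate 32 bytes (64 hex characters) of root entropy; treat it like a private key.
new_entropy = secrets.token_hex(32)
```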
Monitor your Account

Monitoring an account means that mobilecoind will persist the transactions that belong to you to a local database. This allows you to retrieve your funds and calculate your balance, as well as to construct and submit transactions.

Note: MobileCoin uses accounts and subaddresses for managing funds. You can optionally specify a range of subaddresses to monitor. See mob_client.py for more information.
monitor_id_response = client.add_monitor(credentials_response.account_key)
_____no_output_____
Apache-2.0
mobilecoind/clients/python/jupyter/wallet.ipynb
MCrank/mobilecoin
Check Balance

You will need to provide a subaddress index. Most people will only use one subaddress and can default to 0. Exchanges, or users who want to generate lots of new public addresses, may use multiple subaddresses.
subaddress_index = 0
client.get_balance(monitor_id_response.monitor_id, subaddress_index)
_____no_output_____
Apache-2.0
mobilecoind/clients/python/jupyter/wallet.ipynb
MCrank/mobilecoin
Send a Transaction

MobileCoin uses "request codes" to wrap public addresses. See below for how to generate request codes.
address_code = "2nTy8m2VE5UMtfqRf12gEjZmFHKNTDEtNufQZNvE713ytYvdu2kqpbcncHJUSLwmgTCkB56Li9fsGwJF9LRYEQvoQCDzqVQEJETDNQKLzqHCzd" target_address_response = client.parse_request_code(address_code) # Construct the transaction txo_list_response = client.get_unspent_tx_output_list(monitor_id_response.monitor_id, subaddress_index) outlays = [{ 'value': 10, 'receiver': target_address_response.receiver }] tx_proposal_response = client.generate_tx( monitor_id_response.monitor_id, subaddress_index, txo_list_response.output_list, outlays ) # Send the transaction to consensus validators client.submit_tx(tx_proposal_response.tx_proposal)
_____no_output_____
Apache-2.0
mobilecoind/clients/python/jupyter/wallet.ipynb
MCrank/mobilecoin
Public Address (Request Code)
public_address_response = client.get_public_address(monitor_id_response.monitor_id, subaddress_index)
request_code_response = client.create_request_code(public_address_response.public_address)
print(f"Request code = {request_code_response}")
_____no_output_____
Apache-2.0
mobilecoind/clients/python/jupyter/wallet.ipynb
MCrank/mobilecoin
Show me the first lines of the original file
import pandas as pd

df = pd.read_excel('/tmp/gonzalo_test/aseg.xls')
df.head()
_____no_output_____
MIT
notebooks/Miscellaneous/Reshaping an Excel table.ipynb
xgrg/alfa
Show me the region names containing 'Vent' or 'WM' or 'Hippo'
names = set([each for each in df['StructName'].tolist()
             if 'WM' in each or 'Vent' in each or 'Hippo' in each])
names
_____no_output_____
MIT
notebooks/Miscellaneous/Reshaping an Excel table.ipynb
xgrg/alfa
Reshape the table and show me the first lines
df = pd.DataFrame(df[df['StructName'].isin(names)], columns=['subject', 'StructName', 'Volume_mm3'])
df = df.pivot(index='subject', columns='StructName', values='Volume_mm3')
df.head()
_____no_output_____
MIT
notebooks/Miscellaneous/Reshaping an Excel table.ipynb
xgrg/alfa
Save it, and success!
df.to_excel('/tmp/gonzalo_test/aseg_pivot.xls')

from IPython.display import Image
Image(url='http://s2.quickmeme.com/img/c3/c37a6cc5f88867e5387b8787aaf67afc350b3f37f357ed0a3088241488063bce.jpg')
_____no_output_____
MIT
notebooks/Miscellaneous/Reshaping an Excel table.ipynb
xgrg/alfa
Temperature and reaction time affect the %yield. Develop a model for %yield in terms of temperature and time.
import pandas as mypanda
import numpy as np
from scipy import stats as mystats
import matplotlib.pyplot as myplot
from pandas.plotting import scatter_matrix
from statsmodels.formula.api import ols as myols
from statsmodels.stats.anova import anova_lm

myData = mypanda.read_csv('datasets/Mult_Reg_Yield.csv')
myData

tmp = myData.Temperature
yld = myData.Yield
time = myData.Time
_____no_output_____
Apache-2.0
Regression_Analysis_Chemical_Process.ipynb
mohan-mj/Regression_Analysis
Check for relationships now
scatter_matrix(myData)
myplot.show()
C:\ProgramData\Anaconda3\lib\site-packages\ipykernel_launcher.py:1: FutureWarning: 'pandas.tools.plotting.scatter_matrix' is deprecated, import 'pandas.plotting.scatter_matrix' instead. """Entry point for launching an IPython kernel.
Apache-2.0
Regression_Analysis_Chemical_Process.ipynb
mohan-mj/Regression_Analysis
The correlation between the predictors (x's) and y should be high
np.corrcoef(tmp, yld)
np.corrcoef(time, yld)
np.corrcoef(time, tmp)

mymodel = myols("yld ~ time + tmp", myData)
mymodel = mymodel.fit()
mymodel.summary()
C:\ProgramData\Anaconda3\lib\site-packages\scipy\stats\stats.py:1334: UserWarning: kurtosistest only valid for n>=20 ... continuing anyway, n=16 "anyway, n=%i" % int(n))
Apache-2.0
Regression_Analysis_Chemical_Process.ipynb
mohan-mj/Regression_Analysis
Check the p-values ==> only time is significantly related to yield
mymodel=myols("yld ~ time ",myData).fit() mymodel.summary() pred=mymodel.predict() res=yld-pred res #print(yld, res) myplot.scatter(yld,pred) myplot.show() mystats.probplot(res,plot=myplot) myplot.show() mystats.normaltest(res)
C:\ProgramData\Anaconda3\lib\site-packages\scipy\stats\stats.py:1334: UserWarning: kurtosistest only valid for n>=20 ... continuing anyway, n=16 "anyway, n=%i" % int(n))
Apache-2.0
Regression_Analysis_Chemical_Process.ipynb
mohan-mj/Regression_Analysis
This implies the residuals are normally distributed
myplot.scatter(time, res)
myplot.show()

myplot.scatter(pred, res)
myplot.show()
_____no_output_____
Apache-2.0
Regression_Analysis_Chemical_Process.ipynb
mohan-mj/Regression_Analysis
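With the time-only model fitted, predicting %yield at a new reaction time is a one-liner. A sketch — the value 40 is an arbitrary illustrative time, not a point from the dataset:

```python
# Predict %yield for a hypothetical reaction time of 40 units.
new_point = mypanda.DataFrame({'time': [40]})
print(mymodel.predict(new_point))
```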
**Data Analysis with Python and Pandas**
# Mount the drive in the virtual environment, giving access to Drive files
from google.colab import drive
drive.mount('/content/drive')

# Lets you pick a file from your machine to upload to Colab
from google.colab import files
arq = files.upload()

from google.colab import drive
drive.mount('/content/drive')
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
MIT
pandasProjectCognizant/project_python_Pandas.ipynb
luizpavanello/cognizant_bootcamp_DIO
*Importing the Pandas library*
# Importing the Pandas library
import pandas as pd
_____no_output_____
MIT
pandasProjectCognizant/project_python_Pandas.ipynb
luizpavanello/cognizant_bootcamp_DIO
*Reading files*
# Reading the CSV
df = pd.read_csv("/content/drive/MyDrive/Datasets/Gapminder.csv", error_bad_lines=False, sep=";")

# Viewing the first 5 rows
df.head()
_____no_output_____
MIT
pandasProjectCognizant/project_python_Pandas.ipynb
luizpavanello/cognizant_bootcamp_DIO
*Renaming Columns*
df = df.rename(columns={'country': 'Country', 'continent': 'Continent', 'year': 'Year',
                        'lifeExp': 'LifeExp', 'pop': 'Population', 'gdpPercap': 'PIB'})
df.head()
_____no_output_____
MIT
pandasProjectCognizant/project_python_Pandas.ipynb
luizpavanello/cognizant_bootcamp_DIO
*Working with the file's Rows and Columns*
# Number of rows and columns in the file
df.shape

# Column names
df.columns
_____no_output_____
MIT
pandasProjectCognizant/project_python_Pandas.ipynb
luizpavanello/cognizant_bootcamp_DIO
# Data type of each column
df.dtypes

# Last 5 rows of the file by default (df.tail(10) → last 10 rows...)
df.tail()

# Summary statistics (count, mean, std, ...) for the numeric columns
df.describe()
_____no_output_____
MIT
pandasProjectCognizant/project_python_Pandas.ipynb
luizpavanello/cognizant_bootcamp_DIO
*Working with Filters*
df['Continent'].unique()

Oceania = df.loc[df['Continent'] == 'Oceania']
Oceania.head()

Oceania['Continent'].unique()

df.groupby('Continent')['Country'].nunique()

df.groupby('Year')['LifeExp'].mean()

df['PIB'].mean()

df['PIB'].sum()
_____no_output_____
MIT
pandasProjectCognizant/project_python_Pandas.ipynb
luizpavanello/cognizant_bootcamp_DIO
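Filters and groupbys compose naturally. A short sketch reusing the `Oceania` frame from above:

```python
# Average life expectancy per Oceania country, across all years.
Oceania.groupby('Country')['LifeExp'].mean()
```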
**Working with Excel Spreadsheets** *Reading the Files*
df1 = pd.read_excel("/content/drive/MyDrive/Datasets/Aracaju.xlsx")
df2 = pd.read_excel("/content/drive/MyDrive/Datasets/Fortaleza.xlsx")
df3 = pd.read_excel("/content/drive/MyDrive/Datasets/Natal.xlsx")
df4 = pd.read_excel("/content/drive/MyDrive/Datasets/Recife.xlsx")
df5 = pd.read_excel("/content/drive/MyDrive/Datasets/Salvador.xlsx")

# Concatenating all the files
df = pd.concat([df1, df2, df3, df4, df5])

# Showing the first 5 rows
df.head()

# Showing the last 5 rows
df.tail()

df.sample(5)

# Checking the data type of each column
df.dtypes

# Changing the data type of the LojaID column [int64 → object]
df['LojaID'] = df['LojaID'].astype('object')
df.dtypes
_____no_output_____
MIT
pandasProjectCognizant/project_python_Pandas.ipynb
luizpavanello/cognizant_bootcamp_DIO
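A quick consistency check — a sketch — confirms no rows were lost in the concatenation:

```python
# The concatenated frame should have exactly as many rows as the
# five city frames combined.
assert len(df) == sum(len(d) for d in (df1, df2, df3, df4, df5))
```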
***Handling missing values***
# Checking for rows with missing values
df.isnull().sum()

# Dropping rows with null values
df.dropna(inplace=True)

# Dropping rows with null values based on a single column only
df.dropna(subset=['Vendas'], inplace=True)

# Removing rows that have missing values in every column
df.dropna(how='all', inplace=True)
_____no_output_____
MIT
pandasProjectCognizant/project_python_Pandas.ipynb
luizpavanello/cognizant_bootcamp_DIO
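Dropping is not the only option; filling is often preferable when a missing sale can be treated as zero. A sketch, shown for comparison and not used below:

```python
# Replace missing sales with 0 instead of discarding the rows.
df['Vendas'].fillna(0, inplace=True)
```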
***Creating new columns***
# Creating the revenue column
df['Receita'] = df['Vendas'].mul(df['Qtde'])
df.head()

df.tail()

df['Receita/Venda'] = df['Receita'] / df['Vendas']
df.head()

# Returning the highest revenue
df['Receita'].max()

# Returning the lowest revenue
df['Receita'].min()

# nlargest
df.nlargest(3, 'Receita')

# nsmallest
df.nsmallest(3, 'Receita')

# Grouping by city
df.groupby('Cidade')['Receita'].sum()

# Sorting the data set
df.sort_values('Receita', ascending=False).head(8)
_____no_output_____
MIT
pandasProjectCognizant/project_python_Pandas.ipynb
luizpavanello/cognizant_bootcamp_DIO
***Working with dates***
# Converting the date column to integer type
df['Data'] = df['Data'].astype('int64')

# Checking the data type of each column
df.dtypes

# Converting the Data column to datetime
df['Data'] = pd.to_datetime(df['Data'])
df.dtypes

# Grouping by year
df.groupby(df['Data'].dt.year)['Receita'].sum()

# Creating a new column with the year
df['Ano_Venda'] = df['Data'].dt.year
df.sample(5)

# Extracting the month and the day
df['mes_venda'], df['dia_venda'] = (df['Data'].dt.month, df['Data'].dt.day)
df.sample(5)

# Returning the earliest date
df['Data'].min()

# Returning the latest date
df['Data'].max()

# Computing the difference in days
df['Diferenca_dias'] = df['Data'] - df['Data'].min()
df.sample(5)

# Creating the quarter column
df['Trimestre'] = df['Data'].dt.quarter
df.sample(5)

# Filtering the January 2019 sales
vendas_jan_19 = df.loc[(df['Data'].dt.year == 2019) & (df['Data'].dt.month == 1)]
vendas_jan_19
_____no_output_____
MIT
pandasProjectCognizant/project_python_Pandas.ipynb
luizpavanello/cognizant_bootcamp_DIO
**Data Visualization**
df['LojaID'].value_counts(ascending=False)
_____no_output_____
MIT
pandasProjectCognizant/project_python_Pandas.ipynb
luizpavanello/cognizant_bootcamp_DIO
***Charts***
# Bar chart
df['LojaID'].value_counts(ascending=False).plot.bar();

# Horizontal bar chart
df['LojaID'].value_counts().plot.barh();

# Horizontal bar chart, ascending
df['LojaID'].value_counts(ascending=True).plot.barh();

# Pie chart
df.groupby(df['Data'].dt.year)['Receita'].sum().plot.pie();

# Total sales per city
df['Cidade'].value_counts()

# Adding a title and renaming the axes
import matplotlib.pyplot as plt
df['Cidade'].value_counts().plot.bar(title='Total de vendas por Cidade')
plt.xlabel('Cidade')
plt.ylabel('Total de vendas');

# Changing the chart color
import matplotlib.pyplot as plt
df['Cidade'].value_counts().plot.bar(title='Total de vendas por Cidade', color='green')
plt.xlabel('Cidade')
plt.ylabel('Total de vendas');

# Changing the style
plt.style.use('ggplot')
df.groupby(df['mes_venda'])['Qtde'].sum().plot(title='Total de Vendas')
plt.xlabel('Mes')
plt.ylabel('Venda')
plt.legend();

df.groupby(df['mes_venda'])['Qtde'].sum()

# Selecting only the 2019 sales
df_2019 = df[df['Ano_Venda'] == 2019]
df_2019

# Total sold per month
df_2019.groupby(df_2019['mes_venda'])['Qtde'].sum().plot(marker='v')
plt.xlabel('Mes')
plt.ylabel('Total de Produtos Vendidos')
plt.legend();

# Histogram
plt.hist(df['Qtde'], color='darkturquoise');

plt.scatter(x=df_2019['dia_venda'], y=df_2019['Receita']);

# Saving as PNG
df_2019.groupby(df_2019['mes_venda'])['Qtde'].sum().plot(marker='v')
plt.title('Quantidade de produtos vendidos x mes')
plt.xlabel('Mes')
plt.ylabel('Total de Produtos Vendidos')
plt.legend()
plt.savefig('grafico Qtde x mes.png');
_____no_output_____
MIT
pandasProjectCognizant/project_python_Pandas.ipynb
luizpavanello/cognizant_bootcamp_DIO
**Exploratory Analysis**
plt.style.use('seaborn')

# File upload
from google.colab import files
arq = files.upload()

# Creating our DataFrame
df = pd.read_excel("/content/drive/MyDrive/Datasets/AdventureWorks.xlsx")
df.head()

# Number of rows and columns
df.shape

# Checking the data types
df.dtypes

# What is the total revenue?
df['Valor Venda'].sum()

# What is the total cost?
df['Custo'] = df['Custo Unitário'].mul(df['Quantidade'])  # Creating the cost column
df.head(1)

# What is the total cost?
round(df['Custo'].sum(), 2)

# Now that we have revenue and cost, we can find the total profit.
# Let's create a Lucro (profit) column: revenue minus cost
df['Lucro'] = df['Valor Venda'] - df['Custo']
df.head(1)

# Total profit
round(df['Lucro'].sum(), 2)

# Creating a column with the total days to ship the product
df['Tempo_envio'] = df['Data Envio'] - df['Data Venda']
df.head(1)

# Extracting just the days
df['Tempo_envio'] = (df['Data Envio'] - df['Data Venda']).dt.days
df.head(1)

# Checking the type of the Tempo_envio column
df['Tempo_envio'].dtype

# Average shipping time per brand
df.groupby('Marca')['Tempo_envio'].mean()

# Checking whether we have missing data
df.isnull().sum()

# Grouping by year and brand
df.groupby([df['Data Venda'].dt.year, 'Marca'])['Lucro'].sum()

# Resetting the index
lucro_ano = df.groupby([df['Data Venda'].dt.year, 'Marca'])['Lucro'].sum().reset_index()
lucro_ano

# What is the total number of products sold?
df.groupby('Produto')['Quantidade'].sum().sort_values(ascending=False)

# Chart of total products sold
df.groupby('Produto')['Quantidade'].sum().sort_values(ascending=True).plot.barh(title='Total Produtos Vendidos')
plt.xlabel('Total')
plt.ylabel('Produto');

# Selecting only the 2009 sales
df_2009 = df[df['Data Venda'].dt.year == 2009]
df_2009.head()

df_2009.groupby(df_2009["Data Venda"].dt.month)["Lucro"].sum().plot(title="Lucro x Mês")
plt.xlabel("Mês")
plt.ylabel("Lucro");

df_2009.groupby("Marca")["Lucro"].sum().plot.bar(title="Lucro x Marca")
plt.xlabel("Marca")
plt.ylabel("Lucro")
plt.xticks(rotation='horizontal');

df["Tempo_envio"].describe()

# Boxplot
plt.boxplot(df["Tempo_envio"]);

# Histogram
plt.hist(df["Tempo_envio"]);

# Minimum shipping time
df["Tempo_envio"].min()

# Maximum shipping time
df['Tempo_envio'].max()

# Identifying the outlier
df[df["Tempo_envio"] == 20]

df.to_csv('Project Python_Pandas.csv', index=False)
_____no_output_____
MIT
pandasProjectCognizant/project_python_Pandas.ipynb
luizpavanello/cognizant_bootcamp_DIO
Aerospike Java Client – Advanced Collection Data Types

*Last updated: June 22, 2021*

The goal of this tutorial is to highlight the power of working with [collection data types (CDTs)](https://docs.aerospike.com/docs/guide/cdt.html) in Aerospike. It covers the following topics:

1. Setting [contexts (CTXs)](https://docs.aerospike.com/docs/guide/cdt-context.html) to apply operations to nested Maps and Lists.
2. Showing the return type options provided by CDT get/read operations.
3. Highlighting how policies shape application transactions.

This [Jupyter Notebook](https://jupyter-notebook.readthedocs.io/en/stable/notebook.html) requires the Aerospike Database running locally with the Java kernel and the Aerospike Java Client. To create a Docker container that satisfies the requirements and holds a copy of these notebooks, visit the [Aerospike Notebooks Repo](https://github.com/aerospike-examples/interactive-notebooks).

Prerequisites

This Notebook builds on the material in the following notebooks:

1. [Working with Lists](./java-working_with_lists.ipynb)
2. [Working with Maps](./java-working_with_maps.ipynb)
3. [Introduction to Transactions](./java-intro_to_transactions.ipynb)

It uses examples based on those from [Modeling Using Lists](./java-modeling_using_lists.ipynb) and Working with Maps. If any of the following is confusing, please refer to the relevant notebook.

Notebook Setup

Import Jupyter Java Integration

Make it easier to work with Java in Jupyter.
import io.github.spencerpark.ijava.IJava;
import io.github.spencerpark.jupyter.kernel.magic.common.Shell;

IJava.getKernelInstance().getMagics().registerMagics(Shell.class);
_____no_output_____
MIT
notebooks/java/java-advanced_collection_data_types.ipynb
markprincely/interactive-notebooks
Start Aerospike

Ensure Aerospike Database is running locally.
%sh asd
_____no_output_____
MIT
notebooks/java/java-advanced_collection_data_types.ipynb
markprincely/interactive-notebooks
Download the Aerospike Java Client

Ask Maven to download and install the Aerospike Java Client, declared via the project object model (POM) snippet below.
%%loadFromPOM
<dependencies>
  <dependency>
    <groupId>com.aerospike</groupId>
    <artifactId>aerospike-client</artifactId>
    <version>5.0.0</version>
  </dependency>
</dependencies>
_____no_output_____
MIT
notebooks/java/java-advanced_collection_data_types.ipynb
markprincely/interactive-notebooks
Start the Aerospike Java Client and Connect

Create an instance of the Aerospike Java Client and connect to the demo cluster.

The default cluster location for the Docker container is *localhost* port *3000*. If your cluster is not running on your local machine, modify *localhost* and *3000* to the values for your Aerospike cluster.
import com.aerospike.client.AerospikeClient;

AerospikeClient client = new AerospikeClient("localhost", 3000);
System.out.println("Initialized the client and connected to the cluster.");
Initialized the client and connected to the cluster.
MIT
notebooks/java/java-advanced_collection_data_types.ipynb
markprincely/interactive-notebooks
Create CDT Data, Put into Aerospike, and Print It
import com.aerospike.client.Key;
import com.aerospike.client.Bin;
import com.aerospike.client.policy.ClientPolicy;
import com.aerospike.client.Record;
import com.aerospike.client.Operation;
import com.aerospike.client.Value;
import com.aerospike.client.cdt.ListOperation;
import com.aerospike.client.cdt.ListPolicy;
import com.aerospike.client.cdt.ListOrder;
import com.aerospike.client.cdt.ListWriteFlags;
import com.aerospike.client.cdt.MapOperation;
import com.aerospike.client.cdt.MapPolicy;
import com.aerospike.client.cdt.MapOrder;
import com.aerospike.client.cdt.MapWriteFlags;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Create whale migration list of tuples.
ArrayList<Value> whaleMigration0 = new ArrayList<Value>();
whaleMigration0.add(Value.get(1420));
whaleMigration0.add(Value.get("beluga whale"));
whaleMigration0.add(Value.get("Beaufort Sea"));
whaleMigration0.add(Value.get("Bering Sea"));

ArrayList<Value> whaleMigration1 = new ArrayList<Value>();
whaleMigration1.add(Value.get(13988));
whaleMigration1.add(Value.get("gray whale"));
whaleMigration1.add(Value.get("Baja California"));
whaleMigration1.add(Value.get("Chukchi Sea"));

ArrayList<Value> whaleMigration2 = new ArrayList<Value>();
whaleMigration2.add(Value.get(1278));
whaleMigration2.add(Value.get("north pacific right whale"));
whaleMigration2.add(Value.get("Japan"));
whaleMigration2.add(Value.get("Sea of Okhotsk"));

ArrayList<Value> whaleMigration3 = new ArrayList<Value>();
whaleMigration3.add(Value.get(5100));
whaleMigration3.add(Value.get("humpback whale"));
whaleMigration3.add(Value.get("Columbia"));
whaleMigration3.add(Value.get("Antarctic Peninsula"));

ArrayList<Value> whaleMigration4 = new ArrayList<Value>();
whaleMigration4.add(Value.get(3100));
whaleMigration4.add(Value.get("southern hemisphere blue whale"));
whaleMigration4.add(Value.get("Corcovado Gulf"));
whaleMigration4.add(Value.get("The Galapagos"));

ArrayList<Value> whaleMigration = new ArrayList<Value>();
whaleMigration.add(Value.get(whaleMigration0));
whaleMigration.add(Value.get(whaleMigration1));
whaleMigration.add(Value.get(whaleMigration2));
whaleMigration.add(Value.get(whaleMigration3));
whaleMigration.add(Value.get(whaleMigration4));

// Create Map of Whale Observations
HashMap<Value, Value> mapObs = new HashMap<Value, Value>();
HashMap<String, Integer> mapCoords0 = new HashMap<String, Integer>();
mapCoords0.put("lat", -85);
mapCoords0.put("long", -130);
HashMap<String, Integer> mapCoords1 = new HashMap<String, Integer>();
mapCoords1.put("lat", -25);
mapCoords1.put("long", -50);
HashMap<String, Integer> mapCoords2 = new HashMap<String, Integer>();
mapCoords2.put("lat", 35);
mapCoords2.put("long", 30);
mapObs.put(Value.get(13456), Value.get(mapCoords1));
mapObs.put(Value.get(14567), Value.get(mapCoords2));
mapObs.put(Value.get(12345), Value.get(mapCoords0));

// Put data in Aerospike, get the data, and print it
String nestedCDTSetName = "nestedset1";
String nestedCDTNamespaceName = "test";
Integer whaleMigrationWriteFlags = ListWriteFlags.ADD_UNIQUE | ListWriteFlags.NO_FAIL | ListWriteFlags.PARTIAL;
ListPolicy whaleMigrationPolicy = new ListPolicy(ListOrder.UNORDERED, whaleMigrationWriteFlags);
MapPolicy mapObsPolicy = new MapPolicy(MapOrder.KEY_ORDERED, MapWriteFlags.DEFAULT);
Integer whaleKeyName = 2;
String listWhaleBinName = "listwhalebin";
String mapObsBinName = "mapobsbin";
Bin bin1 = new Bin(listWhaleBinName, whaleMigration);
Key whaleKey = new Key(nestedCDTNamespaceName, nestedCDTSetName, whaleKeyName);
Record putDataIn = client.operate(client.writePolicyDefault, whaleKey,
    Operation.put(bin1),
    MapOperation.putItems(mapObsPolicy, mapObsBinName, mapObs)
);
System.out.println(listWhaleBinName + ": " + whaleMigration + "\n\n" +
                   mapObsBinName + ": " + mapObs);
listwhalebin: [[1420, beluga whale, Beaufort Sea, Bering Sea], [13988, gray whale, Baja California, Chukchi Sea], [1278, north pacific right whale, Japan, Sea of Okhotsk], [5100, humpback whale, Columbia, Antarctic Peninsula], [3100, southern hemisphere blue whale, Corcovado Gulf, The Galapagos]] mapobsbin: {13456={lat=-25, long=-50}, 14567={lat=35, long=30}, 12345={lat=-85, long=-130}}
MIT
notebooks/java/java-advanced_collection_data_types.ipynb
markprincely/interactive-notebooks
Using Contexts (CTXs) to work with Nested CDTs

What are Nested CDTs and CTXs?

What is a Nested CDT?

The primary use case of Key-Value Stores, like Aerospike Database, is to store document-oriented data, like a JSON map. As document-oriented data grows organically, it is common for one CDT (list or map) to contain another CDT. Does the application need a list in a map in a list in a map? Aerospike fully supports nesting CDTs, so that's no problem.

What is a Context?

A Context (CTX) is a reference to a nested CDT, a List or Map that is stored in a List or Map somewhere in an Aerospike Bin. All [List](https://docs.aerospike.com/apidocs/java/com/aerospike/client/cdt/ListOperation.html) and [Map Operations](https://docs.aerospike.com/apidocs/java/com/aerospike/client/cdt/MapOperation.html) accept an optional CTX argument. Any CTX argument must refer to data of the type supported by the operation. The most common ways to access a CTX are to look up a Map CTX directly by its key within the Bin and to drill down within a List or Map by index, rank, or value. A CTX can also be created within a List or Map. For more details, see the [CTX APIs](https://docs.aerospike.com/apidocs/java/com/aerospike/client/cdt/CDT.html).

Look up a Map CTX in a Bin by Mapkey

Use the `mapKey` method to look up a CTX in a Map directly by mapkey. This works for a Map anywhere in a Bin. The following is an example of finding a Map CTX in a Bin by Mapkey:
import com.aerospike.client.cdt.CTX;
import com.aerospike.client.cdt.MapReturnType;

Integer lookupMapKey = 14567;
String latKeyName = "lat";

Record whaleSightings = client.operate(client.writePolicyDefault, whaleKey,
    MapOperation.getByKey(mapObsBinName, Value.get(latKeyName), MapReturnType.VALUE,
        CTX.mapKey(Value.get(lookupMapKey)))
);
System.out.println(mapObsBinName + ": " + mapObs);
System.out.println("The " + latKeyName + " of sighting at timestamp " + lookupMapKey + ": " +
    whaleSightings.getValue(mapObsBinName));
mapobsbin: {13456={lat=-25, long=-50}, 14567={lat=35, long=30}, 12345={lat=-85, long=-130}} The lat of sighting at timestamp 14567: 35
MIT
notebooks/java/java-advanced_collection_data_types.ipynb
markprincely/interactive-notebooks
Drill down into a List or Map

Here are the options to drill down into a CDT.

Drilling down to a CTX in a List:
* `listIndex`: Lookup list by index offset.
* `listRank`: Lookup list by rank.
* `listValue`: Lookup list by value.

Drilling down to a CTX in a Map:
* `mapIndex`: Lookup map by index offset.
* `mapRank`: Lookup map by rank.
* `mapValue`: Lookup map by value.

The following is an example of drilling down within a List and Map CTX:
import com.aerospike.client.cdt.ListReturnType;

// CDT drilldown values
Integer drilldownIndex = 2;
Integer drilldownRank = 1;
Value listDrilldownValue = Value.get(whaleMigration1);
Value mapDrilldownValue = Value.get(mapCoords0);

// Variables to access parts of the selected CDT.
Integer getIndex = 1;

Record theRecord = client.get(null, whaleKey);
// List operations take a ListReturnType; map operations take a MapReturnType.
Record drilldown = client.operate(client.writePolicyDefault, whaleKey,
    ListOperation.getByIndex(listWhaleBinName, getIndex, ListReturnType.VALUE, CTX.listIndex(drilldownIndex)),
    ListOperation.getByIndex(listWhaleBinName, getIndex, ListReturnType.VALUE, CTX.listRank(drilldownRank)),
    ListOperation.getByIndex(listWhaleBinName, getIndex, ListReturnType.VALUE, CTX.listValue(listDrilldownValue)),
    MapOperation.getByIndex(mapObsBinName, getIndex, MapReturnType.VALUE, CTX.mapIndex(drilldownIndex)),
    MapOperation.getByIndex(mapObsBinName, getIndex, MapReturnType.VALUE, CTX.mapRank(drilldownRank)),
    MapOperation.getByIndex(mapObsBinName, getIndex, MapReturnType.VALUE, CTX.mapValue(mapDrilldownValue))
);
List<?> returnWhaleList = drilldown.getList(listWhaleBinName);
List<?> returnObsList = drilldown.getList(mapObsBinName);

System.out.println("The whale migration list is: " + theRecord.getValue(listWhaleBinName) + "\n");
System.out.println("The whale name from the CTX selected by index " + drilldownIndex + ": " + returnWhaleList.get(0));
System.out.println("The whale name from the CTX selected by rank " + drilldownRank + ": " + returnWhaleList.get(1));
System.out.println("The whale name from the CTX selected by value " + listDrilldownValue + ": " + returnWhaleList.get(2) + "\n\n");
System.out.println("The observation map is: " + theRecord.getValue(mapObsBinName) + "\n");
System.out.println("The longitude of the observation from the CTX selected by index " + drilldownIndex + ": " + returnObsList.get(0));
System.out.println("The longitude of the observation from the CTX selected by rank " + drilldownRank + ": " + returnObsList.get(1));
System.out.println("The longitude of the observation from the CTX selected by value " + mapDrilldownValue + ": " + returnObsList.get(2));
The whale migration list is: [[1420, beluga whale, Beaufort Sea, Bering Sea], [13988, gray whale, Baja California, Chukchi Sea], [1278, north pacific right whale, Japan, Sea of Okhotsk], [5100, humpback whale, Columbia, Antarctic Peninsula], [3100, southern hemisphere blue whale, Corcovado Gulf, The Galapagos]] The whale name from the CTX selected by index 2: north pacific right whale The whale name from the CTX selected by rank 1: beluga whale The whale name from the CTX selected by value [13988, gray whale, Baja California, Chukchi Sea]: gray whale The observation map is: {12345={lat=-85, long=-130}, 13456={lat=-25, long=-50}, 14567={lat=35, long=30}} The longitude of the observation from the CTX selected by index 2: 30 The longitude of the observation from the CTX selected by rank 1: -50 The longitude of the observation from the CTX selected by value {lat=-85, long=-130}: -130
MIT
notebooks/java/java-advanced_collection_data_types.ipynb
markprincely/interactive-notebooks