Khabutdinov_Ildar/hw/done/waves_and_optics_hw.ipynb
###Markdown
Task 1
###Code
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # registers projection='3d' on older matplotlib versions

def Wave(lam, x):
    # harmonic wave profile with wavelength lam
    return np.cos(2 * np.pi / lam * x)

def WaveP(lam, x, v, t):
    # wave travelling in the negative x direction with speed v
    return Wave(lam, x + v * t)

def WaveN(lam, x, v, t):
    # wave travelling in the positive x direction with speed v
    return Wave(lam, x - v * t)
def f1(lam):
x_min = -2 * np.pi
x_max = 2 * np.pi
Nx = 101
x = []
for i in range(Nx):
x.append(x_min + (x_max - x_min) / (Nx - 1) * i)
t_min = 0
t_max = 50
v = 0.05
Nt = 101
t = []
for j in range(Nt):
t.append(t_min + ((t_max - t_min) / (Nt - 1)) * j)
M1 = np.zeros((Nt, Nx))
M2 = np.zeros((Nt, Nx))
for i in range(Nt):
for j in range(Nx):
M1[i, j] = WaveP(lam, x[j], v, t[i])
M2[i, j] = WaveN(lam, x[j], v, t[i])
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
p1 = ax1.plot(x, M1[0, :], 'r--', x, M1[39, :], 'g', x, M1[79, :], 'b')
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
p2 = ax2.plot(x, M2[0, :], 'r--', x, M2[39, :], 'g', x, M2[79, :], 'b')
X, Y = np.meshgrid(x, t)
fig3 = plt.figure()
ax3 = fig3.add_subplot(111, projection='3d')
p3 = ax3.plot_surface(X, Y, M1)
fig4 = plt.figure()
ax4 = fig4.add_subplot(111, projection='3d')
p4 = ax4.plot_surface(X, Y, M2)
fig5 = plt.figure()
ax5 = fig5.add_subplot(111)
p5 = ax5.contourf(M1, 15)
fig6 = plt.figure()
ax6 = fig6.add_subplot(111)
p6 = ax6.contourf(M2, 15)
fig7 = plt.figure()
ax7 = fig7.add_subplot(111)
p1 = ax7.plot(x, M1[0, :])
fig8 = plt.figure()
ax8 = fig8.add_subplot(111)
p8 = ax8.plot(t, M1[:, 0])
i1 = np.where(M1[:, 0] <= M1[0, 0])[0][1]
i2 = np.where(M1[0, :] <= M1[0, 0])[0][1]
T1 = t[np.where(M1[i1:, 0] >= M1[0, 0])[0][1]] - t_min
T2 = x[np.where(M1[0, i2:] >= M1[0, 0])[0][1]] - x_min
print(T1, T2, T2 / T1)
f1(1)
f1(1.5)
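# Added sanity check (comments and values below are not part of the original homework):
# Wave(lam, x) = cos(2*pi*x/lam) has spatial period lam, and a wave moving at speed v
# has temporal period T = lam / v.  For lam = 1 and v = 0.05 this gives T = 20 and a
# spatial period of 1, matching the estimates printed above (about 19.5 and 1.005;
# the deviation comes from the finite grid resolution).
expected_T, expected_spatial_period = 1 / 0.05, 1.0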
###Output
19.5 1.0053096491487334 0.05155434098198633
###Markdown
Task 2
###Code
def Intensity1(Lambda, N, A, R0, r, Nb):
Rr = np.zeros(N)
f = np.zeros(Nb)
for i in range(N):
Rr[i] = np.linalg.norm(r - R0[:, i])
for i in range(Nb):
su = 0
for j in range(N):
su += (A[j] / Rr[j] *
np.cos(2 * np.pi / Lambda * Rr[j] - 2 * np.pi / Nb * i))
f[i] = su**2
return np.mean(f)
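# Added usage sketch (hypothetical two-source configuration, not from the original homework):
# R0 holds the source positions as columns (shape (3, N)); Intensity1 sums the spherical
# waves A[j] / R_j * cos(2*pi*R_j/Lambda - phase) over all sources at Nb equally spaced
# phases and returns the mean of the squared sum, i.e. a time-averaged intensity at r.
R0_demo = np.array([[0.0, 0.0], [0.0, 0.0], [-0.001, 0.001]])  # two sources on the z axis
I_demo = Intensity1(5e-4, 2, [1, 1], R0_demo, np.array([0.0, 100.0, 0.0]), 3)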
###Output
_____no_output_____
###Markdown
Single-slit diffraction
###Code
# Single slit
def OneGapDiffraction(Lm=200):
N = 21
A = [1 for i in range(N)]
lam = 5e-4
R = np.zeros((N, 3))
for i in range(N):
R[:, 2][i] = -0.01 + i * 0.001
R = R.T
Np = 300
z_min = -10
z_max = 10
z = np.zeros(Np)
for i in range(Np):
z[i] = z_min + (z_max - z_min) / (Np - 1) * (i)
L = Lm
Nb = 3
I1 = np.zeros(Np)
for i in range(Np):
r = np.array([0, L, z[i]]).T
I1[i] = Intensity1(lam, N, A, R, r, Nb)
I1max = np.amax(I1)
fig8 = plt.figure()
ax8 = fig8.add_subplot(111)
p8 = ax8.plot(z, I1 / I1max)
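# Note (added): the slit is modelled as N = 21 point sources spaced 0.001 apart along z
# (from -0.01 to 0.01), radiating at wavelength 5e-4; the intensity is sampled on a screen
# at distance L = Lm along y and normalized to its maximum before plotting.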
###Output
_____no_output_____
###Markdown
Examples:
###Code
OneGapDiffraction(Lm=200)
OneGapDiffraction(Lm=100)
OneGapDiffraction(Lm=50)
###Output
_____no_output_____
###Markdown
Double-slit diffraction
###Code
N = 40
A = [1 for i in range(N)]
Lambda = 5e-4
R = np.zeros((N, 3))
for i in range(round(N / 2)):
R[:, 2][i] = -0.05 - N / 4 * 0.0001 + i * 0.0001
for i in range(round(N / 2), N):
R[:, 2][i] = 0.05 + N / 4 * 0.0001 - i * 0.0001
R = R.T
Np = 300
z_min = -10
z_max = 10
z = np.zeros(Np)
for i in range(Np):
z[i] = z_min + (z_max - z_min) / (Np - 1) * (i)
L = 600
Nb = 3
I1 = np.zeros(Np)
for i in range(Np):
r = np.array([0, L, z[i]]).T
I1[i] = Intensity1(Lambda, N, A, R, r, Nb)
I1max = np.amax(I1)
fig8 = plt.figure()
ax8 = fig8.add_subplot(111)
p8 = ax8.plot(z, I1 / I1max)
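# Note (added): two groups of N/2 closely spaced point sources centred near z = -0.05 and
# z = +0.05 model the two slits; the normalized intensity on a screen at distance L = 600
# shows the double-slit interference fringes.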
###Output
_____no_output_____
###Markdown
Task 3
###Code
def CoeffRefraction(a, alfa, beta, y):
    # refractive index profile n(y) = (1 + a * y**alfa)**beta
    return (1 + a * y**alfa)**beta
N = 4
param = [[1, 1, 0.5], [1, 1, 1], [2, 2, 2], [2.5, 3, 3.5]]
Np = 100
Xball = np.zeros((N, Np))
Yball = np.zeros((N, Np))
j = 0
for a, alfa, beta in param:
y_min = 1e-5
y_max = 20
y = []
for i in range(Np):
y.append(y_min + (y_max - y_min) / (Np - 1) * (i))
Nk = 1000
Y = np.zeros(Nk)
Z = np.zeros(Nk)
Xb = np.zeros(Np)
Yb = np.zeros(Np)
for i in range(Np):
for k in range(Nk):
Y[k] = y_min + (y[i] - y_min) / (Nk - 1) * (k)
Z[k] = 1 / ((CoeffRefraction(a, alfa, beta, Y[k])**2 - 1)**0.5)
Xb[i] = np.trapz(Z, Y)
Yb[i] = Y[Nk - 1]
Xball[j] = Xb
Yball[j] = Yb
j += 1
fig = []
ax = []
for i in range(N):
fig.append(plt.figure())
ax.append(fig[i].add_subplot(111))
ax[i].plot(Xball[i], Yball[i])
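# Note (added, interpretation): the trapezoidal integral evaluates
# x(y) = integral of dy / sqrt(n(y)**2 - 1) with n(y) = CoeffRefraction(a, alfa, beta, y),
# i.e. the trajectory of a ray in a stratified medium, assuming the ray invariant
# n*sin(theta) = 1 (the ray starts nearly horizontally where n is close to 1).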
###Output
_____no_output_____
notebooks/ConfigurationFuzzer.ipynb
###Markdown
# Testing Configurations

The behavior of a program is not only governed by its data. The _configuration_ of a program – that is, the settings that govern the execution of a program on its (regular) input data, as set by options or configuration files – just as well influences behavior, and thus can and should be tested. In this chapter, we explore how to systematically _test_ and _cover_ software configurations. By _automatically inferring configuration options_, we can apply these techniques out of the box, with no need for writing a grammar. Finally, we show how to systematically cover _combinations_ of configuration options, quickly detecting unwanted interferences.
###Code
from bookutils import YouTubeVideo
YouTubeVideo('XTGFX-tcotE')
###Output
_____no_output_____
###Markdown
**Prerequisites**

* You should have read the [chapter on grammars](Grammars.ipynb).
* You should have read the [chapter on grammar coverage](GrammarCoverageFuzzer.ipynb).
###Code
import bookutils
from typing import List, Union, Optional, Callable, Type
###Output
_____no_output_____
###Markdown
## Synopsis

To [use the code provided in this chapter](Importing.ipynb), write

```python
>>> from fuzzingbook.ConfigurationFuzzer import <identifier>
```

and then make use of the following features.

This chapter provides two classes:

* `OptionRunner` automatically extracts command-line options from a Python program;
* `OptionFuzzer` uses these to automatically test a Python program with a large variety of options.

`OptionRunner` runs a program up to the point where it parses its arguments, and then extracts a grammar that describes its invocations:

```python
>>> autopep8_runner = OptionRunner("autopep8", "foo.py")
```

The grammar can be extracted via the method `ebnf_grammar()`:

```python
>>> option_ebnf_grammar = autopep8_runner.ebnf_grammar()
>>> option_ebnf_grammar
{'<start>': ['(<option>)*<arguments>'], '<option>': [' -h', ' --help', ' --version', ' -v', ' --verbose', ' -d', ' --diff', ' -i', ' --in-place', ' --global-config <filename>', ' --ignore-local-config', ' -r', ' --recursive', ' -j <n>', ' --jobs <n>', ' -p <n>', ' --pep8-passes <n>', ' -a', ' --aggressive', ' --experimental', ' --exclude <globs>', ' --list-fixes', ' --ignore <errors>', ' --select <errors>', ' --max-line-length <n>', ' --line-range <line> <line>', ' --range <line> <line>', ' --indent-size <int>', ' --hang-closing', ' --exit-code'], '<arguments>': [' foo.py'], '<str>': ['<char>+'], '<char>': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '!', '"', '#', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '<', '=', '>', '?', '@', '[', '\\', ']', '^', '_', '`', '{', '|', '}', '~'], '<filename>': ['<str>'], '<int>': ['(-)?<digit>+'], '<digit>': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], '<n>': ['<int>'], '<globs>': ['<str>'], '<errors>': ['<str>'], '<line>': ['<int>']}
```

The grammar can be immediately used for fuzzing. A `GrammarCoverageFuzzer` will ensure all options are covered:

```python
>>> from Grammars import convert_ebnf_grammar
>>> fuzzer = GrammarCoverageFuzzer(convert_ebnf_grammar(option_ebnf_grammar))
>>> [fuzzer.fuzz() for i in range(3)]
[' foo.py', ' --max-line-length 6 --jobs -594 --ignore , --ignore-local-config -r --in-place --list-fixes --recursive -v --experimental -p 72 -h --aggressive --indent-size 3 --exit-code --hang-closing --pep8-passes -180 -d --global-config XQjT --diff --exclude *g -j 43 --help --select A --version --verbose -a --line-range -3963 0 --range 1 4 -i --in-place --version foo.py', ' --global-config 2 --select PuR --ignore b --ignore @ --ignore ;7d --ignore ) --ignore Fw1Z --ignore 0 --global-config ynf --select >G --select + --global-config ( --exclude v --exclude V --ignore ^ --select L --exclude 6 --exclude =$` --ignore % --global-config N --ignore [8maop --ignore 3! --select ~?c< --exclude C --select U --exclude h --global-config --global-config 5O --select x --select B] --ignore _ --global-config .K --global-config S --exclude r --global-config qW --exclude te4/ --exclude J} --ignore " --exclude |H --global-config -&k{s --global-config E --select :I --ignore 9 --global-config M --exclude YD --select \\ --exclude z --ignore i --select \'l --ignore M --ignore ;h --exit-code foo.py']
```

The `OptionFuzzer` class summarizes these steps. Its constructor takes an `OptionRunner` to automatically extract the grammar; it does the necessary steps to extract the grammar and fuzz with it.

```python
>>> autopep8_runner = OptionRunner("autopep8", "foo.py")
>>> autopep8_fuzzer = OptionFuzzer(autopep8_runner)
>>> [autopep8_fuzzer.fuzz() for i in range(3)]
[' --diff foo.py', ' --exclude --global-config V --select He --global-config | --global-config n}aicm --ignore 7 --ignore b --global-config u --exclude WB` --exclude 2 --exclude JpZt --exclude l_ --select *%^ --exclude & --exclude )Lv --global-config [ --global-config " --exclude sOEXP --aggressive --exclude \' --help --diff --experimental foo.py', ' --ignore FCw; --global-config /1K?:6 --exclude U --exclude z --ignore rQ --select x --select Y --select { --global-config o --select 34 --exclude ]j --select ~ --exclude 9@ --ignore w --global-config CVL --diff foo.py']
```

The final step in testing would now be to invoke the program with these arguments.

Note that `OptionRunner` is experimental: It assumes that the Python program in question uses the `argparse` module; and not all `argparse` features are supported. Still, it does a pretty good job even on nontrivial programs.

The `OptionRunner` constructor accepts an additional `miner` keyword parameter, which takes the class of the argument grammar miner to be used. By default, this is `OptionGrammarMiner` – a helper class that can be used (and extended) to create one's own option grammar miners.

## Configuration Options

When we talk about the input to a program, we usually think of the _data_ it processes. This is also what we have been fuzzing in the past chapters – be it with [random input](Fuzzer.ipynb), [mutation-based fuzzing](MutationFuzzer.ipynb), or [grammar-based fuzzing](GrammarFuzzer.ipynb). However, programs typically have several input sources, all of which can and should be tested – and included in test generation.

One important source of input is the program's _configuration_ – that is, a set of inputs that typically is set once when beginning to process data and then stays constant while processing data, while the program is running, or even while the program is deployed. Such a configuration is frequently set in _configuration files_ (for instance, as key/value pairs); the most ubiquitous method for command-line tools, though, are _configuration options_ on the command line.

As an example, consider the `grep` utility to find textual patterns in files. The exact mode by which `grep` works is governed by a multitude of options, which can be listed by providing a `--help` option:
###Code
!grep --help
###Output
_____no_output_____
###Markdown
All these options need to be tested for whether they operate correctly. In security testing, any such option may also trigger a yet unknown vulnerability. Hence, such options can become _fuzz targets_ on their own. In this chapter, we analyze how to systematically test such options – and better yet, how to extract possible configurations right out of given program files, such that we do not have to specify anything.

## Options in Python

Let us stick to our common programming language here and examine how options are processed in Python. The `argparse` module provides a parser for command-line arguments (and options) with great functionality – and great complexity. You start by defining a parser (`argparse.ArgumentParser()`) to which individual arguments with various features are added, one after another. Additional parameters for each argument can specify the type (`type`) of the argument (say, integers or strings), or the number of arguments (`nargs`).

By default, arguments are stored under their name in the `args` object coming from `parse_args()` – thus, `args.integers` holds the `integer` arguments added earlier. Special actions (`actions`) allow storing specific values in given variables; the `store_const` action stores the given `const` in the attribute named by `dest`.

The following example takes a number of integer arguments (`integers`) as well as an operator (`--sum`, `--min`, or `--max`) to be applied on these integers. The operators all store a function reference in the `accumulate` attribute, which is finally invoked on the integers parsed:
###Code
import argparse
def process_numbers(args=[]):
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('integers', metavar='N', type=int, nargs='+',
help='an integer for the accumulator')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--sum', dest='accumulate', action='store_const',
const=sum,
help='sum the integers')
group.add_argument('--min', dest='accumulate', action='store_const',
const=min,
help='compute the minimum')
group.add_argument('--max', dest='accumulate', action='store_const',
const=max,
help='compute the maximum')
args = parser.parse_args(args)
print(args.accumulate(args.integers))
###Output
_____no_output_____
###Markdown
Here's how `process_numbers()` works. We can, for instance, invoke the `--min` option on the given arguments to compute the minimum:
###Code
process_numbers(["--min", "100", "200", "300"])
###Output
_____no_output_____
###Markdown
Or compute the sum of three numbers:
###Code
process_numbers(["--sum", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
When defined via `add_mutually_exclusive_group()` (as above), options are mutually exclusive. Consequently, we can have only one operator:
###Code
import bookutils
from ExpectError import ExpectError
with ExpectError(SystemExit, print_traceback=False):
process_numbers(["--sum", "--max", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
## A Grammar for Configurations

How can we test a system with several options? The easiest answer is to write a grammar for it. The grammar `PROCESS_NUMBERS_EBNF_GRAMMAR` reflects the possible combinations of options and arguments:
###Code
from Grammars import crange, srange, convert_ebnf_grammar, extend_grammar, is_valid_grammar
from Grammars import START_SYMBOL, new_symbol, Grammar
PROCESS_NUMBERS_EBNF_GRAMMAR: Grammar = {
"<start>": ["<operator> <integers>"],
"<operator>": ["--sum", "--min", "--max"],
"<integers>": ["<integer>", "<integers> <integer>"],
"<integer>": ["<digit>+"],
"<digit>": crange('0', '9')
}
assert is_valid_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
PROCESS_NUMBERS_GRAMMAR = convert_ebnf_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
###Output
_____no_output_____
###Markdown
We can feed this grammar into our [grammar coverage fuzzer](GrammarCoverageFuzzer.ipynb) and have it cover one option after another:
###Code
from GrammarCoverageFuzzer import GrammarCoverageFuzzer
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Of course, we can also invoke `process_numbers()` with these very arguments. To this end, we need to convert the string produced by the grammar back into a list of individual arguments, using `split()`:
###Code
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
args = f.fuzz().split()
print(args)
process_numbers(args)
###Output
_____no_output_____
###Markdown
In a similar way, we can define grammars for any program to be tested; as well as define grammars for, say, configuration files. Yet, the grammar has to be updated with every change to the program, which creates a maintenance burden. Given that the information required for the grammar is already all encoded in the program, the question arises: _Can't we go and extract configuration options right out of the program in the first place?_

## Mining Configuration Options

In this section, we try to extract option and argument information right out of a program, such that we do not have to specify a configuration grammar. The aim is to have a configuration fuzzer that works on the options and arguments of an arbitrary program, as long as it follows specific conventions for processing its arguments. In the case of Python programs, this means using the `argparse` module.

Our idea is as follows: We execute the given program up to the point where the arguments are actually parsed – that is, `argparse.parse_args()` is invoked. Up to this point, we track all calls into the argument parser, notably those calls that define arguments and options (`add_argument()`). From these, we construct the grammar.

### Tracking Arguments

Let us illustrate this approach with a simple experiment: We define a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while `process_numbers` is invoked. If we have a call to a method `add_argument`, we access and print out the local variables (which at this point are the arguments to the method).
###Code
import sys
import string
def trace_locals(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(method_name, locals)
###Output
_____no_output_____
###Markdown
What we get is a list of all calls to `add_argument()`, together with the method arguments passed:
###Code
sys.settrace(trace_locals)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
###Markdown
From the `args` argument, we can access the individual options and arguments to be defined:
###Code
def trace_options(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(locals['args'])
sys.settrace(trace_options)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
###Markdown
We see that each argument comes as a tuple with one (say, `integers` or `--sum`) or two members (`-h` and `--help`), which denote alternate forms for the same option. Our job will be to go through the arguments of `add_argument()` and detect not only the names of options and arguments, but also whether they accept additional parameters, as well as the type of the parameters.

### A Grammar Miner for Options and Arguments

Let us now build a class that gathers all this information to create a grammar. We use the `ParseInterrupt` exception to interrupt program execution after gathering all arguments and options:
###Code
class ParseInterrupt(Exception):
pass
###Output
_____no_output_____
###Markdown
The class `OptionGrammarMiner` takes an executable function for which the grammar of options and arguments is to be mined:
###Code
class OptionGrammarMiner:
"""Helper class for extracting option grammars"""
def __init__(self, function: Callable, log: bool = False):
"""Constructor.
`function` - a function processing arguments using argparse()
`log` - output diagnostics if True
"""
self.function = function
self.log = log
###Output
_____no_output_____
###Markdown
The method `mine_ebnf_grammar()` is where everything happens. It creates a grammar of the form

```
<start> ::= <option>* <arguments>
<option> ::=
<arguments> ::=
```

in which the options and arguments will be collected. It then sets a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while the previously defined `function` is invoked. Raising `ParseInterrupt` (when `parse_args()` is invoked) ends execution.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
OPTION_SYMBOL = "<option>"
ARGUMENTS_SYMBOL = "<arguments>"
def mine_ebnf_grammar(self):
"""Extract EBNF option grammar"""
self.grammar = {
START_SYMBOL: ["(" + self.OPTION_SYMBOL + ")*" + self.ARGUMENTS_SYMBOL],
self.OPTION_SYMBOL: [],
self.ARGUMENTS_SYMBOL: []
}
self.current_group = self.OPTION_SYMBOL
old_trace = sys.gettrace()
sys.settrace(self.traceit)
try:
self.function()
except ParseInterrupt:
pass
sys.settrace(old_trace)
return self.grammar
def mine_grammar(self):
"""Extract BNF option grammar"""
return convert_ebnf_grammar(self.mine_ebnf_grammar())
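# For reference (added): before any add_argument() call has been seen, the mined grammar is
# {"<start>": ["(<option>)*<arguments>"], "<option>": [], "<arguments>": []}.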
###Output
_____no_output_____
###Markdown
The trace function checks for four methods: `add_argument()` is the most important function, resulting in processing arguments; `frame.f_locals` again is the set of local variables, which at this point is mostly the arguments to `add_argument()`. Since mutually exclusive groups also have a method `add_argument()`, we set the flag `in_group` to differentiate. Note that we make no specific efforts to differentiate between multiple parsers or groups; we simply assume that there is one parser, and at any point at most one mutually exclusive group.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def traceit(self, frame, event, arg):
if event != "call":
return
if "self" not in frame.f_locals:
return
self_var = frame.f_locals["self"]
method_name = frame.f_code.co_name
if method_name == "add_argument":
in_group = repr(type(self_var)).find("Group") >= 0
self.process_argument(frame.f_locals, in_group)
elif method_name == "add_mutually_exclusive_group":
self.add_group(frame.f_locals, exclusive=True)
elif method_name == "add_argument_group":
# self.add_group(frame.f_locals, exclusive=False)
pass
elif method_name == "parse_args":
raise ParseInterrupt
return self.traceit
###Output
_____no_output_____
###Markdown
The method `process_argument()` now analyzes the arguments passed and adds them to the grammar:

* If the argument starts with `-`, it gets added as an optional element to the `<option>` list.
* Otherwise, it gets added to the `<arguments>` list.

The optional `nargs` argument specifies how many arguments can follow. If it is a number, we add the appropriate number of elements to the grammar; if it is an abstract specifier (say, `+` or `*`), we use it directly as an EBNF operator.

Given the large number of parameters and optional behavior, this is a somewhat messy function, but it does the job.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def process_argument(self, locals, in_group):
args = locals["args"]
kwargs = locals["kwargs"]
if self.log:
print(args)
print(kwargs)
print()
for arg in args:
self.process_arg(arg, in_group, kwargs)
class OptionGrammarMiner(OptionGrammarMiner):
def process_arg(self, arg, in_group, kwargs):
if arg.startswith('-'):
if not in_group:
target = self.OPTION_SYMBOL
else:
target = self.current_group
metavar = None
arg = " " + arg
else:
target = self.ARGUMENTS_SYMBOL
metavar = arg
arg = ""
if "nargs" in kwargs:
nargs = kwargs["nargs"]
else:
nargs = 1
param = self.add_parameter(kwargs, metavar)
if param == "":
nargs = 0
if isinstance(nargs, int):
for i in range(nargs):
arg += param
else:
assert nargs in "?+*"
arg += '(' + param + ')' + nargs
if target == self.OPTION_SYMBOL:
self.grammar[target].append(arg)
else:
self.grammar[target].append(arg)
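# Examples (added) of what process_arg() yields for the process_numbers() parser above:
#   add_argument('--sum', action='store_const', ...)  ->  " --sum" appended to the group
#   add_argument('integers', type=int, nargs='+')     ->  "( <integers>)+" appended to
#                                                         <arguments>, plus <integers> ::= <int>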
###Output
_____no_output_____
###Markdown
The method `add_parameter()` handles possible parameters of options. If the argument has an `action` defined, it takes no parameter. Otherwise, we identify the type of the parameter (as `int` or `str`) and augment the grammar with an appropriate rule.
###Code
import inspect
class OptionGrammarMiner(OptionGrammarMiner):
def add_parameter(self, kwargs, metavar):
if "action" in kwargs:
# No parameter
return ""
type_ = "str"
if "type" in kwargs:
given_type = kwargs["type"]
# int types come as '<class int>'
if inspect.isclass(given_type) and issubclass(given_type, int):
type_ = "int"
if metavar is None:
if "metavar" in kwargs:
metavar = kwargs["metavar"]
else:
metavar = type_
self.add_type_rule(type_)
if metavar != type_:
self.add_metavar_rule(metavar, type_)
param = " <" + metavar + ">"
return param
###Output
_____no_output_____
###Markdown
The method `add_type_rule()` adds a rule for parameter types to the grammar. If the parameter is identified by a meta-variable (say, `N`), we add a rule for this as well to improve legibility.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_type_rule(self, type_):
if type_ == "int":
self.add_int_rule()
else:
self.add_str_rule()
def add_int_rule(self):
self.grammar["<int>"] = ["(-)?<digit>+"]
self.grammar["<digit>"] = crange('0', '9')
def add_str_rule(self):
self.grammar["<str>"] = ["<char>+"]
self.grammar["<char>"] = srange(
string.digits
+ string.ascii_letters
+ string.punctuation)
def add_metavar_rule(self, metavar, type_):
self.grammar["<" + metavar + ">"] = ["<" + type_ + ">"]
###Output
_____no_output_____
###Markdown
The method `add_group()` adds a new mutually exclusive group to the grammar. We define a new symbol (say, `<group>`) for the options added to the group, and use the `required` and `exclusive` flags to define an appropriate expansion operator. The group is then prefixed to the grammar, as in

```
<start> ::= <group><option>* <arguments>
<group> ::=
```

and filled with the next calls to `add_argument()` within the group.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_group(self, locals, exclusive):
kwargs = locals["kwargs"]
if self.log:
print(kwargs)
required = kwargs.get("required", False)
group = new_symbol(self.grammar, "<group>")
if required and exclusive:
group_expansion = group
if required and not exclusive:
group_expansion = group + "+"
if not required and exclusive:
group_expansion = group + "?"
if not required and not exclusive:
group_expansion = group + "*"
self.grammar[START_SYMBOL][0] = group_expansion + \
self.grammar[START_SYMBOL][0]
self.grammar[group] = []
self.current_group = group
###Output
_____no_output_____
###Markdown
That's it! With this, we can now extract the grammar from our `process_numbers()` program. Turning on logging again reveals the variables we draw upon.
###Code
miner = OptionGrammarMiner(process_numbers, log=True)
process_numbers_grammar = miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
Here is the extracted grammar:
###Code
process_numbers_grammar
###Output
_____no_output_____
###Markdown
The grammar properly identifies the group found:
###Code
process_numbers_grammar["<start>"]
process_numbers_grammar["<group>"]
###Output
_____no_output_____
###Markdown
It also identifies a `--help` option provided not by us, but by the `argparse` module:
###Code
process_numbers_grammar["<option>"]
###Output
_____no_output_____
###Markdown
The grammar also correctly identifies the types of the arguments:
###Code
process_numbers_grammar["<arguments>"]
process_numbers_grammar["<integers>"]
###Output
_____no_output_____
###Markdown
The rules for `<int>` are set as defined by `add_int_rule()`:
###Code
process_numbers_grammar["<int>"]
###Output
_____no_output_____
###Markdown
We can take this grammar and convert it to BNF, such that we can fuzz with it right away:
###Code
assert is_valid_grammar(process_numbers_grammar)
grammar = convert_ebnf_grammar(process_numbers_grammar)
assert is_valid_grammar(grammar)
f = GrammarCoverageFuzzer(grammar)
for i in range(10):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Each and every invocation adheres to the rules as set forth in the `argparse` calls. By mining options and arguments from existing programs, we can now fuzz these options out of the box – without having to specify a grammar.

## Testing Autopep8

Let us try out the option grammar miner on real-world Python programs. `autopep8` is a tool that automatically converts Python code to the [PEP 8 Style Guide for Python Code](https://www.python.org/dev/peps/pep-0008/). (Actually, all Python code in this book runs through `autopep8` during production.) `autopep8` offers a wide range of options, as can be seen by invoking it with `--help`:
###Code
!autopep8 --help
###Output
_____no_output_____
###Markdown
### Autopep8 Setup

We want to systematically test these options. In order to deploy our configuration grammar miner, we need to find the source code of the executable:
###Code
import os
def find_executable(name):
for path in os.get_exec_path():
qualified_name = os.path.join(path, name)
if os.path.exists(qualified_name):
return qualified_name
return None
autopep8_executable = find_executable("autopep8")
assert autopep8_executable is not None
autopep8_executable
###Output
_____no_output_____
###Markdown
Next, we build a function that reads the contents of the file and executes it.
###Code
def autopep8():
executable = find_executable("autopep8")
# First line has to contain "/usr/bin/env python" or like
first_line = open(executable).readline()
assert first_line.find("python") >= 0
contents = open(executable).read()
exec(contents)
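# Note (added): autopep8() runs the installed script's code in the current interpreter.
# Called on its own it would execute the whole tool; below, it is only ever invoked
# under OptionGrammarMiner, which interrupts execution as soon as parse_args() is reached.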
###Output
_____no_output_____
###Markdown
### Mining an Autopep8 Grammar

We can use the `autopep8()` function in our grammar miner:
###Code
autopep8_miner = OptionGrammarMiner(autopep8)
###Output
_____no_output_____
###Markdown
and extract a grammar for it:
###Code
autopep8_ebnf_grammar = autopep8_miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
This works because here, `autopep8` is not a separate process (and a separate Python interpreter), but we run the `autopep8()` function (and the `autopep8` code) in our current Python interpreter – up to the call to `parse_args()`, where we interrupt execution again. At this point, the `autopep8` code has done nothing but setting up the argument parser – which is what we are interested in. The grammar options mined reflect precisely the options seen when providing `--help`:
###Code
print(autopep8_ebnf_grammar["<option>"])
###Output
_____no_output_____
###Markdown
Metavariables like `<n>` or `<line>` are placeholders for integers. We assume all metavariables of the same name have the same type:
###Code
autopep8_ebnf_grammar["<line>"]
###Output
_____no_output_____
###Markdown
The grammar miner has inferred that the argument to `autopep8` is a list of files:
###Code
autopep8_ebnf_grammar["<arguments>"]
###Output
_____no_output_____
###Markdown
which in turn all are strings:
###Code
autopep8_ebnf_grammar["<files>"]
###Output
_____no_output_____
###Markdown
As we are only interested in testing options, not arguments, we fix the arguments to a single mandatory input. (Otherwise, we'd have plenty of random file names generated.)
###Code
autopep8_ebnf_grammar["<arguments>"] = [" <files>"]
autopep8_ebnf_grammar["<files>"] = ["foo.py"]
assert is_valid_grammar(autopep8_ebnf_grammar)
###Output
_____no_output_____
###Markdown
### Creating Autopep8 Options

Let us now use the inferred grammar for fuzzing. Again, we convert the EBNF grammar into a regular BNF grammar:
###Code
autopep8_grammar = convert_ebnf_grammar(autopep8_ebnf_grammar)
assert is_valid_grammar(autopep8_grammar)
###Output
_____no_output_____
###Markdown
And we can use the grammar for fuzzing all options:
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=4)
for i in range(20):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Let us apply these options on the actual program. We need a file `foo.py` that will serve as input: (Note that the following commands will overwrite the file `foo.py`, if it already exists in the current working directory. Be aware of this, if you downloaded the notebooks and are working locally.)
###Code
def create_foo_py():
open("foo.py", "w").write("""
def twice(x = 2):
return x + x
""")
create_foo_py()
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We see how `autopep8` fixes the spacing:
###Code
!autopep8 foo.py
###Output
_____no_output_____
###Markdown
Let us now put things together. We define a `ProgramRunner` that will run the `autopep8` executable with arguments coming from the mined `autopep8` grammar.
###Code
from Fuzzer import ProgramRunner
###Output
_____no_output_____
###Markdown
Running `autopep8` with the mined options reveals a surprisingly high number of passing runs. (We see that some options depend on each other or are mutually exclusive, but this is handled by the program logic, not the argument parser, and hence out of our scope.) The `GrammarCoverageFuzzer` ensures that each option is tested at least once. (Digits and letters, too, by the way.)
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=5)
for i in range(20):
invocation = "autopep8" + f.fuzz()
print("$ " + invocation)
args = invocation.split()
autopep8_runner = ProgramRunner(args)
result, outcome = autopep8_runner.run()
if result.stderr != "":
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
Our `foo.py` file now has been formatted in place a number of times:
###Code
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We don't need it anymore, so we clean up things:
###Code
import os
os.remove("foo.py")
###Output
_____no_output_____
###Markdown
## Classes for Fuzzing Configuration Options

Let us now create reusable classes that we can use for testing arbitrary programs. (Okay, make that "arbitrary programs that are written in Python and use the `argparse` module to process command-line arguments.") The class `OptionRunner` is a subclass of `ProgramRunner` that takes care of automatically determining the grammar, using the same steps we used for `autopep8`, above.
###Code
from Grammars import unreachable_nonterminals
class OptionRunner(ProgramRunner):
"""Run a program while determining its option grammar"""
def __init__(self, program: Union[str, List[str]],
arguments: Optional[str] = None, *,
log: bool = False,
miner_class: Optional[Type[OptionGrammarMiner]] = None):
"""Constructor.
`program` - the (Python) program to be executed
`arguments` - an (optional) string with arguments for `program`
`log` - if True, enable logging in miner
`miner_class` - the `OptionGrammarMiner` class to be used
(default: `OptionGrammarMiner`)
"""
if isinstance(program, str):
self.base_executable = program
else:
self.base_executable = program[0]
if miner_class is None:
miner_class = OptionGrammarMiner
self.miner_class = miner_class
self.log = log
self.find_contents()
self.find_grammar()
if arguments is not None:
self.set_arguments(arguments)
super().__init__(program)
###Output
_____no_output_____
###Markdown
First, we find the contents of the Python executable:
###Code
class OptionRunner(OptionRunner):
def find_contents(self):
self._executable = find_executable(self.base_executable)
if self._executable is None:
raise IOError(self.base_executable + ": not found")
first_line = open(self._executable).readline()
if first_line.find("python") < 0:
raise IOError(self.base_executable + ": not a Python executable")
self.contents = open(self._executable).read()
def invoker(self):
# We are passing the local variables as is, such that we can access `self`
# We set __name__ to '__main__' to invoke the script as an executable
exec(self.contents, {'__name__': '__main__'})
def executable(self):
return self._executable
###Output
_____no_output_____
###Markdown
Next, we determine the grammar using the `OptionGrammarMiner` class:
###Code
class OptionRunner(OptionRunner):
def find_grammar(self):
miner = self.miner_class(self.invoker, log=self.log)
self._ebnf_grammar = miner.mine_ebnf_grammar()
def ebnf_grammar(self):
"""Return extracted grammar in EBNF form"""
return self._ebnf_grammar
def grammar(self):
"""Return extracted grammar in BNF form"""
return convert_ebnf_grammar(self._ebnf_grammar)
###Output
_____no_output_____
###Markdown
The two service methods `set_arguments()` and `set_invocation()` help us to change the arguments and program, respectively.
###Code
class OptionRunner(OptionRunner):
def set_arguments(self, args):
self._ebnf_grammar["<arguments>"] = [" " + args]
# Delete rules for previous arguments
for nonterminal in unreachable_nonterminals(self._ebnf_grammar):
del self._ebnf_grammar[nonterminal]
def set_invocation(self, program):
self.program = program
###Output
_____no_output_____
###Markdown
We can instantiate the class on `autopep8` and immediately get the grammar:
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
print(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
###Markdown
An `OptionFuzzer` interacts with the given `OptionRunner` to obtain its grammar, which is then passed to its `GrammarCoverageFuzzer` superclass.
###Code
class OptionFuzzer(GrammarCoverageFuzzer):
"""Fuzz a (Python) program using its arguments"""
def __init__(self, runner: OptionRunner, *args, **kwargs):
"""Constructor. `runner` is an OptionRunner."""
assert issubclass(type(runner), OptionRunner)
self.runner = runner
grammar = runner.grammar()
super().__init__(grammar, *args, **kwargs)
###Output
_____no_output_____
###Markdown
When invoking `run()`, the `OptionFuzzer` creates a new invocation (using `fuzz()` from its grammar) and runs the now given (or previously set) runner with the arguments from the grammar. Note that the runner specified in `run()` can differ from the one set during initialization; this allows for mining options from one program and applying it in another context.
###Code
class OptionFuzzer(OptionFuzzer):
def run(self, runner=None, inp=""):
if runner is None:
runner = self.runner
assert issubclass(type(runner), OptionRunner)
invocation = runner.executable() + " " + self.fuzz()
runner.set_invocation(invocation.split())
return runner.run(inp)
###Output
_____no_output_____
###Markdown
### Example: Autopep8

Let us apply our newly defined classes on the `autopep8` runner:
###Code
autopep8_fuzzer = OptionFuzzer(autopep8_runner, max_nonterminals=5)
for i in range(3):
print(autopep8_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
We can now systematically test `autopep8` with these classes:
###Code
autopep8_fuzzer.run(autopep8_runner)
###Output
_____no_output_____
###Markdown
### Example: MyPy

We can extract options for the `mypy` static type checker for Python:
###Code
assert find_executable("mypy") is not None
mypy_runner = OptionRunner("mypy", "foo.py")
print(mypy_runner.ebnf_grammar()["<option>"])
mypy_fuzzer = OptionFuzzer(mypy_runner, max_nonterminals=5)
for i in range(10):
print(mypy_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
### Example: Notedown

Here are the configuration options for the `notedown` Notebook to Markdown converter:
###Code
assert find_executable("notedown") is not None
notedown_runner = OptionRunner("notedown")
print(notedown_runner.ebnf_grammar()["<option>"])
notedown_fuzzer = OptionFuzzer(notedown_runner, max_nonterminals=5)
for i in range(10):
print(notedown_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
## Combinatorial Testing

Our `GrammarCoverageFuzzer` does a good job in covering each and every option at least once, which is great for systematic testing. However, as we also can see in our examples above, some options require each other, while others interfere with each other. What we should do as good testers is not only to cover every option individually, but also _combinations_ of options.

The Python `itertools` module gives us means to create combinations from lists. We can, for instance, take the `notedown` options and create a list of all pairs.
###Code
from itertools import combinations
option_list = notedown_runner.ebnf_grammar()["<option>"]
pairs = list(combinations(option_list, 2))
###Output
_____no_output_____
###Markdown
There's quite a number of pairs:
###Code
len(pairs)
print(pairs[:20])
###Output
_____no_output_____
###Markdown
Testing every such pair of options frequently suffices to cover all interferences between options. (Programs rarely have conditions involving three or more configuration settings.) To this end, we _change_ the grammar from having a list of options to having a list of _option pairs_, such that covering these will automatically cover all pairs. We create a function `pairwise()` that takes a list of options as occurring in our grammar and returns a list of _pairwise options_ – that is, our original options, but concatenated.
###Code
def pairwise(option_list):
return [option_1 + option_2
for (option_1, option_2) in combinations(option_list, 2)]
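# Added mini-example (hypothetical option strings): each pair is simply the two options
# concatenated, e.g. pairwise([" -a", " -b", " -c"]) == [" -a -b", " -a -c", " -b -c"]
demo_pairs = pairwise([" -a", " -b", " -c"])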
###Output
_____no_output_____
###Markdown
Here's the first 20 pairs:
###Code
print(pairwise(option_list)[:20])
###Output
_____no_output_____
###Markdown
The new grammar `pairwise_notedown_grammar` is a copy of the `notedown` grammar, but with the list of options replaced with the above pairwise option list.
###Code
notedown_grammar = notedown_runner.grammar()
pairwise_notedown_grammar = extend_grammar(notedown_grammar)
pairwise_notedown_grammar["<option>"] = pairwise(notedown_grammar["<option>"])
assert is_valid_grammar(pairwise_notedown_grammar)
###Output
_____no_output_____
###Markdown
Using the "pairwise" grammar to fuzz now covers one pair after another:
###Code
notedown_pairwise_fuzzer = GrammarCoverageFuzzer(
pairwise_notedown_grammar, max_nonterminals=4)
for i in range(10):
print(notedown_pairwise_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Can we actually test all combinations of options? Not in practice, as the number of combinations quickly grows as the length increases. It decreases again as the number of options reaches the maximum (with 20 options, there is only 1 combination involving _all_ options), but the absolute numbers are still staggering:
###Code
for combination_length in range(1, 20):
tuples = list(combinations(option_list, combination_length))
print(combination_length, len(tuples))
###Output
_____no_output_____
###Markdown
Formally, the number of combinations of length $k$ in a set of options of length $n$ is the binomial coefficient

$${n \choose k} = \frac{n!}{k!(n - k)!}$$

which for $k = 2$ (all pairs) gives us

$${n \choose 2} = \frac{n!}{2(n - 2)!} = \frac{n (n - 1)}{2}$$

For `autopep8` with its 29 options...
###Code
len(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
###Markdown
... we thus have 406 distinct pairs. However, the binomial coefficient does not differentiate between permutations of elements of the pairs, which our tests do. Therefore we need 812 tests to cover all pairs:
###Code
len(autopep8_runner.ebnf_grammar()["<option>"]) * \
(len(autopep8_runner.ebnf_grammar()["<option>"]) - 1)
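# Cross-check (added): with math.comb, comb(29, 2) == 406 unordered pairs, and ordering
# the elements of each pair doubles this to the 812 tests mentioned above.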
###Output
_____no_output_____
###Markdown
For `mypy` with its 110 options, though, we already end up with 11,990 tests to be conducted:
###Code
len(mypy_runner.ebnf_grammar()["<option>"])
len(mypy_runner.ebnf_grammar()["<option>"]) * \
(len(mypy_runner.ebnf_grammar()["<option>"]) - 1)
###Output
_____no_output_____
###Markdown
Even if each pair takes a second to run, we'd still be done in three hours of testing, though.

If your program has more options that should all be covered in combinations, it is advisable that you limit the number of configurations further – for instance by limiting combinatorial testing to those combinations that possibly can interact with each other; and covering all other (presumably orthogonal) options individually.

This mechanism of creating configurations by extending grammars can be easily extended to other configuration targets. One may want to explore a greater number of configurations, or expansions in specific contexts. The [exercises](#Exercises), below, have a number of options ready for you.

## Synopsis

This chapter provides two classes:

* `OptionRunner` automatically extracts command-line options from a Python program;
* `OptionFuzzer` uses these to automatically test a Python program with a large variety of options.

`OptionRunner` runs a program up to the point where it parses its arguments, and then extracts a grammar that describes its invocations:
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
###Output
_____no_output_____
###Markdown
The grammar can be extracted via the method `ebnf_grammar()`:
###Code
option_ebnf_grammar = autopep8_runner.ebnf_grammar()
option_ebnf_grammar
###Output
_____no_output_____
###Markdown
The grammar can be immediately used for fuzzing. A `GrammarCoverageFuzzer` will ensure all options are covered:
###Code
from Grammars import convert_ebnf_grammar
fuzzer = GrammarCoverageFuzzer(convert_ebnf_grammar(option_ebnf_grammar))
[fuzzer.fuzz() for i in range(3)]
###Output
_____no_output_____
###Markdown
The `OptionFuzzer` class summarizes these steps. Its constructor takes an `OptionRunner` to automatically extract the grammar; it does the necessary steps to extract the grammar and fuzz with it.
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
autopep8_fuzzer = OptionFuzzer(autopep8_runner)
[autopep8_fuzzer.fuzz() for i in range(3)]
###Output
_____no_output_____
###Markdown
The final step in testing would now be to invoke the program with these arguments.

Note that `OptionRunner` is experimental: It assumes that the Python program in question uses the `argparse` module; and not all `argparse` features are supported. Still, it does a pretty good job even on nontrivial programs.

The `OptionRunner` constructor accepts an additional `miner` keyword parameter, which takes the class of the argument grammar miner to be used. By default, this is `OptionGrammarMiner` – a helper class that can be used (and extended) to create one's own option grammar miners.
###Code
# ignore
from ClassDiagram import display_class_hierarchy
from Fuzzer import Fuzzer, Runner, ProgramRunner
from Grammars import Expansion
from GrammarFuzzer import GrammarFuzzer, DerivationTree
from GrammarCoverageFuzzer import TrackingGrammarCoverageFuzzer
# ignore
display_class_hierarchy([OptionRunner, OptionFuzzer, OptionGrammarMiner],
public_methods=[
Fuzzer.__init__,
Fuzzer.fuzz,
Fuzzer.run,
Fuzzer.runs,
GrammarFuzzer.__init__,
GrammarFuzzer.fuzz,
GrammarFuzzer.fuzz_tree,
TrackingGrammarCoverageFuzzer.__init__,
OptionFuzzer.__init__,
OptionFuzzer.run,
Runner.__init__,
Runner.run,
ProgramRunner.__init__,
ProgramRunner.__init__,
OptionRunner.__init__,
OptionRunner.ebnf_grammar,
OptionRunner.grammar,
OptionGrammarMiner.__init__,
OptionGrammarMiner.mine_ebnf_grammar,
OptionGrammarMiner.mine_grammar,
],
types={
'DerivationTree': DerivationTree,
'Expansion': Expansion,
'Grammar': Grammar
},
project='fuzzingbook')
###Output
_____no_output_____
###Markdown
## Lessons Learned

* Besides regular input data, program _configurations_ make an important testing target.
* For a given program using a standard library to parse command-line options and arguments, one can automatically extract these and convert them into a grammar.
* To cover not only single options, but combinations of options, one can expand the grammar to cover all pairs, or come up with even more ambitious targets.

## Next Steps

If you liked the idea of mining a grammar from a program, do not miss:

* [how to mine grammars for input data](GrammarMiner.ipynb)

Our next steps in the book focus on:

* [how to parse and recombine inputs](Parser.ipynb)
* [how to assign weights and probabilities to specific productions](ProbabilisticGrammarFuzzer.ipynb)
* [how to simplify inputs that cause a failure](Reducer.ipynb)

## Background

Although configuration data is just as likely to cause failures as other input data, it has received relatively little attention in test generation – possibly because, unlike "regular" input data, configuration data is not so much under control of external parties, and because, again unlike regular data, there is little variance in configurations. Creating models for software configurations and using these models for testing is commonplace, as is the idea of pairwise testing. For an overview, see \cite{Pezze2008}; for a discussion and comparison of state-of-the-art techniques, see \cite{Petke2015}.

More specifically, \cite{Sutton2007} also discuss techniques to systematically cover command-line options. Dai et al. \cite{Dai2010} apply configuration fuzzing by changing variables associated with configuration files.

## Exercises

### Exercise 1: #ifdef Configuration Fuzzing

In C programs, the *C preprocessor* can be used to choose which code parts should be compiled and which ones should not. As an example, in the C code

```C
#ifdef LONG_FOO
long foo() { ... }
#else
int foo() { ... }
#endif
```

the compiler will compile the function `foo()` with return type `long` if the _preprocessor variable_ `LONG_FOO` is defined, and with return type `int` if not. Such preprocessor variables are either set in the source files (using `#define`, as in `#define LONG_FOO`) or on the C compiler command line (using `-D<variable>` or `-D<variable>=<value>`, as in `-DLONG_FOO`).

Such *conditional compilation* is used to configure C programs towards their environment. System-specific code can contain lots of conditional compilation. As an example, consider this excerpt of `xmlparse.c`, the XML parser that is part of the Python runtime library:

```c
#if defined(_WIN32) && !defined(LOAD_LIBRARY_SEARCH_SYSTEM32)
# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800
#endif

#if !defined(HAVE_GETRANDOM) && !defined(HAVE_SYSCALL_GETRANDOM) \
    && !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_ARC4RANDOM) \
    && !defined(XML_DEV_URANDOM) \
    && !defined(_WIN32) \
    && !defined(XML_POOR_ENTROPY)
# error
#endif

#if !defined(TIOCSWINSZ) || defined(__SCO__) || defined(__UNIXWARE__)
#define USE_SYSV_ENVVARS /* COLUMNS/LINES vs. TERMCAP */
#endif

#ifdef XML_UNICODE_WCHAR_T
#define XML_T(x) (const wchar_t)x
#define XML_L(x) L ## x
#else
#define XML_T(x) (const unsigned short)x
#define XML_L(x) x
#endif

int fun(int x) { return XML_T(x); }
```

A typical configuration for the C preprocessor on the above code could be `cc -c -D_WIN32 -DXML_POOR_ENTROPY -DXML_UNICODE_WCHAR_T xmlparse.c`, defining the given preprocessor variables and selecting the appropriate code fragments.
Since the compiler can only compile one configuration at a time (implying that we can also only _test_ one resulting executable at a time), your task is to find out which of these configurations actually compile. To this end, proceed in three steps.

#### Part 1: Extract Preprocessor Variables

Write a _function_ `cpp_identifiers()` that, given a set of lines (say, from `open(filename).readlines()`), extracts all preprocessor variables referenced in `#if` or `#ifdef` preprocessor instructions. Apply `cpp_identifiers()` on the sample C input above, such that

```python
cpp_identifiers(open("xmlparse.c").readlines())
```

returns the set

```python
{'_WIN32', 'LOAD_LIBRARY_SEARCH_SYSTEM32', 'HAVE_GETRANDOM', 'HAVE_SYSCALL_GETRANDOM', 'HAVE_ARC4RANDOM_BUF', ...}
```

**Solution.** Let us start with creating a sample input file, `xmlparse.c`:
###Code
filename = "xmlparse.c"
open(filename, "w").write(
"""
#if defined(_WIN32) && !defined(LOAD_LIBRARY_SEARCH_SYSTEM32)
# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800
#endif
#if !defined(HAVE_GETRANDOM) && !defined(HAVE_SYSCALL_GETRANDOM) \
&& !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_ARC4RANDOM) \
&& !defined(XML_DEV_URANDOM) \
&& !defined(_WIN32) \
&& !defined(XML_POOR_ENTROPY)
# error
#endif
#if !defined(TIOCSWINSZ) || defined(__SCO__) || defined(__UNIXWARE__)
#define USE_SYSV_ENVVARS /* COLUMNS/LINES vs. TERMCAP */
#endif
#ifdef XML_UNICODE_WCHAR_T
#define XML_T(x) (const wchar_t)x
#define XML_L(x) L ## x
#else
#define XML_T(x) (const unsigned short)x
#define XML_L(x) x
#endif
int fun(int x) { return XML_T(x); }
""");
###Output
_____no_output_____
###Markdown
To find C preprocessor `#if` directives and preprocessor variables, we use regular expressions matching them.
###Code
import re
re_cpp_if_directive = re.compile(r"\s*#\s*(el)?if")
re_cpp_identifier = re.compile(r"[a-zA-Z_$]+")
def cpp_identifiers(lines):
identifiers = set()
for line in lines:
if re_cpp_if_directive.match(line):
identifiers |= set(re_cpp_identifier.findall(line))
# These are preprocessor keywords
identifiers -= {"if", "ifdef", "ifndef", "defined"}
return identifiers
cpp_ids = cpp_identifiers(open("xmlparse.c").readlines())
cpp_ids
###Output
_____no_output_____
###Markdown
#### Part 2: Derive an Option Grammar

With the help of `cpp_identifiers()`, create a grammar which has C compiler invocations with a list of options, where each option takes the form `-D<variable>` for a preprocessor variable `<variable>`. Using this grammar `cpp_grammar`, a fuzzer

```python
g = GrammarCoverageFuzzer(cpp_grammar)
```

would create C compiler invocations such as

```python
[g.fuzz() for i in range(10)]
['cc -DHAVE_SYSCALL_GETRANDOM xmlparse.c', 'cc -D__SCO__ -DRANDOM_BUF -DXML_UNICODE_WCHAR_T -D__UNIXWARE__ xmlparse.c', 'cc -DXML_POOR_ENTROPY xmlparse.c', 'cc -DRANDOM xmlparse.c', 'cc -D_WIN xmlparse.c', 'cc -DHAVE_ARC xmlparse.c', ...]
```

**Solution.** This is not very difficult:
###Code
from Grammars import Grammar, is_valid_grammar
cpp_grammar: Grammar = {
"<start>": ["cc -c<options> " + filename],
"<options>": ["<option>", "<options><option>"],
"<option>": []
}
for id in cpp_ids:
s = new_symbol(cpp_grammar, "<" + id + ">")
cpp_grammar["<option>"].append(s)
cpp_grammar[s] = [" -D" + id]
assert is_valid_grammar(cpp_grammar)
cpp_grammar
###Output
_____no_output_____
###Markdown
#### Part 3: C Preprocessor Configuration Fuzzing

Using the grammar just produced, use a `GrammarCoverageFuzzer` to

1. Test each preprocessor variable individually
2. Test each pair of preprocessor variables, using `pairwise()`.

What happens if you actually run the invocations?

**Solution.** We can simply run the coverage fuzzer, as described above.
###Code
g = GrammarCoverageFuzzer(cpp_grammar)
g.fuzz()
from Fuzzer import ProgramRunner
for i in range(10):
invocation = g.fuzz()
print("$", invocation)
# subprocess.call(invocation, shell=True)
cc_runner = ProgramRunner(invocation.split(' '))
(result, outcome) = cc_runner.run()
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
To test all pairs, we can use `pairwise()`:
###Code
pairwise_cpp_grammar = extend_grammar(cpp_grammar)
pairwise_cpp_grammar["<option>"] = pairwise(cpp_grammar["<option>"])
pairwise_cpp_grammar["<option>"][:10]
for i in range(10):
invocation = g.fuzz()
print("$", invocation)
# subprocess.call(invocation, shell=True)
cc_runner = ProgramRunner(invocation.split(' '))
(result, outcome) = cc_runner.run()
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
Some of the compilation errors we get could be expected – for instance, defining `XML_UNICODE_WCHAR_T` when actually, the type is not supported in our environment. Other errors may not be expected – and it is these errors we would find through systematic configuration fuzzing, as described above. At the end, don't forget to clean up:
###Code
os.remove("xmlparse.c")
if os.path.exists("xmlparse.o"):
os.remove("xmlparse.o")
###Output
_____no_output_____
###Markdown
### Exercise 2: .ini Configuration Fuzzing

Besides command-line options, another important source of configurations is _configuration files_. In this exercise, we will consider the very simple configuration language provided by the Python `ConfigParser` module, which is very similar to what is found in Microsoft Windows _.ini_ files.

The following example for a `ConfigParser` input file stems right from [the ConfigParser documentation](https://docs.python.org/3/library/configparser.html):

```
[DEFAULT]
ServerAliveInterval = 45
Compression = yes
CompressionLevel = 9
ForwardX11 = yes

[bitbucket.org]
User = hg

[topsecret.server.com]
Port = 50022
ForwardX11 = no
```

The above `ConfigParser` file can be created programmatically:
###Code
import configparser
config = configparser.ConfigParser()
config['DEFAULT'] = {'ServerAliveInterval': '45',
'Compression': 'yes',
'CompressionLevel': '9'}
config['bitbucket.org'] = {}
config['bitbucket.org']['User'] = 'hg'
config['topsecret.server.com'] = {}
topsecret = config['topsecret.server.com']
topsecret['Port'] = '50022' # mutates the parser
topsecret['ForwardX11'] = 'no' # same here
config['DEFAULT']['ForwardX11'] = 'yes'
with open('example.ini', 'w') as configfile:
config.write(configfile)
with open('example.ini') as configfile:
print(configfile.read(), end="")
###Output
_____no_output_____
###Markdown
and be read in again:
###Code
config = configparser.ConfigParser()
config.read('example.ini')
topsecret = config['topsecret.server.com']
topsecret['Port']
###Output
_____no_output_____
###Markdown
#### Part 1: Read Configuration

Using `configparser`, create a program reading in the above configuration file and accessing the individual elements.

#### Part 2: Create a Configuration Grammar

Design a grammar that will automatically create configuration files suitable for your above program. Fuzz your program with it.

#### Part 3: Mine a Configuration Grammar

By dynamically tracking the individual accesses to configuration elements, you can again extract a basic grammar from the execution. To this end, create a subclass of `ConfigParser` with a special method `__getitem__`:
###Code
class TrackingConfigParser(configparser.ConfigParser):
def __getitem__(self, key):
print("Accessing", repr(key))
return super().__getitem__(key)
###Output
_____no_output_____
###Markdown
For a `TrackingConfigParser` object `p`, `p.__getitem__(key)` will be invoked whenever `p[key]` is accessed:
###Code
tracking_config_parser = TrackingConfigParser()
tracking_config_parser.read('example.ini')
section = tracking_config_parser['topsecret.server.com']
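# A possible starting point for Part 3 (a sketch, not the full solution):
# record which sections the program accesses. A complete solution would also
# wrap the returned section proxies to record option names and values read.
accessed_sections = set()

class RecordingConfigParser(configparser.ConfigParser):
    def __getitem__(self, key):
        accessed_sections.add(key)
        return super().__getitem__(key)

recording_config_parser = RecordingConfigParser()
recording_config_parser.read('example.ini')
recording_config_parser['topsecret.server.com']['Port']
accessed_sections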
###Output
_____no_output_____
###Markdown
Using `__getitem__()`, as above, implement a tracking mechanism that, while your program accesses the read configuration, automatically saves options accessed and values read. Create a prototype grammar from these values; use it for fuzzing. At the end, don't forget to clean up:
###Code
import os
os.remove("example.ini")
###Output
_____no_output_____
###Markdown
**Solution.** Left to the reader. Enjoy!

Exercise 3: Extracting and Fuzzing C Command-Line Options
In C programs, the `getopt()` function is frequently used to process configuration options. A call
```
getopt(argc, argv, "bf:")
```
indicates that the program accepts two options `-b` and `-f`, with `-f` taking an argument (as indicated by the following colon).

Part 1: Getopt Fuzzing
Write a framework which, for a given C program, automatically extracts the argument to `getopt()` and derives a fuzzing grammar for it. There are multiple ways to achieve this:
1. Scan the program source code for occurrences of `getopt()` and return the string passed. (Crude, but should frequently work.)
2. Insert your own implementation of `getopt()` into the source code (effectively replacing `getopt()` from the runtime library), which outputs the `getopt()` argument and exits the program. Recompile and run.
3. (Advanced.) As above, but instead of changing the source code, hook into the _dynamic linker_ which at runtime links the program with the C runtime library. Set the library loading path (on Linux and Unix, this is the `LD_LIBRARY_PATH` environment variable) such that your own version of `getopt()` is linked first, and the regular libraries later. Executing the program (without recompiling) should yield the desired result.

Apply this on `grep` and `ls`; report the resulting grammars and results. **Solution.** Left to the reader. Enjoy hacking!

Part 2: Fuzzing Long Options in C
Same as Part 1, but also hook into the GNU variant `getopt_long()`, which accepts "long" arguments with double dashes such as `--help`. Note that method 1, above, will not work here, since the "long" options are defined in a separate structure. **Solution.** Left to the reader. Enjoy hacking!

Exercise 4: Expansions in Context
In our above option configurations, we have multiple symbols which all expand to the same integer. For instance, the `--line-range` option of `autopep8` takes two `<line>` parameters which both expand into the same `<int>` symbol:
```
<option> ::= ... | --line-range <line> <line> | ...
<line>   ::= <int>
<int>    ::= (-)?<digit>+
<digit>  ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
```
###Code
autopep8_runner.ebnf_grammar()["<line>"]
autopep8_runner.ebnf_grammar()["<int>"]
autopep8_runner.ebnf_grammar()["<digit>"]
###Output
_____no_output_____
###Markdown
Testing ConfigurationsThe behavior of a program is not only governed by its data. The _configuration_ of a program – that is, the settings that govern the execution of a program on its (regular) input data, as set by options or configuration files – just as well influences behavior, and thus can and should be tested. In this chapter, we explore how to systematically _test_ and _cover_ software configurations. By _automatically inferring configuration options_, we can apply these techniques out of the box, with no need for writing a grammar. Finally, we show how to systematically cover _combinations_ of configuration options, quickly detecting unwanted interferences. **Prerequisites*** You should have read the [chapter on grammars](Grammars.ipynb).* You should have read the [chapter on grammar coverage](GrammarCoverageFuzzer.ipynb). Configuration OptionsWhen we talk about the input to a program, we usually think of the _data_ it processes. This is also what we have been fuzzing in the past chapters – be it with [random input](Fuzzer.ipynb), [mutation-based fuzzing](MutationFuzzer.ipynb), or [grammar-based fuzzing](GrammarFuzzer.ipynb). However, programs typically have several input sources, all of which can and should be tested – and included in test generation. One important source of input is the program's _configuration_ – that is, a set of inputs that typically is set once when beginning to process data and then stays constant while processing data, while the program is running, or even while the program is deployed. Such a configuration is frequently set in _configuration files_ (for instance, as key/value pairs); the most ubiquitous method for command-line tools, though, are _configuration options_ on the command line. As an example, consider the `grep` utility to find textual patterns in files. The exact mode by which `grep` works is governed by a multitude of options, which can be listed by providing a `--help` option:
###Code
!grep --help
###Output
_____no_output_____
###Markdown
All these options need to be tested for whether they operate correctly. In security testing, any such option may also trigger a yet unknown vulnerability. Hence, such options can become _fuzz targets_ on their own. In this chapter, we analyze how to systematically test such options – and better yet, how to extract possible configurations right out of given program files, such that we do not have to specify anything. Options in PythonLet us stick to our common programming language here and examine how options are processed in Python. The `argparse` module provides a parser for command-line arguments (and options) with great functionality – and great complexity. You start by defining a parser (`argparse.ArgumentParser()`) to which individual arguments with various features are added, one after another. Additional parameters for each argument can specify the type (`type`) of the argument (say, integers or strings), or the number of arguments (`nargs`). By default, arguments are stored under their name in the `args` object coming from `parse_args()` – thus, `args.integers` holds the `integers` arguments added earlier. Special actions (`actions`) allow to store specific values in given variables; the `store_const` action stores the given `const` in the attribute named by `dest`. The following example takes a number of integer arguments (`integers`) as well as an operator (`--sum`, `--min`, or `--max`) to be applied on these integers. The operators all store a function reference in the `accumulate` attribute, which is finally invoked on the integers parsed:
###Code
import argparse
def process_numbers(args=[]):
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('integers', metavar='N', type=int, nargs='+',
help='an integer for the accumulator')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--sum', dest='accumulate', action='store_const',
const=sum,
help='sum the integers')
group.add_argument('--min', dest='accumulate', action='store_const',
const=min,
help='compute the minimum')
group.add_argument('--max', dest='accumulate', action='store_const',
const=max,
help='compute the maximum')
args = parser.parse_args(args)
print(args.accumulate(args.integers))
###Output
_____no_output_____
###Markdown
Here's how `process_numbers()` works. We can, for instance, invoke the `--min` option on the given arguments to compute the minimum:
###Code
process_numbers(["--min", "100", "200", "300"])
###Output
_____no_output_____
###Markdown
Or compute the sum of three numbers:
###Code
process_numbers(["--sum", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
When defined via `add_mutually_exclusive_group()` (as above), options are mutually exclusive. Consequently, we can have only one operator:
###Code
import fuzzingbook_utils
from ExpectError import ExpectError
with ExpectError(print_traceback=False):
process_numbers(["--sum", "--max", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
A Grammar for ConfigurationsHow can we test a system with several options? The easiest answer is to write a grammar for it. The grammar `PROCESS_NUMBERS_EBNF_GRAMMAR` reflects the possible combinations of options and arguments:
###Code
from Grammars import crange, srange, convert_ebnf_grammar, is_valid_grammar, START_SYMBOL, new_symbol
PROCESS_NUMBERS_EBNF_GRAMMAR = {
"<start>": ["<operator> <integers>"],
"<operator>": ["--sum", "--min", "--max"],
"<integers>": ["<integer>", "<integers> <integer>"],
"<integer>": ["<digit>+"],
"<digit>": crange('0', '9')
}
assert is_valid_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
PROCESS_NUMBERS_GRAMMAR = convert_ebnf_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
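# A quick look (sketch): convert_ebnf_grammar() replaces EBNF shorthands
# such as "+" by plain BNF rules with freshly introduced helper symbols.
PROCESS_NUMBERS_GRAMMAR["<integer>"]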
###Output
_____no_output_____
###Markdown
We can feed this grammar into our [grammar coverage fuzzer](GrammarCoverageFuzzer.ipynb) and have it cover one option after another:
###Code
from GrammarCoverageFuzzer import GrammarCoverageFuzzer
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Of course, we can also invoke `process_numbers()` with these very arguments. To this end, we need to convert the string produced by the grammar back into a list of individual arguments, using `split()`:
###Code
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
args = f.fuzz().split()
print(args)
process_numbers(args)
###Output
_____no_output_____
###Markdown
In a similar way, we can define grammars for any program to be tested; as well as define grammars for, say, configuration files. Yet, the grammar has to be updated with every change to the program, which creates a maintenance burden. Given that the information required for the grammar is already all encoded in the program, the question arises: _Can't we go and extract configuration options right out of the program in the first place?_ Mining Configuration Options In this section, we try to extract option and argument information right out of a program, such that we do not have to specify a configuration grammar. The aim is to have a configuration fuzzer that works on the options and arguments of an arbitrary program, as long as it follows specific conventions for processing its arguments. In the case of Python programs, this means using the `argparse` module. Our idea is as follows: We execute the given program up to the point where the arguments are actually parsed – that is, `argparse.parse_args()` is invoked. Up to this point, we track all calls into the argument parser, notably those calls that define arguments and options (`add_argument()`). From these, we construct the grammar. Tracking Arguments Let us illustrate this approach with a simple experiment: We define a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while `process_numbers` is invoked. If we have a call to a method `add_argument`, we access and print out the local variables (which at this point are the arguments to the method).
###Code
import sys
import string
def traceit(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(method_name, locals)
###Output
_____no_output_____
###Markdown
What we get is a list of all calls to `add_argument()`, together with the method arguments passed:
###Code
sys.settrace(traceit)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
###Markdown
From the `args` argument, we can access the individual options and arguments to be defined:
###Code
def traceit(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(locals['args'])
sys.settrace(traceit)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
###Markdown
We see that each argument comes as a tuple with one (say, `integers` or `--sum`) or two members (`-h` and `--help`), which denote alternate forms for the same option. Our job will be to go through the arguments of `add_argument()` and detect not only the names of options and arguments, but also whether they accept additional parameters, as well as the type of the parameters. A Grammar Miner for Options and Arguments Let us now build a class that gathers all this information to create a grammar. We use the `ParseInterrupt` exception to interrupt program execution after gathering all arguments and options:
###Code
class ParseInterrupt(Exception):
pass
###Output
_____no_output_____
###Markdown
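The key mechanism here is that an exception raised inside a trace function propagates out of the traced code. Here is a minimal sketch (illustration only; `setup()`, `demo()`, and `interrupting_traceit()` are made up for this example): as soon as the watched function is called, `ParseInterrupt` aborts the run, and the caller catches it.
```python
import sys

def setup():
    pass

def demo():
    setup()                    # we interrupt as soon as this call is traced
    print("never reached")

def interrupting_traceit(frame, event, arg):
    if event == "call" and frame.f_code.co_name == "setup":
        raise ParseInterrupt
    return None

sys.settrace(interrupting_traceit)
try:
    demo()
except ParseInterrupt:
    print("interrupted")
sys.settrace(None)
```
This is precisely how the miner below stops `function()` at the call to `parse_args()`.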
The class `OptionGrammarMiner` takes an executable function for which the grammar of options and arguments is to be mined:
###Code
class OptionGrammarMiner(object):
def __init__(self, function, log=False):
self.function = function
self.log = log
###Output
_____no_output_____
###Markdown
The method `mine_ebnf_grammar()` is where everything happens. It creates a grammar of the form
```
<start> ::= <option>* <arguments>
<option> ::= <empty>
<arguments> ::= <empty>
```
in which the options and arguments will be collected. It then sets a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while the previously defined `function` is invoked. Raising `ParseInterrupt` (when `parse_args()` is invoked) ends execution.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
OPTION_SYMBOL = "<option>"
ARGUMENTS_SYMBOL = "<arguments>"
def mine_ebnf_grammar(self):
self.grammar = {
START_SYMBOL: ["(" + self.OPTION_SYMBOL + ")*" + self.ARGUMENTS_SYMBOL],
self.OPTION_SYMBOL: [],
self.ARGUMENTS_SYMBOL: []
}
self.current_group = self.OPTION_SYMBOL
old_trace = sys.settrace(self.traceit)
try:
self.function()
except ParseInterrupt:
pass
sys.settrace(old_trace)
return self.grammar
def mine_grammar(self):
return convert_ebnf_grammar(self.mine_ebnf_grammar())
###Output
_____no_output_____
###Markdown
The trace function checks for four methods: `add_argument()` is the most important function, resulting in processing arguments; `frame.f_locals` again is the set of local variables, which at this point is mostly the arguments to `add_argument()`. Since mutually exclusive groups also have a method `add_argument()`, we set the flag `in_group` to differentiate. Note that we make no specific efforts to differentiate between multiple parsers or groups; we simply assume that there is one parser, and at any point at most one mutually exclusive group.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def traceit(self, frame, event, arg):
if event != "call":
return
if "self" not in frame.f_locals:
return
self_var = frame.f_locals["self"]
method_name = frame.f_code.co_name
if method_name == "add_argument":
in_group = repr(type(self_var)).find("Group") >= 0
self.process_argument(frame.f_locals, in_group)
elif method_name == "add_mutually_exclusive_group":
self.add_group(frame.f_locals, exclusive=True)
elif method_name == "add_argument_group":
# self.add_group(frame.f_locals, exclusive=False)
pass
elif method_name == "parse_args":
raise ParseInterrupt
return None
###Output
_____no_output_____
###Markdown
The method `process_argument()` now analyzes the arguments passed and adds them to the grammar:
* If the argument starts with `-`, it gets added as an optional element to the `<option>` list.
* Otherwise, it gets added to the `<arguments>` list.

The optional `nargs` argument specifies how many arguments can follow. If it is a number, we add the appropriate number of elements to the grammar; if it is an abstract specifier (say, `+` or `*`), we use it directly as EBNF operator. Given the large number of parameters and optional behavior, this is a somewhat messy function, but it does the job.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def process_argument(self, locals, in_group):
args = locals["args"]
kwargs = locals["kwargs"]
if self.log:
print(args)
print(kwargs)
print()
for arg in args:
self.process_arg(arg, in_group, kwargs)
class OptionGrammarMiner(OptionGrammarMiner):
def process_arg(self, arg, in_group, kwargs):
if arg.startswith('-'):
if not in_group:
target = self.OPTION_SYMBOL
else:
target = self.current_group
metavar = None
arg = " " + arg
else:
target = self.ARGUMENTS_SYMBOL
metavar = arg
arg = ""
if "nargs" in kwargs:
nargs = kwargs["nargs"]
else:
nargs = 1
param = self.add_parameter(kwargs, metavar)
if param == "":
nargs = 0
if isinstance(nargs, int):
for i in range(nargs):
arg += param
else:
assert nargs in "?+*"
arg += '(' + param + ')' + nargs
        # Options and positional arguments are appended to their respective
        # expansion lists (<option> or <arguments>)
        self.grammar[target].append(arg)
###Output
_____no_output_____
###Markdown
The method `add_parameter()` handles possible parameters of options. If the argument has an `action` defined, it takes no parameter. Otherwise, we identify the type of the parameter (as `int` or `str`) and augment the grammar with an appropriate rule.
###Code
import inspect
class OptionGrammarMiner(OptionGrammarMiner):
def add_parameter(self, kwargs, metavar):
if "action" in kwargs:
# No parameter
return ""
type_ = "str"
if "type" in kwargs:
given_type = kwargs["type"]
# int types come as '<class int>'
if inspect.isclass(given_type) and issubclass(given_type, int):
type_ = "int"
if metavar is None:
if "metavar" in kwargs:
metavar = kwargs["metavar"]
else:
metavar = type_
self.add_type_rule(type_)
if metavar != type_:
self.add_metavar_rule(metavar, type_)
param = " <" + metavar + ">"
return param
###Output
_____no_output_____
###Markdown
The method `add_type_rule()` adds a rule for parameter types to the grammar. If the parameter is identified by a meta-variable (say, `N`), we add a rule for this as well to improve legibility.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_type_rule(self, type_):
if type_ == "int":
self.add_int_rule()
else:
self.add_str_rule()
def add_int_rule(self):
self.grammar["<int>"] = ["(-)?<digit>+"]
self.grammar["<digit>"] = crange('0', '9')
def add_str_rule(self):
self.grammar["<str>"] = ["<char>+"]
self.grammar["<char>"] = srange(
string.digits
+ string.ascii_letters
+ string.punctuation)
def add_metavar_rule(self, metavar, type_):
self.grammar["<" + metavar + ">"] = ["<" + type_ + ">"]
###Output
_____no_output_____
###Markdown
The method `add_group()` adds a new mutually exclusive group to the grammar. We define a new symbol (say, `<group>`) for the options added to the group, and use the `required` and `exclusive` flags to define an appropriate expansion operator. The group is then prefixed to the grammar, as in
```
<start> ::= <group> <option>* <arguments>
<group> ::= <empty>
```
and filled with the next calls to `add_argument()` within the group.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_group(self, locals, exclusive):
kwargs = locals["kwargs"]
if self.log:
print(kwargs)
required = kwargs.get("required", False)
group = new_symbol(self.grammar, "<group>")
if required and exclusive:
group_expansion = group
if required and not exclusive:
group_expansion = group + "+"
if not required and exclusive:
group_expansion = group + "?"
if not required and not exclusive:
group_expansion = group + "*"
self.grammar[START_SYMBOL][0] = group_expansion + \
self.grammar[START_SYMBOL][0]
self.grammar[group] = []
self.current_group = group
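# Another sketch: the effect of add_group() on a minimal hand-made grammar.
# The new <group> symbol is prefixed to the start symbol and becomes the
# target for the options added next.
demo_miner = OptionGrammarMiner(process_numbers)
demo_miner.grammar = {
    START_SYMBOL: ["(<option>)*<arguments>"],
    "<option>": [],
    "<arguments>": []
}
demo_miner.add_group({"kwargs": {"required": True}}, exclusive=True)
demo_miner.grammar[START_SYMBOL], demo_miner.current_group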
###Output
_____no_output_____
###Markdown
That's it! With this, we can now extract the grammar from our `process_numbers()` program. Turning on logging again reveals the variables we draw upon.
###Code
miner = OptionGrammarMiner(process_numbers, log=True)
process_numbers_grammar = miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
Here is the extracted grammar:
###Code
process_numbers_grammar
###Output
_____no_output_____
###Markdown
The grammar properly identifies the group found:
###Code
process_numbers_grammar["<start>"]
process_numbers_grammar["<group>"]
###Output
_____no_output_____
###Markdown
It also identifies a `--help` option provided not by us, but by the `argparse` module:
###Code
process_numbers_grammar["<option>"]
###Output
_____no_output_____
###Markdown
The grammar also correctly identifies the types of the arguments:
###Code
process_numbers_grammar["<arguments>"]
process_numbers_grammar["<integers>"]
###Output
_____no_output_____
###Markdown
The rules for `int` are set as defined by `add_int_rule()`
###Code
process_numbers_grammar["<int>"]
###Output
_____no_output_____
###Markdown
We can take this grammar and convert it to BNF, such that we can fuzz with it right away:
###Code
assert is_valid_grammar(process_numbers_grammar)
grammar = convert_ebnf_grammar(process_numbers_grammar)
assert is_valid_grammar(grammar)
f = GrammarCoverageFuzzer(grammar)
for i in range(10):
print(f.fuzz())
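# Sketch (beyond the original flow): feed the fuzzed invocations back into
# process_numbers(). Invocations drawing "-h"/"--help" exit early, so each
# call is guarded with ExpectError.
for i in range(3):
    args = f.fuzz().split()
    with ExpectError(print_traceback=False):
        process_numbers(args)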
###Output
_____no_output_____
###Markdown
Each and every invocation adheres to the rules as set forth in the `argparse` calls. By mining options and arguments from existing programs, we can now fuzz these options out of the box – without having to specify a grammar. Testing Autopep8 Let us try out the option grammar miner on real-world Python programs. `autopep8` is a tool that automatically converts Python code to the [PEP 8 Style Guide for Python Code](https://www.python.org/dev/peps/pep-0008/). (Actually, all Python code in this book runs through `autopep8` during production.) `autopep8` offers a wide range of options, as can be seen by invoking it with `--help`:
###Code
!autopep8 --help
###Output
_____no_output_____
###Markdown
Autopep8 SetupWe want to systematically test these options. In order to deploy our configuration grammar miner, we need to find the source code of the executable:
###Code
import os
def find_executable(name):
for path in os.get_exec_path():
qualified_name = os.path.join(path, name)
if os.path.exists(qualified_name):
return qualified_name
return None
autopep8_executable = find_executable("autopep8")
assert autopep8_executable is not None
autopep8_executable
###Output
_____no_output_____
###Markdown
Next, we build a function that reads the contents of the file and executes it.
###Code
def autopep8():
executable = find_executable("autopep8")
    # First line has to contain "/usr/bin/env python" or similar
first_line = open(executable).readline()
assert first_line.find("python") >= 0
contents = open(executable).read()
exec(contents)
###Output
_____no_output_____
###Markdown
Mining an Autopep8 GrammarWe can use the `autopep8()` function in our grammar miner:
###Code
autopep8_miner = OptionGrammarMiner(autopep8)
###Output
_____no_output_____
###Markdown
and extract a grammar for it:
###Code
autopep8_ebnf_grammar = autopep8_miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
This works because here, `autopep8` is not a separate process (and a separate Python interpreter), but we run the `autopep8()` function (and the `autopep8` code) in our current Python interpreter – up to the call to `parse_args()`, where we interrupt execution again. At this point, the `autopep8` code has done nothing but setting up the argument parser – which is what we are interested in. The grammar options mined reflect precisely the options seen when providing `--help`:
###Code
print(autopep8_ebnf_grammar["<option>"])
###Output
_____no_output_____
###Markdown
Metavariables like `<n>` or `<line>` are placeholders for integers. We assume all metavariables of the same name have the same type:
###Code
autopep8_ebnf_grammar["<line>"]
###Output
_____no_output_____
###Markdown
The grammar miner has inferred that the argument to `autopep8` is a list of files:
###Code
autopep8_ebnf_grammar["<arguments>"]
###Output
_____no_output_____
###Markdown
which in turn all are strings:
###Code
autopep8_ebnf_grammar["<files>"]
###Output
_____no_output_____
###Markdown
As we are only interested in testing options, not arguments, we fix the arguments to a single mandatory input. (Otherwise, we'd have plenty of random file names generated.)
###Code
autopep8_ebnf_grammar["<arguments>"] = [" <files>"]
autopep8_ebnf_grammar["<files>"] = ["foo.py"]
assert is_valid_grammar(autopep8_ebnf_grammar)
###Output
_____no_output_____
###Markdown
Creating Autopep8 Options Let us now use the inferred grammar for fuzzing. Again, we convert the EBNF grammar into a regular BNF grammar:
###Code
autopep8_grammar = convert_ebnf_grammar(autopep8_ebnf_grammar)
assert is_valid_grammar(autopep8_grammar)
###Output
_____no_output_____
###Markdown
And we can use the grammar for fuzzing all options:
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=4)
for i in range(20):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Let us apply these options on the actual program. We need a file `foo.py` that will serve as input:
###Code
def create_foo_py():
open("foo.py", "w").write("""
def twice(x = 2):
return x + x
""")
create_foo_py()
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We see how `autopep8` fixes the spacing:
###Code
!autopep8 foo.py
###Output
_____no_output_____
###Markdown
Let us now put things together. We define a `ProgramRunner` that will run the `autopep8` executable with arguments coming from the mined `autopep8` grammar.
###Code
from Fuzzer import ProgramRunner
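# Small sketch of the ProgramRunner interface (from the Fuzzer chapter):
# run() executes the program and returns a (result, outcome) pair,
# where result is a subprocess.CompletedProcess.
result, outcome = ProgramRunner(["autopep8", "foo.py"]).run()
outcome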
###Output
_____no_output_____
###Markdown
Running `autopep8` with the mined options reveals a surprisingly high number of passing runs. (We see that some options depend on each other or are mutually exclusive, but this is handled by the program logic, not the argument parser, and hence out of our scope.) The `GrammarCoverageFuzzer` ensures that each option is tested at least once. (Digits and letters, too, by the way.)
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=5)
for i in range(20):
invocation = "autopep8" + f.fuzz()
print("$ " + invocation)
args = invocation.split()
autopep8 = ProgramRunner(args)
result, outcome = autopep8.run()
if result.stderr != "":
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
Our `foo.py` file now has been formatted in place a number of times:
###Code
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We don't need it anymore, so we clean up things:
###Code
import os
os.remove("foo.py")
###Output
_____no_output_____
###Markdown
Classes for Fuzzing Configuration Options Let us now create reusable classes that we can use for testing arbitrary programs. (Okay, make that "arbitrary programs that are written in Python and use the `argparse` module to process command-line arguments.") The class `OptionRunner` is a subclass of `ProgramRunner` that takes care of automatically determining the grammar, using the same steps we used for `autopep8`, above.
###Code
class OptionRunner(ProgramRunner):
def __init__(self, program, arguments=None):
if isinstance(program, str):
self.base_executable = program
else:
self.base_executable = program[0]
self.find_contents()
self.find_grammar()
if arguments is not None:
self.set_arguments(arguments)
super().__init__(program)
###Output
_____no_output_____
###Markdown
First, we find the contents of the Python executable:
###Code
class OptionRunner(OptionRunner):
def find_contents(self):
self._executable = find_executable(self.base_executable)
first_line = open(self._executable).readline()
assert first_line.find("python") >= 0
self.contents = open(self._executable).read()
def invoker(self):
exec(self.contents)
def executable(self):
return self._executable
###Output
_____no_output_____
###Markdown
Next, we determine the grammar using the `OptionGrammarMiner` class:
###Code
class OptionRunner(OptionRunner):
def find_grammar(self):
miner = OptionGrammarMiner(self.invoker)
self._ebnf_grammar = miner.mine_ebnf_grammar()
def ebnf_grammar(self):
return self._ebnf_grammar
def grammar(self):
return convert_ebnf_grammar(self._ebnf_grammar)
###Output
_____no_output_____
###Markdown
The two service methods `set_arguments()` and `set_invocation()` help us to change the arguments and program, respectively.
###Code
class OptionRunner(OptionRunner):
def set_arguments(self, args):
self._ebnf_grammar["<arguments>"] = [" " + args]
def set_invocation(self, program):
self.program = program
###Output
_____no_output_____
###Markdown
We can instantiate the class on `autopep8` and immediately get the grammar:
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
print(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
###Markdown
An `OptionFuzzer` interacts with the given `OptionRunner` to obtain its grammar, which is then passed to its `GrammarCoverageFuzzer` superclass.
###Code
class OptionFuzzer(GrammarCoverageFuzzer):
def __init__(self, runner, *args, **kwargs):
assert issubclass(type(runner), OptionRunner)
self.runner = runner
grammar = runner.grammar()
super().__init__(grammar, *args, **kwargs)
###Output
_____no_output_____
###Markdown
When invoking `run()`, the `OptionFuzzer` creates a new invocation (using `fuzz()` from its grammar) and runs the now given (or previously set) runner with the arguments from the grammar. Note that the runner specified in `run()` can differ from the one set during initialization; this allows for mining options from one program and applying it in another context.
###Code
class OptionFuzzer(OptionFuzzer):
def run(self, runner=None, inp=""):
if runner is None:
runner = self.runner
assert issubclass(type(runner), OptionRunner)
invocation = runner.executable() + " " + self.fuzz()
runner.set_invocation(invocation.split())
return runner.run(inp)
###Output
_____no_output_____
###Markdown
Example: Autopep8 Let us apply this on the `autopep8` runner:
###Code
autopep8_fuzzer = OptionFuzzer(autopep8_runner, max_nonterminals=5)
for i in range(3):
print(autopep8_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
We can now systematically test `autopep8` with these classes:
###Code
autopep8_fuzzer.run(autopep8_runner)
###Output
_____no_output_____
###Markdown
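We could also turn this into a small fuzzing campaign and tally the outcomes reported by the runner – a sketch, with `Counter` used only for bookkeeping:
```python
from collections import Counter

outcomes = Counter(autopep8_fuzzer.run(autopep8_runner)[1]
                   for i in range(5))
outcomes
```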
Example: MyPyWe can extract options for the `mypy` static type checker for Python:
###Code
assert find_executable("mypy") is not None
mypy_runner = OptionRunner("mypy", "foo.py")
print(mypy_runner.ebnf_grammar()["<option>"])
mypy_fuzzer = OptionFuzzer(mypy_runner, max_nonterminals=5)
for i in range(10):
print(mypy_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Example: NotedownHere's the configuration options for the `notedown` Notebook to Markdown converter:
###Code
assert find_executable("notedown") is not None
notedown_runner = OptionRunner("notedown")
print(notedown_runner.ebnf_grammar()["<option>"])
notedown_fuzzer = OptionFuzzer(notedown_runner, max_nonterminals=5)
for i in range(10):
print(notedown_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Combinatorial TestingOur `GrammarCoverageFuzzer` does a good job in covering each and every option at least once, which is great for systematic testing. However, as we also can see in our examples above, some options require each other, while others interfere with each other. What we should do as good testers is not only to cover every option individually, but also _combinations_ of options. The Python `itertools` module gives us means to create combinations from lists. We can, for instance, take the `notedown` options and create a list of all pairs.
###Code
from itertools import combinations
option_list = notedown_runner.ebnf_grammar()["<option>"]
pairs = list(combinations(option_list, 2))
###Output
_____no_output_____
###Markdown
There's quite a number of pairs:
###Code
len(pairs)
print(pairs[:20])
###Output
_____no_output_____
###Markdown
Testing every such pair of options frequently suffices to cover all interferences between options. (Programs rarely have conditions involving three or more configuration settings.) To this end, we _change_ the grammar from having a list of options to having a list of _option pairs_, such that covering these will automatically cover all pairs. We create a function `pairwise()` that takes a list of options as occurring in our grammar and returns a list of _pairwise options_ – that is, our original options, but concatenated.
###Code
def pairwise(option_list):
return [option_1 + option_2
for (option_1, option_2) in combinations(option_list, 2)]
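# Sanity check (sketch): pairwise() yields "n choose 2" combined options.
assert len(pairwise(option_list)) == \
    len(option_list) * (len(option_list) - 1) // 2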
###Output
_____no_output_____
###Markdown
Here's the first 20 pairs:
###Code
print(pairwise(option_list)[:20])
###Output
_____no_output_____
###Markdown
The new grammar `pairwise_notedown_grammar` is a copy of the `notedown` grammar, but with the list of options replaced with the above pairwise option list.
###Code
from copy import deepcopy
notedown_grammar = notedown_runner.grammar()
pairwise_notedown_grammar = deepcopy(notedown_grammar)
pairwise_notedown_grammar["<option>"] = pairwise(notedown_grammar["<option>"])
assert is_valid_grammar(pairwise_notedown_grammar)
###Output
_____no_output_____
###Markdown
Using the "pairwise" grammar to fuzz now covers one pair after another:
###Code
notedown_fuzzer = GrammarCoverageFuzzer(
pairwise_notedown_grammar, max_nonterminals=4)
for i in range(10):
print(notedown_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Can we actually test all combinations of options? Not in practice, as the number of combinations quickly grows as the length increases. It decreases again as the number of options reaches the maximum (with 20 options, there is only 1 combination involving _all_ options), but the absolute numbers are still staggering:
###Code
for combination_length in range(1, 20):
tuples = list(combinations(option_list, combination_length))
print(combination_length, len(tuples))
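# Cross-check (sketch): the counts above are the binomial coefficients
# C(n, k); math.comb is available from Python 3.8 on.
from math import comb
all(len(list(combinations(option_list, k))) == comb(len(option_list), k)
    for k in range(1, 5))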
###Output
_____no_output_____
###Markdown
Formally, the number of combinations of length $k$ in a set of options of length $n$ is the binomial coefficient
$${n \choose k} = \frac{n!}{k!(n - k)!}$$
which for $k = 2$ (all pairs) gives us
$${n \choose 2} = \frac{n!}{2!(n - 2)!} = \frac{n \times (n - 1)}{2}$$
For `autopep8` with its 29 options...
###Code
len(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
###Markdown
... we thus need $29 \times 28 / 2 = 406$ tests to cover all pairs:
###Code
len(autopep8_runner.ebnf_grammar()["<option>"]) * \
    (len(autopep8_runner.ebnf_grammar()["<option>"]) - 1) // 2
###Output
_____no_output_____
###Markdown
For `mypy` with its 110 options, though, we already end up with 5,995 tests to be conducted:
###Code
len(mypy_runner.ebnf_grammar()["<option>"])
len(mypy_runner.ebnf_grammar()["<option>"]) * \
    (len(mypy_runner.ebnf_grammar()["<option>"]) - 1) // 2
###Output
_____no_output_____
###Markdown
Even if each pair takes a second to run, we'd still be done in well under two hours of testing, though. If your program has more options than you can reasonably cover in combinations, it is advisable that you limit the number of configurations further – for instance by limiting combinatorial testing to those combinations that possibly can interact with each other; and covering all other (presumably orthogonal) options individually. This mechanism of creating configurations by extending grammars can be easily extended to other configuration targets. One may want to explore a greater number of configurations, or expansions in specific contexts. The [exercises](Exercises), below, have a number of options ready for you.

Lessons Learned
* Besides regular input data, program _configurations_ make an important testing target.
* For a given program using a standard library to parse command-line options and arguments, one can automatically extract these and convert them into a grammar.
* To cover not only single options, but combinations of options, one can expand the grammar to cover all pairs, or come up with even more ambitious targets.

Next Steps
If you liked the idea of mining a grammar from a program, do not miss:
* [how to mine grammars for input data](GrammarMiner.ipynb)

Our next steps in the book focus on:
* [how to parse and recombine inputs](Parser.ipynb)
* [how to assign weights and probabilities to specific productions](ProbabilisticGrammarFuzzer.ipynb)
* [how to simplify inputs that cause a failure](Reducer.ipynb)

Background
Although configuration data is just as likely to cause failures as other input data, it has received relatively little attention in test generation – possibly because, unlike "regular" input data, configuration data is not so much under control of external parties, and because, again unlike regular data, there is little variance in configurations. Creating models for software configurations and using these models for testing is commonplace, as is the idea of pairwise testing. For an overview, see \cite{Pezze2008}; for a discussion and comparison of state-of-the-art techniques, see \cite{Petke2015}. More specifically, \cite{Sutton2007} also discuss techniques to systematically cover command-line options. Dai et al. \cite{Dai2010} apply configuration fuzzing by changing variables associated with configuration files.

Exercises

Exercise 1: #ifdef Configuration Fuzzing
In C programs, the *C preprocessor* can be used to choose which code parts should be compiled and which ones should not. As an example, in the C code
```C
#ifdef LONG_FOO
long foo() { ... }
#else
int foo() { ... }
#endif
```
the compiler will compile the function `foo()` with return type `long` if the _preprocessor variable_ `LONG_FOO` is defined, and with return type `int` if not. Such preprocessor variables are either set in the source files (using `#define`, as in `#define LONG_FOO`) or on the C compiler command line (using `-D<variable>` or `-D<variable>=<value>`, as in `-DLONG_FOO`). Such *conditional compilation* is used to configure C programs towards their environment. System-specific code can contain lots of conditional compilation.
As an example, consider this excerpt of `xmlparse.c`, the XML parser that is part of the Python runtime library:
```c
#if defined(_WIN32) && !defined(LOAD_LIBRARY_SEARCH_SYSTEM32)
# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800
#endif

#if !defined(HAVE_GETRANDOM) && !defined(HAVE_SYSCALL_GETRANDOM) \
  && !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_ARC4RANDOM) \
  && !defined(XML_DEV_URANDOM) \
  && !defined(_WIN32) \
  && !defined(XML_POOR_ENTROPY)
# error
#endif

#if !defined(TIOCSWINSZ) || defined(__SCO__) || defined(__UNIXWARE__)
#define USE_SYSV_ENVVARS /* COLUMNS/LINES vs. TERMCAP */
#endif

#ifdef XML_UNICODE_WCHAR_T
#define XML_T(x) (const wchar_t)x
#define XML_L(x) L ## x
#else
#define XML_T(x) (const unsigned short)x
#define XML_L(x) x
#endif

int fun(int x) { return XML_T(x); }
```
A typical configuration for the C preprocessor on the above code could be `cc -c -D_WIN32 -DXML_POOR_ENTROPY -DXML_UNICODE_WCHAR_T xmlparse.c`, defining the given preprocessor variables and selecting the appropriate code fragments. Since the compiler can only compile one configuration at a time (implying that we can also only _test_ one resulting executable at a time), your task is to find out which of these configurations actually compile. To this end, proceed in three steps.

Part 1: Extract Preprocessor Variables
Write a _function_ `cpp_identifiers()` that, given a set of lines (say, from `open(filename).readlines()`), extracts all preprocessor variables referenced in `#if` or `#ifdef` preprocessor instructions. Apply `cpp_identifiers()` on the sample C input above, such that
```python
cpp_identifiers(open("xmlparse.c").readlines())
```
returns the set
```python
{'_WIN32', 'LOAD_LIBRARY_SEARCH_SYSTEM32', 'HAVE_GETRANDOM',
 'HAVE_SYSCALL_GETRANDOM', 'HAVE_ARC4RANDOM_BUF', ...}
```
**Solution.** Let us start with creating a sample input file, `xmlparse.c`:
###Code
filename = "xmlparse.c"
open(filename, "w").write(
"""
#if defined(_WIN32) && !defined(LOAD_LIBRARY_SEARCH_SYSTEM32)
# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800
#endif
#if !defined(HAVE_GETRANDOM) && !defined(HAVE_SYSCALL_GETRANDOM) \
&& !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_ARC4RANDOM) \
&& !defined(XML_DEV_URANDOM) \
&& !defined(_WIN32) \
&& !defined(XML_POOR_ENTROPY)
# error
#endif
#if !defined(TIOCSWINSZ) || defined(__SCO__) || defined(__UNIXWARE__)
#define USE_SYSV_ENVVARS /* COLUMNS/LINES vs. TERMCAP */
#endif
#ifdef XML_UNICODE_WCHAR_T
#define XML_T(x) (const wchar_t)x
#define XML_L(x) L ## x
#else
#define XML_T(x) (const unsigned short)x
#define XML_L(x) x
#endif
int fun(int x) { return XML_T(x); }
""");
###Output
_____no_output_____
###Markdown
To find C preprocessor `#if` directives and preprocessor variables, we use regular expressions matching them.
###Code
import re
re_cpp_if_directive = re.compile(r"\s*#\s*(el)?if")
re_cpp_identifier = re.compile(r"[a-zA-Z_$]+")
def cpp_identifiers(lines):
identifiers = set()
for line in lines:
if re_cpp_if_directive.match(line):
identifiers |= set(re_cpp_identifier.findall(line))
# These are preprocessor keywords
identifiers -= { "if", "ifdef", "ifndef", "defined" }
return identifiers
cpp_ids = cpp_identifiers(open("xmlparse.c").readlines())
cpp_ids
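# Sanity check (sketch): identifiers from the #if/#ifdef lines above are
# found, and preprocessor keywords such as "defined" are filtered out.
assert "XML_UNICODE_WCHAR_T" in cpp_ids
assert "defined" not in cpp_ids
cpp_ids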
###Output
_____no_output_____
###Markdown
Part 2: Derive an Option Grammar
With the help of `cpp_identifiers()`, create a grammar which has C compiler invocations with a list of options, where each option takes the form `-D<variable>` for a preprocessor variable `<variable>`. Using this grammar `cpp_grammar`, a fuzzer
```python
g = GrammarCoverageFuzzer(cpp_grammar)
```
would create C compiler invocations such as
```python
[g.fuzz() for i in range(10)]

['cc -DHAVE_SYSCALL_GETRANDOM xmlparse.c',
 'cc -D__SCO__ -DRANDOM_BUF -DXML_UNICODE_WCHAR_T -D__UNIXWARE__ xmlparse.c',
 'cc -DXML_POOR_ENTROPY xmlparse.c',
 'cc -DRANDOM xmlparse.c',
 'cc -D_WIN xmlparse.c',
 'cc -DHAVE_ARC xmlparse.c',
 ...]
```
**Solution.** This is not very difficult:
###Code
from Grammars import new_symbol
cpp_grammar = {
"<start>": ["cc -c<options> " + filename],
"<options>": ["<option>", "<options><option>"],
"<option>": []
}
for id in cpp_ids:
s = new_symbol(cpp_grammar, "<" + id + ">")
cpp_grammar["<option>"].append(s)
cpp_grammar[s] = [" -D" + id]
cpp_grammar
assert is_valid_grammar(cpp_grammar)
###Output
_____no_output_____
###Markdown
Part 3: C Preprocessor Configuration Fuzzing
Using the grammar just produced, use a `GrammarCoverageFuzzer` to
1. Test each preprocessor variable individually
2. Test each pair of preprocessor variables, using `pairwise()`.

What happens if you actually run the invocations? **Solution.** We can simply run the coverage fuzzer, as described above.
###Code
g = GrammarCoverageFuzzer(cpp_grammar)
g.fuzz()
from Fuzzer import ProgramRunner
for i in range(10):
invocation = g.fuzz()
print("$", invocation)
# subprocess.call(invocation, shell=True)
cc_runner = ProgramRunner(invocation.split(' '))
(result, outcome) = cc_runner.run()
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
To test all pairs, we can use `pairwise()`:
###Code
pairwise_cpp_grammar = deepcopy(cpp_grammar)
pairwise_cpp_grammar["<option>"] = pairwise(cpp_grammar["<option>"])
pairwise_cpp_grammar["<option>"][:10]
g = GrammarCoverageFuzzer(pairwise_cpp_grammar)
for i in range(10):
invocation = g.fuzz()
print("$", invocation)
# subprocess.call(invocation, shell=True)
cc_runner = ProgramRunner(invocation.split(' '))
(result, outcome) = cc_runner.run()
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
Some of the compilation errors we get could be expected – for instance, defining `XML_UNICODE_WCHAR_T` when the type is actually not supported in our environment. Other errors may not be expected – and it is precisely these errors that we would find through systematic configuration fuzzing, as described above. At the end, don't forget to clean up:
###Code
os.remove("xmlparse.c")
if os.path.exists("xmlparse.o"):
os.remove("xmlparse.o")
###Output
_____no_output_____
###Markdown
Exercise 2: .ini Configuration Fuzzing
Besides command-line options, another important source of configurations is _configuration files_. In this exercise, we will consider the very simple configuration language provided by the Python `ConfigParser` module, which is very similar to what is found in Microsoft Windows _.ini_ files. The following example for a `ConfigParser` input file stems right from [the ConfigParser documentation](https://docs.python.org/3/library/configparser.html):
```
[DEFAULT]
ServerAliveInterval = 45
Compression = yes
CompressionLevel = 9
ForwardX11 = yes

[bitbucket.org]
User = hg

[topsecret.server.com]
Port = 50022
ForwardX11 = no
```
The above `ConfigParser` file can be created programmatically:
###Code
import configparser
config = configparser.ConfigParser()
config['DEFAULT'] = {'ServerAliveInterval': '45',
'Compression': 'yes',
'CompressionLevel': '9'}
config['bitbucket.org'] = {}
config['bitbucket.org']['User'] = 'hg'
config['topsecret.server.com'] = {}
topsecret = config['topsecret.server.com']
topsecret['Port'] = '50022' # mutates the parser
topsecret['ForwardX11'] = 'no' # same here
config['DEFAULT']['ForwardX11'] = 'yes'
with open('example.ini', 'w') as configfile:
config.write(configfile)
with open('example.ini') as configfile:
print(configfile.read(), end="")
###Output
_____no_output_____
###Markdown
and be read in again:
###Code
config = configparser.ConfigParser()
config.read('example.ini')
topsecret = config['topsecret.server.com']
topsecret['Port']
###Output
_____no_output_____
###Markdown
Part 1: Read ConfigurationUsing `configparser`, create a program reading in the above configuration file and accessing the individual elements. Part 2: Create a Configuration GrammarDesign a grammar that will automatically create configuration files suitable for your above program. Fuzz your program with it. Part 3: Mine a Configuration GrammarBy dynamically tracking the individual accesses to configuration elements, you can again extract a basic grammar from the execution. To this end, create a subclass of `ConfigParser` with a special method `__getitem__`:
###Code
class TrackingConfigParser(configparser.ConfigParser):
def __getitem__(self, key):
print("Accessing", repr(key))
return super().__getitem__(key)
###Output
_____no_output_____
###Markdown
For a `TrackingConfigParser` object `p`, `p.__getitem__(key)` will be invoked whenever `p[key]` is accessed:
###Code
tracking_config_parser = TrackingConfigParser()
tracking_config_parser.read('example.ini')
section = tracking_config_parser['topsecret.server.com']
###Output
_____no_output_____
###Markdown
Using `__getitem__()`, as above, implement a tracking mechanism that, while your program accesses the read configuration, automatically saves options accessed and values read. Create a prototype grammar from these values; use it for fuzzing. At the end, don't forget to clean up:
###Code
import os
os.remove("example.ini")
###Output
_____no_output_____
###Markdown
**Solution.** Left to the reader. Enjoy!

Exercise 3: Extracting and Fuzzing C Command-Line Options
In C programs, the `getopt()` function is frequently used to process configuration options. A call
```
getopt(argc, argv, "bf:")
```
indicates that the program accepts two options `-b` and `-f`, with `-f` taking an argument (as indicated by the following colon).

Part 1: Getopt Fuzzing
Write a framework which, for a given C program, automatically extracts the argument to `getopt()` and derives a fuzzing grammar for it. There are multiple ways to achieve this:
1. Scan the program source code for occurrences of `getopt()` and return the string passed. (Crude, but should frequently work.)
2. Insert your own implementation of `getopt()` into the source code (effectively replacing `getopt()` from the runtime library), which outputs the `getopt()` argument and exits the program. Recompile and run.
3. (Advanced.) As above, but instead of changing the source code, hook into the _dynamic linker_ which at runtime links the program with the C runtime library. Set the library loading path (on Linux and Unix, this is the `LD_LIBRARY_PATH` environment variable) such that your own version of `getopt()` is linked first, and the regular libraries later. Executing the program (without recompiling) should yield the desired result.

Apply this on `grep` and `ls`; report the resulting grammars and results. **Solution.** Left to the reader. Enjoy hacking!

Part 2: Fuzzing Long Options in C
Same as Part 1, but also hook into the GNU variant `getopt_long()`, which accepts "long" arguments with double dashes such as `--help`. Note that method 1, above, will not work here, since the "long" options are defined in a separate structure. **Solution.** Left to the reader. Enjoy hacking!

Exercise 4: Expansions in Context
In our above option configurations, we have multiple symbols which all expand to the same integer. For instance, the `--line-range` option of `autopep8` takes two `<line>` parameters which both expand into the same `<int>` symbol:
```
<option> ::= ... | --line-range <line> <line> | ...
<line>   ::= <int>
<int>    ::= (-)?<digit>+
<digit>  ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
```
###Code
autopep8_runner.ebnf_grammar()["<line>"]
autopep8_runner.ebnf_grammar()["<int>"]
autopep8_runner.ebnf_grammar()["<digit>"]
###Output
_____no_output_____
###Markdown
Testing ConfigurationsThe behavior of a program is not only governed by its data. The _configuration_ of a program – that is, the settings that govern the execution of a program on its (regular) input data, as set by options or configuration files – just as well influences behavior, and thus can and should be tested. In this chapter, we explore how to systematically _test_ and _cover_ software configurations. By _automatically inferring configuration options_, we can apply these techniques out of the box, with no need for writing a grammar. Finally, we show how to systematically cover _combinations_ of configuration options, quickly detecting unwanted interferences. **Prerequisites*** You should have read the [chapter on grammars](Grammars.ipynb).* You should have read the [chapter on grammar coverage](GrammarCoverageFuzzer.ipynb). SynopsisTo [use the code provided in this chapter](Importing.ipynb), write```python>>> from fuzzingbook.ConfigurationFuzzer import ```and then make use of the following features.This chapter provides two classes:* `OptionRunner` automatically extract command-line options from a Python program;* `OptionFuzzer` uses these to automatically test a Python program with a large variety of options.`OptionRunner` runs a program up to the point where it parses its arguments, and then extracts a grammar that describes its invocations:```python>>> autopep8_runner = OptionRunner("autopep8", "foo.py")```The grammar can be extracted via the method `ebnf_grammar()`:```python>>> option_ebnf_grammar = autopep8_runner.ebnf_grammar()>>> print(option_ebnf_grammar){'': ['()*'], '': [' -h', ' --help', ' --version', ' -v', ' --verbose', ' -d', ' --diff', ' -i', ' --in-place', ' --global-config ', ' --ignore-local-config', ' -r', ' --recursive', ' -j ', ' --jobs ', ' -p ', ' --pep8-passes ', ' -a', ' --aggressive', ' --experimental', ' --exclude ', ' --list-fixes', ' --ignore ', ' --select ', ' --max-line-length ', ' --line-range ', ' --range ', ' --indent-size ', ' --hang-closing', ' --exit-code'], '': [' foo.py'], '': ['+'], '': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '!', '"', '', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '', '?', '@', '[', '\\', ']', '^', '_', '`', '{', '|', '}', '~'], '': [''], '': ['(-)?+'], '': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], '': [''], '': [''], '': [''], '': ['']}```The grammar can be immediately used for fuzzing. 
A `GrammarCoverageFuzzer` will ensure all options are covered:```python>>> from Grammars import convert_ebnf_grammar>>> fuzzer = GrammarCoverageFuzzer(convert_ebnf_grammar(option_ebnf_grammar))>>> [fuzzer.fuzz() for i in range(3)][' -v foo.py', ' --verbose --global-config e\\ --in-place --indent-size -9 --pep8-passes 48 -i --ignore-local-config -h --exit-code --select ] -r -a --help --exclude t --jobs -5 --aggressive --hang-closing --experimental --diff --range -26 -0 --max-line-length 7 --list-fixes --recursive -d --version -p -31 --line-range 6 2 --help -r -v --exit-code foo.py', ' --ignore P -j -9 --ignore }go --select * --global-config ;0 --select \' --exclude !s --exclude L/HW:n" --global-config T --ignore V --select jur --exclude &+w --select 3 --ignore %RhF[` --exclude yMB --global-config 1 --ignore X --exclude _ --global-config xQ) --exclude =>d --ignore ( --ignore ~Y --exclude K --ignore .b --global-config A? --ignore CU --ignore , --global-config f --global-config Ez --exclude p$8c@ --ignore O --select <6 --global-config 5DS --global-config Iq2 --select 4 --exclude J^ --global-config Nv --select 79 --select i- --ignore |Zkml{Z --select aG --version --exclude d --exclude 8g foo.py']```The `OptionFuzzer` class summarizes these steps. Its constructor takes an `OptionRunner` to automatically extract the grammar; it does the necessary steps to extract the grammar and fuzz with it.```python>>> autopep8_runner = OptionRunner("autopep8", "foo.py")>>> autopep8_fuzzer = OptionFuzzer(autopep8_runner)>>> [autopep8_fuzzer.fuzz() for i in range(3)][' foo.py', ' -v --pep8-passes 4 --global-config b --hang-closing --experimental --recursive -a --verbose -j -10729 --ignore-local-config -r --select ,! --exit-code --max-line-length -5 --ignore Di --indent-size -86 --jobs -3 --exclude { --help --diff -d --version -p -89 --list-fixes --line-range 1 -0 --range 6 5 --aggressive -i foo.py', " --in-place -h --ignore vU --select O; --ignore mq' --ignore ~Q --global-config =F --ignore nfA?0% --exclude / --global-config g --select LB --global-config s --ignore 3\\ --select (y --global-config - --global-config : --exclude ke --select ^ --ignore `6 --ignore p --ignore T --select 4j --exclude I$ --ignore 1Z --exclude M --exclude rK --ignore wN95t --select a --global-config > --recursive --aggressive -a foo.py"]```The final step in testing would now to invoke the program with these arguments.Note that `OptionRunner` is experimental: It assumes that the Python program in question uses the `argparse` module; and not all `argparse` features are supported. Still, it does a pretty good job even on nontrivial programs. Configuration OptionsWhen we talk about the input to a program, we usually think of the _data_ it processes. This is also what we have been fuzzing in the past chapters – be it with [random input](Fuzzer.ipynb), [mutation-based fuzzing](MutationFuzzer.ipynb), or [grammar-based fuzzing](GrammarFuzzer.ipynb). However, programs typically have several input sources, all of which can and should be tested – and included in test generation. One important source of input is the program's _configuration_ – that is, a set of inputs that typically is set once when beginning to process data and then stays constant while processing data, while the program is running, or even while the program is deployed. Such a configuration is frequently set in _configuration files_ (for instance, as key/value pairs); the most ubiquitous method for command-line tools, though, are _configuration options_ on the command line. 
As an example, consider the `grep` utility to find textual patterns in files. The exact mode by which `grep` works is governed by a multitude of options, which can be listed by providing a `--help` option:
###Code
!grep --help
###Output
_____no_output_____
###Markdown
All these options need to be tested for whether they operate correctly. In security testing, any such option may also trigger a yet unknown vulnerability. Hence, such options can become _fuzz targets_ on their own. In this chapter, we analyze how to systematically test such options – and better yet, how to extract possible configurations right out of given program files, such that we do not have to specify anything. Options in PythonLet us stick to our common programming language here and examine how options are processed in Python. The `argparse` module provides a parser for command-line arguments (and options) with great functionality – and great complexity. You start by defining a parser (`argparse.ArgumentParser()`) to which individual arguments with various features are added, one after another. Additional parameters for each argument can specify the type (`type`) of the argument (say, integers or strings), or the number of arguments (`nargs`). By default, arguments are stored under their name in the `args` object coming from `parse_args()` – thus, `args.integers` holds the `integer` arguments added earlier. Special actions (`actions`) allow to store specific values in given variables; the `store_const` action stores the given `const` in the attribute named by `dest`. The following example takes a number of integer arguments (`integers`) as well as an operator (`--sum`, `--min`, or `--max`) to be applied on these integers. The operators all store a function reference in the `accumulate` attribute, which is finally invoked on the integers parsed:
###Code
import argparse
def process_numbers(args=[]):
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('integers', metavar='N', type=int, nargs='+',
help='an integer for the accumulator')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--sum', dest='accumulate', action='store_const',
const=sum,
help='sum the integers')
group.add_argument('--min', dest='accumulate', action='store_const',
const=min,
help='compute the minimum')
group.add_argument('--max', dest='accumulate', action='store_const',
const=max,
help='compute the maximum')
args = parser.parse_args(args)
print(args.accumulate(args.integers))
###Output
_____no_output_____
###Markdown
Here's how `process_numbers()` works. We can, for instance, invoke the `--min` option on the given arguments to compute the minimum:
###Code
process_numbers(["--min", "100", "200", "300"])
###Output
_____no_output_____
###Markdown
Or compute the sum of three numbers:
###Code
process_numbers(["--sum", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
When defined via `add_mutually_exclusive_group()` (as above), options are mutually exclusive. Consequently, we can have only one operator:
###Code
import bookutils
from ExpectError import ExpectError
with ExpectError(print_traceback=False):
process_numbers(["--sum", "--max", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
A Grammar for ConfigurationsHow can we test a system with several options? The easiest answer is to write a grammar for it. The grammar `PROCESS_NUMBERS_EBNF_GRAMMAR` reflects the possible combinations of options and arguments:
###Code
from Grammars import crange, srange, convert_ebnf_grammar, extend_grammar, is_valid_grammar
from Grammars import START_SYMBOL, new_symbol, Grammar
PROCESS_NUMBERS_EBNF_GRAMMAR: Grammar = {
"<start>": ["<operator> <integers>"],
"<operator>": ["--sum", "--min", "--max"],
"<integers>": ["<integer>", "<integers> <integer>"],
"<integer>": ["<digit>+"],
"<digit>": crange('0', '9')
}
assert is_valid_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
PROCESS_NUMBERS_GRAMMAR = convert_ebnf_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
###Output
_____no_output_____
###Markdown
We can feed this grammar into our [grammar coverage fuzzer](GrammarCoverageFuzzer.ipynb) and have it cover one option after another:
###Code
from GrammarCoverageFuzzer import GrammarCoverageFuzzer
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Of course, we can also invoke `process_numbers()` with these very arguments. To this end, we need to convert the string produced by the grammar back into a list of individual arguments, using `split()`:
###Code
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
args = f.fuzz().split()
print(args)
process_numbers(args)
###Output
_____no_output_____
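A plain `split()` suffices here because the generated arguments never contain spaces. If an option value could contain blanks or quotes, the standard library's `shlex.split()` would be the more robust choice; a small sketch (the `--exclude` value is hypothetical):
```python
import shlex

assert "--sum 1 2 3".split() == ["--sum", "1", "2", "3"]
assert shlex.split("--exclude 'a b.py' foo.py") == ["--exclude", "a b.py", "foo.py"]
```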
###Markdown
In a similar way, we can define grammars for any program to be tested; as well as define grammars for, say, configuration files. Yet, the grammar has to be updated with every change to the program, which creates a maintenance burden. Given that the information required for the grammar is already all encoded in the program, the question arises: _Can't we go and extract configuration options right out of the program in the first place?_ Mining Configuration OptionsIn this section, we try to extract option and argument information right out of a program, such that we do not have to specify a configuration grammar. The aim is to have a configuration fuzzer that works on the options and arguments of an arbitrary program, as long as it follows specific conventions for processing its arguments. In the case of Python programs, this means using the `argparse` module.Our idea is as follows: We execute the given program up to the point where the arguments are actually parsed – that is, `argparse.parse_args()` is invoked. Up to this point, we track all calls into the argument parser, notably those calls that define arguments and options (`add_argument()`). From these, we construct the grammar. Tracking ArgumentsLet us illustrate this approach with a simple experiment: We define a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while `process_numbers` is invoked. If we have a call to a method `add_argument`, we access and print out the local variables (which at this point are the arguments to the method).
###Code
import sys
import string
def trace_locals(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(method_name, locals)
###Output
_____no_output_____
###Markdown
What we get is a list of all calls to `add_argument()`, together with the method arguments passed:
###Code
sys.settrace(trace_locals)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
###Markdown
From the `args` argument, we can access the individual options and arguments to be defined:
###Code
def trace_options(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(locals['args'])
sys.settrace(trace_options)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
###Markdown
We see that each argument comes as a tuple with one (say, `integers` or `--sum`) or two members (`-h` and `--help`), which denote alternate forms for the same option. Our job will be to go through the arguments of `add_argument()` and detect not only the names of options and arguments, but also whether they accept additional parameters, as well as the type of the parameters. A Grammar Miner for Options and Arguments Let us now build a class that gathers all this information to create a grammar. We use the `ParseInterrupt` exception to interrupt program execution after gathering all arguments and options:
###Code
class ParseInterrupt(Exception):
pass
###Output
_____no_output_____
###Markdown
The class `OptionGrammarMiner` takes an executable function for which the grammar of options and arguments is to be mined:
###Code
class OptionGrammarMiner:
def __init__(self, function, log=False):
self.function = function
self.log = log
###Output
_____no_output_____
###Markdown
The method `mine_ebnf_grammar()` is where everything happens. It creates a grammar of the form
```
<start> ::= <option>* <arguments>
<option> ::= <empty>
<arguments> ::= <empty>
```
in which the options and arguments will be collected. It then sets a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while the previously defined `function` is invoked. Raising `ParseInterrupt` (when `parse_args()` is invoked) ends execution.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
OPTION_SYMBOL = "<option>"
ARGUMENTS_SYMBOL = "<arguments>"
def mine_ebnf_grammar(self):
self.grammar = {
START_SYMBOL: ["(" + self.OPTION_SYMBOL + ")*" + self.ARGUMENTS_SYMBOL],
self.OPTION_SYMBOL: [],
self.ARGUMENTS_SYMBOL: []
}
self.current_group = self.OPTION_SYMBOL
old_trace = sys.gettrace()
sys.settrace(self.traceit)
try:
self.function()
except ParseInterrupt:
pass
sys.settrace(old_trace)
return self.grammar
def mine_grammar(self):
return convert_ebnf_grammar(self.mine_ebnf_grammar())
###Output
_____no_output_____
###Markdown
The trace function checks for four methods: `add_argument()` is the most important function, resulting in processing arguments; `frame.f_locals` again is the set of local variables, which at this point is mostly the arguments to `add_argument()`. Since mutually exclusive groups also have a method `add_argument()`, we set the flag `in_group` to differentiate. Note that we make no specific efforts to differentiate between multiple parsers or groups; we simply assume that there is one parser, and at any point at most one mutually exclusive group.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def traceit(self, frame, event, arg):
if event != "call":
return
if "self" not in frame.f_locals:
return
self_var = frame.f_locals["self"]
method_name = frame.f_code.co_name
if method_name == "add_argument":
in_group = repr(type(self_var)).find("Group") >= 0
self.process_argument(frame.f_locals, in_group)
elif method_name == "add_mutually_exclusive_group":
self.add_group(frame.f_locals, exclusive=True)
elif method_name == "add_argument_group":
# self.add_group(frame.f_locals, exclusive=False)
pass
elif method_name == "parse_args":
raise ParseInterrupt
return None
###Output
_____no_output_____
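The `in_group` check above relies on the class name of the object on which `add_argument()` is called. A quick sketch of what that check evaluates to for a plain parser versus a mutually exclusive group:
```python
import argparse

p = argparse.ArgumentParser()
g = p.add_mutually_exclusive_group()
assert repr(type(p)).find("Group") < 0   # ArgumentParser: not a group
assert repr(type(g)).find("Group") >= 0  # _MutuallyExclusiveGroup: a group
```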
###Markdown
The method `process_argument()` now analyzes the arguments passed and adds them to the grammar:
* If the argument starts with `-`, it gets added as an optional element to the `<option>` list
* Otherwise, it gets added to the `<arguments>` list.

The optional `nargs` argument specifies how many arguments can follow. If it is a number, we add the appropriate number of elements to the grammar; if it is an abstract specifier (say, `+` or `*`), we use it directly as an EBNF operator. Given the large number of parameters and optional behavior, this is a somewhat messy function, but it does the job.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def process_argument(self, locals, in_group):
args = locals["args"]
kwargs = locals["kwargs"]
if self.log:
print(args)
print(kwargs)
print()
for arg in args:
self.process_arg(arg, in_group, kwargs)
class OptionGrammarMiner(OptionGrammarMiner):
def process_arg(self, arg, in_group, kwargs):
if arg.startswith('-'):
if not in_group:
target = self.OPTION_SYMBOL
else:
target = self.current_group
metavar = None
arg = " " + arg
else:
target = self.ARGUMENTS_SYMBOL
metavar = arg
arg = ""
if "nargs" in kwargs:
nargs = kwargs["nargs"]
else:
nargs = 1
param = self.add_parameter(kwargs, metavar)
if param == "":
nargs = 0
if isinstance(nargs, int):
for i in range(nargs):
arg += param
else:
assert nargs in "?+*"
arg += '(' + param + ')' + nargs
if target == self.OPTION_SYMBOL:
self.grammar[target].append(arg)
else:
self.grammar[target].append(arg)
###Output
_____no_output_____
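To make the `nargs` handling above explicit, here is a stripped-down sketch of how an `nargs` specification turns into the EBNF fragment appended after the option name (`nargs_to_ebnf()` is a hypothetical helper, not part of the class):
```python
def nargs_to_ebnf(param, nargs):
    # Numeric nargs: repeat the parameter; '?', '+', '*': use it as EBNF operator
    if isinstance(nargs, int):
        return param * nargs
    assert nargs in "?+*"
    return "(" + param + ")" + nargs

assert nargs_to_ebnf(" <int>", 2) == " <int> <int>"
assert nargs_to_ebnf(" <int>", "+") == "( <int>)+"
```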
###Markdown
The method `add_parameter()` handles possible parameters of options. If the argument has an `action` defined, it takes no parameter. Otherwise, we identify the type of the parameter (as `int` or `str`) and augment the grammar with an appropriate rule.
###Code
import inspect
class OptionGrammarMiner(OptionGrammarMiner):
def add_parameter(self, kwargs, metavar):
if "action" in kwargs:
# No parameter
return ""
type_ = "str"
if "type" in kwargs:
given_type = kwargs["type"]
# int types come as '<class int>'
if inspect.isclass(given_type) and issubclass(given_type, int):
type_ = "int"
if metavar is None:
if "metavar" in kwargs:
metavar = kwargs["metavar"]
else:
metavar = type_
self.add_type_rule(type_)
if metavar != type_:
self.add_metavar_rule(metavar, type_)
param = " <" + metavar + ">"
return param
###Output
_____no_output_____
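The type check above uses `inspect.isclass()` and `issubclass()`; a quick sketch of how it classifies the two cases occurring in practice:
```python
import inspect

# `type=int` in add_argument() passes the class `int`, yielding an <int> parameter
assert inspect.isclass(int) and issubclass(int, int)
# any other type (or no type at all) falls back to <str>
assert not (inspect.isclass(str) and issubclass(str, int))
```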
###Markdown
The method `add_type_rule()` adds a rule for parameter types to the grammar. If the parameter is identified by a meta-variable (say, `N`), we add a rule for this as well to improve legibility.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_type_rule(self, type_):
if type_ == "int":
self.add_int_rule()
else:
self.add_str_rule()
def add_int_rule(self):
self.grammar["<int>"] = ["(-)?<digit>+"]
self.grammar["<digit>"] = crange('0', '9')
def add_str_rule(self):
self.grammar["<str>"] = ["<char>+"]
self.grammar["<char>"] = srange(
string.digits
+ string.ascii_letters
+ string.punctuation)
def add_metavar_rule(self, metavar, type_):
self.grammar["<" + metavar + ">"] = ["<" + type_ + ">"]
###Output
_____no_output_____
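The `<int>` rule `(-)?<digit>+` describes an optional minus sign followed by one or more digits. As a sanity check, here is a regular expression mirroring that rule (an illustration only, not part of the miner):
```python
import re

signed_int = re.compile(r"(-)?[0-9]+")
assert signed_int.fullmatch("-42") is not None
assert signed_int.fullmatch("4-2") is None
```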
###Markdown
The method `add_group()` adds a new mutually exclusive group to the grammar. We define a new symbol (say, `<group>`) for the options added to the group, and use the `required` and `exclusive` flags to define an appropriate expansion operator. The group is then prefixed to the grammar, as in
```
<start> ::= <group><option>* <arguments>
<group> ::= <empty>
```
and filled with the next calls to `add_argument()` within the group.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_group(self, locals, exclusive):
kwargs = locals["kwargs"]
if self.log:
print(kwargs)
required = kwargs.get("required", False)
group = new_symbol(self.grammar, "<group>")
if required and exclusive:
group_expansion = group
if required and not exclusive:
group_expansion = group + "+"
if not required and exclusive:
group_expansion = group + "?"
if not required and not exclusive:
group_expansion = group + "*"
self.grammar[START_SYMBOL][0] = group_expansion + \
self.grammar[START_SYMBOL][0]
self.grammar[group] = []
self.current_group = group
###Output
_____no_output_____
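The four `required`/`exclusive` combinations above map one-to-one to EBNF operators. A compact sketch of that mapping (`group_operator()` is a hypothetical helper for illustration):
```python
def group_operator(required, exclusive):
    if required and exclusive:
        return ""    # exactly one option from the group
    if required and not exclusive:
        return "+"   # one or more
    if not required and exclusive:
        return "?"   # at most one
    return "*"       # any number, including none

assert group_operator(required=True, exclusive=True) == ""
assert group_operator(required=False, exclusive=False) == "*"
```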
###Markdown
That's it! With this, we can now extract the grammar from our `process_numbers()` program. Turning on logging again reveals the variables we draw upon.
###Code
miner = OptionGrammarMiner(process_numbers, log=True)
process_numbers_grammar = miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
Here is the extracted grammar:
###Code
process_numbers_grammar
###Output
_____no_output_____
###Markdown
The grammar properly identifies the group found:
###Code
process_numbers_grammar["<start>"]
process_numbers_grammar["<group>"]
###Output
_____no_output_____
###Markdown
It also identifies a `--help` option provided not by us, but by the `argparse` module:
###Code
process_numbers_grammar["<option>"]
###Output
_____no_output_____
###Markdown
The grammar also correctly identifies the types of the arguments:
###Code
process_numbers_grammar["<arguments>"]
process_numbers_grammar["<integers>"]
###Output
_____no_output_____
###Markdown
The rules for `int` are set as defined by `add_int_rule()`
###Code
process_numbers_grammar["<int>"]
###Output
_____no_output_____
###Markdown
We can take this grammar and convert it to BNF, such that we can fuzz with it right away:
###Code
assert is_valid_grammar(process_numbers_grammar)
grammar = convert_ebnf_grammar(process_numbers_grammar)
assert is_valid_grammar(grammar)
f = GrammarCoverageFuzzer(grammar)
for i in range(10):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Each and every invocation adheres to the rules as set forth in the `argparse` calls. By mining options and arguments from existing programs, we can now fuzz these options out of the box – without having to specify a grammar. Testing Autopep8 Let us try out the option grammar miner on real-world Python programs. `autopep8` is a tool that automatically converts Python code to the [PEP 8 Style Guide for Python Code](https://www.python.org/dev/peps/pep-0008/). (Actually, all Python code in this book runs through `autopep8` during production.) `autopep8` offers a wide range of options, as can be seen by invoking it with `--help`:
###Code
!autopep8 --help
###Output
_____no_output_____
###Markdown
Autopep8 SetupWe want to systematically test these options. In order to deploy our configuration grammar miner, we need to find the source code of the executable:
###Code
import os
def find_executable(name):
for path in os.get_exec_path():
qualified_name = os.path.join(path, name)
if os.path.exists(qualified_name):
return qualified_name
return None
autopep8_executable = find_executable("autopep8")
assert autopep8_executable is not None
autopep8_executable
###Output
_____no_output_____
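As an aside, the standard library offers similar functionality via `shutil.which()`; a sketch (not used in the remainder of this chapter):
```python
import shutil

# Should point to the same executable as find_executable("autopep8") above
assert shutil.which("autopep8") is not None
```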
###Markdown
Next, we build a function that reads the contents of the file and executes it.
###Code
def autopep8():
executable = find_executable("autopep8")
# First line has to contain "/usr/bin/env python" or like
first_line = open(executable).readline()
assert first_line.find("python") >= 0
contents = open(executable).read()
exec(contents)
###Output
_____no_output_____
###Markdown
Mining an Autopep8 GrammarWe can use the `autopep8()` function in our grammar miner:
###Code
autopep8_miner = OptionGrammarMiner(autopep8)
###Output
_____no_output_____
###Markdown
and extract a grammar for it:
###Code
autopep8_ebnf_grammar = autopep8_miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
This works because here, `autopep8` is not a separate process (and a separate Python interpreter), but we run the `autopep8()` function (and the `autopep8` code) in our current Python interpreter – up to the call to `parse_args()`, where we interrupt execution again. At this point, the `autopep8` code has done nothing but setting up the argument parser – which is what we are interested in. The grammar options mined reflect precisely the options seen when providing `--help`:
###Code
print(autopep8_ebnf_grammar["<option>"])
###Output
_____no_output_____
###Markdown
Metavariables like `<n>` or `<line>` are placeholders for integers. We assume all metavariables of the same name have the same type:
###Code
autopep8_ebnf_grammar["<line>"]
###Output
_____no_output_____
###Markdown
The grammar miner has inferred that the argument to `autopep8` is a list of files:
###Code
autopep8_ebnf_grammar["<arguments>"]
###Output
_____no_output_____
###Markdown
which in turn all are strings:
###Code
autopep8_ebnf_grammar["<files>"]
###Output
_____no_output_____
###Markdown
As we are only interested in testing options, not arguments, we fix the arguments to a single mandatory input. (Otherwise, we'd have plenty of random file names generated.)
###Code
autopep8_ebnf_grammar["<arguments>"] = [" <files>"]
autopep8_ebnf_grammar["<files>"] = ["foo.py"]
assert is_valid_grammar(autopep8_ebnf_grammar)
###Output
_____no_output_____
###Markdown
Creating Autopep8 Options Let us now use the inferred grammar for fuzzing. Again, we convert the EBNF grammar into a regular BNF grammar:
###Code
autopep8_grammar = convert_ebnf_grammar(autopep8_ebnf_grammar)
assert is_valid_grammar(autopep8_grammar)
###Output
_____no_output_____
###Markdown
And we can use the grammar for fuzzing all options:
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=4)
for i in range(20):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Let us apply these options on the actual program. We need a file `foo.py` that will serve as input: (Note that the following commands will overwrite the file `foo.py`, if it already exists in the current working directory. Be aware of this, if you downloaded the notebooks and are working locally.)
###Code
def create_foo_py():
open("foo.py", "w").write("""
def twice(x = 2):
return x + x
""")
create_foo_py()
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We see how `autopep8` fixes the spacing:
###Code
!autopep8 foo.py
###Output
_____no_output_____
###Markdown
Let us now put things together. We define a `ProgramRunner` that will run the `autopep8` executable with arguments coming from the mined `autopep8` grammar.
###Code
from Fuzzer import ProgramRunner
###Output
_____no_output_____
###Markdown
Running `autopep8` with the mined options reveals a surprisingly high number of passing runs. (We see that some options depend on each other or are mutually exclusive, but this is handled by the program logic, not the argument parser, and hence out of our scope.) The `GrammarCoverageFuzzer` ensures that each option is tested at least once. (Digits and letters, too, by the way.)
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=5)
for i in range(20):
invocation = "autopep8" + f.fuzz()
print("$ " + invocation)
args = invocation.split()
autopep8_runner = ProgramRunner(args)
result, outcome = autopep8_runner.run()
if result.stderr != "":
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
Our `foo.py` file now has been formatted in place a number of times:
###Code
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We don't need it anymore, so we clean up things:
###Code
import os
os.remove("foo.py")
###Output
_____no_output_____
###Markdown
Classes for Fuzzing Configuration OptionsLet us now create reusable classes that we can use for testing arbitrary programs. (Okay, make that "arbitrary programs that are written in Python and use the `argparse` module to process command-line arguments.") The class `OptionRunner` is a subclass of `ProgramRunner` that takes care of automatically determining the grammar, using the same steps we used for `autopep8`, above.
###Code
class OptionRunner(ProgramRunner):
def __init__(self, program, arguments=None):
if isinstance(program, str):
self.base_executable = program
else:
self.base_executable = program[0]
self.find_contents()
self.find_grammar()
if arguments is not None:
self.set_arguments(arguments)
super().__init__(program)
###Output
_____no_output_____
###Markdown
First, we find the contents of the Python executable:
###Code
class OptionRunner(OptionRunner):
def find_contents(self):
self._executable = find_executable(self.base_executable)
first_line = open(self._executable).readline()
assert first_line.find("python") >= 0
self.contents = open(self._executable).read()
def invoker(self):
exec(self.contents)
def executable(self):
return self._executable
###Output
_____no_output_____
###Markdown
Next, we determine the grammar using the `OptionGrammarMiner` class:
###Code
class OptionRunner(OptionRunner):
def find_grammar(self):
miner = OptionGrammarMiner(self.invoker)
self._ebnf_grammar = miner.mine_ebnf_grammar()
def ebnf_grammar(self):
return self._ebnf_grammar
def grammar(self):
return convert_ebnf_grammar(self._ebnf_grammar)
###Output
_____no_output_____
###Markdown
The two service methods `set_arguments()` and `set_invocation()` help us to change the arguments and program, respectively.
###Code
from Grammars import unreachable_nonterminals
class OptionRunner(OptionRunner):
def set_arguments(self, args):
self._ebnf_grammar["<arguments>"] = [" " + args]
# Delete rules for previous arguments
for nonterminal in unreachable_nonterminals(self._ebnf_grammar):
del self._ebnf_grammar[nonterminal]
def set_invocation(self, program):
self.program = program
###Output
_____no_output_____
###Markdown
We can instantiate the class on `autopep8` and immediately get the grammar:
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
print(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
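Using `set_arguments()`, we could also point the same runner at a different input file later on; a short sketch (`bar.py` is a hypothetical file name):
```python
autopep8_runner.set_arguments("bar.py")
assert autopep8_runner.ebnf_grammar()["<arguments>"] == [" bar.py"]
autopep8_runner.set_arguments("foo.py")  # restore the setting used below
```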
###Markdown
An `OptionFuzzer` interacts with the given `OptionRunner` to obtain its grammar, which is then passed to its `GrammarCoverageFuzzer` superclass.
###Code
class OptionFuzzer(GrammarCoverageFuzzer):
def __init__(self, runner, *args, **kwargs):
assert issubclass(type(runner), OptionRunner)
self.runner = runner
grammar = runner.grammar()
super().__init__(grammar, *args, **kwargs)
###Output
_____no_output_____
###Markdown
When invoking `run()`, the `OptionFuzzer` creates a new invocation (using `fuzz()` from its grammar) and runs the now given (or previously set) runner with the arguments from the grammar. Note that the runner specified in `run()` can differ from the one set during initialization; this allows for mining options from one program and applying it in another context.
###Code
class OptionFuzzer(OptionFuzzer):
def run(self, runner=None, inp=""):
if runner is None:
runner = self.runner
assert issubclass(type(runner), OptionRunner)
invocation = runner.executable() + " " + self.fuzz()
runner.set_invocation(invocation.split())
return runner.run(inp)
###Output
_____no_output_____
###Markdown
Example: Autopep8Let us apply our newly defined classes on the `autopep8` runner:
###Code
autopep8_fuzzer = OptionFuzzer(autopep8_runner, max_nonterminals=5)
for i in range(3):
print(autopep8_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
We can now systematically test `autopep8` with these classes:
###Code
autopep8_fuzzer.run(autopep8_runner)
###Output
_____no_output_____
###Markdown
Example: MyPyWe can extract options for the `mypy` static type checker for Python:
###Code
assert find_executable("mypy") is not None
mypy_runner = OptionRunner("mypy", "foo.py")
print(mypy_runner.ebnf_grammar()["<option>"])
mypy_fuzzer = OptionFuzzer(mypy_runner, max_nonterminals=5)
for i in range(10):
print(mypy_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Example: NotedownHere are the configuration options for the `notedown` Notebook to Markdown converter:
###Code
assert find_executable("notedown") is not None
notedown_runner = OptionRunner("notedown")
print(notedown_runner.ebnf_grammar()["<option>"])
notedown_fuzzer = OptionFuzzer(notedown_runner, max_nonterminals=5)
for i in range(10):
print(notedown_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Combinatorial TestingOur `GrammarCoverageFuzzer` does a good job in covering each and every option at least once, which is great for systematic testing. However, as we can also see in our examples above, some options require each other, while others interfere with each other. What we should do as good testers is not only to cover every option individually, but also _combinations_ of options. The Python `itertools` module gives us means to create combinations from lists. We can, for instance, take the `notedown` options and create a list of all pairs.
###Code
from itertools import combinations
option_list = notedown_runner.ebnf_grammar()["<option>"]
pairs = list(combinations(option_list, 2))
###Output
_____no_output_____
###Markdown
There's quite a number of pairs:
###Code
len(pairs)
print(pairs[:20])
###Output
_____no_output_____
###Markdown
Testing every such pair of options frequently suffices to cover all interferences between options. (Programs rarely have conditions involving three or more configuration settings.) To this end, we _change_ the grammar from having a list of options to having a list of _option pairs_, such that covering these will automatically cover all pairs. We create a function `pairwise()` that takes a list of options as occurring in our grammar and returns a list of _pairwise options_ – that is, our original options, but concatenated.
###Code
def pairwise(option_list):
return [option_1 + option_2
for (option_1, option_2) in combinations(option_list, 2)]
###Output
_____no_output_____
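Since every option expansion in the mined grammar starts with a space, simply concatenating two expansions already yields a well-formed pair. A quick check on a toy list:
```python
assert pairwise([" -a", " -b", " -c"]) == [" -a -b", " -a -c", " -b -c"]
```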
###Markdown
Here are the first 20 pairs:
###Code
print(pairwise(option_list)[:20])
###Output
_____no_output_____
###Markdown
The new grammar `pairwise_notedown_grammar` is a copy of the `notedown` grammar, but with the list of options replaced with the above pairwise option list.
###Code
notedown_grammar = notedown_runner.grammar()
pairwise_notedown_grammar = extend_grammar(notedown_grammar)
pairwise_notedown_grammar["<option>"] = pairwise(notedown_grammar["<option>"])
assert is_valid_grammar(pairwise_notedown_grammar)
###Output
_____no_output_____
###Markdown
Using the "pairwise" grammar to fuzz now covers one pair after another:
###Code
notedown_pairwise_fuzzer = GrammarCoverageFuzzer(
pairwise_notedown_grammar, max_nonterminals=4)
for i in range(10):
print(notedown_pairwise_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Can we actually test all combinations of options? Not in practice, as the number of combinations quickly grows as the length increases. It decreases again as the number of options reaches the maximum (with 20 options, there is only 1 combination involving _all_ options), but the absolute numbers are still staggering:
###Code
for combination_length in range(1, 20):
tuples = list(combinations(option_list, combination_length))
print(combination_length, len(tuples))
###Output
_____no_output_____
###Markdown
Formally, the number of combinations of length $k$ in a set of options of length $n$ is the binomial coefficient$${n \choose k} = \frac{n!}{k!(n - k)!}$$ which for $k = 2$ (all pairs) gives us$${n \choose 2} = \frac{n!}{2(n - 2)!} = \frac{n (n - 1)}{2}$$ For `autopep8` with its 29 options...
###Code
len(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
###Markdown
... we thus have 406 distinct pairs. However, the binomial coefficient does not differentiate between permutations of elements of the pairs, which our tests do. Therefore we need 812 tests to cover all pairs:
###Code
len(autopep8_runner.ebnf_grammar()["<option>"]) * \
(len(autopep8_runner.ebnf_grammar()["<option>"]) - 1)
###Output
_____no_output_____
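As a cross-check of this arithmetic (a sketch; `math.comb()` requires Python 3.8 or later):
```python
import math

assert math.comb(29, 2) == 406          # unordered pairs of 29 options
assert 2 * math.comb(29, 2) == 812      # ordered pairs, as our tests generate them
assert 2 * math.comb(110, 2) == 11990   # the mypy case discussed next
```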
###Markdown
For `mypy` with its 110 options, though, we already end up with 11,990 tests to be conducted:
###Code
len(mypy_runner.ebnf_grammar()["<option>"])
len(mypy_runner.ebnf_grammar()["<option>"]) * \
(len(mypy_runner.ebnf_grammar()["<option>"]) - 1)
###Output
_____no_output_____
###Markdown
Even if each pair takes a second to run, we'd still be done in three hours of testing, though. If your program has more options than you want to get covered in combinations, it is advisable that you limit the number of configurations further – for instance by limiting combinatorial testing to those combinations that possibly can interact with each other; and covering all other (presumably orthogonal) options individually. This mechanism of creating configurations by extending grammars can be easily extended to other configuration targets. One may want to explore a greater number of configurations, or expansions in specific contexts. The [exercises](Exercises), below, have a number of options ready for you. SynopsisThis chapter provides two classes:* `OptionRunner` automatically extracts command-line options from a Python program;* `OptionFuzzer` uses these to automatically test a Python program with a large variety of options. `OptionRunner` runs a program up to the point where it parses its arguments, and then extracts a grammar that describes its invocations:
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
###Output
_____no_output_____
###Markdown
The grammar can be extracted via the method `ebnf_grammar()`:
###Code
option_ebnf_grammar = autopep8_runner.ebnf_grammar()
print(option_ebnf_grammar)
###Output
_____no_output_____
###Markdown
The grammar can be immediately used for fuzzing. A `GrammarCoverageFuzzer` will ensure all options are covered:
###Code
from Grammars import convert_ebnf_grammar
fuzzer = GrammarCoverageFuzzer(convert_ebnf_grammar(option_ebnf_grammar))
[fuzzer.fuzz() for i in range(3)]
###Output
_____no_output_____
###Markdown
The `OptionFuzzer` class summarizes these steps. Its constructor takes an `OptionRunner` to automatically extract the grammar; it does the necessary steps to extract the grammar and fuzz with it.
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
autopep8_fuzzer = OptionFuzzer(autopep8_runner)
[autopep8_fuzzer.fuzz() for i in range(3)]
###Output
_____no_output_____
###Markdown
The final step in testing would now be to invoke the program with these arguments. Note that `OptionRunner` is experimental: It assumes that the Python program in question uses the `argparse` module; and not all `argparse` features are supported. Still, it does a pretty good job even on nontrivial programs. Lessons Learned* Besides regular input data, program _configurations_ make an important testing target.* For a given program using a standard library to parse command-line options and arguments, one can automatically extract these and convert them into a grammar.* To cover not only single options, but combinations of options, one can expand the grammar to cover all pairs, or come up with even more ambitious targets. Next StepsIf you liked the idea of mining a grammar from a program, do not miss:* [how to mine grammars for input data](GrammarMiner.ipynb) Our next steps in the book focus on:* [how to parse and recombine inputs](Parser.ipynb)* [how to assign weights and probabilities to specific productions](ProbabilisticGrammarFuzzer.ipynb)* [how to simplify inputs that cause a failure](Reducer.ipynb) BackgroundAlthough configuration data is just as likely to cause failures as other input data, it has received relatively little attention in test generation – possibly because, unlike "regular" input data, configuration data is not so much under control of external parties, and because, again unlike regular data, there is little variance in configurations. Creating models for software configurations and using these models for testing is commonplace, as is the idea of pairwise testing. For an overview, see \cite{Pezze2008}; for a discussion and comparison of state-of-the-art techniques, see \cite{Petke2015}. More specifically, \cite{Sutton2007} also discuss techniques to systematically cover command-line options. Dai et al. \cite{Dai2010} apply configuration fuzzing by changing variables associated with configuration files. Exercises Exercise 1: ifdef Configuration FuzzingIn C programs, the *C preprocessor* can be used to choose which code parts should be compiled and which ones should not. As an example, in the C code
```C
#ifdef LONG_FOO
long foo() { ... }
#else
int foo() { ... }
#endif
```
the compiler will compile the function `foo()` with return type `long` if the _preprocessor variable_ `LONG_FOO` is defined, and with return type `int` if not. Such preprocessor variables are either set in the source files (using `#define`, as in `#define LONG_FOO`) or on the C compiler command line (using `-D<variable>` or `-D<variable>=<value>`, as in `-DLONG_FOO`). Such *conditional compilation* is used to configure C programs towards their environment. System-specific code can contain lots of conditional compilation. As an example, consider this excerpt of `xmlparse.c`, the XML parser that is part of the Python runtime library:
```c
#if defined(_WIN32) && !defined(LOAD_LIBRARY_SEARCH_SYSTEM32)
# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800
#endif
#if !defined(HAVE_GETRANDOM) && !defined(HAVE_SYSCALL_GETRANDOM) \
    && !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_ARC4RANDOM) \
    && !defined(XML_DEV_URANDOM) \
    && !defined(_WIN32) \
    && !defined(XML_POOR_ENTROPY)
# error
#endif
#if !defined(TIOCSWINSZ) || defined(__SCO__) || defined(__UNIXWARE__)
#define USE_SYSV_ENVVARS /* COLUMNS/LINES vs. TERMCAP */
#endif
#ifdef XML_UNICODE_WCHAR_T
#define XML_T(x) (const wchar_t)x
#define XML_L(x) L ## x
#else
#define XML_T(x) (const unsigned short)x
#define XML_L(x) x
#endif
int fun(int x) { return XML_T(x); }
```
A typical configuration for the C preprocessor on the above code could be `cc -c -D_WIN32 -DXML_POOR_ENTROPY -DXML_UNICODE_WCHAR_T xmlparse.c`, defining the given preprocessor variables and selecting the appropriate code fragments. Since the compiler can only compile one configuration at a time (implying that we can also only _test_ one resulting executable at a time), your task is to find out which of these configurations actually compile. To this end, proceed in three steps. Part 1: Extract Preprocessor VariablesWrite a _function_ `cpp_identifiers()` that, given a set of lines (say, from `open(filename).readlines()`), extracts all preprocessor variables referenced in `if` or `ifdef` preprocessor instructions. Apply `cpp_identifiers()` on the sample C input above, such that
```python
cpp_identifiers(open("xmlparse.c").readlines())
```
returns the set
```python
{'_WIN32', 'LOAD_LIBRARY_SEARCH_SYSTEM32', 'HAVE_GETRANDOM', 'HAVE_SYSCALL_GETRANDOM', 'HAVE_ARC4RANDOM_BUF', ...}
```
**Solution.** Let us start with creating a sample input file, `xmlparse.c`:
###Code
filename = "xmlparse.c"
open(filename, "w").write(
"""
#if defined(_WIN32) && !defined(LOAD_LIBRARY_SEARCH_SYSTEM32)
# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800
#endif
#if !defined(HAVE_GETRANDOM) && !defined(HAVE_SYSCALL_GETRANDOM) \
&& !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_ARC4RANDOM) \
&& !defined(XML_DEV_URANDOM) \
&& !defined(_WIN32) \
&& !defined(XML_POOR_ENTROPY)
# error
#endif
#if !defined(TIOCSWINSZ) || defined(__SCO__) || defined(__UNIXWARE__)
#define USE_SYSV_ENVVARS /* COLUMNS/LINES vs. TERMCAP */
#endif
#ifdef XML_UNICODE_WCHAR_T
#define XML_T(x) (const wchar_t)x
#define XML_L(x) L ## x
#else
#define XML_T(x) (const unsigned short)x
#define XML_L(x) x
#endif
int fun(int x) { return XML_T(x); }
""");
###Output
_____no_output_____
###Markdown
To find C preprocessor `if` directives and preprocessor variables, we use regular expressions matching them.
###Code
import re
re_cpp_if_directive = re.compile(r"\s*#\s*(el)?if")
re_cpp_identifier = re.compile(r"[a-zA-Z_$]+")
def cpp_identifiers(lines):
identifiers = set()
for line in lines:
if re_cpp_if_directive.match(line):
identifiers |= set(re_cpp_identifier.findall(line))
# These are preprocessor keywords
identifiers -= {"if", "ifdef", "ifndef", "defined"}
return identifiers
cpp_ids = cpp_identifiers(open("xmlparse.c").readlines())
cpp_ids
###Output
_____no_output_____
###Markdown
Part 2: Derive an Option GrammarWith the help of `cpp_identifiers()`, create a grammar which has C compiler invocations with a list of options, where each option takes the form `-D<variable>` for a preprocessor variable `<variable>`. Using this grammar `cpp_grammar`, a fuzzer
```python
g = GrammarCoverageFuzzer(cpp_grammar)
```
would create C compiler invocations such as
```python
[g.fuzz() for i in range(10)]
['cc -DHAVE_SYSCALL_GETRANDOM xmlparse.c', 'cc -D__SCO__ -DRANDOM_BUF -DXML_UNICODE_WCHAR_T -D__UNIXWARE__ xmlparse.c', 'cc -DXML_POOR_ENTROPY xmlparse.c', 'cc -DRANDOM xmlparse.c', 'cc -D_WIN xmlparse.c', 'cc -DHAVE_ARC xmlparse.c', ...]
```
**Solution.** This is not very difficult:
###Code
from Grammars import Grammar, is_valid_grammar
cpp_grammar: Grammar = {
"<start>": ["cc -c<options> " + filename],
"<options>": ["<option>", "<options><option>"],
"<option>": []
}
for id in cpp_ids:
s = new_symbol(cpp_grammar, "<" + id + ">")
cpp_grammar["<option>"].append(s)
cpp_grammar[s] = [" -D" + id]
assert is_valid_grammar(cpp_grammar)
cpp_grammar
###Output
_____no_output_____
###Markdown
Part 3: C Preprocessor Configuration FuzzingUsing the grammar just produced, use a `GrammarCoverageFuzzer` to
1. Test each preprocessor variable individually
2. Test each pair of preprocessor variables, using `pairwise()`.

What happens if you actually run the invocations? **Solution.** We can simply run the coverage fuzzer, as described above.
###Code
g = GrammarCoverageFuzzer(cpp_grammar)
g.fuzz()
from Fuzzer import ProgramRunner
for i in range(10):
invocation = g.fuzz()
print("$", invocation)
# subprocess.call(invocation, shell=True)
cc_runner = ProgramRunner(invocation.split(' '))
(result, outcome) = cc_runner.run()
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
To test all pairs, we can use `pairwise()`:
###Code
pairwise_cpp_grammar = extend_grammar(cpp_grammar)
pairwise_cpp_grammar["<option>"] = pairwise(cpp_grammar["<option>"])
pairwise_cpp_grammar["<option>"][:10]
pairwise_cpp_fuzzer = GrammarCoverageFuzzer(pairwise_cpp_grammar)
for i in range(10):
    invocation = pairwise_cpp_fuzzer.fuzz()
print("$", invocation)
# subprocess.call(invocation, shell=True)
cc_runner = ProgramRunner(invocation.split(' '))
(result, outcome) = cc_runner.run()
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
Some of the compilation errors we get could be expected – for instance, defining `XML_UNICODE_WCHAR_T` when actually, the type is not supported in our environment. Other errors may not be expected – and it is these errors we would find through systematic configuration fuzzing, as described above. At the end, don't forget to clean up:
###Code
os.remove("xmlparse.c")
if os.path.exists("xmlparse.o"):
os.remove("xmlparse.o")
###Output
_____no_output_____
###Markdown
Exercise 2: .ini Configuration FuzzingBesides command-line options, another important source of configurations is _configuration files_. In this exercise, we will consider the very simple configuration language provided by the Python `ConfigParser` module, which is very similar to what is found in Microsoft Windows _.ini_ files. The following example for a `ConfigParser` input file stems right from [the ConfigParser documentation](https://docs.python.org/3/library/configparser.html):
```
[DEFAULT]
ServerAliveInterval = 45
Compression = yes
CompressionLevel = 9
ForwardX11 = yes

[bitbucket.org]
User = hg

[topsecret.server.com]
Port = 50022
ForwardX11 = no
```
The above `ConfigParser` file can be created programmatically:
###Code
import configparser
config = configparser.ConfigParser()
config['DEFAULT'] = {'ServerAliveInterval': '45',
'Compression': 'yes',
'CompressionLevel': '9'}
config['bitbucket.org'] = {}
config['bitbucket.org']['User'] = 'hg'
config['topsecret.server.com'] = {}
topsecret = config['topsecret.server.com']
topsecret['Port'] = '50022' # mutates the parser
topsecret['ForwardX11'] = 'no' # same here
config['DEFAULT']['ForwardX11'] = 'yes'
with open('example.ini', 'w') as configfile:
config.write(configfile)
with open('example.ini') as configfile:
print(configfile.read(), end="")
###Output
_____no_output_____
###Markdown
and be read in again:
###Code
config = configparser.ConfigParser()
config.read('example.ini')
topsecret = config['topsecret.server.com']
topsecret['Port']
###Output
_____no_output_____
###Markdown
Part 1: Read ConfigurationUsing `configparser`, create a program reading in the above configuration file and accessing the individual elements. Part 2: Create a Configuration GrammarDesign a grammar that will automatically create configuration files suitable for your above program. Fuzz your program with it. Part 3: Mine a Configuration GrammarBy dynamically tracking the individual accesses to configuration elements, you can again extract a basic grammar from the execution. To this end, create a subclass of `ConfigParser` with a special method `__getitem__`:
###Code
class TrackingConfigParser(configparser.ConfigParser):
def __getitem__(self, key):
print("Accessing", repr(key))
return super().__getitem__(key)
###Output
_____no_output_____
###Markdown
For a `TrackingConfigParser` object `p`, `p.__getitem__(key)` will be invoked whenever `p[key]` is accessed:
###Code
tracking_config_parser = TrackingConfigParser()
tracking_config_parser.read('example.ini')
section = tracking_config_parser['topsecret.server.com']
###Output
_____no_output_____
###Markdown
Using `__getitem__()`, as above, implement a tracking mechanism that, while your program accesses the read configuration, automatically saves options accessed and values read. Create a prototype grammar from these values; use it for fuzzing. At the end, don't forget to clean up:
###Code
import os
os.remove("example.ini")
###Output
_____no_output_____
###Markdown
**Solution.** Left to the reader. Enjoy! Exercise 3: Extracting and Fuzzing C Command-Line OptionsIn C programs, the `getopt()` function are frequently used to process configuration options. A call```getopt(argc, argv, "bf:")```indicates that the program accepts two options `-b` and `-f`, with `-f` taking an argument (as indicated by the following colon). Part 1: Getopt FuzzingWrite a framework which, for a given C program, automatically extracts the argument to `getopt()` and derives a fuzzing grammar for it. There are multiple ways to achieve this:1. Scan the program source code for occurrences of `getopt()` and return the string passed. (Crude, but should frequently work.)2. Insert your own implementation of `getopt()` into the source code (effectively replacing `getopt()` from the runtime library), which outputs the `getopt()` argument and exits the program. Recompile and run.3. (Advanced.) As above, but instead of changing the source code, hook into the _dynamic linker_ which at runtime links the program with the C runtime library. Set the library loading path (on Linux and Unix, this is the `LD_LIBRARY_PATH` environment variable) such that your own version of `getopt()` is linked first, and the regular libraries later. Executing the program (without recompiling) should yield the desired result.Apply this on `grep` and `ls`; report the resulting grammars and results. **Solution.** Left to the reader. Enjoy hacking! Part 2: Fuzzing Long Options in CSame as Part 1, but also hook into the GNU variant `getopt_long()`, which accepts "long" arguments with double dashes such as `--help`. Note that method 1, above, will not work here, since the "long" options are defined in a separately defined structure. **Solution.** Left to the reader. Enjoy hacking! Exercise 4: Expansions in ContextIn our above option configurations, we have multiple symbols which all expand to the same integer. For instance, the `--line-range` option of `autopep8` takes two `` parameters which both expand into the same `` symbol:``` ::= ... | --line-range | ... ::= ::= (-)?+ ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9```
###Code
autopep8_runner.ebnf_grammar()["<line>"]
autopep8_runner.ebnf_grammar()["<int>"]
autopep8_runner.ebnf_grammar()["<digit>"]
###Output
_____no_output_____
###Markdown
Testing ConfigurationsThe behavior of a program is not only governed by its data. The _configuration_ of a program – that is, the settings that govern the execution of a program on its (regular) input data, as set by options or configuration files – just as well influences behavior, and thus can and should be tested. In this chapter, we explore how to systematically _test_ and _cover_ software configurations. By _automatically inferring configuration options_, we can apply these techniques out of the box, with no need for writing a grammar. Finally, we show how to systematically cover _combinations_ of configuration options, quickly detecting unwanted interferences. **Prerequisites*** You should have read the [chapter on grammars](Grammars.ipynb).* You should have read the [chapter on grammar coverage](GrammarCoverageFuzzer.ipynb). SynopsisTo [use the code provided in this chapter](Importing.ipynb), write```python>>> from fuzzingbook.ConfigurationFuzzer import ```and then make use of the following features.This chapter provides two classes:* `OptionRunner` automatically extract command-line options from a Python program;* `OptionFuzzer` uses these to automatically test a Python program with a large variety of options.`OptionRunner` runs a program up to the point where it parses its arguments, and then extracts a grammar that describes its invocations:```python>>> autopep8_runner = OptionRunner("autopep8", "foo.py")```The grammar can be extracted via the method `ebnf_grammar()`:```python>>> option_ebnf_grammar = autopep8_runner.ebnf_grammar()>>> print(option_ebnf_grammar){'': ['()*'], '': [' -h', ' --help', ' --version', ' -v', ' --verbose', ' -d', ' --diff', ' -i', ' --in-place', ' --global-config ', ' --ignore-local-config', ' -r', ' --recursive', ' -j ', ' --jobs ', ' -p ', ' --pep8-passes ', ' -a', ' --aggressive', ' --experimental', ' --exclude ', ' --list-fixes', ' --ignore ', ' --select ', ' --max-line-length ', ' --line-range ', ' --range ', ' --indent-size ', ' --hang-closing'], '': [' foo.py'], '': ['+'], '': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '!', '"', '', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '', '?', '@', '[', '\\', ']', '^', '_', '`', '{', '|', '}', '~'], '': [''], '': ['(-)?+'], '': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], '': [''], '': [''], '': [''], '': ['']}```The grammar can be immediately used for fuzzing. A `GrammarCoverageFuzzer` will ensure all options are covered:```python>>> from Grammars import convert_ebnf_grammar>>> fuzzer = GrammarCoverageFuzzer(convert_ebnf_grammar(option_ebnf_grammar))>>> [fuzzer.fuzz() for i in range(3)][' foo.py', ' --indent-size 54 --diff --global-config k --select &, --list-fixes -a --hang-closing --range 0 72 --ignore-local-config -p 8 --version -d --experimental foo.py', ' --ignore i --jobs -16 --verbose -v --line-range -3 9 -r --help --max-line-length 8 -h --aggressive --recursive --exclude qE" --in-place -j -979 -i --pep8-passes 4 --version --in-place --aggressive --version foo.py']```The `OptionFuzzer` class summarizes these steps. 
Its constructor takes an `OptionRunner` to automatically extract the grammar; it does the necessary steps to extract the grammar and fuzz with it.```python>>> autopep8_runner = OptionRunner("autopep8", "foo.py")>>> autopep8_fuzzer = OptionFuzzer(autopep8_runner)>>> [autopep8_fuzzer.fuzz() for i in range(3)][' foo.py', ' --range 46 -1 --recursive -d --select <6 --exclude :" --global-config UVE --help --aggressive --experimental -r --line-range -7 -9 --version -i -h --indent-size -05 --max-line-length 8 --in-place --verbose --jobs -32 --ignore-local-config -v -p -1 --hang-closing -j 38 -a --list-fixes --pep8-passes 67 --diff --ignore v --select I --ignore (1NJ --ignore Km --ignore ? --select ^kZ --global-config y --select ia]9 --exclude o --ignore R!4GP.x8/ --ignore D --exclude 7 --exclude Bd -a --recursive --verbose foo.py', " --ignore \\ --global-config l --global-config @ --ignore ,CM~& --ignore nb --select c --global-config zgW --ignore $`s{H --global-config - --exclude 2| --select O --exclude 0 --exclude * --ignore qA'F}X --global-config p>_r+ --global-config eQ --exclude [ --ignore t --select h) --select %f --exclude u3;=TL --global-config w --ignore j5 --exclude Y --ignore S --ignore ]J --global-config 1 --ignore-local-config --max-line-length 36693 -i foo.py"]```The final step in testing would now to invoke the program with these arguments.Note that `OptionRunner` is experimental: It assumes that the Python program in question uses the `argparse` module; and not all `argparse` features are supported. Still, it does a pretty good job even on nontrivial programs. Configuration OptionsWhen we talk about the input to a program, we usually think of the _data_ it processes. This is also what we have been fuzzing in the past chapters – be it with [random input](Fuzzer.ipynb), [mutation-based fuzzing](MutationFuzzer.ipynb), or [grammar-based fuzzing](GrammarFuzzer.ipynb). However, programs typically have several input sources, all of which can and should be tested – and included in test generation. One important source of input is the program's _configuration_ – that is, a set of inputs that typically is set once when beginning to process data and then stays constant while processing data, while the program is running, or even while the program is deployed. Such a configuration is frequently set in _configuration files_ (for instance, as key/value pairs); the most ubiquitous method for command-line tools, though, are _configuration options_ on the command line. As an example, consider the `grep` utility to find textual patterns in files. The exact mode by which `grep` works is governed by a multitude of options, which can be listed by providing a `--help` option:
###Code
!grep --help
###Output
_____no_output_____
###Markdown
All these options need to be tested for whether they operate correctly. In security testing, any such option may also trigger a yet unknown vulnerability. Hence, such options can become _fuzz targets_ on their own. In this chapter, we analyze how to systematically test such options – and better yet, how to extract possible configurations right out of given program files, such that we do not have to specify anything. Options in PythonLet us stick to our common programming language here and examine how options are processed in Python. The `argparse` module provides a parser for command-line arguments (and options) with great functionality – and great complexity. You start by defining a parser (`argparse.ArgumentParser()`) to which individual arguments with various features are added, one after another. Additional parameters for each argument can specify the type (`type`) of the argument (say, integers or strings), or the number of arguments (`nargs`). By default, arguments are stored under their name in the `args` object coming from `parse_args()` – thus, `args.integers` holds the `integers` arguments added earlier. Special actions (`actions`) allow to store specific values in given variables; the `store_const` action stores the given `const` in the attribute named by `dest`. The following example takes a number of integer arguments (`integers`) as well as an operator (`--sum`, `--min`, or `--max`) to be applied on these integers. The operators all store a function reference in the `accumulate` attribute, which is finally invoked on the integers parsed:
###Code
import argparse
def process_numbers(args=[]):
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('integers', metavar='N', type=int, nargs='+',
help='an integer for the accumulator')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--sum', dest='accumulate', action='store_const',
const=sum,
help='sum the integers')
group.add_argument('--min', dest='accumulate', action='store_const',
const=min,
help='compute the minimum')
group.add_argument('--max', dest='accumulate', action='store_const',
const=max,
help='compute the maximum')
args = parser.parse_args(args)
print(args.accumulate(args.integers))
###Output
_____no_output_____
###Markdown
Here's how `process_numbers()` works. We can, for instance, invoke the `--min` option on the given arguments to compute the minimum:
###Code
process_numbers(["--min", "100", "200", "300"])
###Output
_____no_output_____
###Markdown
Or compute the sum of three numbers:
###Code
process_numbers(["--sum", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
When defined via `add_mutually_exclusive_group()` (as above), options are mutually exclusive. Consequently, we can have only one operator:
###Code
import fuzzingbook_utils
from ExpectError import ExpectError
with ExpectError(print_traceback=False):
process_numbers(["--sum", "--max", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
A Grammar for ConfigurationsHow can we test a system with several options? The easiest answer is to write a grammar for it. The grammar `PROCESS_NUMBERS_EBNF_GRAMMAR` reflects the possible combinations of options and arguments:
###Code
from Grammars import crange, srange, convert_ebnf_grammar, extend_grammar, is_valid_grammar
from Grammars import START_SYMBOL, new_symbol
PROCESS_NUMBERS_EBNF_GRAMMAR = {
"<start>": ["<operator> <integers>"],
"<operator>": ["--sum", "--min", "--max"],
"<integers>": ["<integer>", "<integers> <integer>"],
"<integer>": ["<digit>+"],
"<digit>": crange('0', '9')
}
assert is_valid_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
PROCESS_NUMBERS_GRAMMAR = convert_ebnf_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
###Output
_____no_output_____
###Markdown
We can feed this grammar into our [grammar coverage fuzzer](GrammarCoverageFuzzer.ipynb) and have it cover one option after another:
###Code
from GrammarCoverageFuzzer import GrammarCoverageFuzzer
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Of course, we can also invoke `process_numbers()` with these very arguments. To this end, we need to convert the string produced by the grammar back into a list of individual arguments, using `split()`:
###Code
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
args = f.fuzz().split()
print(args)
process_numbers(args)
###Output
_____no_output_____
###Markdown
In a similar way, we can define grammars for any program to be tested; as well as define grammars for, say, configuration files. Yet, the grammar has to be updated with every change to the program, which creates a maintenance burden. Given that the information required for the grammar is already all encoded in the program, the question arises: _Can't we go and extract configuration options right out of the program in the first place?_ Mining Configuration OptionsIn this section, we try to extract option and argument information right out of a program, such that we do not have to specify a configuration grammar. The aim is to have a configuration fuzzer that works on the options and arguments of an arbitrary program, as long as it follows specific conventions for processing its arguments. In the case of Python programs, this means using the `argparse` module.Our idea is as follows: We execute the given program up to the point where the arguments are actually parsed – that is, `argparse.parse_args()` is invoked. Up to this point, we track all calls into the argument parser, notably those calls that define arguments and options (`add_argument()`). From these, we construct the grammar. Tracking ArgumentsLet us illustrate this approach with a simple experiment: We define a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while `process_numbers` is invoked. If we have a call to a method `add_argument`, we access and print out the local variables (which at this point are the arguments to the method).
###Code
import sys
import string
def traceit(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(method_name, locals)
###Output
_____no_output_____
###Markdown
What we get is a list of all calls to `add_argument()`, together with the method arguments passed:
###Code
sys.settrace(traceit)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
###Markdown
From the `args` argument, we can access the individual options and arguments to be defined:
###Code
def traceit(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(locals['args'])
sys.settrace(traceit)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
###Markdown
We see that each argument comes as a tuple with one (say, `integers` or `--sum`) or two members (`-h` and `--help`), which denote alternate forms for the same option. Our job will be to go through the arguments of `add_argument()` and detect not only the names of options and arguments, but also whether they accept additional parameters, as well as the type of the parameters. A Grammar Miner for Options and Arguments Let us now build a class that gathers all this information to create a grammar. We use the `ParseInterrupt` exception to interrupt program execution after gathering all arguments and options:
###Code
class ParseInterrupt(Exception):
pass
###Output
_____no_output_____
###Markdown
The class `OptionGrammarMiner` takes an executable function for which the grammar of options and arguments is to be mined:
###Code
class OptionGrammarMiner(object):
def __init__(self, function, log=False):
self.function = function
self.log = log
###Output
_____no_output_____
###Markdown
The method `mine_ebnf_grammar()` is where everything happens. It creates a grammar of the form

```
<start> ::= <option>* <arguments>
<option> ::=
<arguments> ::=
```

in which the options and arguments will be collected. It then sets a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while the previously defined `function` is invoked. Raising `ParseInterrupt` (when `parse_args()` is invoked) ends execution.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
OPTION_SYMBOL = "<option>"
ARGUMENTS_SYMBOL = "<arguments>"
def mine_ebnf_grammar(self):
self.grammar = {
START_SYMBOL: ["(" + self.OPTION_SYMBOL + ")*" + self.ARGUMENTS_SYMBOL],
self.OPTION_SYMBOL: [],
self.ARGUMENTS_SYMBOL: []
}
self.current_group = self.OPTION_SYMBOL
old_trace = sys.gettrace()
sys.settrace(self.traceit)
try:
self.function()
except ParseInterrupt:
pass
sys.settrace(old_trace)
return self.grammar
def mine_grammar(self):
return convert_ebnf_grammar(self.mine_ebnf_grammar())
###Output
_____no_output_____
###Markdown
The trace function checks for four methods: `add_argument()` is the most important function, resulting in processing arguments; `frame.f_locals` again is the set of local variables, which at this point is mostly the arguments to `add_argument()`. Since mutually exclusive groups also have a method `add_argument()`, we set the flag `in_group` to differentiate. Note that we make no specific efforts to differentiate between multiple parsers or groups; we simply assume that there is one parser, and at any point at most one mutually exclusive group.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def traceit(self, frame, event, arg):
if event != "call":
return
if "self" not in frame.f_locals:
return
self_var = frame.f_locals["self"]
method_name = frame.f_code.co_name
if method_name == "add_argument":
in_group = repr(type(self_var)).find("Group") >= 0
self.process_argument(frame.f_locals, in_group)
elif method_name == "add_mutually_exclusive_group":
self.add_group(frame.f_locals, exclusive=True)
elif method_name == "add_argument_group":
# self.add_group(frame.f_locals, exclusive=False)
pass
elif method_name == "parse_args":
raise ParseInterrupt
return None
###Output
_____no_output_____
###Markdown
The method `process_argument()` now analyzes the arguments passed and adds them to the grammar:

* If the argument starts with `-`, it gets added as an optional element to the `<option>` list.
* Otherwise, it gets added to the `<arguments>` list.

The optional `nargs` argument specifies how many arguments can follow. If it is a number, we add the appropriate number of elements to the grammar; if it is an abstract specifier (say, `+` or `*`), we use it directly as an EBNF operator. Given the large number of parameters and optional behavior, this is a somewhat messy function, but it does the job.
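To illustrate the mapping in isolation, here is a minimal standalone sketch (the option name is made up and the helper is not part of the miner) of how different `nargs` values turn into grammar fragments:

```python
# Illustration only: how a parameter <int> is attached to a (made-up) option
# for various `nargs` values, mirroring the logic of process_arg() below.
def render_option(name, param, nargs):
    arg = " " + name
    if isinstance(nargs, int):
        arg += param * nargs               # fixed count: repeat the parameter
    else:
        arg += "(" + param + ")" + nargs   # '?', '+', '*' become EBNF operators
    return arg

for nargs in [1, 2, "?", "+", "*"]:
    print(repr(nargs), "->", render_option("--demo", " <int>", nargs))
```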
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def process_argument(self, locals, in_group):
args = locals["args"]
kwargs = locals["kwargs"]
if self.log:
print(args)
print(kwargs)
print()
for arg in args:
self.process_arg(arg, in_group, kwargs)
class OptionGrammarMiner(OptionGrammarMiner):
def process_arg(self, arg, in_group, kwargs):
if arg.startswith('-'):
if not in_group:
target = self.OPTION_SYMBOL
else:
target = self.current_group
metavar = None
arg = " " + arg
else:
target = self.ARGUMENTS_SYMBOL
metavar = arg
arg = ""
if "nargs" in kwargs:
nargs = kwargs["nargs"]
else:
nargs = 1
param = self.add_parameter(kwargs, metavar)
if param == "":
nargs = 0
if isinstance(nargs, int):
for i in range(nargs):
arg += param
else:
assert nargs in "?+*"
arg += '(' + param + ')' + nargs
if target == self.OPTION_SYMBOL:
self.grammar[target].append(arg)
else:
self.grammar[target].append(arg)
###Output
_____no_output_____
###Markdown
The method `add_parameter()` handles possible parameters of options. If the argument has an `action` defined, it takes no parameter. Otherwise, we identify the type of the parameter (as `int` or `str`) and augment the grammar with an appropriate rule.
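As a rough standalone sketch (with hypothetical `kwargs`; the real method below additionally records the matching `<int>`/`<str>` rules in the grammar), the intended mapping is:

```python
# Sketch of the kwargs -> parameter mapping of add_parameter() (illustration only).
def sketch_parameter(kwargs, metavar=None):
    if "action" in kwargs:
        return ""                                    # option takes no parameter
    type_ = "int" if kwargs.get("type") is int else "str"
    metavar = metavar or kwargs.get("metavar", type_)
    return " <" + metavar + ">"

print(repr(sketch_parameter({"action": "store_true"})))       # ''
print(repr(sketch_parameter({"type": int, "metavar": "N"})))  # ' <N>'
print(repr(sketch_parameter({})))                             # ' <str>'
```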
###Code
import inspect
class OptionGrammarMiner(OptionGrammarMiner):
def add_parameter(self, kwargs, metavar):
if "action" in kwargs:
# No parameter
return ""
type_ = "str"
if "type" in kwargs:
given_type = kwargs["type"]
# int types come as '<class int>'
if inspect.isclass(given_type) and issubclass(given_type, int):
type_ = "int"
if metavar is None:
if "metavar" in kwargs:
metavar = kwargs["metavar"]
else:
metavar = type_
self.add_type_rule(type_)
if metavar != type_:
self.add_metavar_rule(metavar, type_)
param = " <" + metavar + ">"
return param
###Output
_____no_output_____
###Markdown
The method `add_type_rule()` adds a rule for parameter types to the grammar. If the parameter is identified by a meta-variable (say, `N`), we add a rule for this as well to improve legibility.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_type_rule(self, type_):
if type_ == "int":
self.add_int_rule()
else:
self.add_str_rule()
def add_int_rule(self):
self.grammar["<int>"] = ["(-)?<digit>+"]
self.grammar["<digit>"] = crange('0', '9')
def add_str_rule(self):
self.grammar["<str>"] = ["<char>+"]
self.grammar["<char>"] = srange(
string.digits
+ string.ascii_letters
+ string.punctuation)
def add_metavar_rule(self, metavar, type_):
self.grammar["<" + metavar + ">"] = ["<" + type_ + ">"]
###Output
_____no_output_____
###Markdown
The method `add_group()` adds a new mutually exclusive group to the grammar. We define a new symbol (say, `<group>`) for the options added to the group, and use the `required` and `exclusive` flags to define an appropriate expansion operator. The group is then prefixed to the grammar, as in

```
<start> ::= <group> <option>* <arguments>
<group> ::=
```

and filled with the next calls to `add_argument()` within the group.
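For reference, here is a standalone summary (illustration only) of which expansion operator the two flags select:

```python
# Which EBNF operator add_group() below attaches to the <group> symbol,
# depending on the `required` and `exclusive` flags.
def group_expansion(required, exclusive):
    if required and exclusive:
        return "<group>"     # exactly one option from the group
    if required and not exclusive:
        return "<group>+"    # at least one
    if not required and exclusive:
        return "<group>?"    # at most one
    return "<group>*"        # any number

for required in (True, False):
    for exclusive in (True, False):
        print(required, exclusive, "->", group_expansion(required, exclusive))
```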
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_group(self, locals, exclusive):
kwargs = locals["kwargs"]
if self.log:
print(kwargs)
required = kwargs.get("required", False)
group = new_symbol(self.grammar, "<group>")
if required and exclusive:
group_expansion = group
if required and not exclusive:
group_expansion = group + "+"
if not required and exclusive:
group_expansion = group + "?"
if not required and not exclusive:
group_expansion = group + "*"
self.grammar[START_SYMBOL][0] = group_expansion + \
self.grammar[START_SYMBOL][0]
self.grammar[group] = []
self.current_group = group
###Output
_____no_output_____
###Markdown
That's it! With this, we can now extract the grammar from our `process_numbers()` program. Turning on logging again reveals the variables we draw upon.
###Code
miner = OptionGrammarMiner(process_numbers, log=True)
process_numbers_grammar = miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
Here is the extracted grammar:
###Code
process_numbers_grammar
###Output
_____no_output_____
###Markdown
The grammar properly identifies the group found:
###Code
process_numbers_grammar["<start>"]
process_numbers_grammar["<group>"]
###Output
_____no_output_____
###Markdown
It also identifies a `--help` option provided not by us, but by the `argparse` module:
###Code
process_numbers_grammar["<option>"]
###Output
_____no_output_____
###Markdown
The grammar also correctly identifies the types of the arguments:
###Code
process_numbers_grammar["<arguments>"]
process_numbers_grammar["<integers>"]
###Output
_____no_output_____
###Markdown
The rules for `int` are set as defined by `add_int_rule()`:
###Code
process_numbers_grammar["<int>"]
###Output
_____no_output_____
###Markdown
We can take this grammar and convert it to BNF, such that we can fuzz with it right away:
###Code
assert is_valid_grammar(process_numbers_grammar)
grammar = convert_ebnf_grammar(process_numbers_grammar)
assert is_valid_grammar(grammar)
f = GrammarCoverageFuzzer(grammar)
for i in range(10):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Each and every invocation adheres to the rules as set forth in the `argparse` calls. By mining options and arguments from existing programs, we can now fuzz these options out of the box – without having to specify a grammar. Testing Autopep8 Let us try out the option grammar miner on real-world Python programs. `autopep8` is a tool that automatically converts Python code to the [PEP 8 Style Guide for Python Code](https://www.python.org/dev/peps/pep-0008/). (Actually, all Python code in this book runs through `autopep8` during production.) `autopep8` offers a wide range of options, as can be seen by invoking it with `--help`:
###Code
!autopep8 --help
###Output
_____no_output_____
###Markdown
Autopep8 SetupWe want to systematically test these options. In order to deploy our configuration grammar miner, we need to find the source code of the executable:
###Code
import os
def find_executable(name):
for path in os.get_exec_path():
qualified_name = os.path.join(path, name)
if os.path.exists(qualified_name):
return qualified_name
return None
autopep8_executable = find_executable("autopep8")
assert autopep8_executable is not None
autopep8_executable
###Output
_____no_output_____
###Markdown
Next, we build a function that reads the contents of the file and executes it.
###Code
def autopep8():
executable = find_executable("autopep8")
# First line has to contain "/usr/bin/env python" or like
first_line = open(executable).readline()
assert first_line.find("python") >= 0
contents = open(executable).read()
exec(contents)
###Output
_____no_output_____
###Markdown
Mining an Autopep8 GrammarWe can use the `autopep8()` function in our grammar miner:
###Code
autopep8_miner = OptionGrammarMiner(autopep8)
###Output
_____no_output_____
###Markdown
and extract a grammar for it:
###Code
autopep8_ebnf_grammar = autopep8_miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
This works because here, `autopep8` is not a separate process (and a separate Python interpreter), but we run the `autopep8()` function (and the `autopep8` code) in our current Python interpreter – up to the call to `parse_args()`, where we interrupt execution again. At this point, the `autopep8` code has done nothing but set up the argument parser – which is what we are interested in. The grammar options mined reflect precisely the options seen when providing `--help`:
###Code
print(autopep8_ebnf_grammar["<option>"])
###Output
_____no_output_____
###Markdown
Metavariables like `<n>` or `<line>` are placeholders for integers. We assume all metavariables of the same name have the same type:
###Code
autopep8_ebnf_grammar["<line>"]
###Output
_____no_output_____
###Markdown
The grammar miner has inferred that the argument to `autopep8` is a list of files:
###Code
autopep8_ebnf_grammar["<arguments>"]
###Output
_____no_output_____
###Markdown
which in turn all are strings:
###Code
autopep8_ebnf_grammar["<files>"]
###Output
_____no_output_____
###Markdown
As we are only interested in testing options, not arguments, we fix the arguments to a single mandatory input. (Otherwise, we'd have plenty of random file names generated.)
###Code
autopep8_ebnf_grammar["<arguments>"] = [" <files>"]
autopep8_ebnf_grammar["<files>"] = ["foo.py"]
assert is_valid_grammar(autopep8_ebnf_grammar)
###Output
_____no_output_____
###Markdown
Creating Autopep8 Options Let us now use the inferred grammar for fuzzing. Again, we convert the EBNF grammar into a regular BNF grammar:
###Code
autopep8_grammar = convert_ebnf_grammar(autopep8_ebnf_grammar)
assert is_valid_grammar(autopep8_grammar)
###Output
_____no_output_____
###Markdown
And we can use the grammar for fuzzing all options:
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=4)
for i in range(20):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Let us apply these options on the actual program. We need a file `foo.py` that will serve as input:
###Code
def create_foo_py():
open("foo.py", "w").write("""
def twice(x = 2):
return x + x
""")
create_foo_py()
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We see how `autopep8` fixes the spacing:
###Code
!autopep8 foo.py
###Output
_____no_output_____
###Markdown
Let us now put things together. We define a `ProgramRunner` that will run the `autopep8` executable with arguments coming from the mined `autopep8` grammar.
###Code
from Fuzzer import ProgramRunner
###Output
_____no_output_____
###Markdown
Running `autopep8` with the mined options reveals a surprisingly high number of passing runs. (We see that some options depend on each other or are mutually exclusive, but this is handled by the program logic, not the argument parser, and hence out of our scope.) The `GrammarCoverageFuzzer` ensures that each option is tested at least once. (Digits and letters, too, by the way.)
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=5)
for i in range(20):
invocation = "autopep8" + f.fuzz()
print("$ " + invocation)
args = invocation.split()
autopep8 = ProgramRunner(args)
result, outcome = autopep8.run()
if result.stderr != "":
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
Our `foo.py` file now has been formatted in place a number of times:
###Code
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We don't need it anymore, so we clean up things:
###Code
import os
os.remove("foo.py")
###Output
_____no_output_____
###Markdown
Classes for Fuzzing Configuration OptionsLet us now create reusable classes that we can use for testing arbitrary programs. (Okay, make that "arbitrary programs that are written in Python and use the `argparse` module to process command-line arguments.") The class `OptionRunner` is a subclass of `ProgramRunner` that takes care of automatically determining the grammar, using the same steps we used for `autopep8`, above.
###Code
class OptionRunner(ProgramRunner):
def __init__(self, program, arguments=None):
if isinstance(program, str):
self.base_executable = program
else:
self.base_executable = program[0]
self.find_contents()
self.find_grammar()
if arguments is not None:
self.set_arguments(arguments)
super().__init__(program)
###Output
_____no_output_____
###Markdown
First, we find the contents of the Python executable:
###Code
class OptionRunner(OptionRunner):
def find_contents(self):
self._executable = find_executable(self.base_executable)
first_line = open(self._executable).readline()
assert first_line.find("python") >= 0
self.contents = open(self._executable).read()
def invoker(self):
exec(self.contents)
def executable(self):
return self._executable
###Output
_____no_output_____
###Markdown
Next, we determine the grammar using the `OptionGrammarMiner` class:
###Code
class OptionRunner(OptionRunner):
def find_grammar(self):
miner = OptionGrammarMiner(self.invoker)
self._ebnf_grammar = miner.mine_ebnf_grammar()
def ebnf_grammar(self):
return self._ebnf_grammar
def grammar(self):
return convert_ebnf_grammar(self._ebnf_grammar)
###Output
_____no_output_____
###Markdown
The two service methods `set_arguments()` and `set_invocation()` help us to change the arguments and program, respectively.
###Code
from Grammars import unreachable_nonterminals
class OptionRunner(OptionRunner):
def set_arguments(self, args):
self._ebnf_grammar["<arguments>"] = [" " + args]
# Delete rules for previous arguments
for nonterminal in unreachable_nonterminals(self._ebnf_grammar):
del self._ebnf_grammar[nonterminal]
def set_invocation(self, program):
self.program = program
###Output
_____no_output_____
###Markdown
We can instantiate the class on `autopep8` and immediately get the grammar:
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
print(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
###Markdown
An `OptionFuzzer` interacts with the given `OptionRunner` to obtain its grammar, which is then passed to its `GrammarCoverageFuzzer` superclass.
###Code
class OptionFuzzer(GrammarCoverageFuzzer):
def __init__(self, runner, *args, **kwargs):
assert issubclass(type(runner), OptionRunner)
self.runner = runner
grammar = runner.grammar()
super().__init__(grammar, *args, **kwargs)
###Output
_____no_output_____
###Markdown
When invoking `run()`, the `OptionFuzzer` creates a new invocation (using `fuzz()` from its grammar) and runs the now given (or previously set) runner with the arguments from the grammar. Note that the runner specified in `run()` can differ from the one set during initialization; this allows for mining options from one program and applying them in another context.
###Code
class OptionFuzzer(OptionFuzzer):
def run(self, runner=None, inp=""):
if runner is None:
runner = self.runner
assert issubclass(type(runner), OptionRunner)
invocation = runner.executable() + " " + self.fuzz()
runner.set_invocation(invocation.split())
return runner.run(inp)
###Output
_____no_output_____
###Markdown
Example: Autopep8Let us apply our newly defined classes on the `autopep8` runner:
###Code
autopep8_fuzzer = OptionFuzzer(autopep8_runner, max_nonterminals=5)
for i in range(3):
print(autopep8_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
We can now systematically test `autopep8` with these classes:
###Code
autopep8_fuzzer.run(autopep8_runner)
###Output
_____no_output_____
###Markdown
Example: MyPyWe can extract options for the `mypy` static type checker for Python:
###Code
assert find_executable("mypy") is not None
mypy_runner = OptionRunner("mypy", "foo.py")
print(mypy_runner.ebnf_grammar()["<option>"])
mypy_fuzzer = OptionFuzzer(mypy_runner, max_nonterminals=5)
for i in range(10):
print(mypy_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Example: NotedownHere are the configuration options for the `notedown` Notebook to Markdown converter:
###Code
assert find_executable("notedown") is not None
notedown_runner = OptionRunner("notedown")
print(notedown_runner.ebnf_grammar()["<option>"])
notedown_fuzzer = OptionFuzzer(notedown_runner, max_nonterminals=5)
for i in range(10):
print(notedown_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Combinatorial TestingOur `GrammarCoverageFuzzer` does a good job in covering each and every option at least once, which is great for systematic testing. However, as we can also see in our examples above, some options require each other, while others interfere with each other. What we should do as good testers is not only to cover every option individually, but also _combinations_ of options. The Python `itertools` module gives us means to create combinations from lists. We can, for instance, take the `notedown` options and create a list of all pairs.
###Code
from itertools import combinations
option_list = notedown_runner.ebnf_grammar()["<option>"]
pairs = list(combinations(option_list, 2))
###Output
_____no_output_____
###Markdown
There's quite a number of pairs:
###Code
len(pairs)
print(pairs[:20])
###Output
_____no_output_____
###Markdown
Testing every such pair of options frequently suffices to cover all interferences between options. (Programs rarely have conditions involving three or more configuration settings.) To this end, we _change_ the grammar from having a list of options to having a list of _option pairs_, such that covering these will automatically cover all pairs. We create a function `pairwise()` that takes a list of options as occurring in our grammar and returns a list of _pairwise options_ – that is, our original options, but concatenated.
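As a quick preview of the idea, here is what pairwise concatenation looks like on a toy list of made-up options:

```python
# Toy illustration of pairwise concatenation (independent of notedown).
from itertools import combinations
toy_options = [" -a", " -b", " -c"]
print([first + second for first, second in combinations(toy_options, 2)])
# [' -a -b', ' -a -c', ' -b -c']
```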
###Code
def pairwise(option_list):
return [option_1 + option_2
for (option_1, option_2) in combinations(option_list, 2)]
###Output
_____no_output_____
###Markdown
Here's the first 20 pairs:
###Code
print(pairwise(option_list)[:20])
###Output
_____no_output_____
###Markdown
The new grammar `pairwise_notedown_grammar` is a copy of the `notedown` grammar, but with the list of options replaced with the above pairwise option list.
###Code
notedown_grammar = notedown_runner.grammar()
pairwise_notedown_grammar = extend_grammar(notedown_grammar)
pairwise_notedown_grammar["<option>"] = pairwise(notedown_grammar["<option>"])
assert is_valid_grammar(pairwise_notedown_grammar)
###Output
_____no_output_____
###Markdown
Using the "pairwise" grammar to fuzz now covers one pair after another:
###Code
notedown_fuzzer = GrammarCoverageFuzzer(
pairwise_notedown_grammar, max_nonterminals=4)
for i in range(10):
print(notedown_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Can we actually test all combinations of options? Not in practice, as the number of combinations quickly grows as the length increases. It decreases again as the number of options reaches the maximum (with 20 options, there is only 1 combination involving _all_ options), but the absolute numbers are still staggering:
###Code
for combination_length in range(1, 20):
tuples = list(combinations(option_list, combination_length))
print(combination_length, len(tuples))
###Output
_____no_output_____
###Markdown
Formally, the number of combinations of length $k$ in a set of options of length $n$ is the binomial coefficient$${n \choose k} = \frac{n!}{k!(n - k)!}$$ which for $k = 2$ (all pairs) gives us$${n \choose 2} = \frac{n!}{2!(n - 2)!} = \frac{n \times (n - 1)}{2}$$ For `autopep8` with its 29 options...
###Code
len(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
###Markdown
... we thus need 406 tests to cover all pairs:
###Code
len(autopep8_runner.ebnf_grammar()["<option>"]) * \
    (len(autopep8_runner.ebnf_grammar()["<option>"]) - 1) // 2
###Output
_____no_output_____
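###Markdown
As a cross-check, Python's built-in `math.comb()` computes the binomial coefficient directly; for the 29 options counted above, it yields the number of unordered option pairs:
###Code
from math import comb
comb(29, 2)
###Output
_____no_output_____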
###Markdown
For `mypy` with its 110 options, though, we already end up with 5,995 tests to be conducted:
###Code
len(mypy_runner.ebnf_grammar()["<option>"])
len(mypy_runner.ebnf_grammar()["<option>"]) * \
    (len(mypy_runner.ebnf_grammar()["<option>"]) - 1) // 2
###Output
_____no_output_____
###Markdown
Even if each pair takes a second to run, we'd still be done in a few hours of testing, though. If your program has more options than you want to get covered in combinations, it is advisable that you limit the number of configurations further – for instance by limiting combinatorial testing to those combinations that possibly can interact with each other; and covering all other (presumably orthogonal) options individually. This mechanism of creating configurations by extending grammars can be easily extended to other configuration targets. One may want to explore a greater number of configurations, or expansions in specific contexts. The [exercises](Exercises), below, have a number of options ready for you. Synopsis

This chapter provides two classes:

* `OptionRunner` automatically extracts command-line options from a Python program;
* `OptionFuzzer` uses these to automatically test a Python program with a large variety of options.

`OptionRunner` runs a program up to the point where it parses its arguments, and then extracts a grammar that describes its invocations:
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
###Output
_____no_output_____
###Markdown
The grammar can be extracted via the method `ebnf_grammar()`:
###Code
option_ebnf_grammar = autopep8_runner.ebnf_grammar()
print(option_ebnf_grammar)
###Output
_____no_output_____
###Markdown
The grammar can be immediately used for fuzzing. A `GrammarCoverageFuzzer` will ensure all options are covered:
###Code
from Grammars import convert_ebnf_grammar
fuzzer = GrammarCoverageFuzzer(convert_ebnf_grammar(option_ebnf_grammar))
[fuzzer.fuzz() for i in range(3)]
###Output
_____no_output_____
###Markdown
The `OptionFuzzer` class summarizes these steps. Its constructor takes an `OptionRunner` to automatically extract the grammar; it does the necessary steps to extract the grammar and fuzz with it.
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
autopep8_fuzzer = OptionFuzzer(autopep8_runner)
[autopep8_fuzzer.fuzz() for i in range(3)]
###Output
_____no_output_____
###Markdown
The final step in testing would now be to invoke the program with these arguments. Note that `OptionRunner` is experimental: It assumes that the Python program in question uses the `argparse` module; and not all `argparse` features are supported. Still, it does a pretty good job even on nontrivial programs. Lessons Learned

* Besides regular input data, program _configurations_ make an important testing target.
* For a given program using a standard library to parse command-line options and arguments, one can automatically extract these and convert them into a grammar.
* To cover not only single options, but combinations of options, one can expand the grammar to cover all pairs, or come up with even more ambitious targets.

Next Steps

If you liked the idea of mining a grammar from a program, do not miss:

* [how to mine grammars for input data](GrammarMiner.ipynb)

Our next steps in the book focus on:

* [how to parse and recombine inputs](Parser.ipynb)
* [how to assign weights and probabilities to specific productions](ProbabilisticGrammarFuzzer.ipynb)
* [how to simplify inputs that cause a failure](Reducer.ipynb)

Background

Although configuration data is just as likely to cause failures as other input data, it has received relatively little attention in test generation – possibly because, unlike "regular" input data, configuration data is not so much under control of external parties, and because, again unlike regular data, there is little variance in configurations. Creating models for software configurations and using these models for testing is commonplace, as is the idea of pairwise testing. For an overview, see \cite{Pezze2008}; for a discussion and comparison of state-of-the-art techniques, see \cite{Petke2015}.More specifically, \cite{Sutton2007} also discuss techniques to systematically cover command-line options. Dai et al. \cite{Dai2010} apply configuration fuzzing by changing variables associated with configuration files.

Exercises

Exercise 1: #ifdef Configuration Fuzzing

In C programs, the *C preprocessor* can be used to choose which code parts should be compiled and which ones should not. As an example, in the C code

```C
#ifdef LONG_FOO
long foo() { ... }
#else
int foo() { ... }
#endif
```

the compiler will compile the function `foo()` with return type `long` if the _preprocessor variable_ `LONG_FOO` is defined, and with return type `int` if not. Such preprocessor variables are either set in the source files (using `#define`, as in `#define LONG_FOO`) or on the C compiler command line (using `-D<variable>` or `-D<variable>=<value>`, as in `-DLONG_FOO`). Such *conditional compilation* is used to configure C programs towards their environment. System-specific code can contain lots of conditional compilation. As an example, consider this excerpt of `xmlparse.c`, the XML parser that is part of the Python runtime library:

```c
#if defined(_WIN32) && !defined(LOAD_LIBRARY_SEARCH_SYSTEM32)
# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800
#endif
#if !defined(HAVE_GETRANDOM) && !defined(HAVE_SYSCALL_GETRANDOM) \
    && !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_ARC4RANDOM) \
    && !defined(XML_DEV_URANDOM) \
    && !defined(_WIN32) \
    && !defined(XML_POOR_ENTROPY)
# error
#endif
#if !defined(TIOCSWINSZ) || defined(__SCO__) || defined(__UNIXWARE__)
#define USE_SYSV_ENVVARS /* COLUMNS/LINES vs. TERMCAP */
#endif
#ifdef XML_UNICODE_WCHAR_T
#define XML_T(x) (const wchar_t)x
#define XML_L(x) L ## x
#else
#define XML_T(x) (const unsigned short)x
#define XML_L(x) x
#endif
int fun(int x) { return XML_T(x); }
```

A typical configuration for the C preprocessor on the above code could be `cc -c -D_WIN32 -DXML_POOR_ENTROPY -DXML_UNICODE_WCHAR_T xmlparse.c`, defining the given preprocessor variables and selecting the appropriate code fragments. Since the compiler can only compile one configuration at a time (implying that we can also only _test_ one resulting executable at a time), your task is to find out which of these configurations actually compile. To this end, proceed in three steps. Part 1: Extract Preprocessor Variables

Write a _function_ `cpp_identifiers()` that, given a set of lines (say, from `open(filename).readlines()`), extracts all preprocessor variables referenced in `#if` or `#ifdef` preprocessor instructions. Apply `cpp_identifiers()` on the sample C input above, such that

```python
cpp_identifiers(open("xmlparse.c").readlines())
```

returns the set

```python
{'_WIN32', 'LOAD_LIBRARY_SEARCH_SYSTEM32', 'HAVE_GETRANDOM', 'HAVE_SYSCALL_GETRANDOM', 'HAVE_ARC4RANDOM_BUF', ...}
```

**Solution.** Let us start with creating a sample input file, `xmlparse.c`:
###Code
filename = "xmlparse.c"
open(filename, "w").write(
"""
#if defined(_WIN32) && !defined(LOAD_LIBRARY_SEARCH_SYSTEM32)
# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800
#endif
#if !defined(HAVE_GETRANDOM) && !defined(HAVE_SYSCALL_GETRANDOM) \
&& !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_ARC4RANDOM) \
&& !defined(XML_DEV_URANDOM) \
&& !defined(_WIN32) \
&& !defined(XML_POOR_ENTROPY)
# error
#endif
#if !defined(TIOCSWINSZ) || defined(__SCO__) || defined(__UNIXWARE__)
#define USE_SYSV_ENVVARS /* COLUMNS/LINES vs. TERMCAP */
#endif
#ifdef XML_UNICODE_WCHAR_T
#define XML_T(x) (const wchar_t)x
#define XML_L(x) L ## x
#else
#define XML_T(x) (const unsigned short)x
#define XML_L(x) x
#endif
int fun(int x) { return XML_T(x); }
""");
###Output
_____no_output_____
###Markdown
To find C preprocessor `#if` directives and preprocessor variables, we use regular expressions matching them.
###Code
import re
re_cpp_if_directive = re.compile(r"\s*#\s*(el)?if")
re_cpp_identifier = re.compile(r"[a-zA-Z_$]+")
def cpp_identifiers(lines):
identifiers = set()
for line in lines:
if re_cpp_if_directive.match(line):
identifiers |= set(re_cpp_identifier.findall(line))
# These are preprocessor keywords
identifiers -= {"if", "ifdef", "ifndef", "defined"}
return identifiers
cpp_ids = cpp_identifiers(open("xmlparse.c").readlines())
cpp_ids
###Output
_____no_output_____
###Markdown
Part 2: Derive an Option Grammar

With the help of `cpp_identifiers()`, create a grammar which has C compiler invocations with a list of options, where each option takes the form `-D<variable>` for a preprocessor variable `<variable>`. Using this grammar `cpp_grammar`, a fuzzer

```python
g = GrammarCoverageFuzzer(cpp_grammar)
```

would create C compiler invocations such as

```python
[g.fuzz() for i in range(10)]

['cc -DHAVE_SYSCALL_GETRANDOM xmlparse.c',
 'cc -D__SCO__ -DRANDOM_BUF -DXML_UNICODE_WCHAR_T -D__UNIXWARE__ xmlparse.c',
 'cc -DXML_POOR_ENTROPY xmlparse.c',
 'cc -DRANDOM xmlparse.c',
 'cc -D_WIN xmlparse.c',
 'cc -DHAVE_ARC xmlparse.c',
 ...]
```

**Solution.** This is not very difficult:
###Code
from Grammars import new_symbol
cpp_grammar = {
"<start>": ["cc -c<options> " + filename],
"<options>": ["<option>", "<options><option>"],
"<option>": []
}
for id in cpp_ids:
s = new_symbol(cpp_grammar, "<" + id + ">")
cpp_grammar["<option>"].append(s)
cpp_grammar[s] = [" -D" + id]
cpp_grammar
assert is_valid_grammar(cpp_grammar)
###Output
_____no_output_____
###Markdown
Part 3: C Preprocessor Configuration Fuzzing

Using the grammar just produced, use a `GrammarCoverageFuzzer` to

1. Test each preprocessor variable individually
2. Test each pair of preprocessor variables, using `pairwise()`.

What happens if you actually run the invocations? **Solution.** We can simply run the coverage fuzzer, as described above.
###Code
g = GrammarCoverageFuzzer(cpp_grammar)
g.fuzz()
from Fuzzer import ProgramRunner
for i in range(10):
invocation = g.fuzz()
print("$", invocation)
# subprocess.call(invocation, shell=True)
cc_runner = ProgramRunner(invocation.split(' '))
(result, outcome) = cc_runner.run()
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
To test all pairs, we can use `pairwise()`:
###Code
pairwise_cpp_grammar = extend_grammar(cpp_grammar)
pairwise_cpp_grammar["<option>"] = pairwise(cpp_grammar["<option>"])
pairwise_cpp_grammar["<option>"][:10]
g = GrammarCoverageFuzzer(pairwise_cpp_grammar)
for i in range(10):
invocation = g.fuzz()
print("$", invocation)
# subprocess.call(invocation, shell=True)
cc_runner = ProgramRunner(invocation.split(' '))
(result, outcome) = cc_runner.run()
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
Some of the compilation errors we get could be expected – for instance, defining `XML_UNICODE_WCHAR_T` when the type is actually not supported in our environment. Other errors may not be expected – and it is these errors we would find through systematic configuration fuzzing, as described above. At the end, don't forget to clean up:
###Code
os.remove("xmlparse.c")
if os.path.exists("xmlparse.o"):
os.remove("xmlparse.o")
###Output
_____no_output_____
###Markdown
Exercise 2: .ini Configuration Fuzzing

Besides command-line options, another important source of configurations is _configuration files_. In this exercise, we will consider the very simple configuration language provided by the Python `ConfigParser` module, which is very similar to what is found in Microsoft Windows _.ini_ files. The following example for a `ConfigParser` input file stems right from [the ConfigParser documentation](https://docs.python.org/3/library/configparser.html):

```
[DEFAULT]
ServerAliveInterval = 45
Compression = yes
CompressionLevel = 9
ForwardX11 = yes

[bitbucket.org]
User = hg

[topsecret.server.com]
Port = 50022
ForwardX11 = no
```

The above `ConfigParser` file can be created programmatically:
###Code
import configparser
config = configparser.ConfigParser()
config['DEFAULT'] = {'ServerAliveInterval': '45',
'Compression': 'yes',
'CompressionLevel': '9'}
config['bitbucket.org'] = {}
config['bitbucket.org']['User'] = 'hg'
config['topsecret.server.com'] = {}
topsecret = config['topsecret.server.com']
topsecret['Port'] = '50022' # mutates the parser
topsecret['ForwardX11'] = 'no' # same here
config['DEFAULT']['ForwardX11'] = 'yes'
with open('example.ini', 'w') as configfile:
config.write(configfile)
with open('example.ini') as configfile:
print(configfile.read(), end="")
###Output
_____no_output_____
###Markdown
and be read in again:
###Code
config = configparser.ConfigParser()
config.read('example.ini')
topsecret = config['topsecret.server.com']
topsecret['Port']
###Output
_____no_output_____
###Markdown
Part 1: Read Configuration

Using `configparser`, create a program reading in the above configuration file and accessing the individual elements.

Part 2: Create a Configuration Grammar

Design a grammar that will automatically create configuration files suitable for your above program. Fuzz your program with it.

Part 3: Mine a Configuration Grammar

By dynamically tracking the individual accesses to configuration elements, you can again extract a basic grammar from the execution. To this end, create a subclass of `ConfigParser` with a special method `__getitem__`:
###Code
class TrackingConfigParser(configparser.ConfigParser):
def __getitem__(self, key):
print("Accessing", repr(key))
return super().__getitem__(key)
###Output
_____no_output_____
###Markdown
For a `TrackingConfigParser` object `p`, `p.__getitem__(key)` will be invoked whenever `p[key]` is accessed:
###Code
tracking_config_parser = TrackingConfigParser()
tracking_config_parser.read('example.ini')
section = tracking_config_parser['topsecret.server.com']
###Output
_____no_output_____
###Markdown
Using `__getitem__()`, as above, implement a tracking mechanism that, while your program accesses the read configuration, automatically saves options accessed and values read. Create a prototype grammar from these values; use it for fuzzing. At the end, don't forget to clean up:
###Code
import os
os.remove("example.ini")
###Output
_____no_output_____
###Markdown
**Solution.** Left to the reader. Enjoy!

Exercise 3: Extracting and Fuzzing C Command-Line Options

In C programs, the `getopt()` function is frequently used to process configuration options. A call

```
getopt(argc, argv, "bf:")
```

indicates that the program accepts two options `-b` and `-f`, with `-f` taking an argument (as indicated by the following colon). Part 1: Getopt Fuzzing

Write a framework which, for a given C program, automatically extracts the argument to `getopt()` and derives a fuzzing grammar for it. There are multiple ways to achieve this:

1. Scan the program source code for occurrences of `getopt()` and return the string passed. (Crude, but should frequently work.)
2. Insert your own implementation of `getopt()` into the source code (effectively replacing `getopt()` from the runtime library), which outputs the `getopt()` argument and exits the program. Recompile and run.
3. (Advanced.) As above, but instead of changing the source code, hook into the _dynamic linker_ which at runtime links the program with the C runtime library. Set the library loading path (on Linux and Unix, this is the `LD_LIBRARY_PATH` environment variable) such that your own version of `getopt()` is linked first, and the regular libraries later. Executing the program (without recompiling) should yield the desired result.

Apply this on `grep` and `ls`; report the resulting grammars and results. **Solution.** Left to the reader. Enjoy hacking! Part 2: Fuzzing Long Options in C

Same as Part 1, but also hook into the GNU variant `getopt_long()`, which accepts "long" arguments with double dashes such as `--help`. Note that method 1, above, will not work here, since the "long" options are defined in a separately defined structure. **Solution.** Left to the reader. Enjoy hacking!

Exercise 4: Expansions in Context

In our above option configurations, we have multiple symbols which all expand to the same integer. For instance, the `--line-range` option of `autopep8` takes two `<line>` parameters which both expand into the same `<int>` symbol:

```
<option> ::= ... | --line-range <line> <line> | ...
<line>   ::= <int>
<int>    ::= (-)?<digit>+
<digit>  ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
```
###Code
autopep8_runner.ebnf_grammar()["<line>"]
autopep8_runner.ebnf_grammar()["<int>"]
autopep8_runner.ebnf_grammar()["<digit>"]
###Output
_____no_output_____
###Markdown
Testing ConfigurationsThe behavior of a program is not only governed by its data. The _configuration_ of a program – that is, the settings that govern the execution of a program on its (regular) input data, as set by options or configuration files – just as well influences behavior, and thus can and should be tested. In this chapter, we explore how to systematically _test_ and _cover_ software configurations. By _automatically inferring configuration options_, we can apply these techniques out of the box, with no need for writing a grammar. Finally, we show how to systematically cover _combinations_ of configuration options, quickly detecting unwanted interferences.
###Code
from bookutils import YouTubeVideo
YouTubeVideo('L0ztoXVru2U')
###Output
_____no_output_____
###Markdown
**Prerequisites*** You should have read the [chapter on grammars](Grammars.ipynb).* You should have read the [chapter on grammar coverage](GrammarCoverageFuzzer.ipynb).
###Code
import bookutils
from typing import List, Union, Optional, Callable, Type
###Output
_____no_output_____
###Markdown
SynopsisTo [use the code provided in this chapter](Importing.ipynb), write```python>>> from fuzzingbook.ConfigurationFuzzer import ```and then make use of the following features.This chapter provides two classes:* `OptionRunner` automatically extract command-line options from a Python program;* `OptionFuzzer` uses these to automatically test a Python program with a large variety of options.`OptionRunner` runs a program up to the point where it parses its arguments, and then extracts a grammar that describes its invocations:```python>>> autopep8_runner = OptionRunner("autopep8", "foo.py")```The grammar can be extracted via the method `ebnf_grammar()`:```python>>> option_ebnf_grammar = autopep8_runner.ebnf_grammar()>>> option_ebnf_grammar{'': ['()*'], '': [' -h', ' --help', ' --version', ' -v', ' --verbose', ' -d', ' --diff', ' -i', ' --in-place', ' --global-config ', ' --ignore-local-config', ' -r', ' --recursive', ' -j ', ' --jobs ', ' -p ', ' --pep8-passes ', ' -a', ' --aggressive', ' --experimental', ' --exclude ', ' --list-fixes', ' --ignore ', ' --select ', ' --max-line-length ', ' --line-range ', ' --range ', ' --indent-size ', ' --hang-closing', ' --exit-code'], '': [' foo.py'], '': ['+'], '': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '!', '"', '', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '<', '=', '>', '?', '@', '[', '\\', ']', '^', '_', '`', '{', '|', '}', '~'], '': [''], '': ['(-)?+'], '': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], '': [''], '': [''], '': [''], '': ['']}```The grammar can be immediately used for fuzzing. A `GrammarCoverageFuzzer` will ensure all options are covered:```python>>> from Grammars import convert_ebnf_grammar>>> fuzzer = GrammarCoverageFuzzer(convert_ebnf_grammar(option_ebnf_grammar))>>> [fuzzer.fuzz() for i in range(3)][' foo.py', ' --max-line-length 6 --jobs -594 --ignore , --ignore-local-config -r --in-place --list-fixes --recursive -v --experimental -p 72 -h --aggressive --indent-size 3 --exit-code --hang-closing --pep8-passes -180 -d --global-config XQjT --diff --exclude *g -j 43 --help --select A --version --verbose -a --line-range -3963 0 --range 1 4 -i --in-place --version foo.py', ' --global-config 2 --select PuR --ignore b --ignore @ --ignore ;7d --ignore ) --ignore Fw1Z --ignore 0 --global-config ynf --select >G --select + --global-config ( --exclude v --exclude V --ignore ^ --select L --exclude 6 --exclude =$` --ignore % --global-config N --ignore [8maop --ignore 3! --select ~?c< --exclude C --select U --exclude h --global-config --global-config 5O --select x --select B] --ignore _ --global-config .K --global-config S --exclude r --global-config qW --exclude te4/ --exclude J} --ignore " --exclude |H --global-config -&k{s --global-config E --select :I --ignore 9 --global-config M --exclude YD --select \\ --exclude z --ignore i --select \'l --ignore M --ignore ;h --exit-code foo.py']```The `OptionFuzzer` class summarizes these steps. 
Its constructor takes an `OptionRunner` to automatically extract the grammar; it does the necessary steps to extract the grammar and fuzz with it.```python>>> autopep8_runner = OptionRunner("autopep8", "foo.py")>>> autopep8_fuzzer = OptionFuzzer(autopep8_runner)>>> [autopep8_fuzzer.fuzz() for i in range(3)][' --diff foo.py', ' --exclude --global-config V --select He --global-config | --global-config n}aicm --ignore 7 --ignore b --global-config u --exclude WB` --exclude 2 --exclude JpZt --exclude l_ --select *%^ --exclude & --exclude )Lv --global-config [ --global-config " --exclude sOEXP --aggressive --exclude \' --help --diff --experimental foo.py', ' --ignore FCw; --global-config /1K?:6 --exclude U --exclude z --ignore rQ --select x --select Y --select { --global-config o --select 34 --exclude ]j --select ~ --exclude 9@ --ignore w --global-config CVL --diff foo.py']```The final step in testing would now to invoke the program with these arguments.Note that `OptionRunner` is experimental: It assumes that the Python program in question uses the `argparse` module; and not all `argparse` features are supported. Still, it does a pretty good job even on nontrivial programs.The `OptionRunner` constructor accepts an additional `miner` keyword parameter, which takes the class of the argument grammar miner to be used. By default, this is `OptionGrammarMiner` – a helper class that can be used (and extended) to create own option grammar miners. Configuration OptionsWhen we talk about the input to a program, we usually think of the _data_ it processes. This is also what we have been fuzzing in the past chapters – be it with [random input](Fuzzer.ipynb), [mutation-based fuzzing](MutationFuzzer.ipynb), or [grammar-based fuzzing](GrammarFuzzer.ipynb). However, programs typically have several input sources, all of which can and should be tested – and included in test generation. One important source of input is the program's _configuration_ – that is, a set of inputs that typically is set once when beginning to process data and then stays constant while processing data, while the program is running, or even while the program is deployed. Such a configuration is frequently set in _configuration files_ (for instance, as key/value pairs); the most ubiquitous method for command-line tools, though, are _configuration options_ on the command line. As an example, consider the `grep` utility to find textual patterns in files. The exact mode by which `grep` works is governed by a multitude of options, which can be listed by providing a `--help` option:
###Code
!grep --help
###Output
_____no_output_____
###Markdown
All these options need to be tested for whether they operate correctly. In security testing, any such option may also trigger a yet unknown vulnerability. Hence, such options can become _fuzz targets_ on their own. In this chapter, we analyze how to systematically test such options – and better yet, how to extract possible configurations right out of given program files, such that we do not have to specify anything. Options in PythonLet us stick to our common programming language here and examine how options are processed in Python. The `argparse` module provides a parser for command-line arguments (and options) with great functionality – and great complexity. You start by defining a parser (`argparse.ArgumentParser()`) to which individual arguments with various features are added, one after another. Additional parameters for each argument can specify the type (`type`) of the argument (say, integers or strings), or the number of arguments (`nargs`). By default, arguments are stored under their name in the `args` object coming from `parse_args()` – thus, `args.integers` holds the `integer` arguments added earlier. Special actions (`actions`) allow to store specific values in given variables; the `store_const` action stores the given `const` in the attribute named by `dest`. The following example takes a number of integer arguments (`integers`) as well as an operator (`--sum`, `--min`, or `--max`) to be applied on these integers. The operators all store a function reference in the `accumulate` attribute, which is finally invoked on the integers parsed:
###Code
import argparse
def process_numbers(args=[]):
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('integers', metavar='N', type=int, nargs='+',
help='an integer for the accumulator')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--sum', dest='accumulate', action='store_const',
const=sum,
help='sum the integers')
group.add_argument('--min', dest='accumulate', action='store_const',
const=min,
help='compute the minimum')
group.add_argument('--max', dest='accumulate', action='store_const',
const=max,
help='compute the maximum')
args = parser.parse_args(args)
print(args.accumulate(args.integers))
###Output
_____no_output_____
###Markdown
Here's how `process_numbers()` works. We can, for instance, invoke the `--min` option on the given arguments to compute the minimum:
###Code
process_numbers(["--min", "100", "200", "300"])
###Output
_____no_output_____
###Markdown
Or compute the sum of three numbers:
###Code
process_numbers(["--sum", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
When defined via `add_mutually_exclusive_group()` (as above), options are mutually exclusive. Consequently, we can have only one operator:
###Code
import bookutils
from ExpectError import ExpectError
with ExpectError(SystemExit, print_traceback=False):
process_numbers(["--sum", "--max", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
A Grammar for ConfigurationsHow can we test a system with several options? The easiest answer is to write a grammar for it. The grammar `PROCESS_NUMBERS_EBNF_GRAMMAR` reflects the possible combinations of options and arguments:
###Code
from Grammars import crange, srange, convert_ebnf_grammar, extend_grammar, is_valid_grammar
from Grammars import START_SYMBOL, new_symbol, Grammar
PROCESS_NUMBERS_EBNF_GRAMMAR: Grammar = {
"<start>": ["<operator> <integers>"],
"<operator>": ["--sum", "--min", "--max"],
"<integers>": ["<integer>", "<integers> <integer>"],
"<integer>": ["<digit>+"],
"<digit>": crange('0', '9')
}
assert is_valid_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
PROCESS_NUMBERS_GRAMMAR = convert_ebnf_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
###Output
_____no_output_____
###Markdown
We can feed this grammar into our [grammar coverage fuzzer](GrammarCoverageFuzzer.ipynb) and have it cover one option after another:
###Code
from GrammarCoverageFuzzer import GrammarCoverageFuzzer
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Of course, we can also invoke `process_numbers()` with these very arguments. To this end, we need to convert the string produced by the grammar back into a list of individual arguments, using `split()`:
###Code
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
args = f.fuzz().split()
print(args)
process_numbers(args)
###Output
_____no_output_____
###Markdown
In a similar way, we can define grammars for any program to be tested; as well as define grammars for, say, configuration files. Yet, the grammar has to be updated with every change to the program, which creates a maintenance burden. Given that the information required for the grammar is already all encoded in the program, the question arises: _Can't we go and extract configuration options right out of the program in the first place?_ Mining Configuration OptionsIn this section, we try to extract option and argument information right out of a program, such that we do not have to specify a configuration grammar. The aim is to have a configuration fuzzer that works on the options and arguments of an arbitrary program, as long as it follows specific conventions for processing its arguments. In the case of Python programs, this means using the `argparse` module.Our idea is as follows: We execute the given program up to the point where the arguments are actually parsed – that is, `argparse.parse_args()` is invoked. Up to this point, we track all calls into the argument parser, notably those calls that define arguments and options (`add_argument()`). From these, we construct the grammar. Tracking ArgumentsLet us illustrate this approach with a simple experiment: We define a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while `process_numbers` is invoked. If we have a call to a method `add_argument`, we access and print out the local variables (which at this point are the arguments to the method).
###Code
import sys
import string
def trace_locals(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(method_name, locals)
###Output
_____no_output_____
###Markdown
What we get is a list of all calls to `add_argument()`, together with the method arguments passed:
###Code
sys.settrace(trace_locals)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
###Markdown
From the `args` argument, we can access the individual options and arguments to be defined:
###Code
def trace_options(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(locals['args'])
sys.settrace(trace_options)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
###Markdown
We see that each argument comes as a tuple with one (say, `integers` or `--sum`) or two members (`-h` and `--help`), which denote alternate forms for the same option. Our job will be to go through the arguments of `add_argument()` and detect not only the names of options and arguments, but also whether they accept additional parameters, as well as the type of the parameters. A Grammar Miner for Options and Arguments Let us now build a class that gathers all this information to create a grammar. We use the `ParseInterrupt` exception to interrupt program execution after gathering all arguments and options:
###Code
class ParseInterrupt(Exception):
pass
###Output
_____no_output_____
###Markdown
The class `OptionGrammarMiner` takes an executable function for which the grammar of options and arguments is to be mined:
###Code
class OptionGrammarMiner:
"""Helper class for extracting option grammars"""
def __init__(self, function: Callable, log: bool = False):
"""Constructor.
`function` - a function processing arguments using argparse()
`log` - output diagnostics if True
"""
self.function = function
self.log = log
###Output
_____no_output_____
###Markdown
The method `mine_ebnf_grammar()` is where everything happens. It creates a grammar of the form

```
<start> ::= <option>* <arguments>
<option> ::=
<arguments> ::=
```

in which the options and arguments will be collected. It then sets a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while the previously defined `function` is invoked. Raising `ParseInterrupt` (when `parse_args()` is invoked) ends execution.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
OPTION_SYMBOL = "<option>"
ARGUMENTS_SYMBOL = "<arguments>"
def mine_ebnf_grammar(self):
"""Extract EBNF option grammar"""
self.grammar = {
START_SYMBOL: ["(" + self.OPTION_SYMBOL + ")*" + self.ARGUMENTS_SYMBOL],
self.OPTION_SYMBOL: [],
self.ARGUMENTS_SYMBOL: []
}
self.current_group = self.OPTION_SYMBOL
old_trace = sys.gettrace()
sys.settrace(self.traceit)
try:
self.function()
except ParseInterrupt:
pass
sys.settrace(old_trace)
return self.grammar
def mine_grammar(self):
"""Extract BNF option grammar"""
return convert_ebnf_grammar(self.mine_ebnf_grammar())
###Output
_____no_output_____
###Markdown
The trace function checks for four methods: `add_argument()` is the most important function, resulting in processing arguments; `frame.f_locals` again is the set of local variables, which at this point is mostly the arguments to `add_argument()`. Since mutually exclusive groups also have a method `add_argument()`, we set the flag `in_group` to differentiate. Note that we make no specific efforts to differentiate between multiple parsers or groups; we simply assume that there is one parser, and at any point at most one mutually exclusive group.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def traceit(self, frame, event, arg):
if event != "call":
return
if "self" not in frame.f_locals:
return
self_var = frame.f_locals["self"]
method_name = frame.f_code.co_name
if method_name == "add_argument":
in_group = repr(type(self_var)).find("Group") >= 0
self.process_argument(frame.f_locals, in_group)
elif method_name == "add_mutually_exclusive_group":
self.add_group(frame.f_locals, exclusive=True)
elif method_name == "add_argument_group":
# self.add_group(frame.f_locals, exclusive=False)
pass
elif method_name == "parse_args":
raise ParseInterrupt
return self.traceit
###Output
_____no_output_____
###Markdown
The method `process_argument()` now analyzes the arguments passed and adds them to the grammar:

* If the argument starts with `-`, it gets added as an optional element to the `<option>` list.
* Otherwise, it gets added to the `<arguments>` list.

The optional `nargs` argument specifies how many arguments can follow. If it is a number, we add the appropriate number of elements to the grammar; if it is an abstract specifier (say, `+` or `*`), we use it directly as an EBNF operator. Given the large number of parameters and optional behavior, this is a somewhat messy function, but it does the job.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def process_argument(self, locals, in_group):
args = locals["args"]
kwargs = locals["kwargs"]
if self.log:
print(args)
print(kwargs)
print()
for arg in args:
self.process_arg(arg, in_group, kwargs)
class OptionGrammarMiner(OptionGrammarMiner):
def process_arg(self, arg, in_group, kwargs):
if arg.startswith('-'):
if not in_group:
target = self.OPTION_SYMBOL
else:
target = self.current_group
metavar = None
arg = " " + arg
else:
target = self.ARGUMENTS_SYMBOL
metavar = arg
arg = ""
if "nargs" in kwargs:
nargs = kwargs["nargs"]
else:
nargs = 1
param = self.add_parameter(kwargs, metavar)
if param == "":
nargs = 0
if isinstance(nargs, int):
for i in range(nargs):
arg += param
else:
assert nargs in "?+*"
arg += '(' + param + ')' + nargs
if target == self.OPTION_SYMBOL:
self.grammar[target].append(arg)
else:
self.grammar[target].append(arg)
###Output
_____no_output_____
###Markdown
The method `add_parameter()` handles possible parameters of options. If the argument has an `action` defined, it takes no parameter. Otherwise, we identify the type of the parameter (as `int` or `str`) and augment the grammar with an appropriate rule.
###Code
import inspect
class OptionGrammarMiner(OptionGrammarMiner):
def add_parameter(self, kwargs, metavar):
if "action" in kwargs:
# No parameter
return ""
type_ = "str"
if "type" in kwargs:
given_type = kwargs["type"]
# int types come as '<class int>'
if inspect.isclass(given_type) and issubclass(given_type, int):
type_ = "int"
if metavar is None:
if "metavar" in kwargs:
metavar = kwargs["metavar"]
else:
metavar = type_
self.add_type_rule(type_)
if metavar != type_:
self.add_metavar_rule(metavar, type_)
param = " <" + metavar + ">"
return param
###Output
_____no_output_____
###Markdown
The method `add_type_rule()` adds a rule for parameter types to the grammar. If the parameter is identified by a meta-variable (say, `N`), we add a rule for this as well to improve legibility.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_type_rule(self, type_):
if type_ == "int":
self.add_int_rule()
else:
self.add_str_rule()
def add_int_rule(self):
self.grammar["<int>"] = ["(-)?<digit>+"]
self.grammar["<digit>"] = crange('0', '9')
def add_str_rule(self):
self.grammar["<str>"] = ["<char>+"]
self.grammar["<char>"] = srange(
string.digits
+ string.ascii_letters
+ string.punctuation)
def add_metavar_rule(self, metavar, type_):
self.grammar["<" + metavar + ">"] = ["<" + type_ + ">"]
###Output
_____no_output_____
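###Markdown
Before wiring everything together, here is a small illustrative check of the methods above (not part of the original mining flow; the keyword arguments are made up to resemble typical `argparse` declarations). We initialize a miner's grammar by hand and feed it a parameterless option and a positional integer argument with `nargs='+'`:
###Code
demo_miner = OptionGrammarMiner(process_numbers)
demo_miner.grammar = {
    START_SYMBOL: ["(" + demo_miner.OPTION_SYMBOL + ")*" + demo_miner.ARGUMENTS_SYMBOL],
    demo_miner.OPTION_SYMBOL: [],
    demo_miner.ARGUMENTS_SYMBOL: []
}
demo_miner.current_group = demo_miner.OPTION_SYMBOL

demo_miner.process_arg("--sum", False, {"action": "store_const"})       # option without parameter
demo_miner.process_arg("integers", False, {"nargs": "+", "type": int})  # repeated int argument

demo_miner.grammar[demo_miner.OPTION_SYMBOL], demo_miner.grammar[demo_miner.ARGUMENTS_SYMBOL]
###Output
_____no_output_____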
###Markdown
The method `add_group()` adds a new mutually exclusive group to the grammar. We define a new symbol (say, `<group>`) for the options added to the group, and use the `required` and `exclusive` flags to define an appropriate expansion operator. The group symbol (with its expansion operator) is then prefixed to the start expansion, as in```<group>(<option>)* <arguments>```and filled with the next calls to `add_argument()` within the group.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_group(self, locals, exclusive):
kwargs = locals["kwargs"]
if self.log:
print(kwargs)
required = kwargs.get("required", False)
group = new_symbol(self.grammar, "<group>")
if required and exclusive:
group_expansion = group
if required and not exclusive:
group_expansion = group + "+"
if not required and exclusive:
group_expansion = group + "?"
if not required and not exclusive:
group_expansion = group + "*"
self.grammar[START_SYMBOL][0] = group_expansion + \
self.grammar[START_SYMBOL][0]
self.grammar[group] = []
self.current_group = group
###Output
_____no_output_____
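###Markdown
Continuing the illustrative sketch from above, we can watch how a required, mutually exclusive group is prefixed to the start symbol (again, the `kwargs` dictionary is made up):
###Code
demo_miner.add_group({"kwargs": {"required": True}}, exclusive=True)
demo_miner.grammar[START_SYMBOL], demo_miner.grammar["<group>"]
###Output
_____no_output_____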
###Markdown
That's it! With this, we can now extract the grammar from our `process_numbers()` program. Turning on logging again reveals the variables we draw upon.
###Code
miner = OptionGrammarMiner(process_numbers, log=True)
process_numbers_grammar = miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
Here is the extracted grammar:
###Code
process_numbers_grammar
###Output
_____no_output_____
###Markdown
The grammar properly identifies the group found:
###Code
process_numbers_grammar["<start>"]
process_numbers_grammar["<group>"]
###Output
_____no_output_____
###Markdown
It also identifies a `--help` option provided not by us, but by the `argparse` module:
###Code
process_numbers_grammar["<option>"]
###Output
_____no_output_____
###Markdown
The grammar also correctly identifies the types of the arguments:
###Code
process_numbers_grammar["<arguments>"]
process_numbers_grammar["<integers>"]
###Output
_____no_output_____
###Markdown
The rules for `int` are set as defined by `add_int_rule()`:
###Code
process_numbers_grammar["<int>"]
###Output
_____no_output_____
###Markdown
We can take this grammar and convert it to BNF, such that we can fuzz with it right away:
###Code
assert is_valid_grammar(process_numbers_grammar)
grammar = convert_ebnf_grammar(process_numbers_grammar)
assert is_valid_grammar(grammar)
f = GrammarCoverageFuzzer(grammar)
for i in range(10):
print(f.fuzz())
###Output
_____no_output_____
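###Markdown
We can even feed the generated invocations back into `process_numbers()` itself; this is a quick illustrative check rather than part of the chapter's flow. We skip invocations containing `-h` or `--help`, since the help action would exit the interpreter:
###Code
f = GrammarCoverageFuzzer(grammar)
for i in range(5):
    args = f.fuzz().split()
    if "-h" in args or "--help" in args:
        continue  # the help action would call sys.exit()
    print(args)
    process_numbers(args)
###Output
_____no_output_____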
###Markdown
Each and every invocation adheres to the rules as set forth in the `argparse` calls. By mining options and arguments from existing programs, we can now fuzz these options out of the box – without having to specify a grammar. Testing Autopep8 Let us try out the option grammar miner on real-world Python programs. `autopep8` is a tool that automatically converts Python code to the [PEP 8 Style Guide for Python Code](https://www.python.org/dev/peps/pep-0008/). (Actually, all Python code in this book runs through `autopep8` during production.) `autopep8` offers a wide range of options, as can be seen by invoking it with `--help`:
###Code
!autopep8 --help
###Output
_____no_output_____
###Markdown
Autopep8 SetupWe want to systematically test these options. In order to deploy our configuration grammar miner, we need to find the source code of the executable:
###Code
import os
def find_executable(name):
for path in os.get_exec_path():
qualified_name = os.path.join(path, name)
if os.path.exists(qualified_name):
return qualified_name
return None
autopep8_executable = find_executable("autopep8")
assert autopep8_executable is not None
autopep8_executable
###Output
_____no_output_____
###Markdown
Next, we build a function that reads the contents of the file and executes it.
###Code
def autopep8():
executable = find_executable("autopep8")
    # First line has to contain "/usr/bin/env python" or the like
first_line = open(executable).readline()
assert first_line.find("python") >= 0
contents = open(executable).read()
exec(contents)
###Output
_____no_output_____
###Markdown
Mining an Autopep8 GrammarWe can use the `autopep8()` function in our grammar miner:
###Code
autopep8_miner = OptionGrammarMiner(autopep8)
###Output
_____no_output_____
###Markdown
and extract a grammar for it:
###Code
autopep8_ebnf_grammar = autopep8_miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
This works because here, `autopep8` is not a separate process (and a separate Python interpreter), but we run the `autopep8()` function (and the `autopep8` code) in our current Python interpreter – up to the call to `parse_args()`, where we interrupt execution again. At this point, the `autopep8` code has done nothing but set up the argument parser – which is what we are interested in. The grammar options mined reflect precisely the options seen when providing `--help`:
###Code
print(autopep8_ebnf_grammar["<option>"])
###Output
_____no_output_____
###Markdown
Metavariables like `<n>` or `<line>` are placeholders for integers. We assume all metavariables of the same name have the same type:
###Code
autopep8_ebnf_grammar["<line>"]
###Output
_____no_output_____
###Markdown
The grammar miner has inferred that the argument to `autopep8` is a list of files:
###Code
autopep8_ebnf_grammar["<arguments>"]
###Output
_____no_output_____
###Markdown
which in turn all are strings:
###Code
autopep8_ebnf_grammar["<files>"]
###Output
_____no_output_____
###Markdown
As we are only interested in testing options, not arguments, we fix the arguments to a single mandatory input. (Otherwise, we'd have plenty of random file names generated.)
###Code
autopep8_ebnf_grammar["<arguments>"] = [" <files>"]
autopep8_ebnf_grammar["<files>"] = ["foo.py"]
assert is_valid_grammar(autopep8_ebnf_grammar)
###Output
_____no_output_____
###Markdown
Creating Autopep8 Options Let us now use the inferred grammar for fuzzing. Again, we convert the EBNF grammar into a regular BNF grammar:
###Code
autopep8_grammar = convert_ebnf_grammar(autopep8_ebnf_grammar)
assert is_valid_grammar(autopep8_grammar)
###Output
_____no_output_____
###Markdown
And we can use the grammar for fuzzing all options:
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=4)
for i in range(20):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Let us apply these options on the actual program. We need a file `foo.py` that will serve as input: (Note that the following commands will overwrite the file `foo.py`, if it already exists in the current working directory. Be aware of this, if you downloaded the notebooks and are working locally.)
###Code
def create_foo_py():
open("foo.py", "w").write("""
def twice(x = 2):
return x + x
""")
create_foo_py()
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We see how `autopep8` fixes the spacing:
###Code
!autopep8 foo.py
###Output
_____no_output_____
###Markdown
Let us now put things together. We define a `ProgramRunner` that will run the `autopep8` executable with arguments coming from the mined `autopep8` grammar.
###Code
from Fuzzer import ProgramRunner
###Output
_____no_output_____
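###Markdown
As a quick reminder of the `ProgramRunner` interface (a sketch; the fixed `--diff` invocation is just an example), a single run looks like this:
###Code
sample_runner = ProgramRunner(["autopep8", "--diff", "foo.py"])
sample_result, sample_outcome = sample_runner.run()
sample_outcome
###Output
_____no_output_____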
###Markdown
Running `autopep8` with the mined options reveals a surprisingly high number of passing runs. (We see that some options depend on each other or are mutually exclusive, but this is handled by the program logic, not the argument parser, and hence out of our scope.) The `GrammarCoverageFuzzer` ensures that each option is tested at least once. (Digits and letters, too, by the way.)
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=5)
for i in range(20):
invocation = "autopep8" + f.fuzz()
print("$ " + invocation)
args = invocation.split()
autopep8_runner = ProgramRunner(args)
result, outcome = autopep8_runner.run()
if result.stderr != "":
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
Our `foo.py` file has now been formatted in place a number of times:
###Code
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We don't need it anymore, so we clean up things:
###Code
import os
os.remove("foo.py")
###Output
_____no_output_____
###Markdown
Classes for Fuzzing Configuration OptionsLet us now create reusable classes that we can use for testing arbitrary programs. (Okay, make that "arbitrary programs that are written in Python and use the `argparse` module to process command-line arguments.") The class `OptionRunner` is a subclass of `ProgramRunner` that takes care of automatically determining the grammar, using the same steps we used for `autopep8`, above.
###Code
from Grammars import unreachable_nonterminals
class OptionRunner(ProgramRunner):
"""Run a program while determining its option grammar"""
def __init__(self, program: Union[str, List[str]],
arguments: Optional[str] = None, *,
log: bool = False,
miner_class: Optional[Type[OptionGrammarMiner]] = None):
"""Constructor.
`program` - the (Python) program to be executed
`arguments` - an (optional) string with arguments for `program`
`log` - if True, enable logging in miner
`miner_class` - the `OptionGrammarMiner` class to be used
(default: `OptionGrammarMiner`)
"""
if isinstance(program, str):
self.base_executable = program
else:
self.base_executable = program[0]
if miner_class is None:
miner_class = OptionGrammarMiner
self.miner_class = miner_class
self.log = log
self.find_contents()
self.find_grammar()
if arguments is not None:
self.set_arguments(arguments)
super().__init__(program)
###Output
_____no_output_____
###Markdown
First, we find the contents of the Python executable:
###Code
class OptionRunner(OptionRunner):
def find_contents(self):
self._executable = find_executable(self.base_executable)
if self._executable is None:
raise IOError(self.base_executable + ": not found")
first_line = open(self._executable).readline()
if first_line.find("python") < 0:
raise IOError(self.base_executable + ": not a Python executable")
self.contents = open(self._executable).read()
def invoker(self):
        # Execute the program contents in a fresh global namespace;
        # setting __name__ to '__main__' makes the script run as if invoked directly
exec(self.contents, {'__name__': '__main__'})
def executable(self):
return self._executable
###Output
_____no_output_____
###Markdown
Next, we determine the grammar using the `OptionGrammarMiner` class:
###Code
class OptionRunner(OptionRunner):
def find_grammar(self):
miner = self.miner_class(self.invoker, log=self.log)
self._ebnf_grammar = miner.mine_ebnf_grammar()
def ebnf_grammar(self):
"""Return extracted grammar in EBNF form"""
return self._ebnf_grammar
def grammar(self):
"""Return extracted grammar in BNF form"""
return convert_ebnf_grammar(self._ebnf_grammar)
###Output
_____no_output_____
###Markdown
The two service methods `set_arguments()` and `set_invocation()` help us to change the arguments and program, respectively.
###Code
class OptionRunner(OptionRunner):
def set_arguments(self, args):
self._ebnf_grammar["<arguments>"] = [" " + args]
# Delete rules for previous arguments
for nonterminal in unreachable_nonterminals(self._ebnf_grammar):
del self._ebnf_grammar[nonterminal]
def set_invocation(self, program):
self.program = program
###Output
_____no_output_____
###Markdown
We can instantiate the class on `autopep8` and immediately get the grammar:
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
print(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
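###Markdown
For illustration, we can invoke `set_arguments()` ourselves; re-fixing the arguments to the same `foo.py` leaves the grammar unchanged, but shows where the setting ends up:
###Code
autopep8_runner.set_arguments("foo.py")
autopep8_runner.ebnf_grammar()["<arguments>"]
###Output
_____no_output_____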
###Markdown
An `OptionFuzzer` interacts with the given `OptionRunner` to obtain its grammar, which is then passed to its `GrammarCoverageFuzzer` superclass.
###Code
class OptionFuzzer(GrammarCoverageFuzzer):
"""Fuzz a (Python) program using its arguments"""
def __init__(self, runner: OptionRunner, *args, **kwargs):
"""Constructor. `runner` is an OptionRunner."""
assert issubclass(type(runner), OptionRunner)
self.runner = runner
grammar = runner.grammar()
super().__init__(grammar, *args, **kwargs)
###Output
_____no_output_____
###Markdown
When invoking `run()`, the `OptionFuzzer` creates a new invocation (using `fuzz()` from its grammar) and runs the now given (or previously set) runner with the arguments from the grammar. Note that the runner specified in `run()` can differ from the one set during initialization; this allows for mining options from one program and applying the mined grammar in another context.
###Code
class OptionFuzzer(OptionFuzzer):
def run(self, runner=None, inp=""):
if runner is None:
runner = self.runner
assert issubclass(type(runner), OptionRunner)
invocation = runner.executable() + " " + self.fuzz()
runner.set_invocation(invocation.split())
return runner.run(inp)
###Output
_____no_output_____
###Markdown
Example: Autopep8Let us apply our newly defined classes on the `autopep8` runner:
###Code
autopep8_fuzzer = OptionFuzzer(autopep8_runner, max_nonterminals=5)
for i in range(3):
print(autopep8_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
We can now systematically test `autopep8` with these classes:
###Code
autopep8_fuzzer.run(autopep8_runner)
###Output
_____no_output_____
###Markdown
Example: MyPyWe can extract options for the `mypy` static type checker for Python:
###Code
assert find_executable("mypy") is not None
mypy_runner = OptionRunner("mypy", "foo.py")
print(mypy_runner.ebnf_grammar()["<option>"])
mypy_fuzzer = OptionFuzzer(mypy_runner, max_nonterminals=5)
for i in range(10):
print(mypy_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Example: NotedownHere are the configuration options for the `notedown` Notebook to Markdown converter:
###Code
assert find_executable("notedown") is not None
import warnings
with warnings.catch_warnings():
# Workaround: `notedown` can issue a `DeprecationWarning`
warnings.filterwarnings("ignore", category=DeprecationWarning)
notedown_runner = OptionRunner("notedown")
print(notedown_runner.ebnf_grammar()["<option>"])
notedown_fuzzer = OptionFuzzer(notedown_runner, max_nonterminals=5)
for i in range(10):
print(notedown_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Combinatorial TestingOur `GrammarCoverageFuzzer` does a good job in covering each and every option at least once, which is great for systematic testing. However, as we also can see in our examples above, some options require each other, while others interfere with each other. What we should do as good testers is not only to cover every option individually, but also _combinations_ of options. The Python `itertools` module gives us the means to create combinations from lists. We can, for instance, take the `notedown` options and create a list of all pairs.
###Code
from itertools import combinations
option_list = notedown_runner.ebnf_grammar()["<option>"]
pairs = list(combinations(option_list, 2))
###Output
_____no_output_____
###Markdown
There's quite a number of pairs:
###Code
len(pairs)
print(pairs[:20])
###Output
_____no_output_____
###Markdown
Testing every such pair of options frequently suffices to cover all interferences between options. (Programs rarely have conditions involving three or more configuration settings.) To this end, we _change_ the grammar from having a list of options to having a list of _option pairs_, such that covering these will automatically cover all pairs. We create a function `pairwise()` that takes a list of options as occurring in our grammar and returns a list of _pairwise options_ – that is, our original options, but concatenated.
###Code
def pairwise(option_list):
return [option_1 + option_2
for (option_1, option_2) in combinations(option_list, 2)]
###Output
_____no_output_____
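###Markdown
On a toy list, `pairwise()` behaves as expected (illustrative):
###Code
pairwise([" -a", " -b", " -c"])
###Output
_____no_output_____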
###Markdown
Here's the first 20 pairs:
###Code
print(pairwise(option_list)[:20])
###Output
_____no_output_____
###Markdown
The new grammar `pairwise_notedown_grammar` is a copy of the `notedown` grammar, but with the list of options replaced with the above pairwise option list.
###Code
notedown_grammar = notedown_runner.grammar()
pairwise_notedown_grammar = extend_grammar(notedown_grammar)
pairwise_notedown_grammar["<option>"] = pairwise(notedown_grammar["<option>"])
assert is_valid_grammar(pairwise_notedown_grammar)
###Output
_____no_output_____
###Markdown
Using the "pairwise" grammar to fuzz now covers one pair after another:
###Code
notedown_pairwise_fuzzer = GrammarCoverageFuzzer(
pairwise_notedown_grammar, max_nonterminals=4)
for i in range(10):
print(notedown_pairwise_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Can we actually test all combinations of options? Not in practice, as the number of combinations quickly grows as the length increases. It decreases again as the combination length approaches the total number of options (with 20 options, there is only 1 combination involving _all_ options), but the absolute numbers are still staggering:
###Code
for combination_length in range(1, 20):
tuples = list(combinations(option_list, combination_length))
print(combination_length, len(tuples))
###Output
_____no_output_____
###Markdown
Formally, the number of combinations of length $k$ in a set of options of length $n$ is the binomial coefficient$${n \choose k} = \frac{n!}{k!(n - k)!}$$ which for $k = 2$ (all pairs) gives us$${n \choose 2} = \frac{n!}{2(n - 2)!} = \frac{n (n - 1)}{2}$$ For `autopep8` with its 30 options...
###Code
len(autopep8_runner.ebnf_grammar()["<option>"])
# docassert
assert len(autopep8_runner.ebnf_grammar()["<option>"]) == 30
###Output
_____no_output_____
###Markdown
... we thus need 435 tests to cover all pairs:
###Code
len(autopep8_runner.ebnf_grammar()["<option>"]) * \
    (len(autopep8_runner.ebnf_grammar()["<option>"]) - 1) // 2
###Output
_____no_output_____
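###Markdown
As a cross-check (a sketch assuming Python 3.8 or later for `math.comb`), the binomial coefficient yields the same number of pairs:
###Code
from math import comb
comb(len(autopep8_runner.ebnf_grammar()["<option>"]), 2)
###Output
_____no_output_____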
###Markdown
For `mypy` with its 143 options, though, we already end up with 10,000+ tests to be conducted:
###Code
len(mypy_runner.ebnf_grammar()["<option>"])
# docassert
assert len(mypy_runner.ebnf_grammar()["<option>"]) == 143
len(mypy_runner.ebnf_grammar()["<option>"]) * \
    (len(mypy_runner.ebnf_grammar()["<option>"]) - 1) // 2
###Output
_____no_output_____
###Markdown
Even if each pair takes a second to run, we'd still be done in three hours of testing, though. If your program has more options that you want to have covered in combinations, it is advisable that you limit the number of configurations further – for instance by limiting combinatorial testing to those combinations that can possibly interact with each other, and covering all other (presumably orthogonal) options individually. This mechanism of creating configurations by extending grammars can be easily extended to other configuration targets. One may want to explore a greater number of configurations, or expansions in specific contexts. The [exercises](Exercises), below, have a number of options ready for you. SynopsisThis chapter provides two classes:* `OptionRunner` automatically extracts command-line options from a Python program;* `OptionFuzzer` uses these to automatically test a Python program with a large variety of options. `OptionRunner` runs a program up to the point where it parses its arguments, and then extracts a grammar that describes its invocations:
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
###Output
_____no_output_____
###Markdown
The grammar can be extracted via the method `ebnf_grammar()`:
###Code
option_ebnf_grammar = autopep8_runner.ebnf_grammar()
option_ebnf_grammar
###Output
_____no_output_____
###Markdown
The grammar can be immediately used for fuzzing. A `GrammarCoverageFuzzer` will ensure all options are covered:
###Code
from Grammars import convert_ebnf_grammar
fuzzer = GrammarCoverageFuzzer(convert_ebnf_grammar(option_ebnf_grammar))
[fuzzer.fuzz() for i in range(3)]
###Output
_____no_output_____
###Markdown
The `OptionFuzzer` class summarizes these steps. Its constructor takes an `OptionRunner` to automatically extract the grammar; it does the necessary steps to extract the grammar and fuzz with it.
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
autopep8_fuzzer = OptionFuzzer(autopep8_runner)
[autopep8_fuzzer.fuzz() for i in range(3)]
###Output
_____no_output_____
###Markdown
The final step in testing would now be to invoke the program with these arguments. Note that `OptionRunner` is experimental: It assumes that the Python program in question uses the `argparse` module; and not all `argparse` features are supported. Still, it does a pretty good job even on nontrivial programs. The `OptionRunner` constructor accepts an additional `miner_class` keyword parameter, which takes the class of the argument grammar miner to be used. By default, this is `OptionGrammarMiner` – a helper class that can be used (and extended) to create your own option grammar miners.
###Code
# ignore
from ClassDiagram import display_class_hierarchy
from Fuzzer import Fuzzer, Runner, ProgramRunner
from Grammars import Expansion
from GrammarFuzzer import GrammarFuzzer, DerivationTree
from GrammarCoverageFuzzer import TrackingGrammarCoverageFuzzer
# ignore
display_class_hierarchy([OptionRunner, OptionFuzzer, OptionGrammarMiner],
public_methods=[
Fuzzer.__init__,
Fuzzer.fuzz,
Fuzzer.run,
Fuzzer.runs,
GrammarFuzzer.__init__,
GrammarFuzzer.fuzz,
GrammarFuzzer.fuzz_tree,
TrackingGrammarCoverageFuzzer.__init__,
OptionFuzzer.__init__,
OptionFuzzer.run,
Runner.__init__,
Runner.run,
ProgramRunner.__init__,
OptionRunner.__init__,
OptionRunner.ebnf_grammar,
OptionRunner.grammar,
OptionGrammarMiner.__init__,
OptionGrammarMiner.mine_ebnf_grammar,
OptionGrammarMiner.mine_grammar,
],
types={
'DerivationTree': DerivationTree,
'Expansion': Expansion,
'Grammar': Grammar
},
project='fuzzingbook')
###Output
_____no_output_____
###Markdown
Lessons Learned* Besides regular input data, program _configurations_ make an important testing target.* For a given program using a standard library to parse command-line options and arguments, one can automatically extract these and convert them into a grammar.* To cover not only single options, but combinations of options, one can expand the grammar to cover all pairs, or come up with even more ambitious targets. Next StepsIf you liked the idea of mining a grammar from a program, do not miss:* [how to mine grammars for input data](GrammarMiner.ipynb) Our next steps in the book focus on:* [how to parse and recombine inputs](Parser.ipynb)* [how to assign weights and probabilities to specific productions](ProbabilisticGrammarFuzzer.ipynb)* [how to simplify inputs that cause a failure](Reducer.ipynb) BackgroundAlthough configuration data is just as likely to cause failures as other input data, it has received relatively little attention in test generation – possibly because, unlike "regular" input data, configuration data is not so much under control of external parties, and because, again unlike regular data, there is little variance in configurations. Creating models for software configurations and using these models for testing is commonplace, as is the idea of pairwise testing. For an overview, see \cite{Pezze2008}; for a discussion and comparison of state-of-the-art techniques, see \cite{Petke2015}.More specifically, \cite{Sutton2007} also discuss techniques to systematically cover command-line options. Dai et al. \cite{Dai2010} apply configuration fuzzing by changing variables associated with configuration files. Exercises Exercise 1: ifdef Configuration FuzzingIn C programs, the *C preprocessor* can be used to choose which code parts should be compiled and which ones should not. As an example, in the C code```Cifdef LONG_FOOlong foo() { ... }elseint foo() { ... }endif```the compiler will compile the function `foo()` with return type`long` if the _preprocessor variable_ `LONG_FOO` is defined, and with return type `int` if not. Such preprocessor variables are either set in the source files (using `define`, as in `define LONG_FOO`) or on the C compiler command line (using `-D` or `-D=`, as in `-DLONG_FOO`. Such *conditional compilation* is used to configure C programs towards their environment. System-specific code can contain lots of conditional compilation. As an example, consider this excerpt of `xmlparse.c`, the XML parser that is part of the Python runtime library:```cif defined(_WIN32) && !defined(LOAD_LIBRARY_SEARCH_SYSTEM32) define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800endifif !defined(HAVE_GETRANDOM) && !defined(HAVE_SYSCALL_GETRANDOM) \ && !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_ARC4RANDOM) \ && !defined(XML_DEV_URANDOM) \ && !defined(_WIN32) \ && !defined(XML_POOR_ENTROPY) errorendifif !defined(TIOCSWINSZ) || defined(__SCO__) || defined(__UNIXWARE__)define USE_SYSV_ENVVARS /* COLUMNS/LINES vs. TERMCAP */endififdef XML_UNICODE_WCHAR_Tdefine XML_T(x) (const wchar_t)xdefine XML_L(x) L xelsedefine XML_T(x) (const unsigned short)xdefine XML_L(x) xendifint fun(int x) { return XML_T(x); }``` A typical configuration for the C preprocessor on the above code could be `cc -c -D_WIN32 -DXML_POOR_ENTROPY -DXML_UNICODE_WCHAR_T xmlparse.c`, defining the given preprocessor variables and selecting the appropriate code fragments. 
Since the compiler can only compile one configuration at a time (implying that we can also only _test_ one resulting executable at a time), your task is to find out which of these configurations actually compile. To this end, proceed in three steps. Part 1: Extract Preprocessor VariablesWrite a _function_ `cpp_identifiers()` that, given a set of lines (say, from `open(filename).readlines()`), extracts all preprocessor variables referenced in `#if` or `#ifdef` preprocessor instructions. Apply `cpp_identifiers()` on the sample C input above, such that```pythoncpp_identifiers(open("xmlparse.c").readlines()) ```returns the set```python{'_WIN32', 'LOAD_LIBRARY_SEARCH_SYSTEM32', 'HAVE_GETRANDOM', 'HAVE_SYSCALL_GETRANDOM', 'HAVE_ARC4RANDOM_BUF', ...}``` **Solution.** Let us start with creating a sample input file, `xmlparse.c`:
###Code
filename = "xmlparse.c"
open(filename, "w").write(
"""
#if defined(_WIN32) && !defined(LOAD_LIBRARY_SEARCH_SYSTEM32)
# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800
#endif
#if !defined(HAVE_GETRANDOM) && !defined(HAVE_SYSCALL_GETRANDOM) \
&& !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_ARC4RANDOM) \
&& !defined(XML_DEV_URANDOM) \
&& !defined(_WIN32) \
&& !defined(XML_POOR_ENTROPY)
# error
#endif
#if !defined(TIOCSWINSZ) || defined(__SCO__) || defined(__UNIXWARE__)
#define USE_SYSV_ENVVARS /* COLUMNS/LINES vs. TERMCAP */
#endif
#ifdef XML_UNICODE_WCHAR_T
#define XML_T(x) (const wchar_t)x
#define XML_L(x) L ## x
#else
#define XML_T(x) (const unsigned short)x
#define XML_L(x) x
#endif
int fun(int x) { return XML_T(x); }
""");
###Output
_____no_output_____
###Markdown
To find C preprocessor `if` directives and preprocessor variables, we use regular expressions matching them.
###Code
import re
re_cpp_if_directive = re.compile(r"\s*#\s*(el)?if")
re_cpp_identifier = re.compile(r"[a-zA-Z_$]+")
def cpp_identifiers(lines):
identifiers = set()
for line in lines:
if re_cpp_if_directive.match(line):
identifiers |= set(re_cpp_identifier.findall(line))
# These are preprocessor keywords
identifiers -= {"if", "ifdef", "ifndef", "defined"}
return identifiers
cpp_ids = cpp_identifiers(open("xmlparse.c").readlines())
cpp_ids
###Output
_____no_output_____
###Markdown
Part 2: Derive an Option GrammarWith the help of `cpp_identifiers()`, create a grammar which has C compiler invocations with a list of options, where each option takes the form `-D<variable>` for a preprocessor variable `<variable>`. Using this grammar `cpp_grammar`, a fuzzer ```pythong = GrammarCoverageFuzzer(cpp_grammar)```would create C compiler invocations such as```python[g.fuzz() for i in range(10)]['cc -DHAVE_SYSCALL_GETRANDOM xmlparse.c', 'cc -D__SCO__ -DRANDOM_BUF -DXML_UNICODE_WCHAR_T -D__UNIXWARE__ xmlparse.c', 'cc -DXML_POOR_ENTROPY xmlparse.c', 'cc -DRANDOM xmlparse.c', 'cc -D_WIN xmlparse.c', 'cc -DHAVE_ARC xmlparse.c', ...]``` **Solution.** This is not very difficult:
###Code
from Grammars import Grammar, is_valid_grammar
cpp_grammar: Grammar = {
"<start>": ["cc -c<options> " + filename],
"<options>": ["<option>", "<options><option>"],
"<option>": []
}
for id in cpp_ids:
s = new_symbol(cpp_grammar, "<" + id + ">")
cpp_grammar["<option>"].append(s)
cpp_grammar[s] = [" -D" + id]
assert is_valid_grammar(cpp_grammar)
cpp_grammar
###Output
_____no_output_____
###Markdown
Part 3: C Preprocessor Configuration FuzzingUsing the grammar just produced, use a `GrammarCoverageFuzzer` to1. Test each preprocessor variable individually2. Test each pair of preprocessor variables, using `pairwise()`.What happens if you actually run the invocations? **Solution.** We can simply run the coverage fuzzer, as described above.
###Code
g = GrammarCoverageFuzzer(cpp_grammar)
g.fuzz()
from Fuzzer import ProgramRunner
for i in range(10):
invocation = g.fuzz()
print("$", invocation)
# subprocess.call(invocation, shell=True)
cc_runner = ProgramRunner(invocation.split(' '))
(result, outcome) = cc_runner.run()
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
To test all pairs, we can use `pairwise()`:
###Code
pairwise_cpp_grammar = extend_grammar(cpp_grammar)
pairwise_cpp_grammar["<option>"] = pairwise(cpp_grammar["<option>"])
pairwise_cpp_grammar["<option>"][:10]
pairwise_g = GrammarCoverageFuzzer(pairwise_cpp_grammar)
for i in range(10):
    invocation = pairwise_g.fuzz()
print("$", invocation)
# subprocess.call(invocation, shell=True)
cc_runner = ProgramRunner(invocation.split(' '))
(result, outcome) = cc_runner.run()
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
Some of the compilation errors we get could be expected – for instance, defining `XML_UNICODE_WCHAR_T` when actually, the type is not supported in our environment. Other errors may not be expected – and it is these errors we would find through systematic configuration fuzzing, as described above. At the end, don't forget to clean up:
###Code
os.remove("xmlparse.c")
if os.path.exists("xmlparse.o"):
os.remove("xmlparse.o")
###Output
_____no_output_____
###Markdown
Exercise 2: .ini Configuration FuzzingBesides command-line options, another important source of configurations are _configuration files_. In this exercise, we will consider the very simple configuration language provided by the Python `ConfigParser` module, which is very similar to what is found in Microsoft Windows _.ini_ files. The following example for a `ConfigParser` input file stems right from [the ConfigParser documentation](https://docs.python.org/3/library/configparser.html):```[DEFAULT]ServerAliveInterval = 45Compression = yesCompressionLevel = 9ForwardX11 = yes[bitbucket.org]User = hg[topsecret.server.com]Port = 50022ForwardX11 = no``` The above `ConfigParser` file can be created programmatically:
###Code
import configparser
config = configparser.ConfigParser()
config['DEFAULT'] = {'ServerAliveInterval': '45',
'Compression': 'yes',
'CompressionLevel': '9'}
config['bitbucket.org'] = {}
config['bitbucket.org']['User'] = 'hg'
config['topsecret.server.com'] = {}
topsecret = config['topsecret.server.com']
topsecret['Port'] = '50022' # mutates the parser
topsecret['ForwardX11'] = 'no' # same here
config['DEFAULT']['ForwardX11'] = 'yes'
with open('example.ini', 'w') as configfile:
config.write(configfile)
with open('example.ini') as configfile:
print(configfile.read(), end="")
###Output
_____no_output_____
###Markdown
and be read in again:
###Code
config = configparser.ConfigParser()
config.read('example.ini')
topsecret = config['topsecret.server.com']
topsecret['Port']
###Output
_____no_output_____
###Markdown
Part 1: Read ConfigurationUsing `configparser`, create a program reading in the above configuration file and accessing the individual elements. Part 2: Create a Configuration GrammarDesign a grammar that will automatically create configuration files suitable for your above program. Fuzz your program with it. Part 3: Mine a Configuration GrammarBy dynamically tracking the individual accesses to configuration elements, you can again extract a basic grammar from the execution. To this end, create a subclass of `ConfigParser` with a special method `__getitem__`:
###Code
class TrackingConfigParser(configparser.ConfigParser):
def __getitem__(self, key):
print("Accessing", repr(key))
return super().__getitem__(key)
###Output
_____no_output_____
###Markdown
For a `TrackingConfigParser` object `p`, `p.__getitem__(key)` will be invoked whenever `p[key]` is accessed:
###Code
tracking_config_parser = TrackingConfigParser()
tracking_config_parser.read('example.ini')
section = tracking_config_parser['topsecret.server.com']
###Output
_____no_output_____
###Markdown
Using `__getitem__()`, as above, implement a tracking mechanism that, while your program accesses the read configuration, automatically saves options accessed and values read. Create a prototype grammar from these values; use it for fuzzing. At the end, don't forget to clean up:
###Code
import os
os.remove("example.ini")
###Output
_____no_output_____
###Markdown
**Solution.** Left to the reader. Enjoy! Exercise 3: Extracting and Fuzzing C Command-Line OptionsIn C programs, the `getopt()` function are frequently used to process configuration options. A call```getopt(argc, argv, "bf:")```indicates that the program accepts two options `-b` and `-f`, with `-f` taking an argument (as indicated by the following colon). Part 1: Getopt FuzzingWrite a framework which, for a given C program, automatically extracts the argument to `getopt()` and derives a fuzzing grammar for it. There are multiple ways to achieve this:1. Scan the program source code for occurrences of `getopt()` and return the string passed. (Crude, but should frequently work.)2. Insert your own implementation of `getopt()` into the source code (effectively replacing `getopt()` from the runtime library), which outputs the `getopt()` argument and exits the program. Recompile and run.3. (Advanced.) As above, but instead of changing the source code, hook into the _dynamic linker_ which at runtime links the program with the C runtime library. Set the library loading path (on Linux and Unix, this is the `LD_LIBRARY_PATH` environment variable) such that your own version of `getopt()` is linked first, and the regular libraries later. Executing the program (without recompiling) should yield the desired result.Apply this on `grep` and `ls`; report the resulting grammars and results. **Solution.** Left to the reader. Enjoy hacking! Part 2: Fuzzing Long Options in CSame as Part 1, but also hook into the GNU variant `getopt_long()`, which accepts "long" arguments with double dashes such as `--help`. Note that method 1, above, will not work here, since the "long" options are defined in a separately defined structure. **Solution.** Left to the reader. Enjoy hacking! Exercise 4: Expansions in ContextIn our above option configurations, we have multiple symbols which all expand to the same integer. For instance, the `--line-range` option of `autopep8` takes two `` parameters which both expand into the same `` symbol:``` ::= ... | --line-range | ... ::= ::= (-)?+ ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9```
###Code
autopep8_runner.ebnf_grammar()["<line>"]
autopep8_runner.ebnf_grammar()["<int>"]
autopep8_runner.ebnf_grammar()["<digit>"]
###Output
_____no_output_____
###Markdown
Testing ConfigurationsThe behavior of a program is not only governed by its data. The _configuration_ of a program – that is, the settings that govern the execution of a program on its (regular) input data, as set by options or configuration files – just as well influences behavior, and thus can and should be tested. In this chapter, we explore how to systematically _test_ and _cover_ software configurations. By _automatically inferring configuration options_, we can apply these techniques out of the box, with no need for writing a grammar. Finally, we show how to systematically cover _combinations_ of configuration options, quickly detecting unwanted interferences. **Prerequisites*** You should have read the [chapter on grammars](Grammars.ipynb).* You should have read the [chapter on grammar coverage](GrammarCoverageFuzzer.ipynb). SynopsisTo [use the code provided in this chapter](Importing.ipynb), write```python>>> from fuzzingbook.ConfigurationFuzzer import ```and then make use of the following features.This chapter provides two classes:* `OptionRunner` automatically extract command-line options from a Python program;* `OptionFuzzer` uses these to automatically test a Python program with a large variety of options.`OptionRunner` runs a program up to the point where it parses its arguments, and then extracts a grammar that describes its invocations:```python>>> autopep8_runner = OptionRunner("autopep8", "foo.py")```The grammar can be extracted via the method `ebnf_grammar()`:```python>>> option_ebnf_grammar = autopep8_runner.ebnf_grammar()>>> print(option_ebnf_grammar){'': ['()*'], '': [' -h', ' --help', ' --version', ' -v', ' --verbose', ' -d', ' --diff', ' -i', ' --in-place', ' --global-config ', ' --ignore-local-config', ' -r', ' --recursive', ' -j ', ' --jobs ', ' -p ', ' --pep8-passes ', ' -a', ' --aggressive', ' --experimental', ' --exclude ', ' --list-fixes', ' --ignore ', ' --select ', ' --max-line-length ', ' --line-range ', ' --range ', ' --indent-size ', ' --hang-closing'], '': [' foo.py'], '': ['+'], '': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '!', '"', '', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '', '?', '@', '[', '\\', ']', '^', '_', '`', '{', '|', '}', '~'], '': [''], '': ['(-)?+'], '': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], '': [''], '': [''], '': [''], '': ['']}```The grammar can be immediately used for fuzzing. A `GrammarCoverageFuzzer` will ensure all options are covered:```python>>> from Grammars import convert_ebnf_grammar>>> fuzzer = GrammarCoverageFuzzer(convert_ebnf_grammar(option_ebnf_grammar))>>> [fuzzer.fuzz() for i in range(3)][' foo.py', ' --indent-size 54 --diff --global-config k --select &, --list-fixes -a --hang-closing --range 0 72 --ignore-local-config -p 8 --version -d --experimental foo.py', ' --ignore i --jobs -16 --verbose -v --line-range -3 9 -r --help --max-line-length 8 -h --aggressive --recursive --exclude qE" --in-place -j -979 -i --pep8-passes 4 --version --in-place --aggressive --version foo.py']```The `OptionFuzzer` class summarizes these steps. 
Its constructor takes an `OptionRunner` to automatically extract the grammar; it does the necessary steps to extract the grammar and fuzz with it.```python>>> autopep8_runner = OptionRunner("autopep8", "foo.py")>>> autopep8_fuzzer = OptionFuzzer(autopep8_runner)>>> [autopep8_fuzzer.fuzz() for i in range(3)][' foo.py', ' --range 46 -1 --recursive -d --select <6 --exclude :" --global-config UVE --help --aggressive --experimental -r --line-range -7 -9 --version -i -h --indent-size -05 --max-line-length 8 --in-place --verbose --jobs -32 --ignore-local-config -v -p -1 --hang-closing -j 38 -a --list-fixes --pep8-passes 67 --diff --ignore v --select I --ignore (1NJ --ignore Km --ignore ? --select ^kZ --global-config y --select ia]9 --exclude o --ignore R!4GP.x8/ --ignore D --exclude 7 --exclude Bd -a --recursive --verbose foo.py', " --ignore \\ --global-config l --global-config @ --ignore ,CM~& --ignore nb --select c --global-config zgW --ignore $`s{H --global-config - --exclude 2| --select O --exclude 0 --exclude * --ignore qA'F}X --global-config p>_r+ --global-config eQ --exclude [ --ignore t --select h) --select %f --exclude u3;=TL --global-config w --ignore j5 --exclude Y --ignore S --ignore ]J --global-config 1 --ignore-local-config --max-line-length 36693 -i foo.py"]```The final step in testing would now to invoke the program with these arguments.Note that `OptionRunner` is experimental: It assumes that the Python program in question uses the `argparse` module; and not all `argparse` features are supported. Still, it does a pretty good job even on nontrivial programs. Configuration OptionsWhen we talk about the input to a program, we usually think of the _data_ it processes. This is also what we have been fuzzing in the past chapters – be it with [random input](Fuzzer.ipynb), [mutation-based fuzzing](MutationFuzzer.ipynb), or [grammar-based fuzzing](GrammarFuzzer.ipynb). However, programs typically have several input sources, all of which can and should be tested – and included in test generation. One important source of input is the program's _configuration_ – that is, a set of inputs that typically is set once when beginning to process data and then stays constant while processing data, while the program is running, or even while the program is deployed. Such a configuration is frequently set in _configuration files_ (for instance, as key/value pairs); the most ubiquitous method for command-line tools, though, are _configuration options_ on the command line. As an example, consider the `grep` utility to find textual patterns in files. The exact mode by which `grep` works is governed by a multitude of options, which can be listed by providing a `--help` option:
###Code
!grep --help
###Output
_____no_output_____
###Markdown
All these options need to be tested for whether they operate correctly. In security testing, any such option may also trigger a yet unknown vulnerability. Hence, such options can become _fuzz targets_ on their own. In this chapter, we analyze how to systematically test such options – and better yet, how to extract possible configurations right out of given program files, such that we do not have to specify anything. Options in PythonLet us stick to our common programming language here and examine how options are processed in Python. The `argparse` module provides a parser for command-line arguments (and options) with great functionality – and great complexity. You start by defining a parser (`argparse.ArgumentParser()`) to which individual arguments with various features are added, one after another. Additional parameters for each argument can specify the type (`type`) of the argument (say, integers or strings), or the number of arguments (`nargs`). By default, arguments are stored under their name in the `args` object coming from `parse_args()` – thus, `args.integers` holds the `integers` arguments added earlier. Special actions (`actions`) allow to store specific values in given variables; the `store_const` action stores the given `const` in the attribute named by `dest`. The following example takes a number of integer arguments (`integers`) as well as an operator (`--sum`, `--min`, or `--max`) to be applied on these integers. The operators all store a function reference in the `accumulate` attribute, which is finally invoked on the integers parsed:
###Code
import argparse
def process_numbers(args=[]):
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('integers', metavar='N', type=int, nargs='+',
help='an integer for the accumulator')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--sum', dest='accumulate', action='store_const',
const=sum,
help='sum the integers')
group.add_argument('--min', dest='accumulate', action='store_const',
const=min,
help='compute the minimum')
group.add_argument('--max', dest='accumulate', action='store_const',
const=max,
help='compute the maximum')
args = parser.parse_args(args)
print(args.accumulate(args.integers))
###Output
_____no_output_____
###Markdown
Here's how `process_numbers()` works. We can, for instance, invoke the `--min` option on the given arguments to compute the minimum:
###Code
process_numbers(["--min", "100", "200", "300"])
###Output
_____no_output_____
###Markdown
Or compute the sum of three numbers:
###Code
process_numbers(["--sum", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
When defined via `add_mutually_exclusive_group()` (as above), options are mutually exclusive. Consequently, we can have only one operator:
###Code
import fuzzingbook_utils
from ExpectError import ExpectError
with ExpectError(print_traceback=False):
process_numbers(["--sum", "--max", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
A Grammar for ConfigurationsHow can we test a system with several options? The easiest answer is to write a grammar for it. The grammar `PROCESS_NUMBERS_EBNF_GRAMMAR` reflects the possible combinations of options and arguments:
###Code
from Grammars import crange, srange, convert_ebnf_grammar, extend_grammar, is_valid_grammar
from Grammars import START_SYMBOL, new_symbol
PROCESS_NUMBERS_EBNF_GRAMMAR = {
"<start>": ["<operator> <integers>"],
"<operator>": ["--sum", "--min", "--max"],
"<integers>": ["<integer>", "<integers> <integer>"],
"<integer>": ["<digit>+"],
"<digit>": crange('0', '9')
}
assert is_valid_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
PROCESS_NUMBERS_GRAMMAR = convert_ebnf_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
###Output
_____no_output_____
###Markdown
We can feed this grammar into our [grammar coverage fuzzer](GrammarCoverageFuzzer.ipynb) and have it cover one option after another:
###Code
from GrammarCoverageFuzzer import GrammarCoverageFuzzer
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Of course, we can also invoke `process_numbers()` with these very arguments. To this end, we need to convert the string produced by the grammar back into a list of individual arguments, using `split()`:
###Code
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
args = f.fuzz().split()
print(args)
process_numbers(args)
###Output
_____no_output_____
###Markdown
In a similar way, we can define grammars for any program to be tested; as well as define grammars for, say, configuration files. Yet, the grammar has to be updated with every change to the program, which creates a maintenance burden. Given that the information required for the grammar is already all encoded in the program, the question arises: _Can't we go and extract configuration options right out of the program in the first place?_ Mining Configuration OptionsIn this section, we try to extract option and argument information right out of a program, such that we do not have to specify a configuration grammar. The aim is to have a configuration fuzzer that works on the options and arguments of an arbitrary program, as long as it follows specific conventions for processing its arguments. In the case of Python programs, this means using the `argparse` module.Our idea is as follows: We execute the given program up to the point where the arguments are actually parsed – that is, `argparse.parse_args()` is invoked. Up to this point, we track all calls into the argument parser, notably those calls that define arguments and options (`add_argument()`). From these, we construct the grammar. Tracking ArgumentsLet us illustrate this approach with a simple experiment: We define a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while `process_numbers` is invoked. If we have a call to a method `add_argument`, we access and print out the local variables (which at this point are the arguments to the method).
###Code
import sys
import string
def traceit(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(method_name, locals)
###Output
_____no_output_____
###Markdown
What we get is a list of all calls to `add_argument()`, together with the method arguments passed:
###Code
sys.settrace(traceit)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
###Markdown
From the `args` argument, we can access the individual options and arguments to be defined:
###Code
def traceit(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(locals['args'])
sys.settrace(traceit)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
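###Markdown
The keyword arguments are just as informative; here is an illustrative variant of the trace function that prints them alongside the positional arguments, revealing `type`, `nargs`, and `action` settings:
###Code
def traceit_kwargs(frame, event, arg):
    if event != "call":
        return
    if frame.f_code.co_name != "add_argument":
        return
    print(frame.f_locals['args'], frame.f_locals['kwargs'])

sys.settrace(traceit_kwargs)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____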
###Markdown
We see that each argument comes as a tuple with one (say, `integers` or `--sum`) or two members (`-h` and `--help`), which denote alternate forms for the same option. Our job will be to go through the arguments of `add_argument()` and detect not only the names of options and arguments, but also whether they accept additional parameters, as well as the type of the parameters. A Grammar Miner for Options and Arguments Let us now build a class that gathers all this information to create a grammar. We use the `ParseInterrupt` exception to interrupt program execution after gathering all arguments and options:
###Code
class ParseInterrupt(Exception):
pass
###Output
_____no_output_____
###Markdown
The class `OptionGrammarMiner` takes an executable function for which the grammar of options and arguments is to be mined:
###Code
class OptionGrammarMiner(object):
def __init__(self, function, log=False):
self.function = function
self.log = log
###Output
_____no_output_____
###Markdown
The method `mine_ebnf_grammar()` is where everything happens. It creates a grammar of the form```<start> ::= (<option>)* <arguments>```with initially empty expansion lists for `<option>` and `<arguments>`, in which the options and arguments will be collected. It then sets a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while the previously defined `function` is invoked. Raising `ParseInterrupt` (when `parse_args()` is invoked) ends execution.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
OPTION_SYMBOL = "<option>"
ARGUMENTS_SYMBOL = "<arguments>"
def mine_ebnf_grammar(self):
self.grammar = {
START_SYMBOL: ["(" + self.OPTION_SYMBOL + ")*" + self.ARGUMENTS_SYMBOL],
self.OPTION_SYMBOL: [],
self.ARGUMENTS_SYMBOL: []
}
self.current_group = self.OPTION_SYMBOL
        old_trace = sys.gettrace()
        sys.settrace(self.traceit)
try:
self.function()
except ParseInterrupt:
pass
sys.settrace(old_trace)
return self.grammar
def mine_grammar(self):
return convert_ebnf_grammar(self.mine_ebnf_grammar())
###Output
_____no_output_____
###Markdown
The trace function checks for four methods: `add_argument()` is the most important function, resulting in processing arguments; `frame.f_locals` again is the set of local variables, which at this point is mostly the arguments to `add_argument()`. Since mutually exclusive groups also have a method `add_argument()`, we set the flag `in_group` to differentiate. Note that we make no specific efforts to differentiate between multiple parsers or groups; we simply assume that there is one parser, and at any point at most one mutually exclusive group.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def traceit(self, frame, event, arg):
if event != "call":
return
if "self" not in frame.f_locals:
return
self_var = frame.f_locals["self"]
method_name = frame.f_code.co_name
if method_name == "add_argument":
in_group = repr(type(self_var)).find("Group") >= 0
self.process_argument(frame.f_locals, in_group)
elif method_name == "add_mutually_exclusive_group":
self.add_group(frame.f_locals, exclusive=True)
elif method_name == "add_argument_group":
# self.add_group(frame.f_locals, exclusive=False)
pass
elif method_name == "parse_args":
raise ParseInterrupt
return None
###Output
_____no_output_____
###Markdown
The `process_argument()` method now analyzes the arguments passed and adds them to the grammar:* If the argument starts with `-`, it gets added as an optional element to the `<option>` list* Otherwise, it gets added to the `<arguments>` list.The optional `nargs` argument specifies how many arguments can follow. If it is a number, we add the appropriate number of elements to the grammar; if it is an abstract specifier (say, `+` or `*`), we use it directly as an EBNF operator.Given the large number of parameters and optional behavior, this is a somewhat messy function, but it does the job.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def process_argument(self, locals, in_group):
args = locals["args"]
kwargs = locals["kwargs"]
if self.log:
print(args)
print(kwargs)
print()
for arg in args:
self.process_arg(arg, in_group, kwargs)
class OptionGrammarMiner(OptionGrammarMiner):
def process_arg(self, arg, in_group, kwargs):
if arg.startswith('-'):
if not in_group:
target = self.OPTION_SYMBOL
else:
target = self.current_group
metavar = None
arg = " " + arg
else:
target = self.ARGUMENTS_SYMBOL
metavar = arg
arg = ""
if "nargs" in kwargs:
nargs = kwargs["nargs"]
else:
nargs = 1
param = self.add_parameter(kwargs, metavar)
if param == "":
nargs = 0
if isinstance(nargs, int):
for i in range(nargs):
arg += param
else:
assert nargs in "?+*"
arg += '(' + param + ')' + nargs
if target == self.OPTION_SYMBOL:
self.grammar[target].append(arg)
else:
self.grammar[target].append(arg)
###Output
_____no_output_____
###Markdown
The method `add_parameter()` handles possible parameters of options. If the argument has an `action` defined, it takes no parameter. Otherwise, we identify the type of the parameter (as `int` or `str`) and augment the grammar with an appropriate rule.
###Code
import inspect
class OptionGrammarMiner(OptionGrammarMiner):
def add_parameter(self, kwargs, metavar):
if "action" in kwargs:
# No parameter
return ""
type_ = "str"
if "type" in kwargs:
given_type = kwargs["type"]
# int types come as '<class int>'
if inspect.isclass(given_type) and issubclass(given_type, int):
type_ = "int"
if metavar is None:
if "metavar" in kwargs:
metavar = kwargs["metavar"]
else:
metavar = type_
self.add_type_rule(type_)
if metavar != type_:
self.add_metavar_rule(metavar, type_)
param = " <" + metavar + ">"
return param
###Output
_____no_output_____
###Markdown
The method `add_type_rule()` adds a rule for parameter types to the grammar. If the parameter is identified by a meta-variable (say, `N`), we add a rule for this as well to improve legibility.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_type_rule(self, type_):
if type_ == "int":
self.add_int_rule()
else:
self.add_str_rule()
def add_int_rule(self):
self.grammar["<int>"] = ["(-)?<digit>+"]
self.grammar["<digit>"] = crange('0', '9')
def add_str_rule(self):
self.grammar["<str>"] = ["<char>+"]
self.grammar["<char>"] = srange(
string.digits
+ string.ascii_letters
+ string.punctuation)
def add_metavar_rule(self, metavar, type_):
self.grammar["<" + metavar + ">"] = ["<" + type_ + ">"]
###Output
_____no_output_____
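###Markdown
To see `add_parameter()` at work in isolation, here is a quick illustration with a few made-up keyword argument sets. We hand the miner a throwaway function and an empty grammar, since `add_parameter()` only needs a dictionary to write its type rules into; this is a sketch, not part of the mining pipeline.
###Code
demo_miner = OptionGrammarMiner(lambda: None)  # the function is never invoked here
demo_miner.grammar = {}
for kwargs in [{"action": "store_true"},       # a flag: no parameter
               {"type": int},                  # an integer parameter
               {"type": int, "metavar": "N"},  # an integer with metavariable N
               {}]:                            # untyped: a string parameter
    print(kwargs, "->", repr(demo_miner.add_parameter(kwargs, metavar=None)))
print(sorted(demo_miner.grammar.keys()))
###Output
_____no_output_____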
###Markdown
The method `add_group()` adds a new mutually exclusive group to the grammar. We define a new symbol (say, `<group>`) for the options added to the group, and use the `required` and `exclusive` flags to choose an appropriate expansion operator. The group is then prefixed to the start symbol, as in```<start> ::= <group>(<option>)*<arguments>```(possibly with a `?`, `+`, or `*` operator attached to `<group>`, depending on the flags), and the new, initially empty `<group>` rule is filled with the next calls to `add_argument()` within the group.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_group(self, locals, exclusive):
kwargs = locals["kwargs"]
if self.log:
print(kwargs)
required = kwargs.get("required", False)
group = new_symbol(self.grammar, "<group>")
if required and exclusive:
group_expansion = group
if required and not exclusive:
group_expansion = group + "+"
if not required and exclusive:
group_expansion = group + "?"
if not required and not exclusive:
group_expansion = group + "*"
self.grammar[START_SYMBOL][0] = group_expansion + \
self.grammar[START_SYMBOL][0]
self.grammar[group] = []
self.current_group = group
###Output
_____no_output_____
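###Markdown
The four `required`/`exclusive` combinations map onto EBNF operators as follows; this little table merely restates the branches of `add_group()` above.
###Code
for required in [True, False]:
    for exclusive in [True, False]:
        # The same mapping as in add_group() above
        op = {(True, True): "",    # exactly one option from the group
              (True, False): "+",  # one or more
              (False, True): "?",  # at most one
              (False, False): "*"  # any number
              }[(required, exclusive)]
        print("required=%-5s exclusive=%-5s -> <group>%s" %
              (required, exclusive, op))
###Output
_____no_output_____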
###Markdown
That's it! With this, we can now extract the grammar from our `process_numbers()` program. Turning on logging again reveals the variables we draw upon.
###Code
miner = OptionGrammarMiner(process_numbers, log=True)
process_numbers_grammar = miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
Here is the extracted grammar:
###Code
process_numbers_grammar
###Output
_____no_output_____
###Markdown
The grammar properly identifies the group found:
###Code
process_numbers_grammar["<start>"]
process_numbers_grammar["<group>"]
###Output
_____no_output_____
###Markdown
It also identifies a `--help` option provided not by us, but by the `argparse` module:
###Code
process_numbers_grammar["<option>"]
###Output
_____no_output_____
###Markdown
The grammar also correctly identifies the types of the arguments:
###Code
process_numbers_grammar["<arguments>"]
process_numbers_grammar["<integers>"]
###Output
_____no_output_____
###Markdown
The rules for `int` are set as defined by `add_int_rule()`:
###Code
process_numbers_grammar["<int>"]
###Output
_____no_output_____
###Markdown
We can take this grammar and convert it to BNF, such that we can fuzz with it right away:
###Code
assert is_valid_grammar(process_numbers_grammar)
grammar = convert_ebnf_grammar(process_numbers_grammar)
assert is_valid_grammar(grammar)
f = GrammarCoverageFuzzer(grammar)
for i in range(10):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Each and every invocation adheres to the rules as set forth in the `argparse` calls. By mining options and arguments from existing programs, we can now fuzz these options out of the box – without having to specify a grammar. Testing Autopep8 Let us try out the option grammar miner on real-world Python programs. `autopep8` is a tool that automatically converts Python code to the [PEP 8 Style Guide for Python Code](https://www.python.org/dev/peps/pep-0008/). (Actually, all Python code in this book runs through `autopep8` during production.) `autopep8` offers a wide range of options, as can be seen by invoking it with `--help`:
###Code
!autopep8 --help
###Output
_____no_output_____
###Markdown
Autopep8 SetupWe want to systematically test these options. In order to deploy our configuration grammar miner, we need to find the source code of the executable:
###Code
import os
def find_executable(name):
for path in os.get_exec_path():
qualified_name = os.path.join(path, name)
if os.path.exists(qualified_name):
return qualified_name
return None
autopep8_executable = find_executable("autopep8")
assert autopep8_executable is not None
autopep8_executable
###Output
_____no_output_____
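###Markdown
The same lookup works for any command on the search path – for instance (assuming a `python3` binary is installed on this system):
###Code
find_executable("python3")
###Output
_____no_output_____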
###Markdown
Next, we build a function that reads the contents of the file and executes it.
###Code
def autopep8():
executable = find_executable("autopep8")
# First line has to contain "/usr/bin/env python" or like
first_line = open(executable).readline()
assert first_line.find("python") >= 0
contents = open(executable).read()
exec(contents)
###Output
_____no_output_____
###Markdown
Mining an Autopep8 GrammarWe can use the `autopep8()` function in our grammar miner:
###Code
autopep8_miner = OptionGrammarMiner(autopep8)
###Output
_____no_output_____
###Markdown
and extract a grammar for it:
###Code
autopep8_ebnf_grammar = autopep8_miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
This works because here, `autopep8` is not a separate process (and a separate Python interpreter), but we run the `autopep8()` function (and the `autopep8` code) in our current Python interpreter – up to the call to `parse_args()`, where we interrupt execution again. At this point, the `autopep8` code has done nothing but setting up the argument parser – which is what we are interested in. The grammar options mined reflect precisely the options seen when providing `--help`:
###Code
print(autopep8_ebnf_grammar["<option>"])
###Output
_____no_output_____
###Markdown
Metavariables such as `<line>` are placeholders for integers. We assume all metavariables of the same name have the same type:
###Code
autopep8_ebnf_grammar["<line>"]
###Output
_____no_output_____
###Markdown
The grammar miner has inferred that the argument to `autopep8` is a list of files:
###Code
autopep8_ebnf_grammar["<arguments>"]
###Output
_____no_output_____
###Markdown
which in turn all are strings:
###Code
autopep8_ebnf_grammar["<files>"]
###Output
_____no_output_____
###Markdown
As we are only interested in testing options, not arguments, we fix the arguments to a single mandatory input. (Otherwise, we'd have plenty of random file names generated.)
###Code
autopep8_ebnf_grammar["<arguments>"] = [" <files>"]
autopep8_ebnf_grammar["<files>"] = ["foo.py"]
assert is_valid_grammar(autopep8_ebnf_grammar)
###Output
_____no_output_____
###Markdown
Creating Autopep8 Options Let us now use the inferred grammar for fuzzing. Again, we convert the EBNF grammar into a regular BNF grammar:
###Code
autopep8_grammar = convert_ebnf_grammar(autopep8_ebnf_grammar)
assert is_valid_grammar(autopep8_grammar)
###Output
_____no_output_____
###Markdown
And we can use the grammar for fuzzing all options:
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=4)
for i in range(20):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Let us apply these options on the actual program. We need a file `foo.py` that will serve as input:
###Code
def create_foo_py():
open("foo.py", "w").write("""
def twice(x = 2):
return x + x
""")
create_foo_py()
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We see how `autopep8` fixes the spacing:
###Code
!autopep8 foo.py
###Output
_____no_output_____
###Markdown
Let us now put things together. We define a `ProgramRunner` that will run the `autopep8` executable with arguments coming from the mined `autopep8` grammar.
###Code
from Fuzzer import ProgramRunner
###Output
_____no_output_____
###Markdown
Running `autopep8` with the mined options reveals a surprisingly high number of passing runs. (We see that some options depend on each other or are mutually exclusive, but this is handled by the program logic, not the argument parser, and hence out of our scope.) The `GrammarCoverageFuzzer` ensures that each option is tested at least once. (Digits and letters, too, by the way.)
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=5)
for i in range(20):
invocation = "autopep8" + f.fuzz()
print("$ " + invocation)
args = invocation.split()
autopep8 = ProgramRunner(args)
result, outcome = autopep8.run()
if result.stderr != "":
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
Our `foo.py` file now has been formatted in place a number of times:
###Code
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We don't need it anymore, so we clean up things:
###Code
import os
os.remove("foo.py")
###Output
_____no_output_____
###Markdown
Classes for Fuzzing Configuration OptionsLet us now create reusable classes that we can use for testing arbitrary programs. (Okay, make that "arbitrary programs that are written in Python and use the `argparse` module to process command-line arguments.") The class `OptionRunner` is a subclass of `ProgramRunner` that takes care of automatically determining the grammar, using the same steps we used for `autopep8`, above.
###Code
class OptionRunner(ProgramRunner):
def __init__(self, program, arguments=None):
if isinstance(program, str):
self.base_executable = program
else:
self.base_executable = program[0]
self.find_contents()
self.find_grammar()
if arguments is not None:
self.set_arguments(arguments)
super().__init__(program)
###Output
_____no_output_____
###Markdown
First, we find the contents of the Python executable:
###Code
class OptionRunner(OptionRunner):
def find_contents(self):
self._executable = find_executable(self.base_executable)
first_line = open(self._executable).readline()
assert first_line.find("python") >= 0
self.contents = open(self._executable).read()
def invoker(self):
exec(self.contents)
def executable(self):
return self._executable
###Output
_____no_output_____
###Markdown
Next, we determine the grammar using the `OptionGrammarMiner` class:
###Code
class OptionRunner(OptionRunner):
def find_grammar(self):
miner = OptionGrammarMiner(self.invoker)
self._ebnf_grammar = miner.mine_ebnf_grammar()
def ebnf_grammar(self):
return self._ebnf_grammar
def grammar(self):
return convert_ebnf_grammar(self._ebnf_grammar)
###Output
_____no_output_____
###Markdown
The two service methods `set_arguments()` and `set_invocation()` help us to change the arguments and program, respectively.
###Code
from Grammars import unreachable_nonterminals
class OptionRunner(OptionRunner):
def set_arguments(self, args):
self._ebnf_grammar["<arguments>"] = [" " + args]
# Delete rules for previous arguments
for nonterminal in unreachable_nonterminals(self._ebnf_grammar):
del self._ebnf_grammar[nonterminal]
def set_invocation(self, program):
self.program = program
###Output
_____no_output_____
###Markdown
We can instantiate the class on `autopep8` and immediately get the grammar:
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
print(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
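###Markdown
As a quick illustration, `set_arguments()` rewrites the `<arguments>` expansion in place and prunes rules that have become unreachable; we restore the original setting right away so that the following sections are unaffected.
###Code
autopep8_runner.set_arguments("foo.py bar.py")
print(autopep8_runner.ebnf_grammar()["<arguments>"])
autopep8_runner.set_arguments("foo.py")  # restore the setting used below
###Output
_____no_output_____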
###Markdown
An `OptionFuzzer` interacts with the given `OptionRunner` to obtain its grammar, which is then passed to its `GrammarCoverageFuzzer` superclass.
###Code
class OptionFuzzer(GrammarCoverageFuzzer):
def __init__(self, runner, *args, **kwargs):
assert issubclass(type(runner), OptionRunner)
self.runner = runner
grammar = runner.grammar()
super().__init__(grammar, *args, **kwargs)
###Output
_____no_output_____
###Markdown
When invoking `run()`, the `OptionFuzzer` creates a new invocation (using `fuzz()` from its grammar) and runs the now given (or previously set) runner with the arguments from the grammar. Note that the runner specified in `run()` can differ from the one set during initialization; this allows for mining options from one program and applying it in another context.
###Code
class OptionFuzzer(OptionFuzzer):
def run(self, runner=None, inp=""):
if runner is None:
runner = self.runner
assert issubclass(type(runner), OptionRunner)
invocation = runner.executable() + " " + self.fuzz()
runner.set_invocation(invocation.split())
return runner.run(inp)
###Output
_____no_output_____
###Markdown
Example: Autopep8Let us apply our newly defined classes on the `autopep8` runner:
###Code
autopep8_fuzzer = OptionFuzzer(autopep8_runner, max_nonterminals=5)
for i in range(3):
print(autopep8_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
We can now systematically test `autopep8` with these classes:
###Code
autopep8_fuzzer.run(autopep8_runner)
###Output
_____no_output_____
###Markdown
Example: MyPyWe can extract options for the `mypy` static type checker for Python:
###Code
assert find_executable("mypy") is not None
mypy_runner = OptionRunner("mypy", "foo.py")
print(mypy_runner.ebnf_grammar()["<option>"])
mypy_fuzzer = OptionFuzzer(mypy_runner, max_nonterminals=5)
for i in range(10):
print(mypy_fuzzer.fuzz())
###Output
_____no_output_____
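###Markdown
As noted above, `run()` also accepts a runner different from the one the fuzzer was mined from. Purely as an illustration, we can send an invocation from the `autopep8` grammar to the `mypy` runner; most such options will simply be rejected by `mypy`, which is fine for fuzzing purposes.
###Code
# Fuzz with autopep8 options, but run them through the mypy runner
result, outcome = autopep8_fuzzer.run(mypy_runner)
print(result.stderr, end="")
###Output
_____no_output_____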
###Markdown
Example: NotedownHere's the configuration options for the `notedown` Notebook to Markdown converter:
###Code
assert find_executable("notedown") is not None
notedown_runner = OptionRunner("notedown")
print(notedown_runner.ebnf_grammar()["<option>"])
notedown_fuzzer = OptionFuzzer(notedown_runner, max_nonterminals=5)
for i in range(10):
print(notedown_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Combinatorial TestingOur `GrammarCoverageFuzzer` does a good job in covering each and every option at least once, which is great for systematic testing. However, as we also can see in our examples above, some options require each other, while others interfere with each other. What we should do as good testers is not only to cover every option individually, but also _combinations_ of options. The Python `itertools` module gives us the means to create combinations from lists. We can, for instance, take the `notedown` options and create a list of all pairs.
###Code
from itertools import combinations
option_list = notedown_runner.ebnf_grammar()["<option>"]
pairs = list(combinations(option_list, 2))
###Output
_____no_output_____
###Markdown
There's quite a number of pairs:
###Code
len(pairs)
print(pairs[:20])
###Output
_____no_output_____
###Markdown
Testing every such pair of options frequently suffices to cover all interferences between options. (Programs rarely have conditions involving three or more configuration settings.) To this end, we _change_ the grammar from having a list of options to having a list of _option pairs_, such that covering these will automatically cover all pairs. We create a function `pairwise()` that takes a list of options as occurring in our grammar and returns a list of _pairwise options_ – that is, our original options, but concatenated.
###Code
def pairwise(option_list):
return [option_1 + option_2
for (option_1, option_2) in combinations(option_list, 2)]
###Output
_____no_output_____
###Markdown
Here's the first 20 pairs:
###Code
print(pairwise(option_list)[:20])
###Output
_____no_output_____
###Markdown
The new grammar `pairwise_notedown_grammar` is a copy of the `notedown` grammar, but with the list of options replaced with the above pairwise option list.
###Code
notedown_grammar = notedown_runner.grammar()
pairwise_notedown_grammar = extend_grammar(notedown_grammar)
pairwise_notedown_grammar["<option>"] = pairwise(notedown_grammar["<option>"])
assert is_valid_grammar(pairwise_notedown_grammar)
###Output
_____no_output_____
###Markdown
Using the "pairwise" grammar to fuzz now covers one pair after another:
###Code
notedown_fuzzer = GrammarCoverageFuzzer(
pairwise_notedown_grammar, max_nonterminals=4)
for i in range(10):
print(notedown_fuzzer.fuzz())
###Output
_____no_output_____
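###Markdown
If we ever did want to go beyond pairs, the same construction generalizes directly. Here is a small `k_wise()` helper – a sketch for illustration, not used further in this chapter – that concatenates combinations of arbitrary size:
###Code
def k_wise(option_list, k):
    # Concatenate every combination of k options, just as pairwise() does for k = 2
    return ["".join(opts) for opts in combinations(option_list, k)]

len(k_wise(option_list, 3))
###Output
_____no_output_____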
###Markdown
Can we actually test all combinations of options? Not in practice, as the number of combinations quickly grows as the length increases. It decreases again as the number of options reaches the maximum (with 20 options, there is only 1 combination involving _all_ options), but the absolute numbers are still staggering:
###Code
for combination_length in range(1, 20):
tuples = list(combinations(option_list, combination_length))
print(combination_length, len(tuples))
###Output
_____no_output_____
###Markdown
Formally, the number of combinations of length $k$ from a set of $n$ options is the binomial coefficient$${n \choose k} = \frac{n!}{k!(n - k)!}$$ which for $k = 2$ (all pairs) gives us$${n \choose 2} = \frac{n!}{2(n - 2)!} = \frac{n \times (n - 1)}{2}$$ For `autopep8` with its 29 options...
###Code
len(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
###Markdown
... we thus need $29 \times 28 / 2 = 406$ tests to cover all pairs:
###Code
len(autopep8_runner.ebnf_grammar()["<option>"]) * \
    (len(autopep8_runner.ebnf_grammar()["<option>"]) - 1) // 2
###Output
_____no_output_____
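###Markdown
As a small sanity check, evaluating the binomial coefficient directly gives the same number:
###Code
from math import factorial

n = len(autopep8_runner.ebnf_grammar()["<option>"])
factorial(n) // (factorial(2) * factorial(n - 2))
###Output
_____no_output_____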
###Markdown
For `mypy` with its 110 options, though, we already end up with 5,995 tests to be conducted:
###Code
len(mypy_runner.ebnf_grammar()["<option>"])
len(mypy_runner.ebnf_grammar()["<option>"]) * \
    (len(mypy_runner.ebnf_grammar()["<option>"]) - 1) // 2
###Output
_____no_output_____
###Markdown
Even if each pair takes a second to run, we'd still be done in less than two hours of testing. If your program has more options than you want to cover in combination, it is advisable to limit the number of configurations further – for instance, by restricting combinatorial testing to those combinations that can possibly interact with each other, and covering all other (presumably orthogonal) options individually. This mechanism of creating configurations by extending grammars can easily be applied to other configuration targets as well. One may want to explore a greater number of configurations, or expansions in specific contexts. The [exercises](Exercises), below, have a number of options ready for you. SynopsisThis chapter provides two classes:* `OptionRunner` automatically extracts command-line options from a Python program;* `OptionFuzzer` uses these to automatically test a Python program with a large variety of options. `OptionRunner` runs a program up to the point where it parses its arguments, and then extracts a grammar that describes its invocations:
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
###Output
_____no_output_____
###Markdown
The grammar can be extracted via the method `ebnf_grammar()`:
###Code
option_ebnf_grammar = autopep8_runner.ebnf_grammar()
print(option_ebnf_grammar)
###Output
_____no_output_____
###Markdown
The grammar can be immediately used for fuzzing. A `GrammarCoverageFuzzer` will ensure all options are covered:
###Code
from Grammars import convert_ebnf_grammar
fuzzer = GrammarCoverageFuzzer(convert_ebnf_grammar(option_ebnf_grammar))
[fuzzer.fuzz() for i in range(3)]
###Output
_____no_output_____
###Markdown
The `OptionFuzzer` class summarizes these steps. Its constructor takes an `OptionRunner` to automatically extract the grammar; it does the necessary steps to extract the grammar and fuzz with it.
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
autopep8_fuzzer = OptionFuzzer(autopep8_runner)
[autopep8_fuzzer.fuzz() for i in range(3)]
###Output
_____no_output_____
###Markdown
The final step in testing would now to invoke the program with these arguments. Note that `OptionRunner` is experimental: It assumes that the Python program in question uses the `argparse` module; and not all `argparse` features are supported. Still, it does a pretty good job even on nontrivial programs. Lessons Learned* Besides regular input data, program _configurations_ make an important testing target.* For a given program using a standard library to parse command-line options and arguments, one can automatically extract these and convert them into a grammar.* To cover not only single options, but combinations of options, one can expand the grammar to cover all pairs, or come up with even more ambitious targets. Next StepsIf you liked the idea of mining a grammar from a program, do not miss:* [how to mine grammars for input data](GrammarMiner.ipynb) Our next steps in the book focus on:* [how to parse and recombine inputs](Parser.ipynb)* [how to assign weights and probabilities to specific productions](ProbabilisticGrammarFuzzer.ipynb)* [how to simplify inputs that cause a failure](Reducer.ipynb) BackgroundAlthough configuration data is just as likely to cause failures as other input data, it has received relatively little attention in test generation – possibly because, unlike "regular" input data, configuration data is not so much under control of external parties, and because, again unlike regular data, there is little variance in configurations. Creating models for software configurations and using these models for testing is commonplace, as is the idea of pairwise testing. For an overview, see \cite{Pezze2008}; for a discussion and comparison of state-of-the-art techniques, see \cite{Petke2015}.More specifically, \cite{Sutton2007} also discuss techniques to systematically cover command-line options. Dai et al. \cite{Dai2010} apply configuration fuzzing by changing variables associated with configuration files. Exercises Exercise 1: ifdef Configuration FuzzingIn C programs, the *C preprocessor* can be used to choose which code parts should be compiled and which ones should not. As an example, in the C code```Cifdef LONG_FOOlong foo() { ... }elseint foo() { ... }endif```the compiler will compile the function `foo()` with return type`long` if the _preprocessor variable_ `LONG_FOO` is defined, and with return type `int` if not. Such preprocessor variables are either set in the source files (using `define`, as in `define LONG_FOO`) or on the C compiler command line (using `-D` or `-D=`, as in `-DLONG_FOO`. Such *conditional compilation* is used to configure C programs towards their environment. System-specific code can contain lots of conditional compilation. As an example, consider this excerpt of `xmlparse.c`, the XML parser that is part of the Python runtime library:```cif defined(_WIN32) && !defined(LOAD_LIBRARY_SEARCH_SYSTEM32) define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800endifif !defined(HAVE_GETRANDOM) && !defined(HAVE_SYSCALL_GETRANDOM) \ && !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_ARC4RANDOM) \ && !defined(XML_DEV_URANDOM) \ && !defined(_WIN32) \ && !defined(XML_POOR_ENTROPY) errorendifif !defined(TIOCSWINSZ) || defined(__SCO__) || defined(__UNIXWARE__)define USE_SYSV_ENVVARS /* COLUMNS/LINES vs. 
TERMCAP */endififdef XML_UNICODE_WCHAR_Tdefine XML_T(x) (const wchar_t)xdefine XML_L(x) L xelsedefine XML_T(x) (const unsigned short)xdefine XML_L(x) xendifint fun(int x) { return XML_T(x); }``` A typical configuration for the C preprocessor on the above code could be `cc -c -D_WIN32 -DXML_POOR_ENTROPY -DXML_UNICODE_WCHAR_T xmlparse.c`, defining the given preprocessor variables and selecting the appropriate code fragments. Since the compiler can only compile one configuration at a time (implying that we can also only _test_ one resulting executable at a time), your task is to find out which of these configurations actually compile. To this end, proceed in three steps. Part 1: Extract Preprocessor VariablesWrite a _function_ `cpp_identifiers()` that, given a set of lines (say, from `open(filename).readlines()`), extracts all preprocessor variables referenced in `if` or `ifdef` preprocessor instructions. Apply `ifdef_identifiers()` on the sample C input above, such that```pythoncpp_identifiers(open("xmlparse.c").readlines()) ```returns the set```python{'_WIN32', 'LOAD_LIBRARY_SEARCH_SYSTEM32', 'HAVE_GETRANDOM', 'HAVE_SYSCALL_GETRANDOM', 'HAVE_ARC4RANDOM_BUF', ...}``` **Solution.** Let us start with creating a sample input file, `xmlparse.c`:
###Code
filename = "xmlparse.c"
open(filename, "w").write(
"""
#if defined(_WIN32) && !defined(LOAD_LIBRARY_SEARCH_SYSTEM32)
# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800
#endif
#if !defined(HAVE_GETRANDOM) && !defined(HAVE_SYSCALL_GETRANDOM) \
&& !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_ARC4RANDOM) \
&& !defined(XML_DEV_URANDOM) \
&& !defined(_WIN32) \
&& !defined(XML_POOR_ENTROPY)
# error
#endif
#if !defined(TIOCSWINSZ) || defined(__SCO__) || defined(__UNIXWARE__)
#define USE_SYSV_ENVVARS /* COLUMNS/LINES vs. TERMCAP */
#endif
#ifdef XML_UNICODE_WCHAR_T
#define XML_T(x) (const wchar_t)x
#define XML_L(x) L ## x
#else
#define XML_T(x) (const unsigned short)x
#define XML_L(x) x
#endif
int fun(int x) { return XML_T(x); }
""");
###Output
_____no_output_____
###Markdown
To find C preprocessor `if` directives and preprocessor variables, we use regular expressions matching them.
###Code
import re
re_cpp_if_directive = re.compile(r"\s*#\s*(el)?if")
re_cpp_identifier = re.compile(r"[a-zA-Z_$]+")
def cpp_identifiers(lines):
identifiers = set()
for line in lines:
if re_cpp_if_directive.match(line):
identifiers |= set(re_cpp_identifier.findall(line))
# These are preprocessor keywords
identifiers -= {"if", "ifdef", "ifndef", "defined"}
return identifiers
cpp_ids = cpp_identifiers(open("xmlparse.c").readlines())
cpp_ids
###Output
_____no_output_____
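###Markdown
To see what these regular expressions do, here they are applied to a few hand-picked sample lines (illustrative inputs only):
###Code
for line in ["#if defined(FOO) && !defined(BAR)",
             "#ifdef BAZ",
             "int x = 0;"]:
    print(repr(line), "->",
          bool(re_cpp_if_directive.match(line)),
          re_cpp_identifier.findall(line))
###Output
_____no_output_____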
###Markdown
Part 2: Derive an Option GrammarWith the help of `cpp_identifiers()`, create a grammar which has C compiler invocations with a list of options, where each option takes the form `-D<identifier>` for a preprocessor variable `<identifier>`. Using this grammar `cpp_grammar`, a fuzzer```python g = GrammarCoverageFuzzer(cpp_grammar)```would create C compiler invocations such as```python [g.fuzz() for i in range(10)] ['cc -DHAVE_SYSCALL_GETRANDOM xmlparse.c', 'cc -D__SCO__ -DRANDOM_BUF -DXML_UNICODE_WCHAR_T -D__UNIXWARE__ xmlparse.c', 'cc -DXML_POOR_ENTROPY xmlparse.c', 'cc -DRANDOM xmlparse.c', 'cc -D_WIN xmlparse.c', 'cc -DHAVE_ARC xmlparse.c', ...]``` **Solution.** This is not very difficult:
###Code
from Grammars import new_symbol
cpp_grammar = {
"<start>": ["cc -c<options> " + filename],
"<options>": ["<option>", "<options><option>"],
"<option>": []
}
for id in cpp_ids:
s = new_symbol(cpp_grammar, "<" + id + ">")
cpp_grammar["<option>"].append(s)
cpp_grammar[s] = [" -D" + id]
cpp_grammar
assert is_valid_grammar(cpp_grammar)
###Output
_____no_output_____
###Markdown
Part 3: C Preprocessor Configuration FuzzingUsing the grammar just produced, use a `GrammarCoverageFuzzer` to1. Test each processor variable individually2. Test each pair of processor variables, using `pairwise()`.What happens if you actually run the invocations? **Solution.** We can simply run the coverage fuzzer, as described above.
###Code
g = GrammarCoverageFuzzer(cpp_grammar)
g.fuzz()
from Fuzzer import ProgramRunner
for i in range(10):
invocation = g.fuzz()
print("$", invocation)
# subprocess.call(invocation, shell=True)
cc_runner = ProgramRunner(invocation.split(' '))
(result, outcome) = cc_runner.run()
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
To test all pairs, we can use `pairwise()`:
###Code
pairwise_cpp_grammar = extend_grammar(cpp_grammar)
pairwise_cpp_grammar["<option>"] = pairwise(cpp_grammar["<option>"])
pairwise_cpp_grammar["<option>"][:10]
# Fuzz over the pairwise grammar rather than the original one
g = GrammarCoverageFuzzer(pairwise_cpp_grammar)
for i in range(10):
    invocation = g.fuzz()
    print("$", invocation)
    # subprocess.call(invocation, shell=True)
    cc_runner = ProgramRunner(invocation.split(' '))
    (result, outcome) = cc_runner.run()
    print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
Some of the compilation errors we get could be expected – for instance, defining `XML_UNICODE_WCHAR_T` when actually, the type is not supported in our environment. Other errors may not be expected – and it is these errors we would find through systematic configuration fuzzing, as described above. At the end, don't forget to clean up:
###Code
os.remove("xmlparse.c")
if os.path.exists("xmlparse.o"):
os.remove("xmlparse.o")
###Output
_____no_output_____
###Markdown
Exercise 2: .ini Configuration FuzzingBesides command-line options, another important source of configurations are _configuration files_. In this exercise, we will consider the very simple configuration language provided by the Python `ConfigParser` module, which is very similar to what is found in Microsoft Windows _.ini_ files. The following example for a `ConfigParser` input file stems right from [the ConfigParser documentation](https://docs.python.org/3/library/configparser.html):```[DEFAULT]ServerAliveInterval = 45Compression = yesCompressionLevel = 9ForwardX11 = yes[bitbucket.org]User = hg[topsecret.server.com]Port = 50022ForwardX11 = no``` The above `ConfigParser` file can be created programmatically:
###Code
import configparser
config = configparser.ConfigParser()
config['DEFAULT'] = {'ServerAliveInterval': '45',
'Compression': 'yes',
'CompressionLevel': '9'}
config['bitbucket.org'] = {}
config['bitbucket.org']['User'] = 'hg'
config['topsecret.server.com'] = {}
topsecret = config['topsecret.server.com']
topsecret['Port'] = '50022' # mutates the parser
topsecret['ForwardX11'] = 'no' # same here
config['DEFAULT']['ForwardX11'] = 'yes'
with open('example.ini', 'w') as configfile:
config.write(configfile)
with open('example.ini') as configfile:
print(configfile.read(), end="")
###Output
_____no_output_____
###Markdown
and be read in again:
###Code
config = configparser.ConfigParser()
config.read('example.ini')
topsecret = config['topsecret.server.com']
topsecret['Port']
###Output
_____no_output_____
###Markdown
Part 1: Read ConfigurationUsing `configparser`, create a program reading in the above configuration file and accessing the individual elements. Part 2: Create a Configuration GrammarDesign a grammar that will automatically create configuration files suitable for your above program. Fuzz your program with it. Part 3: Mine a Configuration GrammarBy dynamically tracking the individual accesses to configuration elements, you can again extract a basic grammar from the execution. To this end, create a subclass of `ConfigParser` with a special method `__getitem__`:
###Code
class TrackingConfigParser(configparser.ConfigParser):
def __getitem__(self, key):
print("Accessing", repr(key))
return super().__getitem__(key)
###Output
_____no_output_____
###Markdown
For a `TrackingConfigParser` object `p`, `p.__getitem__(key)` will be invoked whenever `p[key]` is accessed:
###Code
tracking_config_parser = TrackingConfigParser()
tracking_config_parser.read('example.ini')
section = tracking_config_parser['topsecret.server.com']
###Output
_____no_output_____
###Markdown
Using `__getitem__()`, as above, implement a tracking mechanism that, while your program accesses the read configuration, automatically saves options accessed and values read. Create a prototype grammar from these values; use it for fuzzing. At the end, don't forget to clean up:
###Code
import os
os.remove("example.ini")
###Output
_____no_output_____
###Markdown
**Solution.** Left to the reader. Enjoy! Exercise 3: Extracting and Fuzzing C Command-Line OptionsIn C programs, the `getopt()` function is frequently used to process configuration options. A call```getopt(argc, argv, "bf:")```indicates that the program accepts two options `-b` and `-f`, with `-f` taking an argument (as indicated by the following colon). Part 1: Getopt FuzzingWrite a framework which, for a given C program, automatically extracts the argument to `getopt()` and derives a fuzzing grammar for it. There are multiple ways to achieve this:1. Scan the program source code for occurrences of `getopt()` and return the string passed. (Crude, but should frequently work.)2. Insert your own implementation of `getopt()` into the source code (effectively replacing `getopt()` from the runtime library), which outputs the `getopt()` argument and exits the program. Recompile and run.3. (Advanced.) As above, but instead of changing the source code, hook into the _dynamic linker_ which at runtime links the program with the C runtime library. Set the library loading path (on Linux and Unix, this is the `LD_LIBRARY_PATH` environment variable) such that your own version of `getopt()` is linked first, and the regular libraries later. Executing the program (without recompiling) should yield the desired result.Apply this on `grep` and `ls`; report the resulting grammars and results. **Solution.** Left to the reader. Enjoy hacking! Part 2: Fuzzing Long Options in CSame as Part 1, but also hook into the GNU variant `getopt_long()`, which accepts "long" arguments with double dashes such as `--help`. Note that method 1, above, will not work here, since the "long" options are defined in a separate structure. **Solution.** Left to the reader. Enjoy hacking! Exercise 4: Expansions in ContextIn our above option configurations, we have multiple symbols which all expand to the same integer. For instance, the `--line-range` option of `autopep8` takes two `<line>` parameters which both expand into the same `<int>` symbol:```<option> ::= ... | --line-range <line> <line> | ... <line> ::= <int> <int> ::= (-)?<digit>+ <digit> ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9```
###Code
autopep8_runner.ebnf_grammar()["<line>"]
autopep8_runner.ebnf_grammar()["<int>"]
autopep8_runner.ebnf_grammar()["<digit>"]
###Output
_____no_output_____
###Markdown
Testing ConfigurationsThe behavior of a program is not only governed by its data. The _configuration_ of a program – that is, the settings that govern the execution of a program on its (regular) input data, as set by options or configuration files – just as well influences behavior, and thus can and should be tested. In this chapter, we explore how to systematically _test_ and _cover_ software configurations. By _automatically inferring configuration options_, we can apply these techniques out of the box, with no need for writing a grammar. Finally, we show how to systematically cover _combinations_ of configuration options, quickly detecting unwanted interferences. **Prerequisites*** You should have read the [chapter on grammars](Grammars.ipynb).* You should have read the [chapter on grammar coverage](GrammarCoverageFuzzer.ipynb). Configuration OptionsWhen we talk about the input to a program, we usually think of the _data_ it processes. This is also what we have been fuzzing in the past chapters – be it with [random input](Fuzzer.ipynb), [mutation-based fuzzing](MutationFuzzer.ipynb), or [grammar-based fuzzing](GrammarFuzzer.ipynb). However, programs typically have several input sources, all of which can and should be tested – and included in test generation. One important source of input is the program's _configuration_ – that is, a set of inputs that typically is set once when beginning to process data and then stays constant while processing data, while the program is running, or even while the program is deployed. Such a configuration is frequently set in _configuration files_ (for instance, as key/value pairs); the most ubiquitous method for command-line tools, though, are _configuration options_ on the command line. As an example, consider the `grep` utility to find textual patterns in files. The exact mode by which `grep` works is governed by a multitude of options, which can be listed by providing a `--help` option:
###Code
!grep --help
###Output
_____no_output_____
###Markdown
All these options need to be tested for whether they operate correctly. In security testing, any such option may also trigger a yet unknown vulnerability. Hence, such options can become _fuzz targets_ on their own. In this chapter, we analyze how to systematically test such options – and better yet, how to extract possible configurations right out of given program files, such that we do not have to specify anything. Options in PythonLet us stick to our common programming language here and examine how options are processed in Python. The `argparse` module provides a parser for command-line arguments (and options) with great functionality – and great complexity. You start by defining a parser (`argparse.ArgumentParser()`) to which individual arguments with various features are added, one after another. Additional parameters for each argument can specify the type (`type`) of the argument (say, integers or strings), or the number of arguments (`nargs`). By default, arguments are stored under their name in the `args` object coming from `parse_args()` – thus, `args.integers` holds the `integers` arguments added earlier. Special actions (`actions`) allow to store specific values in given variables; the `store_const` action stores the given `const` in the attribute named by `dest`. The following example takes a number of integer arguments (`integers`) as well as an operator (`--sum`, `--min`, or `--max`) to be applied on these integers. The operators all store a function reference in the `accumulate` attribute, which is finally invoked on the integers parsed:
###Code
import argparse
def process_numbers(args=[]):
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('integers', metavar='N', type=int, nargs='+',
help='an integer for the accumulator')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--sum', dest='accumulate', action='store_const',
const=sum,
help='sum the integers')
group.add_argument('--min', dest='accumulate', action='store_const',
const=min,
help='compute the minimum')
group.add_argument('--max', dest='accumulate', action='store_const',
const=max,
help='compute the maximum')
args = parser.parse_args(args)
print(args.accumulate(args.integers))
###Output
_____no_output_____
###Markdown
Here's how `process_numbers()` works. We can, for instance, invoke the `--min` option on the given arguments to compute the minimum:
###Code
process_numbers(["--min", "100", "200", "300"])
###Output
_____no_output_____
###Markdown
Or compute the sum of three numbers:
###Code
process_numbers(["--sum", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
When defined via `add_mutually_exclusive_group()` (as above), options are mutually exclusive. Consequently, we can have only one operator:
###Code
import fuzzingbook_utils
from ExpectError import ExpectError
with ExpectError(print_traceback=False):
process_numbers(["--sum", "--max", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
A Grammar for ConfigurationsHow can we test a system with several options? The easiest answer is to write a grammar for it. The grammar `PROCESS_NUMBERS_EBNF_GRAMMAR` reflects the possible combinations of options and arguments:
###Code
from Grammars import crange, srange, convert_ebnf_grammar, is_valid_grammar, START_SYMBOL, new_symbol
PROCESS_NUMBERS_EBNF_GRAMMAR = {
"<start>": ["<operator> <integers>"],
"<operator>": ["--sum", "--min", "--max"],
"<integers>": ["<integer>", "<integers> <integer>"],
"<integer>": ["<digit>+"],
"<digit>": crange('0', '9')
}
assert is_valid_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
PROCESS_NUMBERS_GRAMMAR = convert_ebnf_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
###Output
_____no_output_____
###Markdown
We can feed this grammar into our [grammar coverage fuzzer](GrammarCoverageFuzzer.ipynb) and have it cover one option after another:
###Code
from GrammarCoverageFuzzer import GrammarCoverageFuzzer
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Of course, we can also invoke `process_numbers()` with these very arguments. To this end, we need to convert the string produced by the grammar back into a list of individual arguments, using `split()`:
###Code
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
args = f.fuzz().split()
print(args)
process_numbers(args)
###Output
_____no_output_____
###Markdown
In a similar way, we can define grammars for any program to be tested; as well as define grammars for, say, configuration files. Yet, the grammar has to be updated with every change to the program, which creates a maintenance burden. Given that the information required for the grammar is already all encoded in the program, the question arises: _Can't we go and extract configuration options right out of the program in the first place?_ Mining Configuration Options In this section, we try to extract option and argument information right out of a program, such that we do not have to specify a configuration grammar. The aim is to have a configuration fuzzer that works on the options and arguments of an arbitrary program, as long as it follows specific conventions for processing its arguments. In the case of Python programs, this means using the `argparse` module. Our idea is as follows: We execute the given program up to the point where the arguments are actually parsed – that is, `argparse.parse_args()` is invoked. Up to this point, we track all calls into the argument parser, notably those calls that define arguments and options (`add_argument()`). From these, we construct the grammar. Tracking Arguments Let us illustrate this approach with a simple experiment: We define a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while `process_numbers` is invoked. If we have a call to a method `add_argument`, we access and print out the local variables (which at this point are the arguments to the method).
###Code
import sys
import string
def traceit(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(method_name, locals)
###Output
_____no_output_____
###Markdown
What we get is a list of all calls to `add_argument()`, together with the method arguments passed:
###Code
sys.settrace(traceit)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
###Markdown
From the `args` argument, we can access the individual options and arguments to be defined:
###Code
def traceit(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(locals['args'])
sys.settrace(traceit)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
###Markdown
We see that each argument comes as a tuple with one (say, `integers` or `--sum`) or two members (`-h` and `--help`), which denote alternate forms for the same option. Our job will be to go through the arguments of `add_argument()` and detect not only the names of options and arguments, but also whether they accept additional parameters, as well as the type of the parameters. A Grammar Miner for Options and Arguments Let us now build a class that gathers all this information to create a grammar. We use the `ParseInterrupt` exception to interrupt program execution after gathering all arguments and options:
###Code
class ParseInterrupt(Exception):
pass
###Output
_____no_output_____
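###Markdown
To see the interrupt pattern in isolation, here is a minimal sketch; `pretend_parse_args()` is a made-up stand-in for the traced `parse_args()` call.
###Code
def pretend_parse_args():
    raise ParseInterrupt  # stands in for the traced parse_args() call

try:
    pretend_parse_args()
except ParseInterrupt:
    print("Execution stopped before argument parsing")
###Output
_____no_output_____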
###Markdown
The class `OptionGrammarMiner` takes an executable function for which the grammar of options and arguments is to be mined:
###Code
class OptionGrammarMiner(object):
def __init__(self, function, log=False):
self.function = function
self.log = log
###Output
_____no_output_____
###Markdown
The method `mine_ebnf_grammar()` is where everything happens. It creates a grammar of the form```<start> ::= (<option>)*<arguments>```with initially empty rules for `<option>` and `<arguments>`, in which the mined options and arguments will be collected. It then sets a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while the previously defined `function` is invoked. Raising `ParseInterrupt` (when `parse_args()` is invoked) ends execution.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
OPTION_SYMBOL = "<option>"
ARGUMENTS_SYMBOL = "<arguments>"
def mine_ebnf_grammar(self):
self.grammar = {
START_SYMBOL: ["(" + self.OPTION_SYMBOL + ")*" + self.ARGUMENTS_SYMBOL],
self.OPTION_SYMBOL: [],
self.ARGUMENTS_SYMBOL: []
}
self.current_group = self.OPTION_SYMBOL
old_trace = sys.settrace(self.traceit)
try:
self.function()
except ParseInterrupt:
pass
sys.settrace(old_trace)
return self.grammar
def mine_grammar(self):
return convert_ebnf_grammar(self.mine_ebnf_grammar())
###Output
_____no_output_____
###Markdown
The trace function checks for four methods: `add_argument()` is the most important function, resulting in processing arguments; `frame.f_locals` again is the set of local variables, which at this point is mostly the arguments to `add_argument()`. Since mutually exclusive groups also have a method `add_argument()`, we set the flag `in_group` to differentiate. Note that we make no specific efforts to differentiate between multiple parsers or groups; we simply assume that there is one parser, and at any point at most one mutually exclusive group.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def traceit(self, frame, event, arg):
if event != "call":
return
if "self" not in frame.f_locals:
return
self_var = frame.f_locals["self"]
method_name = frame.f_code.co_name
if method_name == "add_argument":
in_group = repr(type(self_var)).find("Group") >= 0
self.process_argument(frame.f_locals, in_group)
elif method_name == "add_mutually_exclusive_group":
self.add_group(frame.f_locals, exclusive=True)
elif method_name == "add_argument_group":
# self.add_group(frame.f_locals, exclusive=False)
pass
elif method_name == "parse_args":
raise ParseInterrupt
return None
###Output
_____no_output_____
###Markdown
The method `process_argument()` now analyzes the arguments passed and adds them to the grammar: if an argument starts with `-`, it gets added as an optional element to the `<option>` list; otherwise, it gets added to the `<arguments>` list. The optional `nargs` argument specifies how many arguments can follow: if it is a number, we add the appropriate number of elements to the grammar; if it is an abstract specifier (say, `+` or `*`), we use it directly as an EBNF operator. Given the large number of parameters and optional behavior, this is a somewhat messy function, but it does the job.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def process_argument(self, locals, in_group):
args = locals["args"]
kwargs = locals["kwargs"]
if self.log:
print(args)
print(kwargs)
print()
for arg in args:
self.process_arg(arg, in_group, kwargs)
class OptionGrammarMiner(OptionGrammarMiner):
def process_arg(self, arg, in_group, kwargs):
if arg.startswith('-'):
if not in_group:
target = self.OPTION_SYMBOL
else:
target = self.current_group
metavar = None
arg = " " + arg
else:
target = self.ARGUMENTS_SYMBOL
metavar = arg
arg = ""
if "nargs" in kwargs:
nargs = kwargs["nargs"]
else:
nargs = 1
param = self.add_parameter(kwargs, metavar)
if param == "":
nargs = 0
if isinstance(nargs, int):
for i in range(nargs):
arg += param
else:
assert nargs in "?+*"
arg += '(' + param + ')' + nargs
if target == self.OPTION_SYMBOL:
self.grammar[target].append(arg)
else:
self.grammar[target].append(arg)
###Output
_____no_output_____
###Markdown
The method `add_parameter()` handles possible parameters of options. If the argument has an `action` defined, it takes no parameter. Otherwise, we identify the type of the parameter (as `int` or `str`) and augment the grammar with an appropriate rule.
###Code
import inspect
class OptionGrammarMiner(OptionGrammarMiner):
def add_parameter(self, kwargs, metavar):
if "action" in kwargs:
# No parameter
return ""
type_ = "str"
if "type" in kwargs:
given_type = kwargs["type"]
# int types come as '<class int>'
if inspect.isclass(given_type) and issubclass(given_type, int):
type_ = "int"
if metavar is None:
if "metavar" in kwargs:
metavar = kwargs["metavar"]
else:
metavar = type_
self.add_type_rule(type_)
if metavar != type_:
self.add_metavar_rule(metavar, type_)
param = " <" + metavar + ">"
return param
###Output
_____no_output_____
###Markdown
The method `add_type_rule()` adds a rule for parameter types to the grammar. If the parameter is identified by a meta-variable (say, `N`), we add a rule for this as well to improve legibility.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_type_rule(self, type_):
if type_ == "int":
self.add_int_rule()
else:
self.add_str_rule()
def add_int_rule(self):
self.grammar["<int>"] = ["(-)?<digit>+"]
self.grammar["<digit>"] = crange('0', '9')
def add_str_rule(self):
self.grammar["<str>"] = ["<char>+"]
self.grammar["<char>"] = srange(
string.digits
+ string.ascii_letters
+ string.punctuation)
def add_metavar_rule(self, metavar, type_):
self.grammar["<" + metavar + ">"] = ["<" + type_ + ">"]
###Output
_____no_output_____
###Markdown
The method `add_group()` adds a new mutually exclusive group to the grammar. We define a new symbol (say, `<group>`) for the options added to the group, and use the `required` and `exclusive` flags to choose an appropriate expansion operator. The group is then prefixed to the start symbol, as in```<start> ::= <group>(<option>)*<arguments>```(possibly with a `?`, `+`, or `*` operator attached to `<group>`, depending on the flags), and the new, initially empty `<group>` rule is filled with the next calls to `add_argument()` within the group.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_group(self, locals, exclusive):
kwargs = locals["kwargs"]
if self.log:
print(kwargs)
required = kwargs.get("required", False)
group = new_symbol(self.grammar, "<group>")
if required and exclusive:
group_expansion = group
if required and not exclusive:
group_expansion = group + "+"
if not required and exclusive:
group_expansion = group + "?"
if not required and not exclusive:
group_expansion = group + "*"
self.grammar[START_SYMBOL][0] = group_expansion + \
self.grammar[START_SYMBOL][0]
self.grammar[group] = []
self.current_group = group
###Output
_____no_output_____
###Markdown
That's it! With this, we can now extract the grammar from our `process_numbers()` program. Turning on logging again reveals the variables we draw upon.
###Code
miner = OptionGrammarMiner(process_numbers, log=True)
process_numbers_grammar = miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
Here is the extracted grammar:
###Code
process_numbers_grammar
###Output
_____no_output_____
###Markdown
The grammar properly identifies the group found:
###Code
process_numbers_grammar["<start>"]
process_numbers_grammar["<group>"]
###Output
_____no_output_____
###Markdown
It also identifies a `--help` option provided not by us, but by the `argparse` module:
###Code
process_numbers_grammar["<option>"]
###Output
_____no_output_____
###Markdown
The grammar also correctly identifies the types of the arguments:
###Code
process_numbers_grammar["<arguments>"]
process_numbers_grammar["<integers>"]
###Output
_____no_output_____
###Markdown
The rules for `int` are set as defined by `add_int_rule()`
###Code
process_numbers_grammar["<int>"]
###Output
_____no_output_____
###Markdown
We can take this grammar and convert it to BNF, such that we can fuzz with it right away:
###Code
assert is_valid_grammar(process_numbers_grammar)
grammar = convert_ebnf_grammar(process_numbers_grammar)
assert is_valid_grammar(grammar)
f = GrammarCoverageFuzzer(grammar)
for i in range(10):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Each and every invocation adheres to the rules as set forth in the `argparse` calls. By mining options and arguments from existing programs, we can now fuzz these options out of the box – without having to specify a grammar. Testing Autopep8 Let us try out the option grammar miner on real-world Python programs. `autopep8` is a tool that automatically converts Python code to the [PEP 8 Style Guide for Python Code](https://www.python.org/dev/peps/pep-0008/). (Actually, all Python code in this book runs through `autopep8` during production.) `autopep8` offers a wide range of options, as can be seen by invoking it with `--help`:
###Code
!autopep8 --help
###Output
_____no_output_____
###Markdown
Autopep8 SetupWe want to systematically test these options. In order to deploy our configuration grammar miner, we need to find the source code of the executable:
###Code
import os
def find_executable(name):
for path in os.get_exec_path():
qualified_name = os.path.join(path, name)
if os.path.exists(qualified_name):
return qualified_name
return None
autopep8_executable = find_executable("autopep8")
assert autopep8_executable is not None
autopep8_executable
###Output
_____no_output_____
###Markdown
Next, we build a function that reads the contents of the file and executes it.
###Code
def autopep8():
executable = find_executable("autopep8")
# First line has to contain "/usr/bin/env python" or like
first_line = open(executable).readline()
assert first_line.find("python") >= 0
contents = open(executable).read()
exec(contents)
###Output
_____no_output_____
###Markdown
Mining an Autopep8 GrammarWe can use the `autopep8()` function in our grammar miner:
###Code
autopep8_miner = OptionGrammarMiner(autopep8)
###Output
_____no_output_____
###Markdown
and extract a grammar for it:
###Code
autopep8_ebnf_grammar = autopep8_miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
This works because here, `autopep8` is not a separate process (and a separate Python interpreter), but we run the `autopep8()` function (and the `autopep8` code) in our current Python interpreter – up to the call to `parse_args()`, where we interrupt execution again. At this point, the `autopep8` code has done nothing but setting up the argument parser – which is what we are interested in. The grammar options mined reflect precisely the options seen when providing `--help`:
###Code
print(autopep8_ebnf_grammar["<option>"])
###Output
_____no_output_____
###Markdown
Metavariables such as `<line>` are placeholders for integers. We assume all metavariables of the same name have the same type:
###Code
autopep8_ebnf_grammar["<line>"]
###Output
_____no_output_____
###Markdown
The grammar miner has inferred that the argument to `autopep8` is a list of files:
###Code
autopep8_ebnf_grammar["<arguments>"]
###Output
_____no_output_____
###Markdown
which in turn all are strings:
###Code
autopep8_ebnf_grammar["<files>"]
###Output
_____no_output_____
###Markdown
As we are only interested in testing options, not arguments, we fix the arguments to a single mandatory input. (Otherwise, we'd have plenty of random file names generated.)
###Code
autopep8_ebnf_grammar["<arguments>"] = [" <files>"]
autopep8_ebnf_grammar["<files>"] = ["foo.py"]
assert is_valid_grammar(autopep8_ebnf_grammar)
###Output
_____no_output_____
###Markdown
Creating Autopep8 Options Let us now use the inferred grammar for fuzzing. Again, we convert the EBNF grammar into a regular BNF grammar:
###Code
autopep8_grammar = convert_ebnf_grammar(autopep8_ebnf_grammar)
assert is_valid_grammar(autopep8_grammar)
###Output
_____no_output_____
###Markdown
And we can use the grammar for fuzzing all options:
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=4)
for i in range(20):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Let us apply these options on the actual program. We need a file `foo.py` that will serve as input:
###Code
def create_foo_py():
open("foo.py", "w").write("""
def twice(x = 2):
return x + x
""")
create_foo_py()
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We see how `autopep8` fixes the spacing:
###Code
!autopep8 foo.py
###Output
_____no_output_____
###Markdown
Let us now put things together. We define a `ProgramRunner` that will run the `autopep8` executable with arguments coming from the mined `autopep8` grammar.
###Code
from Fuzzer import ProgramRunner
###Output
_____no_output_____
###Markdown
Running `autopep8` with the mined options reveals a surprisingly high number of passing runs. (We see that some options depend on each other or are mutually exclusive, but this is handled by the program logic, not the argument parser, and hence out of our scope.) The `GrammarCoverageFuzzer` ensures that each option is tested at least once. (Digits and letters, too, by the way.)
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=5)
for i in range(20):
invocation = "autopep8" + f.fuzz()
print("$ " + invocation)
args = invocation.split()
autopep8 = ProgramRunner(args)
result, outcome = autopep8.run()
if result.stderr != "":
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
Our `foo.py` file now has been formatted in place a number of times:
###Code
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We don't need it anymore, so we clean up things:
###Code
import os
os.remove("foo.py")
###Output
_____no_output_____
###Markdown
Classes for Fuzzing Configuration Options Let us now create reusable classes that we can use for testing arbitrary programs. (Okay, make that "arbitrary programs that are written in Python and use the `argparse` module to process command-line arguments.") The class `OptionRunner` is a subclass of `ProgramRunner` that takes care of automatically determining the grammar, using the same steps we used for `autopep8`, above.
###Code
class OptionRunner(ProgramRunner):
def __init__(self, program, arguments=None):
if isinstance(program, str):
self.base_executable = program
else:
self.base_executable = program[0]
self.find_contents()
self.find_grammar()
if arguments is not None:
self.set_arguments(arguments)
super().__init__(program)
###Output
_____no_output_____
###Markdown
First, we find the contents of the Python executable:
###Code
class OptionRunner(OptionRunner):
def find_contents(self):
self._executable = find_executable(self.base_executable)
first_line = open(self._executable).readline()
assert first_line.find("python") >= 0
self.contents = open(self._executable).read()
def invoker(self):
exec(self.contents)
def executable(self):
return self._executable
###Output
_____no_output_____
###Markdown
Next, we determine the grammar using the `OptionGrammarMiner` class:
###Code
class OptionRunner(OptionRunner):
def find_grammar(self):
miner = OptionGrammarMiner(self.invoker)
self._ebnf_grammar = miner.mine_ebnf_grammar()
def ebnf_grammar(self):
return self._ebnf_grammar
def grammar(self):
return convert_ebnf_grammar(self._ebnf_grammar)
###Output
_____no_output_____
###Markdown
The two service methods `set_arguments()` and `set_invocation()` help us to change the arguments and program, respectively.
###Code
class OptionRunner(OptionRunner):
def set_arguments(self, args):
self._ebnf_grammar["<arguments>"] = [" " + args]
def set_invocation(self, program):
self.program = program
###Output
_____no_output_____
###Markdown
We can instantiate the class on `autopep8` and immediately get the grammar:
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
print(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
###Markdown
An `OptionFuzzer` interacts with the given `OptionRunner` to obtain its grammar, which is then passed to its `GrammarCoverageFuzzer` superclass.
###Code
class OptionFuzzer(GrammarCoverageFuzzer):
def __init__(self, runner, *args, **kwargs):
assert issubclass(type(runner), OptionRunner)
self.runner = runner
grammar = runner.grammar()
super().__init__(grammar, *args, **kwargs)
###Output
_____no_output_____
###Markdown
When invoking `run()`, the `OptionFuzzer` creates a new invocation (using `fuzz()` from its grammar) and runs the now given (or previously set) runner with the arguments from the grammar. Note that the runner specified in `run()` can differ from the one set during initialization; this allows for mining options from one program and applying it in another context.
###Code
class OptionFuzzer(OptionFuzzer):
def run(self, runner=None, inp=""):
if runner is None:
runner = self.runner
assert issubclass(type(runner), OptionRunner)
invocation = runner.executable() + " " + self.fuzz()
runner.set_invocation(invocation.split())
return runner.run(inp)
###Output
_____no_output_____
###Markdown
Example: Autopep8 Let us apply this on the `autopep8` runner:
###Code
autopep8_fuzzer = OptionFuzzer(autopep8_runner, max_nonterminals=5)
for i in range(3):
print(autopep8_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
We can now systematically test `autopep8` with these classes:
###Code
autopep8_fuzzer.run(autopep8_runner)
###Output
_____no_output_____
###Markdown
Example: MyPy We can extract options for the `mypy` static type checker for Python:
###Code
assert find_executable("mypy") is not None
mypy_runner = OptionRunner("mypy", "foo.py")
print(mypy_runner.ebnf_grammar()["<option>"])
mypy_fuzzer = OptionFuzzer(mypy_runner, max_nonterminals=5)
for i in range(10):
print(mypy_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Example: Notedown Here are the configuration options for the `notedown` Notebook to Markdown converter:
###Code
assert find_executable("notedown") is not None
notedown_runner = OptionRunner("notedown")
print(notedown_runner.ebnf_grammar()["<option>"])
notedown_fuzzer = OptionFuzzer(notedown_runner, max_nonterminals=5)
for i in range(10):
print(notedown_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Combinatorial Testing Our `GrammarCoverageFuzzer` does a good job in covering each and every option at least once, which is great for systematic testing. However, as we also can see in our examples above, some options require each other, while others interfere with each other. What we should do as good testers is not only to cover every option individually, but also _combinations_ of options. The Python `itertools` module gives us means to create combinations from lists. We can, for instance, take the `notedown` options and create a list of all pairs.
###Code
from itertools import combinations
option_list = notedown_runner.ebnf_grammar()["<option>"]
pairs = list(combinations(option_list, 2))
###Output
_____no_output_____
###Markdown
There's quite a number of pairs:
###Code
len(pairs)
print(pairs[:20])
###Output
_____no_output_____
###Markdown
Testing every such pair of options frequently suffices to cover all interferences between options. (Programs rarely have conditions involving three or more configuration settings.) To this end, we _change_ the grammar from having a list of options to having a list of _option pairs_, such that covering these will automatically cover all pairs. We create a function `pairwise()` that takes a list of options as occurring in our grammar and returns a list of _pairwise options_ – that is, our original options, but concatenated.
###Code
def pairwise(option_list):
return [option_1 + option_2
for (option_1, option_2) in combinations(option_list, 2)]
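# Added sketch (not part of the original notebook): the same construction
# generalizes from pairs to any combination length k, at the cost of far more
# expansions to cover.
def k_wise(option_list, k=2):
    return ["".join(opts) for opts in combinations(option_list, k)]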
###Output
_____no_output_____
###Markdown
Here are the first 20 pairs:
###Code
print(pairwise(option_list)[:20])
###Output
_____no_output_____
###Markdown
The new grammar `pairwise_notedown_grammar` is a copy of the `notedown` grammar, but with the list of options replaced with the above pairwise option list.
###Code
from copy import deepcopy
notedown_grammar = notedown_runner.grammar()
pairwise_notedown_grammar = deepcopy(notedown_grammar)
pairwise_notedown_grammar["<option>"] = pairwise(notedown_grammar["<option>"])
assert is_valid_grammar(pairwise_notedown_grammar)
###Output
_____no_output_____
###Markdown
Using the "pairwise" grammar to fuzz now covers one pair after another:
###Code
notedown_fuzzer = GrammarCoverageFuzzer(
pairwise_notedown_grammar, max_nonterminals=4)
for i in range(10):
print(notedown_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Can we actually test all combinations of options? Not in practice, as the number of combinations quickly grows as the length increases. It decreases again as the number of options reaches the maximum (with 20 options, there is only 1 combination involving _all_ options), but the absolute numbers are still staggering:
###Code
for combination_length in range(1, 20):
tuples = list(combinations(option_list, combination_length))
print(combination_length, len(tuples))
###Output
_____no_output_____
###Markdown
Formally, the number of combinations of length $k$ in a set of options of length $n$ is the binomial coefficient$${n \choose k} = \frac{n!}{k!(n - k)!}$$ which for $k = 2$ (all pairs) gives us$${n \choose 2} = \frac{n!}{2(n - 2)!} = \frac{n (n - 1)}{2}$$ For `autopep8` with its 29 options...
###Code
len(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
###Markdown
... we thus have 406 distinct pairs. However, the binomial coefficient does not differentiate between permutations of elements of the pairs, which our tests do. Therefore we need 812 tests to cover all pairs:
###Code
len(autopep8_runner.ebnf_grammar()["<option>"]) * \
(len(autopep8_runner.ebnf_grammar()["<option>"]) - 1)
###Output
_____no_output_____
###Markdown
For `mypy` with its 110 options, though, we already end up with 11,990 tests to be conducted:
###Code
len(mypy_runner.ebnf_grammar()["<option>"])
len(mypy_runner.ebnf_grammar()["<option>"]) * \
(len(mypy_runner.ebnf_grammar()["<option>"]) - 1)
###Output
_____no_output_____
###Markdown
Even if each pair takes a second to run, we'd still be done in three hours of testing, though. If your program has more options that you want to cover in combinations, it is advisable that you limit the number of configurations further – for instance by limiting combinatorial testing to those combinations that possibly can interact with each other; and covering all other (presumably orthogonal) options individually. This mechanism of creating configurations by extending grammars can be easily extended to other configuration targets. One may want to explore a greater number of configurations, or expansions in specific contexts. The exercises, below, have a number of options ready for you. Lessons Learned* Besides regular input data, program _configurations_ make an important testing target.* For a given program using a standard library to parse command-line options and arguments, one can automatically extract these and convert them into a grammar.* To cover not only single options, but combinations of options, one can expand the grammar to cover all pairs, or come up with even more ambitious targets. Next Steps If you liked the idea of mining a grammar from a program, do not miss:* [how to mine grammars for input data](GrammarMiner.ipynb) Our next steps in the book focus on:* [how to parse and recombine inputs](Parser.ipynb)* [how to assign weights and probabilities to specific productions](ProbabilisticGrammarFuzzer.ipynb)* [how to simplify inputs that cause a failure](Reducer.ipynb) Background Although configuration data is just as likely to cause failures as other input data, it has received relatively little attention in test generation – possibly because, unlike "regular" input data, configuration data is not so much under control of external parties, and because, again unlike regular data, there is little variance in configurations. Creating models for software configurations and using these models for testing is commonplace, as is the idea of pairwise testing. For an overview, see \cite{Pezze2008}; for a discussion and comparison of state-of-the-art techniques, see \cite{Petke2015}. More specifically, \cite{Sutton2007} also discuss techniques to systematically cover command-line options. Dai et al. \cite{Dai2010} apply configuration fuzzing by changing variables associated with configuration files. Exercises Exercise 1: Configuration Files Besides command-line options, a second important source of configurations is _configuration files_. In this exercise, we will consider the very simple configuration language provided by the Python `ConfigParser` module, which is very similar to what is found in Microsoft Windows _.ini_ files. The following example for a `ConfigParser` input file stems right from [the ConfigParser documentation](https://docs.python.org/3/library/configparser.html):
```
[DEFAULT]
ServerAliveInterval = 45
Compression = yes
CompressionLevel = 9
ForwardX11 = yes

[bitbucket.org]
User = hg

[topsecret.server.com]
Port = 50022
ForwardX11 = no
```
The above `ConfigParser` file can be created programmatically:
###Code
import configparser
config = configparser.ConfigParser()
config['DEFAULT'] = {'ServerAliveInterval': '45',
'Compression': 'yes',
'CompressionLevel': '9'}
config['bitbucket.org'] = {}
config['bitbucket.org']['User'] = 'hg'
config['topsecret.server.com'] = {}
topsecret = config['topsecret.server.com']
topsecret['Port'] = '50022' # mutates the parser
topsecret['ForwardX11'] = 'no' # same here
config['DEFAULT']['ForwardX11'] = 'yes'
with open('example.ini', 'w') as configfile:
config.write(configfile)
with open('example.ini') as configfile:
print(configfile.read(), end="")
###Output
_____no_output_____
###Markdown
and be read in again:
###Code
config = configparser.ConfigParser()
config.read('example.ini')
topsecret = config['topsecret.server.com']
topsecret['Port']
###Output
_____no_output_____
###Markdown
Part 1: Read Configuration Using `configparser`, create a program reading in the above configuration file and accessing the individual elements. Part 2: Create a Configuration Grammar Design a grammar that will automatically create configuration files suitable for your above program. Fuzz your program with it. Part 3: Mine a Configuration Grammar By dynamically tracking the individual accesses to configuration elements, you can again extract a basic grammar from the execution. To this end, create a subclass of `ConfigParser` with a special method `__getitem__`:
###Code
class TrackingConfigParser(configparser.ConfigParser):
def __getitem__(self, key):
print("Accessing", repr(key))
return super().__getitem__(key)
###Output
_____no_output_____
###Markdown
For a `TrackingConfigParser` object `p`, `p.__getitem__(key)` will be invoked whenever `p[key]` is accessed:
###Code
tracking_config_parser = TrackingConfigParser()
tracking_config_parser.read('example.ini')
section = tracking_config_parser['topsecret.server.com']
###Output
_____no_output_____
###Markdown
Using `__getitem__()`, as above, implement a tracking mechanism that, while your program accesses the read configuration, automatically saves options accessed and values read. Create a prototype grammar from these values; use it for fuzzing. At the end, don't forget to clean up:
###Code
import os
os.remove("example.ini")
###Output
_____no_output_____
###Markdown
**Solution.** Left to the reader. Enjoy! Exercise 2: C Option Fuzzing In C programs, the `getopt()` function is frequently used to process configuration options. A call```getopt(argc, argv, "bf:")```indicates that the program accepts two options `-b` and `-f`, with `-f` taking an argument (as indicated by the following colon). Part 1: Getopt Fuzzing Write a framework which, for a given C program, automatically extracts the argument to `getopt()` and derives a fuzzing grammar for it. There are multiple ways to achieve this: 1. Scan the program source code for occurrences of `getopt()` and return the string passed. (Crude, but should frequently work.) 2. Insert your own implementation of `getopt()` into the source code (effectively replacing `getopt()` from the runtime library), which outputs the `getopt()` argument and exits the program. Recompile and run. 3. (Advanced.) As above, but instead of changing the source code, hook into the _dynamic linker_ which at runtime links the program with the C runtime library. Set the library loading path (on Linux and Unix, this is the `LD_LIBRARY_PATH` environment variable) such that your own version of `getopt()` is linked first, and the regular libraries later. Executing the program (without recompiling) should yield the desired result. Apply this on `grep` and `ls`; report the resulting grammars and results. **Solution.** Left to the reader. Enjoy hacking! Part 2: Fuzzing Long Options in C Same as Part 1, but also hook into the GNU variant `getopt_long()`, which accepts "long" arguments with double dashes such as `--help`. Note that method 1, above, will not work here, since the "long" options are defined in a separately defined structure. **Solution.** Left to the reader. Enjoy hacking! Exercise 3: Expansions in Context In our above option configurations, we have multiple symbols which all expand to the same integer. For instance, the `--line-range` option of `autopep8` takes two `<line>` parameters which both expand into the same `<int>` symbol:
```
<option> ::= ... | --line-range <line> <line> | ...
<line>   ::= <int>
<int>    ::= (-)?<digit>+
<digit>  ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
```
###Code
autopep8_runner.ebnf_grammar()["<line>"]
autopep8_runner.ebnf_grammar()["<int>"]
autopep8_runner.ebnf_grammar()["<digit>"]
###Output
_____no_output_____
###Markdown
Testing ConfigurationsThe behavior of a program is not only governed by its data. The _configuration_ of a program – that is, the settings that govern the execution of a program on its (regular) input data, as set by options or configuration files – just as well influences behavior, and thus can and should be tested. In this chapter, we explore how to systematically _test_ and _cover_ software configurations. By _automatically inferring configuration options_, we can apply these techniques out of the box, with no need for writing a grammar. Finally, we show how to systematically cover _combinations_ of configuration options, quickly detecting unwanted interferences. **Prerequisites*** You should have read the [chapter on grammars](Grammars.ipynb).* You should have read the [chapter on grammar coverage](GrammarCoverageFuzzer.ipynb). SynopsisTo [use the code provided in this chapter](Importing.ipynb), write```python>>> from fuzzingbook.ConfigurationFuzzer import ```and then make use of the following features.This chapter provides two classes:* `OptionRunner` automatically extract command-line options from a Python program;* `OptionFuzzer` uses these to automatically test a Python program with a large variety of options.`OptionRunner` runs a program up to the point where it parses its arguments, and then extracts a grammar that describes its invocations:```python>>> autopep8_runner = OptionRunner("autopep8", "foo.py")```The grammar can be extracted via the method `ebnf_grammar()`:```python>>> option_ebnf_grammar = autopep8_runner.ebnf_grammar()>>> print(option_ebnf_grammar)```The grammar can be immediately used for fuzzing. A `GrammarCoverageFuzzer` will ensure all options are covered:```python>>> from Grammars import convert_ebnf_grammar>>> fuzzer = GrammarCoverageFuzzer(convert_ebnf_grammar(option_ebnf_grammar))>>> [fuzzer.fuzz() for i in range(3)]```The `OptionFuzzer` class summarizes these steps. Its constructor takes an `OptionRunner` to automatically extract the grammar; it does the necessary steps to extract the grammar and fuzz with it.```python>>> autopep8_runner = OptionRunner("autopep8", "foo.py")>>> autopep8_fuzzer = OptionFuzzer(autopep8_runner)>>> [autopep8_fuzzer.fuzz() for i in range(3)]```The final step in testing would now be to invoke the program with these arguments.Note that `OptionRunner` is experimental: It assumes that the Python program in question uses the `argparse` module; and not all `argparse` features are supported. Still, it does a pretty good job even on nontrivial programs. Configuration OptionsWhen we talk about the input to a program, we usually think of the _data_ it processes. This is also what we have been fuzzing in the past chapters – be it with [random input](Fuzzer.ipynb), [mutation-based fuzzing](MutationFuzzer.ipynb), or [grammar-based fuzzing](GrammarFuzzer.ipynb). However, programs typically have several input sources, all of which can and should be tested – and included in test generation. One important source of input is the program's _configuration_ – that is, a set of inputs that typically is set once when beginning to process data and then stays constant while processing data, while the program is running, or even while the program is deployed. Such a configuration is frequently set in _configuration files_ (for instance, as key/value pairs); the most ubiquitous method for command-line tools, though, are _configuration options_ on the command line. As an example, consider the `grep` utility to find textual patterns in files. 
The exact mode by which `grep` works is governed by a multitude of options, which can be listed by providing a `--help` option:
###Code
!grep --help
###Output
_____no_output_____
###Markdown
All these options need to be tested for whether they operate correctly. In security testing, any such option may also trigger a yet unknown vulnerability. Hence, such options can become _fuzz targets_ on their own. In this chapter, we analyze how to systematically test such options – and better yet, how to extract possible configurations right out of given program files, such that we do not have to specify anything. Options in PythonLet us stick to our common programming language here and examine how options are processed in Python. The `argparse` module provides a parser for command-line arguments (and options) with great functionality – and great complexity. You start by defining a parser (`argparse.ArgumentParser()`) to which individual arguments with various features are added, one after another. Additional parameters for each argument can specify the type (`type`) of the argument (say, integers or strings), or the number of arguments (`nargs`). By default, arguments are stored under their name in the `args` object coming from `parse_args()` – thus, `args.integers` holds the `integer` arguments added earlier. Special actions (`actions`) allow to store specific values in given variables; the `store_const` action stores the given `const` in the attribute named by `dest`. The following example takes a number of integer arguments (`integers`) as well as an operator (`--sum`, `--min`, or `--max`) to be applied on these integers. The operators all store a function reference in the `accumulate` attribute, which is finally invoked on the integers parsed:
###Code
import argparse
def process_numbers(args=[]):
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('integers', metavar='N', type=int, nargs='+',
help='an integer for the accumulator')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--sum', dest='accumulate', action='store_const',
const=sum,
help='sum the integers')
group.add_argument('--min', dest='accumulate', action='store_const',
const=min,
help='compute the minimum')
group.add_argument('--max', dest='accumulate', action='store_const',
const=max,
help='compute the maximum')
args = parser.parse_args(args)
print(args.accumulate(args.integers))
###Output
_____no_output_____
###Markdown
Here's how `process_numbers()` works. We can, for instance, invoke the `--min` option on the given arguments to compute the minimum:
###Code
process_numbers(["--min", "100", "200", "300"])
###Output
_____no_output_____
###Markdown
Or compute the sum of three numbers:
###Code
process_numbers(["--sum", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
When defined via `add_mutually_exclusive_group()` (as above), options are mutually exclusive. Consequently, we can have only one operator:
###Code
import bookutils
from ExpectError import ExpectError
with ExpectError(print_traceback=False):
process_numbers(["--sum", "--max", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
A Grammar for Configurations How can we test a system with several options? The easiest answer is to write a grammar for it. The grammar `PROCESS_NUMBERS_EBNF_GRAMMAR` reflects the possible combinations of options and arguments:
###Code
from Grammars import crange, srange, convert_ebnf_grammar, extend_grammar, is_valid_grammar
from Grammars import START_SYMBOL, new_symbol
PROCESS_NUMBERS_EBNF_GRAMMAR = {
"<start>": ["<operator> <integers>"],
"<operator>": ["--sum", "--min", "--max"],
"<integers>": ["<integer>", "<integers> <integer>"],
"<integer>": ["<digit>+"],
"<digit>": crange('0', '9')
}
assert is_valid_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
PROCESS_NUMBERS_GRAMMAR = convert_ebnf_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
###Output
_____no_output_____
###Markdown
We can feed this grammar into our [grammar coverage fuzzer](GrammarCoverageFuzzer.ipynb) and have it cover one option after another:
###Code
from GrammarCoverageFuzzer import GrammarCoverageFuzzer
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Of course, we can also invoke `process_numbers()` with these very arguments. To this end, we need to convert the string produced by the grammar back into a list of individual arguments, using `split()`:
###Code
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
args = f.fuzz().split()
print(args)
process_numbers(args)
###Output
_____no_output_____
###Markdown
In a similar way, we can define grammars for any program to be tested; as well as define grammars for, say, configuration files. Yet, the grammar has to be updated with every change to the program, which creates a maintenance burden. Given that the information required for the grammar is already all encoded in the program, the question arises: _Can't we go and extract configuration options right out of the program in the first place?_ Mining Configuration OptionsIn this section, we try to extract option and argument information right out of a program, such that we do not have to specify a configuration grammar. The aim is to have a configuration fuzzer that works on the options and arguments of an arbitrary program, as long as it follows specific conventions for processing its arguments. In the case of Python programs, this means using the `argparse` module.Our idea is as follows: We execute the given program up to the point where the arguments are actually parsed – that is, `argparse.parse_args()` is invoked. Up to this point, we track all calls into the argument parser, notably those calls that define arguments and options (`add_argument()`). From these, we construct the grammar. Tracking ArgumentsLet us illustrate this approach with a simple experiment: We define a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while `process_numbers` is invoked. If we have a call to a method `add_argument`, we access and print out the local variables (which at this point are the arguments to the method).
###Code
import sys
import string
def traceit(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(method_name, locals)
###Output
_____no_output_____
###Markdown
What we get is a list of all calls to `add_argument()`, together with the method arguments passed:
###Code
sys.settrace(traceit)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
###Markdown
From the `args` argument, we can access the individual options and arguments to be defined:
###Code
def traceit(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(locals['args'])
sys.settrace(traceit)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
###Markdown
We see that each argument comes as a tuple with one (say, `integers` or `--sum`) or two members (`-h` and `--help`), which denote alternate forms for the same option. Our job will be to go through the arguments of `add_argument()` and detect not only the names of options and arguments, but also whether they accept additional parameters, as well as the type of the parameters. A Grammar Miner for Options and Arguments Let us now build a class that gathers all this information to create a grammar. We use the `ParseInterrupt` exception to interrupt program execution after gathering all arguments and options:
###Code
class ParseInterrupt(Exception):
pass
###Output
_____no_output_____
###Markdown
The class `OptionGrammarMiner` takes an executable function for which the grammar of options and arguments is to be mined:
###Code
class OptionGrammarMiner(object):
def __init__(self, function, log=False):
self.function = function
self.log = log
###Output
_____no_output_____
###Markdown
The method `mine_ebnf_grammar()` is where everything happens. It creates a grammar of the form
```
<start> ::= (<option>)*<arguments>
<option> ::=
<arguments> ::=
```
in which the options and arguments will be collected. It then sets a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while the previously defined `function` is invoked. Raising `ParseInterrupt` (when `parse_args()` is invoked) ends execution.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
OPTION_SYMBOL = "<option>"
ARGUMENTS_SYMBOL = "<arguments>"
def mine_ebnf_grammar(self):
self.grammar = {
START_SYMBOL: ["(" + self.OPTION_SYMBOL + ")*" + self.ARGUMENTS_SYMBOL],
self.OPTION_SYMBOL: [],
self.ARGUMENTS_SYMBOL: []
}
self.current_group = self.OPTION_SYMBOL
old_trace = sys.gettrace()
sys.settrace(self.traceit)
try:
self.function()
except ParseInterrupt:
pass
sys.settrace(old_trace)
return self.grammar
def mine_grammar(self):
return convert_ebnf_grammar(self.mine_ebnf_grammar())
###Output
_____no_output_____
###Markdown
The trace function checks for four methods: `add_argument()` is the most important function, resulting in processing arguments; `frame.f_locals` again is the set of local variables, which at this point is mostly the arguments to `add_argument()`. Since mutually exclusive groups also have a method `add_argument()`, we set the flag `in_group` to differentiate. Note that we make no specific efforts to differentiate between multiple parsers or groups; we simply assume that there is one parser, and at any point at most one mutually exclusive group.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def traceit(self, frame, event, arg):
if event != "call":
return
if "self" not in frame.f_locals:
return
self_var = frame.f_locals["self"]
method_name = frame.f_code.co_name
if method_name == "add_argument":
in_group = repr(type(self_var)).find("Group") >= 0
self.process_argument(frame.f_locals, in_group)
elif method_name == "add_mutually_exclusive_group":
self.add_group(frame.f_locals, exclusive=True)
elif method_name == "add_argument_group":
# self.add_group(frame.f_locals, exclusive=False)
pass
elif method_name == "parse_args":
raise ParseInterrupt
return None
###Output
_____no_output_____
###Markdown
The method `process_argument()` now analyzes the arguments passed and adds them to the grammar: * If the argument starts with `-`, it gets added as an optional element to the `<option>` list. * Otherwise, it gets added to the `<arguments>` list. The optional `nargs` argument specifies how many arguments can follow. If it is a number, we add the appropriate number of elements to the grammar; if it is an abstract specifier (say, `+` or `*`), we use it directly as EBNF operator. Given the large number of parameters and optional behavior, this is a somewhat messy function, but it does the job.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def process_argument(self, locals, in_group):
args = locals["args"]
kwargs = locals["kwargs"]
if self.log:
print(args)
print(kwargs)
print()
for arg in args:
self.process_arg(arg, in_group, kwargs)
class OptionGrammarMiner(OptionGrammarMiner):
def process_arg(self, arg, in_group, kwargs):
if arg.startswith('-'):
if not in_group:
target = self.OPTION_SYMBOL
else:
target = self.current_group
metavar = None
arg = " " + arg
else:
target = self.ARGUMENTS_SYMBOL
metavar = arg
arg = ""
if "nargs" in kwargs:
nargs = kwargs["nargs"]
else:
nargs = 1
param = self.add_parameter(kwargs, metavar)
if param == "":
nargs = 0
if isinstance(nargs, int):
for i in range(nargs):
arg += param
else:
assert nargs in "?+*"
arg += '(' + param + ')' + nargs
if target == self.OPTION_SYMBOL:
self.grammar[target].append(arg)
else:
self.grammar[target].append(arg)
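# Illustration (added comment, following the rules above): an option such as
# --line-range declared with nargs=2, type=int, and metavar='line' yields the
# <option> expansion " --line-range <line> <line>", while a positional argument
# declared with nargs='+' and type=int yields an <arguments> expansion such as
# "( <integers>)+".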
###Output
_____no_output_____
###Markdown
The method `add_parameter()` handles possible parameters of options. If the argument has an `action` defined, it takes no parameter. Otherwise, we identify the type of the parameter (as `int` or `str`) and augment the grammar with an appropriate rule.
###Code
import inspect
class OptionGrammarMiner(OptionGrammarMiner):
def add_parameter(self, kwargs, metavar):
if "action" in kwargs:
# No parameter
return ""
type_ = "str"
if "type" in kwargs:
given_type = kwargs["type"]
# int types come as '<class int>'
if inspect.isclass(given_type) and issubclass(given_type, int):
type_ = "int"
if metavar is None:
if "metavar" in kwargs:
metavar = kwargs["metavar"]
else:
metavar = type_
self.add_type_rule(type_)
if metavar != type_:
self.add_metavar_rule(metavar, type_)
param = " <" + metavar + ">"
return param
###Output
_____no_output_____
###Markdown
The method `add_type_rule()` adds a rule for parameter types to the grammar. If the parameter is identified by a meta-variable (say, `N`), we add a rule for this as well to improve legibility.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_type_rule(self, type_):
if type_ == "int":
self.add_int_rule()
else:
self.add_str_rule()
def add_int_rule(self):
self.grammar["<int>"] = ["(-)?<digit>+"]
self.grammar["<digit>"] = crange('0', '9')
def add_str_rule(self):
self.grammar["<str>"] = ["<char>+"]
self.grammar["<char>"] = srange(
string.digits
+ string.ascii_letters
+ string.punctuation)
def add_metavar_rule(self, metavar, type_):
self.grammar["<" + metavar + ">"] = ["<" + type_ + ">"]
###Output
_____no_output_____
###Markdown
The method `add_group()` adds a new mutually exclusive group to the grammar. We define a new symbol (say, `<group>`) for the options added to the group, and use the `required` and `exclusive` flags to define an appropriate expansion operator. The group is then prefixed to the grammar, as in
```
<start> ::= <group>(<option>)*<arguments>
<group> ::=
```
and filled with the next calls to `add_argument()` within the group.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_group(self, locals, exclusive):
kwargs = locals["kwargs"]
if self.log:
print(kwargs)
required = kwargs.get("required", False)
group = new_symbol(self.grammar, "<group>")
if required and exclusive:
group_expansion = group
if required and not exclusive:
group_expansion = group + "+"
if not required and exclusive:
group_expansion = group + "?"
if not required and not exclusive:
group_expansion = group + "*"
self.grammar[START_SYMBOL][0] = group_expansion + \
self.grammar[START_SYMBOL][0]
self.grammar[group] = []
self.current_group = group
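# Added note: for the required, mutually exclusive group in process_numbers(),
# this prepends plain "<group>" to the <start> expansion; an optional,
# non-exclusive group would be prepended as "<group>*" instead.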
###Output
_____no_output_____
###Markdown
That's it! With this, we can now extract the grammar from our `process_numbers()` program. Turning on logging again reveals the variables we draw upon.
###Code
miner = OptionGrammarMiner(process_numbers, log=True)
process_numbers_grammar = miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
Here is the extracted grammar:
###Code
process_numbers_grammar
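# Roughly, the mined EBNF grammar contains entries like the following
# (added sketch; the actual output is not reproduced in this text):
#   <start>     -> <group>(<option>)*<arguments>
#   <group>     -> " --sum" | " --min" | " --max"
#   <option>    -> " -h" | " --help"
#   <arguments> -> "( <integers>)+"
#   <integers>  -> <int>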
###Output
_____no_output_____
###Markdown
The grammar properly identifies the group found:
###Code
process_numbers_grammar["<start>"]
process_numbers_grammar["<group>"]
###Output
_____no_output_____
###Markdown
It also identifies a `--help` option provided not by us, but by the `argparse` module:
###Code
process_numbers_grammar["<option>"]
###Output
_____no_output_____
###Markdown
The grammar also correctly identifies the types of the arguments:
###Code
process_numbers_grammar["<arguments>"]
process_numbers_grammar["<integers>"]
###Output
_____no_output_____
###Markdown
The rules for `int` are set as defined by `add_int_rule()`
###Code
process_numbers_grammar["<int>"]
###Output
_____no_output_____
###Markdown
We can take this grammar and convert it to BNF, such that we can fuzz with it right away:
###Code
assert is_valid_grammar(process_numbers_grammar)
grammar = convert_ebnf_grammar(process_numbers_grammar)
assert is_valid_grammar(grammar)
f = GrammarCoverageFuzzer(grammar)
for i in range(10):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Each and every invocation adheres to the rules as set forth in the `argparse` calls. By mining options and arguments from existing programs, we can now fuzz these options out of the box – without having to specify a grammar. Testing Autopep8 Let us try out the option grammar miner on real-world Python programs. `autopep8` is a tool that automatically converts Python code to the [PEP 8 Style Guide for Python Code](https://www.python.org/dev/peps/pep-0008/). (Actually, all Python code in this book runs through `autopep8` during production.) `autopep8` offers a wide range of options, as can be seen by invoking it with `--help`:
###Code
!autopep8 --help
###Output
_____no_output_____
###Markdown
Autopep8 Setup We want to systematically test these options. In order to deploy our configuration grammar miner, we need to find the source code of the executable:
###Code
import os
def find_executable(name):
for path in os.get_exec_path():
qualified_name = os.path.join(path, name)
if os.path.exists(qualified_name):
return qualified_name
return None
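# (Added note: find_executable() scans the directories in the PATH environment
# variable and returns the first match; the standard library's shutil.which()
# provides similar functionality.)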
autopep8_executable = find_executable("autopep8")
assert autopep8_executable is not None
autopep8_executable
###Output
_____no_output_____
###Markdown
Next, we build a function that reads the contents of the file and executes it.
###Code
def autopep8():
executable = find_executable("autopep8")
# First line has to contain "/usr/bin/env python" or like
first_line = open(executable).readline()
assert first_line.find("python") >= 0
contents = open(executable).read()
exec(contents)
###Output
_____no_output_____
###Markdown
Mining an Autopep8 Grammar We can use the `autopep8()` function in our grammar miner:
###Code
autopep8_miner = OptionGrammarMiner(autopep8)
###Output
_____no_output_____
###Markdown
and extract a grammar for it:
###Code
autopep8_ebnf_grammar = autopep8_miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
This works because here, `autopep8` is not a separate process (and a separate Python interpreter), but we run the `autopep8()` function (and the `autopep8` code) in our current Python interpreter – up to the call to `parse_args()`, where we interrupt execution again. At this point, the `autopep8` code has done nothing but setting up the argument parser – which is what we are interested in. The grammar options mined reflect precisely the options seen when providing `--help`:
###Code
print(autopep8_ebnf_grammar["<option>"])
###Output
_____no_output_____
###Markdown
Metavariables like `<n>` or `<line>` are placeholders for integers. We assume all metavariables of the same name have the same type:
###Code
autopep8_ebnf_grammar["<line>"]
###Output
_____no_output_____
###Markdown
The grammar miner has inferred that the argument to `autopep8` is a list of files:
###Code
autopep8_ebnf_grammar["<arguments>"]
###Output
_____no_output_____
###Markdown
which in turn all are strings:
###Code
autopep8_ebnf_grammar["<files>"]
###Output
_____no_output_____
###Markdown
As we are only interested in testing options, not arguments, we fix the arguments to a single mandatory input. (Otherwise, we'd have plenty of random file names generated.)
###Code
autopep8_ebnf_grammar["<arguments>"] = [" <files>"]
autopep8_ebnf_grammar["<files>"] = ["foo.py"]
assert is_valid_grammar(autopep8_ebnf_grammar)
###Output
_____no_output_____
###Markdown
Creating Autopep8 Options Let us now use the inferred grammar for fuzzing. Again, we convert the EBNF grammar into a regular BNF grammar:
###Code
autopep8_grammar = convert_ebnf_grammar(autopep8_ebnf_grammar)
assert is_valid_grammar(autopep8_grammar)
###Output
_____no_output_____
###Markdown
And we can use the grammar for fuzzing all options:
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=4)
for i in range(20):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Let us apply these options on the actual program. We need a file `foo.py` that will serve as input: (Note that the following commands will overwrite the file `foo.py`, if it already exists in the current working directory. Be aware of this, if you downloaded the notebooks and are working locally.)
###Code
def create_foo_py():
open("foo.py", "w").write("""
def twice(x = 2):
return x + x
""")
create_foo_py()
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We see how `autopep8` fixes the spacing:
###Code
!autopep8 foo.py
###Output
_____no_output_____
###Markdown
Let us now put things together. We define a `ProgramRunner` that will run the `autopep8` executable with arguments coming from the mined `autopep8` grammar.
###Code
from Fuzzer import ProgramRunner
###Output
_____no_output_____
###Markdown
Running `autopep8` with the mined options reveals a surprisingly high number of passing runs. (We see that some options depend on each other or are mutually exclusive, but this is handled by the program logic, not the argument parser, and hence out of our scope.) The `GrammarCoverageFuzzer` ensures that each option is tested at least once. (Digits and letters, too, by the way.)
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=5)
for i in range(20):
invocation = "autopep8" + f.fuzz()
print("$ " + invocation)
args = invocation.split()
autopep8 = ProgramRunner(args)
result, outcome = autopep8.run()
if result.stderr != "":
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
Our `foo.py` file now has been formatted in place a number of times:
###Code
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We don't need it anymore, so we clean up things:
###Code
import os
os.remove("foo.py")
###Output
_____no_output_____
###Markdown
Classes for Fuzzing Configuration Options Let us now create reusable classes that we can use for testing arbitrary programs. (Okay, make that "arbitrary programs that are written in Python and use the `argparse` module to process command-line arguments.") The class `OptionRunner` is a subclass of `ProgramRunner` that takes care of automatically determining the grammar, using the same steps we used for `autopep8`, above.
###Code
class OptionRunner(ProgramRunner):
def __init__(self, program, arguments=None):
if isinstance(program, str):
self.base_executable = program
else:
self.base_executable = program[0]
self.find_contents()
self.find_grammar()
if arguments is not None:
self.set_arguments(arguments)
super().__init__(program)
###Output
_____no_output_____
###Markdown
First, we find the contents of the Python executable:
###Code
class OptionRunner(OptionRunner):
def find_contents(self):
self._executable = find_executable(self.base_executable)
first_line = open(self._executable).readline()
assert first_line.find("python") >= 0
self.contents = open(self._executable).read()
def invoker(self):
exec(self.contents)
def executable(self):
return self._executable
###Output
_____no_output_____
###Markdown
Next, we determine the grammar using the `OptionGrammarMiner` class:
###Code
class OptionRunner(OptionRunner):
def find_grammar(self):
miner = OptionGrammarMiner(self.invoker)
self._ebnf_grammar = miner.mine_ebnf_grammar()
def ebnf_grammar(self):
return self._ebnf_grammar
def grammar(self):
return convert_ebnf_grammar(self._ebnf_grammar)
###Output
_____no_output_____
###Markdown
The two service methods `set_arguments()` and `set_invocation()` help us to change the arguments and program, respectively.
###Code
from Grammars import unreachable_nonterminals
class OptionRunner(OptionRunner):
def set_arguments(self, args):
self._ebnf_grammar["<arguments>"] = [" " + args]
# Delete rules for previous arguments
for nonterminal in unreachable_nonterminals(self._ebnf_grammar):
del self._ebnf_grammar[nonterminal]
def set_invocation(self, program):
self.program = program
###Output
_____no_output_____
###Markdown
We can instantiate the class on `autopep8` and immediately get the grammar:
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
print(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
###Markdown
An `OptionFuzzer` interacts with the given `OptionRunner` to obtain its grammar, which is then passed to its `GrammarCoverageFuzzer` superclass.
###Code
class OptionFuzzer(GrammarCoverageFuzzer):
def __init__(self, runner, *args, **kwargs):
assert issubclass(type(runner), OptionRunner)
self.runner = runner
grammar = runner.grammar()
super().__init__(grammar, *args, **kwargs)
###Output
_____no_output_____
###Markdown
When invoking `run()`, the `OptionFuzzer` creates a new invocation (using `fuzz()` from its grammar) and runs the now given (or previously set) runner with the arguments from the grammar. Note that the runner specified in `run()` can differ from the one set during initialization; this allows for mining options from one program and applying it in another context.
###Code
class OptionFuzzer(OptionFuzzer):
def run(self, runner=None, inp=""):
if runner is None:
runner = self.runner
assert issubclass(type(runner), OptionRunner)
invocation = runner.executable() + " " + self.fuzz()
runner.set_invocation(invocation.split())
return runner.run(inp)
###Output
_____no_output_____
###Markdown
Example: Autopep8 Let us apply our newly defined classes on the `autopep8` runner:
###Code
autopep8_fuzzer = OptionFuzzer(autopep8_runner, max_nonterminals=5)
for i in range(3):
print(autopep8_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
We can now systematically test `autopep8` with these classes:
###Code
autopep8_fuzzer.run(autopep8_runner)
###Output
_____no_output_____
###Markdown
Example: MyPy We can extract options for the `mypy` static type checker for Python:
###Code
assert find_executable("mypy") is not None
mypy_runner = OptionRunner("mypy", "foo.py")
print(mypy_runner.ebnf_grammar()["<option>"])
mypy_fuzzer = OptionFuzzer(mypy_runner, max_nonterminals=5)
for i in range(10):
print(mypy_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Example: Notedown Here are the configuration options for the `notedown` Notebook to Markdown converter:
###Code
assert find_executable("notedown") is not None
notedown_runner = OptionRunner("notedown")
print(notedown_runner.ebnf_grammar()["<option>"])
notedown_fuzzer = OptionFuzzer(notedown_runner, max_nonterminals=5)
for i in range(10):
print(notedown_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Combinatorial Testing Our `GrammarCoverageFuzzer` does a good job in covering each and every option at least once, which is great for systematic testing. However, as we also can see in our examples above, some options require each other, while others interfere with each other. What we should do as good testers is not only to cover every option individually, but also _combinations_ of options. The Python `itertools` module gives us means to create combinations from lists. We can, for instance, take the `notedown` options and create a list of all pairs.
###Code
from itertools import combinations
option_list = notedown_runner.ebnf_grammar()["<option>"]
pairs = list(combinations(option_list, 2))
###Output
_____no_output_____
###Markdown
There's quite a number of pairs:
###Code
len(pairs)
print(pairs[:20])
###Output
_____no_output_____
###Markdown
Testing every such pair of options frequently suffices to cover all interferences between options. (Programs rarely have conditions involving three or more configuration settings.) To this end, we _change_ the grammar from having a list of options to having a list of _option pairs_, such that covering these will automatically cover all pairs. We create a function `pairwise()` that takes a list of options as occurring in our grammar and returns a list of _pairwise options_ – that is, our original options, but concatenated.
###Code
def pairwise(option_list):
return [option_1 + option_2
for (option_1, option_2) in combinations(option_list, 2)]
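# Added sketch (not in the original notebook): covering triples or longer
# combinations works the same way; k_wise(option_list, 3) would enumerate all
# option triples, at a steep cost in the number of expansions to cover.
def k_wise(option_list, k=2):
    return ["".join(opts) for opts in combinations(option_list, k)]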
###Output
_____no_output_____
###Markdown
Here are the first 20 pairs:
###Code
print(pairwise(option_list)[:20])
###Output
_____no_output_____
###Markdown
The new grammar `pairwise_notedown_grammar` is a copy of the `notedown` grammar, but with the list of options replaced with the above pairwise option list.
###Code
notedown_grammar = notedown_runner.grammar()
pairwise_notedown_grammar = extend_grammar(notedown_grammar)
pairwise_notedown_grammar["<option>"] = pairwise(notedown_grammar["<option>"])
assert is_valid_grammar(pairwise_notedown_grammar)
###Output
_____no_output_____
###Markdown
Using the "pairwise" grammar to fuzz now covers one pair after another:
###Code
notedown_fuzzer = GrammarCoverageFuzzer(
pairwise_notedown_grammar, max_nonterminals=4)
for i in range(10):
print(notedown_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Can we actually test all combinations of options? Not in practice, as the number of combinations quickly grows as the length increases. It decreases again as the number of options reaches the maximum (with 20 options, there is only 1 combination involving _all_ options), but the absolute numbers are still staggering:
###Code
for combination_length in range(1, 20):
tuples = list(combinations(option_list, combination_length))
print(combination_length, len(tuples))
###Output
_____no_output_____
###Markdown
Formally, the number of combinations of length $k$ in a set of options of length $n$ is the binomial coefficient$${n \choose k} = \frac{n!}{k!(n - k)!}$$ which for $k = 2$ (all pairs) gives us$${n \choose 2} = \frac{n!}{2(n - 2)!} = \frac{n (n - 1)}{2}$$ For `autopep8` with its 29 options...
###Code
len(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
###Markdown
... we thus have 406 distinct pairs. However, the binomial coefficient does not differentiate between permutations of elements of the pairs, which our tests do. Therefore we need 812 tests to cover all pairs:
###Code
len(autopep8_runner.ebnf_grammar()["<option>"]) * \
(len(autopep8_runner.ebnf_grammar()["<option>"]) - 1)
###Output
_____no_output_____
###Markdown
For `mypy` with its 110 options, though, we already end up with 11,990 tests to be conducted:
###Code
len(mypy_runner.ebnf_grammar()["<option>"])
len(mypy_runner.ebnf_grammar()["<option>"]) * \
(len(mypy_runner.ebnf_grammar()["<option>"]) - 1)
###Output
_____no_output_____
###Markdown
Even if each pair takes a second to run, we'd still be done in three hours of testing, though. If your program has more options that you want to cover in combinations, it is advisable that you limit the number of configurations further – for instance by limiting combinatorial testing to those combinations that possibly can interact with each other; and covering all other (presumably orthogonal) options individually. This mechanism of creating configurations by extending grammars can be easily extended to other configuration targets. One may want to explore a greater number of configurations, or expansions in specific contexts. The [exercises](Exercises), below, have a number of options ready for you. Synopsis This chapter provides two classes:* `OptionRunner` automatically extracts command-line options from a Python program;* `OptionFuzzer` uses these to automatically test a Python program with a large variety of options. `OptionRunner` runs a program up to the point where it parses its arguments, and then extracts a grammar that describes its invocations:
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
###Output
_____no_output_____
###Markdown
The grammar can be extracted via the method `ebnf_grammar()`:
###Code
option_ebnf_grammar = autopep8_runner.ebnf_grammar()
print(option_ebnf_grammar)
###Output
_____no_output_____
###Markdown
The grammar can be immediately used for fuzzing. A `GrammarCoverageFuzzer` will ensure all options are covered:
###Code
from Grammars import convert_ebnf_grammar
fuzzer = GrammarCoverageFuzzer(convert_ebnf_grammar(option_ebnf_grammar))
[fuzzer.fuzz() for i in range(3)]
###Output
_____no_output_____
###Markdown
The `OptionFuzzer` class summarizes these steps. Its constructor takes an `OptionRunner` to automatically extract the grammar; it does the necessary steps to extract the grammar and fuzz with it.
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
autopep8_fuzzer = OptionFuzzer(autopep8_runner)
[autopep8_fuzzer.fuzz() for i in range(3)]
###Output
_____no_output_____
###Markdown
The final step in testing would now be to invoke the program with these arguments. Note that `OptionRunner` is experimental: It assumes that the Python program in question uses the `argparse` module; and not all `argparse` features are supported. Still, it does a pretty good job even on nontrivial programs. Lessons Learned* Besides regular input data, program _configurations_ make an important testing target.* For a given program using a standard library to parse command-line options and arguments, one can automatically extract these and convert them into a grammar.* To cover not only single options, but combinations of options, one can expand the grammar to cover all pairs, or come up with even more ambitious targets. Next Steps If you liked the idea of mining a grammar from a program, do not miss:* [how to mine grammars for input data](GrammarMiner.ipynb) Our next steps in the book focus on:* [how to parse and recombine inputs](Parser.ipynb)* [how to assign weights and probabilities to specific productions](ProbabilisticGrammarFuzzer.ipynb)* [how to simplify inputs that cause a failure](Reducer.ipynb) Background Although configuration data is just as likely to cause failures as other input data, it has received relatively little attention in test generation – possibly because, unlike "regular" input data, configuration data is not so much under control of external parties, and because, again unlike regular data, there is little variance in configurations. Creating models for software configurations and using these models for testing is commonplace, as is the idea of pairwise testing. For an overview, see \cite{Pezze2008}; for a discussion and comparison of state-of-the-art techniques, see \cite{Petke2015}. More specifically, \cite{Sutton2007} also discuss techniques to systematically cover command-line options. Dai et al. \cite{Dai2010} apply configuration fuzzing by changing variables associated with configuration files. Exercises Exercise 1: #ifdef Configuration Fuzzing In C programs, the *C preprocessor* can be used to choose which code parts should be compiled and which ones should not. As an example, in the C code
```c
#ifdef LONG_FOO
long foo() { ... }
#else
int foo() { ... }
#endif
```
the compiler will compile the function `foo()` with return type `long` if the _preprocessor variable_ `LONG_FOO` is defined, and with return type `int` if not. Such preprocessor variables are either set in the source files (using `#define`, as in `#define LONG_FOO`) or on the C compiler command line (using `-D<variable>` or `-D<variable>=<value>`, as in `-DLONG_FOO`). Such *conditional compilation* is used to configure C programs towards their environment. System-specific code can contain lots of conditional compilation. As an example, consider this excerpt of `xmlparse.c`, the XML parser that is part of the Python runtime library:
```c
#if defined(_WIN32) && !defined(LOAD_LIBRARY_SEARCH_SYSTEM32)
# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800
#endif

#if !defined(HAVE_GETRANDOM) && !defined(HAVE_SYSCALL_GETRANDOM) \
    && !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_ARC4RANDOM) \
    && !defined(XML_DEV_URANDOM) \
    && !defined(_WIN32) \
    && !defined(XML_POOR_ENTROPY)
# error
#endif

#if !defined(TIOCSWINSZ) || defined(__SCO__) || defined(__UNIXWARE__)
#define USE_SYSV_ENVVARS /* COLUMNS/LINES vs. TERMCAP */
#endif

#ifdef XML_UNICODE_WCHAR_T
#define XML_T(x) (const wchar_t)x
#define XML_L(x) L ## x
#else
#define XML_T(x) (const unsigned short)x
#define XML_L(x) x
#endif

int fun(int x) { return XML_T(x); }
```
A typical configuration for the C preprocessor on the above code could be `cc -c -D_WIN32 -DXML_POOR_ENTROPY -DXML_UNICODE_WCHAR_T xmlparse.c`, defining the given preprocessor variables and selecting the appropriate code fragments. Since the compiler can only compile one configuration at a time (implying that we can also only _test_ one resulting executable at a time), your task is to find out which of these configurations actually compile. To this end, proceed in three steps. Part 1: Extract Preprocessor Variables Write a _function_ `cpp_identifiers()` that, given a set of lines (say, from `open(filename).readlines()`), extracts all preprocessor variables referenced in `#if` or `#ifdef` preprocessor instructions. Apply `cpp_identifiers()` on the sample C input above, such that
```python
cpp_identifiers(open("xmlparse.c").readlines())
```
returns the set
```python
{'_WIN32', 'LOAD_LIBRARY_SEARCH_SYSTEM32', 'HAVE_GETRANDOM', 'HAVE_SYSCALL_GETRANDOM', 'HAVE_ARC4RANDOM_BUF', ...}
```
**Solution.** Let us start with creating a sample input file, `xmlparse.c`:
###Code
filename = "xmlparse.c"
open(filename, "w").write(
"""
#if defined(_WIN32) && !defined(LOAD_LIBRARY_SEARCH_SYSTEM32)
# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800
#endif
#if !defined(HAVE_GETRANDOM) && !defined(HAVE_SYSCALL_GETRANDOM) \
&& !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_ARC4RANDOM) \
&& !defined(XML_DEV_URANDOM) \
&& !defined(_WIN32) \
&& !defined(XML_POOR_ENTROPY)
# error
#endif
#if !defined(TIOCSWINSZ) || defined(__SCO__) || defined(__UNIXWARE__)
#define USE_SYSV_ENVVARS /* COLUMNS/LINES vs. TERMCAP */
#endif
#ifdef XML_UNICODE_WCHAR_T
#define XML_T(x) (const wchar_t)x
#define XML_L(x) L ## x
#else
#define XML_T(x) (const unsigned short)x
#define XML_L(x) x
#endif
int fun(int x) { return XML_T(x); }
""");
###Output
_____no_output_____
###Markdown
To find C preprocessor `#if` directives and preprocessor variables, we use regular expressions matching them.
###Code
import re
re_cpp_if_directive = re.compile(r"\s*#\s*(el)?if")
re_cpp_identifier = re.compile(r"[a-zA-Z_$]+")
def cpp_identifiers(lines):
identifiers = set()
for line in lines:
if re_cpp_if_directive.match(line):
identifiers |= set(re_cpp_identifier.findall(line))
# These are preprocessor keywords
identifiers -= {"if", "ifdef", "ifndef", "defined"}
return identifiers
cpp_ids = cpp_identifiers(open("xmlparse.c").readlines())
cpp_ids
###Output
_____no_output_____
###Markdown
Part 2: Derive an Option Grammar With the help of `cpp_identifiers()`, create a grammar which has C compiler invocations with a list of options, where each option takes the form `-D<variable>` for a preprocessor variable `<variable>`. Using this grammar `cpp_grammar`, a fuzzer
```python
g = GrammarCoverageFuzzer(cpp_grammar)
```
would create C compiler invocations such as
```python
[g.fuzz() for i in range(10)]
['cc -DHAVE_SYSCALL_GETRANDOM xmlparse.c', 'cc -D__SCO__ -DRANDOM_BUF -DXML_UNICODE_WCHAR_T -D__UNIXWARE__ xmlparse.c', 'cc -DXML_POOR_ENTROPY xmlparse.c', 'cc -DRANDOM xmlparse.c', 'cc -D_WIN xmlparse.c', 'cc -DHAVE_ARC xmlparse.c', ...]
```
**Solution.** This is not very difficult:
###Code
from Grammars import new_symbol
cpp_grammar = {
"<start>": ["cc -c<options> " + filename],
"<options>": ["<option>", "<options><option>"],
"<option>": []
}
for id in cpp_ids:
s = new_symbol(cpp_grammar, "<" + id + ">")
cpp_grammar["<option>"].append(s)
cpp_grammar[s] = [" -D" + id]
cpp_grammar
assert is_valid_grammar(cpp_grammar)
###Output
_____no_output_____
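###Markdown
A quick check, using only the objects defined above: every extracted identifier should have become exactly one `-D` option in the grammar.
###Code
len(cpp_grammar["<option>"]) == len(cpp_ids)
###Output
_____no_output_____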
###Markdown
Part 3: C Preprocessor Configuration FuzzingUsing the grammar just produced, use a `GrammarCoverageFuzzer` to1. Test each preprocessor variable individually2. Test each pair of preprocessor variables, using `pairwise()`.What happens if you actually run the invocations? **Solution.** We can simply run the coverage fuzzer, as described above.
###Code
g = GrammarCoverageFuzzer(cpp_grammar)
g.fuzz()
from Fuzzer import ProgramRunner
for i in range(10):
invocation = g.fuzz()
print("$", invocation)
# subprocess.call(invocation, shell=True)
cc_runner = ProgramRunner(invocation.split(' '))
(result, outcome) = cc_runner.run()
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
To test all pairs, we can use `pairwise()`:
###Code
pairwise_cpp_grammar = extend_grammar(cpp_grammar)
pairwise_cpp_grammar["<option>"] = pairwise(cpp_grammar["<option>"])
pairwise_cpp_grammar["<option>"][:10]
pairwise_g = GrammarCoverageFuzzer(pairwise_cpp_grammar)  # fuzz over the pairwise grammar
for i in range(10):
    invocation = pairwise_g.fuzz()
print("$", invocation)
# subprocess.call(invocation, shell=True)
cc_runner = ProgramRunner(invocation.split(' '))
(result, outcome) = cc_runner.run()
print(result.stderr, end="")
###Output
_____no_output_____
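###Markdown
To answer which of these configurations actually compile, we can also record the outcome of each invocation. Here is a minimal sketch (the helper `compiling_configurations()` is our own addition; it assumes that a return code of 0 means successful compilation):
###Code
def compiling_configurations(fuzzer, trials=10):
    """Return the set of fuzzed invocations that compile without errors."""
    passing = set()
    for i in range(trials):
        invocation = fuzzer.fuzz()
        runner = ProgramRunner(invocation.split(' '))
        (result, outcome) = runner.run()
        if result.returncode == 0:  # compilation succeeded
            passing.add(invocation)
    return passing

# compiling_configurations(g)  # uncomment to give it a try
###Output
_____no_output_____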
###Markdown
Some of the compilation errors we get could be expected – for instance, defining `XML_UNICODE_WCHAR_T` when the type is actually not supported in our environment. Other errors may not be expected – and it is these errors we would find through systematic configuration fuzzing, as described above. At the end, don't forget to clean up:
###Code
os.remove("xmlparse.c")
if os.path.exists("xmlparse.o"):
os.remove("xmlparse.o")
###Output
_____no_output_____
###Markdown
Exercise 2: .ini Configuration FuzzingBesides command-line options, another important source of configurations are _configuration files_. In this exercise, we will consider the very simple configuration language provided by the Python `ConfigParser` module, which is very similar to what is found in Microsoft Windows _.ini_ files. The following example for a `ConfigParser` input file stems right from [the ConfigParser documentation](https://docs.python.org/3/library/configparser.html):```[DEFAULT]ServerAliveInterval = 45Compression = yesCompressionLevel = 9ForwardX11 = yes[bitbucket.org]User = hg[topsecret.server.com]Port = 50022ForwardX11 = no``` The above `ConfigParser` file can be created programmatically:
###Code
import configparser
config = configparser.ConfigParser()
config['DEFAULT'] = {'ServerAliveInterval': '45',
'Compression': 'yes',
'CompressionLevel': '9'}
config['bitbucket.org'] = {}
config['bitbucket.org']['User'] = 'hg'
config['topsecret.server.com'] = {}
topsecret = config['topsecret.server.com']
topsecret['Port'] = '50022' # mutates the parser
topsecret['ForwardX11'] = 'no' # same here
config['DEFAULT']['ForwardX11'] = 'yes'
with open('example.ini', 'w') as configfile:
config.write(configfile)
with open('example.ini') as configfile:
print(configfile.read(), end="")
###Output
_____no_output_____
###Markdown
and be read in again:
###Code
config = configparser.ConfigParser()
config.read('example.ini')
topsecret = config['topsecret.server.com']
topsecret['Port']
###Output
_____no_output_____
###Markdown
Part 1: Read ConfigurationUsing `configparser`, create a program reading in the above configuration file and accessing the individual elements. Part 2: Create a Configuration GrammarDesign a grammar that will automatically create configuration files suitable for your above program. Fuzz your program with it. Part 3: Mine a Configuration GrammarBy dynamically tracking the individual accesses to configuration elements, you can again extract a basic grammar from the execution. To this end, create a subclass of `ConfigParser` with a special method `__getitem__`:
###Code
class TrackingConfigParser(configparser.ConfigParser):
def __getitem__(self, key):
print("Accessing", repr(key))
return super().__getitem__(key)
###Output
_____no_output_____
###Markdown
For a `TrackingConfigParser` object `p`, `p.__getitem__(key)` will be invoked whenever `p[key]` is accessed:
###Code
tracking_config_parser = TrackingConfigParser()
tracking_config_parser.read('example.ini')
section = tracking_config_parser['topsecret.server.com']
###Output
_____no_output_____
###Markdown
Using `__getitem__()`, as above, implement a tracking mechanism that, while your program accesses the read configuration, automatically saves options accessed and values read. Create a prototype grammar from these values; use it for fuzzing. At the end, don't forget to clean up:
###Code
import os
os.remove("example.ini")
###Output
_____no_output_____
###Markdown
**Solution.** Left to the reader. Enjoy! Exercise 3: Extracting and Fuzzing C Command-Line OptionsIn C programs, the `getopt()` function are frequently used to process configuration options. A call```getopt(argc, argv, "bf:")```indicates that the program accepts two options `-b` and `-f`, with `-f` taking an argument (as indicated by the following colon). Part 1: Getopt FuzzingWrite a framework which, for a given C program, automatically extracts the argument to `getopt()` and derives a fuzzing grammar for it. There are multiple ways to achieve this:1. Scan the program source code for occurrences of `getopt()` and return the string passed. (Crude, but should frequently work.)2. Insert your own implementation of `getopt()` into the source code (effectively replacing `getopt()` from the runtime library), which outputs the `getopt()` argument and exits the program. Recompile and run.3. (Advanced.) As above, but instead of changing the source code, hook into the _dynamic linker_ which at runtime links the program with the C runtime library. Set the library loading path (on Linux and Unix, this is the `LD_LIBRARY_PATH` environment variable) such that your own version of `getopt()` is linked first, and the regular libraries later. Executing the program (without recompiling) should yield the desired result.Apply this on `grep` and `ls`; report the resulting grammars and results. **Solution.** Left to the reader. Enjoy hacking! Part 2: Fuzzing Long Options in CSame as Part 1, but also hook into the GNU variant `getopt_long()`, which accepts "long" arguments with double dashes such as `--help`. Note that method 1, above, will not work here, since the "long" options are defined in a separately defined structure. **Solution.** Left to the reader. Enjoy hacking! Exercise 4: Expansions in ContextIn our above option configurations, we have multiple symbols which all expand to the same integer. For instance, the `--line-range` option of `autopep8` takes two `` parameters which both expand into the same `` symbol:``` ::= ... | --line-range | ... ::= ::= (-)?+ ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9```
###Code
autopep8_runner.ebnf_grammar()["<line>"]
autopep8_runner.ebnf_grammar()["<int>"]
autopep8_runner.ebnf_grammar()["<digit>"]
###Output
_____no_output_____
###Markdown
Testing ConfigurationsThe behavior of a program is not only governed by its data. The _configuration_ of a program – that is, the settings that govern the execution of a program on its (regular) input data, as set by options or configuration files – just as well influences behavior, and thus can and should be tested. In this chapter, we explore how to systematically _test_ and _cover_ software configurations. By _automatically inferring configuration options_, we can apply these techniques out of the box, with no need for writing a grammar. Finally, we show how to systematically cover _combinations_ of configuration options, quickly detecting unwanted interferences. **Prerequisites*** You should have read the [chapter on grammars](Grammars.ipynb).* You should have read the [chapter on grammar coverage](GrammarCoverageFuzzer.ipynb). SynopsisTo [use the code provided in this chapter](Importing.ipynb), write```python>>> from fuzzingbook.ConfigurationFuzzer import ```and then make use of the following features.This chapter provides two classes:* `OptionRunner` automatically extract command-line options from a Python program;* `OptionFuzzer` uses these to automatically test a Python program with a large variety of options.`OptionRunner` runs a program up to the point where it parses its arguments, and then extracts a grammar that describes its invocations:```python>>> autopep8_runner = OptionRunner("autopep8", "foo.py")```The grammar can be extracted via the method `ebnf_grammar()`:```python>>> option_ebnf_grammar = autopep8_runner.ebnf_grammar()>>> print(option_ebnf_grammar)```The grammar can be immediately used for fuzzing. A `GrammarCoverageFuzzer` will ensure all options are covered:```python>>> from Grammars import convert_ebnf_grammar>>> fuzzer = GrammarCoverageFuzzer(convert_ebnf_grammar(option_ebnf_grammar))>>> [fuzzer.fuzz() for i in range(3)]```The `OptionFuzzer` class summarizes these steps. Its constructor takes an `OptionRunner` to automatically extract the grammar; it does the necessary steps to extract the grammar and fuzz with it.```python>>> autopep8_runner = OptionRunner("autopep8", "foo.py")>>> autopep8_fuzzer = OptionFuzzer(autopep8_runner)>>> [autopep8_fuzzer.fuzz() for i in range(3)]```The final step in testing would now be to invoke the program with these arguments.Note that `OptionRunner` is experimental: It assumes that the Python program in question uses the `argparse` module; and not all `argparse` features are supported. Still, it does a pretty good job even on nontrivial programs. Configuration OptionsWhen we talk about the input to a program, we usually think of the _data_ it processes. This is also what we have been fuzzing in the past chapters – be it with [random input](Fuzzer.ipynb), [mutation-based fuzzing](MutationFuzzer.ipynb), or [grammar-based fuzzing](GrammarFuzzer.ipynb). However, programs typically have several input sources, all of which can and should be tested – and included in test generation. One important source of input is the program's _configuration_ – that is, a set of inputs that typically is set once when beginning to process data and then stays constant while processing data, while the program is running, or even while the program is deployed. Such a configuration is frequently set in _configuration files_ (for instance, as key/value pairs); the most ubiquitous method for command-line tools, though, are _configuration options_ on the command line. As an example, consider the `grep` utility to find textual patterns in files. 
The exact mode by which `grep` works is governed by a multitude of options, which can be listed by providing a `--help` option:
###Code
!grep --help
###Output
_____no_output_____
###Markdown
All these options need to be tested for whether they operate correctly. In security testing, any such option may also trigger a yet unknown vulnerability. Hence, such options can become _fuzz targets_ on their own. In this chapter, we analyze how to systematically test such options – and better yet, how to extract possible configurations right out of given program files, such that we do not have to specify anything. Options in PythonLet us stick to our common programming language here and examine how options are processed in Python. The `argparse` module provides a parser for command-line arguments (and options) with great functionality – and great complexity. You start by defining a parser (`argparse.ArgumentParser()`) to which individual arguments with various features are added, one after another. Additional parameters for each argument can specify the type (`type`) of the argument (say, integers or strings), or the number of arguments (`nargs`). By default, arguments are stored under their name in the `args` object coming from `parse_args()` – thus, `args.integers` holds the `integer` arguments added earlier. Special actions (`actions`) allow to store specific values in given variables; the `store_const` action stores the given `const` in the attribute named by `dest`. The following example takes a number of integer arguments (`integers`) as well as an operator (`--sum`, `--min`, or `--max`) to be applied on these integers. The operators all store a function reference in the `accumulate` attribute, which is finally invoked on the integers parsed:
###Code
import argparse
def process_numbers(args=[]):
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('integers', metavar='N', type=int, nargs='+',
help='an integer for the accumulator')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--sum', dest='accumulate', action='store_const',
const=sum,
help='sum the integers')
group.add_argument('--min', dest='accumulate', action='store_const',
const=min,
help='compute the minimum')
group.add_argument('--max', dest='accumulate', action='store_const',
const=max,
help='compute the maximum')
args = parser.parse_args(args)
print(args.accumulate(args.integers))
###Output
_____no_output_____
###Markdown
Here's how `process_numbers()` works. We can, for instance, invoke the `--min` option on the given arguments to compute the minimum:
###Code
process_numbers(["--min", "100", "200", "300"])
###Output
_____no_output_____
###Markdown
Or compute the sum of three numbers:
###Code
process_numbers(["--sum", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
When defined via `add_mutually_exclusive_group()` (as above), options are mutually exclusive. Consequently, we can have only one operator:
###Code
import bookutils
from ExpectError import ExpectError
with ExpectError(print_traceback=False):
process_numbers(["--sum", "--max", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
A Grammar for ConfigurationsHow can we test a system with several options? The easiest answer is to write a grammar for it. The grammar `PROCESS_NUMBERS_EBNF_GRAMMAR` reflects the possible combinations of options and arguments:
###Code
from Grammars import crange, srange, convert_ebnf_grammar, extend_grammar, is_valid_grammar
from Grammars import START_SYMBOL, new_symbol
PROCESS_NUMBERS_EBNF_GRAMMAR = {
"<start>": ["<operator> <integers>"],
"<operator>": ["--sum", "--min", "--max"],
"<integers>": ["<integer>", "<integers> <integer>"],
"<integer>": ["<digit>+"],
"<digit>": crange('0', '9')
}
assert is_valid_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
PROCESS_NUMBERS_GRAMMAR = convert_ebnf_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
###Output
_____no_output_____
###Markdown
We can feed this grammar into our [grammar coverage fuzzer](GrammarCoverageFuzzer.ipynb) and have it cover one option after another:
###Code
from GrammarCoverageFuzzer import GrammarCoverageFuzzer
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Of course, we can also invoke `process_numbers()` with these very arguments. To this end, we need to convert the string produced by the grammar back into a list of individual arguments, using `split()`:
###Code
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
args = f.fuzz().split()
print(args)
process_numbers(args)
###Output
_____no_output_____
###Markdown
In a similar way, we can define grammars for any program to be tested; as well as define grammars for, say, configuration files. Yet, the grammar has to be updated with every change to the program, which creates a maintenance burden. Given that the information required for the grammar is already all encoded in the program, the question arises: _Can't we go and extract configuration options right out of the program in the first place?_ Mining Configuration OptionsIn this section, we try to extract option and argument information right out of a program, such that we do not have to specify a configuration grammar. The aim is to have a configuration fuzzer that works on the options and arguments of an arbitrary program, as long as it follows specific conventions for processing its arguments. In the case of Python programs, this means using the `argparse` module.Our idea is as follows: We execute the given program up to the point where the arguments are actually parsed – that is, `argparse.parse_args()` is invoked. Up to this point, we track all calls into the argument parser, notably those calls that define arguments and options (`add_argument()`). From these, we construct the grammar. Tracking ArgumentsLet us illustrate this approach with a simple experiment: We define a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while `process_numbers` is invoked. If we have a call to a method `add_argument`, we access and print out the local variables (which at this point are the arguments to the method).
###Code
import sys
import string
def traceit(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(method_name, locals)
###Output
_____no_output_____
###Markdown
What we get is a list of all calls to `add_argument()`, together with the method arguments passed:
###Code
sys.settrace(traceit)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
###Markdown
From the `args` argument, we can access the individual options and arguments to be defined:
###Code
def traceit(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(locals['args'])
sys.settrace(traceit)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
###Markdown
We see that each argument comes as a tuple with one (say, `integers` or `--sum`) or two members (`-h` and `--help`), which denote alternate forms for the same option. Our job will be to go through the arguments of `add_argument()` and detect not only the names of options and arguments, but also whether they accept additional parameters, as well as the type of the parameters. A Grammar Miner for Options and Arguments Let us now build a class that gathers all this information to create a grammar. We use the `ParseInterrupt` exception to interrupt program execution after gathering all arguments and options:
###Code
class ParseInterrupt(Exception):
pass
###Output
_____no_output_____
###Markdown
The class `OptionGrammarMiner` takes an executable function for which the grammar of options and arguments is to be mined:
###Code
class OptionGrammarMiner(object):
def __init__(self, function, log=False):
self.function = function
self.log = log
###Output
_____no_output_____
###Markdown
The method `mine_ebnf_grammar()` is where everything happens. It creates a grammar of the form```<start> ::= <option>* <arguments>```in which the (initially empty) `<option>` and `<arguments>` rules collect the options and arguments. It then sets a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while the previously defined `function` is invoked. Raising `ParseInterrupt` (when `parse_args()` is invoked) ends execution.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
OPTION_SYMBOL = "<option>"
ARGUMENTS_SYMBOL = "<arguments>"
def mine_ebnf_grammar(self):
self.grammar = {
START_SYMBOL: ["(" + self.OPTION_SYMBOL + ")*" + self.ARGUMENTS_SYMBOL],
self.OPTION_SYMBOL: [],
self.ARGUMENTS_SYMBOL: []
}
self.current_group = self.OPTION_SYMBOL
old_trace = sys.gettrace()
sys.settrace(self.traceit)
try:
self.function()
except ParseInterrupt:
pass
sys.settrace(old_trace)
return self.grammar
def mine_grammar(self):
return convert_ebnf_grammar(self.mine_ebnf_grammar())
###Output
_____no_output_____
###Markdown
The trace function checks for four methods: `add_argument()` is the most important function, resulting in processing arguments; `frame.f_locals` again is the set of local variables, which at this point is mostly the arguments to `add_argument()`. Since mutually exclusive groups also have a method `add_argument()`, we set the flag `in_group` to differentiate. Note that we make no specific efforts to differentiate between multiple parsers or groups; we simply assume that there is one parser, and at any point at most one mutually exclusive group.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def traceit(self, frame, event, arg):
if event != "call":
return
if "self" not in frame.f_locals:
return
self_var = frame.f_locals["self"]
method_name = frame.f_code.co_name
if method_name == "add_argument":
in_group = repr(type(self_var)).find("Group") >= 0
self.process_argument(frame.f_locals, in_group)
elif method_name == "add_mutually_exclusive_group":
self.add_group(frame.f_locals, exclusive=True)
elif method_name == "add_argument_group":
# self.add_group(frame.f_locals, exclusive=False)
pass
elif method_name == "parse_args":
raise ParseInterrupt
return None
###Output
_____no_output_____
###Markdown
The method `process_argument()` now analyzes the arguments passed and adds them to the grammar:* If the argument starts with `-`, it gets added as an optional element to the `<option>` list* Otherwise, it gets added to the `<arguments>` list.The optional `nargs` argument specifies how many arguments can follow. If it is a number, we add the appropriate number of elements to the grammar; if it is an abstract specifier (say, `+` or `*`), we use it directly as EBNF operator.Given the large number of parameters and optional behavior, this is a somewhat messy function, but it does the job.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def process_argument(self, locals, in_group):
args = locals["args"]
kwargs = locals["kwargs"]
if self.log:
print(args)
print(kwargs)
print()
for arg in args:
self.process_arg(arg, in_group, kwargs)
class OptionGrammarMiner(OptionGrammarMiner):
def process_arg(self, arg, in_group, kwargs):
if arg.startswith('-'):
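            # Option (starts with '-'): add it to <option> or to the current mutually exclusive group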
if not in_group:
target = self.OPTION_SYMBOL
else:
target = self.current_group
metavar = None
arg = " " + arg
else:
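            # Positional argument: add it to <arguments>; its name serves as metavariable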
target = self.ARGUMENTS_SYMBOL
metavar = arg
arg = ""
if "nargs" in kwargs:
nargs = kwargs["nargs"]
else:
nargs = 1
param = self.add_parameter(kwargs, metavar)
if param == "":
nargs = 0
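        # A numeric nargs adds that many parameters; '?', '+', or '*' is used as EBNF operator directly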
if isinstance(nargs, int):
for i in range(nargs):
arg += param
else:
assert nargs in "?+*"
arg += '(' + param + ')' + nargs
if target == self.OPTION_SYMBOL:
self.grammar[target].append(arg)
else:
self.grammar[target].append(arg)
###Output
_____no_output_____
###Markdown
The method `add_parameter()` handles possible parameters of options. If the argument has an `action` defined, it takes no parameter. Otherwise, we identify the type of the parameter (as `int` or `str`) and augment the grammar with an appropriate rule.
###Code
import inspect
class OptionGrammarMiner(OptionGrammarMiner):
def add_parameter(self, kwargs, metavar):
if "action" in kwargs:
# No parameter
return ""
type_ = "str"
if "type" in kwargs:
given_type = kwargs["type"]
# int types come as '<class int>'
if inspect.isclass(given_type) and issubclass(given_type, int):
type_ = "int"
if metavar is None:
if "metavar" in kwargs:
metavar = kwargs["metavar"]
else:
metavar = type_
self.add_type_rule(type_)
if metavar != type_:
self.add_metavar_rule(metavar, type_)
param = " <" + metavar + ">"
return param
###Output
_____no_output_____
###Markdown
The method `add_type_rule()` adds a rule for parameter types to the grammar. If the parameter is identified by a meta-variable (say, `N`), we add a rule for this as well to improve legibility.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_type_rule(self, type_):
if type_ == "int":
self.add_int_rule()
else:
self.add_str_rule()
def add_int_rule(self):
self.grammar["<int>"] = ["(-)?<digit>+"]
self.grammar["<digit>"] = crange('0', '9')
def add_str_rule(self):
self.grammar["<str>"] = ["<char>+"]
self.grammar["<char>"] = srange(
string.digits
+ string.ascii_letters
+ string.punctuation)
def add_metavar_rule(self, metavar, type_):
self.grammar["<" + metavar + ">"] = ["<" + type_ + ">"]
###Output
_____no_output_____
###Markdown
The method `add_group()` adds a new mutually exclusive group to the grammar. We define a new symbol (say, `<group>`) for the options added to the group, and use the `required` and `exclusive` flags to define an appropriate expansion operator. The group is then prefixed to the grammar, as in```<start> ::= <group> <option>* <arguments>```with an initially empty `<group>` rule that is filled with the next calls to `add_argument()` within the group.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_group(self, locals, exclusive):
kwargs = locals["kwargs"]
if self.log:
print(kwargs)
required = kwargs.get("required", False)
group = new_symbol(self.grammar, "<group>")
if required and exclusive:
group_expansion = group
if required and not exclusive:
group_expansion = group + "+"
if not required and exclusive:
group_expansion = group + "?"
if not required and not exclusive:
group_expansion = group + "*"
self.grammar[START_SYMBOL][0] = group_expansion + \
self.grammar[START_SYMBOL][0]
self.grammar[group] = []
self.current_group = group
###Output
_____no_output_____
###Markdown
That's it! With this, we can now extract the grammar from our `process_numbers()` program. Turning on logging again reveals the variables we draw upon.
###Code
miner = OptionGrammarMiner(process_numbers, log=True)
process_numbers_grammar = miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
Here is the extracted grammar:
###Code
process_numbers_grammar
###Output
_____no_output_____
###Markdown
The grammar properly identifies the group found:
###Code
process_numbers_grammar["<start>"]
process_numbers_grammar["<group>"]
###Output
_____no_output_____
###Markdown
It also identifies a `--help` option provided not by us, but by the `argparse` module:
###Code
process_numbers_grammar["<option>"]
###Output
_____no_output_____
###Markdown
The grammar also correctly identifies the types of the arguments:
###Code
process_numbers_grammar["<arguments>"]
process_numbers_grammar["<integers>"]
###Output
_____no_output_____
###Markdown
The rules for `int` are set as defined by `add_int_rule()`
###Code
process_numbers_grammar["<int>"]
###Output
_____no_output_____
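###Markdown
As an aside, the effect of `nargs` and parameter types on the mined grammar can be seen by mining a tiny, hypothetical parser of our own (the program below exists only for illustration):
###Code
def demo_program(args=[]):
    parser = argparse.ArgumentParser(description='Demo program for grammar mining.')
    parser.add_argument('--pair', type=int, nargs=2,
                        help='takes exactly two integers')
    parser.add_argument('files', nargs='+',
                        help='one or more input files')
    args = parser.parse_args(args)

OptionGrammarMiner(demo_program).mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
The `--pair` option shows up with two `<int>` parameters, while the `files` argument becomes a `+`-repeated element of `<arguments>`. Back to the grammar mined from `process_numbers()`.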
###Markdown
We can take this grammar and convert it to BNF, such that we can fuzz with it right away:
###Code
assert is_valid_grammar(process_numbers_grammar)
grammar = convert_ebnf_grammar(process_numbers_grammar)
assert is_valid_grammar(grammar)
f = GrammarCoverageFuzzer(grammar)
for i in range(10):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Each and every invocation adheres to the rules as set forth in the `argparse` calls. By mining options and arguments from existing programs, we can now fuzz these options out of the box – without having to specify a grammar. Testing Autopep8 Let us try out the option grammar miner on real-world Python programs. `autopep8` is a tool that automatically converts Python code to the [PEP 8 Style Guide for Python Code](https://www.python.org/dev/peps/pep-0008/). (Actually, all Python code in this book runs through `autopep8` during production.) `autopep8` offers a wide range of options, as can be seen by invoking it with `--help`:
###Code
!autopep8 --help
###Output
_____no_output_____
###Markdown
Autopep8 SetupWe want to systematically test these options. In order to deploy our configuration grammar miner, we need to find the source code of the executable:
###Code
import os
def find_executable(name):
for path in os.get_exec_path():
qualified_name = os.path.join(path, name)
if os.path.exists(qualified_name):
return qualified_name
return None
autopep8_executable = find_executable("autopep8")
assert autopep8_executable is not None
autopep8_executable
###Output
_____no_output_____
###Markdown
Next, we build a function that reads the contents of the file and executes it.
###Code
def autopep8():
executable = find_executable("autopep8")
# First line has to contain "/usr/bin/env python" or like
first_line = open(executable).readline()
assert first_line.find("python") >= 0
contents = open(executable).read()
exec(contents)
###Output
_____no_output_____
###Markdown
Mining an Autopep8 GrammarWe can use the `autopep8()` function in our grammar miner:
###Code
autopep8_miner = OptionGrammarMiner(autopep8)
###Output
_____no_output_____
###Markdown
and extract a grammar for it:
###Code
autopep8_ebnf_grammar = autopep8_miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
This works because here, `autopep8` is not a separate process (and a separate Python interpreter), but we run the `autopep8()` function (and the `autopep8` code) in our current Python interpreter – up to the call to `parse_args()`, where we interrupt execution again. At this point, the `autopep8` code has done nothing but setting up the argument parser – which is what we are interested in. The grammar options mined reflect precisely the options seen when providing `--help`:
###Code
print(autopep8_ebnf_grammar["<option>"])
###Output
_____no_output_____
###Markdown
Metavariables like `<n>` or `<line>` are placeholders for integers. We assume all metavariables of the same name have the same type:
###Code
autopep8_ebnf_grammar["<line>"]
###Output
_____no_output_____
###Markdown
The grammar miner has inferred that the argument to `autopep8` is a list of files:
###Code
autopep8_ebnf_grammar["<arguments>"]
###Output
_____no_output_____
###Markdown
which in turn all are strings:
###Code
autopep8_ebnf_grammar["<files>"]
###Output
_____no_output_____
###Markdown
As we are only interested in testing options, not arguments, we fix the arguments to a single mandatory input. (Otherwise, we'd have plenty of random file names generated.)
###Code
autopep8_ebnf_grammar["<arguments>"] = [" <files>"]
autopep8_ebnf_grammar["<files>"] = ["foo.py"]
assert is_valid_grammar(autopep8_ebnf_grammar)
###Output
_____no_output_____
###Markdown
Creating Autopep8 Options Let us now use the inferred grammar for fuzzing. Again, we convert the EBNF grammar into a regular BNF grammar:
###Code
autopep8_grammar = convert_ebnf_grammar(autopep8_ebnf_grammar)
assert is_valid_grammar(autopep8_grammar)
###Output
_____no_output_____
###Markdown
And we can use the grammar for fuzzing all options:
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=4)
for i in range(20):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Let us apply these options on the actual program. We need a file `foo.py` that will serve as input: (Note that the following commands will overwrite the file `foo.py`, if it already exists in the current working directory. Be aware of this, if you downloaded the notebooks and are working locally.)
###Code
def create_foo_py():
open("foo.py", "w").write("""
def twice(x = 2):
return x + x
""")
create_foo_py()
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We see how `autopep8` fixes the spacing:
###Code
!autopep8 foo.py
###Output
_____no_output_____
###Markdown
Let us now put things together. We define a `ProgramRunner` that will run the `autopep8` executable with arguments coming from the mined `autopep8` grammar.
###Code
from Fuzzer import ProgramRunner
###Output
_____no_output_____
###Markdown
Running `autopep8` with the mined options reveals a surprisingly high number of passing runs. (We see that some options depend on each other or are mutually exclusive, but this is handled by the program logic, not the argument parser, and hence out of our scope.) The `GrammarCoverageFuzzer` ensures that each option is tested at least once. (Digits and letters, too, by the way.)
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=5)
for i in range(20):
invocation = "autopep8" + f.fuzz()
print("$ " + invocation)
args = invocation.split()
autopep8 = ProgramRunner(args)
result, outcome = autopep8.run()
if result.stderr != "":
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
Our `foo.py` file now has been formatted in place a number of times:
###Code
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We don't need it anymore, so we clean up things:
###Code
import os
os.remove("foo.py")
###Output
_____no_output_____
###Markdown
Classes for Fuzzing Configuration OptionsLet us now create reusable classes that we can use for testing arbitrary programs. (Okay, make that "arbitrary programs that are written in Python and use the `argparse` module to process command-line arguments.") The class `OptionRunner` is a subclass of `ProgramRunner` that takes care of automatically determining the grammar, using the same steps we used for `autopep8`, above.
###Code
class OptionRunner(ProgramRunner):
def __init__(self, program, arguments=None):
if isinstance(program, str):
self.base_executable = program
else:
self.base_executable = program[0]
self.find_contents()
self.find_grammar()
if arguments is not None:
self.set_arguments(arguments)
super().__init__(program)
###Output
_____no_output_____
###Markdown
First, we find the contents of the Python executable:
###Code
class OptionRunner(OptionRunner):
def find_contents(self):
self._executable = find_executable(self.base_executable)
first_line = open(self._executable).readline()
assert first_line.find("python") >= 0
self.contents = open(self._executable).read()
def invoker(self):
exec(self.contents)
def executable(self):
return self._executable
###Output
_____no_output_____
###Markdown
Next, we determine the grammar using the `OptionGrammarMiner` class:
###Code
class OptionRunner(OptionRunner):
def find_grammar(self):
miner = OptionGrammarMiner(self.invoker)
self._ebnf_grammar = miner.mine_ebnf_grammar()
def ebnf_grammar(self):
return self._ebnf_grammar
def grammar(self):
return convert_ebnf_grammar(self._ebnf_grammar)
###Output
_____no_output_____
###Markdown
The two service methods `set_arguments()` and `set_invocation()` help us to change the arguments and program, respectively.
###Code
from Grammars import unreachable_nonterminals
class OptionRunner(OptionRunner):
def set_arguments(self, args):
self._ebnf_grammar["<arguments>"] = [" " + args]
# Delete rules for previous arguments
for nonterminal in unreachable_nonterminals(self._ebnf_grammar):
del self._ebnf_grammar[nonterminal]
def set_invocation(self, program):
self.program = program
###Output
_____no_output_____
###Markdown
We can instantiate the class on `autopep8` and immediately get the grammar:
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
print(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
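###Markdown
The service method `set_arguments()` can later retarget the mined grammar at a different input file. A small sketch, using a separate runner instance so that `autopep8_runner` stays unchanged (the file name `bar.py` is purely illustrative):
###Code
demo_runner = OptionRunner("autopep8", "foo.py")
demo_runner.set_arguments("bar.py")
demo_runner.ebnf_grammar()["<arguments>"]
###Output
_____no_output_____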
###Markdown
An `OptionFuzzer` interacts with the given `OptionRunner` to obtain its grammar, which is then passed to its `GrammarCoverageFuzzer` superclass.
###Code
class OptionFuzzer(GrammarCoverageFuzzer):
def __init__(self, runner, *args, **kwargs):
assert issubclass(type(runner), OptionRunner)
self.runner = runner
grammar = runner.grammar()
super().__init__(grammar, *args, **kwargs)
###Output
_____no_output_____
###Markdown
When invoking `run()`, the `OptionFuzzer` creates a new invocation (using `fuzz()` from its grammar) and runs the now given (or previously set) runner with the arguments from the grammar. Note that the runner specified in `run()` can differ from the one set during initialization; this allows for mining options from one program and applying it in another context.
###Code
class OptionFuzzer(OptionFuzzer):
def run(self, runner=None, inp=""):
if runner is None:
runner = self.runner
assert issubclass(type(runner), OptionRunner)
invocation = runner.executable() + " " + self.fuzz()
runner.set_invocation(invocation.split())
return runner.run(inp)
###Output
_____no_output_____
###Markdown
Example: Autopep8Let us apply our newly defined classes on the `autopep8` runner:
###Code
autopep8_fuzzer = OptionFuzzer(autopep8_runner, max_nonterminals=5)
for i in range(3):
print(autopep8_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
We can now systematically test `autopep8` with these classes:
###Code
autopep8_fuzzer.run(autopep8_runner)
###Output
_____no_output_____
###Markdown
Example: MyPyWe can extract options for the `mypy` static type checker for Python:
###Code
assert find_executable("mypy") is not None
mypy_runner = OptionRunner("mypy", "foo.py")
print(mypy_runner.ebnf_grammar()["<option>"])
mypy_fuzzer = OptionFuzzer(mypy_runner, max_nonterminals=5)
for i in range(10):
print(mypy_fuzzer.fuzz())
###Output
_____no_output_____
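###Markdown
As noted above, the runner passed to `run()` may differ from the one used for mining. As a quick sketch, we can apply the options mined from `autopep8` to the `mypy` runner – most of these invocations will simply be rejected by `mypy`, which is fine for robustness testing:
###Code
autopep8_fuzzer.run(mypy_runner)
###Output
_____no_output_____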
###Markdown
Example: NotedownHere's the configuration options for the `notedown` Notebook to Markdown converter:
###Code
assert find_executable("notedown") is not None
notedown_runner = OptionRunner("notedown")
print(notedown_runner.ebnf_grammar()["<option>"])
notedown_fuzzer = OptionFuzzer(notedown_runner, max_nonterminals=5)
for i in range(10):
print(notedown_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Combinatorial TestingOur `GrammarCoverageFuzzer` does a good job in covering each and every option at least once, which is great for systematic testing. However, as we can also see in our examples above, some options require each other, while others interfere with each other. What we should do as good testers is not only to cover every option individually, but also _combinations_ of options. The Python `itertools` module gives us means to create combinations from lists. We can, for instance, take the `notedown` options and create a list of all pairs.
###Code
from itertools import combinations
option_list = notedown_runner.ebnf_grammar()["<option>"]
pairs = list(combinations(option_list, 2))
###Output
_____no_output_____
###Markdown
There's quite a number of pairs:
###Code
len(pairs)
print(pairs[:20])
###Output
_____no_output_____
###Markdown
Testing every such pair of options frequently suffices to cover all interferences between options. (Programs rarely have conditions involving three or more configuration settings.) To this end, we _change_ the grammar from having a list of options to having a list of _option pairs_, such that covering these will automatically cover all pairs. We create a function `pairwise()` that takes a list of options as occurring in our grammar and returns a list of _pairwise options_ – that is, our original options, but concatenated.
###Code
def pairwise(option_list):
return [option_1 + option_2
for (option_1, option_2) in combinations(option_list, 2)]
###Output
_____no_output_____
###Markdown
Here's the first 20 pairs:
###Code
print(pairwise(option_list)[:20])
###Output
_____no_output_____
###Markdown
The new grammar `pairwise_notedown_grammar` is a copy of the `notedown` grammar, but with the list of options replaced with the above pairwise option list.
###Code
notedown_grammar = notedown_runner.grammar()
pairwise_notedown_grammar = extend_grammar(notedown_grammar)
pairwise_notedown_grammar["<option>"] = pairwise(notedown_grammar["<option>"])
assert is_valid_grammar(pairwise_notedown_grammar)
###Output
_____no_output_____
###Markdown
Using the "pairwise" grammar to fuzz now covers one pair after another:
###Code
notedown_fuzzer = GrammarCoverageFuzzer(
pairwise_notedown_grammar, max_nonterminals=4)
for i in range(10):
print(notedown_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Can we actually test all combinations of options? Not in practice, as the number of combinations quickly grows as the length increases. It decreases again as the number of options reaches the maximum (with 20 options, there is only 1 combination involving _all_ options), but the absolute numbers are still staggering:
###Code
for combination_length in range(1, 20):
tuples = list(combinations(option_list, combination_length))
print(combination_length, len(tuples))
###Output
_____no_output_____
###Markdown
Formally, the number of combinations of length $k$ in a set of options of length $n$ is the binomial coefficient$${n \choose k} = \frac{n!}{k!(n - k)!}$$ which for $k = 2$ (all pairs) gives us$${n \choose 2} = \frac{n!}{2(n - 2)!} = \frac{n (n - 1)}{2}$$ For `autopep8` with its 29 options...
###Code
len(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
###Markdown
... we thus have 406 distinct pairs. However, the binomial coefficient does not differentiate between permutations of elements of the pairs, which our tests do. Therefore we need 812 tests to cover all pairs:
###Code
len(autopep8_runner.ebnf_grammar()["<option>"]) * \
(len(autopep8_runner.ebnf_grammar()["<option>"]) - 1)
###Output
_____no_output_____
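###Markdown
As a quick sanity check, these numbers can be compared against the closed formula (a small sketch using Python's `math.comb`):
###Code
from math import comb

n = len(autopep8_runner.ebnf_grammar()["<option>"])
comb(n, 2), n * (n - 1)  # unordered pairs vs. ordered pairs
###Output
_____no_output_____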
###Markdown
For `mypy` with its 110 options, though, we already end up with 11,990 tests to be conducted:
###Code
len(mypy_runner.ebnf_grammar()["<option>"])
len(mypy_runner.ebnf_grammar()["<option>"]) * \
(len(mypy_runner.ebnf_grammar()["<option>"]) - 1)
###Output
_____no_output_____
###Markdown
Even if each pair takes a second to run, we'd still be done in three hours of testing, though. If your program has more options that you all want to get covered in combinations, it is advisable that you limit the number of configurations further – for instance by limiting combinatorial testing to those combinations that possibly can interact with each other; and covering all other (presumably orthogonal) options individually. This mechanism of creating configurations by extending grammars can be easily extended to other configuration targets. One may want to explore a greater number of configurations, or expansions in specific contexts. The [exercises](Exercises), below, have a number of options ready for you. SynopsisThis chapter provides two classes:* `OptionRunner` automatically extract command-line options from a Python program;* `OptionFuzzer` uses these to automatically test a Python program with a large variety of options. `OptionRunner` runs a program up to the point where it parses its arguments, and then extracts a grammar that describes its invocations:
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
###Output
_____no_output_____
###Markdown
The grammar can be extracted via the method `ebnf_grammar()`:
###Code
option_ebnf_grammar = autopep8_runner.ebnf_grammar()
print(option_ebnf_grammar)
###Output
_____no_output_____
###Markdown
The grammar can be immediately used for fuzzing. A `GrammarCoverageFuzzer` will ensure all options are covered:
###Code
from Grammars import convert_ebnf_grammar
fuzzer = GrammarCoverageFuzzer(convert_ebnf_grammar(option_ebnf_grammar))
[fuzzer.fuzz() for i in range(3)]
###Output
_____no_output_____
###Markdown
The `OptionFuzzer` class summarizes these steps. Its constructor takes an `OptionRunner` to automatically extract the grammar; it does the necessary steps to extract the grammar and fuzz with it.
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
autopep8_fuzzer = OptionFuzzer(autopep8_runner)
[autopep8_fuzzer.fuzz() for i in range(3)]
###Output
_____no_output_____
###Markdown
The final step in testing would now to invoke the program with these arguments. Note that `OptionRunner` is experimental: It assumes that the Python program in question uses the `argparse` module; and not all `argparse` features are supported. Still, it does a pretty good job even on nontrivial programs. Lessons Learned* Besides regular input data, program _configurations_ make an important testing target.* For a given program using a standard library to parse command-line options and arguments, one can automatically extract these and convert them into a grammar.* To cover not only single options, but combinations of options, one can expand the grammar to cover all pairs, or come up with even more ambitious targets. Next StepsIf you liked the idea of mining a grammar from a program, do not miss:* [how to mine grammars for input data](GrammarMiner.ipynb) Our next steps in the book focus on:* [how to parse and recombine inputs](Parser.ipynb)* [how to assign weights and probabilities to specific productions](ProbabilisticGrammarFuzzer.ipynb)* [how to simplify inputs that cause a failure](Reducer.ipynb) BackgroundAlthough configuration data is just as likely to cause failures as other input data, it has received relatively little attention in test generation – possibly because, unlike "regular" input data, configuration data is not so much under control of external parties, and because, again unlike regular data, there is little variance in configurations. Creating models for software configurations and using these models for testing is commonplace, as is the idea of pairwise testing. For an overview, see \cite{Pezze2008}; for a discussion and comparison of state-of-the-art techniques, see \cite{Petke2015}.More specifically, \cite{Sutton2007} also discuss techniques to systematically cover command-line options. Dai et al. \cite{Dai2010} apply configuration fuzzing by changing variables associated with configuration files. Exercises Exercise 1: ifdef Configuration FuzzingIn C programs, the *C preprocessor* can be used to choose which code parts should be compiled and which ones should not. As an example, in the C code```Cifdef LONG_FOOlong foo() { ... }elseint foo() { ... }endif```the compiler will compile the function `foo()` with return type`long` if the _preprocessor variable_ `LONG_FOO` is defined, and with return type `int` if not. Such preprocessor variables are either set in the source files (using `define`, as in `define LONG_FOO`) or on the C compiler command line (using `-D` or `-D=`, as in `-DLONG_FOO`. Such *conditional compilation* is used to configure C programs towards their environment. System-specific code can contain lots of conditional compilation. As an example, consider this excerpt of `xmlparse.c`, the XML parser that is part of the Python runtime library:```cif defined(_WIN32) && !defined(LOAD_LIBRARY_SEARCH_SYSTEM32) define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800endifif !defined(HAVE_GETRANDOM) && !defined(HAVE_SYSCALL_GETRANDOM) \ && !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_ARC4RANDOM) \ && !defined(XML_DEV_URANDOM) \ && !defined(_WIN32) \ && !defined(XML_POOR_ENTROPY) errorendifif !defined(TIOCSWINSZ) || defined(__SCO__) || defined(__UNIXWARE__)define USE_SYSV_ENVVARS /* COLUMNS/LINES vs. 
TERMCAP */endififdef XML_UNICODE_WCHAR_Tdefine XML_T(x) (const wchar_t)xdefine XML_L(x) L xelsedefine XML_T(x) (const unsigned short)xdefine XML_L(x) xendifint fun(int x) { return XML_T(x); }``` A typical configuration for the C preprocessor on the above code could be `cc -c -D_WIN32 -DXML_POOR_ENTROPY -DXML_UNICODE_WCHAR_T xmlparse.c`, defining the given preprocessor variables and selecting the appropriate code fragments. Since the compiler can only compile one configuration at a time (implying that we can also only _test_ one resulting executable at a time), your task is to find out which of these configurations actually compile. To this end, proceed in three steps. Part 1: Extract Preprocessor VariablesWrite a _function_ `cpp_identifiers()` that, given a set of lines (say, from `open(filename).readlines()`), extracts all preprocessor variables referenced in `if` or `ifdef` preprocessor instructions. Apply `ifdef_identifiers()` on the sample C input above, such that```pythoncpp_identifiers(open("xmlparse.c").readlines()) ```returns the set```python{'_WIN32', 'LOAD_LIBRARY_SEARCH_SYSTEM32', 'HAVE_GETRANDOM', 'HAVE_SYSCALL_GETRANDOM', 'HAVE_ARC4RANDOM_BUF', ...}``` **Solution.** Let us start with creating a sample input file, `xmlparse.c`:
###Code
filename = "xmlparse.c"
open(filename, "w").write(
"""
#if defined(_WIN32) && !defined(LOAD_LIBRARY_SEARCH_SYSTEM32)
# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800
#endif
#if !defined(HAVE_GETRANDOM) && !defined(HAVE_SYSCALL_GETRANDOM) \
&& !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_ARC4RANDOM) \
&& !defined(XML_DEV_URANDOM) \
&& !defined(_WIN32) \
&& !defined(XML_POOR_ENTROPY)
# error
#endif
#if !defined(TIOCSWINSZ) || defined(__SCO__) || defined(__UNIXWARE__)
#define USE_SYSV_ENVVARS /* COLUMNS/LINES vs. TERMCAP */
#endif
#ifdef XML_UNICODE_WCHAR_T
#define XML_T(x) (const wchar_t)x
#define XML_L(x) L ## x
#else
#define XML_T(x) (const unsigned short)x
#define XML_L(x) x
#endif
int fun(int x) { return XML_T(x); }
""");
###Output
_____no_output_____
###Markdown
To find C preprocessor `#if` directives and preprocessor variables, we use regular expressions matching them.
###Code
import re
re_cpp_if_directive = re.compile(r"\s*#\s*(el)?if")
re_cpp_identifier = re.compile(r"[a-zA-Z_$]+")
def cpp_identifiers(lines):
identifiers = set()
for line in lines:
if re_cpp_if_directive.match(line):
identifiers |= set(re_cpp_identifier.findall(line))
# These are preprocessor keywords
identifiers -= {"if", "ifdef", "ifndef", "defined"}
return identifiers
cpp_ids = cpp_identifiers(open("xmlparse.c").readlines())
cpp_ids
###Output
_____no_output_____
###Markdown
Part 2: Derive an Option GrammarWith the help of `cpp_identifiers()`, create a grammar which has C compiler invocations with a list of options, where each option takes the form `-D` for a preprocessor variable ``. Using this grammar `cpp_grammar`, a fuzzer ```pythong = GrammarCoverageFuzzer(cpp_grammar)```would create C compiler invocations such as```python[g.fuzz() for i in range(10)]['cc -DHAVE_SYSCALL_GETRANDOM xmlparse.c', 'cc -D__SCO__ -DRANDOM_BUF -DXML_UNICODE_WCHAR_T -D__UNIXWARE__ xmlparse.c', 'cc -DXML_POOR_ENTROPY xmlparse.c', 'cc -DRANDOM xmlparse.c', 'cc -D_WIN xmlparse.c', 'cc -DHAVE_ARC xmlparse.c', ...]``` **Solution.** This is not very difficult:
###Code
from Grammars import new_symbol
cpp_grammar = {
"<start>": ["cc -c<options> " + filename],
"<options>": ["<option>", "<options><option>"],
"<option>": []
}
for id in cpp_ids:
s = new_symbol(cpp_grammar, "<" + id + ">")
cpp_grammar["<option>"].append(s)
cpp_grammar[s] = [" -D" + id]
cpp_grammar
assert is_valid_grammar(cpp_grammar)
###Output
_____no_output_____
###Markdown
Part 3: C Preprocessor Configuration FuzzingUsing the grammar just produced, use a `GrammarCoverageFuzzer` to1. Test each preprocessor variable individually2. Test each pair of preprocessor variables, using `pairwise()`.What happens if you actually run the invocations? **Solution.** We can simply run the coverage fuzzer, as described above.
###Code
g = GrammarCoverageFuzzer(cpp_grammar)
g.fuzz()
from Fuzzer import ProgramRunner
for i in range(10):
invocation = g.fuzz()
print("$", invocation)
# subprocess.call(invocation, shell=True)
cc_runner = ProgramRunner(invocation.split(' '))
(result, outcome) = cc_runner.run()
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
To test all pairs, we can use `pairwise()`:
###Code
pairwise_cpp_grammar = extend_grammar(cpp_grammar)
pairwise_cpp_grammar["<option>"] = pairwise(cpp_grammar["<option>"])
pairwise_cpp_grammar["<option>"][:10]
pairwise_g = GrammarCoverageFuzzer(pairwise_cpp_grammar)  # fuzz over the pairwise grammar
for i in range(10):
    invocation = pairwise_g.fuzz()
print("$", invocation)
# subprocess.call(invocation, shell=True)
cc_runner = ProgramRunner(invocation.split(' '))
(result, outcome) = cc_runner.run()
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
Some of the compilation errors we get could be expected – for instance, defining `XML_UNICODE_WCHAR_T` when the type is actually not supported in our environment. Other errors may not be expected – and it is these errors we would find through systematic configuration fuzzing, as described above. At the end, don't forget to clean up:
###Code
os.remove("xmlparse.c")
if os.path.exists("xmlparse.o"):
os.remove("xmlparse.o")
###Output
_____no_output_____
###Markdown
Exercise 2: .ini Configuration FuzzingBesides command-line options, another important source of configurations are _configuration files_. In this exercise, we will consider the very simple configuration language provided by the Python `ConfigParser` module, which is very similar to what is found in Microsoft Windows _.ini_ files. The following example for a `ConfigParser` input file stems right from [the ConfigParser documentation](https://docs.python.org/3/library/configparser.html):```[DEFAULT]ServerAliveInterval = 45Compression = yesCompressionLevel = 9ForwardX11 = yes[bitbucket.org]User = hg[topsecret.server.com]Port = 50022ForwardX11 = no``` The above `ConfigParser` file can be created programmatically:
###Code
import configparser
config = configparser.ConfigParser()
config['DEFAULT'] = {'ServerAliveInterval': '45',
'Compression': 'yes',
'CompressionLevel': '9'}
config['bitbucket.org'] = {}
config['bitbucket.org']['User'] = 'hg'
config['topsecret.server.com'] = {}
topsecret = config['topsecret.server.com']
topsecret['Port'] = '50022' # mutates the parser
topsecret['ForwardX11'] = 'no' # same here
config['DEFAULT']['ForwardX11'] = 'yes'
with open('example.ini', 'w') as configfile:
config.write(configfile)
with open('example.ini') as configfile:
print(configfile.read(), end="")
###Output
_____no_output_____
###Markdown
and be read in again:
###Code
config = configparser.ConfigParser()
config.read('example.ini')
topsecret = config['topsecret.server.com']
topsecret['Port']
###Output
_____no_output_____
###Markdown
Part 1: Read ConfigurationUsing `configparser`, create a program reading in the above configuration file and accessing the individual elements. Part 2: Create a Configuration GrammarDesign a grammar that will automatically create configuration files suitable for your above program. Fuzz your program with it. Part 3: Mine a Configuration GrammarBy dynamically tracking the individual accesses to configuration elements, you can again extract a basic grammar from the execution. To this end, create a subclass of `ConfigParser` with a special method `__getitem__`:
###Code
class TrackingConfigParser(configparser.ConfigParser):
def __getitem__(self, key):
print("Accessing", repr(key))
return super().__getitem__(key)
###Output
_____no_output_____
###Markdown
For a `TrackingConfigParser` object `p`, `p.__getitem__(key)` will be invoked whenever `p[key]` is accessed:
###Code
tracking_config_parser = TrackingConfigParser()
tracking_config_parser.read('example.ini')
section = tracking_config_parser['topsecret.server.com']
###Output
_____no_output_____
###Markdown
Using `__getitem__()`, as above, implement a tracking mechanism that, while your program accesses the read configuration, automatically saves options accessed and values read. Create a prototype grammar from these values; use it for fuzzing. At the end, don't forget to clean up:
###Code
import os
os.remove("example.ini")
###Output
_____no_output_____
###Markdown
**Solution.** Left to the reader. Enjoy! Exercise 3: Extracting and Fuzzing C Command-Line OptionsIn C programs, the `getopt()` function are frequently used to process configuration options. A call```getopt(argc, argv, "bf:")```indicates that the program accepts two options `-b` and `-f`, with `-f` taking an argument (as indicated by the following colon). Part 1: Getopt FuzzingWrite a framework which, for a given C program, automatically extracts the argument to `getopt()` and derives a fuzzing grammar for it. There are multiple ways to achieve this:1. Scan the program source code for occurrences of `getopt()` and return the string passed. (Crude, but should frequently work.)2. Insert your own implementation of `getopt()` into the source code (effectively replacing `getopt()` from the runtime library), which outputs the `getopt()` argument and exits the program. Recompile and run.3. (Advanced.) As above, but instead of changing the source code, hook into the _dynamic linker_ which at runtime links the program with the C runtime library. Set the library loading path (on Linux and Unix, this is the `LD_LIBRARY_PATH` environment variable) such that your own version of `getopt()` is linked first, and the regular libraries later. Executing the program (without recompiling) should yield the desired result.Apply this on `grep` and `ls`; report the resulting grammars and results. **Solution.** Left to the reader. Enjoy hacking! Part 2: Fuzzing Long Options in CSame as Part 1, but also hook into the GNU variant `getopt_long()`, which accepts "long" arguments with double dashes such as `--help`. Note that method 1, above, will not work here, since the "long" options are defined in a separately defined structure. **Solution.** Left to the reader. Enjoy hacking! Exercise 4: Expansions in ContextIn our above option configurations, we have multiple symbols which all expand to the same integer. For instance, the `--line-range` option of `autopep8` takes two `` parameters which both expand into the same `` symbol:``` ::= ... | --line-range | ... ::= ::= (-)?+ ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9```
###Code
autopep8_runner.ebnf_grammar()["<line>"]
autopep8_runner.ebnf_grammar()["<int>"]
autopep8_runner.ebnf_grammar()["<digit>"]
###Output
_____no_output_____
###Markdown
Testing ConfigurationsThe behavior of a program is not only governed by its data. The _configuration_ of a program – that is, the settings that govern the execution of a program on its (regular) input data, as set by options or configuration files – just as well influences behavior, and thus can and should be tested. In this chapter, we explore how to systematically _test_ and _cover_ software configurations. By _automatically inferring configuration options_, we can apply these techniques out of the box, with no need for writing a grammar. Finally, we show how to systematically cover _combinations_ of configuration options, quickly detecting unwanted interferences. **Prerequisites*** You should have read the [chapter on grammars](Grammars.ipynb).* You should have read the [chapter on grammar coverage](GrammarCoverageFuzzer.ipynb).
###Code
import bookutils
from typing import List, Union, Optional, Callable, Type
###Output
_____no_output_____
###Markdown
SynopsisTo [use the code provided in this chapter](Importing.ipynb), write```python>>> from fuzzingbook.ConfigurationFuzzer import ```and then make use of the following features.This chapter provides two classes:* `OptionRunner` automatically extract command-line options from a Python program;* `OptionFuzzer` uses these to automatically test a Python program with a large variety of options.`OptionRunner` runs a program up to the point where it parses its arguments, and then extracts a grammar that describes its invocations:```python>>> autopep8_runner = OptionRunner("autopep8", "foo.py")```The grammar can be extracted via the method `ebnf_grammar()`:```python>>> option_ebnf_grammar = autopep8_runner.ebnf_grammar()>>> option_ebnf_grammar{'': ['()*'], '': [' -h', ' --help', ' --version', ' -v', ' --verbose', ' -d', ' --diff', ' -i', ' --in-place', ' --global-config ', ' --ignore-local-config', ' -r', ' --recursive', ' -j ', ' --jobs ', ' -p ', ' --pep8-passes ', ' -a', ' --aggressive', ' --experimental', ' --exclude ', ' --list-fixes', ' --ignore ', ' --select ', ' --max-line-length ', ' --line-range ', ' --range ', ' --indent-size ', ' --hang-closing', ' --exit-code'], '': [' foo.py'], '': ['+'], '': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '!', '"', '', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '<', '=', '>', '?', '@', '[', '\\', ']', '^', '_', '`', '{', '|', '}', '~'], '': [''], '': ['(-)?+'], '': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], '': [''], '': [''], '': [''], '': ['']}```The grammar can be immediately used for fuzzing. A `GrammarCoverageFuzzer` will ensure all options are covered:```python>>> from Grammars import convert_ebnf_grammar>>> fuzzer = GrammarCoverageFuzzer(convert_ebnf_grammar(option_ebnf_grammar))>>> [fuzzer.fuzz() for i in range(3)][' foo.py', ' --max-line-length 6 --jobs -594 --ignore , --ignore-local-config -r --in-place --list-fixes --recursive -v --experimental -p 72 -h --aggressive --indent-size 3 --exit-code --hang-closing --pep8-passes -180 -d --global-config XQjT --diff --exclude *g -j 43 --help --select A --version --verbose -a --line-range -3963 0 --range 1 4 -i --in-place --version foo.py', ' --global-config 2 --select PuR --ignore b --ignore @ --ignore ;7d --ignore ) --ignore Fw1Z --ignore 0 --global-config ynf --select >G --select + --global-config ( --exclude v --exclude V --ignore ^ --select L --exclude 6 --exclude =$` --ignore % --global-config N --ignore [8maop --ignore 3! --select ~?c< --exclude C --select U --exclude h --global-config --global-config 5O --select x --select B] --ignore _ --global-config .K --global-config S --exclude r --global-config qW --exclude te4/ --exclude J} --ignore " --exclude |H --global-config -&k{s --global-config E --select :I --ignore 9 --global-config M --exclude YD --select \\ --exclude z --ignore i --select \'l --ignore M --ignore ;h --exit-code foo.py']```The `OptionFuzzer` class summarizes these steps. 
Its constructor takes an `OptionRunner` to automatically extract the grammar; it does the necessary steps to extract the grammar and fuzz with it.```python>>> autopep8_runner = OptionRunner("autopep8", "foo.py")>>> autopep8_fuzzer = OptionFuzzer(autopep8_runner)>>> [autopep8_fuzzer.fuzz() for i in range(3)][' --diff foo.py', ' --exclude --global-config V --select He --global-config | --global-config n}aicm --ignore 7 --ignore b --global-config u --exclude WB` --exclude 2 --exclude JpZt --exclude l_ --select *%^ --exclude & --exclude )Lv --global-config [ --global-config " --exclude sOEXP --aggressive --exclude \' --help --diff --experimental foo.py', ' --ignore FCw; --global-config /1K?:6 --exclude U --exclude z --ignore rQ --select x --select Y --select { --global-config o --select 34 --exclude ]j --select ~ --exclude 9@ --ignore w --global-config CVL --diff foo.py']```The final step in testing would now to invoke the program with these arguments.Note that `OptionRunner` is experimental: It assumes that the Python program in question uses the `argparse` module; and not all `argparse` features are supported. Still, it does a pretty good job even on nontrivial programs.The `OptionRunner` constructor accepts an additional `miner` keyword parameter, which takes the class of the argument grammar miner to be used. By default, this is `OptionGrammarMiner` – a helper class that can be used (and extended) to create own option grammar miners. Configuration OptionsWhen we talk about the input to a program, we usually think of the _data_ it processes. This is also what we have been fuzzing in the past chapters – be it with [random input](Fuzzer.ipynb), [mutation-based fuzzing](MutationFuzzer.ipynb), or [grammar-based fuzzing](GrammarFuzzer.ipynb). However, programs typically have several input sources, all of which can and should be tested – and included in test generation. One important source of input is the program's _configuration_ – that is, a set of inputs that typically is set once when beginning to process data and then stays constant while processing data, while the program is running, or even while the program is deployed. Such a configuration is frequently set in _configuration files_ (for instance, as key/value pairs); the most ubiquitous method for command-line tools, though, are _configuration options_ on the command line. As an example, consider the `grep` utility to find textual patterns in files. The exact mode by which `grep` works is governed by a multitude of options, which can be listed by providing a `--help` option:
###Code
!grep --help
###Output
_____no_output_____
###Markdown
All these options need to be tested for whether they operate correctly. In security testing, any such option may also trigger a yet unknown vulnerability. Hence, such options can become _fuzz targets_ on their own. In this chapter, we analyze how to systematically test such options – and better yet, how to extract possible configurations right out of given program files, such that we do not have to specify anything. Options in PythonLet us stick to our common programming language here and examine how options are processed in Python. The `argparse` module provides a parser for command-line arguments (and options) with great functionality – and great complexity. You start by defining a parser (`argparse.ArgumentParser()`) to which individual arguments with various features are added, one after another. Additional parameters for each argument can specify the type (`type`) of the argument (say, integers or strings), or the number of arguments (`nargs`). By default, arguments are stored under their name in the `args` object coming from `parse_args()` – thus, `args.integers` holds the `integer` arguments added earlier. Special actions (`actions`) allow to store specific values in given variables; the `store_const` action stores the given `const` in the attribute named by `dest`. The following example takes a number of integer arguments (`integers`) as well as an operator (`--sum`, `--min`, or `--max`) to be applied on these integers. The operators all store a function reference in the `accumulate` attribute, which is finally invoked on the integers parsed:
###Code
import argparse
def process_numbers(args=[]):
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('integers', metavar='N', type=int, nargs='+',
help='an integer for the accumulator')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--sum', dest='accumulate', action='store_const',
const=sum,
help='sum the integers')
group.add_argument('--min', dest='accumulate', action='store_const',
const=min,
help='compute the minimum')
group.add_argument('--max', dest='accumulate', action='store_const',
const=max,
help='compute the maximum')
args = parser.parse_args(args)
print(args.accumulate(args.integers))
###Output
_____no_output_____
###Markdown
Here's how `process_numbers()` works. We can, for instance, invoke the `--min` option on the given arguments to compute the minimum:
###Code
process_numbers(["--min", "100", "200", "300"])
###Output
_____no_output_____
###Markdown
Or compute the sum of three numbers:
###Code
process_numbers(["--sum", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
When defined via `add_mutually_exclusive_group()` (as above), options are mutually exclusive. Consequently, we can have only one operator:
###Code
import bookutils
from ExpectError import ExpectError
with ExpectError(print_traceback=False):
process_numbers(["--sum", "--max", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
A Grammar for ConfigurationsHow can we test a system with several options? The easiest answer is to write a grammar for it. The grammar `PROCESS_NUMBERS_EBNF_GRAMMAR` reflects the possible combinations of options and arguments:
###Code
from Grammars import crange, srange, convert_ebnf_grammar, extend_grammar, is_valid_grammar
from Grammars import START_SYMBOL, new_symbol, Grammar
PROCESS_NUMBERS_EBNF_GRAMMAR: Grammar = {
"<start>": ["<operator> <integers>"],
"<operator>": ["--sum", "--min", "--max"],
"<integers>": ["<integer>", "<integers> <integer>"],
"<integer>": ["<digit>+"],
"<digit>": crange('0', '9')
}
assert is_valid_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
PROCESS_NUMBERS_GRAMMAR = convert_ebnf_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
###Output
_____no_output_____
###Markdown
We can feed this grammar into our [grammar coverage fuzzer](GrammarCoverageFuzzer.ipynb) and have it cover one option after another:
###Code
from GrammarCoverageFuzzer import GrammarCoverageFuzzer
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Of course, we can also invoke `process_numbers()` with these very arguments. To this end, we need to convert the string produced by the grammar back into a list of individual arguments, using `split()`:
###Code
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
args = f.fuzz().split()
print(args)
process_numbers(args)
###Output
_____no_output_____
###Markdown
In a similar way, we can define grammars for any program to be tested; as well as define grammars for, say, configuration files. Yet, the grammar has to be updated with every change to the program, which creates a maintenance burden. Given that the information required for the grammar is already all encoded in the program, the question arises: _Can't we go and extract configuration options right out of the program in the first place?_ Mining Configuration OptionsIn this section, we try to extract option and argument information right out of a program, such that we do not have to specify a configuration grammar. The aim is to have a configuration fuzzer that works on the options and arguments of an arbitrary program, as long as it follows specific conventions for processing its arguments. In the case of Python programs, this means using the `argparse` module.Our idea is as follows: We execute the given program up to the point where the arguments are actually parsed – that is, `argparse.parse_args()` is invoked. Up to this point, we track all calls into the argument parser, notably those calls that define arguments and options (`add_argument()`). From these, we construct the grammar. Tracking ArgumentsLet us illustrate this approach with a simple experiment: We define a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while `process_numbers` is invoked. If we have a call to a method `add_argument`, we access and print out the local variables (which at this point are the arguments to the method).
###Code
import sys
import string
def trace_locals(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(method_name, locals)
###Output
_____no_output_____
###Markdown
What we get is a list of all calls to `add_argument()`, together with the method arguments passed:
###Code
sys.settrace(trace_locals)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
###Markdown
From the `args` argument, we can access the individual options and arguments to be defined:
###Code
def trace_options(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(locals['args'])
sys.settrace(trace_options)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
###Markdown
We see that each argument comes as a tuple with one (say, `integers` or `--sum`) or two members (`-h` and `--help`), which denote alternate forms for the same option. Our job will be to go through the arguments of `add_argument()` and detect not only the names of options and arguments, but also whether they accept additional parameters, as well as the type of the parameters. A Grammar Miner for Options and Arguments Let us now build a class that gathers all this information to create a grammar. We use the `ParseInterrupt` exception to interrupt program execution after gathering all arguments and options:
###Code
class ParseInterrupt(Exception):
pass
###Output
_____no_output_____
###Markdown
The class `OptionGrammarMiner` takes an executable function for which the grammar of options and arguments is to be mined:
###Code
class OptionGrammarMiner:
"""Helper class for extracting option grammars"""
def __init__(self, function: Callable, log: bool = False):
"""Constructor.
`function` - a function processing arguments using argparse()
`log` - output diagnostics if True
"""
self.function = function
self.log = log
###Output
_____no_output_____
###Markdown
The method `mine_ebnf_grammar()` is where everything happens. It creates a grammar of the form
```
<start> ::= <option>* <arguments>
<option> ::= <empty>
<arguments> ::= <empty>
```
in which the options and arguments will be collected. It then sets a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while the previously defined `function` is invoked. Raising `ParseInterrupt` (when `parse_args()` is invoked) ends execution.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
OPTION_SYMBOL = "<option>"
ARGUMENTS_SYMBOL = "<arguments>"
def mine_ebnf_grammar(self):
"""Extract EBNF option grammar"""
self.grammar = {
START_SYMBOL: ["(" + self.OPTION_SYMBOL + ")*" + self.ARGUMENTS_SYMBOL],
self.OPTION_SYMBOL: [],
self.ARGUMENTS_SYMBOL: []
}
self.current_group = self.OPTION_SYMBOL
old_trace = sys.gettrace()
sys.settrace(self.traceit)
try:
self.function()
except ParseInterrupt:
pass
sys.settrace(old_trace)
return self.grammar
def mine_grammar(self):
"""Extract BNF option grammar"""
return convert_ebnf_grammar(self.mine_ebnf_grammar())
###Output
_____no_output_____
###Markdown
The trace function checks for four methods: `add_argument()` is the most important function, resulting in processing arguments; `frame.f_locals` again is the set of local variables, which at this point is mostly the arguments to `add_argument()`. Since mutually exclusive groups also have a method `add_argument()`, we set the flag `in_group` to differentiate. Note that we make no specific efforts to differentiate between multiple parsers or groups; we simply assume that there is one parser, and at any point at most one mutually exclusive group.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def traceit(self, frame, event, arg):
if event != "call":
return
if "self" not in frame.f_locals:
return
self_var = frame.f_locals["self"]
method_name = frame.f_code.co_name
if method_name == "add_argument":
in_group = repr(type(self_var)).find("Group") >= 0
self.process_argument(frame.f_locals, in_group)
elif method_name == "add_mutually_exclusive_group":
self.add_group(frame.f_locals, exclusive=True)
elif method_name == "add_argument_group":
# self.add_group(frame.f_locals, exclusive=False)
pass
elif method_name == "parse_args":
raise ParseInterrupt
return None
###Output
_____no_output_____
###Markdown
The method `process_argument()` now analyzes the arguments passed and adds them to the grammar:* If the argument starts with `-`, it gets added as an optional element to the `<option>` list* Otherwise, it gets added to the `<arguments>` list.The optional `nargs` argument specifies how many arguments can follow. If it is a number, we add the appropriate number of elements to the grammar; if it is an abstract specifier (say, `+` or `*`), we use it directly as EBNF operator.Given the large number of parameters and optional behavior, this is a somewhat messy function, but it does the job.
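To make this mapping concrete, here is a small added illustration (a sketch, not part of the original notebook): `demo_args()` is a made-up `argparse`-based function, and the expected results in the comments assume the completed `OptionGrammarMiner` as finished over the following cells.
```python
import argparse

def demo_args(args=[]):
    parser = argparse.ArgumentParser()
    parser.add_argument('--verbose', action='store_true')  # action given, so no parameter
    parser.add_argument('--count', type=int)                # nargs defaults to 1: one <int>
    parser.add_argument('files', type=str, nargs='+')       # abstract nargs becomes an EBNF operator
    parser.parse_args(args)

demo_grammar = OptionGrammarMiner(demo_args).mine_ebnf_grammar()
demo_grammar["<option>"]     # expected: [' -h', ' --help', ' --verbose', ' --count <int>']
demo_grammar["<arguments>"]  # expected: ['( <files>)+']
```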
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def process_argument(self, locals, in_group):
args = locals["args"]
kwargs = locals["kwargs"]
if self.log:
print(args)
print(kwargs)
print()
for arg in args:
self.process_arg(arg, in_group, kwargs)
class OptionGrammarMiner(OptionGrammarMiner):
def process_arg(self, arg, in_group, kwargs):
if arg.startswith('-'):
if not in_group:
target = self.OPTION_SYMBOL
else:
target = self.current_group
metavar = None
arg = " " + arg
else:
target = self.ARGUMENTS_SYMBOL
metavar = arg
arg = ""
if "nargs" in kwargs:
nargs = kwargs["nargs"]
else:
nargs = 1
param = self.add_parameter(kwargs, metavar)
if param == "":
nargs = 0
if isinstance(nargs, int):
for i in range(nargs):
arg += param
else:
assert nargs in "?+*"
arg += '(' + param + ')' + nargs
if target == self.OPTION_SYMBOL:
self.grammar[target].append(arg)
else:
self.grammar[target].append(arg)
###Output
_____no_output_____
###Markdown
The method `add_parameter()` handles possible parameters of options. If the argument has an `action` defined, it takes no parameter. Otherwise, we identify the type of the parameter (as `int` or `str`) and augment the grammar with an appropriate rule.
###Code
import inspect
class OptionGrammarMiner(OptionGrammarMiner):
def add_parameter(self, kwargs, metavar):
if "action" in kwargs:
# No parameter
return ""
type_ = "str"
if "type" in kwargs:
given_type = kwargs["type"]
# int types come as '<class int>'
if inspect.isclass(given_type) and issubclass(given_type, int):
type_ = "int"
if metavar is None:
if "metavar" in kwargs:
metavar = kwargs["metavar"]
else:
metavar = type_
self.add_type_rule(type_)
if metavar != type_:
self.add_metavar_rule(metavar, type_)
param = " <" + metavar + ">"
return param
###Output
_____no_output_____
###Markdown
The method `add_type_rule()` adds a rule for parameter types to the grammar. If the parameter is identified by a meta-variable (say, `N`), we add a rule for this as well to improve legibility.
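As an added illustration (inferred from the mined `autopep8` grammar later in this chapter, not taken from the `autopep8` source): an option declared with a `metavar` and an `int` type yields both a readable expansion and the corresponding type rules.
```python
# Presumed declaration (illustration only; the exact autopep8 source may differ):
#   parser.add_argument('--line-range', metavar='line', nargs=2, type=int, ...)
#
# Grammar additions produced by the miner:
#   <option> gets the expansion ' --line-range <line> <line>'
#   <line>  ::= <int>
#   <int>   ::= (-)?<digit>+
#   <digit> ::= 0 | 1 | ... | 9
```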
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_type_rule(self, type_):
if type_ == "int":
self.add_int_rule()
else:
self.add_str_rule()
def add_int_rule(self):
self.grammar["<int>"] = ["(-)?<digit>+"]
self.grammar["<digit>"] = crange('0', '9')
def add_str_rule(self):
self.grammar["<str>"] = ["<char>+"]
self.grammar["<char>"] = srange(
string.digits
+ string.ascii_letters
+ string.punctuation)
def add_metavar_rule(self, metavar, type_):
self.grammar["<" + metavar + ">"] = ["<" + type_ + ">"]
###Output
_____no_output_____
###Markdown
The method `add_group()` adds a new mutually exclusive group to the grammar. We define a new symbol (say, `<group>`) for the options added to the group, and use the `required` and `exclusive` flags to define an appropriate expansion operator. The group is then prefixed to the grammar, as in
```
<start> ::= <group><option>* <arguments>
<group> ::= <empty>
```
and filled with the next calls to `add_argument()` within the group.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_group(self, locals, exclusive):
kwargs = locals["kwargs"]
if self.log:
print(kwargs)
required = kwargs.get("required", False)
group = new_symbol(self.grammar, "<group>")
if required and exclusive:
group_expansion = group
if required and not exclusive:
group_expansion = group + "+"
if not required and exclusive:
group_expansion = group + "?"
if not required and not exclusive:
group_expansion = group + "*"
self.grammar[START_SYMBOL][0] = group_expansion + \
self.grammar[START_SYMBOL][0]
self.grammar[group] = []
self.current_group = group
###Output
_____no_output_____
###Markdown
That's it! With this, we can now extract the grammar from our `process_numbers()` program. Turning on logging again reveals the variables we draw upon.
###Code
miner = OptionGrammarMiner(process_numbers, log=True)
process_numbers_grammar = miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
Here is the extracted grammar:
###Code
process_numbers_grammar
###Output
_____no_output_____
###Markdown
The grammar properly identifies the group found:
###Code
process_numbers_grammar["<start>"]
process_numbers_grammar["<group>"]
###Output
_____no_output_____
###Markdown
It also identifies a `--help` option provided not by us, but by the `argparse` module:
###Code
process_numbers_grammar["<option>"]
###Output
_____no_output_____
###Markdown
The grammar also correctly identifies the types of the arguments:
###Code
process_numbers_grammar["<arguments>"]
process_numbers_grammar["<integers>"]
###Output
_____no_output_____
###Markdown
The rules for `int` are set as defined by `add_int_rule()`
###Code
process_numbers_grammar["<int>"]
###Output
_____no_output_____
###Markdown
We can take this grammar and convert it to BNF, such that we can fuzz with it right away:
###Code
assert is_valid_grammar(process_numbers_grammar)
grammar = convert_ebnf_grammar(process_numbers_grammar)
assert is_valid_grammar(grammar)
f = GrammarCoverageFuzzer(grammar)
for i in range(10):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Each and every invocation adheres to the rules as set forth in the `argparse` calls. By mining options and arguments from existing programs, we can now fuzz these options out of the box – without having to specify a grammar. Testing Autopep8 Let us try out the option grammar miner on real-world Python programs. `autopep8` is a tool that automatically converts Python code to the [PEP 8 Style Guide for Python Code](https://www.python.org/dev/peps/pep-0008/). (Actually, all Python code in this book runs through `autopep8` during production.) `autopep8` offers a wide range of options, as can be seen by invoking it with `--help`:
###Code
!autopep8 --help
###Output
_____no_output_____
###Markdown
Autopep8 SetupWe want to systematically test these options. In order to deploy our configuration grammar miner, we need to find the source code of the executable:
###Code
import os
def find_executable(name):
for path in os.get_exec_path():
qualified_name = os.path.join(path, name)
if os.path.exists(qualified_name):
return qualified_name
return None
autopep8_executable = find_executable("autopep8")
assert autopep8_executable is not None
autopep8_executable
###Output
_____no_output_____
###Markdown
Next, we build a function that reads the contents of the file and executes it.
###Code
def autopep8():
executable = find_executable("autopep8")
# First line has to contain "/usr/bin/env python" or like
first_line = open(executable).readline()
assert first_line.find("python") >= 0
contents = open(executable).read()
exec(contents)
###Output
_____no_output_____
###Markdown
Mining an Autopep8 GrammarWe can use the `autopep8()` function in our grammar miner:
###Code
autopep8_miner = OptionGrammarMiner(autopep8)
###Output
_____no_output_____
###Markdown
and extract a grammar for it:
###Code
autopep8_ebnf_grammar = autopep8_miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
This works because here, `autopep8` is not a separate process (and a separate Python interpreter), but we run the `autopep8()` function (and the `autopep8` code) in our current Python interpreter – up to the call to `parse_args()`, where we interrupt execution again. At this point, the `autopep8` code has done nothing but setting up the argument parser – which is what we are interested in. The grammar options mined reflect precisely the options seen when providing `--help`:
###Code
print(autopep8_ebnf_grammar["<option>"])
###Output
_____no_output_____
###Markdown
Metavariables like `<n>` or `<line>` are placeholders for integers. We assume all metavariables of the same name have the same type:
###Code
autopep8_ebnf_grammar["<line>"]
###Output
_____no_output_____
###Markdown
The grammar miner has inferred that the argument to `autopep8` is a list of files:
###Code
autopep8_ebnf_grammar["<arguments>"]
###Output
_____no_output_____
###Markdown
which in turn all are strings:
###Code
autopep8_ebnf_grammar["<files>"]
###Output
_____no_output_____
###Markdown
As we are only interested in testing options, not arguments, we fix the arguments to a single mandatory input. (Otherwise, we'd have plenty of random file names generated.)
###Code
autopep8_ebnf_grammar["<arguments>"] = [" <files>"]
autopep8_ebnf_grammar["<files>"] = ["foo.py"]
assert is_valid_grammar(autopep8_ebnf_grammar)
###Output
_____no_output_____
###Markdown
Creating Autopep8 Options Let us now use the inferred grammar for fuzzing. Again, we convert the EBNF grammar into a regular BNF grammar:
###Code
autopep8_grammar = convert_ebnf_grammar(autopep8_ebnf_grammar)
assert is_valid_grammar(autopep8_grammar)
###Output
_____no_output_____
###Markdown
And we can use the grammar for fuzzing all options:
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=4)
for i in range(20):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Let us apply these options on the actual program. We need a file `foo.py` that will serve as input: (Note that the following commands will overwrite the file `foo.py`, if it already exists in the current working directory. Be aware of this, if you downloaded the notebooks and are working locally.)
###Code
def create_foo_py():
open("foo.py", "w").write("""
def twice(x = 2):
return x + x
""")
create_foo_py()
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We see how `autopep8` fixes the spacing:
###Code
!autopep8 foo.py
###Output
_____no_output_____
###Markdown
Let us now put things together. We define a `ProgramRunner` that will run the `autopep8` executable with arguments coming from the mined `autopep8` grammar.
###Code
from Fuzzer import ProgramRunner
###Output
_____no_output_____
###Markdown
Running `autopep8` with the mined options reveals a surprisingly high number of passing runs. (We see that some options depend on each other or are mutually exclusive, but this is handled by the program logic, not the argument parser, and hence out of our scope.) The `GrammarCoverageFuzzer` ensures that each option is tested at least once. (Digits and letters, too, by the way.)
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=5)
for i in range(20):
invocation = "autopep8" + f.fuzz()
print("$ " + invocation)
args = invocation.split()
autopep8_runner = ProgramRunner(args)
result, outcome = autopep8_runner.run()
if result.stderr != "":
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
Our `foo.py` file now has been formatted in place a number of times:
###Code
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We don't need it anymore, so we clean up things:
###Code
import os
os.remove("foo.py")
###Output
_____no_output_____
###Markdown
Classes for Fuzzing Configuration OptionsLet us now create reusable classes that we can use for testing arbitrary programs. (Okay, make that "arbitrary programs that are written in Python and use the `argparse` module to process command-line arguments.") The class `OptionRunner` is a subclass of `ProgramRunner` that takes care of automatically determining the grammar, using the same steps we used for `autopep8`, above.
###Code
from Grammars import unreachable_nonterminals
class OptionRunner(ProgramRunner):
"""Run a program while determining its option grammar"""
def __init__(self, program: Union[str, List[str]],
arguments: Optional[str] = None, *,
miner_class: Optional[Type[OptionGrammarMiner]] = None):
"""Constructor.
`program` - the (Python) program to be executed
`arguments` - an (optional) string with arguments for `program`
        `miner_class` - the `OptionGrammarMiner` class to be used
(default: `OptionGrammarMiner`)
"""
if isinstance(program, str):
self.base_executable = program
else:
self.base_executable = program[0]
if miner_class is None:
miner_class = OptionGrammarMiner
self.miner_class = miner_class
self.find_contents()
self.find_grammar()
if arguments is not None:
self.set_arguments(arguments)
super().__init__(program)
###Output
_____no_output_____
###Markdown
First, we find the contents of the Python executable:
###Code
class OptionRunner(OptionRunner):
def find_contents(self):
self._executable = find_executable(self.base_executable)
first_line = open(self._executable).readline()
assert first_line.find("python") >= 0
self.contents = open(self._executable).read()
def invoker(self):
exec(self.contents)
def executable(self):
return self._executable
###Output
_____no_output_____
###Markdown
Next, we determine the grammar using the `OptionGrammarMiner` class:
###Code
class OptionRunner(OptionRunner):
def find_grammar(self):
miner = self.miner_class(self.invoker)
self._ebnf_grammar = miner.mine_ebnf_grammar()
def ebnf_grammar(self):
"""Return extracted grammar in EBNF form"""
return self._ebnf_grammar
def grammar(self):
"""Return extracted grammar in BNF form"""
return convert_ebnf_grammar(self._ebnf_grammar)
###Output
_____no_output_____
###Markdown
The two service methods `set_arguments()` and `set_invocation()` help us to change the arguments and program, respectively.
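For instance, one could later swap in different fixed arguments without re-mining the grammar (a sketch, assuming the completed class below and an installed `autopep8`; `bar.py` is just a made-up file name):
```python
runner = OptionRunner("autopep8", "foo.py")
runner.set_arguments("bar.py")           # replace the fixed arguments
runner.ebnf_grammar()["<arguments>"]     # expected: [' bar.py']
```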
###Code
class OptionRunner(OptionRunner):
def set_arguments(self, args):
self._ebnf_grammar["<arguments>"] = [" " + args]
# Delete rules for previous arguments
for nonterminal in unreachable_nonterminals(self._ebnf_grammar):
del self._ebnf_grammar[nonterminal]
def set_invocation(self, program):
self.program = program
###Output
_____no_output_____
###Markdown
We can instantiate the class on `autopep8` and immediately get the grammar:
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
print(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
###Markdown
An `OptionFuzzer` interacts with the given `OptionRunner` to obtain its grammar, which is then passed to its `GrammarCoverageFuzzer` superclass.
###Code
class OptionFuzzer(GrammarCoverageFuzzer):
"""Fuzz a (Python) program using its arguments"""
def __init__(self, runner: OptionRunner, *args, **kwargs):
"""Constructor. `runner` is an OptionRunner."""
assert issubclass(type(runner), OptionRunner)
self.runner = runner
grammar = runner.grammar()
super().__init__(grammar, *args, **kwargs)
###Output
_____no_output_____
###Markdown
When invoking `run()`, the `OptionFuzzer` creates a new invocation (using `fuzz()` from its grammar) and runs the now given (or previously set) runner with the arguments from the grammar. Note that the runner specified in `run()` can differ from the one set during initialization; this allows for mining options from one program and applying it in another context.
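As a usage sketch (an addition, assuming the completed `run()` method below and an installed `autopep8`): the grammar can be mined from one runner and the generated invocations directed at another `OptionRunner` instance.
```python
mining_runner = OptionRunner("autopep8", "foo.py")
fuzzer = OptionFuzzer(mining_runner)

# Any other OptionRunner instance will do as a target; the mined grammar is reused as is.
target_runner = OptionRunner("autopep8", "foo.py")
result, outcome = fuzzer.run(target_runner)
```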
###Code
class OptionFuzzer(OptionFuzzer):
def run(self, runner=None, inp=""):
if runner is None:
runner = self.runner
assert issubclass(type(runner), OptionRunner)
invocation = runner.executable() + " " + self.fuzz()
runner.set_invocation(invocation.split())
return runner.run(inp)
###Output
_____no_output_____
###Markdown
Example: Autopep8Let us apply our newly defined classes on the `autopep8` runner:
###Code
autopep8_fuzzer = OptionFuzzer(autopep8_runner, max_nonterminals=5)
for i in range(3):
print(autopep8_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
We can now systematically test `autopep8` with these classes:
###Code
autopep8_fuzzer.run(autopep8_runner)
###Output
_____no_output_____
###Markdown
Example: MyPyWe can extract options for the `mypy` static type checker for Python:
###Code
assert find_executable("mypy") is not None
mypy_runner = OptionRunner("mypy", "foo.py")
print(mypy_runner.ebnf_grammar()["<option>"])
mypy_fuzzer = OptionFuzzer(mypy_runner, max_nonterminals=5)
for i in range(10):
print(mypy_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Example: NotedownHere are the configuration options for the `notedown` Notebook to Markdown converter:
###Code
assert find_executable("notedown") is not None
notedown_runner = OptionRunner("notedown")
print(notedown_runner.ebnf_grammar()["<option>"])
notedown_fuzzer = OptionFuzzer(notedown_runner, max_nonterminals=5)
for i in range(10):
print(notedown_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Combinatorial TestingOur `CoverageGrammarFuzzer` does a good job in covering each and every option at least once, which is great for systematic testing. However, as we also can see in our examples above, some options require each other, while others interfere with each other. What we should do as good testers is not only to cover every option individually, but also _combinations_ of options. The Python `itertools` module gives us means to create combinations from lists. We can, for instance, take the `notedown` options and create a list of all pairs.
###Code
from itertools import combinations
option_list = notedown_runner.ebnf_grammar()["<option>"]
pairs = list(combinations(option_list, 2))
###Output
_____no_output_____
###Markdown
There's quite a number of pairs:
###Code
len(pairs)
print(pairs[:20])
###Output
_____no_output_____
###Markdown
Testing every such pair of options frequently suffices to cover all interferences between options. (Programs rarely have conditions involving three or more configuration settings.) To this end, we _change_ the grammar from having a list of options to having a list of _option pairs_, such that covering these will automatically cover all pairs. We create a function `pairwise()` that takes a list of options as occurring in our grammar and returns a list of _pairwise options_ – that is, our original options, but concatenated.
###Code
def pairwise(option_list):
return [option_1 + option_2
for (option_1, option_2) in combinations(option_list, 2)]
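# Added illustration: three (hypothetical) options yield the three concatenated pairs.
assert pairwise([" -a", " -b", " -c"]) == [" -a -b", " -a -c", " -b -c"]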
###Output
_____no_output_____
###Markdown
Here's the first 20 pairs:
###Code
print(pairwise(option_list)[:20])
###Output
_____no_output_____
###Markdown
The new grammar `pairwise_notedown_grammar` is a copy of the `notedown` grammar, but with the list of options replaced with the above pairwise option list.
###Code
notedown_grammar = notedown_runner.grammar()
pairwise_notedown_grammar = extend_grammar(notedown_grammar)
pairwise_notedown_grammar["<option>"] = pairwise(notedown_grammar["<option>"])
assert is_valid_grammar(pairwise_notedown_grammar)
###Output
_____no_output_____
###Markdown
Using the "pairwise" grammar to fuzz now covers one pair after another:
###Code
notedown_pairwise_fuzzer = GrammarCoverageFuzzer(
pairwise_notedown_grammar, max_nonterminals=4)
for i in range(10):
print(notedown_pairwise_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Can we actually test all combinations of options? Not in practice, as the number of combinations quickly grows as the length increases. It decreases again as the number of options reaches the maximum (with 20 options, there is only 1 combination involving _all_ options), but the absolute numbers are still staggering:
###Code
for combination_length in range(1, 20):
tuples = list(combinations(option_list, combination_length))
print(combination_length, len(tuples))
###Output
_____no_output_____
###Markdown
Formally, the number of combinations of length $k$ in a set of options of length $n$ is the binomial coefficient$${n \choose k} = \frac{n!}{k!(n - k)!}$$ which for $k = 2$ (all pairs) gives us$${n \choose 2} = \frac{n!}{2(n - 2)!} = \frac{n (n - 1)}{2}$$ For `autopep8` with its 29 options...
###Code
len(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
###Markdown
... we thus have 406 distinct pairs. However, the binomial coefficient does not differentiate between permutations of elements of the pairs, which our tests do. Therefore we need 812 tests to cover all pairs:
###Code
len(autopep8_runner.ebnf_grammar()["<option>"]) * \
(len(autopep8_runner.ebnf_grammar()["<option>"]) - 1)
###Output
_____no_output_____
###Markdown
For `mypy` with its 110 options, though, we already end up with 11,990 tests to be conducted:
###Code
len(mypy_runner.ebnf_grammar()["<option>"])
len(mypy_runner.ebnf_grammar()["<option>"]) * \
(len(mypy_runner.ebnf_grammar()["<option>"]) - 1)
###Output
_____no_output_____
###Markdown
Even if each pair takes a second to run, we'd still be done in three hours of testing, though. If your program has more options that you want to get covered in combinations, it is advisable that you limit the number of configurations further – for instance by limiting combinatorial testing to those combinations of options that can possibly interact with each other; and covering all other (presumably orthogonal) options individually. This mechanism of creating configurations by extending grammars can be easily extended to other configuration targets. One may want to explore a greater number of configurations, or expansions in specific contexts. The [exercises](Exercises), below, have a number of options ready for you. SynopsisThis chapter provides two classes:* `OptionRunner` automatically extracts command-line options from a Python program;* `OptionFuzzer` uses these to automatically test a Python program with a large variety of options. `OptionRunner` runs a program up to the point where it parses its arguments, and then extracts a grammar that describes its invocations:
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
###Output
_____no_output_____
###Markdown
The grammar can be extracted via the method `ebnf_grammar()`:
###Code
option_ebnf_grammar = autopep8_runner.ebnf_grammar()
option_ebnf_grammar
###Output
_____no_output_____
###Markdown
The grammar can be immediately used for fuzzing. A `GrammarCoverageFuzzer` will ensure all options are covered:
###Code
from Grammars import convert_ebnf_grammar
fuzzer = GrammarCoverageFuzzer(convert_ebnf_grammar(option_ebnf_grammar))
[fuzzer.fuzz() for i in range(3)]
###Output
_____no_output_____
###Markdown
The `OptionFuzzer` class summarizes these steps. Its constructor takes an `OptionRunner` to automatically extract the grammar; it does the necessary steps to extract the grammar and fuzz with it.
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
autopep8_fuzzer = OptionFuzzer(autopep8_runner)
[autopep8_fuzzer.fuzz() for i in range(3)]
###Output
_____no_output_____
###Markdown
The final step in testing would now be to invoke the program with these arguments. Note that `OptionRunner` is experimental: It assumes that the Python program in question uses the `argparse` module; and not all `argparse` features are supported. Still, it does a pretty good job even on nontrivial programs. The `OptionRunner` constructor accepts an additional `miner_class` keyword parameter, which takes the class of the argument grammar miner to be used. By default, this is `OptionGrammarMiner` – a helper class that can be used (and extended) to create your own option grammar miners.
###Code
# ignore
from ClassDiagram import display_class_hierarchy
from Fuzzer import Fuzzer, Runner, ProgramRunner
from Grammars import Expansion
from GrammarFuzzer import GrammarFuzzer, DerivationTree
from GrammarCoverageFuzzer import TrackingGrammarCoverageFuzzer
# ignore
display_class_hierarchy([OptionRunner, OptionFuzzer, OptionGrammarMiner],
public_methods=[
Fuzzer.__init__,
Fuzzer.fuzz,
Fuzzer.run,
Fuzzer.runs,
GrammarFuzzer.__init__,
GrammarFuzzer.fuzz,
GrammarFuzzer.fuzz_tree,
TrackingGrammarCoverageFuzzer.__init__,
OptionFuzzer.__init__,
OptionFuzzer.run,
Runner.__init__,
Runner.run,
ProgramRunner.__init__,
ProgramRunner.__init__,
OptionRunner.__init__,
OptionRunner.ebnf_grammar,
OptionRunner.grammar,
OptionGrammarMiner.__init__,
OptionGrammarMiner.mine_ebnf_grammar,
OptionGrammarMiner.mine_grammar,
],
types={
'DerivationTree': DerivationTree,
'Expansion': Expansion,
'Grammar': Grammar
},
project='fuzzingbook')
###Output
_____no_output_____
###Markdown
Lessons Learned* Besides regular input data, program _configurations_ make an important testing target.* For a given program using a standard library to parse command-line options and arguments, one can automatically extract these and convert them into a grammar.* To cover not only single options, but combinations of options, one can expand the grammar to cover all pairs, or come up with even more ambitious targets. Next StepsIf you liked the idea of mining a grammar from a program, do not miss:* [how to mine grammars for input data](GrammarMiner.ipynb) Our next steps in the book focus on:* [how to parse and recombine inputs](Parser.ipynb)* [how to assign weights and probabilities to specific productions](ProbabilisticGrammarFuzzer.ipynb)* [how to simplify inputs that cause a failure](Reducer.ipynb) BackgroundAlthough configuration data is just as likely to cause failures as other input data, it has received relatively little attention in test generation – possibly because, unlike "regular" input data, configuration data is not so much under control of external parties, and because, again unlike regular data, there is little variance in configurations. Creating models for software configurations and using these models for testing is commonplace, as is the idea of pairwise testing. For an overview, see \cite{Pezze2008}; for a discussion and comparison of state-of-the-art techniques, see \cite{Petke2015}.More specifically, \cite{Sutton2007} also discuss techniques to systematically cover command-line options. Dai et al. \cite{Dai2010} apply configuration fuzzing by changing variables associated with configuration files. Exercises Exercise 1: #ifdef Configuration FuzzingIn C programs, the *C preprocessor* can be used to choose which code parts should be compiled and which ones should not. As an example, in the C code
```C
#ifdef LONG_FOO
long foo() { ... }
#else
int foo() { ... }
#endif
```
the compiler will compile the function `foo()` with return type `long` if the _preprocessor variable_ `LONG_FOO` is defined, and with return type `int` if not. Such preprocessor variables are either set in the source files (using `#define`, as in `#define LONG_FOO`) or on the C compiler command line (using `-D<variable>` or `-D<variable>=<value>`, as in `-DLONG_FOO`). Such *conditional compilation* is used to configure C programs towards their environment. System-specific code can contain lots of conditional compilation. As an example, consider this excerpt of `xmlparse.c`, the XML parser that is part of the Python runtime library:
```c
#if defined(_WIN32) && !defined(LOAD_LIBRARY_SEARCH_SYSTEM32)
# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800
#endif
#if !defined(HAVE_GETRANDOM) && !defined(HAVE_SYSCALL_GETRANDOM) \
    && !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_ARC4RANDOM) \
    && !defined(XML_DEV_URANDOM) \
    && !defined(_WIN32) \
    && !defined(XML_POOR_ENTROPY)
# error
#endif
#if !defined(TIOCSWINSZ) || defined(__SCO__) || defined(__UNIXWARE__)
#define USE_SYSV_ENVVARS /* COLUMNS/LINES vs. TERMCAP */
#endif
#ifdef XML_UNICODE_WCHAR_T
#define XML_T(x) (const wchar_t)x
#define XML_L(x) L ## x
#else
#define XML_T(x) (const unsigned short)x
#define XML_L(x) x
#endif
int fun(int x) { return XML_T(x); }
```
A typical configuration for the C preprocessor on the above code could be `cc -c -D_WIN32 -DXML_POOR_ENTROPY -DXML_UNICODE_WCHAR_T xmlparse.c`, defining the given preprocessor variables and selecting the appropriate code fragments.
Since the compiler can only compile one configuration at a time (implying that we can also only _test_ one resulting executable at a time), your task is to find out which of these configurations actually compile. To this end, proceed in three steps. Part 1: Extract Preprocessor VariablesWrite a _function_ `cpp_identifiers()` that, given a set of lines (say, from `open(filename).readlines()`), extracts all preprocessor variables referenced in `#if` or `#ifdef` preprocessor instructions. Apply `cpp_identifiers()` on the sample C input above, such that
```python
cpp_identifiers(open("xmlparse.c").readlines())
```
returns the set
```python
{'_WIN32', 'LOAD_LIBRARY_SEARCH_SYSTEM32', 'HAVE_GETRANDOM', 'HAVE_SYSCALL_GETRANDOM', 'HAVE_ARC4RANDOM_BUF', ...}
```
**Solution.** Let us start with creating a sample input file, `xmlparse.c`:
###Code
filename = "xmlparse.c"
open(filename, "w").write(
"""
#if defined(_WIN32) && !defined(LOAD_LIBRARY_SEARCH_SYSTEM32)
# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800
#endif
#if !defined(HAVE_GETRANDOM) && !defined(HAVE_SYSCALL_GETRANDOM) \
&& !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_ARC4RANDOM) \
&& !defined(XML_DEV_URANDOM) \
&& !defined(_WIN32) \
&& !defined(XML_POOR_ENTROPY)
# error
#endif
#if !defined(TIOCSWINSZ) || defined(__SCO__) || defined(__UNIXWARE__)
#define USE_SYSV_ENVVARS /* COLUMNS/LINES vs. TERMCAP */
#endif
#ifdef XML_UNICODE_WCHAR_T
#define XML_T(x) (const wchar_t)x
#define XML_L(x) L ## x
#else
#define XML_T(x) (const unsigned short)x
#define XML_L(x) x
#endif
int fun(int x) { return XML_T(x); }
""");
###Output
_____no_output_____
###Markdown
To find C preprocessor `#if` directives and preprocessor variables, we use regular expressions matching them.
###Code
import re
re_cpp_if_directive = re.compile(r"\s*#\s*(el)?if")
re_cpp_identifier = re.compile(r"[a-zA-Z_$]+")
def cpp_identifiers(lines):
identifiers = set()
for line in lines:
if re_cpp_if_directive.match(line):
identifiers |= set(re_cpp_identifier.findall(line))
# These are preprocessor keywords
identifiers -= {"if", "ifdef", "ifndef", "defined"}
return identifiers
cpp_ids = cpp_identifiers(open("xmlparse.c").readlines())
cpp_ids
###Output
_____no_output_____
###Markdown
Part 2: Derive an Option GrammarWith the help of `cpp_identifiers()`, create a grammar which has C compiler invocations with a list of options, where each option takes the form `-D<variable>` for a preprocessor variable `<variable>`. Using this grammar `cpp_grammar`, a fuzzer
```python
g = GrammarCoverageFuzzer(cpp_grammar)
```
would create C compiler invocations such as
```python
[g.fuzz() for i in range(10)]
['cc -DHAVE_SYSCALL_GETRANDOM xmlparse.c', 'cc -D__SCO__ -DRANDOM_BUF -DXML_UNICODE_WCHAR_T -D__UNIXWARE__ xmlparse.c', 'cc -DXML_POOR_ENTROPY xmlparse.c', 'cc -DRANDOM xmlparse.c', 'cc -D_WIN xmlparse.c', 'cc -DHAVE_ARC xmlparse.c', ...]
```
**Solution.** This is not very difficult:
###Code
from Grammars import Grammar, is_valid_grammar
cpp_grammar: Grammar = {
"<start>": ["cc -c<options> " + filename],
"<options>": ["<option>", "<options><option>"],
"<option>": []
}
for id in cpp_ids:
s = new_symbol(cpp_grammar, "<" + id + ">")
cpp_grammar["<option>"].append(s)
cpp_grammar[s] = [" -D" + id]
assert is_valid_grammar(cpp_grammar)
cpp_grammar
###Output
_____no_output_____
###Markdown
Part 3: C Preprocessor Configuration FuzzingUsing the grammar just produced, use a `GrammarCoverageFuzzer` to 1. Test each preprocessor variable individually 2. Test each pair of preprocessor variables, using `pairwise()`.What happens if you actually run the invocations? **Solution.** We can simply run the coverage fuzzer, as described above.
###Code
g = GrammarCoverageFuzzer(cpp_grammar)
g.fuzz()
from Fuzzer import ProgramRunner
for i in range(10):
invocation = g.fuzz()
print("$", invocation)
# subprocess.call(invocation, shell=True)
cc_runner = ProgramRunner(invocation.split(' '))
(result, outcome) = cc_runner.run()
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
To test all pairs, we can use `pairwise()`:
###Code
pairwise_cpp_grammar = extend_grammar(cpp_grammar)
pairwise_cpp_grammar["<option>"] = pairwise(cpp_grammar["<option>"])
pairwise_cpp_grammar["<option>"][:10]
g = GrammarCoverageFuzzer(pairwise_cpp_grammar)  # use the pairwise grammar for this run
for i in range(10):
    invocation = g.fuzz()
print("$", invocation)
# subprocess.call(invocation, shell=True)
cc_runner = ProgramRunner(invocation.split(' '))
(result, outcome) = cc_runner.run()
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
Some of the compilation errors we get could be expected – for instance, defining `XML_UNICODE_WCHAR_T` when the type is actually not supported in our environment. Other errors may not be expected – and it is these errors we would find through systematic configuration fuzzing, as described above. At the end, don't forget to clean up:
###Code
os.remove("xmlparse.c")
if os.path.exists("xmlparse.o"):
os.remove("xmlparse.o")
###Output
_____no_output_____
###Markdown
Exercise 2: .ini Configuration FuzzingBesides command-line options, another important source of configurations are _configuration files_. In this exercise, we will consider the very simple configuration language provided by the Python `ConfigParser` module, which is very similar to what is found in Microsoft Windows _.ini_ files. The following example for a `ConfigParser` input file stems right from [the ConfigParser documentation](https://docs.python.org/3/library/configparser.html):
```
[DEFAULT]
ServerAliveInterval = 45
Compression = yes
CompressionLevel = 9
ForwardX11 = yes

[bitbucket.org]
User = hg

[topsecret.server.com]
Port = 50022
ForwardX11 = no
```
The above `ConfigParser` file can be created programmatically:
###Code
import configparser
config = configparser.ConfigParser()
config['DEFAULT'] = {'ServerAliveInterval': '45',
'Compression': 'yes',
'CompressionLevel': '9'}
config['bitbucket.org'] = {}
config['bitbucket.org']['User'] = 'hg'
config['topsecret.server.com'] = {}
topsecret = config['topsecret.server.com']
topsecret['Port'] = '50022' # mutates the parser
topsecret['ForwardX11'] = 'no' # same here
config['DEFAULT']['ForwardX11'] = 'yes'
with open('example.ini', 'w') as configfile:
config.write(configfile)
with open('example.ini') as configfile:
print(configfile.read(), end="")
###Output
_____no_output_____
###Markdown
and be read in again:
###Code
config = configparser.ConfigParser()
config.read('example.ini')
topsecret = config['topsecret.server.com']
topsecret['Port']
###Output
_____no_output_____
###Markdown
Part 1: Read ConfigurationUsing `configparser`, create a program reading in the above configuration file and accessing the individual elements. Part 2: Create a Configuration GrammarDesign a grammar that will automatically create configuration files suitable for your above program. Fuzz your program with it. Part 3: Mine a Configuration GrammarBy dynamically tracking the individual accesses to configuration elements, you can again extract a basic grammar from the execution. To this end, create a subclass of `ConfigParser` with a special method `__getitem__`:
###Code
class TrackingConfigParser(configparser.ConfigParser):
def __getitem__(self, key):
print("Accessing", repr(key))
return super().__getitem__(key)
###Output
_____no_output_____
###Markdown
For a `TrackingConfigParser` object `p`, `p.__getitem__(key)` will be invoked whenever `p[key]` is accessed:
###Code
tracking_config_parser = TrackingConfigParser()
tracking_config_parser.read('example.ini')
section = tracking_config_parser['topsecret.server.com']
###Output
_____no_output_____
###Markdown
Using `__getitem__()`, as above, implement a tracking mechanism that, while your program accesses the read configuration, automatically saves options accessed and values read. Create a prototype grammar from these values; use it for fuzzing. At the end, don't forget to clean up:
###Code
import os
os.remove("example.ini")
###Output
_____no_output_____
###Markdown
**Solution.** Left to the reader. Enjoy! Exercise 3: Extracting and Fuzzing C Command-Line OptionsIn C programs, the `getopt()` function is frequently used to process configuration options. A call```getopt(argc, argv, "bf:")```indicates that the program accepts two options `-b` and `-f`, with `-f` taking an argument (as indicated by the following colon). Part 1: Getopt FuzzingWrite a framework which, for a given C program, automatically extracts the argument to `getopt()` and derives a fuzzing grammar for it. There are multiple ways to achieve this:1. Scan the program source code for occurrences of `getopt()` and return the string passed. (Crude, but should frequently work.)2. Insert your own implementation of `getopt()` into the source code (effectively replacing `getopt()` from the runtime library), which outputs the `getopt()` argument and exits the program. Recompile and run.3. (Advanced.) As above, but instead of changing the source code, hook into the _dynamic linker_ which at runtime links the program with the C runtime library. Set the library loading path (on Linux and Unix, this is the `LD_LIBRARY_PATH` environment variable) such that your own version of `getopt()` is linked first, and the regular libraries later. Executing the program (without recompiling) should yield the desired result.Apply this on `grep` and `ls`; report the resulting grammars and results. **Solution.** Left to the reader. Enjoy hacking! Part 2: Fuzzing Long Options in CSame as Part 1, but also hook into the GNU variant `getopt_long()`, which accepts "long" arguments with double dashes such as `--help`. Note that method 1, above, will not work here, since the "long" options are defined in a separately defined structure. **Solution.** Left to the reader. Enjoy hacking! Exercise 4: Expansions in ContextIn our above option configurations, we have multiple symbols which all expand to the same integer. For instance, the `--line-range` option of `autopep8` takes two `<line>` parameters which both expand into the same `<int>` symbol:
```
<option> ::= ... | --line-range <line> <line> | ...
<line>   ::= <int>
<int>    ::= (-)?<digit>+
<digit>  ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
```
###Code
autopep8_runner.ebnf_grammar()["<line>"]
autopep8_runner.ebnf_grammar()["<int>"]
autopep8_runner.ebnf_grammar()["<digit>"]
###Output
_____no_output_____
###Markdown
Testing ConfigurationsThe behavior of a program is not only governed by its data. The _configuration_ of a program – that is, the settings that govern the execution of a program on its (regular) input data, as set by options or configuration files – just as well influences behavior, and thus can and should be tested. In this chapter, we explore how to systematically _test_ and _cover_ software configurations. By _automatically inferring configuration options_, we can apply these techniques out of the box, with no need for writing a grammar. Finally, we show how to systematically cover _combinations_ of configuration options, quickly detecting unwanted interferences. **Prerequisites*** You should have read the [chapter on grammars](Grammars.ipynb).* You should have read the [chapter on grammar coverage](GrammarCoverageFuzzer.ipynb). Configuration OptionsWhen we talk about the input to a program, we usually think of the _data_ it processes. This is also what we have been fuzzing in the past chapters – be it with [random input](Fuzzer.ipynb), [mutation-based fuzzing](MutationFuzzer.ipynb), or [grammar-based fuzzing](GrammarFuzzer.ipynb). However, programs typically have several input sources, all of which can and should be tested – and included in test generation. One important source of input is the program's _configuration_ – that is, a set of inputs that typically is set once when beginning to process data and then stays constant while processing data, while the program is running, or even while the program is deployed. Such a configuration is frequently set in _configuration files_ (for instance, as key/value pairs); the most ubiquitous method for command-line tools, though, are _configuration options_ on the command line. As an example, consider the `grep` utility to find textual patterns in files. The exact mode by which `grep` works is governed by a multitude of options, which can be listed by providing a `--help` option:
###Code
!grep --help
###Output
_____no_output_____
###Markdown
All these options need to be tested for whether they operate correctly. In security testing, any such option may also trigger a yet unknown vulnerability. Hence, such options can become _fuzz targets_ on their own. In this chapter, we analyze how to systematically test such options – and better yet, how to extract possible configurations right out of given program files, such that we do not have to specify anything. Options in PythonLet us stick to our common programming language here and examine how options are processed in Python. The `argparse` module provides a parser for command-line arguments (and options) with great functionality – and great complexity. You start by defining a parser (`argparse.ArgumentParser()`) to which individual arguments with various features are added, one after another. Additional parameters for each argument can specify the type (`type`) of the argument (say, integers or strings), or the number of arguments (`nargs`). By default, arguments are stored under their name in the `args` object coming from `parse_args()` – thus, `args.integers` holds the `integers` arguments added earlier. Special actions (`actions`) allow to store specific values in given variables; the `store_const` action stores the given `const` in the attribute named by `dest`. The following example takes a number of integer arguments (`integers`) as well as an operator (`--sum`, `--min`, or `--max`) to be applied on these integers. The operators all store a function reference in the `accumulate` attribute, which is finally invoked on the integers parsed:
###Code
import argparse
def process_numbers(args=[]):
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('integers', metavar='N', type=int, nargs='+',
help='an integer for the accumulator')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--sum', dest='accumulate', action='store_const',
const=sum,
help='sum the integers')
group.add_argument('--min', dest='accumulate', action='store_const',
const=min,
help='compute the minimum')
group.add_argument('--max', dest='accumulate', action='store_const',
const=max,
help='compute the maximum')
args = parser.parse_args(args)
print(args.accumulate(args.integers))
###Output
_____no_output_____
###Markdown
Here's how `process_numbers()` works. We can, for instance, invoke the `--min` option on the given arguments to compute the minimum:
###Code
process_numbers(["--min", "100", "200", "300"])
###Output
_____no_output_____
###Markdown
Or compute the sum of three numbers:
###Code
process_numbers(["--sum", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
When defined via `add_mutually_exclusive_group()` (as above), options are mutually exclusive. Consequently, we can have only one operator:
###Code
import fuzzingbook_utils
from ExpectError import ExpectError
with ExpectError(print_traceback=False):
process_numbers(["--sum", "--max", "1", "2", "3"])
###Output
_____no_output_____
###Markdown
A Grammar for ConfigurationsHow can we test a system with several options? The easiest answer is to write a grammar for it. The grammar `PROCESS_NUMBERS_EBNF_GRAMMAR` reflects the possible combinations of options and arguments:
###Code
from Grammars import crange, srange, convert_ebnf_grammar, is_valid_grammar, START_SYMBOL, new_symbol
PROCESS_NUMBERS_EBNF_GRAMMAR = {
"<start>": ["<operator> <integers>"],
"<operator>": ["--sum", "--min", "--max"],
"<integers>": ["<integer>", "<integers> <integer>"],
"<integer>": ["<digit>+"],
"<digit>": crange('0', '9')
}
assert is_valid_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
PROCESS_NUMBERS_GRAMMAR = convert_ebnf_grammar(PROCESS_NUMBERS_EBNF_GRAMMAR)
###Output
_____no_output_____
###Markdown
We can feed this grammar into our [grammar coverage fuzzer](GrammarCoverageFuzzer.ipynb) and have it cover one option after another:
###Code
from GrammarCoverageFuzzer import GrammarCoverageFuzzer
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Of course, we can also invoke `process_numbers()` with these very arguments. To this end, we need to convert the string produced by the grammar back into a list of individual arguments, using `split()`:
###Code
f = GrammarCoverageFuzzer(PROCESS_NUMBERS_GRAMMAR, min_nonterminals=10)
for i in range(3):
args = f.fuzz().split()
print(args)
process_numbers(args)
###Output
_____no_output_____
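###Markdown
Grammars work for configuration files just as well. As a minimal sketch, here is a grammar for a simple key/value configuration format – the format is made up purely for illustration and not tied to any particular program – fuzzed with the same machinery:
###Code
# A made-up key/value configuration format, for illustration only.
SIMPLE_CONFIG_EBNF_GRAMMAR = {
    "<start>": ["<line>+"],
    "<line>": ["<key>=<value>\n"],
    "<key>": ["<letter>+"],
    "<letter>": crange('a', 'z'),
    "<value>": ["<digit>+"],
    "<digit>": crange('0', '9')
}
assert is_valid_grammar(SIMPLE_CONFIG_EBNF_GRAMMAR)

simple_config_grammar = convert_ebnf_grammar(SIMPLE_CONFIG_EBNF_GRAMMAR)
f = GrammarCoverageFuzzer(simple_config_grammar, min_nonterminals=10)
print(f.fuzz())
###Output
_____no_output_____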
###Markdown
In a similar way, we can define grammars for any program to be tested; as well as define grammars for, say, configuration files. Yet, the grammar has to be updated with every change to the program, which creates a maintenance burden. Given that the information required for the grammar is already all encoded in the program, the question arises: _Can't we go and extract configuration options right out of the program in the first place?_ Mining Configuration Options In this section, we try to extract option and argument information right out of a program, such that we do not have to specify a configuration grammar. The aim is to have a configuration fuzzer that works on the options and arguments of an arbitrary program, as long as it follows specific conventions for processing its arguments. In the case of Python programs, this means using the `argparse` module. Our idea is as follows: We execute the given program up to the point where the arguments are actually parsed – that is, `argparse.parse_args()` is invoked. Up to this point, we track all calls into the argument parser, notably those calls that define arguments and options (`add_argument()`). From these, we construct the grammar. Tracking Arguments Let us illustrate this approach with a simple experiment: We define a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while `process_numbers` is invoked. If we have a call to a method `add_argument`, we access and print out the local variables (which at this point are the arguments to the method).
###Code
import sys
import string
def traceit(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(method_name, locals)
###Output
_____no_output_____
###Markdown
What we get is a list of all calls to `add_argument()`, together with the method arguments passed:
###Code
sys.settrace(traceit)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
###Markdown
From the `args` argument, we can access the individual options and arguments to be defined:
###Code
def traceit(frame, event, arg):
if event != "call":
return
method_name = frame.f_code.co_name
if method_name != "add_argument":
return
locals = frame.f_locals
print(locals['args'])
sys.settrace(traceit)
process_numbers(["--sum", "1", "2", "3"])
sys.settrace(None)
###Output
_____no_output_____
###Markdown
We see that each argument comes as a tuple with one (say, `integers` or `--sum`) or two members (`-h` and `--help`), which denote alternate forms for the same option. Our job will be to go through the arguments of `add_argument()` and detect not only the names of options and arguments, but also whether they accept additional parameters, as well as the type of the parameters. A Grammar Miner for Options and Arguments Let us now build a class that gathers all this information to create a grammar. We use the `ParseInterrupt` exception to interrupt program execution after gathering all arguments and options:
###Code
class ParseInterrupt(Exception):
pass
###Output
_____no_output_____
###Markdown
The class `OptionGrammarMiner` takes an executable function for which the grammar of options and arguments is to be mined:
###Code
class OptionGrammarMiner(object):
def __init__(self, function, log=False):
self.function = function
self.log = log
###Output
_____no_output_____
###Markdown
The method `mine_ebnf_grammar()` is where everything happens. It creates a grammar of the form
```
<start> ::= <option>* <arguments>
<option> ::= <empty>
<arguments> ::= <empty>
```
in which the options and arguments will be collected. It then sets a trace function (see [our chapter on coverage](Coverage.ipynb) for details) that is active while the previously defined `function` is invoked. Raising `ParseInterrupt` (when `parse_args()` is invoked) ends execution.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
OPTION_SYMBOL = "<option>"
ARGUMENTS_SYMBOL = "<arguments>"
def mine_ebnf_grammar(self):
self.grammar = {
START_SYMBOL: ["(" + self.OPTION_SYMBOL + ")*" + self.ARGUMENTS_SYMBOL],
self.OPTION_SYMBOL: [],
self.ARGUMENTS_SYMBOL: []
}
self.current_group = self.OPTION_SYMBOL
old_trace = sys.settrace(self.traceit)
try:
self.function()
except ParseInterrupt:
pass
sys.settrace(old_trace)
return self.grammar
def mine_grammar(self):
return convert_ebnf_grammar(self.mine_ebnf_grammar())
###Output
_____no_output_____
###Markdown
The trace function checks for four methods: `add_argument()` is the most important function, resulting in processing arguments; `frame.f_locals` again is the set of local variables, which at this point is mostly the arguments to `add_argument()`. Since mutually exclusive groups also have a method `add_argument()`, we set the flag `in_group` to differentiate. Note that we make no specific efforts to differentiate between multiple parsers or groups; we simply assume that there is one parser, and at any point at most one mutually exclusive group.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def traceit(self, frame, event, arg):
if event != "call":
return
if "self" not in frame.f_locals:
return
self_var = frame.f_locals["self"]
method_name = frame.f_code.co_name
if method_name == "add_argument":
in_group = repr(type(self_var)).find("Group") >= 0
self.process_argument(frame.f_locals, in_group)
elif method_name == "add_mutually_exclusive_group":
self.add_group(frame.f_locals, exclusive=True)
elif method_name == "add_argument_group":
# self.add_group(frame.f_locals, exclusive=False)
pass
elif method_name == "parse_args":
raise ParseInterrupt
return None
###Output
_____no_output_____
###Markdown
The `process_argument()` method now analyzes the arguments passed and adds them to the grammar:
* If the argument starts with `-`, it gets added as an optional element to the `<option>` list.
* Otherwise, it gets added to the `<arguments>` list.

The optional `nargs` argument specifies how many arguments can follow. If it is a number, we add the appropriate number of elements to the grammar; if it is an abstract specifier (say, `+` or `*`), we use it directly as EBNF operator. Given the large number of parameters and optional behavior, this is a somewhat messy function, but it does the job.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def process_argument(self, locals, in_group):
args = locals["args"]
kwargs = locals["kwargs"]
if self.log:
print(args)
print(kwargs)
print()
for arg in args:
self.process_arg(arg, in_group, kwargs)
class OptionGrammarMiner(OptionGrammarMiner):
def process_arg(self, arg, in_group, kwargs):
if arg.startswith('-'):
if not in_group:
target = self.OPTION_SYMBOL
else:
target = self.current_group
metavar = None
arg = " " + arg
else:
target = self.ARGUMENTS_SYMBOL
metavar = arg
arg = ""
if "nargs" in kwargs:
nargs = kwargs["nargs"]
else:
nargs = 1
param = self.add_parameter(kwargs, metavar)
if param == "":
nargs = 0
if isinstance(nargs, int):
for i in range(nargs):
arg += param
else:
assert nargs in "?+*"
arg += '(' + param + ')' + nargs
if target == self.OPTION_SYMBOL:
self.grammar[target].append(arg)
else:
self.grammar[target].append(arg)
###Output
_____no_output_____
###Markdown
The method `add_parameter()` handles possible parameters of options. If the argument has an `action` defined, it takes no parameter. Otherwise, we identify the type of the parameter (as `int` or `str`) and augment the grammar with an appropriate rule.
###Code
import inspect
class OptionGrammarMiner(OptionGrammarMiner):
def add_parameter(self, kwargs, metavar):
if "action" in kwargs:
# No parameter
return ""
type_ = "str"
if "type" in kwargs:
given_type = kwargs["type"]
# int types come as '<class int>'
if inspect.isclass(given_type) and issubclass(given_type, int):
type_ = "int"
if metavar is None:
if "metavar" in kwargs:
metavar = kwargs["metavar"]
else:
metavar = type_
self.add_type_rule(type_)
if metavar != type_:
self.add_metavar_rule(metavar, type_)
param = " <" + metavar + ">"
return param
###Output
_____no_output_____
###Markdown
The method `add_type_rule()` adds a rule for parameter types to the grammar. If the parameter is identified by a meta-variable (say, `N`), we add a rule for this as well to improve legibility.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_type_rule(self, type_):
if type_ == "int":
self.add_int_rule()
else:
self.add_str_rule()
def add_int_rule(self):
self.grammar["<int>"] = ["(-)?<digit>+"]
self.grammar["<digit>"] = crange('0', '9')
def add_str_rule(self):
self.grammar["<str>"] = ["<char>+"]
self.grammar["<char>"] = srange(
string.digits
+ string.ascii_letters
+ string.punctuation)
def add_metavar_rule(self, metavar, type_):
self.grammar["<" + metavar + ">"] = ["<" + type_ + ">"]
###Output
_____no_output_____
###Markdown
The method `add_group()` adds a new mutually exclusive group to the grammar. We define a new symbol (say, `<group>`) for the options added to the group, and use the `required` and `exclusive` flags to define an appropriate expansion operator. The group is then prefixed to the grammar, as in
```
<start> ::= <group> <option>* <arguments>
<group> ::= <empty>
```
and filled with the next calls to `add_argument()` within the group.
###Code
class OptionGrammarMiner(OptionGrammarMiner):
def add_group(self, locals, exclusive):
kwargs = locals["kwargs"]
if self.log:
print(kwargs)
required = kwargs.get("required", False)
group = new_symbol(self.grammar, "<group>")
if required and exclusive:
group_expansion = group
if required and not exclusive:
group_expansion = group + "+"
if not required and exclusive:
group_expansion = group + "?"
if not required and not exclusive:
group_expansion = group + "*"
self.grammar[START_SYMBOL][0] = group_expansion + \
self.grammar[START_SYMBOL][0]
self.grammar[group] = []
self.current_group = group
###Output
_____no_output_____
###Markdown
That's it! With this, we can now extract the grammar from our `process_numbers()` program. Turning on logging again reveals the variables we draw upon.
###Code
miner = OptionGrammarMiner(process_numbers, log=True)
ebnf_grammar = miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
The grammar properly identifies the group found:
###Code
ebnf_grammar["<start>"]
ebnf_grammar["<group>"]
###Output
_____no_output_____
###Markdown
It also identifies a `--help` option provided not by us, but by the `argparse` module:
###Code
ebnf_grammar["<option>"]
###Output
_____no_output_____
###Markdown
The grammar also correctly identifies the types of the arguments:
###Code
ebnf_grammar["<arguments>"]
ebnf_grammar["<integers>"]
###Output
_____no_output_____
###Markdown
The rules for `int` are set as defined by `add_int_rule()`
###Code
ebnf_grammar["<int>"]
###Output
_____no_output_____
###Markdown
We can take this grammar and convert it to BNF, such that we can fuzz with it right away:
###Code
assert is_valid_grammar(ebnf_grammar)
grammar = convert_ebnf_grammar(ebnf_grammar)
assert is_valid_grammar(grammar)
f = GrammarCoverageFuzzer(grammar)
for i in range(10):
print(f.fuzz())
###Output
_____no_output_____
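###Markdown
As a quick sketch, we can also feed such invocations back into `process_numbers()`. Since the mined grammar also contains `argparse`'s built-in `-h`/`--help` option, some runs will print the help text and end in `SystemExit`, which we simply catch:
###Code
for i in range(3):
    args = f.fuzz().split()
    print(args)
    try:
        process_numbers(args)
    except SystemExit as exc:
        # Raised by argparse, e.g. for --help or rejected option combinations
        print("(SystemExit with code", repr(exc.code) + ")")
###Output
_____no_output_____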
###Markdown
Each and every invocation adheres to the rules as set forth in the `argparse` calls. By mining options and arguments from existing programs, we can now fuzz these options out of the box – without having to specify a grammar. Testing Autopep8 Let us try out the option grammar miner on real-world Python programs. `autopep8` is a tool that automatically converts Python code to the [PEP 8 Style Guide for Python Code](https://www.python.org/dev/peps/pep-0008/). (Actually, all Python code in this book runs through `autopep8` during production.) `autopep8` offers a wide range of options, as can be seen by invoking it with `--help`:
###Code
!autopep8 --help
###Output
_____no_output_____
###Markdown
Autopep8 SetupWe want to systematically test these options. In order to deploy our configuration grammar miner, we need to find the source code of the executable:
###Code
import os
def find_executable(name):
for path in os.get_exec_path():
qualified_name = os.path.join(path, name)
if os.path.exists(qualified_name):
return qualified_name
return None
autopep8_executable = find_executable("autopep8")
assert autopep8_executable is not None
autopep8_executable
###Output
_____no_output_____
###Markdown
Next, we build a function that reads the contents of the file and executes it.
###Code
def autopep8():
executable = find_executable("autopep8")
# First line has to contain "/usr/bin/env python" or like
first_line = open(executable).readline()
assert first_line.find("python") >= 0
contents = open(executable).read()
exec(contents)
###Output
_____no_output_____
###Markdown
Mining an Autopep8 GrammarWe can use the `autopep8()` function in our grammar miner:
###Code
autopep8_miner = OptionGrammarMiner(autopep8)
###Output
_____no_output_____
###Markdown
and extract a grammar for it:
###Code
autopep8_ebnf_grammar = autopep8_miner.mine_ebnf_grammar()
###Output
_____no_output_____
###Markdown
This works because here, `autopep8` is not a separate process (and a separate Python interpreter), but we run the `autopep8()` function (and the `autopep8` code) in our current Python interpreter – up to the call to `parse_args()`, where we interrupt execution again. At this point, the `autopep8` code has done nothing but setting up the argument parser – which is what we are interested in. The grammar options mined reflect precisely the options seen when providing `--help`:
###Code
print(autopep8_ebnf_grammar["<option>"])
###Output
_____no_output_____
###Markdown
Metavariables like `<n>` or `<line>` are placeholders for integers. We assume all metavariables of the same name have the same type:
###Code
autopep8_ebnf_grammar["<line>"]
###Output
_____no_output_____
###Markdown
The grammar miner has inferred that the argument to `autopep8` is a list of files:
###Code
autopep8_ebnf_grammar["<arguments>"]
###Output
_____no_output_____
###Markdown
which in turn all are strings:
###Code
autopep8_ebnf_grammar["<files>"]
###Output
_____no_output_____
###Markdown
As we are only interested in testing options, not arguments, we fix the arguments to a single mandatory input. (Otherwise, we'd have plenty of random file names generated.)
###Code
autopep8_ebnf_grammar["<arguments>"] = [" <files>"]
autopep8_ebnf_grammar["<files>"] = ["foo.py"]
assert is_valid_grammar(autopep8_ebnf_grammar)
###Output
_____no_output_____
###Markdown
Creating Autopep8 Options Let us now use the inferred grammar for fuzzing. Again, we convert the EBNF grammar into a regular BNF grammar:
###Code
autopep8_grammar = convert_ebnf_grammar(autopep8_ebnf_grammar)
assert is_valid_grammar(autopep8_grammar)
###Output
_____no_output_____
###Markdown
And we can use the grammar for fuzzing all options:
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=4)
for i in range(20):
print(f.fuzz())
###Output
_____no_output_____
###Markdown
Let us apply these options on the actual program. We need a file `foo.py` that will serve as input:
###Code
def create_foo_py():
open("foo.py", "w").write("""
def twice(x = 2):
return x + x
""")
create_foo_py()
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We see how `autopep8` fixes the spacing:
###Code
!autopep8 foo.py
###Output
_____no_output_____
###Markdown
Let us now put things together. We define a `ProgramRunner` that will run the `autopep8` executable with arguments coming from the mined `autopep8` grammar.
###Code
from Fuzzer import ProgramRunner
###Output
_____no_output_____
###Markdown
Running `autopep8` with the mined options reveals a surprisingly high number of passing runs. (We see that some options depend on each other or are mutually exclusive, but this is handled by the program logic, not the argument parser, and hence out of our scope.) The `GrammarCoverageFuzzer` ensures that each option is tested at least once. (Digits and letters, too, by the way.)
###Code
f = GrammarCoverageFuzzer(autopep8_grammar, max_nonterminals=5)
for i in range(20):
invocation = "autopep8" + f.fuzz()
print("$ " + invocation)
args = invocation.split()
autopep8 = ProgramRunner(args)
result, outcome = autopep8.run()
if result.stderr != "":
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
Our `foo.py` file now has been formatted in place a number of times:
###Code
print(open("foo.py").read(), end="")
###Output
_____no_output_____
###Markdown
We don't need it anymore, so we clean up things:
###Code
import os
os.remove("foo.py")
###Output
_____no_output_____
###Markdown
Classes for Fuzzing Configuration Options Let us now create reusable classes that we can use for testing arbitrary programs. (Okay, make that "arbitrary programs that are written in Python and use the `argparse` module to process command-line arguments.") The class `OptionRunner` is a subclass of `ProgramRunner` that takes care of automatically determining the grammar, using the same steps we used for `autopep8`, above.
###Code
class OptionRunner(ProgramRunner):
def __init__(self, program, arguments=None):
if isinstance(program, str):
self.base_executable = program
else:
self.base_executable = program[0]
self.find_contents()
self.find_grammar()
if arguments is not None:
self.set_arguments(arguments)
super().__init__(program)
###Output
_____no_output_____
###Markdown
First, we find the contents of the Python executable:
###Code
class OptionRunner(OptionRunner):
def find_contents(self):
self._executable = find_executable(self.base_executable)
first_line = open(self._executable).readline()
assert first_line.find("python") >= 0
self.contents = open(self._executable).read()
def invoker(self):
exec(self.contents)
def executable(self):
return self._executable
###Output
_____no_output_____
###Markdown
Next, we determine the grammar using the `OptionGrammarMiner` class:
###Code
class OptionRunner(OptionRunner):
def find_grammar(self):
miner = OptionGrammarMiner(self.invoker)
self._ebnf_grammar = miner.mine_ebnf_grammar()
def ebnf_grammar(self):
return self._ebnf_grammar
def grammar(self):
return convert_ebnf_grammar(self._ebnf_grammar)
###Output
_____no_output_____
###Markdown
The two service methods `set_arguments()` and `set_invocation()` help us to change the arguments and program, respectively.
###Code
class OptionRunner(OptionRunner):
def set_arguments(self, args):
self._ebnf_grammar["<arguments>"] = [" " + args]
def set_invocation(self, program):
self.program = program
###Output
_____no_output_____
###Markdown
We can instantiate the class on `autopep8` and immediately get the grammar:
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
print(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
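###Markdown
The mined `<arguments>` setting can also be changed after the fact via `set_arguments()`. A quick sketch – the file name `bar.py` is just a made-up placeholder, and we restore `foo.py` afterwards so the examples below stay unchanged:
###Code
autopep8_runner.set_arguments("bar.py")   # "bar.py" is a made-up placeholder
print(autopep8_runner.ebnf_grammar()["<arguments>"])
autopep8_runner.set_arguments("foo.py")   # restore the setting used below
###Output
_____no_output_____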
###Markdown
An `OptionFuzzer` interacts with the given `OptionRunner` to obtain its grammar, which is then passed to its `GrammarCoverageFuzzer` superclass.
###Code
class OptionFuzzer(GrammarCoverageFuzzer):
def __init__(self, runner, *args, **kwargs):
assert issubclass(type(runner), OptionRunner)
self.runner = runner
grammar = runner.grammar()
super().__init__(grammar, *args, **kwargs)
###Output
_____no_output_____
###Markdown
When invoking `run()`, the `OptionFuzzer` creates a new invocation (using `fuzz()` from its grammar) and runs the now given (or previously set) runner with the arguments from the grammar. Note that the runner specified in `run()` can differ from the one set during initialization; this allows for mining options from one program and applying it in another context.
###Code
class OptionFuzzer(OptionFuzzer):
def run(self, runner=None, inp=""):
if runner is None:
runner = self.runner
assert issubclass(type(runner), OptionRunner)
invocation = runner.executable() + " " + self.fuzz()
runner.set_invocation(invocation.split())
return runner.run(inp)
###Output
_____no_output_____
###Markdown
Example: Autopep8 Let us apply this on the `autopep8` runner:
###Code
autopep8_fuzzer = OptionFuzzer(autopep8_runner, max_nonterminals=5)
for i in range(3):
print(autopep8_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
We can now systematically test `autopep8` with these classes:
###Code
autopep8_fuzzer.run(autopep8_runner)
###Output
_____no_output_____
###Markdown
Example: MyPyWe can extract options for the `mypy` static type checker for Python:
###Code
assert find_executable("mypy") is not None
mypy_runner = OptionRunner("mypy", "foo.py")
print(mypy_runner.ebnf_grammar()["<option>"])
mypy_fuzzer = OptionFuzzer(mypy_runner, max_nonterminals=5)
for i in range(10):
print(mypy_fuzzer.fuzz())
###Output
_____no_output_____
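###Markdown
As noted above, the runner passed to `run()` need not be the one the options were mined from. A quick sketch reusing the runners defined above – here, options mined from `mypy` are handed to the `autopep8` executable (most such runs will simply be rejected, which is fine for robustness testing):
###Code
result, outcome = mypy_fuzzer.run(autopep8_runner)
outcome
###Output
_____no_output_____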
###Markdown
Example: NotedownHere are the configuration options for the `notedown` Notebook to Markdown converter:
###Code
assert find_executable("notedown") is not None
notedown_runner = OptionRunner("notedown")
print(notedown_runner.ebnf_grammar()["<option>"])
notedown_fuzzer = OptionFuzzer(notedown_runner, max_nonterminals=5)
for i in range(10):
print(notedown_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Combinatorial TestingOur `GrammarCoverageFuzzer` does a good job in covering each and every option at least once, which is great for systematic testing. However, as we also can see in our examples above, some options require each other, while others interfere with each other. What we should do as good testers is to cover not only every option individually, but also _combinations_ of options. The Python `itertools` module gives us means to create combinations from lists. We can, for instance, take the `notedown` options and create a list of all pairs.
###Code
from itertools import combinations
option_list = notedown_runner.ebnf_grammar()["<option>"]
pairs = list(combinations(option_list, 2))
###Output
_____no_output_____
###Markdown
There's quite a number of pairs:
###Code
len(pairs)
print(pairs[:20])
###Output
_____no_output_____
###Markdown
Testing every such pair of options frequently suffices to cover all interferences between options. (Programs rarely have conditions involving three or more configuration settings.) To this end, we _change_ the grammar from having a list of options to having a list of _option pairs_, such that covering these will automatically cover all pairs. We create a function `pairwise()` that takes a list of options as occurring in our grammar and returns a list of _pairwise options_ – that is, our original options, but concatenated.
###Code
def pairwise(option_list):
return [option_1 + option_2
for (option_1, option_2) in combinations(option_list, 2)]
###Output
_____no_output_____
###Markdown
Here are the first 20 pairs:
###Code
print(pairwise(option_list)[:20])
###Output
_____no_output_____
###Markdown
The new grammar `pairwise_notedown_grammar` is a copy of the `notedown` grammar, but with the list of options replaced with the above pairwise option list.
###Code
from copy import deepcopy
notedown_grammar = notedown_runner.grammar()
pairwise_notedown_grammar = deepcopy(notedown_grammar)
pairwise_notedown_grammar["<option>"] = pairwise(notedown_grammar["<option>"])
assert is_valid_grammar(pairwise_notedown_grammar)
###Output
_____no_output_____
###Markdown
Using the "pairwise" grammar to fuzz now covers one pair after another:
###Code
notedown_fuzzer = GrammarCoverageFuzzer(
pairwise_notedown_grammar, max_nonterminals=4)
for i in range(10):
print(notedown_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Can we actually test all combinations of options? Not in practice, as the number of combinations quickly grows as the length increases. It decreases again as the combination length approaches the number of options (with 20 options, there is only one combination involving _all_ options), but the absolute numbers are still staggering:
###Code
for combination_length in range(1, 20):
tuples = list(combinations(option_list, combination_length))
print(combination_length, len(tuples))
###Output
_____no_output_____
###Markdown
Formally, the number of combinations of length $k$ in a set of options of length $n$ is the binomial coefficient$${n \choose k} = \frac{n!}{k!(n - k)!}$$ which for $k = 2$ (all pairs) gives us$${n \choose 2} = \frac{n!}{2(n - 2)!} = \frac{n \times (n - 1)}{2}$$ For `autopep8` with its 29 options...
###Code
len(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
###Markdown
... we thus need 406 tests to cover all pairs:
###Code
len(autopep8_runner.ebnf_grammar()["<option>"]) * \
    (len(autopep8_runner.ebnf_grammar()["<option>"]) - 1) // 2
###Output
_____no_output_____
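###Markdown
As a quick sanity check, evaluating the binomial coefficient directly (a small sketch using `math.factorial`) yields the same number:
###Code
from math import factorial

n = len(autopep8_runner.ebnf_grammar()["<option>"])
# n! / (2! * (n - 2)!) is the same as n * (n - 1) / 2
factorial(n) // (factorial(2) * factorial(n - 2)) == n * (n - 1) // 2
###Output
_____no_output_____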
###Markdown
For `mypy` with its 110 options, though, we already end up with 5,995 tests to be conducted:
###Code
len(mypy_runner.ebnf_grammar()["<option>"])
len(mypy_runner.ebnf_grammar()["<option>"]) * \
    (len(mypy_runner.ebnf_grammar()["<option>"]) - 1) // 2
###Output
_____no_output_____
###Markdown
Even if each pair takes a second to run, we'd still be done in under two hours of testing, though. If your program has more options than you can reasonably cover in combination, it is advisable to limit the number of configurations further – for instance by limiting combinatorial testing to those combinations that can possibly interact with each other, and covering all other (presumably orthogonal) options individually. This mechanism of creating configurations by extending grammars can be easily extended to other configuration targets. One may want to explore a greater number of configurations, or expansions in specific contexts. The exercises, below, have a number of options ready for you. Lessons Learned
* Besides regular input data, program _configurations_ make an important testing target.
* For a given program using a standard library to parse command-line options and arguments, one can automatically extract these and convert them into a grammar.
* To cover not only single options, but combinations of options, one can expand the grammar to cover all pairs, or come up with even more ambitious targets.

Next StepsIf you liked the idea of mining a grammar from a program, do not miss:
* [how to mine grammars for input data](GrammarMiner.ipynb)

Our next steps in the book focus on:
* [how to parse and recombine inputs](Parser.ipynb)
* [how to assign weights and probabilities to specific productions](ProbabilisticGrammarFuzzer.ipynb)
* [how to simplify inputs that cause a failure](Reducer.ipynb)

BackgroundAlthough configuration data is just as likely to cause failures as other input data, it has received relatively little attention in test generation – possibly because, unlike "regular" input data, configuration data is not so much under control of external parties, and because, again unlike regular data, there is little variance in configurations. Creating models for software configurations and using these models for testing is commonplace, as is the idea of pairwise testing. For an overview, see \cite{Pezze2008}; for a discussion and comparison of state-of-the-art techniques, see \cite{Petke2015}. More specifically, \cite{Sutton2007} also discuss techniques to systematically cover command-line options. Dai et al. \cite{Dai2010} apply configuration fuzzing by changing variables associated with configuration files. Exercises Exercise 1: Configuration FilesBesides command-line options, a second important source of configurations is _configuration files_. In this exercise, we will consider the very simple configuration language provided by the Python `ConfigParser` module, which is very similar to what is found in Microsoft Windows _.ini_ files. The following example for a `ConfigParser` input file stems right from [the ConfigParser documentation](https://docs.python.org/3/library/configparser.html):
```
[DEFAULT]
ServerAliveInterval = 45
Compression = yes
CompressionLevel = 9
ForwardX11 = yes

[bitbucket.org]
User = hg

[topsecret.server.com]
Port = 50022
ForwardX11 = no
```
The above `ConfigParser` file can be created programmatically:
###Code
import configparser
config = configparser.ConfigParser()
config['DEFAULT'] = {'ServerAliveInterval': '45',
'Compression': 'yes',
'CompressionLevel': '9'}
config['bitbucket.org'] = {}
config['bitbucket.org']['User'] = 'hg'
config['topsecret.server.com'] = {}
topsecret = config['topsecret.server.com']
topsecret['Port'] = '50022' # mutates the parser
topsecret['ForwardX11'] = 'no' # same here
config['DEFAULT']['ForwardX11'] = 'yes'
with open('example.ini', 'w') as configfile:
config.write(configfile)
with open('example.ini') as configfile:
print(configfile.read(), end="")
###Output
_____no_output_____
###Markdown
and be read in again:
###Code
config = configparser.ConfigParser()
config.read('example.ini')
topsecret = config['topsecret.server.com']
topsecret['Port']
###Output
_____no_output_____
###Markdown
Part 1: Read ConfigurationUsing `configparser`, create a program reading in the above configuration file and accessing the individual elements. Part 2: Create a Configuration GrammarDesign a grammar that will automatically create configuration files suitable for your above program. Fuzz your program with it. Part 3: Mine a Configuration GrammarBy dynamically tracking the individual accesses to configuration elements, you can again extract a basic grammar from the execution. To this end, create a subclass of `ConfigParser` with a special method `__getitem__`:
###Code
class TrackingConfigParser(configparser.ConfigParser):
def __getitem__(self, key):
print("Accessing", repr(key))
return super().__getitem__(key)
###Output
_____no_output_____
###Markdown
For a `TrackingConfigParser` object `p`, `p.__getitem__(key)` will be invoked whenever `p[key]` is accessed:
###Code
tracking_config_parser = TrackingConfigParser()
tracking_config_parser.read('example.ini')
section = tracking_config_parser['topsecret.server.com']
###Output
_____no_output_____
###Markdown
Using `__getitem__()`, as above, implement a tracking mechanism that, while your program accesses the read configuration, automatically saves options accessed and values read. Create a prototype grammar from these values; use it for fuzzing. At the end, don't forget to clean up:
###Code
import os
os.remove("example.ini")
###Output
_____no_output_____
###Markdown
**Solution.** Left to the reader. Enjoy! Exercise 2: C Option FuzzingIn C programs, the `getopt()` function is frequently used to process configuration options. A call```getopt(argc, argv, "bf:")```indicates that the program accepts two options `-b` and `-f`, with `-f` taking an argument (as indicated by the following colon). Part 1: Getopt FuzzingWrite a framework which, for a given C program, automatically extracts the argument to `getopt()` and derives a fuzzing grammar for it. There are multiple ways to achieve this:
1. Scan the program source code for occurrences of `getopt()` and return the string passed. (Crude, but should frequently work.)
2. Insert your own implementation of `getopt()` into the source code (effectively replacing `getopt()` from the runtime library), which outputs the `getopt()` argument and exits the program. Recompile and run.
3. (Advanced.) As above, but instead of changing the source code, hook into the _dynamic linker_ which at runtime links the program with the C runtime library. Set the library loading path (on Linux and Unix, this is the `LD_LIBRARY_PATH` environment variable) such that your own version of `getopt()` is linked first, and the regular libraries later. Executing the program (without recompiling) should yield the desired result.

Apply this on `grep` and `ls`; report the resulting grammars and results. **Solution.** Left to the reader. Enjoy hacking! Part 2: Fuzzing Long Options in CSame as Part 1, but also hook into the GNU variant `getopt_long()`, which accepts "long" arguments with double dashes such as `--help`. Note that method 1, above, will not work here, since the "long" options are defined in a separately defined structure. **Solution.** Left to the reader. Enjoy hacking! Exercise 3: Expansions in ContextIn our above option configurations, we have multiple symbols which all expand to the same integer. For instance, the `--line-range` option of `autopep8` takes two `<line>` parameters which both expand into the same `<int>` symbol:
```
<option> ::= ... | --line-range <line> <line> | ...
<line> ::= <int>
<int> ::= (-)?<digit>+
<digit> ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
```
###Code
autopep8_runner.ebnf_grammar()["<line>"]
autopep8_runner.ebnf_grammar()["<int>"]
autopep8_runner.ebnf_grammar()["<digit>"]
###Output
_____no_output_____
We can instantiate the class on `autopep8` and immediately get the grammar:
###Code
autopep8_runner = OptionRunner("autopep8", "foo.py")
print(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
###Markdown
An `OptionFuzzer` interacts with the given `OptionRunner` to obtain its grammar, which is then passed to its `GrammarCoverageFuzzer` superclass.
###Code
class OptionFuzzer(GrammarCoverageFuzzer):
def __init__(self, runner, *args, **kwargs):
assert issubclass(type(runner), OptionRunner)
self.runner = runner
grammar = runner.grammar()
super().__init__(grammar, *args, **kwargs)
###Output
_____no_output_____
###Markdown
When invoking `run()`, the `OptionFuzzer` creates a new invocation (using `fuzz()` from its grammar) and runs the now given (or previously set) runner with the arguments from the grammar. Note that the runner specified in `run()` can differ from the one set during initialization; this allows for mining options from one program and applying it in another context.
###Code
class OptionFuzzer(OptionFuzzer):
def run(self, runner=None, inp=""):
if runner is None:
runner = self.runner
assert issubclass(type(runner), OptionRunner)
invocation = runner.executable() + " " + self.fuzz()
runner.set_invocation(invocation.split())
return runner.run(inp)
###Output
_____no_output_____
###Markdown
Example: Autopep8Let us apply our newly defined classes on the `autopep8` runner:
###Code
autopep8_fuzzer = OptionFuzzer(autopep8_runner, max_nonterminals=5)
for i in range(3):
print(autopep8_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
We can now systematically test `autopep8` with these classes:
###Code
autopep8_fuzzer.run(autopep8_runner)
###Output
_____no_output_____
###Markdown
Example: MyPyWe can extract options for the `mypy` static type checker for Python:
###Code
assert find_executable("mypy") is not None
mypy_runner = OptionRunner("mypy", "foo.py")
print(mypy_runner.ebnf_grammar()["<option>"])
mypy_fuzzer = OptionFuzzer(mypy_runner, max_nonterminals=5)
for i in range(10):
print(mypy_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Example: Notedown
Here are the configuration options for the `notedown` Notebook-to-Markdown converter:
###Code
assert find_executable("notedown") is not None
notedown_runner = OptionRunner("notedown")
print(notedown_runner.ebnf_grammar()["<option>"])
notedown_fuzzer = OptionFuzzer(notedown_runner, max_nonterminals=5)
for i in range(10):
print(notedown_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Combinatorial Testing
Our `GrammarCoverageFuzzer` does a good job in covering each and every option at least once, which is great for systematic testing. However, as we also can see in our examples above, some options require each other, while others interfere with each other. What we should do as good testers is not only to cover every option individually, but also _combinations_ of options. The Python `itertools` module gives us means to create combinations from lists. We can, for instance, take the `notedown` options and create a list of all pairs.
###Code
from itertools import combinations
option_list = notedown_runner.ebnf_grammar()["<option>"]
pairs = list(combinations(option_list, 2))
###Output
_____no_output_____
###Markdown
There's quite a number of pairs:
###Code
len(pairs)
print(pairs[:20])
###Output
_____no_output_____
###Markdown
Testing every such pair of options frequently suffices to cover all interferences between options. (Programs rarely have conditions involving three or more configuration settings.) To this end, we _change_ the grammar from having a list of options to having a list of _option pairs_, such that covering these will automatically cover all pairs. We create a function `pairwise()` that takes a list of options as occurring in our grammar and returns a list of _pairwise options_ – that is, our original options, but concatenated.
###Code
def pairwise(option_list):
return [option_1 + option_2
for (option_1, option_2) in combinations(option_list, 2)]
###Output
_____no_output_____
###Markdown
Here's the first 20 pairs:
###Code
print(pairwise(option_list)[:20])
###Output
_____no_output_____
###Markdown
The new grammar `pairwise_notedown_grammar` is a copy of the `notedown` grammar, but with the list of options replaced with the above pairwise option list.
###Code
from copy import deepcopy
notedown_grammar = notedown_runner.grammar()
pairwise_notedown_grammar = deepcopy(notedown_grammar)
pairwise_notedown_grammar["<option>"] = pairwise(notedown_grammar["<option>"])
assert is_valid_grammar(pairwise_notedown_grammar)
###Output
_____no_output_____
###Markdown
Using the "pairwise" grammar to fuzz now covers one pair after another:
###Code
notedown_fuzzer = GrammarCoverageFuzzer(
pairwise_notedown_grammar, max_nonterminals=4)
for i in range(10):
print(notedown_fuzzer.fuzz())
###Output
_____no_output_____
###Markdown
Can we actually test all combinations of options? Not in practice, as the number of combinations quickly grows as the length increases. It decreases again as the number of options reaches the maximum (with 20 options, there is only 1 combination involving _all_ options), but the absolute numbers are still staggering:
###Code
for combination_length in range(1, 20):
tuples = list(combinations(option_list, combination_length))
print(combination_length, len(tuples))
###Output
_____no_output_____
###Markdown
Formally, the number of combinations of length $k$ in a set of options of length $n$ is the binomial coefficient$${n \choose k} = \frac{n!}{k!(n - k)!}$$ which for $k = 2$ (all pairs) gives us$${n \choose 2} = \frac{n!}{2!(n - 2)!} = \frac{n \times (n - 1)}{2}$$ Since the order in which two options appear on the command line can also matter, we count each pair in both orders below, which gives $n \times (n - 1)$ invocations. For `autopep8` with its 29 options...
###Code
len(autopep8_runner.ebnf_grammar()["<option>"])
###Output
_____no_output_____
###Markdown
... we thus need $29 \times 28 = 812$ tests to cover all ordered pairs:
###Code
len(autopep8_runner.ebnf_grammar()["<option>"]) * \
(len(autopep8_runner.ebnf_grammar()["<option>"]) - 1)
###Output
_____no_output_____
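As a quick arithmetic cross-check (an added aside using only the standard library), `math.comb()` gives the number of unordered pairs; counting each pair in both orders doubles it:

```python
# Unordered pairs vs. ordered pairs of autopep8 options (math.comb needs Python >= 3.8)
import math

n = len(autopep8_runner.ebnf_grammar()["<option>"])
math.comb(n, 2), n * (n - 1)
```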
###Markdown
For `mypy` with its 110 options, though, we already end up with 11,990 tests to be conducted:
###Code
len(mypy_runner.ebnf_grammar()["<option>"])
len(mypy_runner.ebnf_grammar()["<option>"]) * \
(len(mypy_runner.ebnf_grammar()["<option>"]) - 1)
###Output
_____no_output_____
###Markdown
Even if each pair takes a second to run, we'd still be done in three hours of testing, though. If your program has even more options that you want to cover in combinations, it is advisable to limit the number of configurations further – for instance, by limiting combinatorial testing to those combinations that can possibly interact with each other, and covering all other (presumably orthogonal) options individually. This mechanism of creating configurations by extending grammars can be easily extended to other configuration targets. One may want to explore a greater number of configurations, or expansions in specific contexts. The [exercises](Exercises), below, have a number of options ready for you.

Lessons Learned
* Besides regular input data, program _configurations_ make an important testing target.
* For a given program using a standard library to parse command-line options and arguments, one can automatically extract these and convert them into a grammar.
* To cover not only single options, but combinations of options, one can expand the grammar to cover all pairs, or come up with even more ambitious targets.

Next Steps
If you liked the idea of mining a grammar from a program, do not miss:
* [how to mine grammars for input data](GrammarMiner.ipynb)

Our next steps in the book focus on:
* [how to parse and recombine inputs](Parser.ipynb)
* [how to assign weights and probabilities to specific productions](ProbabilisticGrammarFuzzer.ipynb)
* [how to simplify inputs that cause a failure](Reducer.ipynb)

Background
Although configuration data is just as likely to cause failures as other input data, it has received relatively little attention in test generation – possibly because, unlike "regular" input data, configuration data is not so much under control of external parties, and because, again unlike regular data, there is little variance in configurations. Creating models for software configurations and using these models for testing is commonplace, as is the idea of pairwise testing. For an overview, see \cite{Pezze2008}; for a discussion and comparison of state-of-the-art techniques, see \cite{Petke2015}. More specifically, \cite{Sutton2007} also discuss techniques to systematically cover command-line options. Dai et al. \cite{Dai2010} apply configuration fuzzing by changing variables associated with configuration files.

Exercises

Exercise 1: ifdef Configuration Fuzzing
In C programs, the *C preprocessor* can be used to choose which code parts should be compiled and which ones should not. As an example, in the C code

```C
#ifdef LONG_FOO
long foo() { ... }
#else
int foo() { ... }
#endif
```

the compiler will compile the function `foo()` with return type `long` if the _preprocessor variable_ `LONG_FOO` is defined, and with return type `int` if not. Such preprocessor variables are either set in the source files (using `#define`, as in `#define LONG_FOO`) or on the C compiler command line (using `-D<var>` or `-D<var>=<value>`, as in `-DLONG_FOO`). Such *conditional compilation* is used to configure C programs towards their environment. System-specific code can contain lots of conditional compilation.
As an example, consider this excerpt of `xmlparse.c`, the XML parser that is part of the Python runtime library:

```c
#if defined(_WIN32) && !defined(LOAD_LIBRARY_SEARCH_SYSTEM32)
# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800
#endif

#if !defined(HAVE_GETRANDOM) && !defined(HAVE_SYSCALL_GETRANDOM) \
  && !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_ARC4RANDOM) \
  && !defined(XML_DEV_URANDOM) \
  && !defined(_WIN32) \
  && !defined(XML_POOR_ENTROPY)
# error
#endif

#if !defined(TIOCSWINSZ) || defined(__SCO__) || defined(__UNIXWARE__)
#define USE_SYSV_ENVVARS /* COLUMNS/LINES vs. TERMCAP */
#endif

#ifdef XML_UNICODE_WCHAR_T
#define XML_T(x) (const wchar_t)x
#define XML_L(x) L ## x
#else
#define XML_T(x) (const unsigned short)x
#define XML_L(x) x
#endif

int fun(int x) { return XML_T(x); }
```

A typical configuration for the C preprocessor on the above code could be `cc -c -D_WIN32 -DXML_POOR_ENTROPY -DXML_UNICODE_WCHAR_T xmlparse.c`, defining the given preprocessor variables and selecting the appropriate code fragments. Since the compiler can only compile one configuration at a time (implying that we can also only _test_ one resulting executable at a time), your task is to find out which of these configurations actually compile. To this end, proceed in three steps.

Part 1: Extract Preprocessor Variables
Write a _function_ `cpp_identifiers()` that, given a set of lines (say, from `open(filename).readlines()`), extracts all preprocessor variables referenced in `#if` or `#ifdef` preprocessor instructions. Applying `cpp_identifiers()` on the sample C input above, as in

```python
cpp_identifiers(open("xmlparse.c").readlines())
```

should return the set

```python
{'_WIN32', 'LOAD_LIBRARY_SEARCH_SYSTEM32', 'HAVE_GETRANDOM', 'HAVE_SYSCALL_GETRANDOM', 'HAVE_ARC4RANDOM_BUF', ...}
```

**Solution.** Let us start with creating a sample input file, `xmlparse.c`:
###Code
filename = "xmlparse.c"
open(filename, "w").write(
"""
#if defined(_WIN32) && !defined(LOAD_LIBRARY_SEARCH_SYSTEM32)
# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800
#endif
#if !defined(HAVE_GETRANDOM) && !defined(HAVE_SYSCALL_GETRANDOM) \
&& !defined(HAVE_ARC4RANDOM_BUF) && !defined(HAVE_ARC4RANDOM) \
&& !defined(XML_DEV_URANDOM) \
&& !defined(_WIN32) \
&& !defined(XML_POOR_ENTROPY)
# error
#endif
#if !defined(TIOCSWINSZ) || defined(__SCO__) || defined(__UNIXWARE__)
#define USE_SYSV_ENVVARS /* COLUMNS/LINES vs. TERMCAP */
#endif
#ifdef XML_UNICODE_WCHAR_T
#define XML_T(x) (const wchar_t)x
#define XML_L(x) L ## x
#else
#define XML_T(x) (const unsigned short)x
#define XML_L(x) x
#endif
int fun(int x) { return XML_T(x); }
""");
###Output
_____no_output_____
###Markdown
To find C preprocessor `if` directives and preprocessor variables, we use regular expressions matching them.
###Code
import re
re_cpp_if_directive = re.compile(r"\s*#\s*(el)?if")
re_cpp_identifier = re.compile(r"[a-zA-Z_$]+")
def cpp_identifiers(lines):
identifiers = set()
for line in lines:
if re_cpp_if_directive.match(line):
identifiers |= set(re_cpp_identifier.findall(line))
# These are preprocessor keywords
identifiers -= { "if", "ifdef", "ifndef", "defined" }
return identifiers
cpp_ids = cpp_identifiers(open("xmlparse.c").readlines())
cpp_ids
###Output
_____no_output_____
###Markdown
Part 2: Derive an Option Grammar
With the help of `cpp_identifiers()`, create a grammar which has C compiler invocations with a list of options, where each option takes the form `-D<id>` for a preprocessor variable `<id>`. Using this grammar `cpp_grammar`, a fuzzer

```python
g = GrammarCoverageFuzzer(cpp_grammar)
```

would create C compiler invocations such as

```python
[g.fuzz() for i in range(10)]

['cc -DHAVE_SYSCALL_GETRANDOM xmlparse.c',
 'cc -D__SCO__ -DRANDOM_BUF -DXML_UNICODE_WCHAR_T -D__UNIXWARE__ xmlparse.c',
 'cc -DXML_POOR_ENTROPY xmlparse.c',
 'cc -DRANDOM xmlparse.c',
 'cc -D_WIN xmlparse.c',
 'cc -DHAVE_ARC xmlparse.c',
 ...]
```

**Solution.** This is not very difficult:
###Code
from Grammars import new_symbol
cpp_grammar = {
"<start>": ["cc -c<options> " + filename],
"<options>": ["<option>", "<options><option>"],
"<option>": []
}
for id in cpp_ids:
s = new_symbol(cpp_grammar, "<" + id + ">")
cpp_grammar["<option>"].append(s)
cpp_grammar[s] = [" -D" + id]
cpp_grammar
assert is_valid_grammar(cpp_grammar)
###Output
_____no_output_____
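For a sense of scale (an added aside): with $k$ preprocessor variables there are $2^k$ possible on/off configurations, which is exactly why exhaustive configuration testing is out of reach and why covering variables individually and pairwise is attractive:

```python
# Size of the configuration space spanned by the mined identifiers
k = len(cpp_ids)
k, 2 ** k, len(list(combinations(cpp_ids, 2)))   # variables, full configurations, unordered pairs
```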
###Markdown
Part 3: C Preprocessor Configuration FuzzingUsing the grammar just produced, use a `GrammarCoverageFuzzer` to1. Test each processor variable individually2. Test each pair of processor variables, using `pairwise()`.What happens if you actually run the invocations? **Solution.** We can simply run the coverage fuzzer, as described above.
###Code
g = GrammarCoverageFuzzer(cpp_grammar)
g.fuzz()
from Fuzzer import ProgramRunner
for i in range(10):
invocation = g.fuzz()
print("$", invocation)
# subprocess.call(invocation, shell=True)
cc_runner = ProgramRunner(invocation.split(' '))
(result, outcome) = cc_runner.run()
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
To test all pairs, we can use `pairwise()`:
###Code
pairwise_cpp_grammar = deepcopy(cpp_grammar)
pairwise_cpp_grammar["<option>"] = pairwise(cpp_grammar["<option>"])
pairwise_cpp_grammar["<option>"][:10]
g = GrammarCoverageFuzzer(pairwise_cpp_grammar)  # fuzz with the pairwise grammar
for i in range(10):
invocation = g.fuzz()
print("$", invocation)
# subprocess.call(invocation, shell=True)
cc_runner = ProgramRunner(invocation.split(' '))
(result, outcome) = cc_runner.run()
print(result.stderr, end="")
###Output
_____no_output_____
###Markdown
Some of the compilation errors we get could be expected – for instance, defining `XML_UNICODE_WCHAR_T` when actually, the type is not supported in our environment. Other errors may not be expected – and it is these errors we would find through systematic configuration fuzzing, as described above. At the end, don't forget to clean up:
###Code
os.remove("xmlparse.c")
if os.path.exists("xmlparse.o"):
os.remove("xmlparse.o")
###Output
_____no_output_____
###Markdown
Exercise 2: .ini Configuration FuzzingBesides command-line options, another important source of configurations are _configuration files_. In this exercise, we will consider the very simple configuration language provided by the Python `ConfigParser` module, which is very similar to what is found in Microsoft Windows _.ini_ files. The following example for a `ConfigParser` input file stems right from [the ConfigParser documentation](https://docs.python.org/3/library/configparser.html):```[DEFAULT]ServerAliveInterval = 45Compression = yesCompressionLevel = 9ForwardX11 = yes[bitbucket.org]User = hg[topsecret.server.com]Port = 50022ForwardX11 = no``` The above `ConfigParser` file can be created programmatically:
###Code
import configparser
config = configparser.ConfigParser()
config['DEFAULT'] = {'ServerAliveInterval': '45',
'Compression': 'yes',
'CompressionLevel': '9'}
config['bitbucket.org'] = {}
config['bitbucket.org']['User'] = 'hg'
config['topsecret.server.com'] = {}
topsecret = config['topsecret.server.com']
topsecret['Port'] = '50022' # mutates the parser
topsecret['ForwardX11'] = 'no' # same here
config['DEFAULT']['ForwardX11'] = 'yes'
with open('example.ini', 'w') as configfile:
config.write(configfile)
with open('example.ini') as configfile:
print(configfile.read(), end="")
###Output
_____no_output_____
###Markdown
and be read in again:
###Code
config = configparser.ConfigParser()
config.read('example.ini')
topsecret = config['topsecret.server.com']
topsecret['Port']
###Output
_____no_output_____
###Markdown
Part 1: Read ConfigurationUsing `configparser`, create a program reading in the above configuration file and accessing the individual elements. Part 2: Create a Configuration GrammarDesign a grammar that will automatically create configuration files suitable for your above program. Fuzz your program with it. Part 3: Mine a Configuration GrammarBy dynamically tracking the individual accesses to configuration elements, you can again extract a basic grammar from the execution. To this end, create a subclass of `ConfigParser` with a special method `__getitem__`:
###Code
class TrackingConfigParser(configparser.ConfigParser):
def __getitem__(self, key):
print("Accessing", repr(key))
return super().__getitem__(key)
###Output
_____no_output_____
###Markdown
For a `TrackingConfigParser` object `p`, `p.__getitem__(key)` will be invoked whenever `p[key]` is accessed:
###Code
tracking_config_parser = TrackingConfigParser()
tracking_config_parser.read('example.ini')
section = tracking_config_parser['topsecret.server.com']
###Output
_____no_output_____
###Markdown
Using `__getitem__()`, as above, implement a tracking mechanism that, while your program accesses the read configuration, automatically saves options accessed and values read. Create a prototype grammar from these values; use it for fuzzing. At the end, don't forget to clean up:
###Code
import os
os.remove("example.ini")
###Output
_____no_output_____
###Markdown
**Solution.** Left to the reader. Enjoy!

Exercise 3: Extracting and Fuzzing C Command-Line Options
In C programs, the `getopt()` function is frequently used to process configuration options. A call

```
getopt(argc, argv, "bf:")
```

indicates that the program accepts two options `-b` and `-f`, with `-f` taking an argument (as indicated by the following colon).

Part 1: Getopt Fuzzing
Write a framework which, for a given C program, automatically extracts the argument to `getopt()` and derives a fuzzing grammar for it. There are multiple ways to achieve this:
1. Scan the program source code for occurrences of `getopt()` and return the string passed. (Crude, but should frequently work.)
2. Insert your own implementation of `getopt()` into the source code (effectively replacing `getopt()` from the runtime library), which outputs the `getopt()` argument and exits the program. Recompile and run.
3. (Advanced.) As above, but instead of changing the source code, hook into the _dynamic linker_ which at runtime links the program with the C runtime library. Set the library loading path (on Linux and Unix, this is the `LD_LIBRARY_PATH` environment variable) such that your own version of `getopt()` is linked first, and the regular libraries later. Executing the program (without recompiling) should yield the desired result.

Apply this on `grep` and `ls`; report the resulting grammars and results.

**Solution.** Left to the reader. Enjoy hacking!

Part 2: Fuzzing Long Options in C
Same as Part 1, but also hook into the GNU variant `getopt_long()`, which accepts "long" arguments with double dashes such as `--help`. Note that method 1, above, will not work here, since the "long" options are defined in a separately defined structure.

**Solution.** Left to the reader. Enjoy hacking!

Exercise 4: Expansions in Context
In our above option configurations, we have multiple symbols which all expand to the same integer. For instance, the `--line-range` option of `autopep8` takes two `<line>` parameters which both expand into the same `<int>` symbol:

```
<option> ::= ... | --line-range <line> <line> | ...
<line>   ::= <int>
<int>    ::= (-)?<digit>+
<digit>  ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
```
###Code
autopep8_runner.ebnf_grammar()["<line>"]
autopep8_runner.ebnf_grammar()["<int>"]
autopep8_runner.ebnf_grammar()["<digit>"]
###Output
_____no_output_____ |
Modulo1/Clase5_MidiendoConHistoricos.ipynb | ###Markdown
Measuring return and risk with historical data
> We already know that we can characterize the return distribution of an asset with a measure of central tendency (mean: expected return) and a measure of dispersion (standard deviation: volatility).
> In the previous class we saw how to download historical price reports for assets. How do we use these histories to measure the expected return and the volatility of returns?

*Objectives:*
- Compute returns from historical prices.
- Estimate expected return and risk from historical returns.
- Annualize return and volatility when the data are on a shorter time base.
- Verify the relationship between return and risk with real data.

**References:**
- http://pandas.pydata.org/
- https://pandas-datareader.readthedocs.io/en/latest/
- Lecture notes from the course "Portfolio Selection and Risk Management", Rice University, available on Coursera.
___
1. Computing returns
Very well, so we already know how to download price histories...

**Example:** in this class we will work with assets from the top 10 of the S&P 500 index. Download adjusted closing prices for Microsoft (MSFT), Apple (AAPL), Amazon (AMZN), Facebook (FB) and Alphabet Inc. (GOOGL) from January 1, 2015 until today.
###Code
# Importar paquetes
import pandas as pd
import numpy as np
%matplotlib inline
import pandas_datareader.data as web
# Función para descargar precios de cierre ajustados:
def get_adj_closes(tickers, start_date=None, end_date=None):
# Fecha inicio por defecto (start_date='2010-01-01') y fecha fin por defecto (end_date=today)
# Descargamos DataFrame con todos los datos
closes = web.DataReader(name=tickers, data_source='yahoo', start=start_date, end=end_date)
# Solo necesitamos los precios ajustados en el cierre
closes = closes['Adj Close']
# Se ordenan los índices de manera ascendente
closes.sort_index(inplace=True)
return closes
# Información
names=['MSFT','AAPL','AMZN','FB','GOOGL','^GSPC']
start_date='2015-01-01'
# Precios diarios
closes=get_adj_closes(tickers=names,start_date=start_date)
closes.sample(10)
# Gráfico de histórico de precios diarios
closes.plot(figsize=(8,6),grid=True);
###Output
_____no_output_____
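A small optional aside (not part of the original notebook): the price levels of these assets differ so much that the raw series are hard to compare. Dividing each series by its first value puts them on a common scale and already hints at why the analysis below works with returns rather than prices:

```python
# Optional: normalized prices (each series divided by its first value)
(closes / closes.iloc[0]).plot(figsize=(8, 6), grid=True);
```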
###Markdown
1.1. Percentage returns
Very well, but for the analysis we do not work with prices but with returns... **why?**
For a sequence of prices $\{S_t\}_{t=0}^{n}$, the simple return $R_t$ is defined as the percentage change
$$R_t=\frac{S_t-S_{t-1}}{S_{t-1}}$$
for $t=1,\ldots,n$.
*Careful!* Returns are expressed on the same time base on which prices are reported. For example:
- if prices are reported on a daily basis, returns are also daily;
- if prices are reported on a monthly basis, returns are also monthly.
###Code
# Método shift() de un DataFrame...
help(closes.shift)
closes.shift() #desplaza el indice por un periodo
# Calcular los rendimientos
ret=((closes-closes.shift())/closes.shift()).dropna()
ret.head()
# Otra forma (más fácil)
ret=closes.pct_change().dropna()
ret.head()
# Graficar...
ret.plot(figsize=(6,4), grid=True)
###Output
_____no_output_____
###Markdown
**What do we observe compared to the prices?**
Answers:
- The returns appear to keep constant statistical tendencies (for example, they oscillate around values close to zero).

1.2. Logarithmic returns (log returns)
Another return that is frequently used is the continuously compounded or logarithmic return. It is defined as
$$r_t=\ln\left(\frac{S_t}{S_{t-1}}\right).$$
It is easy to see that $r_t=\ln(1+R_t)$.
See on the board that if $0\leq|x|\ll 1$, then $\ln(1+x)\approx x$.
###Code
# Calcular rendimientos continuamente compuestos
log_ret=np.log(closes/closes.shift()).dropna()
log_ret.head(2)
# Recordar rendimientos porcentuales. Ver que son similares
ret.head(2)
# Veamos el valor absoluto de la diferencia
np.abs(ret-log_ret).head(3)
###Output
_____no_output_____
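A quick numerical illustration of the approximation $\ln(1+x)\approx x$ mentioned above (an added aside):

```python
# ln(1 + x) is very close to x for small x
x = 0.01
x, np.log(1 + x)
```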
###Markdown
For this reason, continuously compounded returns are often used in the analysis.
___
2. Characterizing the distribution of returns
So:
- we start from the daily percentage returns of the assets downloaded above (MSFT, AAPL, AMZN, FB, GOOGL and the S&P 500) from the beginning of 2015 to date;
- how would you summarize these data?
###Code
# Rendimiento medio diario (media aritmética)
mean_ret =ret.mean()
mean_ret
# Volatilidad diaria (desviación estándar)
vol=ret.std()#DESVIACION ESTANDAR
vol
# Podemos resumir en un DataFrame
ret_summary=pd.DataFrame({'Mean': mean_ret,'Vol':vol})
ret_summary
###Output
_____no_output_____
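As an optional complement (not in the original notebook), pandas can produce a fuller summary of the same daily returns in a single call:

```python
# Count, mean, standard deviation, quartiles, etc. of the daily returns
ret.describe()
```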
###Markdown
Expected returns and volatilities are normally reported on an annual basis. To annualize:
$$E[r_a]=12E[r_m]=252E[r_d]=52E[r_w],\text{ and}$$
$$\sigma_{r_a}=\sqrt{12}\sigma_{r_m}=\sqrt{252}\sigma_{r_d}=\sqrt{52}\sigma_{r_w}$$
###Code
# Resumen en base anual
anual_ret_summary=pd.DataFrame({'Mean': mean_ret*252,'Vol':vol*(np.sqrt(252))})# ES DIARIA Y LA QUEREMOS ANUAL
anual_ret_summary
# Gráfico rendimiento esperado vs. volatilidad
import matplotlib.pyplot as plt
# Puntos a graficar
x_points=anual_ret_summary.loc[:,'Vol']
y_points=anual_ret_summary.loc[:,'Mean']
# Ventana para graficar
plt.figure(figsize=(6,4))
# Graficar puntos
plt.plot(x_points,y_points, 'o', ms=10)
plt.grid()
# Etiquetas de los ejes
plt.xlabel('Volatilidad $\sigma$')
plt.ylabel('Rendimiento esperado $E[r]$')
# Etiqueta de cada instrumento (tomada del índice del DataFrame)
for i in range(len(anual_ret_summary)):
    plt.text(x_points.iloc[i], y_points.iloc[i], anual_ret_summary.index[i])
plt.show()
###Output
_____no_output_____
###Markdown
2.1 Curve fitting with least squares
Suppose we have a set of $n$ ordered data pairs $(\sigma_{r_i},E[r_i])$, for $i=1,2,3,\dots,n$... **in this case they correspond to volatility and expected return.** Which straight line fits these data best?
We consider fits of the form $\hat{f}(\sigma) = \beta_0+\beta_1 \sigma = \left[1 \quad \sigma\right]\left[\begin{array}{c} \beta_0 \\ \beta_1 \end{array}\right]=\left[1 \quad \sigma\right]\boldsymbol{\beta}$ (straight lines).
To say '*best*', we need to define some sense in which one line fits *better* than another.
**Least squares**: the goal is to choose the coefficients $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$ so that the function evaluated at the points $\sigma_{r_i}$ ($\hat{f}(\sigma_{r_i})$) approximates the corresponding values $E[r_i]$.
The least-squares formulation finds the $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$ that minimizes
$$\sum_{i=1}^{n}(E[r_i]-\hat{f}(\sigma_{r_i}))^2$$
###Code
# Importar el módulo optimize de la librería scipy
from scipy.optimize import minimize
# Funcion minimize
help(minimize) #minimiiza una función escalar
# Funcion objetivo y condicion inicial
def objetivo(beta, vol, mean_ret):
recta= beta[0]+beta[1]*vol
return ((mean_ret-recta)**2).sum()
# Resolver problema de optimizacion
beta_ini=[0,0]
solucion= minimize(fun=objetivo, x0=beta_ini, args=(anual_ret_summary['Vol'],anual_ret_summary['Mean']))
solucion
beta_opt=solucion.x
# Ordenar datos con np.sort
# Gráfico rendimiento esperado vs. volatilidad
import matplotlib.pyplot as plt
# Puntos a graficar
x_points=anual_ret_summary.loc[:,'Vol']
y_points=anual_ret_summary.loc[:,'Mean']
# Ventana para graficar
plt.figure(figsize=(6,4))
# Graficar puntos
plt.plot(x_points,y_points, 'o', ms=10)
plt.grid()
# Etiquetas de los ejes
plt.xlabel('Volatilidad $\sigma$')
plt.ylabel('Rendimiento esperado $E[r]$')
# Etiqueta de cada instrumento (tomada del índice del DataFrame)
for i in range(len(anual_ret_summary)):
    plt.text(x_points.iloc[i], y_points.iloc[i], anual_ret_summary.index[i])
#Grafica de recta ajustada
x_recta=np.linspace(0.1,0.3,100)
y_recta=beta_opt[1]*x_recta+beta_opt[0]
plt.plot(x_recta,y_recta,'r',lw=3,label='Recta ajustada')
plt.legend(loc='best')
plt.show()
###Output
_____no_output_____
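As an optional cross-check (an added sketch that assumes the `anual_ret_summary` DataFrame and `beta_opt` computed above): for a straight-line fit, the least-squares problem also has the closed-form normal-equation solution $\boldsymbol{\beta}=(X^TX)^{-1}X^Ty$, which should agree with the numerical result from `minimize`:

```python
# Closed-form least squares via the normal equations
X = np.column_stack([np.ones(len(anual_ret_summary)), anual_ret_summary['Vol'].values])
y = anual_ret_summary['Mean'].values
beta_closed_form = np.linalg.solve(X.T @ X, X.T @ y)
beta_closed_form   # should be close to beta_opt
```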
###Markdown
Midiendo rendimiento y riesgo con datos históricos> Ya sabemos que podemos caracterizar la distribución de rendimientos de un activo mediante una medida de tendencia central (media: rendimiento esperado) y una medida de dispersión (desviación estándar: volatilidad). > En la clase pasada vimos como obtener reportes históricos de precios de activos. ¿Cómo usamos estos históricos para medir el rendimiento esperado y la volatilidad de los rendimientos? *Objetivos:*- Calcular los rendimientos a partir de históricos de precios.- Estimar rendimiento esperado y riesgo a partir de históricos de rendimientos.- Anualizar rendimiento y volatilidad cuando los datos están en una base de tiempo menor.- Verificar la relación entre rendimiento y riesgo a través de datos reales.**Referencias:**- http://pandas.pydata.org/- https://pandas-datareader.readthedocs.io/en/latest/- Notas del curso "Portfolio Selection and Risk Management", Rice University, disponible en Coursera.___ 1. Cálculo de los rendimientosMuy bien, ya entonces sabemos descargar históricos de precios... **Ejemplo:** trabajaremos esta clase con activos que se encuentran en el top-10 del índice S&P500. Descargar precios ajustados en el cierre de Microsoft (MSFT), Apple (AAPL), Amazon (AMZN), Facebook (FB) y Alphabet Inc. (GOOGL) desde el primero de enero del 2015 hasta hoy.
###Code
# Importar paquetes
import pandas_datareader.data as web
import pandas as pd
import numpy as np
%matplotlib inline
# Función para descargar precios de cierre ajustados:
def get_adj_closes(tickers, start_date=None, end_date=None):
# Fecha inicio por defecto (start_date='2010-01-01') y fecha fin por defecto (end_date=today)
# Descargamos DataFrame con todos los datos
closes = web.DataReader(name=tickers, data_source='yahoo', start=start_date, end=end_date)
# Solo necesitamos los precios ajustados en el cierre
closes = closes['Adj Close']
# Se ordenan los índices de manera ascendente
closes.sort_index(inplace=True)
return closes
# Información
closes = get_adj_closes(tickers=["MSFT", "AAPL", "AMZN", "FB", "GOOGL"],
start_date="2015-01-01")
# Precios diarios ajustados en el cierre
closes.head()
closes.tail()
# Gráfico de histórico de precios diarios
closes.plot();
###Output
_____no_output_____
###Markdown
1.1. Rendimientos porcentuales Muy bien, pero para el análisis no trabajamos con los precios sino con los rendimientos... **¿porqué?** Para una sucesión de precios $\{S_t\}_{t=0}^{n}$, el rendimiento simple $R_t$ se define como el el cambio porcentual$$R_t=\frac{S_t-S_{t-1}}{S_{t-1}}$$para $t=1,\ldots,n$. *¡Cuidado!* los rendimientos son de acuerdo a la base de tiempo en que se reportan los precios. Por ejemplo: - si los precios se reportan en una base diaria, los rendimientos también son diarios;- si los precios se reportan en una base mensual, los rendimientos también son mensuales.
###Code
closes.pct_change?
(40.889702 - 41.269203) / 41.269203
# Calcular rendimientos
ret = closes.pct_change().dropna()
ret.head()
# Graficar...
ret.plot();
###Output
_____no_output_____
###Markdown
**¿Qué se observa respecto a los precios?**Respuestas:- Los rendimientos parecen conservar tendecias estadísticas constantes (por ejemplo, oscilan al rededor de números cercanos a cero). 1.2. Rendimientos logarítmicos (log-rendimientos) Otro rendimiento usado con frecuencia es el rendimiento continuamente compuesto o rendimiento logaritmico. Éste, está definido como$$r_t=\ln\left(\frac{S_t}{S_{t-1}}\right).$$Es fácil darse cuenta que $r_t=\ln(1+R_t)$.Ver en el tablero que si $0\leq|x|\ll 1$, entonces $\ln(1+x)\approx x$.
###Code
# ¿Cómo calcular S_{t-1}?
closes.shift?
closes.shift().head()
closes.head()
# Calcular rendimientos continuamente compuestos
log_ret = np.log(closes / closes.shift()).dropna()
log_ret.head()
# Recordar rendimientos porcentuales. Ver que son similares
ret.head()
# Veamos el valor absoluto de la diferencia
np.abs(ret - log_ret).head()
###Output
_____no_output_____
###Markdown
Por lo anterior, muchas veces se usan para el análisis los rendimientos continuamente compuestos.___ 2. Caracterización de la distribución de los rendimientosEntonces:- partimos de que tenemos los rendimientos porcentuales diarios de Apple, Walmart, IBM y Nike desde inicios del 2011 a finales del 2015;- ¿cómo resumirían estos datos?
###Code
# Rendimiento medio diario (media aritmética)
ret.mean()
# Volatilidad diaria (desviación estándar)
ret.std()
# Podemos resumir en un DataFrame
ret_dist_summary = pd.DataFrame(data={'Rendimiento esperado': ret.mean(),
'Volatilidad': ret.std()})
ret_dist_summary
###Output
_____no_output_____
###Markdown
Normalmente se reportan rendimientos esperados y volatilidades en una base anual. Para anualizar:$$E[r_a]=12E[r_m]=252E[r_d]=52E[r_w],\text{ y}$$$$\sigma_{r_a}=\sqrt{12}\sigma_{r_m}=\sqrt{252}\sigma_{r_d}=\sqrt{52}\sigma_{r_w}$$
###Code
# Resumen en base anual
ret_dist_summary_annual = pd.DataFrame(data={'Rendimiento esperado': 252 * ret.mean(),
'Volatilidad': np.sqrt(252) * ret.std()})
ret_dist_summary_annual
from matplotlib import pyplot as plt
# Gráfico rendimiento esperado vs. volatilidad
plt.figure(figsize=(6, 4))
for i in range(len(ret_dist_summary_annual)):
plt.plot(ret_dist_summary_annual.iloc[i, 1],
ret_dist_summary_annual.iloc[i, 0],
'*',
label=ret_dist_summary_annual.index[i],
ms=10)
plt.grid()
plt.xlabel("Volatilidad $\sigma$")
plt.ylabel("Rendimiento esperado $E[r]$")
plt.legend(loc="upper left", bbox_to_anchor=(1.05, 1))
###Output
_____no_output_____
###Markdown
2.1 Ajuste de curvas con mínimos cuadradosConsideramos que tenemos un conjunto de n pares ordenados de datos $(\sigma_{r_i},E[r_i])$, para $i=1,2,3,\dots,n$... **en este caso corresponden a volatilidad y rendimiento esperado** ¿Cuál es la recta que mejor se ajusta a estos datos?Consideramos entonces ajustes de la forma $\hat{f}(\sigma) = \beta_0+\beta_1 \sigma = \left[1 \quad \sigma\right]\left[\begin{array}{c} \beta_0 \\ \beta_1 \end{array}\right]=\left[1 \quad \sigma\right]\boldsymbol{\beta}$ (lineas rectas).Para decir '*mejor*', tenemos que definir algún sentido en que una recta se ajuste *mejor* que otra.**Mínimos cuadrados**: el objetivo es seleccionar los coeficientes $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$, de forma que la función evaluada en los puntos $\sigma_{r_i}$ ($\hat{f}(\sigma_{r_i})$) aproxime los valores correspondientes $E[r_i]$.La formulación por mínimos cuadrados, encuentra los $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$ que minimiza$$\sum_{i=1}^{n}(E[r_i]-\hat{f}(\sigma_{r_i}))^2$$
###Code
# Importar el módulo optimize de la librería scipy
from scipy.optimize import minimize
# Funcion minimize
minimize?
# Funcion objetivo y condicion inicial
def min_sq(beta, y, x):
recta = beta[0] + beta[1] * x
error = y - recta
return (error**2).sum()
beta_ini = [0, 0]
# Resolver problema de optimizacion
sol = minimize(fun=min_sq,
x0=beta_ini,
args=(ret_dist_summary_annual["Rendimiento esperado"], ret_dist_summary_annual["Volatilidad"]))
sol
beta = sol.x
beta
X = np.concatenate((np.ones((5, 1)), ret_dist_summary_annual["Volatilidad"].values.reshape((5, 1))), axis=1)
y = ret_dist_summary_annual["Rendimiento esperado"].values
X, y
np.linalg.inv((X.T.dot(X))).dot(X.T).dot(y)
# Gráfico rendimiento esperado vs. volatilidad
plt.figure(figsize=(6, 4))
for i in range(len(ret_dist_summary_annual)):
plt.plot(ret_dist_summary_annual.iloc[i, 1],
ret_dist_summary_annual.iloc[i, 0],
'*',
label=ret_dist_summary_annual.index[i],
ms=10)
s = np.linspace(0.25, 0.35)
plt.plot(s, beta[0] + beta[1] * s, '--', label="Recta ajustada", lw=4)
plt.grid()
plt.xlabel("Volatilidad $\sigma$")
plt.ylabel("Rendimiento esperado $E[r]$")
plt.legend(loc="upper left", bbox_to_anchor=(1.05, 1))
beta
###Output
_____no_output_____
###Markdown
**¿Qué se observa respecto a los precios?**Respuestas:- Los rendimientos parecen conservar tendecias estadísticas constantes (por ejemplo, oscilan al rededor de números cercanos a cero). 1.2. Rendimientos logarítmicos (log-rendimientos) Otro rendimiento usado con frecuencia es el rendimiento continuamente compuesto o rendimiento logaritmico. Éste, está definido como$$r_t=\ln\left(\frac{S_t}{S_{t-1}}\right).$$Es fácil darse cuenta que $r_t=\ln(1+R_t)$.Ver en el tablero que si $0\leq|x|\ll 1$, entonces $\ln(1+x)\approx x$.
###Code
# Calcular rendimientos continuamente compuestos
log_ret = np.log(closes / closes.shift()).dropna()
log_ret.head()
log_ret2 = np.log(ret + 1)
log_ret == log_ret2
# Recordar rendimientos porcentuales. Ver que son similares
ret.head()
# Veamos el valor absoluto de la diferencia
np.abs(ret - log_ret)
###Output
_____no_output_____
###Markdown
Por lo anterior, muchas veces se usan para el análisis los rendimientos continuamente compuestos.___ 2. Caracterización de la distribución de los rendimientosEntonces:- partimos de que tenemos los rendimientos porcentuales diarios de Apple, Walmart, IBM y Nike desde inicios del 2011 a finales del 2015;- ¿cómo resumirían estos datos?
###Code
# Rendimiento medio diario (media aritmética)
ret.mean()
# Volatilidad diaria (desviación estándar)
ret.std()
# Podemos resumir en un DataFrame
summary = pd.DataFrame(data={'Media': ret.mean(),
'Vol': ret.std()})
summary
###Output
_____no_output_____
###Markdown
Normalmente se reportan rendimientos esperados y volatilidades en una base anual. Para anualizar:$$E[r_a]=12E[r_m]=252E[r_d]=52E[r_w],\text{ y}$$$$\sigma_{r_a}=\sqrt{12}\sigma_{r_m}=\sqrt{252}\sigma_{r_d}=\sqrt{52}\sigma_{r_w}$$
###Code
# Resumen en base anual
annual_summary = pd.DataFrame(data={'Media': 252 * ret.mean(),
'Vol': (252**0.5) * ret.std()})
annual_summary
from matplotlib import pyplot as plt
# Gráfico rendimiento esperado vs. volatilidad
plt.figure(figsize=(6, 4))
plt.plot(annual_summary['Vol'], annual_summary['Media'], 'bo', ms=10)
for i in range(len(annual_summary)):
plt.text(annual_summary.iloc[i, 1], annual_summary.iloc[i, 0], annual_summary.index[i])
plt.xlabel('Volatilidad $\sigma$')
plt.ylabel('Rendimiento esperado $E[r]$')
plt.grid()
###Output
_____no_output_____
###Markdown
2.1 Ajuste de curvas con mínimos cuadradosConsideramos que tenemos un conjunto de n pares ordenados de datos $(\sigma_{r_i},E[r_i])$, para $i=1,2,3,\dots,n$... **en este caso corresponden a volatilidad y rendimiento esperado** ¿Cuál es la recta que mejor se ajusta a estos datos?Consideramos entonces ajustes de la forma $\hat{f}(\sigma) = \beta_0+\beta_1 \sigma = \left[1 \quad \sigma\right]\left[\begin{array}{c} \beta_0 \\ \beta_1 \end{array}\right]=\left[1 \quad \sigma\right]\boldsymbol{\beta}$ (lineas rectas).Para decir '*mejor*', tenemos que definir algún sentido en que una recta se ajuste *mejor* que otra.**Mínimos cuadrados**: el objetivo es seleccionar los coeficientes $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$, de forma que la función evaluada en los puntos $\sigma_{r_i}$ ($\hat{f}(\sigma_{r_i})$) aproxime los valores correspondientes $E[r_i]$.La formulación por mínimos cuadrados, encuentra los $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$ que minimiza$$\sum_{i=1}^{n}(E[r_i]-\hat{f}(\sigma_{r_i}))^2$$
###Code
# Importar el módulo optimize de la librería scipy
from scipy import optimize as opt
# Funcion minimize
help(opt.minimize)
# Funcion objetivo y condicion inicial
def fun_obj(beta, E_r, s):
recta = beta[0] + beta[1] * s
return ((E_r - recta)**2).sum()
beta_ini = [0, 0]
# Resolver problema de optimizacion
min_sq = opt.minimize(fun=fun_obj,
x0=beta_ini,
args=(annual_summary['Media'], annual_summary['Vol']))
min_sq
beta = min_sq.x
beta
###Output
_____no_output_____
###Markdown
$y = -0.1768 + 1.6796 x$
###Code
# Ventana para graficar
plt.figure(figsize=(6, 4))
plt.plot(annual_summary['Vol'], annual_summary['Media'], 'bo', ms=10)
s = np.linspace(0.15, 0.35)
plt.plot(s, beta[0] + beta[1] * s, 'r', lw=2,
label=f'$y=${np.round(beta[0], 2)}+{np.round(beta[1], 2)}$x$')
for i in range(len(annual_summary)):
    plt.text(annual_summary.iloc[i, 1], annual_summary.iloc[i, 0], annual_summary.index[i])
plt.xlabel('Volatilidad $\sigma$')
plt.ylabel('Rendimiento esperado $E[r]$')
plt.grid()
plt.legend(loc='best')
###Output
_____no_output_____
###Markdown
Midiendo rendimiento y riesgo con datos históricos> Ya sabemos que podemos caracterizar la distribución de rendimientos de un activo mediante una medida de tendencia central (media: rendimiento esperado) y una medida de dispersión (desviación estándar: volatilidad). > En la clase pasada vimos como obtener reportes históricos de precios de activos. ¿Cómo usamos estos históricos para medir el rendimiento esperado y la volatilidad de los rendimientos? *Objetivos:*- Calcular los rendimientos a partir de históricos de precios.- Estimar rendimiento esperado y riesgo a partir de históricos de rendimientos.- Anualizar rendimiento y volatilidad cuando los datos están en una base de tiempo menor.- Verificar la relación entre rendimiento y riesgo a través de datos reales.**Referencias:**- http://pandas.pydata.org/- https://pandas-datareader.readthedocs.io/en/latest/- Notas del curso "Portfolio Selection and Risk Management", Rice University, disponible en Coursera.___ 1. Cálculo de los rendimientosMuy bien, ya entonces sabemos descargar históricos de precios... **Ejemplo:** trabajaremos esta clase con activos que se encuentran en el top-10 del índice S&P500. Descargar precios ajustados en el cierre de Microsoft (MSFT), Apple (AAPL), Amazon (AMZN), Facebook (FB) y Alphabet Inc. (GOOGL) desde el primero de enero del 2015 hasta hoy.
###Code
# Importar paquetes
import pandas_datareader.data as web
import pandas as pd
import numpy as np
%matplotlib inline
# Función para descargar precios de cierre ajustados:
def get_adj_closes(tickers, start_date=None, end_date=None):
# Fecha inicio por defecto (start_date='2010-01-01') y fecha fin por defecto (end_date=today)
# Descargamos DataFrame con todos los datos
closes = web.DataReader(name=tickers, data_source='yahoo', start=start_date, end=end_date)
# Solo necesitamos los precios ajustados en el cierre
closes = closes['Adj Close']
# Se ordenan los índices de manera ascendente
closes.sort_index(inplace=True)
return closes
# Información
tickers = ['MSFT', 'AAPL', 'AMZN', 'FB', 'GOOGL', '^GSPC']
start = '2015-01-01'
# Precios diarios ajustados en el cierre
closes = get_adj_closes(tickers=tickers,
start_date=start)
closes.head()
# Gráfico de histórico de precios diarios
closes.plot();
###Output
_____no_output_____
###Markdown
1.1. Rendimientos porcentuales Muy bien, pero para el análisis no trabajamos con los precios sino con los rendimientos... **¿porqué?** Para una sucesión de precios $\{S_t\}_{t=0}^{n}$, el rendimiento simple $R_t$ se define como el el cambio porcentual$$R_t=\frac{S_t-S_{t-1}}{S_{t-1}}$$para $t=1,\ldots,n$. *¡Cuidado!* los rendimientos son de acuerdo a la base de tiempo en que se reportan los precios. Por ejemplo: - si los precios se reportan en una base diaria, los rendimientos también son diarios;- si los precios se reportan en una base mensual, los rendimientos también son mensuales.
###Code
# S_{t-1}
r = (closes - closes.shift()) / closes.shift()
r.head()
help(closes.pct_change)
# Calcular rendimientos
ret = closes.pct_change().dropna()
ret.head()
# Graficar...
ret.plot();
###Output
_____no_output_____
###Markdown
Midiendo rendimiento y riesgo con datos históricos> Ya sabemos que podemos caracterizar la distribución de rendimientos de un activo mediante una medida de tendencia central (media: rendimiento esperado) y una medida de dispersión (desviación estándar: volatilidad). > En la clase pasada vimos como obtener reportes históricos de precios de activos. ¿Cómo usamos estos históricos para medir el rendimiento esperado y la volatilidad de los rendimientos? *Objetivos:*- Calcular los rendimientos a partir de históricos de precios.- Estimar rendimiento esperado y riesgo a partir de históricos de rendimientos.- Anualizar rendimiento y volatilidad cuando los datos están en una base de tiempo menor.- Verificar la relación entre rendimiento y riesgo a través de datos reales.**Referencias:**- http://pandas.pydata.org/- https://pandas-datareader.readthedocs.io/en/latest/- Notas del curso "Portfolio Selection and Risk Management", Rice University, disponible en Coursera.___ 1. Cálculo de los rendimientosMuy bien, ya entonces sabemos descargar históricos de precios... **Ejemplo:** trabajaremos esta clase con activos que se encuentran en el top-10 del índice S&P500. Descargar precios ajustados en el cierre de Microsoft (MSFT), Apple (AAPL), Amazon (AMZN), Facebook (FB) y Alphabet Inc. (GOOGL) desde el primero de enero del 2015 hasta hoy.
###Code
# Importar paquetes
import pandas_datareader.data as web
import pandas as pd
import numpy as np
%matplotlib inline
# Función para descargar precios de cierre ajustados:
def get_adj_closes(tickers, start_date=None, end_date=None):
# Fecha inicio por defecto (start_date='2010-01-01') y fecha fin por defecto (end_date=today)
# Descargamos DataFrame con todos los datos
closes = web.DataReader(name=tickers, data_source='yahoo', start=start_date, end=end_date)
# Solo necesitamos los precios ajustados en el cierre
closes = closes['Adj Close']
# Se ordenan los índices de manera ascendente
closes.sort_index(inplace=True)
return closes
# Información
names = ['MSFT', 'AAPL', 'AMZN', 'FB', 'GOOGL', '^GSPC']
start_date = '2015-01-01'
# Precios diarios
closes = get_adj_closes(tickers=names,
start_date=start_date
)
closes.head()
# Gráfico de histórico de precios diarios
closes[['MSFT', 'AAPL', 'AMZN', 'FB', 'GOOGL']].plot()
###Output
_____no_output_____
###Markdown
1.1. Rendimientos porcentuales Muy bien, pero para el análisis no trabajamos con los precios sino con los rendimientos... **¿porqué?** Para una sucesión de precios $\{S_t\}_{t=0}^{n}$, el rendimiento simple $R_t$ se define como el el cambio porcentual$$R_t=\frac{S_t-S_{t-1}}{S_{t-1}}$$para $t=1,\ldots,n$. *¡Cuidado!* los rendimientos son de acuerdo a la base de tiempo en que se reportan los precios. Por ejemplo: - si los precios se reportan en una base diaria, los rendimientos también son diarios;- si los precios se reportan en una base mensual, los rendimientos también son mensuales.
###Code
# Método shift() de un DataFrame...
help(closes.shift)
closes.head(3)
closes.shift().head(3)
# Calcular los rendimientos
St = closes
St_1 = closes.shift()
ret = ((St - St_1) / St_1).dropna()
ret.head()
help(closes.pct_change)
# Otra forma (más fácil)
ret = closes.pct_change().dropna()
ret.head()
# Graficar...
ret.plot()
###Output
_____no_output_____
###Markdown
**¿Qué se observa respecto a los precios?**Respuestas:- Los rendimientos parecen conservar tendecias estadísticas constantes (por ejemplo, oscilan al rededor de números cercanos a cero). 1.2. Rendimientos logarítmicos (log-rendimientos) Otro rendimiento usado con frecuencia es el rendimiento continuamente compuesto o rendimiento logaritmico. Éste, está definido como$$r_t=\ln\left(\frac{S_t}{S_{t-1}}\right).$$Es fácil darse cuenta que $r_t=\ln(1+R_t)$.Ver en el tablero que si $0\leq|x|\ll 1$, entonces $\ln(1+x)\approx x$.
###Code
# Calcular rendimientos continuamente compuestos
log_ret = np.log(St / St_1).dropna()
log_ret.head(3)
# Recordar rendimientos porcentuales. Ver que son similares
ret.head(3)
# Veamos el valor absoluto de la diferencia
np.abs(ret - log_ret).head()
###Output
_____no_output_____
###Markdown
Por lo anterior, muchas veces se usan para el análisis los rendimientos continuamente compuestos.___ 2. Caracterización de la distribución de los rendimientosEntonces:- partimos de que tenemos los rendimientos porcentuales diarios de Apple, Walmart, IBM y Nike desde inicios del 2011 a finales del 2015;- ¿cómo resumirían estos datos?
###Code
# Rendimiento medio diario (media aritmética)
ret.mean()
# Volatilidad diaria (desviación estándar)
ret.std()
# Podemos resumir en un DataFrame
resumen = pd.DataFrame({'R.E.': ret.mean(), 'Vol': ret.std()})
resumen
###Output
_____no_output_____
###Markdown
Normalmente se reportan rendimientos esperados y volatilidades en una base anual. Para anualizar:$$E[r_a]=12E[r_m]=252E[r_d]=52E[r_w],\text{ y}$$$$\sigma_{r_a}=\sqrt{12}\sigma_{r_m}=\sqrt{252}\sigma_{r_d}=\sqrt{52}\sigma_{r_w}$$
###Code
# Resumen en base anual
resumen_anual = pd.DataFrame({'R.E.': 252 * ret.mean(), 'Vol': np.sqrt(252) * ret.std()})
resumen_anual
# Gráfico rendimiento esperado vs. volatilidad
import matplotlib.pyplot as plt
%matplotlib inline
# Puntos a graficar
# Ventana para graficar
plt.figure(figsize=(6, 4))
# Graficar puntos
plt.plot(resumen_anual['Vol'], resumen_anual['R.E.'], 'o', ms=10)
# Etiquetas de los ejes
plt.xlabel('Volatilidad $\sigma$')
plt.ylabel('Rendimiento esperado $E[r]$')
# Etiqueta de cada instrumento
for i in range(len(resumen_anual)):
plt.text(resumen_anual.iloc[i, 1], resumen_anual.iloc[i, 0], resumen_anual.index[i])
plt.grid()
###Output
_____no_output_____
###Markdown
2.1 Ajuste de curvas con mínimos cuadradosConsideramos que tenemos un conjunto de n pares ordenados de datos $(\sigma_{r_i},E[r_i])$, para $i=1,2,3,\dots,n$... **en este caso corresponden a volatilidad y rendimiento esperado** ¿Cuál es la recta que mejor se ajusta a estos datos?Consideramos entonces ajustes de la forma $\hat{f}(\sigma) = \beta_0+\beta_1 \sigma = \left[1 \quad \sigma\right]\left[\begin{array}{c} \beta_0 \\ \beta_1 \end{array}\right]=\left[1 \quad \sigma\right]\boldsymbol{\beta}$ (lineas rectas).Para decir '*mejor*', tenemos que definir algún sentido en que una recta se ajuste *mejor* que otra.**Mínimos cuadrados**: el objetivo es seleccionar los coeficientes $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$, de forma que la función evaluada en los puntos $\sigma_{r_i}$ ($\hat{f}(\sigma_{r_i})$) aproxime los valores correspondientes $E[r_i]$.La formulación por mínimos cuadrados, encuentra los $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$ que minimiza$$\sum_{i=1}^{n}(E[r_i]-\hat{f}(\sigma_{r_i}))^2$$
###Code
# Importar el módulo optimize de la librería scipy
from scipy.optimize import minimize
# Funcion minimize
help(minimize)
# Funcion objetivo y condicion inicial
def fun_obj(beta, x, y):
recta = beta[0] + beta[1] * x
return ((y - recta)**2).sum()
# Resolver problema de optimizacion
x0 = [0, 0]
solucion = minimize(fun=fun_obj,
x0=x0,
args=(resumen_anual['Vol'], resumen_anual['R.E.'])
)
solucion
beta = solucion.x
beta
# Recta ajustada: y = m * x + b
# es decir: E[r] = beta[0] + beta[1] * sigma
# Ventana para graficar
plt.figure(figsize=(6, 4))
# Graficar puntos
plt.plot(resumen_anual['Vol'], resumen_anual['R.E.'], 'o', ms=10)
# Etiquetas de los ejes
plt.xlabel('Volatilidad $\sigma$')
plt.ylabel('Rendimiento esperado $E[r]$')
# Etiqueta de cada instrumento
for i in range(len(resumen_anual)):
plt.text(resumen_anual.iloc[i, 1], resumen_anual.iloc[i, 0], resumen_anual.index[i])
plt.grid()
# Grafica de recta ajustada
vol = np.linspace(0.1, 0.4)
plt.plot(vol, beta[0] + beta[1] * vol,
label=f'Recta ajustada $E[r]=${np.round(beta[0], 2)}$+${np.round(beta[1], 2)}$\sigma$')
plt.legend(loc='best')
'mensaje {}'.format(20 / 2)
f'mensaje {20 / 2}'
help(np.linspace)
###Output
Help on function linspace in module numpy:
linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0)
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop`].
The endpoint of the interval can optionally be excluded.
.. versionchanged:: 1.16.0
Non-scalar `start` and `stop` are now supported.
Parameters
----------
start : array_like
The starting value of the sequence.
stop : array_like
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
dtype : dtype, optional
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
.. versionadded:: 1.9.0
axis : int, optional
The axis in the result to store the samples. Relevant only if start
or stop are array-like. By default (0), the samples will be along a
new axis inserted at the beginning. Use -1 to get an axis at the end.
.. versionadded:: 1.16.0
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float, optional
Only returned if `retstep` is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
geomspace : Similar to `linspace`, but with numbers spaced evenly on a log
scale (a geometric progression).
logspace : Similar to `geomspace`, but with the end points specified as
logarithms.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
###Markdown
Midiendo rendimiento y riesgo con datos históricos> Ya sabemos que podemos caracterizar la distribución de rendimientos de un activo mediante una medida de tendencia central (media: rendimiento esperado) y una medida de dispersión (desviación estándar: volatilidad). > En la clase pasada vimos como obtener reportes históricos de precios de activos. ¿Cómo usamos estos históricos para medir el rendimiento esperado y la volatilidad de los rendimientos? *Objetivos:*- Calcular los rendimientos a partir de históricos de precios.- Estimar rendimiento esperado y riesgo a partir de históricos de rendimientos.- Anualizar rendimiento y volatilidad cuando los datos están en una base de tiempo menor.- Verificar la relación entre rendimiento y riesgo a través de datos reales.**Referencias:**- http://pandas.pydata.org/- https://pandas-datareader.readthedocs.io/en/latest/- Notas del curso "Portfolio Selection and Risk Management", Rice University, disponible en Coursera.___ 1. Cálculo de los rendimientosMuy bien, ya entonces sabemos descargar históricos de precios... **Ejemplo:** trabajaremos esta clase con activos que se encuentran en el top-10 del índice S&P500. Descargar precios ajustados en el cierre de Microsoft (MSFT), Apple (AAPL), Amazon (AMZN), Facebook (FB) y Alphabet Inc. (GOOGL) desde el primero de enero del 2015 hasta hoy.
###Code
# Importar paquetes
import pandas_datareader.data as web
import pandas as pd
import numpy as np
%matplotlib inline
# Función para descargar precios de cierre ajustados:
def get_adj_closes(tickers, start_date=None, end_date=None):
# Fecha inicio por defecto (start_date='2010-01-01') y fecha fin por defecto (end_date=today)
# Descargamos DataFrame con todos los datos
closes = web.DataReader(name=tickers, data_source='yahoo', start=start_date, end=end_date)
# Solo necesitamos los precios ajustados en el cierre
closes = closes['Adj Close']
# Se ordenan los índices de manera ascendente
closes.sort_index(inplace=True)
return closes
# Información
# Precios diarios
# Gráfico de histórico de precios diarios
###Output
_____no_output_____
###Markdown
1.1. Rendimientos porcentuales Muy bien, pero para el análisis no trabajamos con los precios sino con los rendimientos... **¿porqué?** Para una sucesión de precios $\{S_t\}_{t=0}^{n}$, el rendimiento simple $R_t$ se define como el el cambio porcentual$$R_t=\frac{S_t-S_{t-1}}{S_{t-1}}$$para $t=1,\ldots,n$. *¡Cuidado!* los rendimientos son de acuerdo a la base de tiempo en que se reportan los precios. Por ejemplo: - si los precios se reportan en una base diaria, los rendimientos también son diarios;- si los precios se reportan en una base mensual, los rendimientos también son mensuales.
###Code
# Método shift() de un DataFrame...
# Calcular los rendimientos
# Otra forma (más fácil)
# Graficar...
###Output
_____no_output_____
###Markdown
**¿Qué se observa respecto a los precios?**Respuestas:- Los rendimientos parecen conservar tendecias estadísticas constantes (por ejemplo, oscilan al rededor de números cercanos a cero). 1.2. Rendimientos logarítmicos (log-rendimientos) Otro rendimiento usado con frecuencia es el rendimiento continuamente compuesto o rendimiento logaritmico. Éste, está definido como$$r_t=\ln\left(\frac{S_t}{S_{t-1}}\right).$$Es fácil darse cuenta que $r_t=\ln(1+R_t)$.Ver en el tablero que si $0\leq|x|\ll 1$, entonces $\ln(1+x)\approx x$.
###Code
# Calcular rendimientos continuamente compuestos
# Recordar rendimientos porcentuales. Ver que son similares
# Veamos el valor absoluto de la diferencia
###Output
_____no_output_____
###Markdown
Por lo anterior, muchas veces se usan para el análisis los rendimientos continuamente compuestos.___ 2. Caracterización de la distribución de los rendimientosEntonces:- partimos de que tenemos los rendimientos porcentuales diarios de Apple, Walmart, IBM y Nike desde inicios del 2011 a finales del 2015;- ¿cómo resumirían estos datos?
###Code
# Rendimiento medio diario (media aritmética)
# Volatilidad diaria (desviación estándar)
# Podemos resumir en un DataFrame
###Output
_____no_output_____
###Markdown
Normalmente se reportan rendimientos esperados y volatilidades en una base anual. Para anualizar:$$E[r_a]=12E[r_m]=252E[r_d]=52E[r_w],\text{ y}$$$$\sigma_{r_a}=\sqrt{12}\sigma_{r_m}=\sqrt{252}\sigma_{r_d}=\sqrt{52}\sigma_{r_w}$$
###Code
# Resumen en base anual
# Gráfico rendimiento esperado vs. volatilidad
###Output
_____no_output_____
###Markdown
2.1 Ajuste de curvas con mínimos cuadradosConsideramos que tenemos un conjunto de n pares ordenados de datos $(\sigma_{r_i},E[r_i])$, para $i=1,2,3,\dots,n$... **en este caso corresponden a volatilidad y rendimiento esperado** ¿Cuál es la recta que mejor se ajusta a estos datos?Consideramos entonces ajustes de la forma $\hat{f}(\sigma) = \beta_0+\beta_1 \sigma = \left[1 \quad \sigma\right]\left[\begin{array}{c} \beta_0 \\ \beta_1 \end{array}\right]=\left[1 \quad \sigma\right]\boldsymbol{\beta}$ (lineas rectas).Para decir '*mejor*', tenemos que definir algún sentido en que una recta se ajuste *mejor* que otra.**Mínimos cuadrados**: el objetivo es seleccionar los coeficientes $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$, de forma que la función evaluada en los puntos $\sigma_{r_i}$ ($\hat{f}(\sigma_{r_i})$) aproxime los valores correspondientes $E[r_i]$.La formulación por mínimos cuadrados, encuentra los $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$ que minimiza$$\sum_{i=1}^{n}(E[r_i]-\hat{f}(\sigma_{r_i}))^2$$
###Code
# Importar el módulo optimize de la librería scipy
# Funcion minimize
# Funcion objetivo y condicion inicial
# Resolver problema de optimizacion
# Ventana para graficar
###Output
_____no_output_____
###Markdown
Midiendo rendimiento y riesgo con datos históricos> Ya sabemos que podemos caracterizar la distribución de rendimientos de un activo mediante una medida de tendencia central (media: rendimiento esperado) y una medida de dispersión (desviación estándar: volatilidad). > En la clase pasada vimos como obtener reportes históricos de precios de activos. ¿Cómo usamos estos históricos para medir el rendimiento esperado y la volatilidad de los rendimientos? *Objetivos:*- Calcular los rendimientos a partir de históricos de precios.- Estimar rendimiento esperado y riesgo a partir de históricos de rendimientos.- Anualizar rendimiento y volatilidad cuando los datos están en una base de tiempo menor.- Verificar la relación entre rendimiento y riesgo a través de datos reales.**Referencias:**- http://pandas.pydata.org/- https://pandas-datareader.readthedocs.io/en/latest/- Notas del curso "Portfolio Selection and Risk Management", Rice University, disponible en Coursera.___ 1. Cálculo de los rendimientosMuy bien, ya entonces sabemos descargar históricos de precios... **Ejemplo:** trabajaremos esta clase con activos que se encuentran en el top-10 del índice S&P500. Descargar precios ajustados en el cierre de Microsoft (MSFT), Apple (AAPL), Amazon (AMZN), Facebook (FB) y Alphabet Inc. (GOOGL) desde el primero de enero del 2015 hasta hoy.
###Code
# Importar paquetes
import pandas_datareader.data as web
import pandas as pd
import numpy as np
%matplotlib inline
# Función para descargar precios de cierre ajustados:
def get_adj_closes(tickers, start_date=None, end_date=None):
# Fecha inicio por defecto (start_date='2010-01-01') y fecha fin por defecto (end_date=today)
# Descargamos DataFrame con todos los datos
closes = web.DataReader(name=tickers, data_source='yahoo', start=start_date, end=end_date)
# Solo necesitamos los precios ajustados en el cierre
closes = closes['Adj Close']
# Se ordenan los índices de manera ascendente
closes.sort_index(inplace=True)
return closes
# Información
names = ['AAPL', 'MSFT', 'AMZN', 'FB', 'GOOGL']
start = '2015-01-01'
# Precios diarios
closes = get_adj_closes(tickers=names,
start_date=start)
closes.head()
# Gráfico de histórico de precios diarios
closes.plot()
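# Optional sketch: the prices start at very different levels, so dividing each
# series by its first price (a base-1 normalization) makes the histories comparable.
(closes / closes.iloc[0]).plot()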
###Output
_____no_output_____
###Markdown
1.1. Rendimientos porcentuales Muy bien, pero para el análisis no trabajamos con los precios sino con los rendimientos... **¿por qué?** Para una sucesión de precios $\{S_t\}_{t=0}^{n}$, el rendimiento simple $R_t$ se define como el cambio porcentual$$R_t=\frac{S_t-S_{t-1}}{S_{t-1}}$$para $t=1,\ldots,n$. *¡Cuidado!* los rendimientos quedan en la base de tiempo en que se reportan los precios. Por ejemplo: - si los precios se reportan en una base diaria, los rendimientos también son diarios;- si los precios se reportan en una base mensual, los rendimientos también son mensuales.
###Code
# Método shift() de un DataFrame...
help(closes.shift)
closes.head()
closes.shift().head()
# Calcular los rendimientos
ret = ((closes - closes.shift()) / closes.shift()).dropna()
ret.head()
# Otra forma (más fácil)
ret = closes.pct_change().dropna()
ret.head()
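# Toy example (sketch): pct_change() reproduces (S_t - S_{t-1}) / S_{t-1};
# for prices 100 -> 110 -> 99 the simple returns are +10% and -10%.
pd.Series([100, 110, 99]).pct_change()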
# Graficar...
ret.plot()
###Output
_____no_output_____
###Markdown
**¿Qué se observa respecto a los precios?**Respuestas:- Los rendimientos parecen conservar tendencias estadísticas constantes (por ejemplo, oscilan alrededor de números cercanos a cero). 1.2. Rendimientos logarítmicos (log-rendimientos) Otro rendimiento usado con frecuencia es el rendimiento continuamente compuesto o rendimiento logarítmico. Éste está definido como$$r_t=\ln\left(\frac{S_t}{S_{t-1}}\right).$$Es fácil darse cuenta que $r_t=\ln(1+R_t)$.Ver en el tablero que si $0\leq|x|\ll 1$, entonces $\ln(1+x)\approx x$.
###Code
# Calcular rendimientos continuamente compuestos
log_ret = np.log(closes / closes.shift()).dropna()
log_ret.head()
# Recordar rendimientos porcentuales. Ver que son similares
ret.head()
# Veamos el valor absoluto de la diferencia
np.abs(ret - log_ret)
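# Numeric illustration (sketch): for small x, ln(1 + x) is very close to x,
# which is why daily simple and log returns are almost identical.
x_small = 0.001
x_small, np.log(1 + x_small)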
###Output
_____no_output_____
###Markdown
Por lo anterior, muchas veces se usan para el análisis los rendimientos continuamente compuestos.___ 2. Caracterización de la distribución de los rendimientosEntonces:- partimos de que tenemos los rendimientos porcentuales diarios de Microsoft (MSFT), Apple (AAPL), Amazon (AMZN), Facebook (FB) y Alphabet Inc. (GOOGL) desde inicios del 2015 hasta hoy;- ¿cómo resumirían estos datos?
###Code
# Rendimiento medio diario (media aritmética)
ret.mean()
# Volatilidad diaria (desviación estándar)
ret.std()
# Podemos resumir en un DataFrame
ret_summary = pd.DataFrame({'Media': ret.mean(), 'Vol': ret.std()})
ret_summary
###Output
_____no_output_____
###Markdown
Normalmente se reportan rendimientos esperados y volatilidades en una base anual. Para anualizar:$$E[r_a]=12E[r_m]=252E[r_d]=52E[r_w],\text{ y}$$$$\sigma_{r_a}=\sqrt{12}\sigma_{r_m}=\sqrt{252}\sigma_{r_d}=\sqrt{52}\sigma_{r_w}$$
###Code
# Resumen en base anual
annual_ret_summary = pd.DataFrame({'Media': 252 * ret.mean(), 'Vol': np.sqrt(252) * ret.std()})
annual_ret_summary
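# Hand check (sketch, hypothetical numbers): a 0.1% daily mean and a 2% daily
# volatility annualize to 252 * 0.001 = 25.2% and sqrt(252) * 0.02 ≈ 31.7%.
252 * 0.001, np.sqrt(252) * 0.02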
from matplotlib import pyplot as plt
# Gráfico rendimiento esperado vs. volatilidad
plt.figure(figsize=(6, 4));
for i in range(len(annual_ret_summary)):
plt.plot(annual_ret_summary.iloc[i, 1], annual_ret_summary.iloc[i, 0], 'ob', ms=10)
plt.text(annual_ret_summary.iloc[i, 1]+0.001, annual_ret_summary.iloc[i, 0], annual_ret_summary.index[i])
plt.grid()
plt.xlabel("Volatilidad $\sigma$")
plt.ylabel("Rendimiento esperado $E[r]$")
###Output
_____no_output_____
###Markdown
2.1 Ajuste de curvas con mínimos cuadradosConsideramos que tenemos un conjunto de n pares ordenados de datos $(\sigma_{r_i},E[r_i])$, para $i=1,2,3,\dots,n$... **en este caso corresponden a volatilidad y rendimiento esperado** ¿Cuál es la recta que mejor se ajusta a estos datos?Consideramos entonces ajustes de la forma $\hat{f}(\sigma) = \beta_0+\beta_1 \sigma = \left[1 \quad \sigma\right]\left[\begin{array}{c} \beta_0 \\ \beta_1 \end{array}\right]=\left[1 \quad \sigma\right]\boldsymbol{\beta}$ (lineas rectas).Para decir '*mejor*', tenemos que definir algún sentido en que una recta se ajuste *mejor* que otra.**Mínimos cuadrados**: el objetivo es seleccionar los coeficientes $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$, de forma que la función evaluada en los puntos $\sigma_{r_i}$ ($\hat{f}(\sigma_{r_i})$) aproxime los valores correspondientes $E[r_i]$.La formulación por mínimos cuadrados, encuentra los $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$ que minimiza$$\sum_{i=1}^{n}(E[r_i]-\hat{f}(\sigma_{r_i}))^2$$
###Code
# Importar el módulo optimize de la librería scipy
from scipy.optimize import minimize
# Funcion minimize
help(minimize)
# Funcion objetivo y condicion inicial
def min_sq(beta, y, x):
f_hat = beta[0] + beta[1] * x
sum_sq = ((y - f_hat)**2).sum()
return sum_sq
x0 = [0, 0]
# Resolver problema de optimizacion
resultado = minimize(fun=min_sq,
x0=x0,
args=(annual_ret_summary['Media'], annual_ret_summary['Vol'])
)
resultado
beta = resultado.x
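# Alternative sketch: the same coefficients follow from the normal equations of
# f_hat(sigma) = [1  sigma] @ beta, solved in closed form with np.linalg.lstsq.
A = np.column_stack([np.ones(len(annual_ret_summary)), annual_ret_summary['Vol']])
np.linalg.lstsq(A, annual_ret_summary['Media'], rcond=None)[0]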
# Ventana para graficar
plt.figure(figsize=(6, 4));
for i in range(len(annual_ret_summary)):
plt.plot(annual_ret_summary.iloc[i, 1], annual_ret_summary.iloc[i, 0], 'ob', ms=10)
plt.text(annual_ret_summary.iloc[i, 1]+0.002, annual_ret_summary.iloc[i, 0], annual_ret_summary.index[i])
x = np.linspace(0.26, 0.35)
plt.plot(x, beta[0] + beta[1] * x, '-r', lw=3, label='Recta min. cuad.')
plt.grid()
plt.legend(loc='best')
plt.xlabel("Volatilidad $\sigma$")
plt.ylabel("Rendimiento esperado $E[r]$")
beta
###Output
_____no_output_____
###Markdown
Midiendo rendimiento y riesgo con datos históricos> Ya sabemos que podemos caracterizar la distribución de rendimientos de un activo mediante una medida de tendencia central (media: rendimiento esperado) y una medida de dispersión (desviación estándar: volatilidad). > En la clase pasada vimos como obtener reportes históricos de precios de activos. ¿Cómo usamos estos históricos para medir el rendimiento esperado y la volatilidad de los rendimientos? *Objetivos:*- Calcular los rendimientos a partir de históricos de precios.- Estimar rendimiento esperado y riesgo a partir de históricos de rendimientos.- Anualizar rendimiento y volatilidad cuando los datos están en una base de tiempo menor.- Verificar la relación entre rendimiento y riesgo a través de datos reales.**Referencias:**- http://pandas.pydata.org/- https://pandas-datareader.readthedocs.io/en/latest/- Notas del curso "Portfolio Selection and Risk Management", Rice University, disponible en Coursera.___ 1. Cálculo de los rendimientosMuy bien, ya entonces sabemos descargar históricos de precios... **Ejemplo:** trabajaremos esta clase con activos que se encuentran en el top-10 del índice S&P500. Descargar precios ajustados en el cierre de Microsoft (MSFT), Apple (AAPL), Amazon (AMZN), Facebook (FB) y Alphabet Inc. (GOOGL) desde el primero de enero del 2015 hasta hoy.
###Code
# Importar paquetes
import pandas_datareader.data as web
import pandas as pd
import numpy as np
%matplotlib inline
# Función para descargar precios de cierre ajustados:
def get_adj_closes(tickers, start_date=None, end_date=None):
# Fecha inicio por defecto (start_date='2010-01-01') y fecha fin por defecto (end_date=today)
# Descargamos DataFrame con todos los datos
closes = web.DataReader(name=tickers, data_source='yahoo', start=start_date, end=end_date)
# Solo necesitamos los precios ajustados en el cierre
closes = closes['Adj Close']
# Se ordenan los índices de manera ascendente
closes.sort_index(inplace=True)
return closes
# Información
# Precios diarios ajustados en el cierre
# Gráfico de histórico de precios diarios
###Output
_____no_output_____
###Markdown
1.1. Rendimientos porcentuales Muy bien, pero para el análisis no trabajamos con los precios sino con los rendimientos... **¿porqué?** Para una sucesión de precios $\{S_t\}_{t=0}^{n}$, el rendimiento simple $R_t$ se define como el el cambio porcentual$$R_t=\frac{S_t-S_{t-1}}{S_{t-1}}$$para $t=1,\ldots,n$. *¡Cuidado!* los rendimientos son de acuerdo a la base de tiempo en que se reportan los precios. Por ejemplo: - si los precios se reportan en una base diaria, los rendimientos también son diarios;- si los precios se reportan en una base mensual, los rendimientos también son mensuales.
###Code
# Calcular rendimientos
# Graficar...
###Output
_____no_output_____
###Markdown
**¿Qué se observa respecto a los precios?**Respuestas:- Los rendimientos parecen conservar tendecias estadísticas constantes (por ejemplo, oscilan al rededor de números cercanos a cero). 1.2. Rendimientos logarítmicos (log-rendimientos) Otro rendimiento usado con frecuencia es el rendimiento continuamente compuesto o rendimiento logaritmico. Éste, está definido como$$r_t=\ln\left(\frac{S_t}{S_{t-1}}\right).$$Es fácil darse cuenta que $r_t=\ln(1+R_t)$.Ver en el tablero que si $0\leq|x|\ll 1$, entonces $\ln(1+x)\approx x$.
###Code
# ¿Cómo calcular S_{t-1}?
# Calcular rendimientos continuamente compuestos
# Recordar rendimientos porcentuales. Ver que son similares
# Veamos el valor absoluto de la diferencia
###Output
_____no_output_____
###Markdown
Por lo anterior, muchas veces se usan para el análisis los rendimientos continuamente compuestos.___ 2. Caracterización de la distribución de los rendimientosEntonces:- partimos de que tenemos los rendimientos porcentuales diarios de Apple, Walmart, IBM y Nike desde inicios del 2011 a finales del 2015;- ¿cómo resumirían estos datos?
###Code
# Rendimiento medio diario (media aritmética)
# Volatilidad diaria (desviación estándar)
# Podemos resumir en un DataFrame
###Output
_____no_output_____
###Markdown
Normalmente se reportan rendimientos esperados y volatilidades en una base anual. Para anualizar:$$E[r_a]=12E[r_m]=252E[r_d]=52E[r_w],\text{ y}$$$$\sigma_{r_a}=\sqrt{12}\sigma_{r_m}=\sqrt{252}\sigma_{r_d}=\sqrt{52}\sigma_{r_w}$$
###Code
# Resumen en base anual
# Gráfico rendimiento esperado vs. volatilidad
###Output
_____no_output_____
###Markdown
2.1 Ajuste de curvas con mínimos cuadradosConsideramos que tenemos un conjunto de n pares ordenados de datos $(\sigma_{r_i},E[r_i])$, para $i=1,2,3,\dots,n$... **en este caso corresponden a volatilidad y rendimiento esperado** ¿Cuál es la recta que mejor se ajusta a estos datos?Consideramos entonces ajustes de la forma $\hat{f}(\sigma) = \beta_0+\beta_1 \sigma = \left[1 \quad \sigma\right]\left[\begin{array}{c} \beta_0 \\ \beta_1 \end{array}\right]=\left[1 \quad \sigma\right]\boldsymbol{\beta}$ (lineas rectas).Para decir '*mejor*', tenemos que definir algún sentido en que una recta se ajuste *mejor* que otra.**Mínimos cuadrados**: el objetivo es seleccionar los coeficientes $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$, de forma que la función evaluada en los puntos $\sigma_{r_i}$ ($\hat{f}(\sigma_{r_i})$) aproxime los valores correspondientes $E[r_i]$.La formulación por mínimos cuadrados, encuentra los $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$ que minimiza$$\sum_{i=1}^{n}(E[r_i]-\hat{f}(\sigma_{r_i}))^2$$
###Code
# Importar el módulo optimize de la librería scipy
# Funcion minimize
# Funcion objetivo y condicion inicial
# Resolver problema de optimizacion
# Gráfico rendimiento esperado vs. volatilidad
###Output
_____no_output_____
###Markdown
Midiendo rendimiento y riesgo con datos históricos> Ya sabemos que podemos caracterizar la distribución de rendimientos de un activo mediante una medida de tendencia central (media: rendimiento esperado) y una medida de dispersión (desviación estándar: volatilidad). > En la clase pasada vimos como obtener reportes históricos de precios de activos. ¿Cómo usamos estos históricos para medir el rendimiento esperado y la volatilidad de los rendimientos? *Objetivos:*- Calcular los rendimientos a partir de históricos de precios.- Estimar rendimiento esperado y riesgo a partir de históricos de rendimientos.- Anualizar rendimiento y volatilidad cuando los datos están en una base de tiempo menor.- Verificar la relación entre rendimiento y riesgo a través de datos reales.**Referencias:**- http://pandas.pydata.org/- https://pandas-datareader.readthedocs.io/en/latest/- Notas del curso "Portfolio Selection and Risk Management", Rice University, disponible en Coursera.___ 1. Cálculo de los rendimientosMuy bien, ya entonces sabemos descargar históricos de precios... **Ejemplo:** trabajaremos esta clase con activos que se encuentran en el top-10 del índice S&P500. Descargar precios ajustados en el cierre de Microsoft (MSFT), Apple (AAPL), Amazon (AMZN), Facebook (FB) y Alphabet Inc. (GOOGL) desde el primero de enero del 2015 hasta hoy.
###Code
# Importar paquetes
import pandas_datareader.data as web
import pandas as pd
import numpy as np
%matplotlib inline
# Función para descargar precios de cierre ajustados:
def get_adj_closes(tickers, start_date=None, end_date=None):
# Fecha inicio por defecto (start_date='2010-01-01') y fecha fin por defecto (end_date=today)
# Descargamos DataFrame con todos los datos
closes = web.DataReader(name=tickers, data_source='yahoo', start=start_date, end=end_date)
# Solo necesitamos los precios ajustados en el cierre
closes = closes['Adj Close']
# Se ordenan los índices de manera ascendente
closes.sort_index(inplace=True)
return closes
# Información
names = ['MSFT', 'AAPL', 'AMZN', 'FB', 'GOOGL', '^GSPC']
start_date = '2015-01-01'
# Precios diarios
closes = get_adj_closes(tickers=names,
start_date=start_date
)
closes.tail(10)
# Gráfico de histórico de precios diarios
closes.plot(figsize=(6,4), grid=True)
###Output
_____no_output_____
###Markdown
1.1. Rendimientos porcentuales Muy bien, pero para el análisis no trabajamos con los precios sino con los rendimientos... **¿por qué?** Para una sucesión de precios $\{S_t\}_{t=0}^{n}$, el rendimiento simple $R_t$ se define como el cambio porcentual$$R_t=\frac{S_t-S_{t-1}}{S_{t-1}}$$para $t=1,\ldots,n$. *¡Cuidado!* los rendimientos quedan en la base de tiempo en que se reportan los precios. Por ejemplo: - si los precios se reportan en una base diaria, los rendimientos también son diarios;- si los precios se reportan en una base mensual, los rendimientos también son mensuales.
###Code
# Método shift() de un DataFrame...
help(closes.shift)
closes.shift()
# Calcular los rendimientos
ret = ((closes - closes.shift()) / closes.shift()).dropna()
ret.head()
# Otra forma (más fácil)
ret = closes.pct_change().dropna()
# Graficar...
ret.plot(figsize=(6, 4), grid=True)
###Output
_____no_output_____
###Markdown
**¿Qué se observa respecto a los precios?**Respuestas:- Los rendimientos parecen conservar tendencias estadísticas constantes (por ejemplo, oscilan alrededor de números cercanos a cero). 1.2. Rendimientos logarítmicos (log-rendimientos) Otro rendimiento usado con frecuencia es el rendimiento continuamente compuesto o rendimiento logarítmico. Éste está definido como$$r_t=\ln\left(\frac{S_t}{S_{t-1}}\right).$$Es fácil darse cuenta que $r_t=\ln(1+R_t)$.Ver en el tablero que si $0\leq|x|\ll 1$, entonces $\ln(1+x)\approx x$.
###Code
# Calcular rendimientos continuamente compuestos
log_ret = np.log(closes / closes.shift()).dropna()
log_ret.head(2)
# Recordar rendimientos porcentuales. Ver que son similares
ret.head(2)
# Veamos el valor absoluto de la diferencia
np.abs(ret - log_ret).head(3)
###Output
_____no_output_____
###Markdown
Por lo anterior, muchas veces se usan para el análisis los rendimientos continuamente compuestos.___ 2. Caracterización de la distribución de los rendimientosEntonces:- partimos de que tenemos los rendimientos porcentuales diarios de Microsoft (MSFT), Apple (AAPL), Amazon (AMZN), Facebook (FB) y Alphabet Inc. (GOOGL) desde inicios del 2015 hasta hoy;- ¿cómo resumirían estos datos?
###Code
# Rendimiento medio diario (media aritmética)
mean_ret = ret.mean()
mean_ret
# Volatilidad diaria (desviación estándar)
vol = ret.std()
vol
# Podemos resumir en un DataFrame
ret_summary = pd.DataFrame({'Mean': mean_ret, 'Vol': vol})
ret_summary
###Output
_____no_output_____
###Markdown
Normalmente se reportan rendimientos esperados y volatilidades en una base anual. Para anualizar:$$E[r_a]=12E[r_m]=252E[r_d]=52E[r_w],\text{ y}$$$$\sigma_{r_a}=\sqrt{12}\sigma_{r_m}=\sqrt{252}\sigma_{r_d}=\sqrt{52}\sigma_{r_w}$$
###Code
# Resumen en base anual
annual_ret_summary = pd.DataFrame({'Mean': mean_ret * 252,
'Vol': vol * np.sqrt(252)
})
annual_ret_summary
# Gráfico rendimiento esperado vs. volatilidad
import matplotlib.pyplot as plt
# Puntos a graficar
x_points = annual_ret_summary.loc[:, 'Vol']
y_points = annual_ret_summary.loc[:, 'Mean']
# Ventana para graficar
plt.figure(figsize=(6, 4))
# Graficar puntos
plt.plot(x_points, y_points, 'o', ms=10)
plt.grid()
# Etiquetas de los ejes
plt.xlabel('Volatilidad ($\sigma$)')
plt.ylabel('Rendimiento esperado ($E[r]$)')
# Etiqueta de cada instrumento
for i in range(len(annual_ret_summary)):
    plt.text(x_points.iloc[i], y_points.iloc[i], annual_ret_summary.index[i])
plt.show()
###Output
_____no_output_____
###Markdown
2.1 Ajuste de curvas con mínimos cuadradosConsideramos que tenemos un conjunto de n pares ordenados de datos $(\sigma_{r_i},E[r_i])$, para $i=1,2,3,\dots,n$... **en este caso corresponden a volatilidad y rendimiento esperado** ¿Cuál es la recta que mejor se ajusta a estos datos?Consideramos entonces ajustes de la forma $\hat{f}(\sigma) = \beta_0+\beta_1 \sigma = \left[1 \quad \sigma\right]\left[\begin{array}{c} \beta_0 \\ \beta_1 \end{array}\right]=\left[1 \quad \sigma\right]\boldsymbol{\beta}$ (lineas rectas).Para decir '*mejor*', tenemos que definir algún sentido en que una recta se ajuste *mejor* que otra.**Mínimos cuadrados**: el objetivo es seleccionar los coeficientes $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$, de forma que la función evaluada en los puntos $\sigma_{r_i}$ ($\hat{f}(\sigma_{r_i})$) aproxime los valores correspondientes $E[r_i]$.La formulación por mínimos cuadrados, encuentra los $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$ que minimiza$$\sum_{i=1}^{n}(E[r_i]-\hat{f}(\sigma_{r_i}))^2$$
###Code
# Importar el módulo optimize de la librería scipy
from scipy.optimize import minimize
# Funcion minimize
help(minimize)
# Funcion objetivo y condicion inicial
def objetivo(beta, vol, mean_ret):
recta = beta[0] + beta[1] * vol
return ((mean_ret - recta) ** 2).sum()
# Resolver problema de optimizacion
beta_ini = [0, 0]
solucion = minimize(fun=objetivo,
x0=beta_ini,
args=(annual_ret_summary['Vol'],
annual_ret_summary['Mean']
)
)
solucion
beta_opt = solucion.x
beta_opt
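# Sketch: fitted expected return at each asset's volatility, to compare against
# the observed 'Mean' column.
beta_opt[0] + beta_opt[1] * annual_ret_summary['Vol']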
# Puntos a graficar
x_points = annual_ret_summary.loc[:, 'Vol']
y_points = annual_ret_summary.loc[:, 'Mean']
# Ventana para graficar
plt.figure(figsize=(6, 4))
# Graficar puntos
plt.plot(x_points, y_points, 'o', ms=10)
plt.grid()
# Etiquetas de los ejes
plt.xlabel('Volatilidad ($\sigma$)')
plt.ylabel('Rendimiento esperado ($E[r]$)')
# Etiqueta de cada instrumento
for i in range(len(annual_ret_summary)):
    plt.text(x_points.iloc[i], y_points.iloc[i], annual_ret_summary.index[i])
# Grafica de recta ajustada
x_recta = np.linspace(0.1, 0.3, 100)
y_recta = beta_opt[1] * x_recta + beta_opt[0]
plt.plot(x_recta, y_recta, 'r', lw=3, label='Recta ajustada')
plt.legend(loc='best')
plt.show()
###Output
_____no_output_____
###Markdown
Midiendo rendimiento y riesgo con datos históricos> Ya sabemos que podemos caracterizar la distribución de rendimientos de un activo mediante una medida de tendencia central (media: rendimiento esperado) y una medida de dispersión (desviación estándar: volatilidad). > En la clase pasada vimos como obtener reportes históricos de precios de activos. ¿Cómo usamos estos históricos para medir el rendimiento esperado y la volatilidad de los rendimientos? *Objetivos:*- Calcular los rendimientos a partir de históricos de precios.- Estimar rendimiento esperado y riesgo a partir de históricos de rendimientos.- Anualizar rendimiento y volatilidad cuando los datos están en una base de tiempo menor.- Verificar la relación entre rendimiento y riesgo a través de datos reales.**Referencias:**- http://pandas.pydata.org/- https://pandas-datareader.readthedocs.io/en/latest/- Notas del curso "Portfolio Selection and Risk Management", Rice University, disponible en Coursera.___ 1. Cálculo de los rendimientosMuy bien, ya entonces sabemos descargar históricos de precios... **Ejemplo:** trabajaremos esta clase con activos que se encuentran en el top-10 del índice S&P500. Descargar precios ajustados en el cierre de Microsoft (MSFT), Apple (AAPL), Amazon (AMZN), Facebook (FB) y Alphabet Inc. (GOOGL) desde el primero de enero del 2015 hasta hoy.
###Code
# Importar paquetes
import pandas_datareader.data as web
import pandas as pd
import numpy as np
%matplotlib inline
# Función para descargar precios de cierre ajustados:
def get_adj_closes(tickers, start_date=None, end_date=None):
# Fecha inicio por defecto (start_date='2010-01-01') y fecha fin por defecto (end_date=today)
# Descargamos DataFrame con todos los datos
closes = web.DataReader(name=tickers, data_source='yahoo', start=start_date, end=end_date)
# Solo necesitamos los precios ajustados en el cierre
closes = closes['Adj Close']
# Se ordenan los índices de manera ascendente
closes.sort_index(inplace=True)
return closes
# Información
# Precios diarios
# Gráfico de histórico de precios diarios
###Output
_____no_output_____
###Markdown
1.1. Rendimientos porcentuales Muy bien, pero para el análisis no trabajamos con los precios sino con los rendimientos... **¿porqué?** Para una sucesión de precios $\{S_t\}_{t=0}^{n}$, el rendimiento simple $R_t$ se define como el el cambio porcentual$$R_t=\frac{S_t-S_{t-1}}{S_{t-1}}$$para $t=1,\ldots,n$. *¡Cuidado!* los rendimientos son de acuerdo a la base de tiempo en que se reportan los precios. Por ejemplo: - si los precios se reportan en una base diaria, los rendimientos también son diarios;- si los precios se reportan en una base mensual, los rendimientos también son mensuales.
###Code
# Método shift() de un DataFrame...
# Calcular los rendimientos
# Otra forma (más fácil)
# Graficar...
###Output
_____no_output_____
###Markdown
**¿Qué se observa respecto a los precios?**Respuestas:- Los rendimientos parecen conservar tendecias estadísticas constantes (por ejemplo, oscilan al rededor de números cercanos a cero). 1.2. Rendimientos logarítmicos (log-rendimientos) Otro rendimiento usado con frecuencia es el rendimiento continuamente compuesto o rendimiento logaritmico. Éste, está definido como$$r_t=\ln\left(\frac{S_t}{S_{t-1}}\right).$$Es fácil darse cuenta que $r_t=\ln(1+R_t)$.Ver en el tablero que si $0\leq|x|\ll 1$, entonces $\ln(1+x)\approx x$.
###Code
# Calcular rendimientos continuamente compuestos
# Recordar rendimientos porcentuales. Ver que son similares
# Veamos el valor absoluto de la diferencia
###Output
_____no_output_____
###Markdown
Por lo anterior, muchas veces se usan para el análisis los rendimientos continuamente compuestos.___ 2. Caracterización de la distribución de los rendimientosEntonces:- partimos de que tenemos los rendimientos porcentuales diarios de Apple, Walmart, IBM y Nike desde inicios del 2011 a finales del 2015;- ¿cómo resumirían estos datos?
###Code
# Rendimiento medio diario (media aritmética)
# Volatilidad diaria (desviación estándar)
# Podemos resumir en un DataFrame
###Output
_____no_output_____
###Markdown
Normalmente se reportan rendimientos esperados y volatilidades en una base anual. Para anualizar:$$E[r_a]=12E[r_m]=252E[r_d]=52E[r_w],\text{ y}$$$$\sigma_{r_a}=\sqrt{12}\sigma_{r_m}=\sqrt{252}\sigma_{r_d}=\sqrt{52}\sigma_{r_w}$$
###Code
# Resumen en base anual
# Gráfico rendimiento esperado vs. volatilidad
# Puntos a graficar
# Ventana para graficar
# Graficar puntos
# Etiquetas de los ejes
# Etiqueta de cada instrumento
###Output
_____no_output_____
###Markdown
2.1 Ajuste de curvas con mínimos cuadradosConsideramos que tenemos un conjunto de n pares ordenados de datos $(\sigma_{r_i},E[r_i])$, para $i=1,2,3,\dots,n$... **en este caso corresponden a volatilidad y rendimiento esperado** ¿Cuál es la recta que mejor se ajusta a estos datos?Consideramos entonces ajustes de la forma $\hat{f}(\sigma) = \beta_0+\beta_1 \sigma = \left[1 \quad \sigma\right]\left[\begin{array}{c} \beta_0 \\ \beta_1 \end{array}\right]=\left[1 \quad \sigma\right]\boldsymbol{\beta}$ (lineas rectas).Para decir '*mejor*', tenemos que definir algún sentido en que una recta se ajuste *mejor* que otra.**Mínimos cuadrados**: el objetivo es seleccionar los coeficientes $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$, de forma que la función evaluada en los puntos $\sigma_{r_i}$ ($\hat{f}(\sigma_{r_i})$) aproxime los valores correspondientes $E[r_i]$.La formulación por mínimos cuadrados, encuentra los $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$ que minimiza$$\sum_{i=1}^{n}(E[r_i]-\hat{f}(\sigma_{r_i}))^2$$
###Code
# Importar el módulo optimize de la librería scipy
# Funcion minimize
# Funcion objetivo y condicion inicial
# Resolver problema de optimizacion
# Puntos a graficar
# Ventana para graficar
# Graficar puntos
# Etiquetas de los ejes
# Etiqueta de cada instrumento
# Grafica de recta ajustada
###Output
_____no_output_____
###Markdown
Midiendo rendimiento y riesgo con datos históricos> Ya sabemos que podemos caracterizar la distribución de rendimientos de un activo mediante una medida de tendencia central (media: rendimiento esperado) y una medida de dispersión (desviación estándar: volatilidad). > En la clase pasada vimos como obtener reportes históricos de precios de activos. ¿Cómo usamos estos históricos para medir el rendimiento esperado y la volatilidad de los rendimientos? *Objetivos:*- Calcular los rendimientos a partir de históricos de precios.- Estimar rendimiento esperado y riesgo a partir de históricos de rendimientos.- Anualizar rendimiento y volatilidad cuando los datos están en una base de tiempo menor.- Verificar la relación entre rendimiento y riesgo a través de datos reales.**Referencias:**- http://pandas.pydata.org/- https://pandas-datareader.readthedocs.io/en/latest/- Notas del curso "Portfolio Selection and Risk Management", Rice University, disponible en Coursera.___ 1. Cálculo de los rendimientosMuy bien, ya entonces sabemos descargar históricos de precios... **Ejemplo:** trabajaremos esta clase con activos que se encuentran en el top-10 del índice S&P500. Descargar precios ajustados en el cierre de Microsoft (MSFT), Apple (AAPL), Amazon (AMZN), Facebook (FB) y Alphabet Inc. (GOOGL) desde el primero de enero del 2015 hasta hoy.
###Code
# Importar paquetes
import pandas_datareader.data as web
import pandas as pd
import numpy as np
%matplotlib inline
# Función para descargar precios de cierre ajustados:
def get_adj_closes(tickers, start_date=None, end_date=None):
# Fecha inicio por defecto (start_date='2010-01-01') y fecha fin por defecto (end_date=today)
# Descargamos DataFrame con todos los datos
closes = web.DataReader(name=tickers, data_source='yahoo', start=start_date, end=end_date)
# Solo necesitamos los precios ajustados en el cierre
closes = closes['Adj Close']
# Se ordenan los índices de manera ascendente
closes.sort_index(inplace=True)
return closes
# Información
tickers = ["MSFT", "AAPL", "AMZN", "FB", "GOOGL"]
start = "2015-01-01"
# Precios diarios ajustados en el cierre
closes = get_adj_closes(tickers=tickers,
start_date=start)
closes.head()
# Gráfico de histórico de precios diarios
closes.plot()
###Output
_____no_output_____
###Markdown
1.1. Rendimientos porcentuales Muy bien, pero para el análisis no trabajamos con los precios sino con los rendimientos... **¿por qué?** Para una sucesión de precios $\{S_t\}_{t=0}^{n}$, el rendimiento simple $R_t$ se define como el cambio porcentual$$R_t=\frac{S_t-S_{t-1}}{S_{t-1}}$$para $t=1,\ldots,n$. *¡Cuidado!* los rendimientos quedan en la base de tiempo en que se reportan los precios. Por ejemplo: - si los precios se reportan en una base diaria, los rendimientos también son diarios;- si los precios se reportan en una base mensual, los rendimientos también son mensuales.
###Code
# S_t
St = closes
St.head()
# S_{t-1}
Stm1 = closes.shift()
Stm1.head()
rt = (St - Stm1) / Stm1
rt.head()
# Calcular rendimientos
ret = closes.pct_change().dropna()
ret.head()
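# Sketch: compounding the simple returns recovers the price path relative to the
# starting price, since prod(1 + R_t) = S_t / S_0.
(1 + ret).cumprod().plot()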
###Output
_____no_output_____
###Markdown
`NaN`: Not A Number
###Code
# Graficar...
ret.plot()
###Output
_____no_output_____
###Markdown
**¿Qué se observa respecto a los precios?**Respuestas:- Los rendimientos parecen conservar tendencias estadísticas constantes (por ejemplo, oscilan alrededor de números cercanos a cero). 1.2. Rendimientos logarítmicos (log-rendimientos) Otro rendimiento usado con frecuencia es el rendimiento continuamente compuesto o rendimiento logarítmico. Éste está definido como$$r_t=\ln\left(\frac{S_t}{S_{t-1}}\right).$$Es fácil darse cuenta que $r_t=\ln(1+R_t)$.Ver en el tablero que si $0\leq|x|\ll 1$, entonces $\ln(1+x)\approx x$.
###Code
x = np.linspace(-0.8, 2, 100)
y1 = x
y2 = np.log(1 + x)
from matplotlib import pyplot as plt
plt.plot(x, y1, x, y2)
plt.axvline(x=0, c='k')
plt.axhline(y=0, c='k')
# ¿Cómo calcular S_{t-1}?
closes.shift()
# Calcular rendimientos continuamente compuestos
log_ret = np.log(closes / closes.shift()).dropna()
log_ret.head()
# Recordar rendimientos porcentuales. Ver que son similares
ret.head()
# Veamos el valor absoluto de la diferencia
np.abs(ret - log_ret)
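# Sketch: largest daily gap per asset, to quantify how close both return
# definitions are in practice.
np.abs(ret - log_ret).max()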
###Output
_____no_output_____
###Markdown
Por lo anterior, muchas veces se usan para el análisis los rendimientos continuamente compuestos.___ 2. Caracterización de la distribución de los rendimientosEntonces:- partimos de que tenemos los rendimientos porcentuales diarios de Microsoft (MSFT), Apple (AAPL), Amazon (AMZN), Facebook (FB) y Alphabet Inc. (GOOGL) desde inicios del 2015 hasta hoy;- ¿cómo resumirían estos datos?
###Code
# Rendimiento medio diario (media aritmética)
ret.mean()
# Volatilidad diaria (desviación estándar)
ret.std()
# Podemos resumir en un DataFrame
summary = pd.DataFrame({
'Mean': ret.mean(),
'Vol': ret.std()
}).T
summary
###Output
_____no_output_____
###Markdown
Normalmente se reportan rendimientos esperados y volatilidades en una base anual. Para anualizar:$$E[r_a]=12E[r_m]=252E[r_d]=52E[r_w],\text{ y}$$$$\sigma_{r_a}=\sqrt{12}\sigma_{r_m}=\sqrt{252}\sigma_{r_d}=\sqrt{52}\sigma_{r_w}$$
###Code
# Resumen en base anual
annual_summary = pd.DataFrame({
'Mean': 252 * ret.mean(),
'Vol': 252**0.5 * ret.std()
}).T
annual_summary
annual_summary.columns
# Gráfico rendimiento esperado vs. volatilidad
plt.figure(figsize=(7, 5))
plt.plot(annual_summary.loc['Vol'], # eje x
annual_summary.loc['Mean'], # eje y
'ok', # Estilo de gráfico
ms=10 # Tamaño de marcadores (marker size)
)
for stock in annual_summary.columns:
plt.text(annual_summary.loc['Vol', stock] + 0.001,
annual_summary.loc['Mean', stock] + 0.001,
stock)
plt.xlabel("Volaitlidad $\sigma$")
plt.ylabel("Rendimiento esperado $E[r]$")
###Output
_____no_output_____
###Markdown
2.1 Ajuste de curvas con mínimos cuadradosConsideramos que tenemos un conjunto de n pares ordenados de datos $(\sigma_{r_i},E[r_i])$, para $i=1,2,3,\dots,n$... **en este caso corresponden a volatilidad y rendimiento esperado** ¿Cuál es la recta que mejor se ajusta a estos datos?Consideramos entonces ajustes de la forma $\hat{f}(\sigma) = \beta_0+\beta_1 \sigma = \left[1 \quad \sigma\right]\left[\begin{array}{c} \beta_0 \\ \beta_1 \end{array}\right]=\left[1 \quad \sigma\right]\boldsymbol{\beta}$ (lineas rectas).Para decir '*mejor*', tenemos que definir algún sentido en que una recta se ajuste *mejor* que otra.**Mínimos cuadrados**: el objetivo es seleccionar los coeficientes $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$, de forma que la función evaluada en los puntos $\sigma_{r_i}$ ($\hat{f}(\sigma_{r_i})$) aproxime los valores correspondientes $E[r_i]$.La formulación por mínimos cuadrados, encuentra los $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$ que minimiza$$\sum_{i=1}^{n}(E[r_i]-\hat{f}(\sigma_{r_i}))^2$$
###Code
# Importar el módulo optimize de la librería scipy
from scipy.optimize import minimize
# Funcion minimize
help(minimize)
# Funcion objetivo y condicion inicial
def fun_obj(beta, x, y):
recta = beta[0] + beta[1] * x
errores = y - recta
return (errores**2).mean()
beta_ini = [0, 0]
# Resolver problema de optimizacion
sol = minimize(fun=fun_obj,
x0=beta_ini,
args=(annual_summary.loc['Vol'].drop('FB'),
annual_summary.loc['Mean'].drop('FB')))
sol
beta = sol.x
beta
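# Sketch: FB was excluded from the fit above, so evaluate the fitted line at FB's
# volatility and compare with its observed mean return.
beta[0] + beta[1] * annual_summary.loc['Vol', 'FB'], annual_summary.loc['Mean', 'FB']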
# Gráfico rendimiento esperado vs. volatilidad
plt.figure(figsize=(7, 5))
plt.plot(annual_summary.loc['Vol'], # eje x
annual_summary.loc['Mean'], # eje y
'ok', # Estilo de gráfico
ms=10 # Tamaño de marcadores (marker size)
)
for stock in annual_summary.columns:
plt.text(annual_summary.loc['Vol', stock] + 0.001,
annual_summary.loc['Mean', stock] + 0.001,
stock)
x = np.linspace(0.25, 0.35)
plt.plot(x, beta[0] + beta[1] * x, 'r', lw=3)
plt.xlabel("Volaitlidad $\sigma$")
plt.ylabel("Rendimiento esperado $E[r]$")
beta
###Output
_____no_output_____
###Markdown
Midiendo rendimiento y riesgo con datos históricos> Ya sabemos que podemos caracterizar la distribución de rendimientos de un activo mediante una medida de tendencia central (media: rendimiento esperado) y una medida de dispersión (desviación estándar: volatilidad). > En la clase pasada vimos como obtener reportes históricos de precios de activos. ¿Cómo usamos estos históricos para medir el rendimiento esperado y la volatilidad de los rendimientos? *Objetivos:*- Calcular los rendimientos a partir de históricos de precios.- Estimar rendimiento esperado y riesgo a partir de históricos de rendimientos.- Anualizar rendimiento y volatilidad cuando los datos están en una base de tiempo menor.- Verificar la relación entre rendimiento y riesgo a través de datos reales.**Referencias:**- http://pandas.pydata.org/- https://pandas-datareader.readthedocs.io/en/latest/- Notas del curso "Portfolio Selection and Risk Management", Rice University, disponible en Coursera.___ 1. Cálculo de los rendimientosMuy bien, ya entonces sabemos descargar históricos de precios... **Ejemplo:** trabajaremos esta clase con activos que se encuentran en el top-10 del índice S&P500. Descargar precios ajustados en el cierre de Microsoft (MSFT), Apple (AAPL), Amazon (AMZN), Facebook (FB) y Alphabet Inc. (GOOGL) desde el primero de enero del 2015 hasta hoy.
###Code
# Importar paquetes
import pandas_datareader.data as web
import pandas as pd
import numpy as np
%matplotlib inline
# Función para descargar precios de cierre ajustados:
def get_adj_closes(tickers, start_date=None, end_date=None):
# Fecha inicio por defecto (start_date='2010-01-01') y fecha fin por defecto (end_date=today)
# Descargamos DataFrame con todos los datos
closes = web.DataReader(name=tickers, data_source='yahoo', start=start_date, end=end_date)
# Solo necesitamos los precios ajustados en el cierre
closes = closes['Adj Close']
# Se ordenan los índices de manera ascendente
closes.sort_index(inplace=True)
return closes
# Información
# Precios diarios ajustados en el cierre
# Gráfico de histórico de precios diarios
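# Bosquejo ilustrativo (añadido): una posible llamada, como en versiones posteriores
# de este cuaderno; requiere conexión y que la fuente 'yahoo' siga disponible,
# por eso se deja comentada.
# closes = get_adj_closes(tickers=['MSFT', 'AAPL', 'AMZN', 'FB', 'GOOGL'],
#                         start_date='2015-01-01')
# closes.plot()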
###Output
_____no_output_____
###Markdown
1.1. Rendimientos porcentuales Muy bien, pero para el análisis no trabajamos con los precios sino con los rendimientos... **¿por qué?** Para una sucesión de precios $\{S_t\}_{t=0}^{n}$, el rendimiento simple $R_t$ se define como el cambio porcentual$$R_t=\frac{S_t-S_{t-1}}{S_{t-1}}$$para $t=1,\ldots,n$. *¡Cuidado!* los rendimientos son de acuerdo a la base de tiempo en que se reportan los precios. Por ejemplo: - si los precios se reportan en una base diaria, los rendimientos también son diarios;- si los precios se reportan en una base mensual, los rendimientos también son mensuales.
###Code
# S_{t-1}
# Calcular rendimientos
# Graficar...
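# Bosquejo ilustrativo (añadido): la fórmula R_t = (S_t - S_{t-1}) / S_{t-1}
# con una pequeña serie sintética de precios.
_precios = pd.Series([100.0, 102.0, 99.0, 101.0])
_rend = _precios.pct_change().dropna()   # equivale a (S_t - S_{t-1}) / S_{t-1}
_rend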
###Output
_____no_output_____
###Markdown
**¿Qué se observa respecto a los precios?**Respuestas:- Los rendimientos parecen conservar tendencias estadísticas constantes (por ejemplo, oscilan alrededor de números cercanos a cero). 1.2. Rendimientos logarítmicos (log-rendimientos) Otro rendimiento usado con frecuencia es el rendimiento continuamente compuesto o rendimiento logarítmico. Éste está definido como$$r_t=\ln\left(\frac{S_t}{S_{t-1}}\right).$$Es fácil darse cuenta que $r_t=\ln(1+R_t)$.Ver en el tablero que si $0\leq|x|\ll 1$, entonces $\ln(1+x)\approx x$.
###Code
# Calcular rendimientos continuamente compuestos
# Recordar rendimientos porcentuales. Ver que son similares
# Veamos el valor absoluto de la diferencia
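# Bosquejo ilustrativo (añadido): r_t = ln(S_t / S_{t-1}) con datos sintéticos;
# para rendimientos pequeños, ln(1 + R_t) es aproximadamente R_t.
_p = pd.Series([100.0, 102.0, 99.0, 101.0])
_log_rend = np.log(_p / _p.shift()).dropna()
np.abs(_log_rend - _p.pct_change().dropna())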
###Output
_____no_output_____
###Markdown
Por lo anterior, muchas veces se usan para el análisis los rendimientos continuamente compuestos.___ 2. Caracterización de la distribución de los rendimientosEntonces:- partimos de que tenemos los rendimientos porcentuales diarios de Microsoft, Apple, Amazon, Facebook y Alphabet desde inicios del 2015 hasta hoy;- ¿cómo resumirían estos datos?
###Code
# Rendimiento medio diario (media aritmética)
# Volatilidad diaria (desviación estándar)
# Podemos resumir en un DataFrame
###Output
_____no_output_____
###Markdown
Normalmente se reportan rendimientos esperados y volatilidades en una base anual. Para anualizar:$$E[r_a]=12E[r_m]=252E[r_d]=52E[r_w],\text{ y}$$$$\sigma_{r_a}=\sqrt{12}\sigma_{r_m}=\sqrt{252}\sigma_{r_d}=\sqrt{52}\sigma_{r_w}$$
###Code
# Resumen en base anual
# Gráfico rendimiento esperado vs. volatilidad
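# Bosquejo ilustrativo (añadido): anualización con rendimientos diarios sintéticos,
# usando E[r_a] = 252 * E[r_d] y sigma_a = sqrt(252) * sigma_d.
_r_d = pd.Series(np.random.normal(0.0005, 0.01, 252))
print('Rendimiento esperado anual:', 252 * _r_d.mean())
print('Volatilidad anual:', np.sqrt(252) * _r_d.std())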
###Output
_____no_output_____
###Markdown
2.1 Ajuste de curvas con mínimos cuadradosConsideramos que tenemos un conjunto de n pares ordenados de datos $(\sigma_{r_i},E[r_i])$, para $i=1,2,3,\dots,n$... **en este caso corresponden a volatilidad y rendimiento esperado** ¿Cuál es la recta que mejor se ajusta a estos datos?Consideramos entonces ajustes de la forma $\hat{f}(\sigma) = \beta_0+\beta_1 \sigma = \left[1 \quad \sigma\right]\left[\begin{array}{c} \beta_0 \\ \beta_1 \end{array}\right]=\left[1 \quad \sigma\right]\boldsymbol{\beta}$ (lineas rectas).Para decir '*mejor*', tenemos que definir algún sentido en que una recta se ajuste *mejor* que otra.**Mínimos cuadrados**: el objetivo es seleccionar los coeficientes $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$, de forma que la función evaluada en los puntos $\sigma_{r_i}$ ($\hat{f}(\sigma_{r_i})$) aproxime los valores correspondientes $E[r_i]$.La formulación por mínimos cuadrados, encuentra los $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$ que minimiza$$\sum_{i=1}^{n}(E[r_i]-\hat{f}(\sigma_{r_i}))^2$$
###Code
# Importar el módulo optimize de la librería scipy
# Funcion minimize
# Funcion objetivo y condicion inicial
# Resolver problema de optimizacion
# Ventana para graficar
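# Bosquejo ilustrativo (añadido): mínimos cuadrados con scipy.optimize.minimize
# sobre datos sintéticos; el resultado es beta = [beta0, beta1].
from scipy.optimize import minimize as _minimize
_x = np.array([0.10, 0.20, 0.30, 0.40])
_y = np.array([0.05, 0.09, 0.16, 0.20])
_obj = lambda b: ((_y - (b[0] + b[1] * _x))**2).sum()
_minimize(_obj, x0=[0.0, 0.0]).x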
###Output
_____no_output_____
###Markdown
Midiendo rendimiento y riesgo con datos históricos> Ya sabemos que podemos caracterizar la distribución de rendimientos de un activo mediante una medida de tendencia central (media: rendimiento esperado) y una medida de dispersión (desviación estándar: volatilidad). > En la clase pasada vimos como obtener reportes históricos de precios de activos. ¿Cómo usamos estos históricos para medir el rendimiento esperado y la volatilidad de los rendimientos? *Objetivos:*- Calcular los rendimientos a partir de históricos de precios.- Estimar rendimiento esperado y riesgo a partir de históricos de rendimientos.- Anualizar rendimiento y volatilidad cuando los datos están en una base de tiempo menor.- Verificar la relación entre rendimiento y riesgo a través de datos reales.**Referencias:**- http://pandas.pydata.org/- https://pandas-datareader.readthedocs.io/en/latest/- Notas del curso "Portfolio Selection and Risk Management", Rice University, disponible en Coursera.___ 1. Cálculo de los rendimientosMuy bien, ya entonces sabemos descargar históricos de precios... **Ejemplo:** trabajaremos esta clase con activos que se encuentran en el top-10 del índice S&P500. Descargar precios ajustados en el cierre de Microsoft (MSFT), Apple (AAPL), Amazon (AMZN), Facebook (FB) y Alphabet Inc. (GOOGL) desde el primero de enero del 2015 hasta hoy.
###Code
# Importar paquetes
import pandas_datareader.data as web
import pandas as pd
import numpy as np
%matplotlib inline
# Función para descargar precios de cierre ajustados:
def get_adj_closes(tickers, start_date=None, end_date=None):
# Fecha inicio por defecto (start_date='2010-01-01') y fecha fin por defecto (end_date=today)
# Descargamos DataFrame con todos los datos
closes = web.DataReader(name=tickers, data_source='yahoo', start=start_date, end=end_date)
# Solo necesitamos los precios ajustados en el cierre
closes = closes['Adj Close']
# Se ordenan los índices de manera ascendente
closes.sort_index(inplace=True)
return closes
# Información
tickers = ['MSFT', 'AAPL', 'AMZN', 'FB', 'GOOGL', '^GSPC']
start = '2015-01-01'
# Precios diarios ajustados en el cierre
closes = get_adj_closes(tickers=tickers,
start_date=start)
closes.head()
# Gráfico de histórico de precios diarios
closes.plot();
###Output
_____no_output_____
###Markdown
1.1. Rendimientos porcentuales Muy bien, pero para el análisis no trabajamos con los precios sino con los rendimientos... **¿por qué?** Para una sucesión de precios $\{S_t\}_{t=0}^{n}$, el rendimiento simple $R_t$ se define como el cambio porcentual$$R_t=\frac{S_t-S_{t-1}}{S_{t-1}}$$para $t=1,\ldots,n$. *¡Cuidado!* los rendimientos son de acuerdo a la base de tiempo en que se reportan los precios. Por ejemplo: - si los precios se reportan en una base diaria, los rendimientos también son diarios;- si los precios se reportan en una base mensual, los rendimientos también son mensuales.
###Code
# S_{t-1}
r = (closes - closes.shift()) / closes.shift()
r.head()
help(closes.pct_change)
# Calcular rendimientos
ret = closes.pct_change().dropna()
ret.head()
# Graficar...
ret.plot();
###Output
_____no_output_____
###Markdown
**¿Qué se observa respecto a los precios?**Respuestas:- Los rendimientos parecen conservar tendencias estadísticas constantes (por ejemplo, oscilan alrededor de números cercanos a cero). 1.2. Rendimientos logarítmicos (log-rendimientos) Otro rendimiento usado con frecuencia es el rendimiento continuamente compuesto o rendimiento logarítmico. Éste está definido como$$r_t=\ln\left(\frac{S_t}{S_{t-1}}\right).$$Es fácil darse cuenta que $r_t=\ln(1+R_t)$.Ver en el tablero que si $0\leq|x|\ll 1$, entonces $\ln(1+x)\approx x$.
###Code
# Calcular rendimientos continuamente compuestos
log_ret = np.log(closes / closes.shift()).dropna()
log_ret.head()
log_ret2 = np.log(ret + 1)
log_ret == log_ret2
# Recordar rendimientos porcentuales. Ver que son similares
ret.head()
# Veamos el valor absoluto de la diferencia
np.abs(ret - log_ret)
###Output
_____no_output_____
###Markdown
Por lo anterior, muchas veces se usan para el análisis los rendimientos continuamente compuestos.___ 2. Caracterización de la distribución de los rendimientosEntonces:- partimos de que tenemos los rendimientos porcentuales diarios de Microsoft, Apple, Amazon, Facebook y Alphabet desde inicios del 2015 hasta hoy;- ¿cómo resumirían estos datos?
###Code
# Rendimiento medio diario (media aritmética)
ret.mean()
# Volatilidad diaria (desviación estándar)
ret.std()
# Podemos resumir en un DataFrame
summary = pd.DataFrame(data={'Media': ret.mean(),
'Vol': ret.std()})
summary
###Output
_____no_output_____
###Markdown
Normalmente se reportan rendimientos esperados y volatilidades en una base anual. Para anualizar:$$E[r_a]=12E[r_m]=252E[r_d]=52E[r_w],\text{ y}$$$$\sigma_{r_a}=\sqrt{12}\sigma_{r_m}=\sqrt{252}\sigma_{r_d}=\sqrt{52}\sigma_{r_w}$$
###Code
# Resumen en base anual
annual_summary = pd.DataFrame(data={'Media': 252 * ret.mean(),
'Vol': (252**0.5) * ret.std()})
annual_summary
from matplotlib import pyplot as plt
# Gráfico rendimiento esperado vs. volatilidad
plt.figure(figsize=(6, 4))
plt.plot(annual_summary['Vol'], annual_summary['Media'], 'bo', ms=10)
for i in range(len(annual_summary)):
plt.text(annual_summary.iloc[i, 1], annual_summary.iloc[i, 0], annual_summary.index[i])
plt.xlabel('Volatilidad $\sigma$')
plt.ylabel('Rendimiento esperado $E[r]$')
plt.grid()
###Output
_____no_output_____
###Markdown
2.1 Ajuste de curvas con mínimos cuadradosConsideramos que tenemos un conjunto de n pares ordenados de datos $(\sigma_{r_i},E[r_i])$, para $i=1,2,3,\dots,n$... **en este caso corresponden a volatilidad y rendimiento esperado** ¿Cuál es la recta que mejor se ajusta a estos datos?Consideramos entonces ajustes de la forma $\hat{f}(\sigma) = \beta_0+\beta_1 \sigma = \left[1 \quad \sigma\right]\left[\begin{array}{c} \beta_0 \\ \beta_1 \end{array}\right]=\left[1 \quad \sigma\right]\boldsymbol{\beta}$ (lineas rectas).Para decir '*mejor*', tenemos que definir algún sentido en que una recta se ajuste *mejor* que otra.**Mínimos cuadrados**: el objetivo es seleccionar los coeficientes $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$, de forma que la función evaluada en los puntos $\sigma_{r_i}$ ($\hat{f}(\sigma_{r_i})$) aproxime los valores correspondientes $E[r_i]$.La formulación por mínimos cuadrados, encuentra los $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$ que minimiza$$\sum_{i=1}^{n}(E[r_i]-\hat{f}(\sigma_{r_i}))^2$$
###Code
# Importar el módulo optimize de la librería scipy
from scipy import optimize as opt
# Funcion minimize
help(opt.minimize)
# Funcion objetivo y condicion inicial
def fun_obj(beta, E_r, s):
recta = beta[0] + beta[1] * s
return ((E_r - recta)**2).sum()
beta_ini = [0, 0]
# Resolver problema de optimizacion
min_sq = opt.minimize(fun=fun_obj,
x0=beta_ini,
args=(annual_summary['Media'], annual_summary['Vol']))
min_sq
beta = min_sq.x
beta
# Ventana para graficar
plt.figure(figsize=(6, 4))
plt.plot(annual_summary['Vol'], annual_summary['Media'], 'bo', ms=10)
s = np.linspace(0.15, 0.35)
plt.plot(s, beta[0] + beta[1] * s, 'r', lw=2, label='Ajuste por mínimos cuadrados')
for i in range(len(annual_summary)):
plt.text(annual_summary.iloc[i, 1], annual_summary.iloc[i, 0], annual_summary.index[i])
plt.xlabel('Volatilidad $\sigma$')
plt.ylabel('Rendimiento esperado $E[r]$')
plt.grid()
plt.legend(loc='best')
###Output
_____no_output_____
###Markdown
Midiendo rendimiento y riesgo con datos históricos> Ya sabemos que podemos caracterizar la distribución de rendimientos de un activo mediante una medida de tendencia central (media: rendimiento esperado) y una medida de dispersión (desviación estándar: volatilidad). > En la clase pasada vimos como obtener reportes históricos de precios de activos. ¿Cómo usamos estos históricos para medir el rendimiento esperado y la volatilidad de los rendimientos? *Objetivos:*- Calcular los rendimientos a partir de históricos de precios.- Estimar rendimiento esperado y riesgo a partir de históricos de rendimientos.- Anualizar rendimiento y volatilidad cuando los datos están en una base de tiempo menor.- Verificar la relación entre rendimiento y riesgo a través de datos reales.**Referencias:**- http://pandas.pydata.org/- https://pandas-datareader.readthedocs.io/en/latest/- Notas del curso "Portfolio Selection and Risk Management", Rice University, disponible en Coursera.___ 1. Cálculo de los rendimientosMuy bien, ya entonces sabemos descargar históricos de precios... **Ejemplo:** trabajaremos esta clase con activos que se encuentran en el top-10 del índice S&P500. Descargar precios ajustados en el cierre de Microsoft (MSFT), Apple (AAPL), Amazon (AMZN), Facebook (FB) y Alphabet Inc. (GOOGL) desde el primero de enero del 2015 hasta hoy.
###Code
# Importar paquetes
import pandas_datareader.data as web
import pandas as pd
import numpy as np
%matplotlib inline
# Función para descargar precios de cierre ajustados:
def get_adj_closes(tickers, start_date=None, end_date=None):
# Fecha inicio por defecto (start_date='2010-01-01') y fecha fin por defecto (end_date=today)
# Descargamos DataFrame con todos los datos
closes = web.DataReader(name=tickers, data_source='yahoo', start=start_date, end=end_date)
# Solo necesitamos los precios ajustados en el cierre
closes = closes['Adj Close']
# Se ordenan los índices de manera ascendente
closes.sort_index(inplace=True)
return closes
# Información
closes = get_adj_closes(tickers=["MSFT", "AAPL", "AMZN", "FB", "GOOGL"],
start_date="2015-01-01")
# Precios diarios ajustados en el cierre
closes.head()
closes.tail()
# Gráfico de histórico de precios diarios
closes.plot();
###Output
_____no_output_____
###Markdown
1.1. Rendimientos porcentuales Muy bien, pero para el análisis no trabajamos con los precios sino con los rendimientos... **¿por qué?** Para una sucesión de precios $\{S_t\}_{t=0}^{n}$, el rendimiento simple $R_t$ se define como el cambio porcentual$$R_t=\frac{S_t-S_{t-1}}{S_{t-1}}$$para $t=1,\ldots,n$. *¡Cuidado!* los rendimientos son de acuerdo a la base de tiempo en que se reportan los precios. Por ejemplo: - si los precios se reportan en una base diaria, los rendimientos también son diarios;- si los precios se reportan en una base mensual, los rendimientos también son mensuales.
###Code
closes.pct_change?
(40.889702 - 41.269203) / 41.269203
# Calcular rendimientos
ret = closes.pct_change().dropna()
ret.head()
# Graficar...
ret.plot();
###Output
_____no_output_____
###Markdown
**¿Qué se observa respecto a los precios?**Respuestas:- Los rendimientos parecen conservar tendencias estadísticas constantes (por ejemplo, oscilan alrededor de números cercanos a cero). 1.2. Rendimientos logarítmicos (log-rendimientos) Otro rendimiento usado con frecuencia es el rendimiento continuamente compuesto o rendimiento logarítmico. Éste está definido como$$r_t=\ln\left(\frac{S_t}{S_{t-1}}\right).$$Es fácil darse cuenta que $r_t=\ln(1+R_t)$.Ver en el tablero que si $0\leq|x|\ll 1$, entonces $\ln(1+x)\approx x$.
###Code
# ¿Cómo calcular S_{t-1}?
closes.shift?
closes.shift().head()
closes.head()
# Calcular rendimientos continuamente compuestos
log_ret = np.log(closes / closes.shift()).dropna()
log_ret.head()
# Recordar rendimientos porcentuales. Ver que son similares
ret.head()
# Veamos el valor absoluto de la diferencia
np.abs(ret - log_ret).head()
###Output
_____no_output_____
###Markdown
Por lo anterior, muchas veces se usan para el análisis los rendimientos continuamente compuestos.___ 2. Caracterización de la distribución de los rendimientosEntonces:- partimos de que tenemos los rendimientos porcentuales diarios de Microsoft, Apple, Amazon, Facebook y Alphabet desde inicios del 2015 hasta hoy;- ¿cómo resumirían estos datos?
###Code
# Rendimiento medio diario (media aritmética)
# Volatilidad diaria (desviación estándar)
# Podemos resumir en un DataFrame
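# Bosquejo ilustrativo (añadido): con rendimientos diarios sintéticos, el resumen
# se construye con la media aritmética y la desviación estándar por columna.
_r = pd.DataFrame(np.random.normal(0.001, 0.02, size=(250, 2)), columns=['ActivoA', 'ActivoB'])
_resumen = pd.DataFrame({'Media': _r.mean(), 'Vol': _r.std()})
_resumen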
###Output
_____no_output_____
###Markdown
Normalmente se reportan rendimientos esperados y volatilidades en una base anual. Para anualizar:$$E[r_a]=12E[r_m]=252E[r_d]=52E[r_w],\text{ y}$$$$\sigma_{r_a}=\sqrt{12}\sigma_{r_m}=\sqrt{252}\sigma_{r_d}=\sqrt{52}\sigma_{r_w}$$
###Code
# Resumen en base anual
# Gráfico rendimiento esperado vs. volatilidad
###Output
_____no_output_____
###Markdown
2.1 Ajuste de curvas con mínimos cuadradosConsideramos que tenemos un conjunto de n pares ordenados de datos $(\sigma_{r_i},E[r_i])$, para $i=1,2,3,\dots,n$... **en este caso corresponden a volatilidad y rendimiento esperado** ¿Cuál es la recta que mejor se ajusta a estos datos?Consideramos entonces ajustes de la forma $\hat{f}(\sigma) = \beta_0+\beta_1 \sigma = \left[1 \quad \sigma\right]\left[\begin{array}{c} \beta_0 \\ \beta_1 \end{array}\right]=\left[1 \quad \sigma\right]\boldsymbol{\beta}$ (lineas rectas).Para decir '*mejor*', tenemos que definir algún sentido en que una recta se ajuste *mejor* que otra.**Mínimos cuadrados**: el objetivo es seleccionar los coeficientes $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$, de forma que la función evaluada en los puntos $\sigma_{r_i}$ ($\hat{f}(\sigma_{r_i})$) aproxime los valores correspondientes $E[r_i]$.La formulación por mínimos cuadrados, encuentra los $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$ que minimiza$$\sum_{i=1}^{n}(E[r_i]-\hat{f}(\sigma_{r_i}))^2$$
###Code
# Importar el módulo optimize de la librería scipy
# Funcion minimize
# Funcion objetivo y condicion inicial
# Resolver problema de optimizacion
# Ventana para graficar
###Output
_____no_output_____
###Markdown
Midiendo rendimiento y riesgo con datos históricos> Ya sabemos que podemos caracterizar la distribución de rendimientos de un activo mediante una medida de tendencia central (media: rendimiento esperado) y una medida de dispersión (desviación estándar: volatilidad). > En la clase pasada vimos como obtener reportes históricos de precios de activos. ¿Cómo usamos estos históricos para medir el rendimiento esperado y la volatilidad de los rendimientos? *Objetivos:*- Calcular los rendimientos a partir de históricos de precios.- Estimar rendimiento esperado y riesgo a partir de históricos de rendimientos.- Anualizar rendimiento y volatilidad cuando los datos están en una base de tiempo menor.- Verificar la relación entre rendimiento y riesgo a través de datos reales.**Referencias:**- http://pandas.pydata.org/- https://pandas-datareader.readthedocs.io/en/latest/- Notas del curso "Portfolio Selection and Risk Management", Rice University, disponible en Coursera.___ 1. Cálculo de los rendimientosMuy bien, ya entonces sabemos descargar históricos de precios... **Ejemplo:** trabajaremos esta clase con activos que se encuentran en el top-10 del índice S&P500. Descargar precios ajustados en el cierre de Microsoft (MSFT), Apple (AAPL), Amazon (AMZN), Facebook (FB) y Alphabet Inc. (GOOGL) desde el primero de enero del 2015 hasta hoy.
###Code
# Importar paquetes
import pandas_datareader.data as web
import pandas as pd
import numpy as np
%matplotlib inline
# Función para descargar precios de cierre ajustados:
def get_adj_closes(tickers, start_date=None, end_date=None):
# Fecha inicio por defecto (start_date='2010-01-01') y fecha fin por defecto (end_date=today)
# Descargamos DataFrame con todos los datos
closes = web.DataReader(name=tickers, data_source='yahoo', start=start_date, end=end_date)
# Solo necesitamos los precios ajustados en el cierre
closes = closes['Adj Close']
# Se ordenan los índices de manera ascendente
closes.sort_index(inplace=True)
return closes
# Información
names = ['MSFT', 'AAPL', 'AMZN', 'FB', 'GOOGL', '^GSPC']
start_date = '2015-01-01'
# Precios diarios
closes = get_adj_closes(tickers=names,
start_date=start_date
)
closes.head()
# Gráfico de histórico de precios diarios
closes[['MSFT', 'AAPL', 'AMZN', 'FB', 'GOOGL']].plot()
###Output
_____no_output_____
###Markdown
1.1. Rendimientos porcentuales Muy bien, pero para el análisis no trabajamos con los precios sino con los rendimientos... **¿por qué?** Para una sucesión de precios $\{S_t\}_{t=0}^{n}$, el rendimiento simple $R_t$ se define como el cambio porcentual$$R_t=\frac{S_t-S_{t-1}}{S_{t-1}}$$para $t=1,\ldots,n$. *¡Cuidado!* los rendimientos son de acuerdo a la base de tiempo en que se reportan los precios. Por ejemplo: - si los precios se reportan en una base diaria, los rendimientos también son diarios;- si los precios se reportan en una base mensual, los rendimientos también son mensuales.
###Code
# Método shift() de un DataFrame...
help(closes.shift)
closes.head(3)
closes.shift().head(3)
# Calcular los rendimientos
St = closes
St_1 = closes.shift()
ret = ((St - St_1) / St_1).dropna()
ret.head()
help(closes.pct_change)
# Otra forma (más fácil)
ret = closes.pct_change().dropna()
ret.head()
# Graficar...
ret.plot()
###Output
_____no_output_____
###Markdown
**¿Qué se observa respecto a los precios?**Respuestas:- Los rendimientos parecen conservar tendencias estadísticas constantes (por ejemplo, oscilan alrededor de números cercanos a cero). 1.2. Rendimientos logarítmicos (log-rendimientos) Otro rendimiento usado con frecuencia es el rendimiento continuamente compuesto o rendimiento logarítmico. Éste está definido como$$r_t=\ln\left(\frac{S_t}{S_{t-1}}\right).$$Es fácil darse cuenta que $r_t=\ln(1+R_t)$.Ver en el tablero que si $0\leq|x|\ll 1$, entonces $\ln(1+x)\approx x$.
###Code
# Calcular rendimientos continuamente compuestos
log_ret = np.log(St / St_1).dropna()
log_ret.head(3)
# Recordar rendimientos porcentuales. Ver que son similares
ret.head(3)
# Veamos el valor absoluto de la diferencia
np.abs(ret - log_ret).head()
###Output
_____no_output_____
###Markdown
Por lo anterior, muchas veces se usan para el análisis los rendimientos continuamente compuestos.___ 2. Caracterización de la distribución de los rendimientosEntonces:- partimos de que tenemos los rendimientos porcentuales diarios de Microsoft, Apple, Amazon, Facebook y Alphabet desde inicios del 2015 hasta hoy;- ¿cómo resumirían estos datos?
###Code
# Rendimiento medio diario (media aritmética)
ret.mean()
# Volatilidad diaria (desviación estándar)
ret.std()
# Podemos resumir en un DataFrame
resumen = pd.DataFrame({'R.E.': ret.mean(), 'Vol': ret.std()})
resumen
###Output
_____no_output_____
###Markdown
Normalmente se reportan rendimientos esperados y volatilidades en una base anual. Para anualizar:$$E[r_a]=12E[r_m]=252E[r_d]=52E[r_w],\text{ y}$$$$\sigma_{r_a}=\sqrt{12}\sigma_{r_m}=\sqrt{252}\sigma_{r_d}=\sqrt{52}\sigma_{r_w}$$
###Code
# Resumen en base anual
resumen_anual = pd.DataFrame({'R.E.': 252 * ret.mean(), 'Vol': np.sqrt(252) * ret.std()})
resumen_anual
# Gráfico rendimiento esperado vs. volatilidad
import matplotlib.pyplot as plt
%matplotlib inline
# Puntos a graficar
# Ventana para graficar
plt.figure(figsize=(6, 4))
# Graficar puntos
plt.plot(resumen_anual['Vol'], resumen_anual['R.E.'], 'o', ms=10)
# Etiquetas de los ejes
plt.xlabel('Volatilidad $\sigma$')
plt.ylabel('Rendimiento esperado $E[r]$')
# Etiqueta de cada instrumento
for i in range(len(resumen_anual)):
plt.text(resumen_anual.iloc[i, 1], resumen_anual.iloc[i, 0], resumen_anual.index[i])
plt.grid()
###Output
_____no_output_____
###Markdown
2.1 Ajuste de curvas con mínimos cuadradosConsideramos que tenemos un conjunto de n pares ordenados de datos $(\sigma_{r_i},E[r_i])$, para $i=1,2,3,\dots,n$... **en este caso corresponden a volatilidad y rendimiento esperado** ¿Cuál es la recta que mejor se ajusta a estos datos?Consideramos entonces ajustes de la forma $\hat{f}(\sigma) = \beta_0+\beta_1 \sigma = \left[1 \quad \sigma\right]\left[\begin{array}{c} \beta_0 \\ \beta_1 \end{array}\right]=\left[1 \quad \sigma\right]\boldsymbol{\beta}$ (lineas rectas).Para decir '*mejor*', tenemos que definir algún sentido en que una recta se ajuste *mejor* que otra.**Mínimos cuadrados**: el objetivo es seleccionar los coeficientes $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$, de forma que la función evaluada en los puntos $\sigma_{r_i}$ ($\hat{f}(\sigma_{r_i})$) aproxime los valores correspondientes $E[r_i]$.La formulación por mínimos cuadrados, encuentra los $\boldsymbol{\beta}=\left[\beta_0 \quad \beta_1 \right]^T$ que minimiza$$\sum_{i=1}^{n}(E[r_i]-\hat{f}(\sigma_{r_i}))^2$$
###Code
# Importar el módulo optimize de la librería scipy
from scipy.optimize import minimize
# Funcion minimize
help(minimize)
# Funcion objetivo y condicion inicial
def fun_obj(beta, x, y):
recta = beta[0] + beta[1] * x
return ((y - recta)**2).sum()
# Resolver problema de optimizacion
x0 = [0, 0]
solucion = minimize(fun=fun_obj,
x0=x0,
args=(resumen_anual['Vol'], resumen_anual['R.E.'])
)
solucion
beta = solucion.x
beta
# Nota: la recta ajustada tiene la forma y = m * x + b, es decir, E[r] = beta0 + beta1 * sigma
# Ventana para graficar
plt.figure(figsize=(6, 4))
# Graficar puntos
plt.plot(resumen_anual['Vol'], resumen_anual['R.E.'], 'o', ms=10)
# Etiquetas de los ejes
plt.xlabel('Volatilidad $\sigma$')
plt.ylabel('Rendimiento esperado $E[r]$')
# Etiqueta de cada instrumento
for i in range(len(resumen_anual)):
plt.text(resumen_anual.iloc[i, 1], resumen_anual.iloc[i, 0], resumen_anual.index[i])
plt.grid()
# Grafica de recta ajustada
vol = np.linspace(0.1, 0.4)
plt.plot(vol, beta[0] + beta[1] * vol,
label=f'Recta ajustada $E[r]=${np.round(beta[0], 2)}$+${np.round(beta[1], 2)}$\sigma$')
plt.legend(loc='best')
'mensaje {}'.format(20 / 2)
f'mensaje {20 / 2}'
help(np.linspace)
###Output
Help on function linspace in module numpy:
linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0)
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop`].
The endpoint of the interval can optionally be excluded.
.. versionchanged:: 1.16.0
Non-scalar `start` and `stop` are now supported.
Parameters
----------
start : array_like
The starting value of the sequence.
stop : array_like
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
dtype : dtype, optional
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
.. versionadded:: 1.9.0
axis : int, optional
The axis in the result to store the samples. Relevant only if start
or stop are array-like. By default (0), the samples will be along a
new axis inserted at the beginning. Use -1 to get an axis at the end.
.. versionadded:: 1.16.0
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float, optional
Only returned if `retstep` is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
geomspace : Similar to `linspace`, but with numbers spaced evenly on a log
scale (a geometric progression).
logspace : Similar to `geomspace`, but with the end points specified as
logarithms.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
|
UDACITY/wrangle_act.ipynb | ###Markdown
Table of contents* [Data Gathering](data-gathering)* [Data Assessment](data-assessment)* [Data Cleaning](data-cleaning)* [Data Exploring](data-exploring)* [Data Evaluation](data-evaluation) Imports* Pandas - Dealing with data* Numpy - C-based functions resulting in faster times* Matplotlib.pyplot - Dealing with visualization* Seaborn - Dealing with visualization* Tweepy - Allowing us to interact with Twitter's API* twitter_keys - API keys
###Code
import pandas as pd
from pandas.io.json import json_normalize
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import time
import tweepy
import requests
# API Key
# import twitter_keys
# consumer_key = twitter_keys.consumer_key
# consumer_secret = twitter_keys.consumer_secret
# access_token = twitter_keys.access_token
# access_secret = twitter_keys.access_secret
%matplotlib inline
# dogs = pd.read_csv('./datasets/twitter-archive-enhanced.csv')
# predictions = pd.read_csv('./datasets/image-predictions.tsv', delim_whitespace=True)
# tweets = pd.read_csv('./datasets/tweet_df.csv')
# dogs_clean = dogs.copy()
# predictions_clean = predictions.copy()
# tweets_clean = tweets.copy()
# auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
# auth.set_access_token(access_token, access_secret)
# api = tweepy.API(auth)
###Output
_____no_output_____
###Markdown
Data Gathering * Given by Udacity
###Code
dogs = pd.read_csv('./datasets/twitter-archive-enhanced.csv')
dogs.head()
###Output
_____no_output_____
###Markdown
* Given by Udacity
###Code
# Udacity server url
udacity_url = 'https://d17h27t6h515a5.cloudfront.net/topher/2017/August/599fd2ad_image-predictions/image-predictions.tsv'
predictions = pd.read_csv(udacity_url, delim_whitespace=True)
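# Note (added): the file is tab-separated, so sep='\t' would be the stricter choice;
# delim_whitespace=True appears to work here because the fields contain no embedded spaces.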
predictions.head()
###Output
_____no_output_____
###Markdown
Iterate through tweet_id to grab additional information
###Code
# retweeted
# retweet_count
# favorite_count
# is_quote_status
# favorited
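# Note (added): the block below is intentionally left commented out because it
# needs live Twitter API credentials; its output was cached to a CSV
# (./datasets/tweet_df.csv), which is what the rest of the notebook loads.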
# new_df = pd.DataFrame(data=[['one', 'two', 'three', 'four', 'five', 'size']], columns=['tweet_id', 'retweeted', 'retweet_count', 'favorite_count', 'is_quote_status', 'favorited'])
# temp = []
# # tweet_ids = list(df['tweet_id'])
# columns = ['tweet_id', 'retweeted', 'retweet_count', 'favorite_count', 'is_quote_status', 'favorited']
# # temp = []
# for i in tweet_ids[19:]:
# try:
# response = api.get_status(i, tweet_mode='extended')
# results = response._json
# except:
# continue
# # retweeted
# try:
# retweeted = results['retweeted']
# except:
# retweeted = np.nan
# # retweet_count
# try:
# retweet_count = results['retweet_count']
# except:
# retweet_count = np.nan
# # favorite_count
# try:
# favorite_count = results['favorite_count']
# except:
# favorite_count = np.nan
# # is_quote_status
# try:
# is_quote_status = results['is_quote_status']
# except:
# is_quote_status = np.nan
# # favorited
# try:
# favorited = results['favorited']
# except:
# favorited = np.nan
# temp.append([i, retweeted, retweet_count, favorite_count, is_quote_status, favorited])
# temp_df = pd.DataFrame(temp, columns=columns)
# temp_df.to_csv('tweet_df.csv', index=False)
# time.sleep(2)
###Output
_____no_output_____
###Markdown
Data Assessment
###Code
dogs
dogs['source'].values
dogs.isna().sum()
dogs.loc[dogs['name'] == 'None'].head()
dogs[dogs['retweeted_status_timestamp'].notna()]
dogs.loc[dogs['expanded_urls'].map(lambda x: len(str(x).split('https://twitter.com')) > 2), 'expanded_urls'].values[0]
dogs.info()
predictions
predictions.info()
tweets = pd.read_csv('./datasets/tweet_df.csv')
tweets.head()
tweets.info()
dogs['text'].str.contains('@RT').sum()
###Output
_____no_output_____
###Markdown
Quality dogs table* rating numerator and denominator should be floats.* tweet_id column should be object datatype* timestamp is not a datetime type* Source is unnecessary* Nulls replaced with something more appropriate, maybe 0* retweeted_status_timestamp is not a datetime type* Wrong dtypes for doggo, floofer, pupper, puppo* Wrong names* doggo, floofer, pupper, puppo needs only 1 column as dog_stage.* if not a pupper, puppo then a floofer* recorded the rating wrong in some cells.* Missing links for pictures.* Duplicated links within link to pictures. predictions table* tweet_id column should be object datatype* p1, p2, p3 wrong dtypes tweets table* tweet_id column should be object datatype Tidiness Predictions* Duplicated picture links for tables 'jpg_url' and 'expanded_url'* Drop img_num Dogs* Some retweets need to be dropped* Unnecessary columns for 'in_reply_to_status_id', 'in_reply_to_user_id', 'retweeted_status_user_id', 'retweeted_status_timestamp', 'retweeted_status_id'* tweets table needs to be joined with dogs* Gofundme websites in links for pictures need to be deleted.* Many rows that are not about dogs need to be deleted Tweets* Drop the same tweet ids from dogs to combine later Data Cleaning
###Code
dogs_clean = dogs.copy()
predictions_clean = predictions.copy()
tweets_clean = tweets.copy()
###Output
_____no_output_____
###Markdown
Some retweets need to be dropped Define First collect all the twitter ids into one list (for later use), then grab the index and drop inplace. Code
###Code
tweet_ids_drop = dogs_clean.loc[dogs_clean['retweeted_status_id'].notna(), 'tweet_id'].values
drop_index = dogs_clean[dogs_clean['tweet_id'].map(lambda x: x in tweet_ids_drop)].index
dogs_clean.drop(drop_index, inplace=True)
###Output
_____no_output_____
###Markdown
Test
###Code
dogs_clean['retweeted_status_id'].notna().sum()
###Output
_____no_output_____
###Markdown
Drop the same tweet ids from dogs to combine later Define Using the tweet id's from earlier, grab the index from tweets and drop inplace. Code
###Code
index_drop = tweets_clean[tweets_clean['tweet_id'].map(lambda x: x in tweet_ids_drop)].index
tweets_clean.drop(index_drop, inplace=True)
index_drop = predictions_clean[predictions_clean['tweet_id'].map(lambda x: x in tweet_ids_drop)].index
predictions_clean.drop(index_drop, inplace=True)
###Output
_____no_output_____
###Markdown
Test
###Code
tweets_clean['tweet_id'].map(lambda x: x in tweet_ids_drop).sum()
predictions_clean['tweet_id'].map(lambda x: x in tweet_ids_drop).sum()
###Output
_____no_output_____
###Markdown
rating numerator and denominator should be floats. Define Change with astype(float) and assign the result back. Code
###Code
dogs_clean['rating_numerator'] = dogs_clean['rating_numerator'].astype(float)
dogs_clean['rating_denominator'] = dogs_clean['rating_denominator'].astype(float)
###Output
_____no_output_____
###Markdown
Test
###Code
print(dogs_clean['rating_numerator'].dtype)
dogs_clean['rating_denominator'].dtype
###Output
float64
###Markdown
tweet_id column should be object datatype Define We will just change the column with astype(object) and assign it back. Code
###Code
dogs_clean['tweet_id'] = dogs_clean['tweet_id'].astype(object)
predictions_clean['tweet_id'] = predictions_clean['tweet_id'].astype(object)
tweets_clean['tweet_id'] = tweets_clean['tweet_id'].astype(object)
###Output
_____no_output_____
###Markdown
Test
###Code
dogs_clean['tweet_id'].dtype
predictions_clean['tweet_id'].dtype
tweets_clean['tweet_id'].dtype
###Output
_____no_output_____
###Markdown
timestamp is not a datetime type Define Re-declare timestamp column as original but as type datetime. Code
###Code
dogs_clean['timestamp'] = pd.to_datetime(dogs_clean['timestamp'])
###Output
_____no_output_____
###Markdown
Test
###Code
dogs_clean.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 2175 entries, 0 to 2355
Data columns (total 17 columns):
tweet_id 2175 non-null object
in_reply_to_status_id 78 non-null float64
in_reply_to_user_id 78 non-null float64
timestamp 2175 non-null datetime64[ns, UTC]
source 2175 non-null object
text 2175 non-null object
retweeted_status_id 0 non-null float64
retweeted_status_user_id 0 non-null float64
retweeted_status_timestamp 0 non-null object
expanded_urls 2117 non-null object
rating_numerator 2175 non-null float64
rating_denominator 2175 non-null float64
name 2175 non-null object
doggo 2175 non-null object
floofer 2175 non-null object
pupper 2175 non-null object
puppo 2175 non-null object
dtypes: datetime64[ns, UTC](1), float64(6), object(10)
memory usage: 305.9+ KB
###Markdown
Source is unnecessary Define Drop column, 'source.' Code
###Code
dogs_clean.drop(columns='source', inplace=True)
###Output
_____no_output_____
###Markdown
Test
###Code
'source' in dogs_clean.columns
###Output
_____no_output_____
###Markdown
Nulls replaced with something more appropriate, maybe 0 Define Simply fill nulls as 0. Code
###Code
dogs_clean.fillna(0, inplace=True)
###Output
_____no_output_____
###Markdown
Test
###Code
dogs_clean.isna().sum().sum()
###Output
_____no_output_____
###Markdown
retweeted_status_timestamp is not a datetime type Define Declare anew as itself turned into datetime. Code
###Code
dogs_clean['retweeted_status_timestamp'] = pd.to_datetime(dogs_clean['retweeted_status_timestamp'])
###Output
_____no_output_____
###Markdown
Test
###Code
dogs_clean.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 2175 entries, 0 to 2355
Data columns (total 16 columns):
tweet_id 2175 non-null int64
in_reply_to_status_id 2175 non-null float64
in_reply_to_user_id 2175 non-null float64
timestamp 2175 non-null datetime64[ns, UTC]
text 2175 non-null object
retweeted_status_id 2175 non-null float64
retweeted_status_user_id 2175 non-null float64
retweeted_status_timestamp 2175 non-null datetime64[ns]
expanded_urls 2175 non-null object
rating_numerator 2175 non-null float64
rating_denominator 2175 non-null float64
name 2175 non-null object
doggo 2175 non-null object
floofer 2175 non-null object
pupper 2175 non-null object
puppo 2175 non-null object
dtypes: datetime64[ns, UTC](1), datetime64[ns](1), float64(6), int64(1), object(7)
memory usage: 288.9+ KB
###Markdown
Wrong names Define Names parsed as lowercase words are artifacts; where the tweet text mentions a name ('name is ...' or 'named ...'), recover it with a regex; the rest can't be fixed. Code
###Code
import re
def lower_first_letter(x):
return x[0] == x[0].lower()
mask = dogs_clean[dogs_clean['name'].map(lower_first_letter)]
df_fix = mask[dogs_clean[dogs_clean['name'].map(lower_first_letter)]['text'].str.contains('name').values]
def throwaway(text):
try:
regex = r'name is ([a-zA-Z]+)'
name = re.search(regex, text).group(1)
except:
regex = r'named ([a-zA-Z]+)'
name = re.search(regex, text).group(1)
return name
mask = df_fix.index
dogs_clean.loc[mask, 'name'] = dogs_clean.loc[mask, 'text'].map(throwaway)
###Output
_____no_output_____
###Markdown
Test
###Code
dogs_clean[dogs_clean['name'].map(lower_first_letter)]['text'].values[:5]
dogs_clean.loc[mask, 'name']
###Output
_____no_output_____
###Markdown
Rows that have nothing to do with dogs need to be dropped Define Find where the name is None, the numerator divided by denominator is less than 1, and it's None in all of the dog_stage columns. Code
###Code
f = dogs_clean['name'] == 'None'
e = dogs_clean['rating_numerator'] / dogs_clean['rating_denominator'] < 1
a = dogs_clean['floofer'] == 'None'
b = dogs_clean['pupper'] == 'None'
c = dogs_clean['puppo'] == 'None'
d = dogs_clean['doggo'] == 'None'
index = dogs_clean.loc[a & b & c & d & e & f].index
dogs_clean.drop(index, inplace=True)
###Output
_____no_output_____
###Markdown
Test
###Code
dogs_clean.loc[a & b & c & d & e & f]
###Output
_____no_output_____
###Markdown
doggo, floofer, pupper, puppo needs only one column Define Loop through the columns and take the name out of them and put them in a new column called dog_stage. Code
###Code
def new_dog_type(doggo, floofer, pupper, puppo):
doggo = doggo.replace('None', '')
floofer = floofer.replace('None', '')
pupper = pupper.replace('None', '')
puppo = puppo.replace('None', '')
dog_type = doggo + floofer + pupper + puppo
if dog_type == '':
return 'floofer'
return dog_type
dogs_clean['dog_stage'] = dogs_clean.apply(lambda x: new_dog_type(doggo = x['doggo'],
floofer = x['floofer'],
pupper = x['pupper'],
puppo = x['puppo']),
axis = 1)
def seperate_dog_stage(x):
if x not in ['doggo', 'floofer', 'pupper', 'puppo']:
first_half = x[:5]
second_half = x[5:]
return first_half + ', ' + second_half
return x
dogs_clean['dog_stage'] = dogs_clean['dog_stage'].map(seperate_dog_stage)
dogs_clean.drop(columns=['doggo', 'floofer', 'pupper', 'puppo'], inplace=True)
###Output
_____no_output_____
###Markdown
Test
###Code
dogs_clean.head()
dogs_clean.loc[dogs_clean['dog_stage'].map(lambda x: x not in ['doggo', 'floofer', 'pupper', 'puppo']), 'dog_stage'].head()
###Output
_____no_output_____
###Markdown
Wrong dtypes for doggo, floofer, pupper, puppo Define Change the columns with astype to category. Code
###Code
dogs_clean['dog_stage'] = dogs_clean['dog_stage'].astype('category')
###Output
_____no_output_____
###Markdown
Test
###Code
dogs_clean.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 2044 entries, 0 to 2354
Data columns (total 13 columns):
tweet_id 2044 non-null int64
in_reply_to_status_id 2044 non-null float64
in_reply_to_user_id 2044 non-null float64
timestamp 2044 non-null datetime64[ns, UTC]
text 2044 non-null object
retweeted_status_id 2044 non-null float64
retweeted_status_user_id 2044 non-null float64
retweeted_status_timestamp 2044 non-null datetime64[ns]
expanded_urls 2044 non-null object
rating_numerator 2044 non-null float64
rating_denominator 2044 non-null float64
name 2044 non-null object
dog_stage 2044 non-null category
dtypes: category(1), datetime64[ns, UTC](1), datetime64[ns](1), float64(6), int64(1), object(3)
memory usage: 210.0+ KB
###Markdown
if not a doggo, pupper, or puppo, then a floofer, because floofer apparently applies to all dogs Define Done above Code Test p1, p2, p3 wrong dtypes Define Simply re-declare each column as itself with type category. Code
###Code
predictions_clean[['p1', 'p2', 'p3']] = predictions_clean[['p1', 'p2', 'p3']].astype('category')
###Output
_____no_output_____
###Markdown
Test
###Code
predictions_clean[['p1', 'p2', 'p3']].info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 1994 entries, 0 to 2074
Data columns (total 3 columns):
p1 1994 non-null category
p2 1994 non-null category
p3 1994 non-null category
dtypes: category(3)
memory usage: 86.5 KB
###Markdown
Drop jpg urls Define Simply drop the column with inplace set to True. Code
###Code
predictions_clean.drop(columns='jpg_url', inplace=True)
predictions_clean.head()
###Output
_____no_output_____
###Markdown
Test Drop img_num column Define Drop with inplace. Code
###Code
predictions_clean.drop(columns='img_num', inplace=True)
###Output
_____no_output_____
###Markdown
Test
###Code
predictions_clean.head()
###Output
_____no_output_____
###Markdown
Unnecessary columns for 'in_reply_to_status_id', 'in_reply_to_user_id,' 'retweeted_status_user_id', 'retweeted_status_timestamp', 'retweeted_status_id,' Define After review, just going to drop them. Code
###Code
dogs_clean.drop(columns=['in_reply_to_status_id',
'in_reply_to_user_id',
'retweeted_status_user_id',
'retweeted_status_timestamp',
'retweeted_status_id'],
inplace=True)
###Output
_____no_output_____
###Markdown
Test
###Code
dogs_clean.head()
###Output
_____no_output_____
###Markdown
Move 'jpg_url' in place of 'expanded_urls' Define Ended up just dropping jpg_url. Code Test tweets table needs to be joined with dogs Define Will merge on tweet_id. Code
###Code
dogs_tweets_clean = dogs_clean.merge(tweets_clean, on='tweet_id')
###Output
_____no_output_____
###Markdown
Test
###Code
dogs_tweets_clean.head()
###Output
_____no_output_____
###Markdown
Recorded the rating wrong in some cells (Bella specifically) Define Grab Bella's tweet id and set the numerator to 13.5. Code Bella: 883482846933004288
###Code
dogs_clean.loc[dogs_clean['tweet_id'] == 883482846933004288, 'rating_numerator'] = 13.5
###Output
_____no_output_____
###Markdown
Test
###Code
dogs_clean.loc[dogs_clean['tweet_id'] == 883482846933004288]
###Output
_____no_output_____
###Markdown
Missing links for pictures Define After some effort, not actually able to get the missing links for pictures. Code Test Duplicated links within cells Define First need to create a mask in order to isolate the duplicated links. Next create a function that will split them and compare. Lastly, using the mask, set those into the clean dataframe. Code
###Code
mask = dogs_clean['expanded_urls'].map(lambda x: str(x).count('http') >= 2)
def delete_duplicated_link(url):
"""
Splits url into two parts allowing it to be checked for equality.
If they are not equal; return the original as to not lose data.
Parameters
----------
url : string
Returns
-------
url | a | b : string
Depends on if they are same, not a url, or twitter.
"""
try:
a, b = url.split(',')[:2]
except:
return url
if a == b: return a
return (b if 'twitter' in b else a)
test = dogs_clean[mask]['expanded_urls'].map(delete_duplicated_link)
dogs_clean.loc[mask, 'expanded_urls'] = test
###Output
_____no_output_____
###Markdown
Test
###Code
(dogs_clean['expanded_urls'].map(lambda x: str(x).count('http') >= 2)).sum()
###Output
_____no_output_____
###Markdown
Gofundme Links needs to be deleted DefineAlready done above. Code Test
###Code
(dogs_clean['expanded_urls'].map(lambda x: 'gofund' in str(x))).sum()
###Output
_____no_output_____
###Markdown
Combine all DATAFRAMES Define Code
###Code
combined_df = tweets_clean.merge(dogs_clean, on='tweet_id').merge(predictions_clean, on='tweet_id')
###Output
_____no_output_____
###Markdown
Test Saving Clean Datasets
###Code
combined_df.to_csv('twitter_archive_master.csv', index=False)
# If you need to import them
combined_df = pd.read_csv('twitter_archive_master.csv')
###Output
_____no_output_____
###Markdown
Data Exploring
###Code
combined_df['favorited'].sum()
###Output
_____no_output_____
###Markdown
* It would seem that none of these dogs were favorited...
###Code
combined_df['favorite_count'].sum()
###Output
_____no_output_____
###Markdown
* Then looking at the favorite count, we can see that these tweets gathered over 16 million favorites in total! The difference is that `favorite_count` is the total number of likes a tweet received, while `favorited` only flags whether the authenticated account itself liked it, which explains the zero above.
###Code
combined_df['retweet_count'].sum()
###Output
_____no_output_____
###Markdown
* These tweets in total were retweeted over 5 million times!
###Code
combined_df['retweeted'].sum()
###Output
_____no_output_____
###Markdown
* We were trying to avoid grabbing any tweets that were retweeted and it shows that we were successful. * It also seems a lot of people will send in a picture of their pet blending in with the rug, which people do not seem to appreciate for some reason. Or pictures that are not even of dogs. Data Evaluation
###Code
combined_df
for i in range(1, 4):
x = combined_df[f'p{i}_conf']
# Plotting configuration
plt.figure()
plt.hist(x,
color='red',
bins=25,
edgecolor='black');
plt.title(f'p{i} Confidence Distribution', size=18);
###Output
_____no_output_____ |
neb_scratch.ipynb | ###Markdown
---
###Code
####### Running the NEB path optimization
import numpy as np
import cell_lattices as cx
import matplotlib.pyplot as plt
%matplotlib inline
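# Note (added): `exponential_repulsion`, `NEB`, and `SteepestDescent` used below
# are assumed to be defined elsewhere in this project/notebook (they are not
# imported here and are not standard-library names).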
plt.rcParams["figure.facecolor"] = "black"
plt.rcParams["axes.facecolor"] = "black"
plt.rcParams["text.color"] = "white"
plt.rcParams["axes.titlesize"] = 10
plt.rcParams["figure.titlesize"] = 14
# Make a lattice configuration of cells
rows = cols = 9
X = cx.hex_grid(rows, cols, sigma=0.)
# Define 3 cells that will rotate positions
swapping_cells = np.array([31, 39, 41])
# Get the indices of cells on the border, which will remain fixed
frozen_idx = cx.get_outer_idx(rows, cols)
# Visualize the initial system state and energy gradient
from matplotlib.collections import LineCollection
fig, ax = plt.subplots()
# original cell positions in blue
plt.scatter(*X.T)
# trajectory of repulsion
X_repulsed = 0.25 * exponential_repulsion(X, freeze=frozen_idx)[1]
segs = np.zeros((X.shape[0], 2, 2))
segs[:, 0, :] = X
segs[:, 1, :] = X - X_repulsed
line_segments = LineCollection(segs)
ax.add_collection(line_segments)
# fixed cells in gray
plt.scatter(*X[frozen_idx].T, color="gray")
# Define the final state
X_final = X.copy()
X_final[swapping_cells] = X[np.roll(swapping_cells, 1)]
# Construct a linear path with `nt` time-steps (beads)
nt = 75
t = np.linspace(0, 1, nt)
lin_path = np.array([(1 - _t) * X + _t * X_final for _t in t])
# Plot the linear path
fig, axs = plt.subplots(3, 5, figsize=(10, 6))
plt.suptitle("Linear path")
nplot = len(axs.flat)
sample_idx = np.array([int(k) for k in np.linspace(0, nt - 1, nplot)])
for i, ax in enumerate(axs.flat):
_X = lin_path[sample_idx[i]]
ax.scatter(*_X.T, s=5)
ax.scatter(*_X[frozen_idx].T, color="gray")
for j in range(3):
ax.scatter(*_X[swapping_cells[j]], s=10)
ax.set_aspect("equal")
ax.axis("off")
ax.set_title(f"{sample_idx[i] + 1} / {nt}")
plt.tight_layout()
# Make an NEB optimizer object
neb = NEB(lin_path, 1.0)
# Define the function for energy minimization
minimizer = SteepestDescent(stepsize=0.01, ) # Gradient descent
# Make a wrapper function that takes the system state and returns
# energy (E) and the energy gradient (G)
EG_func = lambda x: exponential_repulsion(x, freeze=frozen_idx)
# Run NEB optimization
nsteps = 100
neb_path = neb.minimize(nsteps, EG_func, minimizer, progress=True)
# PLot the optimized path
fig, axs = plt.subplots(3, 5, figsize=(10, 6))
plt.suptitle("NEB-optimized geodesic path")
for i, ax in enumerate(axs.flat):
_X = neb_path[sample_idx[i]]
ax.scatter(*_X.T, s=5)
ax.scatter(*_X[frozen_idx].T, color="gray")
for j in range(3):
ax.scatter(*_X[swapping_cells[j]], s=10)
ax.set_aspect("equal")
ax.axis("off")
ax.set_title(f"{sample_idx[i] + 1} / {nt}")
plt.tight_layout()
# Make an animation!
import os
import matplotlib as mpl
from matplotlib import animation
save_dir = os.path.abspath("./plots")
fpath = os.path.join(save_dir, f"Linear_NEB_paths_toy_.mp4")
dpi = 300
save = True
n_frames = 75
writer = "ffmpeg"
fps = 12
xlim = -5.15, 5.65
ylim = -4.55, 4.55
if save:
# Get which frames to animate
nt = t.size
frames = cx.vround(np.linspace(0, nt-1, n_frames))
# # Font sizes
# SMALL_SIZE = 12
# MEDIUM_SIZE = 14
# BIGGER_SIZE = 16
# plt.rc('font', size=SMALL_SIZE) # controls default text sizes
# plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
# plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
# plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
# plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
# plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
# plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
# Make figure
fig, axs = plt.subplots(
nrows=1,
ncols=2,
figsize=(6, 3),
)
for ax in axs:
ax.set_aspect("equal")
ax.axis("off")
# Update which data is used for each run, in each frame
var_kw = dict(X = X, title = "")
def update_kw(f, pathtype):
var_kw.update(
X = (lin_path, neb_path)[pathtype][frames[f]],
title = ("Linear path", "NEB-optimized geodesic")[pathtype],
)
# Plot one frame of animation
def make_frame(f):
# print(f"Frame {f+1} / {n_frames}")
# Set title at top of figure
plt.suptitle(f"Sim. time: {frames[f] / (nt - 1):.3f}")
# Iterate through a 1 x 2 layout of plots
for idx, ax in enumerate(axs.flat):
# Update plotting params
update_kw(f, idx)
# Clear axis
ax.clear()
# All cells
ax.scatter(*var_kw["X"].T, s=5)
# Fixed cells
ax.scatter(*var_kw["X"][frozen_idx].T, color="gray")
# Moving cells
for j in range(3):
ax.scatter(*var_kw["X"][swapping_cells[j]], s=10)
# Options
ax.set_title(var_kw["title"])
ax.set_xlim(xlim)
ax.set_ylim(ylim)
try:
_writer = animation.writers[writer](fps=fps, bitrate=1800)
except RuntimeError:
print("""
The `ffmpeg` writer must be installed inside the runtime environment.
Writer availability can be checked in the current environment by executing
`matplotlib.animation.writers.list()` in Python. Install location can be
checked by running `which ffmpeg` on a command line/terminal.
""")
_anim_FA = animation.FuncAnimation(fig, make_frame, frames=n_frames, interval=200)
# Get path and print to output
_fpath = str(fpath)
if not _fpath.endswith(".mp4"):
_fpath += ".mp4"
print("Writing to:", _fpath)
# Save animation
_anim_FA.save(
_fpath,
writer=_writer,
dpi=dpi,
progress_callback=lambda i, n: print(f"Frame {i+1} / {n}"),
);
###Output
_____no_output_____ |
OOP_ML_+_Multi_Thread_v2_1.ipynb | ###Markdown
###Code
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.impute import KNNImputer
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn import metrics
from sklearn.metrics import classification_report, accuracy_score, roc_curve, auc, confusion_matrix
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
import time
import threading
sns.set()
!pip install --force https://github.com/chengs/tqdm/archive/colab.zip
from tqdm import tqdm_notebook as tqdm
with tqdm(range(1)) as pbar:
pass
###Output
_____no_output_____
###Markdown
Class for parameters' initiation
**MLStarter**
###Code
class MLStarter:
def __init__(self, df, seed, model_name):
self.df = df
self.seed = seed
self.model_name = model_name
X_unscale, X, y, X_train, X_test, y_train, y_test, y_pred, model = 0, 0, 0, 0, 0, 0, 0, 0, 0
self.X_unscale = X_unscale
self.X = X
self.y = y
self.X_train = X_train
self.X_test = X_test
self.y_train = y_train
self.y_test = y_test
self.y_pred = y_pred
self.model = model
###Output
_____no_output_____
###Markdown
Class for PreProcessing
**Child of MLStarter**
###Code
class preprocessor(MLStarter):
def __init__(self, df, seed, model_name):
super().__init__(df, seed, model_name)
def null_checker(self):
return self.df.isnull().sum()  # use the instance's DataFrame rather than the global df
def separator(self):
self.X = self.df.iloc[:, :-1]
self.y = self.df.iloc[:, -1]
self.X_unscale = self.X
def data_encoder(self):
encoder = LabelEncoder()
self.y = encoder.fit_transform(self.y)
def null_imputer(self):
if (sum(self.null_checker()) != 0):
cols = []
for col in self.X.columns:
cols.append(col)
imputer = KNNImputer(n_neighbors=2)
X_filled = imputer.fit_transform(self.X)
X_new = pd.DataFrame(data=X_filled, columns=cols)
self.X = X_new
def data_scaler(self):
scaler = MinMaxScaler()
scaler.fit(self.X)
self.X = scaler.transform(self.X)
# self.X_unscale = scaler.inverse_transform(self.X)
def data_splitter(self):
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=0.2, random_state=self.seed)
def metrics_calc(self):
print(classification_report(self.y_test, self.y_pred))
print("="*30)
print("Accuracy: {}".format(accuracy_score(self.y_test, self.y_pred)))
###Output
_____no_output_____
###Markdown
Class of ML classifier
**Child of preprocessor**
###Code
class classifier(preprocessor):
def __init__(self, df, seed, model_name):
super().__init__(df, seed, model_name)
def SVM(self):
print("SVM")
svc = SVC(probability=True) # Default hyperparameters -- put probability=True if you want to plot ROC Curve
svc.fit(self.X_train, self.y_train)
y_pred = svc.predict(self.X_test)
self.y_pred = y_pred
self.model = svc
self.metrics_calc() #calculate metrics
def logreg(self):
print("LogReg")
model = LogisticRegression(max_iter=1000)
model.fit(self.X_train, self.y_train)
y_pred = model.predict(self.X_test)
self.y_pred = y_pred
self.model = model
self.metrics_calc()
def NB(self):
print("Naive Bayes")
NB = GaussianNB()
NB.fit(self.X_train, self.y_train)
y_pred = NB.predict(self.X_test)
self.y_pred = y_pred
self.model = NB
self.metrics_calc()
def DT(self):
print("Decision Tree")
DT = DecisionTreeClassifier(criterion = 'gini', splitter='best', max_depth=15)
DT.fit(self.X_train, self.y_train)
y_pred = DT.predict(self.X_test)
self.y_pred = y_pred
self.model = DT
self.metrics_calc()
def KNN(self):
print("K Nearest Neighbors (KNN)")
KNN = KNeighborsClassifier(n_neighbors = 5)
KNN.fit(self.X_train, self.y_train)
y_pred = KNN.predict(self.X_test)
self.y_pred = y_pred
self.model = KNN
self.metrics_calc()
def NN(self):
print("Neural Networks")
NN = MLPClassifier(hidden_layer_sizes=(50, 50, 50), max_iter=1500, activation = 'relu', solver='adam', random_state=42, verbose = False)
NN.fit(self.X_train, self.y_train)
y_pred = NN.predict(self.X_test)
self.y_pred = y_pred
self.model = NN
self.metrics_calc()
def RF(self):
print("Random Forest")
clf = RandomForestClassifier(n_estimators=100, random_state=0)
clf.fit(self.X_train, self.y_train)
y_pred = clf.predict(self.X_test)
self.y_pred = y_pred
self.model = clf
self.metrics_calc()
###Output
_____no_output_____
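As an aside (not in the original pipeline), the same classifiers could also be compared with k-fold cross-validation instead of a single train/test split; the sketch below uses a synthetic stand-in dataset, so `X_demo`/`y_demo` are purely illustrative:
```python
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier

# Synthetic stand-in data; in practice X and y would come from the preprocessor above.
X_demo, y_demo = make_classification(n_samples=300, n_features=10, random_state=8)
for name, clf in [("SVM", SVC()), ("RF", RandomForestClassifier(n_estimators=100, random_state=0))]:
    scores = cross_val_score(clf, X_demo, y_demo, cv=5)
    print("{}: mean accuracy {:.3f} (+/- {:.3f})".format(name, scores.mean(), scores.std()))
```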
###Markdown
Class of postprocessor
**Child of classifier**
###Code
class postprocessor(classifier):
def __init__(self, df, seed, model_name):
super().__init__(df, seed, model_name)
def roc_plotter(self):
y_pred_proba = self.model.predict_proba(self.X_test)
fpr0, tpr0, _ = roc_curve(self.y_test, y_pred_proba[:, 0], pos_label=self.model.classes_[0]) #class 0
fpr1, tpr1, _ = roc_curve(self.y_test, y_pred_proba[:, 1], pos_label=self.model.classes_[1]) #class 1
AUC0 = auc(fpr0, tpr0)
AUC1 = auc(fpr1, tpr1)
print("AUC score: class 0: {} | class 1: {}".format(AUC0, AUC1))
plt.figure(figsize = (8, 8))
plt.xlabel("FP Rate")
plt.ylabel("TP Rate")
plt.plot(fpr0, tpr0, label = 'class 0', linewidth = 2)
plt.plot(fpr1, tpr1, label = 'class 1', linewidth = 2)
plt.title("ROC Curve")
plt.legend()
plt.autoscale(enable=True, axis='both', tight=False)
plt.show()
def conf_matrix(self):
cnf_matrix = confusion_matrix(self.y_test, self.y_pred)
fig, ax = plt.subplots(figsize=(7, 7))
# create heatmap
sns.heatmap(pd.DataFrame(cnf_matrix), annot=True, cmap="YlGnBu", fmt="g")
ax.xaxis.set_label_position("top")
plt.tight_layout()
plt.title("Confusion matrix", y=1.1)
plt.ylabel("Actual label")
plt.xlabel("Predicted label");
class autolearner(postprocessor):
def __init__(self, df, seed, model_name):
super().__init__(df, seed, model_name)
def model_selector(self):
if self.model_name == "SVM":
self.SVM()
elif self.model_name == "LogReg":
self.logreg()
elif self.model_name == "NB":
self.NB()
elif self.model_name == "DT":
self.DT()
elif self.model_name == "KNN":
self.KNN()
elif self.model_name == "NN":
self.NN()
elif self.model_name == "RF":
self.RF()
def auto_learn(self):
self.null_checker()
self.separator()
self.data_encoder()
self.null_imputer()
self.data_scaler()
self.data_splitter()
self.model_selector()
file_name = '/content/voice.csv'
df = pd.read_csv(file_name)
seed = 8
# model_name = 'NN'
model_names = ['SVM', 'LogReg', 'NB', 'DT', 'KNN', 'NN', 'RF']
vmodel0 = autolearner(df, seed, model_names[0])
vmodel1 = autolearner(df, seed, model_names[1])
vmodel2 = autolearner(df, seed, model_names[2])
vmodel3 = autolearner(df, seed, model_names[3])
vmodel4 = autolearner(df, seed, model_names[4])
vmodel5 = autolearner(df, seed, model_names[5])
vmodel6 = autolearner(df, seed, model_names[6])
# Pass the bound methods themselves (no parentheses); calling auto_learn() here
# would run it immediately and hand Thread a None target, so the threads would do nothing.
t0 = threading.Thread(target=vmodel0.auto_learn)
t1 = threading.Thread(target=vmodel1.auto_learn)
t2 = threading.Thread(target=vmodel2.auto_learn)
t3 = threading.Thread(target=vmodel3.auto_learn)
t4 = threading.Thread(target=vmodel4.auto_learn)
t5 = threading.Thread(target=vmodel5.auto_learn)
t6 = threading.Thread(target=vmodel6.auto_learn)
t0.start()
t1.start()
t2.start()
t3.start()
t4.start()
t5.start()
t6.start()
t0.join()
t1.join()
t2.join()
t3.join()
t4.join()
t5.join()
t6.join()
print("Done!")
vmodel0.roc_plotter()
###Output
_____no_output_____ |
notebooks/feature_store.ipynb | ###Markdown
Made With ML · Applied ML · MLOps · Production. Join 30K+ developers in learning how to responsibly deliver value with ML. 🔥 Among the top MLOps repositories on GitHub. Set up
###Code
# Install Feast
!pip install feast==0.10.5 -q
!pip freeze | grep feast
###Output
feast==0.10.5
###Markdown
We're going to create a feature repository at the root of our project. [Feast](https://feast.dev/) will create a configuration file for us and we're going to add an additional [features.py](https://github.com/GokuMohandas/MLOps/blob/main/features/features.py) file to define our features.
> Traditionally, the feature repository would be its own isolated repository that other services use to read/write features from, but we're going to simplify it and create it directly in our application's repository.
###Code
%%bash
cd ../
feast init --minimal --template local features
cd features
touch features.py
###Output
Creating a new Feast repository in /Users/goku/Documents/madewithml/mlops/features.
###Markdown
```bash
features/
├── feature_store.yaml - configuration
└── features.py        - feature definitions
```
We're going to configure the locations for our registry and online store (SQLite) in our [feature_store.yaml](https://github.com/GokuMohandas/MLOps/blob/main/features/feature_store.yaml) file.
- **registry**: contains information about our feature repository, such as data sources, feature views, etc. Since it's in a DB, instead of a Python file, it can be accessed very quickly in production.
- **online store**: DB (SQLite for local) that stores the (latest) features for defined entities to be used for online inference.

If all definitions look valid, Feast will sync the metadata about Feast objects to the registry. The registry is a tiny database storing most of the same information you have in the feature repository. This step is necessary because the production feature serving infrastructure won't be able to access Python files in the feature repository at run time, but it will be able to efficiently and securely read the feature definitions from the registry.
```yaml
project: features
registry: ../stores/feature/registry.db
provider: local
online_store:
    path: ../stores/feature/online_store.db
```
Data
Feast requires its [data sources](https://github.com/feast-dev/feast/blob/master/sdk/python/feast/data_source.py) to come either from a file ([Parquet](https://databricks.com/glossary/what-is-parquet)), a data warehouse ([BigQuery](https://cloud.google.com/bigquery)) or a data stream ([Kafka](https://kafka.apache.org/) / [Kinesis](https://aws.amazon.com/kinesis/)). We'll convert our generated features file (`features.json`) into a Parquet file.
> Read more about these data sources in our [pipelines](https://madewithml.com/courses/mlops/pipelines/data) and [deployment](https://madewithml.com/courses/mlops/deployment/batch-processing) lessons.
###Code
import pandas as pd
from pathlib import Path
from config import config
from artclass import utils
# Load features to df
features_fp = Path(config.DATA_DIR, "features.json")
features = utils.load_dict(filepath=features_fp)
df = pd.DataFrame(features)
# Format timestamp
df.created_on = pd.to_datetime(df.created_on)
# Convert to parquet
df.to_parquet(
Path(config.DATA_DIR, "features.parquet"),
compression=None,
allow_truncated_timestamps=True,
)
###Output
_____no_output_____
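An optional sanity check (an addition, not from the original notebook): read the Parquet file back to confirm the schema and timestamp column survived the round trip. This assumes the `config` module imported above is available.
```python
import pandas as pd
from pathlib import Path

check_df = pd.read_parquet(Path(config.DATA_DIR, "features.parquet"))  # assumes `config` from above
print(check_df.dtypes)
print(check_df.head(3))
```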
###Markdown
Feature definitions Now that we have our data source prepared, we can define our features for the feature store.
###Code
from datetime import datetime
from feast import Entity, Feature, FeatureView, ValueType
from feast.data_source import FileSource
from google.protobuf.duration_pb2 import Duration
from config import config
###Output
_____no_output_____
###Markdown
The first step is to define the location of the features (FileSource in our case) and the timestamp column for each data point.
###Code
# Read data
START_TIME = "2020-02-17"
project_details = FileSource(
path=str(Path(config.DATA_DIR, "features.parquet")),
event_timestamp_column="created_on",
)
###Output
/Users/goku/Documents/madewithml/mlops/venv/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
Next, we need to define the main entity that each data point pertains to. In our case, each project has a unique ID with features such as text and tags.
###Code
# Define an entity
project = Entity(
name="id",
value_type=ValueType.INT64,
description="project id",
)
###Output
_____no_output_____
###Markdown
Finally, we're ready to create a [FeatureView](https://docs.feast.dev/concepts/feature-views) that loads specific features (`features`), of various [value types](https://api.docs.feast.dev/python/feast.html?highlight=valuetypefeast.value_type.ValueType), from a data source (`input`) for a specific period of time (`ttl`).
###Code
# Define a Feature View for each project
project_details_view = FeatureView(
name="project_details",
entities=["id"],
ttl=Duration(
seconds=(datetime.today() - datetime.strptime(START_TIME, "%Y-%m-%d")).days * 24 * 60 * 60
),
features=[
Feature(name="text", dtype=ValueType.STRING),
Feature(name="tags", dtype=ValueType.STRING_LIST),
],
online=True,
input=project_details,
tags={},
)
###Output
_____no_output_____
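A quick illustration (added here, not part of the original notebook) of what the `ttl` above evaluates to: a `Duration` spanning from `START_TIME` to today, expressed in seconds.
```python
from datetime import datetime

days = (datetime.today() - datetime.strptime("2020-02-17", "%Y-%m-%d")).days
print(f"ttl covers {days} days = {days * 24 * 60 * 60} seconds")
```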
###Markdown
Once we've defined our feature views, we can `apply` it to push a version controlled definition of our features to the registry for fast access. It will also configure our registry and online stores that we've defined in our [feature_store.yaml](https://github.com/GokuMohandas/MLOps/blob/main/features/feature_store.yaml)
###Code
%%bash
cd ../features
feast apply
###Output
Registered entity id
Registered feature view project_details
Deploying infrastructure for project_details
###Markdown
Historical features Once we've registered our feature definition, along with the data source, entity definition, etc., we can use it to fetch historical features. This is done via point-in-time joins on the provided timestamps, using pandas (local) or BigQuery (production).
###Code
import pandas as pd
from feast import FeatureStore
# Identify entities
project_ids = [1, 2, 3]
now = datetime.now()
timestamps = [datetime(now.year, now.month, now.day)]*len(project_ids)
entity_df = pd.DataFrame.from_dict({"id": project_ids, "event_timestamp": timestamps})
entity_df.head()
# Get historical features
store = FeatureStore(repo_path=Path(config.BASE_DIR, "features"))
training_df = store.get_historical_features(
entity_df=entity_df,
feature_refs=["project_details:text", "project_details:tags"],
).to_df()
training_df.head()
###Output
/Users/goku/Documents/madewithml/mlops/venv/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
Materialize For online inference, we want to retrieve features very quickly via our online store, as opposed to fetching them from slow joins. However, the features are not in our online store just yet, so we'll need to [materialize](https://docs.feast.dev/quickstart4-materializing-features-to-the-online-store) them first.
###Code
%%bash
cd ../features
CURRENT_TIME=$(date -u +"%Y-%m-%dT%H:%M:%S")
feast materialize-incremental $CURRENT_TIME
###Output
/Users/goku/Documents/madewithml/mlops/venv/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
This has moved the features for all of our projects into the online store since this was the first time materializing to the online store. When we subsequently run the [`materialize-incremental`](https://docs.feast.dev/how-to-guides/load-data-into-the-online-store2-b-materialize-incremental-alternative) command, Feast keeps track of previous materializations, so we'll only materialize the new data since the last attempt. Online features
###Code
# Get online features
store = FeatureStore(repo_path=Path(config.BASE_DIR, "features"))
feature_vector = store.get_online_features(
feature_refs=["project_details:text", "project_details:tags"],
entity_rows=[{"id": 1000}],
).to_dict()
feature_vector
###Output
/Users/goku/Documents/madewithml/mlops/venv/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
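As a hedged follow-up (not from the original notebook), the returned dictionary can simply be iterated to inspect the online features; key names vary by Feast version, and the downstream `model.predict` call in the comment is purely hypothetical.
```python
# Inspect what the online store returned for this entity.
for name, values in feature_vector.items():
    print(name, values)
# The text feature could then be handed to whatever model service consumes it, e.g.:
# prediction = model.predict([text_value])  # hypothetical downstream call
```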
|
assignment1/2-svm.ipynb | ###Markdown
Multiclass Support Vector Machine exercise
*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*
In this exercise you will:
- implement a fully-vectorized **loss function** for the SVM
- implement the fully-vectorized expression for its **analytic gradient**
- **check your implementation** using numerical gradient
- use a validation set to **tune the learning rate and regularization** strength
- **optimize** the loss function with **SGD**
- **visualize** the final learned weights
###Code
# Run some setup code for this notebook.
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
from __future__ import print_function
# This is a bit of magic to make matplotlib figures appear inline in the
# notebook rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
CIFAR-10 Data Loading and Preprocessing
###Code
# Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print('Training data shape: ', X_train.shape)
print('Training labels shape: ', y_train.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# Split the data into train, val, and test sets. In addition we will
# create a small development set as a subset of the training data;
# we can use this for development so our code runs faster.
num_training = 49000
num_validation = 1000
num_test = 1000
num_dev = 500
# Our validation set will be num_validation points from the original
# training set.
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
# Our training set will be the first num_train points from the original
# training set.
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
# We will also make a development set, which is a small subset of
# the training set.
mask = np.random.choice(num_training, num_dev, replace=False)
X_dev = X_train[mask]
y_dev = y_train[mask]
# We use the first num_test points of the original test set as our
# test set.
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# Preprocessing: reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))
# As a sanity check, print out the shapes of the data
print('Training data shape: ', X_train.shape)
print('Validation data shape: ', X_val.shape)
print('Test data shape: ', X_test.shape)
print('dev data shape: ', X_dev.shape)
# Preprocessing: subtract the mean image
# first: compute the image mean based on the training data
mean_image = np.mean(X_train, axis=0)
print(mean_image[:10]) # print a few of the elements
plt.figure(figsize=(4,4))
plt.imshow(mean_image.reshape((32,32,3)).astype('uint8')) # visualize the mean image
plt.show()
# second: subtract the mean image from train and test data
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
X_dev -= mean_image
# third: append the bias dimension of ones (i.e. bias trick) so that our SVM
# only has to worry about optimizing a single weight matrix W.
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
print(X_train.shape, X_val.shape, X_test.shape, X_dev.shape)
###Output
(49000, 3073) (1000, 3073) (1000, 3073) (500, 3073)
###Markdown
SVM Classifier
Your code for this section will all be written inside **cs231n/classifiers/linear_svm.py**. As you can see, we have prefilled the function `svm_loss_naive`, which uses for loops to evaluate the multiclass SVM loss function.
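For reference, the multiclass SVM loss being evaluated, with scores $s = x_i W$ (bias folded into $W$) and margin $\Delta$ (1 in this assignment), is:
$$
L_i = \sum_{j \neq y_i} \max\left(0,\; s_j - s_{y_i} + \Delta\right), \qquad L = \frac{1}{N}\sum_{i} L_i + \lambda \sum_k \sum_l W_{k,l}^2
$$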
###Code
# Evaluate the naive implementation of the loss we provided for you:
from cs231n.classifiers.linear_svm import svm_loss_naive
import time
# generate a random SVM weight matrix of small numbers
W = np.random.randn(3073, 10) * 0.0001
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.000005)
print('loss: %f' % (loss, ))
###Output
loss: 9.333543
###Markdown
The `grad` returned from the function above is right now all zero. Derive the gradient for the SVM cost function and implement it inline inside the function `svm_loss_naive`. You will find it helpful to interleave your new code inside the existing function. To check that you have implemented the gradient correctly, you can numerically estimate the gradient of the loss function and compare the numeric estimate to the gradient that you computed. We have provided code that does this for you:
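A hedged sketch only (your own derivation is the point of the exercise, and conventions such as the regularization factor may differ): one common way to accumulate the analytic gradient inside the naive double loop is shown below.
```python
import numpy as np

def svm_loss_naive_sketch(W, X, y, reg):
    """W: (D, C) weights, X: (N, D) data, y: (N,) integer labels, reg: regularization strength."""
    dW = np.zeros(W.shape)
    num_classes = W.shape[1]
    num_train = X.shape[0]
    loss = 0.0
    for i in range(num_train):
        scores = X[i].dot(W)
        correct_class_score = scores[y[i]]
        for j in range(num_classes):
            if j == y[i]:
                continue
            margin = scores[j] - correct_class_score + 1  # delta = 1
            if margin > 0:
                loss += margin
                dW[:, j] += X[i]       # gradient contribution for the violating class
                dW[:, y[i]] -= X[i]    # gradient contribution for the correct class
    loss = loss / num_train + reg * np.sum(W * W)
    dW = dW / num_train + 2 * reg * W
    return loss, dW
```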
###Code
# Once you've implemented the gradient, recompute it with the code below
# and gradient check it with the function we provided for you
# Compute the loss and its gradient at W.
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.0)
# Numerically compute the gradient along several randomly chosen dimensions, and
# compare them with your analytically computed gradient. The numbers should match
# almost exactly along all dimensions.
from cs231n.gradient_check import grad_check_sparse
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad)
# do the gradient check once again with regularization turned on
# you didn't forget the regularization gradient did you?
loss, grad = svm_loss_naive(W, X_dev, y_dev, 5e1)
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 5e1)[0]
grad_numerical = grad_check_sparse(f, W, grad)
###Output
numerical: 0.367519 analytic: 0.367519, relative error: 7.131038e-10
numerical: 16.478772 analytic: 16.478772, relative error: 1.565251e-11
numerical: 38.328000 analytic: 38.328000, relative error: 5.378939e-12
numerical: -13.898186 analytic: -13.898186, relative error: 3.136094e-11
numerical: -32.348258 analytic: -32.348258, relative error: 5.200750e-12
numerical: 0.173577 analytic: 0.173577, relative error: 6.332868e-10
numerical: 2.965209 analytic: 2.965209, relative error: 5.861317e-11
numerical: -18.999220 analytic: -18.999220, relative error: 3.932726e-11
numerical: -30.381972 analytic: -30.381972, relative error: 7.004402e-14
numerical: -2.486654 analytic: -2.486654, relative error: 2.675943e-11
numerical: -1.260629 analytic: -1.260629, relative error: 1.591417e-10
numerical: -10.702457 analytic: -10.702457, relative error: 3.739734e-11
numerical: -0.507166 analytic: -0.507166, relative error: 3.403752e-10
numerical: -25.749778 analytic: -25.749778, relative error: 8.864476e-12
numerical: 11.973233 analytic: 11.973233, relative error: 1.792827e-11
numerical: -2.076387 analytic: -2.076387, relative error: 2.074408e-10
numerical: 2.873016 analytic: 2.873016, relative error: 8.548901e-11
numerical: 14.793530 analytic: 14.793530, relative error: 9.505805e-12
numerical: 3.695552 analytic: 3.695552, relative error: 2.783116e-11
numerical: -3.798256 analytic: -3.798256, relative error: 5.064941e-11
###Markdown
Inline Question 1: It is possible that once in a while a dimension in the gradcheck will not match exactly. What could such a discrepancy be caused by? Is it a reason for concern? What is a simple example in one dimension where a gradient check could fail? *Hint: the SVM loss function is not strictly speaking differentiable* **Your Answer:** A discrepancy may be caused when the loss function is not differentiable at some points. For example, the function ReLU $f(x) = \max(0,x)$ is not differentiable at $x=0$. The numerical gradient is estimated as $\frac{df(x)}{dx} \approx \frac{f(x+h)-f(x-h)}{2h}$. At $x=0$ this formula gives $\frac{h}{2h} = \frac{1}{2}$, while the analytic gradient at a nearby point such as $x=\frac{h}{2}$ is $f'(\frac{h}{2}) = 1$. Thus, a large discrepancy can appear at points near $x=0$; such isolated mismatches at non-differentiable points are not a reason for concern.
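A minimal numerical sketch of this effect (hypothetical values, written only for illustration):
###Code
# Centered-difference gradient of ReLU evaluated right next to its kink at x = 0.
# The analytic gradient there is 1, but the numeric estimate is noticeably smaller,
# which is exactly the kind of occasional gradcheck mismatch discussed above.
def relu(x):
    return max(0.0, x)

h = 1e-5
x = h / 2                                          # a point closer to 0 than the probe step h
numeric = (relu(x + h) - relu(x - h)) / (2 * h)    # = ((3/2)h - 0) / (2h) = 0.75
analytic = 1.0                                     # ReLU'(x) for x > 0
print(numeric, analytic)
###Output
_____no_output_____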
###Code
# Next implement the function svm_loss_vectorized; for now only compute the loss;
# we will implement the gradient in a moment.
tic = time.time()
loss_naive, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Naive loss: %e computed in %fs' % (loss_naive, toc - tic))
from cs231n.classifiers.linear_svm import svm_loss_vectorized
tic = time.time()
loss_vectorized, _ = svm_loss_vectorized(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic))
# The losses should match but your vectorized implementation should be much faster.
print('difference: %f' % (loss_naive - loss_vectorized))
# Complete the implementation of svm_loss_vectorized, and compute the gradient
# of the loss function in a vectorized way.
# The naive implementation and the vectorized implementation should match, but
# the vectorized version should still be much faster.
tic = time.time()
_, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Naive loss and gradient: computed in %fs' % (toc - tic))
tic = time.time()
_, grad_vectorized = svm_loss_vectorized(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Vectorized loss and gradient: computed in %fs' % (toc - tic))
# The loss is a single number, so it is easy to compare the values computed
# by the two implementations. The gradient on the other hand is a matrix, so
# we use the Frobenius norm to compare them.
difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print('difference: %f' % difference)
###Output
Naive loss and gradient: computed in 0.148338s
Vectorized loss and gradient: computed in 0.009920s
difference: 0.000000
###Markdown
Stochastic Gradient DescentWe now have vectorized and efficient expressions for the loss, the gradient and our gradient matches the numerical gradient. We are therefore ready to do SGD to minimize the loss.
###Code
# In the file linear_classifier.py, implement SGD in the function
# LinearClassifier.train() and then run it with the code below.
from cs231n.classifiers import LinearSVM
svm = LinearSVM()
tic = time.time()
loss_hist = svm.train(X_train, y_train, learning_rate=1e-7, reg=2.5e4,
num_iters=1500, verbose=True)
toc = time.time()
print('That took %fs' % (toc - tic))
# A useful debugging strategy is to plot the loss as a function of
# iteration number:
plt.plot(loss_hist)
plt.xlabel('Iteration number')
plt.ylabel('Loss value')
plt.show()
# Write the LinearSVM.predict function and evaluate the performance on both the
# training and validation set
y_train_pred = svm.predict(X_train)
print('training accuracy: %f' % (np.mean(y_train == y_train_pred), ))
y_val_pred = svm.predict(X_val)
print('validation accuracy: %f' % (np.mean(y_val == y_val_pred), ))
# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of about 0.4 on the validation set.
#learning_rates = [1e-7, 5e-5]
#regularization_strengths = [2.5e4, 5e4]
learning_rates = [1.4e-7, 1.5e-7, 1.6e-7]
regularization_strengths = [(1+i*0.1)*1e4 for i in range(-3,3)] + [(2+0.1*i)*1e4 for i in range(-3,3)]
# results is dictionary mapping tuples of the form
# (learning_rate, regularization_strength) to tuples of the form
# (training_accuracy, validation_accuracy). The accuracy is simply the fraction
# of data points that are correctly classified.
results = {}
best_val = -1 # The highest validation accuracy that we have seen so far.
best_svm = None # The LinearSVM object that achieved the highest validation rate.
################################################################################
# TODO: #
# Write code that chooses the best hyperparameters by tuning on the validation #
# set. For each combination of hyperparameters, train a linear SVM on the #
# training set, compute its accuracy on the training and validation sets, and #
# store these numbers in the results dictionary. In addition, store the best #
# validation accuracy in best_val and the LinearSVM object that achieves this #
# accuracy in best_svm. #
# #
# Hint: You should use a small value for num_iters as you develop your #
# validation code so that the SVMs don't take much time to train; once you are #
# confident that your validation code works, you should rerun the validation #
# code with a larger value for num_iters. #
################################################################################
for reg in regularization_strengths:
for lr in learning_rates:
svm = LinearSVM()
loss_hist = svm.train(X_train, y_train, lr, reg, num_iters=3000)
y_train_pred = svm.predict(X_train)
train_accuracy = np.mean(y_train == y_train_pred)
y_val_pred = svm.predict(X_val)
val_accuracy = np.mean(y_val == y_val_pred)
if val_accuracy > best_val:
best_val = val_accuracy
best_svm = svm
results[(lr,reg)] = train_accuracy, val_accuracy
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
# Visualize the cross-validation results
import math
x_scatter = [math.log10(x[0]) for x in results]
y_scatter = [math.log10(x[1]) for x in results]
# plot training accuracy
marker_size = 100
colors = [results[x][0] for x in results]
plt.subplot(2, 1, 1)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 training accuracy')
# plot validation accuracy
colors = [results[x][1] for x in results] # default size of markers is 20
plt.subplot(2, 1, 2)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 validation accuracy')
plt.show()
# Evaluate the best svm on test set
y_test_pred = best_svm.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print('linear SVM on raw pixels final test set accuracy: %f' % test_accuracy)
# Visualize the learned weights for each class.
# Depending on your choice of learning rate and regularization strength, these may
# or may not be nice to look at.
w = best_svm.W[:-1,:] # strip out the bias
w = w.reshape(32, 32, 3, 10)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in range(10):
plt.subplot(2, 5, i + 1)
# Rescale the weights to be between 0 and 255
wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min)
plt.imshow(wimg.astype('uint8'))
plt.axis('off')
plt.title(classes[i])
###Output
_____no_output_____ |
notebooks/data_cleaning_w_2020/1. mysql connection.ipynb | ###Markdown
Set up
###Code
import os
import sys
import random
import time
import datetime
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sys.path.insert(1, '/Users/boyuliu/pyprojects/Joann/Joann-Thailand-Project/src/py')
from utils.mysql_util import get_db_proxy
db = get_db_proxy()
data_dir = '../../datasets/new_dataset/'
if not os.path.isdir(data_dir):
os.makedirs(data_dir)
%matplotlib inline
###Output
_____no_output_____
###Markdown
get data
###Code
sql = 'select * from suppliers;'
suppliers = db.execute_query(sql)
print(suppliers.shape)
suppliers.head()
# suppliers.to_csv(data_dir+'suppliers.csv')
old_cases = pd.read_csv(data_dir + 'cases.csv')
old_cases.shape
sql = 'select * from cases;'
cases = db.execute_query(sql)
print(cases.shape)
cases.head()
sorted([item for item in cases.created.unique() if item is not None])
old_case_dates = sorted([item for item in old_cases.created.unique() if item is not None and pd.notnull(item)])
print('first 15:', old_case_dates[:15])
print()
print('last 10:', old_case_dates[-10:])
###Output
first 10: ['0003-12-29', '0202-02-02', '0202-04-07', '2015-04-07', '2015-11-20', '2016-10-04', '2016-10-05', '2017-02-26', '2018-03-28', '2018-07-30', '2018-08-26', '2018-09-01', '2018-09-02', '2018-09-03', '2018-09-04']
last 10: ['2020-08-31', '2020-09-01', '2020-09-02', '2020-09-03', '2020-09-04', '2020-10-14', '2020-12-01', '2020-12-06', '2109-08-03', '2109-09-08']
###Markdown
count case data by month
###Code
cases = cases[(cases.created<datetime.date(2020, 12, 10)) & (cases.created>datetime.date(2018, 8, 30))]
months = [datetime.date(2018, m, 1) for m in [9, 10, 11, 12]] + \
[datetime.date(2019, m, 1) for m in range(1, 13)] + \
[datetime.date(2020, m, 1) for m in range(1, 13)] + [datetime.date(2021, 1, 1)]
bins = {}
for month_idx in range(len(months)-1):
start_m = months[month_idx]
end_m = months[month_idx+1]
count_data = cases[(cases.created>=start_m) & (cases.created<end_m)].shape[0]
bins[start_m] = count_data
bins
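# A more concise (hypothetical) alternative to the manual month binning above, using pandas directly:
monthly_counts = cases.groupby(pd.to_datetime(cases['created']).dt.to_period('M')).size()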
old_cases = old_cases[(old_cases.created<'2020-10-01') & (old_cases.created>'2018-08-30')]
old_cases.created = pd.to_datetime(old_cases.created)
old_months = [datetime.date(2018, m, 1) for m in [9, 10, 11, 12]] + \
[datetime.date(2019, m, 1) for m in range(1, 13)] + \
[datetime.date(2020, m, 1) for m in range(1, 10)]
old_bins = {}
for month_idx in range(len(old_months)-1):
start_m = old_months[month_idx]
end_m = old_months[month_idx+1]
count_data = old_cases[(old_cases.created>=start_m) & (old_cases.created<end_m)].shape[0]
old_bins[start_m] = count_data
old_bins
a4_dims = (11.7, 8.27)
fig, ax = plt.subplots(figsize=a4_dims)
barplot = sns.barplot(list(old_bins.keys()), list(old_bins.values()))
for item in barplot.get_xticklabels():
item.set_rotation(45)
plt.title('Data Count by Month in Old Cases')
plt.ylabel('number of cases')
plt.show()
a4_dims = (11.7, 8.27)
fig, ax = plt.subplots(figsize=a4_dims)
barplot = sns.barplot(list(bins.keys()), list(bins.values()))
for item in barplot.get_xticklabels():
item.set_rotation(45)
plt.title('Data Count by Month in New Cases')
plt.ylabel('number of cases')
plt.show()
###Output
_____no_output_____
###Markdown
merge case KPI
###Code
sql = 'select * from cases_kpis;'
cases_kpis = db.execute_query(sql)
print(cases_kpis.shape)
print(pd.read_csv(data_dir + 'cases_kpis.csv').shape)
cases_kpis.to_csv(data_dir + 'new_cases_kpis.csv', index=False)
cases_kpis.head()
print(cases_kpis.shape)
print(cases_kpis.case_id.describe())
print(cases_kpis.kpi_id.describe())
cases = pd.merge(cases, cases_kpis[['case_id', 'kpi_id']], left_on='id', right_on='case_id', how='left')
print(cases.shape)
cases.head()
cases.to_csv(data_dir + 'cases.csv', index=False)
###Output
_____no_output_____
###Markdown
2020 demand
###Code
sql = 'select * from 2020_thai_demand_approved;'
demand_data = db.execute_query(sql)
print(demand_data.shape)
demand_data.head()
def convert_year_week(week_year):
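    # swap 'week-year' into 'year-week', e.g. '35-2020' -> '2020-35', so plain string sorting is chronological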
week, year = week_year.split('-')
return year + '-' + week
year_week = demand_data['week'].apply(convert_year_week)
print('number of unique weeks', year_week.nunique(), 'min:',year_week.min(), 'max', year_week.max())
print(year_week.unique())
demand_data['year_week'] = year_week
demand_data.to_csv(data_dir + '2020_demand_data.csv', index=False)
sql = 'select * from 2018_thai_demand_approved;'
demand_data2018 = db.execute_query(sql)
print(demand_data2018.shape)
demand_data2018.head()
demand_data2018['year_week'] = demand_data2018['week'].apply(convert_year_week)
demand_data2018.to_csv(data_dir + '2018_demand_data.csv', index=False)
sql = 'select * from 2019_Thai_Demand_Approved;'
demand_data2019 = db.execute_query(sql)
print(demand_data2019.shape)
demand_data2019.head()
demand_data2019['year_week'] = demand_data2019['Week'].apply(convert_year_week)
demand_data2019.to_csv(data_dir + '2019_demand_data.csv', index=False)
###Output
_____no_output_____
###Markdown
2018 worker voice
###Code
sql = 'select * from legacy_worker_jan_nov_2018;'
prev_cases = db.execute_query(sql)
print(prev_cases.shape)
prev_cases.head()
prev_cases.call_date.min(), prev_cases.call_date.max()
#[datetime.date(2018, m, 1) for m in [9, 10, 11, 12]] + \
old_months = \
[datetime.date(2017, m, 1) for m in range(1, 13)] + \
[datetime.date(2018, m, 1) for m in range(1, 13)]
old_bins = {}
for month_idx in range(len(old_months)-1):
start_m = old_months[month_idx]
end_m = old_months[month_idx+1]
count_data = prev_cases[(prev_cases.call_date>=start_m) & (prev_cases.call_date<end_m)].shape[0]
old_bins[start_m] = count_data
old_bins
a4_dims = (11.7, 8.27)
fig, ax = plt.subplots(figsize=a4_dims)
barplot = sns.barplot(list(old_bins.keys()), list(old_bins.values()))
for item in barplot.get_xticklabels():
item.set_rotation(45)
plt.title('Data Count by Month in Old Cases')
plt.ylabel('number of cases')
plt.show()
prev_cases.to_csv(data_dir + 'prev_wv_data.csv', index=False)
###Output
_____no_output_____
###Markdown
old
###Code
sum(pd.isnull(cases.supplier_id)) / cases.shape[0]
sum(pd.isnull(cases.province_id)) / cases.shape[0]
sum(pd.notnull(cases.supplier_id) & pd.notnull(cases.province_id)) / cases.shape[0]
sum(pd.isnull(cases.supplier_id) & pd.notnull(cases.province_id)) / cases.shape[0]
suppliers.head()
sum(pd.isnull(suppliers.industry_id)) / suppliers.shape[0]
sum(pd.isnull(cases.supplier_id)) / cases.shape[0]
# data_dir = '../../datasets/new_dataset/'
# os.makedirs('../../datasets/new_dataset')
# cases.to_csv(data_dir + 'cases.csv')
print(sum(pd.isnull(cases.province_id)) / cases.shape[0], cases.shape[0])
sql = 'select * from mm_thai_demand_data;'
demand_data = db.execute_query(sql)
demand_data.to_csv(data_dir + 'demand_data.csv')
demand_data.head()
print(demand_data.shape)
def convert_year_week(week_year):
week, year = week_year.split('-')
return year + '-' + week
year_week = demand_data['week'].apply(convert_year_week)
print(year_week.nunique(), year_week.min(), year_week.max())
sql = 'select * from mm_thai_demand_data_back;'
demand_data_back = db.execute_query(sql)
demand_data_back.to_csv(data_dir + 'demand_data_back.csv')
demand_data_back.head()
year_week = demand_data_back['week'].apply(convert_year_week)
print(year_week.nunique(), year_week.min(), year_week.max())
sql = 'select * from issue_categories;'
categories = db.execute_query(sql)
categories
categories.to_csv(data_dir+'categories.csv', index=False)
os.listdir(data_dir)
sql = 'select id, name from provinces where country_id = 8;'
provinces = db.execute_query(sql)
print(provinces.shape)
provinces.head()
provinces.to_csv(data_dir + 'provinces.csv', index=False)
sql = 'select id, name from industries;'
industries = db.execute_query(sql)
industries.head()
industries.to_csv(data_dir + 'industries.csv', index=False)
###Output
_____no_output_____
###Markdown
demand 2019
###Code
sql = 'select * from 2019_Thai_Demand_Approved;'
demand2019 = db.execute_query(sql)
print(demand2019.shape)
demand2019.head()
demand_dir = '/Users/boyuliu/Dropbox (MIT)/Boyu-Joann/Data/Demand_data/clean/'
demand2019.to_csv(demand_dir + 'demand_2019_complete.csv', index=False)
sql = 'select * from 2018_thai_demand_approved;'
demand2018 = db.execute_query(sql)
print(demand2018.shape)
demand2018.head()
demand2018.to_csv(demand_dir + 'demand_2018_complete.csv', index=False)
###Output
_____no_output_____ |
Celestrak/Celestrak_Satellites_over_time.ipynb | ###Markdown
Celestrak - Satellites over time We analyze the number of satellites in space from the first launch to now, and the share of them that has become inactive. These data come from http://www.celestrak.com/, which provides free, easily accessible data as CSV files. The CSV file we got contains many fields, such as the name of the satellite, its owner, launch site, id, apogee and many others. What interested us the most were the status code, the launch date and the decay date, in order to create a graph with years on the X axis and the number of satellites on the Y axis. **Tags:** celestrak opendata satellites Input Import libraries
###Code
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
###Output
_____no_output_____
###Markdown
Variables
###Code
URL = 'http://www.celestrak.com/pub/satcat.csv'
###Output
_____no_output_____
###Markdown
Model Recovery and processing of CSV data
###Code
df = pd.read_csv(URL)
df = df[['OPS_STATUS_CODE', 'LAUNCH_DATE', 'DECAY_DATE']]
df['DECAY_DATE'] = df['DECAY_DATE'].mask(df['DECAY_DATE'].isnull(), '9999')
df['LAUNCH_DATE'] = df['LAUNCH_DATE'].str[:4].astype(int)
df['DECAY_DATE'] = df['DECAY_DATE'].str[:4].astype(int)
years = df['LAUNCH_DATE'].unique()
inactive = list()
active = list()
for year in years:
active.append(len(df[
((df['OPS_STATUS_CODE'].isin(['+', 'P', 'B', 'S', 'X'])) & (df['LAUNCH_DATE'] <= year))
| ((df['DECAY_DATE'] > year) & (df['OPS_STATUS_CODE'] == 'D') & (df['LAUNCH_DATE'] <= year))
].index))
inactive.append(len(df[
((df['OPS_STATUS_CODE'] == 'D') & (df['DECAY_DATE'] <= year))
| ((df['OPS_STATUS_CODE'] == '-') & (df['LAUNCH_DATE'] <= year) )
].index))
###Output
_____no_output_____
###Markdown
Output Display plot of the number of satellites in space over time
###Code
fig = go.Figure(data=[
go.Bar(name='Inactive satellites', x=years, y=inactive),
go.Bar(name='Active satellites', x=years, y=active)
])
# Change the bar mode
fig.update_layout(
title="Number of satellites in space over time",
xaxis_title="Years",
yaxis_title="Number of satellites",
barmode='stack'
)
fig.show()
###Output
_____no_output_____
###Markdown
Source: http://www.celestrak.com/pub/satcat.csv Display the percentage of inactive VS active satellites from 1957 to now
###Code
labels = years
widths = [100/len(years) for year in years]
active_percentage = list()
inactive_percentage = list()
for index, _ in np.ndenumerate(active):
total = active[index[0]] + inactive[index[0]]
active_percentage.append(active[index[0]]/total*100)
inactive_percentage.append(inactive[index[0]]/total*100)
data = {
"Inactive": inactive_percentage,
"Active": active_percentage
}
fig = go.Figure()
for key in data:
fig.add_trace(go.Bar(
name=key,
y=data[key],
x=years,
offset=0
))
fig.update_xaxes(range=[years[0],years[-1]])
fig.update_yaxes(range=[0,100])
fig.update_layout(
title_text="Percentage of inactive VS active satellites from 1957 to now",
barmode="stack",
uniformtext=dict(mode="hide", minsize=10),
)
###Output
_____no_output_____
###Markdown
Celestrak - Satellites over time **Tags:** celestrak opendata satellites We analyze the number of satellites in space from the first launch to now, and the share of them that has become inactive. These data come from http://www.celestrak.com/, which provides free, easily accessible data as CSV files. The CSV file we got contains many fields, such as the name of the satellite, its owner, launch site, id, apogee and many others. What interested us the most were the status code, the launch date and the decay date, in order to create a graph with years on the X axis and the number of satellites on the Y axis. Input Import libraries
###Code
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
###Output
_____no_output_____
###Markdown
Variables
###Code
URL = 'http://www.celestrak.com/pub/satcat.csv'
###Output
_____no_output_____
###Markdown
Model Recovery and processing of CSV data
###Code
df = pd.read_csv(URL)
df = df[['OPS_STATUS_CODE', 'LAUNCH_DATE', 'DECAY_DATE']]
df['DECAY_DATE'] = df['DECAY_DATE'].mask(df['DECAY_DATE'].isnull(), '9999')
df['LAUNCH_DATE'] = df['LAUNCH_DATE'].str[:4].astype(int)
df['DECAY_DATE'] = df['DECAY_DATE'].str[:4].astype(int)
years = df['LAUNCH_DATE'].unique()
inactive = list()
active = list()
for year in years:
active.append(len(df[
((df['OPS_STATUS_CODE'].isin(['+', 'P', 'B', 'S', 'X'])) & (df['LAUNCH_DATE'] <= year))
| ((df['DECAY_DATE'] > year) & (df['OPS_STATUS_CODE'] == 'D') & (df['LAUNCH_DATE'] <= year))
].index))
inactive.append(len(df[
((df['OPS_STATUS_CODE'] == 'D') & (df['DECAY_DATE'] <= year))
| ((df['OPS_STATUS_CODE'] == '-') & (df['LAUNCH_DATE'] <= year) )
].index))
###Output
_____no_output_____
###Markdown
Output Display plot of the number of satellites in space over time
###Code
fig = go.Figure(data=[
go.Bar(name='Inactive satellites', x=years, y=inactive),
go.Bar(name='Active satellites', x=years, y=active)
])
# Change the bar mode
fig.update_layout(
title="Number of satellites in space over time",
xaxis_title="Years",
yaxis_title="Number of satellites",
barmode='stack'
)
fig.show()
###Output
_____no_output_____
###Markdown
Source: http://www.celestrak.com/pub/satcat.csv Display the percentage of inactive VS active satellites from 1957 to now
###Code
labels = years
widths = [100/len(years) for year in years]
active_percentage = list()
inactive_percentage = list()
for index, _ in np.ndenumerate(active):
total = active[index[0]] + inactive[index[0]]
active_percentage.append(active[index[0]]/total*100)
inactive_percentage.append(inactive[index[0]]/total*100)
data = {
"Inactive": inactive_percentage,
"Active": active_percentage
}
fig = go.Figure()
for key in data:
fig.add_trace(go.Bar(
name=key,
y=data[key],
x=years,
offset=0
))
fig.update_xaxes(range=[years[0],years[-1]])
fig.update_yaxes(range=[0,100])
fig.update_layout(
title_text="Percentage of inactive VS active satellites from 1957 to now",
barmode="stack",
uniformtext=dict(mode="hide", minsize=10),
)
###Output
_____no_output_____
###Markdown
Celestrak - Satellites over time We analyze the number of satellites in space from the first launch to now, and the share of them that has become inactive. These data come from http://www.celestrak.com/, which provides free, easily accessible data as CSV files. The CSV file we got contains many fields, such as the name of the satellite, its owner, launch site, id, apogee and many others. What interested us the most were the status code, the launch date and the decay date, in order to create a graph with years on the X axis and the number of satellites on the Y axis.
###Code
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
###Output
_____no_output_____
###Markdown
Input
###Code
url = 'http://www.celestrak.com/pub/satcat.csv'
df = pd.read_csv(url)
df.head()
###Output
_____no_output_____
###Markdown
Model
###Code
df = df[['OPS_STATUS_CODE', 'LAUNCH_DATE', 'DECAY_DATE']]
df['DECAY_DATE'] = df['DECAY_DATE'].mask(df['DECAY_DATE'].isnull(), '9999')
df['LAUNCH_DATE'] = df['LAUNCH_DATE'].str[:4].astype(int)
df['DECAY_DATE'] = df['DECAY_DATE'].str[:4].astype(int)
years = df['LAUNCH_DATE'].unique()
inactive = list()
active = list()
for year in years:
active.append(len(df[
((df['OPS_STATUS_CODE'].isin(['+', 'P', 'B', 'S', 'X'])) & (df['LAUNCH_DATE'] <= year))
| ((df['DECAY_DATE'] > year) & (df['OPS_STATUS_CODE'] == 'D') & (df['LAUNCH_DATE'] <= year))
].index))
inactive.append(len(df[
((df['OPS_STATUS_CODE'] == 'D') & (df['DECAY_DATE'] <= year))
| ((df['OPS_STATUS_CODE'] == '-') & (df['LAUNCH_DATE'] <= year) )
].index))
###Output
_____no_output_____
###Markdown
Output
###Code
fig = go.Figure(data=[
go.Bar(name='Inactive satellites', x=years, y=inactive),
go.Bar(name='Active satellites', x=years, y=active)
])
# Change the bar mode
fig.update_layout(
title="Number of satellites in space over time",
xaxis_title="Years",
yaxis_title="Number of satellites",
barmode='stack'
)
fig.show()
###Output
_____no_output_____
###Markdown
Source: http://www.celestrak.com/pub/satcat.csv
###Code
labels = years
widths = [100/len(years) for year in years]
active_percentage = list()
inactive_percentage = list()
for index, _ in np.ndenumerate(active):
total = active[index[0]] + inactive[index[0]]
active_percentage.append(active[index[0]]/total*100)
inactive_percentage.append(inactive[index[0]]/total*100)
data = {
"Inactive": inactive_percentage,
"Active": active_percentage
}
fig = go.Figure()
for key in data:
fig.add_trace(go.Bar(
name=key,
y=data[key],
x=years,
offset=0
))
fig.update_xaxes(range=[years[0],years[-1]])
fig.update_yaxes(range=[0,100])
fig.update_layout(
    title_text="Percentage of inactive VS active satellites from 1957 to now",
barmode="stack",
uniformtext=dict(mode="hide", minsize=10),
)
###Output
_____no_output_____
###Markdown
Celestrak - Satellites over time **Tags:** celestrak opendata satellites analytics plotly **Author:** [Dumorya](https://github.com/Dumorya) We analyze the number of satellites in space from the first launch to now, and the share of them that has become inactive. These data come from http://www.celestrak.com/, which provides free, easily accessible data as CSV files. The CSV file we got contains many fields, such as the name of the satellite, its owner, launch site, id, apogee and many others. What interested us the most were the status code, the launch date and the decay date, in order to create a graph with years on the X axis and the number of satellites on the Y axis. Input Import libraries
###Code
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
###Output
_____no_output_____
###Markdown
Variables
###Code
URL = 'http://www.celestrak.com/pub/satcat.csv'
###Output
_____no_output_____
###Markdown
Model Recovery and processing of CSV data
###Code
df = pd.read_csv(URL)
df = df[['OPS_STATUS_CODE', 'LAUNCH_DATE', 'DECAY_DATE']]
df['DECAY_DATE'] = df['DECAY_DATE'].mask(df['DECAY_DATE'].isnull(), '9999')
df['LAUNCH_DATE'] = df['LAUNCH_DATE'].str[:4].astype(int)
df['DECAY_DATE'] = df['DECAY_DATE'].str[:4].astype(int)
years = df['LAUNCH_DATE'].unique()
inactive = list()
active = list()
for year in years:
active.append(len(df[
((df['OPS_STATUS_CODE'].isin(['+', 'P', 'B', 'S', 'X'])) & (df['LAUNCH_DATE'] <= year))
| ((df['DECAY_DATE'] > year) & (df['OPS_STATUS_CODE'] == 'D') & (df['LAUNCH_DATE'] <= year))
].index))
inactive.append(len(df[
((df['OPS_STATUS_CODE'] == 'D') & (df['DECAY_DATE'] <= year))
| ((df['OPS_STATUS_CODE'] == '-') & (df['LAUNCH_DATE'] <= year) )
].index))
###Output
_____no_output_____
###Markdown
Output Display plot of the number of satellites in space over time
###Code
fig = go.Figure(data=[
go.Bar(name='Inactive satellites', x=years, y=inactive),
go.Bar(name='Active satellites', x=years, y=active)
])
# Change the bar mode
fig.update_layout(
title="Number of satellites in space over time",
xaxis_title="Years",
yaxis_title="Number of satellites",
barmode='stack'
)
fig.show()
###Output
_____no_output_____
###Markdown
Source: http://www.celestrak.com/pub/satcat.csv Display the percentage of inactive VS active satellites from 1957 to now
###Code
labels = years
widths = [100/len(years) for year in years]
active_percentage = list()
inactive_percentage = list()
for index, _ in np.ndenumerate(active):
total = active[index[0]] + inactive[index[0]]
active_percentage.append(active[index[0]]/total*100)
inactive_percentage.append(inactive[index[0]]/total*100)
data = {
"Inactive": inactive_percentage,
"Active": active_percentage
}
fig = go.Figure()
for key in data:
fig.add_trace(go.Bar(
name=key,
y=data[key],
x=years,
offset=0
))
fig.update_xaxes(range=[years[0],years[-1]])
fig.update_yaxes(range=[0,100])
fig.update_layout(
title_text="Percentage of inactive VS active satellites from 1957 to now",
barmode="stack",
uniformtext=dict(mode="hide", minsize=10),
)
###Output
_____no_output_____
###Markdown
Celestrak - Satellites over time **Tags:** celestrak opendata satellites We analyze the number of satellites in space from the first launch to now, and the share of them that has become inactive. These data come from http://www.celestrak.com/, which provides free, easily accessible data as CSV files. The CSV file we got contains many fields, such as the name of the satellite, its owner, launch site, id, apogee and many others. What interested us the most were the status code, the launch date and the decay date, in order to create a graph with years on the X axis and the number of satellites on the Y axis. Input Import libraries
###Code
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
###Output
_____no_output_____
###Markdown
Variables
###Code
URL = 'http://www.celestrak.com/pub/satcat.csv'
###Output
_____no_output_____
###Markdown
Model Recovery and processing of CSV data
###Code
df = pd.read_csv(URL)
df = df[['OPS_STATUS_CODE', 'LAUNCH_DATE', 'DECAY_DATE']]
df['DECAY_DATE'] = df['DECAY_DATE'].mask(df['DECAY_DATE'].isnull(), '9999')
df['LAUNCH_DATE'] = df['LAUNCH_DATE'].str[:4].astype(int)
df['DECAY_DATE'] = df['DECAY_DATE'].str[:4].astype(int)
years = df['LAUNCH_DATE'].unique()
inactive = list()
active = list()
for year in years:
active.append(len(df[
((df['OPS_STATUS_CODE'].isin(['+', 'P', 'B', 'S', 'X'])) & (df['LAUNCH_DATE'] <= year))
| ((df['DECAY_DATE'] > year) & (df['OPS_STATUS_CODE'] == 'D') & (df['LAUNCH_DATE'] <= year))
].index))
inactive.append(len(df[
((df['OPS_STATUS_CODE'] == 'D') & (df['DECAY_DATE'] <= year))
| ((df['OPS_STATUS_CODE'] == '-') & (df['LAUNCH_DATE'] <= year) )
].index))
###Output
_____no_output_____
###Markdown
Output Display plot of the number of satellites in space over time
###Code
fig = go.Figure(data=[
go.Bar(name='Inactive satellites', x=years, y=inactive),
go.Bar(name='Active satellites', x=years, y=active)
])
# Change the bar mode
fig.update_layout(
title="Number of satellites in space over time",
xaxis_title="Years",
yaxis_title="Number of satellites",
barmode='stack'
)
fig.show()
###Output
_____no_output_____
###Markdown
Source: http://www.celestrak.com/pub/satcat.csv Display the percentage of inactive VS active satellites from 1957 to now
###Code
labels = years
widths = [100/len(years) for year in years]
active_percentage = list()
inactive_percentage = list()
for index, _ in np.ndenumerate(active):
total = active[index[0]] + inactive[index[0]]
active_percentage.append(active[index[0]]/total*100)
inactive_percentage.append(inactive[index[0]]/total*100)
data = {
"Inactive": inactive_percentage,
"Active": active_percentage
}
fig = go.Figure()
for key in data:
fig.add_trace(go.Bar(
name=key,
y=data[key],
x=years,
offset=0
))
fig.update_xaxes(range=[years[0],years[-1]])
fig.update_yaxes(range=[0,100])
fig.update_layout(
title_text="Percentage of inactive VS active satellites from 1957 to now",
barmode="stack",
uniformtext=dict(mode="hide", minsize=10),
)
###Output
_____no_output_____
###Markdown
Celestrak - Satellites over time **Tags:** celestrak opendata satellites analytics plotly **Author:** [Dumorya](https://github.com/Dumorya) We analyze the number of satellites in space from the first launch to now, and the share of them that has become inactive. These data come from http://www.celestrak.com/, which provides free, easily accessible data as CSV files. The CSV file we got contains many fields, such as the name of the satellite, its owner, launch site, id, apogee and many others. What interested us the most were the status code, the launch date and the decay date, in order to create a graph with years on the X axis and the number of satellites on the Y axis. Input Import libraries
###Code
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
###Output
_____no_output_____
###Markdown
Variables
###Code
URL = 'http://www.celestrak.com/pub/satcat.csv'
###Output
_____no_output_____
###Markdown
Model Recovery and processing of CSV data
###Code
df = pd.read_csv(URL)
df = df[['OPS_STATUS_CODE', 'LAUNCH_DATE', 'DECAY_DATE']]
df['DECAY_DATE'] = df['DECAY_DATE'].mask(df['DECAY_DATE'].isnull(), '9999')
df['LAUNCH_DATE'] = df['LAUNCH_DATE'].str[:4].astype(int)
df['DECAY_DATE'] = df['DECAY_DATE'].str[:4].astype(int)
years = df['LAUNCH_DATE'].unique()
inactive = list()
active = list()
for year in years:
active.append(len(df[
((df['OPS_STATUS_CODE'].isin(['+', 'P', 'B', 'S', 'X'])) & (df['LAUNCH_DATE'] <= year))
| ((df['DECAY_DATE'] > year) & (df['OPS_STATUS_CODE'] == 'D') & (df['LAUNCH_DATE'] <= year))
].index))
inactive.append(len(df[
((df['OPS_STATUS_CODE'] == 'D') & (df['DECAY_DATE'] <= year))
| ((df['OPS_STATUS_CODE'] == '-') & (df['LAUNCH_DATE'] <= year) )
].index))
###Output
_____no_output_____
###Markdown
Output Display plot of the number of satellites in space over time
###Code
fig = go.Figure(data=[
go.Bar(name='Inactive satellites', x=years, y=inactive),
go.Bar(name='Active satellites', x=years, y=active)
])
# Change the bar mode
fig.update_layout(
title="Number of satellites in space over time",
xaxis_title="Years",
yaxis_title="Number of satellites",
barmode='stack'
)
fig.show()
###Output
_____no_output_____
###Markdown
Source: http://www.celestrak.com/pub/satcat.csv Display the percentage of inactive VS active satellites from 1957 to now
###Code
labels = years
widths = [100/len(years) for year in years]
active_percentage = list()
inactive_percentage = list()
for index, _ in np.ndenumerate(active):
total = active[index[0]] + inactive[index[0]]
active_percentage.append(active[index[0]]/total*100)
inactive_percentage.append(inactive[index[0]]/total*100)
data = {
"Inactive": inactive_percentage,
"Active": active_percentage
}
fig = go.Figure()
for key in data:
fig.add_trace(go.Bar(
name=key,
y=data[key],
x=years,
offset=0
))
fig.update_xaxes(range=[years[0],years[-1]])
fig.update_yaxes(range=[0,100])
fig.update_layout(
title_text="Percentage of inactive VS active satellites from 1957 to now",
barmode="stack",
uniformtext=dict(mode="hide", minsize=10),
)
###Output
_____no_output_____
###Markdown
Celestrak - Satellites over time **Tags:** celestrak opendata satellites **Author:** [Dumorya](https://github.com/Dumorya) We analyze the number of satellites in space from the first launch to now, and the share of them that has become inactive. These data come from http://www.celestrak.com/, which provides free, easily accessible data as CSV files. The CSV file we got contains many fields, such as the name of the satellite, its owner, launch site, id, apogee and many others. What interested us the most were the status code, the launch date and the decay date, in order to create a graph with years on the X axis and the number of satellites on the Y axis. Input Import libraries
###Code
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
###Output
_____no_output_____
###Markdown
Variables
###Code
URL = 'http://www.celestrak.com/pub/satcat.csv'
###Output
_____no_output_____
###Markdown
Model Recovery and processing of CSV data
###Code
df = pd.read_csv(URL)
df = df[['OPS_STATUS_CODE', 'LAUNCH_DATE', 'DECAY_DATE']]
df['DECAY_DATE'] = df['DECAY_DATE'].mask(df['DECAY_DATE'].isnull(), '9999')
df['LAUNCH_DATE'] = df['LAUNCH_DATE'].str[:4].astype(int)
df['DECAY_DATE'] = df['DECAY_DATE'].str[:4].astype(int)
years = df['LAUNCH_DATE'].unique()
inactive = list()
active = list()
for year in years:
active.append(len(df[
((df['OPS_STATUS_CODE'].isin(['+', 'P', 'B', 'S', 'X'])) & (df['LAUNCH_DATE'] <= year))
| ((df['DECAY_DATE'] > year) & (df['OPS_STATUS_CODE'] == 'D') & (df['LAUNCH_DATE'] <= year))
].index))
inactive.append(len(df[
((df['OPS_STATUS_CODE'] == 'D') & (df['DECAY_DATE'] <= year))
| ((df['OPS_STATUS_CODE'] == '-') & (df['LAUNCH_DATE'] <= year) )
].index))
###Output
_____no_output_____
###Markdown
Output Display plot of the number of satellites in space over time
###Code
fig = go.Figure(data=[
go.Bar(name='Inactive satellites', x=years, y=inactive),
go.Bar(name='Active satellites', x=years, y=active)
])
# Change the bar mode
fig.update_layout(
title="Number of satellites in space over time",
xaxis_title="Years",
yaxis_title="Number of satellites",
barmode='stack'
)
fig.show()
###Output
_____no_output_____
###Markdown
Source: http://www.celestrak.com/pub/satcat.csv Display the percentage of inactive VS active satellites from 1957 to now
###Code
labels = years
widths = [100/len(years) for year in years]
active_percentage = list()
inactive_percentage = list()
for index, _ in np.ndenumerate(active):
total = active[index[0]] + inactive[index[0]]
active_percentage.append(active[index[0]]/total*100)
inactive_percentage.append(inactive[index[0]]/total*100)
data = {
"Inactive": inactive_percentage,
"Active": active_percentage
}
fig = go.Figure()
for key in data:
fig.add_trace(go.Bar(
name=key,
y=data[key],
x=years,
offset=0
))
fig.update_xaxes(range=[years[0],years[-1]])
fig.update_yaxes(range=[0,100])
fig.update_layout(
title_text="Percentage of inactive VS active satellites from 1957 to now",
barmode="stack",
uniformtext=dict(mode="hide", minsize=10),
)
###Output
_____no_output_____ |
.ipynb_checkpoints/recommendation-checkpoint.ipynb | ###Markdown
using KNN (k-nearest neighbours)
###Code
from sklearn.neighbors import NearestNeighbors
avg_movie_rating.head()
#only include movies with more than 10 ratings
movie_plus_10_ratings = avg_movie_rating.loc[avg_movie_rating['count']>=10]
print(len(movie_plus_10_ratings))
movie_plus_10_ratings
filtered_ratings = pd.merge(movie_plus_10_ratings, ratings, on="movieId")
len(filtered_ratings)
filtered_ratings.head()
#create a matrix table with movieIds on the rows and userIds in the columns.
#replace NAN values with 0
movie_wide = filtered_ratings.pivot(index = 'movieId', columns = 'userId', values = 'rating').fillna(0)
movie_wide.head()
#specify model parameters
model_knn = NearestNeighbors(metric='cosine',algorithm='brute')
#fit model to the data set
model_knn.fit(movie_wide)
#Gets the top 10 nearest neighbours for the movie
def print_similar_movies(query_index) :
    #get the row of ratings (one per user) for a specific movieId
query_index_movie_ratings = movie_wide.loc[query_index,:].values.reshape(1,-1)
#get the closest 10 movies and their distances from the movie specified
distances,indices = model_knn.kneighbors(query_index_movie_ratings,n_neighbors = 11)
#write a loop that prints the similar movies for a specified movie.
for i in range(0,len(distances.flatten())):
#get the title of the random movie that was chosen
get_movie = movie_list.loc[movie_list['movieId']==query_index]['title']
#for the first movie in the list i.e closest print the title
if i==0:
print('Recommendations for {0}:\n'.format(get_movie))
else :
#get the indiciees for the closest movies
indices_flat = indices.flatten()[i]
#get the title of the movie
get_movie = movie_list.loc[movie_list['movieId']==movie_wide.iloc[indices_flat,:].name]['title']
#print the movie
print('{0}: {1}, with distance of {2}:'.format(i,get_movie,distances.flatten()[i]))
print_similar_movies(112552)
print_similar_movies(1)
print_similar_movies(96079)
movies_with_genres.head()
#Getting the movies list with only genres like Musical and other such columns
movie_content_df_temp = movies_with_genres.copy()
movie_content_df_temp.set_index('movieId')
movie_content_df = movie_content_df_temp.drop(columns = ['movieId','title','genres'])
#movie_content_df = movie_content_df.as_matrix()
movie_content_df
# Import linear_kernel
from sklearn.metrics.pairwise import linear_kernel
# Compute the pairwise similarity matrix (dot products of the genre vectors via linear_kernel;
# this equals cosine similarity only when the rows are L2-normalised)
cosine_sim = linear_kernel(movie_content_df, movie_content_df)
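# A small, hypothetical helper sketching one way to use the similarity matrix:
# return the titles of the n movies whose genre vectors are most similar to a given row position.
def similar_by_genre(row_idx, n=10):
    order = cosine_sim[row_idx].argsort()[::-1]        # most similar first
    order = [i for i in order if i != row_idx][:n]     # drop the movie itself
    return movie_content_df_temp.iloc[order]['title']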
###Output
_____no_output_____ |
model-selector.ipynb | ###Markdown
Reference* [vit_jax_augreg.ipynb](https://colab.research.google.com/github/google-research/vision_transformer/blob/master/vit_jax_augreg.ipynb)
###Code
import tensorflow as tf
import pandas as pd
# Load master table from Cloud.
with tf.io.gfile.GFile("gs://vit_models/augreg/index.csv") as f:
df = pd.read_csv(f)
df.columns
# How many different pre-training datasets?
df["ds"].value_counts()
###Output
_____no_output_____
###Markdown
Filter based on the following criteria:* Models should be pre-trained on ImageNet-21k and fine-tuned on ImageNet-1k.* The final ImageNet-1k validation accuracy should be at least 75%. * The transfer resolution should be 224 $\times$ 224.
###Code
i21k_i1k_models = df.query("ds=='i21k' & adapt_ds=='imagenet2012'")
models_ge_75 = i21k_i1k_models.query("adapt_final_test >= 0.75 & adapt_resolution==224")
models_ge_75.head()
models_ge_75["name"].value_counts()
###Output
_____no_output_____
###Markdown
Now, we first fetch the maximum accuracies with respect to a given model type and then we pick the underlying models.
###Code
best_scores_by_model_type = (
models_ge_75.groupby("name")["adapt_final_test"].max().values
)
results = models_ge_75["adapt_final_test"].apply(
lambda x: x in best_scores_by_model_type
)
models_ge_75[results].sort_values(by=["adapt_final_test"], ascending=False).head(10)
models_ge_75[results].sort_values(by=["adapt_final_test"], ascending=False).head(10)[
"adapt_filename"
].values.tolist()
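# An equivalent, more direct sketch for picking the single best row per model type:
best_per_type = models_ge_75.loc[models_ge_75.groupby("name")["adapt_final_test"].idxmax()]
best_per_type.sort_values(by="adapt_final_test", ascending=False).head(10)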
###Output
_____no_output_____ |
notebooks/10.3.3-PytorchSentimentAnalysis.ipynb | ###Markdown
1 - Simple Sentiment AnalysisIn this series we'll be building a machine learning model to detect sentiment (i.e. detect if a sentence is positive or negative) using PyTorch and TorchText. This will be done on movie reviews, using the [IMDb dataset](http://ai.stanford.edu/~amaas/data/sentiment/).In this first notebook, we'll start very simple to understand the general concepts whilst not really caring about good results. Further notebooks will build on this knowledge and we'll actually get good results. IntroductionWe'll be using a **recurrent neural network** (RNN) as they are commonly used in analysing sequences. An RNN takes in sequence of words, $X=\{x_1, ..., x_T\}$, one at a time, and produces a _hidden state_, $h$, for each word. We use the RNN _recurrently_ by feeding in the current word $x_t$ as well as the hidden state from the previous word, $h_{t-1}$, to produce the next hidden state, $h_t$. $$h_t = \text{RNN}(x_t, h_{t-1})$$Once we have our final hidden state, $h_T$, (from feeding in the last word in the sequence, $x_T$) we feed it through a linear layer, $f$, (also known as a fully connected layer), to receive our predicted sentiment, $\hat{y} = f(h_T)$.Below shows an example sentence, with the RNN predicting zero, which indicates a negative sentiment. The RNN is shown in orange and the linear layer shown in silver. Note that we use the same RNN for every word, i.e. it has the same parameters. The initial hidden state, $h_0$, is a tensor initialized to all zeros. **Note:** some layers and steps have been omitted from the diagram, but these will be explained later. Preparing DataOne of the main concepts of TorchText is the `Field`. These define how your data should be processed. In our sentiment classification task the data consists of both the raw string of the review and the sentiment, either "pos" or "neg".The parameters of a `Field` specify how the data should be processed. We use the `TEXT` field to define how the review should be processed, and the `LABEL` field to process the sentiment. Our `TEXT` field has `tokenize='spacy'` as an argument. This defines that the "tokenization" (the act of splitting the string into discrete "tokens") should be done using the [spaCy](https://spacy.io) tokenizer. If no `tokenize` argument is passed, the default is simply splitting the string on spaces.`LABEL` is defined by a `LabelField`, a special subset of the `Field` class specifically used for handling labels. We will explain the `dtype` argument later.For more on `Fields`, go [here](https://github.com/pytorch/text/blob/master/torchtext/data/field.py).We also set the random seeds for reproducibility.
###Code
import torch
from torchtext import data
SEED = 1234
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
TEXT = data.Field(tokenize='spacy')
LABEL = data.LabelField(dtype=torch.float)
###Output
_____no_output_____
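###Markdown
Before preparing the data, a tiny standalone sketch (made-up sizes, using `nn.RNNCell` rather than the full `nn.RNN` we build later) of the recurrence $h_t = \text{RNN}(x_t, h_{t-1})$ described above:
###Code
import torch
import torch.nn as nn

# unroll the recurrence by hand: 4-dim "word vectors", 3-dim hidden state, a "sentence" of 5 words
rnn_cell = nn.RNNCell(input_size=4, hidden_size=3)
h = torch.zeros(1, 3)                     # h_0 is initialised to all zeros
for x_t in torch.randn(5, 1, 4):          # one made-up word vector at a time
    h = rnn_cell(x_t, h)                  # feed the current word and the previous hidden state
print(h.shape)                            # the final hidden state h_T, shape [1, 3]
###Output
_____no_output_____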
###Markdown
Another handy feature of TorchText is that it has support for common datasets used in natural language processing (NLP). The following code automatically downloads the IMDb dataset and splits it into the canonical train/test splits as `torchtext.datasets` objects. It processes the data using the `Fields` we have previously defined.
###Code
from torchtext import datasets
train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)
###Output
_____no_output_____
###Markdown
We can see how many examples are in each split by checking their length.
###Code
print(f'Number of training examples: {len(train_data)}')
print(f'Number of testing examples: {len(test_data)}')
###Output
Number of training examples: 25000
Number of testing examples: 25000
###Markdown
We can also check an example.
###Code
print(vars(train_data.examples[0]))
###Output
{'text': ['it', "'s", 'all', 'very', 'simple', '.', 'Jake', 'goes', 'to', 'prison', ',', 'and', 'spends', 'five', 'years', 'with', 'the', 'con', 'and', 'the', 'chess', 'masters', '.', 'they', 'get', 'compassionate', 'about', 'his', 'history', 'of', 'loss', 'and', 'failure', ',', 'and', 'utterly', 'misery', 'that', 'he', 'lives', 'on', 'because', 'of', 'his', 'belief', 'in', 'his', 'mastery', 'of', 'small', 'tricks', 'and', 'control', 'of', 'the', 'rules', 'of', 'small', 'crooks', '.', 'they', 'decide', 'to', 'give', 'Jake', 'the', 'ultimate', 'freedom', ':', 'from', 'his', 'innermost', 'fears', ',', 'from', 'what', 'he', 'believes', 'to', 'be', 'himself', '.', 'for', 'that', ',', 'they', 'take', 'him', 'on', 'a', 'trip', 'where', 'he', 'got', 'to', 'let', 'go', 'all', 'the', 'fear', ',', 'all', 'the', 'pride', ',', 'all', 'the', 'hope', '-', 'to', 'be', 'reborn', 'as', 'true', 'master', 'of', 'his', 'will.<br', '/><br', '/>it', "'s", 'a', 'clever', 'movie', 'about', 'the', 'journey', 'of', 'illumination', ',', 'about', 'the', 'infinite', 'gambles', 'and', 'games', 'that', 'we', 'do', 'with', 'and', 'within', 'ourselves', '.', '10/10', ',', 'no', 'doubt', '.'], 'label': 'pos'}
###Markdown
The IMDb dataset only has train/test splits, so we need to create a validation set. We can do this with the `.split()` method. By default this splits 70/30, however by passing a `split_ratio` argument, we can change the ratio of the split, i.e. a `split_ratio` of 0.8 would mean 80% of the examples make up the training set and 20% make up the validation set. We also pass our random seed to the `random_state` argument, ensuring that we get the same train/validation split each time.
###Code
import random
train_data, valid_data = train_data.split(random_state=random.seed(SEED))
###Output
_____no_output_____
###Markdown
Again, we'll view how many examples are in each split.
###Code
print(f'Number of training examples: {len(train_data)}')
print(f'Number of validation examples: {len(valid_data)}')
print(f'Number of testing examples: {len(test_data)}')
###Output
Number of training examples: 17500
Number of validation examples: 7500
Number of testing examples: 25000
###Markdown
Next, we have to build a _vocabulary_. This is effectively a look-up table where every unique word in your data set has a corresponding _index_ (an integer). We do this as our machine learning model cannot operate on strings, only numbers. Each _index_ is used to construct a _one-hot_ vector for each word. A one-hot vector is a vector where all of the elements are 0, except one, which is 1, and the dimensionality is the total number of unique words in your vocabulary, commonly denoted by $V$. The number of unique words in our training set is over 100,000, which means that our one-hot vectors will have over 100,000 dimensions! This will make training slow and possibly won't fit onto your GPU (if you're using one). There are two ways to effectively cut down our vocabulary: we can either only take the top $n$ most common words or ignore words that appear less than $m$ times. We'll do the former, only keeping the top 25,000 words. What do we do with words that appear in examples but we have cut from the vocabulary? We replace them with a special _unknown_ or `<unk>` token. For example, if the sentence was "This film is great and I love it" but the word "love" was not in the vocabulary, it would become "This film is great and I `<unk>` it". The following builds the vocabulary, only keeping the most common `max_size` tokens.
###Code
TEXT.build_vocab(train_data, max_size=25000)
LABEL.build_vocab(train_data)
###Output
_____no_output_____
###Markdown
Why do we only build the vocabulary on the training set? When testing any machine learning system you do not want to look at the test set in any way. We do not include the validation set as we want it to reflect the test set as much as possible.
###Code
print(f"Unique tokens in TEXT vocabulary: {len(TEXT.vocab)}")
print(f"Unique tokens in LABEL vocabulary: {len(LABEL.vocab)}")
###Output
Unique tokens in TEXT vocabulary: 25002
Unique tokens in LABEL vocabulary: 2
###Markdown
Why is the vocab size 25002 and not 25000? One of the additional tokens is the `<unk>` token and the other is a `<pad>` token. When we feed sentences into our model, we feed a _batch_ of them at a time, i.e. more than one at a time, and all sentences in the batch need to be the same size. Thus, to ensure each sentence in the batch is the same size, any shorter than the longest within the batch are padded. We can also view the most common words in the vocabulary.
###Code
print(TEXT.vocab.freqs.most_common(20))
###Output
[('the', 203481), (',', 192055), ('.', 165314), ('and', 109621), ('a', 109392), ('of', 100810), ('to', 93354), ('is', 76292), ('in', 61329), ('I', 54220), ('it', 53449), ('that', 49077), ('"', 44294), ("'s", 43334), ('this', 42327), ('-', 36891), ('/><br', 35921), ('was', 35321), ('as', 30436), ('with', 30001)]
###Markdown
We can also see the vocabulary directly using either the `stoi` (**s**tring **to** **i**nt) or `itos` (**i**nt **to** **s**tring) method.
###Code
print(TEXT.vocab.itos[:10])
###Output
['<unk>', '<pad>', 'the', ',', '.', 'and', 'a', 'of', 'to', 'is']
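###Markdown
To make the earlier padding remark concrete, here is a minimal sketch (hypothetical sentences, plain Python rather than TorchText's own batching) of how two sentences of different lengths become equal-length index sequences using the vocabulary we just built:
###Code
toy_batch = [['this', 'film', 'is', 'great'], ['terrible']]
max_len = max(len(s) for s in toy_batch)
padded = [s + ['<pad>'] * (max_len - len(s)) for s in toy_batch]
indexed = [[TEXT.vocab.stoi[t] for t in s] for s in padded]  # out-of-vocabulary tokens map to <unk> automatically
print(padded)
print(indexed)
###Output
_____no_output_____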
###Markdown
We can also check the labels, ensuring 0 is for negative and 1 is for positive.
###Code
print(LABEL.vocab.stoi)
###Output
defaultdict(<function _default_unk_index at 0x7f799132c8c8>, {'neg': 0, 'pos': 1})
###Markdown
The final step of preparing the data is creating the iterators. We iterate over these in the training/evaluation loop, and they return a batch of examples (indexed and converted into tensors) at each iteration.We'll use a `BucketIterator` which is a special type of iterator that will return a batch of examples where each example is of a similar length, minimizing the amount of padding per example.We also want to place the tensors returned by the iterator on the GPU (if you're using one). PyTorch handles this using `torch.device`, we then pass this device to the iterator.
###Code
BATCH_SIZE = 64
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size=BATCH_SIZE,
device=device)
###Output
_____no_output_____
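###Markdown
As an optional sanity check, we can pull a single batch from the iterator and confirm the tensor shapes it produces:
###Code
batch = next(iter(train_iterator))
print(batch.text.shape)   # [sentence length, batch size]
print(batch.label.shape)  # [batch size]
###Output
_____no_output_____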
###Markdown
Build the Model The next stage is building the model that we'll eventually train and evaluate. There is a small amount of boilerplate code when creating models in PyTorch; note how our `RNN` class is a sub-class of `nn.Module` and the use of `super`. Within the `__init__` we define the _layers_ of the module. Our three layers are an _embedding_ layer, our RNN, and a _linear_ layer. All layers have their parameters initialized to random values, unless explicitly specified. The embedding layer is used to transform our sparse one-hot vector (sparse as most of the elements are 0) into a dense embedding vector (dense as the dimensionality is a lot smaller and all the elements are real numbers). This embedding layer is simply a single fully connected layer. As well as reducing the dimensionality of the input to the RNN, there is the theory that words which have similar impact on the sentiment of the review are mapped close together in this dense vector space. For more information about word embeddings, see [here](https://monkeylearn.com/blog/word-embeddings-transform-text-numbers/). The RNN layer is our RNN which takes in our dense vector and the previous hidden state $h_{t-1}$, which it uses to calculate the next hidden state, $h_t$. Finally, the linear layer takes the final hidden state and feeds it through a fully connected layer, $f(h_T)$, transforming it to the correct output dimension. The `forward` method is called when we feed examples into our model. Each batch, `x`, is a tensor of size _**[sentence length, batch size]**_. That is a batch of sentences, each having each word converted into a one-hot vector. You may notice that this tensor should have another dimension due to the one-hot vectors, however PyTorch conveniently stores a one-hot vector as its index value, i.e. the tensor representing a sentence is just a tensor of the indexes for each token in that sentence. The input batch is then passed through the embedding layer to get `embedded`, which gives us a dense vector representation of our sentences. `embedded` is a tensor of size _**[sentence length, batch size, embedding dim]**_. `embedded` is then fed into the RNN. In some frameworks you must feed the initial hidden state, $h_0$, into the RNN, however in PyTorch, if no initial hidden state is passed as an argument it defaults to a tensor of all zeros. The RNN returns 2 tensors, `output` of size _**[sentence length, batch size, hidden dim]**_ and `hidden` of size _**[1, batch size, hidden dim]**_. `output` is the concatenation of the hidden state from every time step, whereas `hidden` is simply the final hidden state. We verify this using the `assert` statement. Note the `squeeze` method, which is used to remove a dimension of size 1. Finally, we feed the last hidden state, `hidden`, through the linear layer, `fc`, to produce a prediction.
###Code
import torch.nn as nn
class RNN(nn.Module):
def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim):
super().__init__()
self.embedding = nn.Embedding(input_dim, embedding_dim)
self.rnn = nn.RNN(embedding_dim, hidden_dim)
self.fc = nn.Linear(hidden_dim, output_dim)
def forward(self, x):
#x = [sent len, batch size]
embedded = self.embedding(x)
#embedded = [sent len, batch size, emb dim]
output, hidden = self.rnn(embedded)
#output = [sent len, batch size, hid dim]
#hidden = [1, batch size, hid dim]
assert torch.equal(output[-1,:,:], hidden.squeeze(0))
return self.fc(hidden.squeeze(0))
###Output
_____no_output_____
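###Markdown
A minimal sketch (throwaway model, made-up dimensions) that just confirms the shapes walked through above:
###Code
# vocabulary of 100 tokens, sentences of 5 words, a batch of 3 sentences
tmp_model = RNN(input_dim=100, embedding_dim=16, hidden_dim=8, output_dim=1)
fake_batch = torch.randint(0, 100, (5, 3))   # [sent len, batch size] of token indices
print(tmp_model(fake_batch).shape)           # torch.Size([3, 1]) -> one score per sentence
###Output
_____no_output_____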
###Markdown
We now create an instance of our RNN class. The input dimension is the dimension of the one-hot vectors, which is equal to the vocabulary size. The embedding dimension is the size of the dense word vectors. This is usually around 50-250 dimensions, but depends on the size of the vocabulary.The hidden dimension is the size of the hidden states. This is usually around 100-500 dimensions, but also depends on factors such as on the vocabulary size, the size of the dense vectors and the complexity of the task.The output dimension is usually the number of classes, however in the case of only 2 classes the output value is between 0 and 1 and thus can be 1-dimensional, i.e. a single scalar real number.
###Code
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100
HIDDEN_DIM = 256
OUTPUT_DIM = 1
model = RNN(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM)
###Output
_____no_output_____
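###Markdown
A small optional helper to see how many trainable parameters these choices add up to:
###Code
def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

print(f'The model has {count_parameters(model):,} trainable parameters')
###Output
_____no_output_____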
###Markdown
Train the Model Now we'll set up the training and then train the model. First, we'll create an optimizer. This is the algorithm we use to update the parameters of the module. Here, we'll use _stochastic gradient descent_ (SGD). The first argument is the parameters that will be updated by the optimizer; the second is the learning rate, i.e. how much we'll change the parameters by when we do a parameter update.
###Code
import torch.optim as optim
optimizer = optim.SGD(model.parameters(), lr=1e-3)
###Output
_____no_output_____
###Markdown
Next, we'll define our loss function. In PyTorch this is commonly called a criterion. The loss function here is _binary cross entropy with logits_. The prediction for each sentence is an unbound real number, as our labels are either 0 or 1, we want to restrict the number between 0 and 1, we do this using the _sigmoid_ or _logit_ functions. We then calculate this bound scalar using binary cross entropy. The `BCEWithLogitsLoss` criterion carries out both the sigmoid and the binary cross entropy steps.
###Code
criterion = nn.BCEWithLogitsLoss()
###Output
_____no_output_____
###Markdown
Using `.to`, we can place the model and the criterion on the GPU (if we have one).
###Code
model = model.to(device)
criterion = criterion.to(device)
###Output
_____no_output_____
###Markdown
Our criterion function calculates the loss, however we have to write our function to calculate the accuracy. This function first feeds the predictions through a sigmoid layer, squashing the values between 0 and 1, we then round them to the nearest integer. This rounds any value greater than 0.5 to 1 (a positive sentiment) and the rest to 0 (a negative sentiment).We then calculate how many rounded predictions equal the actual labels and average it across the batch.
###Code
def binary_accuracy(preds, y):
"""
Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8
"""
#round predictions to the closest integer
rounded_preds = torch.round(torch.sigmoid(preds))
correct = (rounded_preds == y).float() #convert into float for division
acc = correct.sum()/len(correct)
return acc
###Output
_____no_output_____
###Markdown
The `train` function iterates over all examples, one batch at a time. `model.train()` is used to put the model in "training mode", which turns on _dropout_ and _batch normalization_. Although we aren't using them in this model, it's good practice to include it.For each batch, we first zero the gradients. Each parameter in a model has a `grad` attribute which stores the gradient calculated by the `criterion`. PyTorch does not automatically remove (or "zero") the gradients calculated from the last gradient calculation, so they must be manually zeroed.We then feed the batch of sentences, `batch.text`, into the model. Note, you do not need to do `model.forward(batch.text)`, simply calling the model works. The `squeeze` is needed as the predictions are initially size _**[batch size, 1]**_, and we need to remove the dimension of size 1 as PyTorch expects the predictions input to a loss function to simply be of size _**[batch size]**_.The loss and accuracy are then calculated using our predictions and the labels, `batch.label`. We calculate the gradient of each parameter with `loss.backward()`, and then update the parameters using the gradients and optimizer algorithm with `optimizer.step()`.The loss and accuracy is accumulated across the epoch, the `.item()` method is used to extract a scalar from a tensor which only contains a single value.Finally, we return the loss and accuracy, averaged across the epoch. The `len` of an iterator is the number of batches in the iterator.You may recall when initializing the `LABEL` field, we set `dtype=torch.float`. This is because TorchText sets tensors to be `LongTensor`s by default, however our criterion expects both inputs to be `FloatTensor`s. As we have manually set the `dtype` to be `torch.float`, this is automatically done for us. The alternative method of doing this would be to do the conversion inside the `train` function by passing `batch.label.float()` instad of `batch.label` to the criterion.
###Code
def train(model, iterator, optimizer, criterion):
epoch_loss = 0
epoch_acc = 0
model.train()
for batch in iterator:
optimizer.zero_grad()
predictions = model(batch.text).squeeze(1)
loss = criterion(predictions, batch.label)
acc = binary_accuracy(predictions, batch.label)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
###Output
_____no_output_____
###Markdown
`evaluate` is similar to `train`, with a few modifications as you don't want to update the parameters when evaluating.`model.eval()` puts the model in "evaluation mode", this turns off _dropout_ and _batch normalization_. Again, we are not using them in this model, but it is good practice to include them.No gradients are calculated on PyTorch operations inside the `with no_grad()` block. This causes less memory to be used and speeds up computation.The rest of the function is the same as `train`, with the removal of `optimizer.zero_grad()`, `loss.backward()` and `optimizer.step()`, as we do not update the model's parameters when evaluating.
###Code
def evaluate(model, iterator, criterion):
epoch_loss = 0
epoch_acc = 0
model.eval()
with torch.no_grad():
for batch in iterator:
predictions = model(batch.text).squeeze(1)
loss = criterion(predictions, batch.label)
acc = binary_accuracy(predictions, batch.label)
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
###Output
_____no_output_____
###Markdown
We then train the model through multiple epochs, an epoch being a complete pass through all examples in the split.
###Code
N_EPOCHS = 5
for epoch in range(N_EPOCHS):
train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)
print(f'| Epoch: {epoch+1:02} | Train Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}% | Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}% |')
###Output
| Epoch: 01 | Train Loss: 0.694 | Train Acc: 50.30% | Val. Loss: 0.696 | Val. Acc: 50.26% |
| Epoch: 02 | Train Loss: 0.693 | Train Acc: 49.91% | Val. Loss: 0.696 | Val. Acc: 49.92% |
| Epoch: 03 | Train Loss: 0.693 | Train Acc: 50.05% | Val. Loss: 0.697 | Val. Acc: 50.55% |
| Epoch: 04 | Train Loss: 0.693 | Train Acc: 49.72% | Val. Loss: 0.696 | Val. Acc: 50.22% |
| Epoch: 05 | Train Loss: 0.693 | Train Acc: 50.16% | Val. Loss: 0.697 | Val. Acc: 50.68% |
###Markdown
You may have noticed the loss is not really decreasing and the accuracy is poor. This is due to several issues with the model which we'll improve in the next notebook.Finally, the metric we actually care about, the test loss and accuracy.
###Code
test_loss, test_acc = evaluate(model, test_iterator, criterion)
print(f'| Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}% |')
###Output
| Test Loss: 0.712 | Test Acc: 47.17% |
###Markdown
1 - Simple Sentiment AnalysisIn this series we'll be building a machine learning model to detect sentiment (i.e. detect if a sentence is positive or negative) using PyTorch and TorchText. This will be done on movie reviews, using the [IMDb dataset](http://ai.stanford.edu/~amaas/data/sentiment/).In this first notebook, we'll start very simple to understand the general concepts whilst not really caring about good results. Further notebooks will build on this knowledge and we'll actually get good results. IntroductionWe'll be using a **recurrent neural network** (RNN) as they are commonly used in analysing sequences. An RNN takes in sequence of words, $X=\{x_1, ..., x_T\}$, one at a time, and produces a _hidden state_, $h$, for each word. We use the RNN _recurrently_ by feeding in the current word $x_t$ as well as the hidden state from the previous word, $h_{t-1}$, to produce the next hidden state, $h_t$. $$h_t = \text{RNN}(x_t, h_{t-1})$$Once we have our final hidden state, $h_T$, (from feeding in the last word in the sequence, $x_T$) we feed it through a linear layer, $f$, (also known as a fully connected layer), to receive our predicted sentiment, $\hat{y} = f(h_T)$.Below shows an example sentence, with the RNN predicting zero, which indicates a negative sentiment. The RNN is shown in orange and the linear layer shown in silver. Note that we use the same RNN for every word, i.e. it has the same parameters. The initial hidden state, $h_0$, is a tensor initialized to all zeros. **Note:** some layers and steps have been omitted from the diagram, but these will be explained later. Preparing DataOne of the main concepts of TorchText is the `Field`. These define how your data should be processed. In our sentiment classification task the data consists of both the raw string of the review and the sentiment, either "pos" or "neg".The parameters of a `Field` specify how the data should be processed. We use the `TEXT` field to define how the review should be processed, and the `LABEL` field to process the sentiment. Our `TEXT` field has `tokenize='spacy'` as an argument. This defines that the "tokenization" (the act of splitting the string into discrete "tokens") should be done using the [spaCy](https://spacy.io) tokenizer. If no `tokenize` argument is passed, the default is simply splitting the string on spaces.`LABEL` is defined by a `LabelField`, a special subset of the `Field` class specifically used for handling labels. We will explain the `dtype` argument later.For more on `Fields`, go [here](https://github.com/pytorch/text/blob/master/torchtext/data/field.py).We also set the random seeds for reproducibility.
###Code
import sys
# !{sys.executable} -m pip install spacy
!{sys.executable} -m spacy download en
import torch
from torchtext import data
SEED = 1234
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
TEXT = data.Field(tokenize='spacy')
LABEL = data.LabelField(dtype=torch.float)
###Output
_____no_output_____
###Markdown
Another handy feature of TorchText is that it has support for common datasets used in natural language processing (NLP). The following code automatically downloads the IMDb dataset and splits it into the canonical train/test splits as `torchtext.datasets` objects. It processes the data using the `Fields` we have previously defined.
###Code
from torchtext import datasets
train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)
###Output
_____no_output_____
###Markdown
We can see how many examples are in each split by checking their length.
###Code
print(f'Number of training examples: {len(train_data)}')
print(f'Number of testing examples: {len(test_data)}')
###Output
_____no_output_____
###Markdown
We can also check an example.
###Code
print(vars(train_data.examples[0]))
###Output
_____no_output_____
###Markdown
The IMDb dataset only has train/test splits, so we need to create a validation set. We can do this with the `.split()` method. By default this splits 70/30, however by passing a `split_ratio` argument, we can change the ratio of the split, i.e. a `split_ratio` of 0.8 would mean 80% of the examples make up the training set and 20% make up the validation set. We also pass our random seed to the `random_state` argument, ensuring that we get the same train/validation split each time.
###Code
import random
train_data, valid_data = train_data.split(random_state=random.seed(SEED))
###Output
_____no_output_____
###Markdown
Again, we'll view how many examples are in each split.
###Code
print(f'Number of training examples: {len(train_data)}')
print(f'Number of validation examples: {len(valid_data)}')
print(f'Number of testing examples: {len(test_data)}')
###Output
Number of training examples: 17500
Number of validation examples: 7500
Number of testing examples: 25000
###Markdown
Next, we have to build a _vocabulary_. This is effectively a look up table where every unique word in your data set has a corresponding _index_ (an integer).We do this as our machine learning model cannot operate on strings, only numbers. Each _index_ is used to construct a _one-hot_ vector for each word. A one-hot vector is a vector where all of the elements are 0, except one, which is 1, and the dimensionality is the total number of unique words in your vocabulary, commonly denoted by $V$.The number of unique words in our training set is over 100,000, which means that our one-hot vectors will have over 100,000 dimensions! This will make training slow and possibly won't fit onto your GPU (if you're using one). There are two ways to effectively cut down our vocabulary: we can either only take the top $n$ most common words or ignore words that appear fewer than $m$ times. We'll do the former, only keeping the top 25,000 words.What do we do with words that appear in examples but have been cut from the vocabulary? We replace them with a special _unknown_ or `<unk>` token. For example, if the sentence was "This film is great and I love it" but the word "love" was not in the vocabulary, it would become "This film is great and I `<unk>` it".The following builds the vocabulary, only keeping the most common `max_size` tokens.
###Code
TEXT.build_vocab(train_data, max_size=25000)
LABEL.build_vocab(train_data)
###Output
_____no_output_____
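###Markdown
As a rough, hand-rolled sketch of the idea behind `build_vocab` with `max_size` (this is not TorchText's internal implementation, and the tokens and `max_size` below are made up for illustration): keep only the most frequent tokens, reserve `<unk>`/`<pad>` entries, and send every out-of-vocabulary word to the `<unk>` index.
###Code
from collections import Counter

# toy illustration: cap the vocabulary and map everything else to <unk>
tokens = "the film was great and the acting was superb".split()
max_size = 3
counts = Counter(tokens)
itos = ['<unk>', '<pad>'] + [w for w, _ in counts.most_common(max_size)]
stoi = {w: i for i, w in enumerate(itos)}
indexed = [stoi.get(w, stoi['<unk>']) for w in tokens]
print(itos)     # e.g. ['<unk>', '<pad>', 'the', 'was', 'film']
print(indexed)  # rare words all map to index 0 (<unk>)
###Output
_____no_output_____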
###Markdown
Why do we only build the vocabulary on the training set? When testing any machine learning system you do not want to look at the test set in any way. We do not include the validation set as we want it to reflect the test set as much as possible.
###Code
print(f"Unique tokens in TEXT vocabulary: {len(TEXT.vocab)}")
print(f"Unique tokens in LABEL vocabulary: {len(LABEL.vocab)}")
###Output
Unique tokens in TEXT vocabulary: 25002
Unique tokens in LABEL vocabulary: 2
###Markdown
Why is the vocab size 25002 and not 25000? One of the additional tokens is the `<unk>` token and the other is a `<pad>` token.When we feed sentences into our model, we feed a _batch_ of them at a time, i.e. more than one at a time, and all sentences in the batch need to be the same size. Thus, to ensure each sentence in the batch is the same size, any sentence shorter than the longest within the batch is padded.We can also view the most common words in the vocabulary.
###Code
print(TEXT.vocab.freqs.most_common(20))
###Output
[('the', 203481), (',', 192055), ('.', 165314), ('and', 109621), ('a', 109392), ('of', 100810), ('to', 93354), ('is', 76292), ('in', 61329), ('I', 54220), ('it', 53449), ('that', 49077), ('"', 44294), ("'s", 43334), ('this', 42327), ('-', 36891), ('/><br', 35921), ('was', 35321), ('as', 30436), ('with', 30001)]
###Markdown
We can also see the vocabulary directly using either the `stoi` (**s**tring **to** **i**nt) or `itos` (**i**nt **to** **s**tring) method.
###Code
print(TEXT.vocab.itos[:10])
###Output
['<unk>', '<pad>', 'the', ',', '.', 'and', 'a', 'of', 'to', 'is']
###Markdown
We can also check the labels, ensuring 0 is for negative and 1 is for positive.
###Code
print(LABEL.vocab.stoi)
###Output
defaultdict(<function _default_unk_index at 0x7f799132c8c8>, {'neg': 0, 'pos': 1})
###Markdown
The final step of preparing the data is creating the iterators. We iterate over these in the training/evaluation loop, and they return a batch of examples (indexed and converted into tensors) at each iteration.We'll use a `BucketIterator` which is a special type of iterator that will return a batch of examples where each example is of a similar length, minimizing the amount of padding per example.We also want to place the tensors returned by the iterator on the GPU (if you're using one). PyTorch handles this using `torch.device`, we then pass this device to the iterator.
###Code
BATCH_SIZE = 64
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size=BATCH_SIZE,
device=device)
###Output
_____no_output_____
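###Markdown
A small, self-contained sketch (not how `BucketIterator` is actually implemented) of why batching sentences of similar length reduces padding: every sentence in a batch is padded up to the longest one in that batch, so grouping similar lengths wastes far fewer `<pad>` tokens. The lengths below are hypothetical.
###Code
# hypothetical tokenized review lengths
lengths = [5, 52, 7, 48, 6, 50]

def n_pads(batch):
    # number of <pad> tokens needed to square off one batch
    return sum(max(batch) - n for n in batch)

arbitrary_batches = [lengths[0:3], lengths[3:6]]               # mixed lengths in each batch
sorted_lengths = sorted(lengths)
bucketed_batches = [sorted_lengths[0:3], sorted_lengths[3:6]]  # similar lengths grouped together

print(sum(n_pads(b) for b in arbitrary_batches))   # 138 pad tokens
print(sum(n_pads(b) for b in bucketed_batches))    # 9 pad tokens
###Output
_____no_output_____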
###Markdown
Build the ModelThe next stage is building the model that we'll eventually train and evaluate. There is a small amount of boilerplate code when creating models in PyTorch; note how our `RNN` class is a sub-class of `nn.Module` and the use of `super`.Within the `__init__` we define the _layers_ of the module. Our three layers are an _embedding_ layer, our RNN, and a _linear_ layer. All layers have their parameters initialized to random values, unless explicitly specified.The embedding layer is used to transform our sparse one-hot vector (sparse as most of the elements are 0) into a dense embedding vector (dense as the dimensionality is a lot smaller and all the elements are real numbers). This embedding layer is simply a single fully connected layer. As well as reducing the dimensionality of the input to the RNN, there is the theory that words which have a similar impact on the sentiment of the review are mapped close together in this dense vector space. For more information about word embeddings, see [here](https://monkeylearn.com/blog/word-embeddings-transform-text-numbers/).The RNN layer is our RNN which takes in our dense vector and the previous hidden state $h_{t-1}$, which it uses to calculate the next hidden state, $h_t$.Finally, the linear layer takes the final hidden state and feeds it through a fully connected layer, $f(h_T)$, transforming it to the correct output dimension.The `forward` method is called when we feed examples into our model.Each batch, `x`, is a tensor of size _**[sentence length, batch size]**_. That is a batch of sentences, each having each word converted into a one-hot vector. You may notice that this tensor should have another dimension due to the one-hot vectors, however PyTorch conveniently stores a one-hot vector as its index value, i.e. the tensor representing a sentence is just a tensor of the indexes for each token in that sentence.The input batch is then passed through the embedding layer to get `embedded`, which gives us a dense vector representation of our sentences. `embedded` is a tensor of size _**[sentence length, batch size, embedding dim]**_.`embedded` is then fed into the RNN. In some frameworks you must feed the initial hidden state, $h_0$, into the RNN, however in PyTorch, if no initial hidden state is passed as an argument it defaults to a tensor of all zeros.The RNN returns 2 tensors, `output` of size _**[sentence length, batch size, hidden dim]**_ and `hidden` of size _**[1, batch size, hidden dim]**_. `output` is the concatenation of the hidden state from every time step, whereas `hidden` is simply the final hidden state. We verify this using the `assert` statement. Note the `squeeze` method, which is used to remove a dimension of size 1. Finally, we feed the last hidden state, `hidden`, through the linear layer, `fc`, to produce a prediction.
###Code
import torch.nn as nn
class RNN(nn.Module):
def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim):
super().__init__()
self.embedding = nn.Embedding(input_dim, embedding_dim)
self.rnn = nn.RNN(embedding_dim, hidden_dim)
self.fc = nn.Linear(hidden_dim, output_dim)
def forward(self, x):
#x = [sent len, batch size]
embedded = self.embedding(x)
#embedded = [sent len, batch size, emb dim]
output, hidden = self.rnn(embedded)
#output = [sent len, batch size, hid dim]
#hidden = [1, batch size, hid dim]
assert torch.equal(output[-1,:,:], hidden.squeeze(0))
return self.fc(hidden.squeeze(0))
###Output
_____no_output_____
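###Markdown
As a quick sanity check of the shapes described above, here is a small sketch that instantiates the `RNN` class with arbitrary toy dimensions (not the ones used later) and pushes a dummy batch of token indices through it; the names `toy_model` and `toy_batch` are made up for this illustration.
###Code
# tiny dimensions chosen only for illustration
toy_model = RNN(input_dim=10, embedding_dim=4, hidden_dim=6, output_dim=1)

# a fake batch of token indices: [sentence length = 7, batch size = 3]
toy_batch = torch.randint(0, 10, (7, 3))
toy_preds = toy_model(toy_batch)

print(toy_batch.shape)  # torch.Size([7, 3])
print(toy_preds.shape)  # torch.Size([3, 1]) -> [batch size, output dim]
###Output
_____no_output_____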
###Markdown
We now create an instance of our RNN class. The input dimension is the dimension of the one-hot vectors, which is equal to the vocabulary size. The embedding dimension is the size of the dense word vectors. This is usually around 50-250 dimensions, but depends on the size of the vocabulary.The hidden dimension is the size of the hidden states. This is usually around 100-500 dimensions, but also depends on factors such as the vocabulary size, the size of the dense vectors and the complexity of the task.The output dimension is usually the number of classes, however in the case of only 2 classes the output value is between 0 and 1 and thus can be 1-dimensional, i.e. a single scalar real number.
###Code
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100
HIDDEN_DIM = 256
OUTPUT_DIM = 1
model = RNN(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM)
###Output
_____no_output_____
###Markdown
Train the Model Now we'll set up the training and then train the model.First, we'll create an optimizer. This is the algorithm we use to update the parameters of the module. Here, we'll use _stochastic gradient descent_ (SGD). The first argument is the parameters that will be updated by the optimizer, the second is the learning rate, i.e. how much we'll change the parameters by when we do a parameter update.
###Code
import torch.optim as optim
optimizer = optim.SGD(model.parameters(), lr=1e-3)
###Output
_____no_output_____
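###Markdown
To make the update rule concrete, here is a minimal sketch (the tensor values and names are arbitrary) comparing a manual parameter update, $p \leftarrow p - \text{lr} \cdot p.\text{grad}$, with what `optim.SGD` does in its simplest form.
###Code
# two identical toy parameters: one updated by hand, one by optim.SGD
p_manual = torch.tensor([1.0, -2.0], requires_grad=True)
p_sgd = torch.tensor([1.0, -2.0], requires_grad=True)
toy_opt = optim.SGD([p_sgd], lr=0.1)

for p in (p_manual, p_sgd):
    (p ** 2).sum().backward()          # gradient of sum(p^2) is 2p

with torch.no_grad():
    p_manual -= 0.1 * p_manual.grad    # the plain SGD update rule

toy_opt.step()
print(p_manual.data, p_sgd.data)       # both should be tensor([ 0.8000, -1.6000])
###Output
_____no_output_____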
###Markdown
Next, we'll define our loss function. In PyTorch this is commonly called a criterion. The loss function here is _binary cross entropy with logits_. The prediction for each sentence is an unbounded real number; as our labels are either 0 or 1, we want to restrict the prediction to lie between 0 and 1, which we do using the _sigmoid_ (logistic) function. We then calculate the loss on this bounded value using binary cross entropy. The `BCEWithLogitsLoss` criterion carries out both the sigmoid and the binary cross entropy steps.
###Code
criterion = nn.BCEWithLogitsLoss()
###Output
_____no_output_____
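###Markdown
A small numerical check, with made-up logits and labels, that `BCEWithLogitsLoss` is equivalent to applying a sigmoid and then ordinary binary cross entropy:
###Code
# made-up predictions (logits) and 0/1 labels
logits = torch.tensor([0.5, -1.2, 2.0])
labels = torch.tensor([1.0, 0.0, 1.0])

one_step = nn.BCEWithLogitsLoss()(logits, labels)
two_step = nn.BCELoss()(torch.sigmoid(logits), labels)

print(one_step.item(), two_step.item())   # the two values should agree (up to float error)
assert torch.allclose(one_step, two_step)
###Output
_____no_output_____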
###Markdown
Using `.to`, we can place the model and the criterion on the GPU (if we have one).
###Code
model = model.to(device)
criterion = criterion.to(device)
###Output
_____no_output_____
###Markdown
Our criterion function calculates the loss, however we have to write our own function to calculate the accuracy. This function first feeds the predictions through a sigmoid layer, squashing the values between 0 and 1; we then round them to the nearest integer. This rounds any value greater than 0.5 to 1 (a positive sentiment) and the rest to 0 (a negative sentiment).We then calculate how many rounded predictions equal the actual labels and average it across the batch.
###Code
def binary_accuracy(preds, y):
"""
Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8
"""
#round predictions to the closest integer
rounded_preds = torch.round(torch.sigmoid(preds))
correct = (rounded_preds == y).float() #convert into float for division
acc = correct.sum()/len(correct)
return acc
###Output
_____no_output_____
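###Markdown
A quick usage example with made-up predictions and labels: three of the four rounded predictions below match the labels, so the function should return 0.75.
###Code
# raw model outputs (logits) and the true labels
toy_preds = torch.tensor([2.0, -1.0, 0.3, -0.2])
toy_labels = torch.tensor([1.0, 0.0, 0.0, 0.0])

# sigmoid -> roughly [0.88, 0.27, 0.57, 0.45] -> rounded -> [1, 0, 1, 0]
print(binary_accuracy(toy_preds, toy_labels))   # tensor(0.7500)
###Output
_____no_output_____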
###Markdown
The `train` function iterates over all examples, one batch at a time. `model.train()` is used to put the model in "training mode", which turns on _dropout_ and _batch normalization_. Although we aren't using them in this model, it's good practice to include it.For each batch, we first zero the gradients. Each parameter in a model has a `grad` attribute which stores the gradient calculated by the `criterion`. PyTorch does not automatically remove (or "zero") the gradients calculated from the last gradient calculation, so they must be manually zeroed.We then feed the batch of sentences, `batch.text`, into the model. Note, you do not need to do `model.forward(batch.text)`, simply calling the model works. The `squeeze` is needed as the predictions are initially size _**[batch size, 1]**_, and we need to remove the dimension of size 1 as PyTorch expects the predictions input to a loss function to simply be of size _**[batch size]**_.The loss and accuracy are then calculated using our predictions and the labels, `batch.label`. We calculate the gradient of each parameter with `loss.backward()`, and then update the parameters using the gradients and optimizer algorithm with `optimizer.step()`.The loss and accuracy are accumulated across the epoch; the `.item()` method is used to extract a scalar from a tensor which only contains a single value.Finally, we return the loss and accuracy, averaged across the epoch. The `len` of an iterator is the number of batches in the iterator.You may recall when initializing the `LABEL` field, we set `dtype=torch.float`. This is because TorchText sets tensors to be `LongTensor`s by default, however our criterion expects both inputs to be `FloatTensor`s. As we have manually set the `dtype` to be `torch.float`, this is automatically done for us. The alternative method of doing this would be to do the conversion inside the `train` function by passing `batch.label.float()` instead of `batch.label` to the criterion.
###Code
def train(model, iterator, optimizer, criterion):
epoch_loss = 0
epoch_acc = 0
model.train()
for batch in iterator:
optimizer.zero_grad()
predictions = model(batch.text).squeeze(1)
loss = criterion(predictions, batch.label)
acc = binary_accuracy(predictions, batch.label)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
###Output
_____no_output_____
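###Markdown
The sketch below (using an arbitrary toy parameter `w`) illustrates why the gradients must be zeroed: calling `backward()` a second time without zeroing makes the `grad` attribute accumulate rather than being replaced.
###Code
w = torch.ones(3, requires_grad=True)

(2 * w).sum().backward()
print(w.grad)          # tensor([2., 2., 2.])

(2 * w).sum().backward()
print(w.grad)          # tensor([4., 4., 4.]) -- the new gradient was added, not overwritten

w.grad.zero_()         # roughly what optimizer.zero_grad() does for each parameter
print(w.grad)          # tensor([0., 0., 0.])
###Output
_____no_output_____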
###Markdown
`evaluate` is similar to `train`, with a few modifications as you don't want to update the parameters when evaluating.`model.eval()` puts the model in "evaluation mode", which turns off _dropout_ and _batch normalization_. Again, we are not using them in this model, but it is good practice to include them.No gradients are calculated on PyTorch operations inside the `with torch.no_grad()` block. This causes less memory to be used and speeds up computation.The rest of the function is the same as `train`, with the removal of `optimizer.zero_grad()`, `loss.backward()` and `optimizer.step()`, as we do not update the model's parameters when evaluating.
###Code
def evaluate(model, iterator, criterion):
epoch_loss = 0
epoch_acc = 0
model.eval()
with torch.no_grad():
for batch in iterator:
predictions = model(batch.text).squeeze(1)
loss = criterion(predictions, batch.label)
acc = binary_accuracy(predictions, batch.label)
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
###Output
_____no_output_____
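###Markdown
A tiny illustration (toy tensors only) of the effect of `torch.no_grad()`: operations performed inside the block are not tracked for gradient computation, which is what saves memory and speeds things up during evaluation.
###Code
x = torch.ones(2, requires_grad=True)

y = x * 2
print(y.requires_grad)     # True  -- tracked, a backward graph is being built

with torch.no_grad():
    z = x * 2
print(z.requires_grad)     # False -- not tracked, so no graph and less memory
###Output
_____no_output_____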
###Markdown
We then train the model through multiple epochs, an epoch being a complete pass through all examples in the split.
###Code
N_EPOCHS = 5
for epoch in range(N_EPOCHS):
train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)
print(f'| Epoch: {epoch+1:02} | Train Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}% | Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}% |')
###Output
| Epoch: 01 | Train Loss: 0.694 | Train Acc: 50.30% | Val. Loss: 0.696 | Val. Acc: 50.26% |
| Epoch: 02 | Train Loss: 0.693 | Train Acc: 49.91% | Val. Loss: 0.696 | Val. Acc: 49.92% |
| Epoch: 03 | Train Loss: 0.693 | Train Acc: 50.05% | Val. Loss: 0.697 | Val. Acc: 50.55% |
| Epoch: 04 | Train Loss: 0.693 | Train Acc: 49.72% | Val. Loss: 0.696 | Val. Acc: 50.22% |
| Epoch: 05 | Train Loss: 0.693 | Train Acc: 50.16% | Val. Loss: 0.697 | Val. Acc: 50.68% |
###Markdown
You may have noticed the loss is not really decreasing and the accuracy is poor. This is due to several issues with the model which we'll improve in the next notebook.Finally, the metric we actually care about, the test loss and accuracy.
###Code
test_loss, test_acc = evaluate(model, test_iterator, criterion)
print(f'| Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}% |')
###Output
| Test Loss: 0.712 | Test Acc: 47.17% |
|
notebooks/Coase.ipynb | ###Markdown
Coase, Property rights and the 'Coase Theorem'Coase, R. H. 1960. “The Problem of Social Cost.” *The Journal of Law and Economics* 3:1–44.Coase, Ronald H. 1937. “The Nature of the Firm.” *Economica* 4 (16):386–405. **Note:** this jupyter notebook mixes text, math, visualizations and python code. To keep things uncluttered most of the code was placed in a [code section](codesection) at the end. If you are running on a jupyter server and want to recreate or modify content or run interactive widgets, navigate to the code section for instructions first.**Slideshow mode**: this notebook can be viewed as a slideshow by pressing Alt-R if run on a server. Coase (1960) A rancher and wheat farmer.Both are utilizing adjacent plots of land. There is no fence separating the lands. **The Wheat Farmer**The wheat farmer chooses a production method that delivers a maximum profit of $\Pi_W =8$. - to keep this simple suppose this is the farmer's only production choice. **The Rancher**Chooses herd size $x$ to maximize profits: $$\Pi_C(x) = P \cdot F(x) - c \cdot x^2$$where $P$ is the cattle price and $c \cdot x^2$ is the cost of feeding a herd of size $x$. To simplify we allow decimal herd sizes (but conclusions would hardly change if restricted to integers). The first-order necessary condition for the herd size $x^*$ that maximizes profits is:$$P \cdot F'(x^*) = 2 c \cdot x^*$$ **Example:** with $P_c=4$, $F(x) = x$ and $c=\frac{1}{2}$ (the values used in the code below), the FOC is $4 = 2 \cdot \frac{1}{2} \cdot x^*$, and the rancher's privately optimal herd size is:$$x^* = 4$$ **The external cost**No effective barrier exists between the fields so cattle sometimes stray into the wheat farmer's fields, trampling crops and reducing the wheat farmer's profits.Specifically, if the rancher keeps a herd of size $x$, net profits in wheat are reduced to:$$\Pi_W(x) = \Pi_W - d \cdot x^2$$ **The external cost**Suppose $d=\frac{1}{2}$At the rancher's private optimum herd size of $x^*=4$ the farmer's profit is reduced from 8 to zero:$$\begin{align}\Pi_W(x) &= \Pi_W - d \cdot x^2 \\ & = 8 - \frac{1}{2} \cdot 4^2 = 0 \end{align}$$
###Code
Pc = 4
Pw = 8
c = 1/2
d = 1/2
CE, TE = copt(),topt()
CE, TE
###Output
_____no_output_____
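###Markdown
As a quick numerical check of the claims that follow (this cell is only an illustrative sketch; it reuses the parameter values `Pc`, `Pw`, `c`, `d` set in the previous cell, and the grid of herd sizes is arbitrary): the rancher's private optimum is a herd of 4 earning \$8 while leaving the farmer \$0, whereas joint profits are maximized at a herd of 2, where the two together earn \$12.
###Code
import numpy as np

x = np.linspace(0, 6, 601)            # candidate herd sizes
rancher = Pc * x - c * x**2           # Pi_C(x)
farmer = Pw - d * x**2                # Pi_W(x)
total = rancher + farmer

i_priv, i_soc = np.argmax(rancher), np.argmax(total)
print(x[i_priv], rancher[i_priv], farmer[i_priv])   # roughly 4.0, 8.0, 0.0
print(x[i_soc], total[i_soc])                       # roughly 2.0, 12.0
###Output
_____no_output_____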
###Markdown
If the rancher chose his private optimum he'd earn \$8 but drive the farmer's earnings to zero.
###Code
coaseplot1()
###Output
_____no_output_____
###Markdown
Private and social marginal benefits and costs can be plotted to see deadweight loss (DWL) differently:
###Code
coaseplot2()
###Output
_____no_output_____
###Markdown
The assignment of property rights (liability) **Scenario 1:** Farmer has the right to enjoin cattle herding (prohibit it via an injunction).Rancher now earns \$0. Farmer \$8. This is not efficient either. If the rancher herded just 2 cattle he would earn \$6. He could offer \$2 compensation to the wheat farmer and capture \$6 - \$2 = \$4....or they could bargain to divide the gains to trade of \$4 in other ways. **Scenario 2:** Rancher has the right to graze with impunity.Farmer earns \$0 if the rancher herds the privately optimal 4 cattle. The farmer could offer to pay \$2 to have the rancher reduce the herd to 2, which would leave the rancher as well off and take the farmer from \$0 to \$4 (= \$6 - \$2). ...or they could bargain to divide the gains to trade of \$4 in other ways. With zero transactions costs- **The initial assignment of property rights does not matter:** The parties bargain to an efficient outcome either way. - However, like any scarce resource, legal rights are valuable, so **the initial allocation will affect the distribution of benefits and incomes between the parties**- **The emergence of property rights**: Even if there is no initial assignment of property rights, with zero transactions costs it should be in the interests of the parties to negotiate to an efficient outcome. With positive transactions costs - The initial distribution of property rights typically will matter. - It's not so clear from this two-party example, but suppose we had a situation with one wheat farmer and many ranchers. It might be difficult to bring all the relevant ranchers and farmers together and to agree on bargain terms. Coase and the development of a land marketSuppose there is an open field. In the absence of a land market whoever gets to the land first (possibly the more powerful in the village) will prepare/clear land until the marginal value product of the last unit of land is equal to the clearing cost. We contrast two situations:(1) Open frontier: where land is still abundant(2) Land Scarcity.There will be a misallocation in (2) shown by DWL in the diagram... but also an incentive for the parties to bargain to a more efficient outcome. A well-functioning land market would also deliver that outcome. Abundant land environment$\bar T$ units of land and $N$=2 households.Land clearing cost $c$. Frontier land not yet exhausted.Maximize profits at $P \cdot F_T(T) = c$
###Code
landmarket(P=5, cl = 3, title = 'Open Frontier')
landmarket(P=8, cl = 3, title = 'Land Scarcity')
###Output
_____no_output_____
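###Markdown
A closed-form check of the land-market equilibrium behind the second figure (a sketch only; the parameter values $\bar T = 10$, $N = 2$, $A = 1$ come from the code section at the end, and $P=8$, $c_l=3$ from the call above): setting $N (A P / r)^2 = \bar T$ and solving gives $r = A P \sqrt{N/\bar T}$, which exceeds the clearing cost when $P = 8$.
###Code
import numpy as np

P, cl, Tbar, N, A = 8, 3, 10, 2, 1          # values used for the 'Land Scarcity' figure
r_star = A * P * np.sqrt(N / Tbar)          # solves N*(A*P/r)**2 = Tbar
r_eff = max(cl, r_star)                     # the rent can never fall below the clearing cost
T_each = (A * P / r_eff)**2                 # each household's land demand at that rent
print(r_star)                               # ~3.58 > cl = 3, so land is scarce
print(T_each, N * T_each)                   # 5.0 each, summing to Tbar = 10
###Output
_____no_output_____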
###Markdown
The 'Coase Theorem'Costless bargaining between the parties will lead to an efficient outcome regardless of which party is awarded the rights? Coase Theorem: True, False or Tautology?Tautology?: "if there are no costs to fixing things, then things will be fixed."Like the First Welfare Theorem (complete competitive markets will lead to efficient allocations, regardless of initial allocation of property rights). The Coase Theorem makes legal entitlements tradable More useful reading of Coase result When transactions costs are positive, the initial allocation of property rights will matter for the efficiency of the outcome. Further notes on Coase (incomplete) Coase can be seen as generalizing the neo-classical propositions about the exchange of goods (i.e. 1st Welfare Theorem) to the exchange of legal entitlements (Cooter, 1990). "The initial allocation of legal entitlements does not matter from an efficiency perspective so long as they can be freely exchanged..." Suggests ensuring efficiency of law is a matter of removing impediments to free exchange of legal entitlements...... define entitlements clearly and enforce private contracts for their exchange...But conditions needed for efficient resource allocation...Nice discussion [here](https://afinetheorem.wordpress.com/2013/09/03/on-coases-two-famous-theorems/): Tautology: "Costless bargaining is efficient tautologically; if I assume people can agree on socially efficient bargains, then of course they will""The fact that side payments can be agreed upon is true even when there are no property rights at all."" In the absence of property rights, a bargain *establishes* a contract between parties with novel rights that needn’t exist ex-ante.""The interesting case is when transaction costs make bargaining difficult. What you should take from Coase is that social efficiency can be enhanced by institutions (including the firm!) which allow socially efficient bargains to be reached by removing restrictive transaction costs, and particularly that the assignment of property rights to different parties can either help or hinder those institutions." Transactions cost: time and effort to carry out a transaction.. any resources needed to negotiate and enforce contracts...Coase: initial allocation of legal entitlements does not matter from an efficiency perspective so long as transaction costs of exchange are nil...Like a frictionless plane in Physics... a logical construction rather than something encountered in real life..Legal procedure to 'lubricate' exchange rather than allocate legal entitlements efficiently in the first place...As with ordinary goods the gains from legal ... The Political Coase Theorem Acemoglu, Daron. 2003. “Why Not a Political Coase Theorem? Social Conflict, Commitment, and Politics.” Journal of Comparative Economics 31 (4):620–652. Incomplete contracts - Hard to think of all contingencies- Hard to negotiate all contingencies- Hard to write contracts to cover all contingenciesIncomplete contracts- silent about parties' obligations in some states or state these only coarsely or ambiguously - Incomplete contracts will be revised and renegotiated as the future unfolds...This implies - ex-post costs (may fail to reach agreement..). - ex-ante costs Relationship-specific investments.. Party may be reluctant to invest because it fears expropriation by the other party at the recontracting stage.. - hold up: after tenant has made investment it is sunk, landlord may hike rent to match higher value of property (entirely due to tenant investment)... 
Expecting this, the tenant may not invest.. "Ownership – or power – is distributed among the parties to maximise their investment incentives. Hart and Moore show that complementarities between the assets and the parties have important implications. If the assets are so complementary that they are productive only when used together, they should have a single owner. Separating such complementary assets does not give power to anybody, while when the assets have a single owner, the owner has power and improved incentives." Code Section**Note:** To re-create or modify any content go to the 'Cell' menu above and run all code cells below by choosing 'Run All Below'. Then 'Run all Above' to recreate all output above (or go to the top and step through each code cell manually).
###Code
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fsolve
%matplotlib inline
###Output
_____no_output_____
###Markdown
Default parameter values:
###Code
Pc = 4
Pw = 8
c = 1/2
d = 1/2
def F(x,P=Pc,c=c):
'''Cattle Profit'''
return P*x - c*x**2
def AG(x, P=Pw):
'''Wheat farm profit before crop damage'''
return P*(x**0) # to return an array of len(x)
def AGD(x,P=Pw,d=d):
return AG(x,P) - d*x**2
def copt(P=Pc,c=c):
'''rancher private optimum'''
return P/(2*c)
def topt(P=Pc,c=c, d=d):
'''Social effient optimum'''
return P/(2*(c+d))
CE, TE = copt(),topt()
CE, TE
xx = np.linspace(0,6,100)
def coaseplot1():
fig = plt.subplots(figsize=(12,8))
plt.plot(xx, F(xx), label = 'Rancher Profit' )
plt.plot(xx, AG(xx), '--', label = 'Farmer w/ no cattle' )
plt.plot(xx, AGD(xx), label = 'Farmer w/ cattle')
plt.plot(xx, F(xx) + AGD(xx),label='Sum of both activities')
plt.scatter(copt(),F(copt()))
plt.scatter(topt(),F(topt()) + AGD(topt()))
plt.grid()
plt.ylim(0,14)
plt.xlabel('x -- head of cattle', fontsize=18)
plt.ylabel('Benefits/Profit', fontsize=18)
plt.legend(fontsize=14);
coaseplot1()
###Output
_____no_output_____
###Markdown
Let's plot a standard 'external cost' diagram
###Code
def MC(x,c=1/2):
'''Cattle MC'''
return 2*c*x
def excost(x,d=1/2):
return 2*d*x
def coaseplot2(Pw=Pw, Pc=Pc):
fig = plt.subplots(figsize=(12,8))
plt.axhline(Pc);
plt.plot(xx, MC(xx), label = 'Rancher PMC' )
plt.plot(xx, MC(xx)+excost(xx), label = 'SMC')
plt.fill_between(xx, MC(xx)+excost(xx),Pc*xx**0, where=((MC(xx)<=Pc*xx**0) & (xx>2)),
facecolor='green', alpha=0.2, label='DWL')
plt.text(3,5,'DWL' )
plt.text(5,3.5,r'$SMB = P_C$')
plt.text(5,5.5, r'$PMC$')
plt.text(5,10.5, r'$SMC$')
#plt.scatter(topt(),G(topt()) + AGD(topt()))
plt.grid()
plt.ylim(0,13)
plt.xlabel('x -- head of cattle')
plt.ylabel('Benefits/Profit')
plt.legend();
###Output
_____no_output_____
###Markdown
Code for land example
###Code
A=1
def F(T, A=A):
return A*np.sqrt(T)
def MVPT(P,T,A=A):
return A*P/T**(1/2)
def LD(P,r,A=A):
return (P*A/r)**2
A=1
Tbar = 10 # Total land endowment
P = 5.5 # Price of output
cl = 3 # cost of clearing land
###Output
_____no_output_____
###Markdown
Land demand for each farmer is given by $P\cdot F_T(T_i) = r$. So for this production $P \frac{1}{\sqrt T_i} = r$ or $P \frac{1}{\sqrt T_i} = cl$ so we can write$$T^*_i(r) = (P/r)^2$$If there is an open frontier the sum or demands falls short of total land supply and the marginal cost of land is the cost of clearing $r=c_l$. 'Land scarcity' results when there is an equilibrium price of land and$r>c_l$ where $r$ is found from $$\sum T^*_i(r) = \bar T$$
###Code
def req(P,cl, Tb=Tbar, N=2, A=A):
'''equilibrium rental rate'''
def landemand(r):
return N*(A*P/r)**2 - Tb
return fsolve(landemand, 1)[0]
P, cl, req(P,cl)
LD(P, req(P,cl))*2, Tbar
def mopt(P,cl,A=A):
'''Optimum land use for each i at the P*MPT = max(cl,r)'''
r = req(P,cl)
ru = max(cl, r)
return (A*P/ru)**2
###Output
_____no_output_____
###Markdown
Farmer A will demand
###Code
mopt(P,cl), MVPT(P, mopt(P,cl) )
#plt.style.use('bmh')
def landmarket(P, cl, title, A=A):
t = np.linspace(0.1,Tbar-0.1, 2*Tbar)
fig = plt.subplots(figsize=(12,8))
x0 = mopt(P,cl,A=A)
plt.ylim(0,5)
#plt.axhline(cl,linestyle=':')
plt.axhline(max(cl,req(P,cl,A=A)),linestyle='--')
plt.axhline(cl,linestyle=':')
plt.plot(t,MVPT(P,t))
plt.text(8, MVPT(P,8),r'$P \cdot F_T(T)$', fontsize=18)
plt.text(1, MVPT(P,Tbar-1),r'$P \cdot F_T(\bar T - T)$', fontsize=18)
plt.xlabel('T -- land use', fontsize=18)
plt.ylabel('MVPT', fontsize=18)
plt.scatter(x0, MVPT(P,x0))
plt.scatter(Tbar-mopt(P,cl),MVPT(P,x0))
plt.plot([x0,x0],[0,MVPT(P,x0)],':')
plt.plot([Tbar-x0,Tbar-x0],[0,MVPT(P,x0)],':')
plt.plot(t,MVPT(P,Tbar - t))
plt.plot(t,MVPT(P,Tbar-t))
plt.title(title)
plt.xlim(0,Tbar);
landmarket(P=5.5, cl = 3, title = 'Open Frontier')
landmarket(P=8, cl = 3, title = 'Land Scarcity')
###Output
_____no_output_____
###Markdown
Notes on Coase, Property rights and the 'Coase Theorem'Coase, R. H. 1960. “The Problem of Social Cost.” *The Journal of Law and Economics* 3:1–44.Coase, Ronald H. 1937. “The Nature of the Firm.” *Economica* 4 (16):386–405. **Slideshow mode**: this notebook can be viewed as a slideshow by pressing Alt-R if run on a server. Coase (1960) The Problem of Social Cost A rancher and wheat farmer.Both are utilizing adjacent plots of land. No fence separates the lands. **The Wheat Farmer:** chooses a production method that delivers a maximum profit of $\Pi_W =8$. - to keep this simple suppose this is the farmer's only production choice. **The Rancher:** chooses herd size $x$ to maximize profits $\Pi_C(x) = P \cdot F(x) - c \cdot x^2$- $P$ is cattle price and $c$ is the cost of feeding each animal. - The herd size $x^*$ that maximizes profits given by:$$P \cdot F'(x^*) = c$$ **Example:** If $F(x) = x$, $c=\frac{1}{2}$. The FOC are $x^{*} = P_c$ With $P_c=4$ and $c=\frac{1}{2}$, the rancher's privately optimal herd size of $x^* = 4$ Missing Property Rights impose external costsWith no effective barrier separating the fields cattle sometimes strays into the wheat farmer's fields, damaging crops and reducing wheat farmer's profits.Assume that if the rancher keeps a herd size $x$ net profits in wheat are reduced from $\Pi_W$ to:$$\Pi_W(x) = \Pi_W - d \cdot x^2$$ **The external cost**Suppose $d=\frac{1}{2}$At the rancher's private optimum herd size of $x^*=4$, the farmer's profit is reduced from 8 to zero:$$\begin{align}\Pi_W(x) &= \Pi_W - d \cdot x^2 \\ & = 8 - \frac{1}{2} \cdot 4^2 = 0 \end{align}$$
###Code
%load_ext autoreload
%autoreload 2
from coase import *
###Output
_____no_output_____
###Markdown
At private optimum Rancher earns \$8 but imposes external costs that drive the farmer's earnings to zero.
###Code
coaseplot1()
###Output
_____no_output_____
###Markdown
Private and social marginal benefits and costs can be plotted to see deadweight loss (DWL) differently:
###Code
coaseplot2()
###Output
_____no_output_____
###Markdown
The assignment of property rights (liability) **Scenario 1:** Farmer is given the right to enjoin (i.e. limit or prohibit) cattle herding.If the farmer enforces a prohibition on all cattle herding:- Rancher now earns \$0. - Farmer earns \$8. - But this is not efficient! Total output is smaller than it could be. - If transactions costs are low the two parties can bargain to a more efficient outcome. **Scenario 1:** Farmer is given the right to enjoin (i.e. limit or prohibit) cattle herding.Rancher reasons that if she were permitted to herd 2 cattle she'd earn $\$6$ while imposing \$2 in damage. - She could offer $\$2$ in full compensation for damage, pocketing remaining \$4 - or they could bargain over how to divide the gains to trade of \$4 in other ways. **Scenario 2:** Rancher is granted right to graze with impunity.Farmer reasons that if herd size could be reduced from 4 to 2- farm profits would rise from $\$0$ to $\$6$- rancher's profits would fall from $\$8$ to $\$6$ - So farmer could offer to fully compensate rancher for $\$2$ loss and pocket remaining $\$4$- or they could bargain to divide those gains to trade of $\$4$ in other ways. Who causes the externality?- The rancher, because his cows trample the crops?- The farmer, for placing his field too close to the rancher?- Ronald Coase point is that there is no clear answer to this question. - Hence Pigouvian tax/subsidy 'solutions' are not obvious. Should we tax the rancher, or subsidize them to keep their herd size down? - 'Externality' problem is due to the non-assignment of property rights. The 'Coase Theorem' With zero/low transactions costs- **The initial assignment of property rights does not matter for efficiency:** The parties traded to an efficient solution no matter who first got the rights. - **The 'emergence' of property rights**: Even with no initial third-party assignment of property rights, it should be in the interests of the parties to create such rights and negotiate/trade to an efficient outcome. - **The initial allocation does matter for the distribution of benefits between parties.** Legally tradable entitlements are valuable, generate income to those who can then sell. Coase Theorem: True, False or Tautology?> "Costless bargaining is efficient tautologically; if I assume people can agree on socially efficient bargains, then of course they will... In the absence of property rights, a bargain *establishes* a contract between parties with novel rights that needn’t exist ex-ante."Cooter (1990)In the Farmer and Rancher example there was a missing market for legal entitlements. Once the market is made complete (by an assumed third party) then the First Welfare Theorem applies: complete competitive markets will lead to efficient allocations, regardless of initial allocation of property rights. The "Coase Theorem" makes legal entitlements tradable. In this view insuring efficiency is matter or removing impediments to free exchange of legal entitlements. However, >"The interesting case is when transaction costs make bargaining difficult. What you should take from Coase is that social efficiency can be enhanced by institutions (including the firm!) which allow socially efficient bargains to be reached by removing restrictive transaction costs, and particularly that the assignment of property rights to different parties can either help or hinder those institutions."Good further discussions from [D. 
Mcloskey](http://www.deirdremccloskey.com/docs/pdf/Article_306.pdf) and [here](https://afinetheorem.wordpress.com/2013/09/03/on-coases-two-famous-theorems/): With positive transactions costs initial rights allocations matter for efficiency- 'Coase Theorem' (Stigler) interpretation sweeps under the rug the complicated political question of who gets initial rights. - Parties may engage in costly conflict, expend real resources to try to establish control over initial allocation of rights. - The [Myerson Satterthaite theorem](https://en.wikipedia.org/wiki/Myerson%E2%80%93Satterthwaite_theorem) establishes that when parties are asymmetrically informed about each other's valuations (here of damages or benefits) then efficient exchange may become difficult/impossible. Each party tries to extract rents, hold-up the other. - Suppose we had many farmers and ranchers. It might be costly/difficult to bring all relevant ranchers and farmers together and to agree on bargain terms. - Coase himself thought transactions costs mattered and hence initial allocation mechanisms had to be thought through carefully (e.g. spectrum auctions). A Coasian view of the development of a land marketSuppose there is an open field. In the absence of a land market whoever gets to the land first (possibly the more powerful in the the village) will prepare/clear land until the marginal value product of the last unit of land is equal to the clearing cost. We contrast two situations:(1) Open frontier: where land is still abundant(2) Land Scarcity.There will be a misallocation in (2) shown by DWL in the diagram... but also an incentive for the parties to bargain to a more efficient outcome. A well functionining land market would also deliver that outcome. Abundant land environment$\bar T$ units of land and $N$=2 households.Land clearing cost $c$. Frontier land not yet exhausted.Maximize profits at $P \cdot F_T(T) = c$ Land demand for each farmer is given by $P\cdot F_T(T_i) = r$. So for this production $P \frac{1}{\sqrt T_i} = r$ or $P \frac{1}{\sqrt T_i} = cl$ so we can write$$T^*_i(r) = (P/r)^2$$If there is an open frontier the sum or demands falls short of total land supply and the marginal cost of land is the cost of clearing $r=c_l$. 'Land scarcity' results on the other hand when there is an equilibrium price of land $r>c_l$ where $r$ is found from $$\sum T^*_i(r) = \bar T$$Now land rent $r-c$ can be charged on the right to access and use land. Trade in these legal entitlements can raise output and efficiency. But there may be conflict and a 'scramble' to establish those rights of first access. 'Customary' land rights- Suppose norm is that all in the village can use as much land as they can farm- Higher status individuals get allocation first- As long as land is abundant everyone gets the land they want- No "land rent" -- cannot charge rent above $c$ since villagers are free to clear at cost $c$
###Code
landmarket(P=5, cl = 3, title = 'Open Frontier')
###Output
_____no_output_____
###Markdown
The closing of the frontier- Rising population or improving prices or technology increases demand for land.- Suppose the price at which the product can be sold increases - demand for land increases.- Suppose total demand at clearing cost $c$ exceeds available land supply. - High-status individuals (who have first access) leave less land available than is needed to satisfy the remaining villagers' demand.- Inefficient allocation of land - marginal products of land not equalized across households. - output would increase if we establish a market for trading land
###Code
landmarket(P=8, cl = 3, title = 'Land Scarcity')
###Output
_____no_output_____
###Markdown
Coase and Property> Coase, R. H. 1960. “The Problem of Social Cost.” *The Journal of Law and Economics* 3:1–44.> Coase, Ronald H. 1937. “The Nature of the Firm.” *Economica* 4 (16):386–405. **Slideshow mode**: this notebook can be viewed as a slideshow by pressing Alt-R if run on a server. Coase (1960) The Problem of Social Cost A rancher and wheat farmer.Both are utilizing adjacent plots of land. No fence separates the lands. **The Wheat Farmer:** chooses a production method that delivers a maximum profit of $\Pi_W =8$. - to keep this simple suppose this is the farmer's only production choice. **The Rancher:** chooses herd size $x$ to maximize profits $\Pi_C(x) = P \cdot F(x) - c \cdot x^2$- $P$ is cattle price and $c$ is the cost of feeding each animal. - The herd size $x^*$ that maximizes profits given by:$$P \cdot F'(x^*) = c$$ **Example:** If $F(x) = x$, $c=\frac{1}{2}$. The FOC are $x^{*} = P_c$ With $P_c=4$ and $c=\frac{1}{2}$, the rancher's privately optimal herd size of $x^* = 4$ Missing Property Rights impose external costsWith no effective barrier separating the fields cattle sometimes strays into the wheat farmer's fields, damaging crops and reducing wheat farmer's profits.Assume that if the rancher keeps a herd size $x$ net profits in wheat are reduced from $\Pi_W$ to:$$\Pi_W(x) = \Pi_W - d \cdot x^2$$ **The external cost**Suppose $d=\frac{1}{2}$At the rancher's private optimum herd size of $x^*=4$, the farmer's profit is reduced from 8 to zero:$$\begin{align}\Pi_W(x) &= \Pi_W - d \cdot x^2 \\ & = 8 - \frac{1}{2} \cdot 4^2 = 0 \end{align}$$
###Code
from coase import *
from ipywidgets import interact, fixed
###Output
_____no_output_____
###Markdown
At private optimum Rancher earns \$8 but imposes external costs that drive the farmer's earnings to zero.
###Code
coaseplot1()
###Output
_____no_output_____
###Markdown
Private and social marginal benefits and costs can be plotted to see deadweight loss (DWL) differently:
###Code
coaseplot2()
###Output
_____no_output_____
###Markdown
The assignment of property rights (liability) **Scenario 1:** Farmer is given the right to enjoin (i.e. limit or prohibit) cattle herding.If the farmer enforces a prohibition on all cattle herding:- Rancher now earns \$0. - Farmer earns \$8. - But this is not efficient! Total output is smaller than it could be. - If transactions costs are low the two parties can bargain to a more efficient outcome. **Scenario 1:** Farmer is given the right to enjoin (i.e. limit or prohibit) cattle herding.Rancher reasons that if she were permitted to herd 2 cattle she'd earn $\$6$ while imposing \$2 in damage. - She could offer $\$2$ in full compensation for damage, pocketing remaining \$4 - or they could bargain over how to divide the gains to trade of \$4 in other ways. **Scenario 2:** Rancher is granted right to graze with impunity.Farmer reasons that if herd size could be reduced from 4 to 2- farm profits would rise from $\$0$ to $\$6$- rancher's profits would fall from $\$8$ to $\$6$ - So farmer could offer to fully compensate rancher for $\$2$ loss and pocket remaining $\$4$- or they could bargain to divide those gains to trade of $\$4$ in other ways. Who causes the externality?- The rancher, because his cows trample the crops?- The farmer, for placing his field too close to the rancher?- Ronald Coase point is that there is no clear answer to this question. - Hence Pigouvian tax/subsidy 'solutions' are not obvious. Should we tax the rancher, or subsidize them to keep their herd size down? - 'Externality' problem is due to the non-assignment of property rights. The 'Coase Theorem' With zero/low transactions costs- **The initial assignment of property rights does not matter for efficiency:** The parties traded to an efficient solution no matter who first got the rights. - **The 'emergence' of property rights**: Even with no initial third-party assignment of property rights, it should be in the interests of the parties to create such rights and negotiate/trade to an efficient outcome. - **The initial allocation does matter for the distribution of benefits between parties.** Legally tradable entitlements are valuable, generate income to those who can then sell. Coase Theorem: True, False or Tautology?> "Costless bargaining is efficient tautologically; if I assume people can agree on socially efficient bargains, then of course they will... In the absence of property rights, a bargain *establishes* a contract between parties with novel rights that needn’t exist ex-ante."Cooter (1990)In the Farmer and Rancher example there was a missing market for legal entitlements. Once the market is made complete (by an assumed third party) then the First Welfare Theorem applies: complete competitive markets will lead to efficient allocations, regardless of initial allocation of property rights. The "Coase Theorem" makes legal entitlements tradable. In this view insuring efficiency is matter or removing impediments to free exchange of legal entitlements. However, >"The interesting case is when transaction costs make bargaining difficult. What you should take from Coase is that social efficiency can be enhanced by institutions (including the firm!) which allow socially efficient bargains to be reached by removing restrictive transaction costs, and particularly that the assignment of property rights to different parties can either help or hinder those institutions."Good further discussions from [D. 
Mcloskey](http://www.deirdremccloskey.com/docs/pdf/Article_306.pdf) and [here](https://afinetheorem.wordpress.com/2013/09/03/on-coases-two-famous-theorems/): When initial rights allocations matters for efficiency- 'Coase Theorem' (Stigler) interpretation sweeps under the rug the complicated political question of who gets initial rights. - Parties may engage in costly conflict, expend real resources to try to establish control over initial allocation of rights. - The [Myerson Satterthaite theorem](https://en.wikipedia.org/wiki/Myerson%E2%80%93Satterthwaite_theorem) establishes that when parties are asymmetrically informed about each other's valuations (e.g. here about the value of damages or benefits) then efficient exchange may become difficult/impossible. Each party may try to extract rents by trying to "hold-up" the other. - Suppose we had many farmers and ranchers. It might be costly/difficult to bring all relevant ranchers and farmers together and to agree on bargain terms. - Coase himself thought transactions costs mattered and hence initial allocation mechanisms had to be thought through carefully (e.g. spectrum auctions). A Coasian view of land market developmentSuppose there is an open field. In the absence of a land market whoever gets to the land first (possibly the more powerful in the the village) will prepare/clear land until the marginal value product of the last unit of land is equal to the clearing cost. We contrast two situations:(1) Open frontier: where land is still abundant(2) Land Scarcity.There will be a misallocation in (2) shown by DWL in the diagram... but also an incentive for the parties to bargain to a more efficient outcome. A well functionining land market would also deliver that outcome. Abundant land environment$\bar T$ units of land and $N$=2 households.Land clearing cost $c$. Frontier land not yet exhausted.Maximize profits at $P \cdot F_T(T) = c$ Land demand for each farmer is given by $P\cdot F_T(T_i) = r$. So for this production $P \frac{1}{\sqrt T_i} = r$ or $P \frac{1}{\sqrt T_i} = cl$ so we can write$$T^*_i(r) = (P/r)^2$$If there is an open frontier the sum or demands falls short of total land supply and the marginal cost of land is the cost of clearing $r=c_l$. 'Land scarcity' results on the other hand when there is an equilibrium price of land $r>c_l$ where $r$ is found from $$\sum T^*_i(r) = \bar T$$Now land rent $r-c$ can be charged on the right to access and use land. Trade in these legal entitlements can raise output and efficiency. But there may be conflict and a 'scramble' to establish those rights of first access. 'Customary' land rights- Suppose norm is that all in the village can use as much land as they can farm- Higher status individuals get allocation first- As long as land is abundant everyone gets the land they want- No "land rent" -- cannot charge rent above $c$ since villagers are free to clear at cost $c$
###Code
landmarket(P=5, cl = 3, title = 'Open Frontier')
###Output
_____no_output_____
###Markdown
The closing of the frontier- Rising population or improving prices or technology increases demand for land.- Suppose the price at which the product can be sold increases - demand for land increases.- Suppose total demand at the clearing cost $c$ exceeds the available land supply. - High-status individuals (who have first access) leave less land available than is needed to satisfy the remaining villagers' demand.- Inefficient allocation of land - marginal products of land are not equalized across households. - Output would increase if we establish a market for trading land.
###Code
landmarket(P=8, cl = 3, title = 'Land Scarcity')
###Output
_____no_output_____
###Markdown
We can solve for the equilibrium rental rate $r$ given environment parameters including the price $P$, the land endowment $\bar T$, the population size $N$ and the technology parameter $A$ (a short worked example follows below). To do (things still to do in this notebook): - indicate DWL on the landmarket diagrams - create a widget to see how the diagram shifts with changing parameters
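As a quick worked check (using the default values assumed in the code section below, $A=1$, $\bar T = 10$, $N=2$, $c_l=3$): setting $\sum_i T^*_i(r) = N\,(AP/r)^2 = \bar T$ and solving gives$$r = A P \sqrt{N/\bar T}$$With $P=8$ this yields $r = 8\sqrt{0.2} \approx 3.58 > c_l$, so land is scarce and earns a rent above the clearing cost; with $P=5$, $r \approx 2.24 < c_l$, so the frontier is still open and the marginal cost of land is just the clearing cost $c_l$.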
###Code
interact(landmarket, P=(4,10,0.2), cl = (0,5,0.5),
title = fixed('Land'), A=fixed(1));
###Output
_____no_output_____
###Markdown
Coase, Property rights and the 'Coase Theorem'Coase, R. H. 1960. “The Problem of Social Cost.” *The Journal of Law and Economics* 3:1–44.Coase, Ronald H. 1937. “The Nature of the Firm.” *Economica* 4 (16):386–405. **Note:** Most of the code to generate visuals and results below is in a [code section](codesection) at the end of this notebook. To run interactive widgets or modify content, navigate to the code section for instructions.**Slideshow mode**: this notebook can be viewed as a slideshow by pressing Alt-R if run on a server. Coase (1960) A rancher and a wheat farmer.Both are utilizing adjacent plots of land. There is no fence separating the lands. **The Wheat Farmer**The wheat farmer chooses a production method that delivers a maximum profit of $\Pi_W =8$. - to keep this simple, suppose this is the farmer's only production choice. **The Rancher**Chooses herd size $x$ to maximize profits: $$\Pi_C(x) = P \cdot F(x) - c \cdot x$$where $P$ is the cattle price and $c$ is the cost of feeding each animal. To simplify we allow decimal levels (but conclusions would hardly change if restricted to integers). First-order necessary condition for herd size $x^*$ to maximize profits:$$P \cdot F'(x^*) = c$$ **Example:** $P_c=4$, $F(x) = \sqrt{x}$ and $c=1$. The FOC is $\frac{4}{2\sqrt{x^*}} = 1$, and the rancher's privately optimal herd size is:$$x^* = 4$$ **The external cost**No effective barrier exists between the fields, so cattle sometimes stray into the wheat farmer's fields, trampling crops and reducing the wheat farmer's profits.Specifically, if the rancher keeps a herd of size $x$, net profits in wheat are reduced to:$$\Pi_W(x) = \Pi_W - d \cdot x^2$$ **The external cost**Suppose $d=\frac{1}{2}$. At the rancher's private optimum herd size of $x^*=4$ the farmer's profit is reduced from 8 to zero:$$\begin{align}\Pi_W(x) &= \Pi_W - d \cdot x^2 \\ & = 8 - \frac{1}{2} \cdot 4^2 = 0 \end{align}$$
###Code
Pc = 4
Pw = 8
c = 1/2
d = 1/2
CE, TE = copt(),topt()
CE, TE
###Output
_____no_output_____
###Markdown
If the rancher chose his private optimum he'd earn \$8 but drive the farmer's earnings to zero.
###Code
coaseplot1()
###Output
_____no_output_____
###Markdown
Private and social marginal benefits and costs can be plotted to see deadweight loss (DWL) differently:
###Code
coaseplot2()
###Output
_____no_output_____
###Markdown
The assignment of property rights (liability) **Scenario 1:** Farmer has the right to enjoin cattle herding (prohibit via an injunction).Rancher now earns \$0. Farmer \$8. This is not efficient either. If the rancher herded just 2 cattle she would earn \$6. She could offer \$2 compensation to the wheat farmer and capture \$6 - \$2 = \$4 ...or they could bargain to divide the gains to trade of \$4 in other ways. **Scenario 2:** Rancher has the right to graze with impunity.Farmer earns \$0 if the rancher herds the private optimum of 4 cattle. The farmer could offer to pay \$2 to have the rancher reduce the herd to 2, which would leave the rancher as well off and take the farmer from \$0 to \$4 (= 6 - 2). ...or they could bargain to divide the gains to trade of \$4 in other ways. With zero transactions costs- **The initial assignment of property rights does not matter:** The parties bargain to an efficient outcome either way. - However, like any scarce resource, legal rights are valuable, so **the initial allocation will affect the distribution of benefits and incomes between parties.**- **The emergence of property rights**: Even if there is no initial assignment of property rights, with zero transactions costs it should be in the interests of the parties to negotiate to an efficient outcome. With positive transactions costs - The initial distribution of property rights typically will matter. - It's not so clear from this example, but suppose that we had a situation with one wheat farmer and many ranchers. It might be difficult to bring all the ranchers together and get them to agree on bargain terms. Coase and the development of a land marketSuppose there is an open field. In the absence of a land market whoever gets to the land first (possibly the more powerful in the village) will prepare/clear land until the marginal value product of the last unit of land is equal to the clearing cost. We contrast two situations:(1) Open frontier: where land is still abundant (2) Land Scarcity.There will be a misallocation in (2) shown by DWL in the diagram... but also an incentive for the parties to bargain to a more efficient outcome. A well-functioning land market would also deliver that outcome. Abundant land environment$\bar T$ units of land and $N$=2 households.Land clearing cost $c$. Frontier land not yet exhausted.Maximize profits at $P \cdot F_T(T) = c$
###Code
landmarket(P=5, cl = 3, title = 'Open Frontier')
landmarket(P=8, cl = 3, title = 'Land Scarcity')
###Output
_____no_output_____
###Markdown
The 'Coase Theorem'Costless bargaining between the parties will lead to an efficient outcome regardless of which party is awarded the rights? Coase Theorem: True, False or Tautology?Tautology?: "if there are no costs to fixing things, then things will be fixed."Like the First Welfare Theorem (complete competitive markets will lead to efficient allocations, regardless of initial allocation of property rights). The Coase Theorem makes legal entitlements tradable More useful reading of Coase result When transactions costs the initial allocation of property rights will matter for the efficiency of the outcome. Further notes on Coase (incomplete) Coase can be seen as generalizing the neo-classical propositions about the exchange of goods (i.e. 1st Welfare Theorem) to the exchange of legal entitlements (Cooter, 1990). "The initial allocation of legal enttilements dos not matter foram an efficiency perspetive so long as they can be freely exchanged...'Suggests insuring efficiency of law is matter or removing impediments to free exchange of legal entitlements...... define entitlements clearly and enforce private contracts for their exchagne...But conditions needed for efficient resource allocation...Nice discussion [here](https://afinetheorem.wordpress.com/2013/09/03/on-coases-two-famous-theorems/): Tautology: "Costless bargaining is efficient tautologically; if I assume people can agree on socially efficient bargains, then of course they will""The fact that side payments can be agreed upon is true even when there are no property rights at all."" In the absence of property rights, a bargain *establishes* a contract between parties with novel rights that needn’t exist ex-ante.""The interesting case is when transaction costs make bargaining difficult. What you should take from Coase is that social efficiency can be enhanced by institutions (including the firm!) which allow socially efficient bargains to be reached by removing restrictive transaction costs, and particularly that the assignment of property rights to different parties can either help or hinder those institutions." Transactions cost: time and effort to carry out a transaction.. any resources needed to negotiate and enforce contracts...Coase: initial allocation of legal entitlements does not matter from an efficiency perspective so long as transaction costs of exchange are nil...Like frictionless plane in Phyisics... a logical construction rather than something encountered in real life..Legal procedure to 'lubricate' exchange rather than allocate legal entitlement efficiently in the first place...As with ordinary goods the gains from legal The Political Coase Theorem Acemoglu, Daron. 2003. “Why Not a Political Coase Theorem? Social Conflict, Commitment, and Politics.” Journal of Comparative Economics 31 (4):620–652. Incomplete contracts - Hard to think of all contingencies- Hard to negotiate all contingencies- Hard to write contracts to cover all contingenciesIncomplete contracts- silent about parties' obligations in some states or state these only coarsely or ambiguosly - Incomplete contracts will be revised and renegotiated as future unfolds...This implies - ex-post costs (my fail to reach agreement..). - ex-ante costs Relationship-specific investments.. Party may be reluctant to invest because fears expropriation by the other party at recontracting stage.. - hold up: after tenant has made investment it is sunk, landlord may hike rent to match higher value of property (entirely due to tenant investment)... 
Expecting this, the tenant may not invest. "Ownership – or power – is distributed among the parties to maximise their investment incentives. Hart and Moore show that complementarities between the assets and the parties have important implications. If the assets are so complementary that they are productive only when used together, they should have a single owner. Separating such complementary assets does not give power to anybody, while when the assets have a single owner, the owner has power and improved incentives." Code Section**Note:** To re-create or modify any content, go to the 'Cell' menu above and run all code cells below by choosing 'Run All Below'. Then 'Run All Above' to recreate all output above (or go to the top and step through each code cell manually).
###Code
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fsolve
%matplotlib inline
###Output
_____no_output_____
###Markdown
Default parameter values:
###Code
Pc = 4
Pw = 8
c = 1/2
d = 1/2
def F(x,P=Pc,c=c):
'''Cattle Profit'''
return P*x - c*x**2
def AG(x, P=Pw):
'''Wheat farm profit before crop damage'''
return P*(x**0) # to return an array of len(x)
def AGD(x,P=Pw,d=d):
return AG(x,P) - d*x**2
def copt(P=Pc,c=c):
'''rancher private optimum'''
return P/(2*c)
def topt(P=Pc,c=c, d=d):
'''Socially efficient optimum'''
return P/(2*(c+d))
CE, TE = copt(),topt()
CE, TE
xx = np.linspace(0,6,100)
def coaseplot1():
fig = plt.subplots(figsize=(12,8))
plt.plot(xx, F(xx), label = 'Rancher Profit' )
plt.plot(xx, AG(xx), '--', label = 'Farmer w/ no cattle' )
plt.plot(xx, AGD(xx), label = 'Farmer w/ cattle')
plt.plot(xx, F(xx) + AGD(xx),label='Sum of both activities')
plt.scatter(copt(),F(copt()))
plt.scatter(topt(),F(topt()) + AGD(topt()))
plt.grid()
plt.ylim(0,14)
plt.xlabel('x -- head of cattle', fontsize=18)
plt.ylabel('Benefits/Profit', fontsize=18)
plt.legend(fontsize=14);
coaseplot1()
###Output
_____no_output_____
###Markdown
Let's plot a standard 'external cost' diagram
###Code
def MC(x,c=1/2):
'''Cattle MC'''
return 2*c*x
def excost(x,d=1/2):
return 2*d*x
def coaseplot2(Pw=Pw, Pc=Pc):
fig = plt.subplots(figsize=(12,8))
plt.axhline(Pc);
plt.plot(xx, MC(xx), label = 'Rancher PMC' )
plt.plot(xx, MC(xx)+excost(xx), label = 'SMC')
plt.fill_between(xx, MC(xx)+excost(xx),Pc*xx**0, where=((MC(xx)<=Pc*xx**0) & (xx>2)),
facecolor='green', alpha=0.2, label='DWL')
plt.text(3,5,'DWL' )
plt.text(5,3.5,r'$SMB = P_C$')
plt.text(5,5.5, r'$PMC$')
plt.text(5,10.5, r'$SMC$')
#plt.scatter(topt(),G(topt()) + AGD(topt()))
plt.grid()
plt.ylim(0,13)
plt.xlabel('x -- head of cattle')
plt.ylabel('Benefits/Profit')
plt.legend();
###Output
_____no_output_____
###Markdown
Code for land example
###Code
A=1
def F(T, A=A):
return A*np.sqrt(T)
def MVPT(P,T,A=A):
return A*P/T**(1/2)
def LD(P,r,A=A):
return (P*A/r)**2
A=1
Tbar = 10 # Total land endowment
P = 5.5 # Price of output
cl = 3 # cost of clearing land
###Output
_____no_output_____
###Markdown
Land demand for each farmer is given by $P\cdot F_T(T_i) = r$. So for this production function $P \frac{1}{\sqrt{T_i}} = r$ (or $P \frac{1}{\sqrt{T_i}} = c_l$ on an open frontier), so we can write$$T^*_i(r) = (P/r)^2$$If there is an open frontier the sum of demands falls short of total land supply and the marginal cost of land is the cost of clearing, $r=c_l$. 'Land scarcity' results when there is an equilibrium price of land $r>c_l$, where $r$ is found from $$\sum T^*_i(r) = \bar T$$
###Code
def req(P,cl, Tb=Tbar, N=2, A=A):
'''equilibrium rental rate'''
def landemand(r):
return N*(A*P/r)**2 - Tb
return fsolve(landemand, 1)[0]
P, cl, req(P,cl)
LD(P, req(P,cl))*2, Tbar
def mopt(P,cl,A=A):
'''Optimum land use for each i at the P*MPT = max(cl,r)'''
r = req(P,cl)
ru = max(cl, r)
return (A*P/ru)**2
###Output
_____no_output_____
###Markdown
Farmer A will demand
###Code
mopt(P,cl), MVPT(P, mopt(P,cl) )
#plt.style.use('bmh')
def landmarket(P, cl, title, A=A):
t = np.linspace(0.1,Tbar-0.1, 2*Tbar)
fig = plt.subplots(figsize=(12,8))
x0 = mopt(P,cl,A=A)
plt.ylim(0,5)
#plt.axhline(cl,linestyle=':')
plt.axhline(max(cl,req(P,cl,A=A)),linestyle='--')
plt.axhline(cl,linestyle=':')
plt.plot(t,MVPT(P,t))
plt.text(8, MVPT(P,8),r'$P \cdot F_T(T)$', fontsize=18)
plt.text(1, MVPT(P,Tbar-1),r'$P \cdot F_T(\bar T - T)$', fontsize=18)
plt.xlabel('T -- land use', fontsize=18)
plt.ylabel('MVPT', fontsize=18)
plt.scatter(x0, MVPT(P,x0))
plt.scatter(Tbar-mopt(P,cl),MVPT(P,x0))
plt.plot([x0,x0],[0,MVPT(P,x0)],':')
plt.plot([Tbar-x0,Tbar-x0],[0,MVPT(P,x0)],':')
plt.plot(t,MVPT(P,Tbar - t))
plt.plot(t,MVPT(P,Tbar-t))
plt.title(title)
plt.xlim(0,Tbar);
landmarket(P=5.5, cl = 3, title = 'Open Frontier')
landmarket(P=8, cl = 3, title = 'Land Scarcity')
###Output
_____no_output_____ |
hpc/exercises/assignment2.ipynb | ###Markdown
Sparse Matrices
###Code
%matplotlib inline
import numpy as np
import pandas as pd
from scipy import sparse
import scipy.sparse.linalg as spla
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('notebook', font_scale=1.5)
###Output
_____no_output_____
###Markdown
A review of the essentialsThere are many applications in which we deal with matrices that are mostly zeros. For example, a matrix representing a text corpus is very sparse - there are many thousands of words used but each document only uses a small subset of the total. Social networks are similar because there are a lot of people, but most people are only connected to a few hundred or thousand others directly. Storing such a social network as a sparse rather than dense matrix will offer orders of magnitude reductions in memory requirements and corresponding speed-ups in computation. Coordinate formatThe simplest sparse matrix format is built from the coordinates and values of the non-zero entries. From dense matrix
###Code
A = np.random.poisson(0.2, (5,15)) * np.random.randint(0, 10, (5, 15))
A
rows, cols = np.nonzero(A)
vals = A[rows, cols]
vals
rows
cols
X1 = sparse.coo_matrix(A)
X1
print(X1)
###Output
(0, 14) 3
(2, 6) 5
(2, 8) 1
(2, 13) 5
(3, 0) 6
(3, 10) 8
(4, 3) 7
(4, 6) 5
(4, 13) 7
###Markdown
From coordinatesNote that the (values, (rows, cols)) argument is a single tuple.
###Code
X2 = sparse.coo_matrix((vals, (rows, cols)))
X2
print(X2)
###Output
(0, 14) 3
(2, 6) 5
(2, 8) 1
(2, 13) 5
(3, 0) 6
(3, 10) 8
(4, 3) 7
(4, 6) 5
(4, 13) 7
###Markdown
Convert back to dense matrix
###Code
X2.todense()
###Output
_____no_output_____
###Markdown
Compressed Sparse Row and Column formatsWhen we have repeated entries in the rows or cols, we can remove the redundancy by indicating the location of the first occurrence of a value and its increment instead of the full coordinates. Note that the final index location must be the number of rows or cols since there is no other way to know the shape. These are known as CSR or CSC formats.
###Code
np.vstack([rows, cols])
indptr = np.r_[np.searchsorted(rows, np.unique(rows)), len(rows)]
indptr
X3 = sparse.csr_matrix((vals, cols, indptr))
X3
print(X3)
X3.todense()
###Output
_____no_output_____
###Markdown
Casting from COO formatBecause the coordinate format is more intuitive, it is often more convenient to first create a COO matrix then cast to CSR or CSC form.
###Code
X4 = X2.tocsr()
X4
rows = np.repeat([0,1], 4)
cols = np.repeat([0,1], 4)
vals = np.arange(8)
rows
cols
vals
X5 = sparse.coo_matrix((vals, (rows, cols)))
X5.todense()
###Output
_____no_output_____
###Markdown
Question 1: Preparing the dataRead in the data from `airports.csv` and `names.csv`. We are interested in the following columns:* ORIGIN_AIRPORT_ID * DEST_AIRPORT_ID. The second file will help us figure out what each code means. Read both into data frames in a way that will allow you to look up the `description` of an airport based on its ID.
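One possible sketch is shown below (not necessarily the intended solution): the file locations and the `Code`/`Description` column names in `names.csv` are assumptions, so adjust them to the actual files.

```python
# Hedged sketch -- paths and the names.csv column names ('Code', 'Description') are assumptions.
import pandas as pd

flights = pd.read_csv('airports.csv', usecols=['ORIGIN_AIRPORT_ID', 'DEST_AIRPORT_ID'])
names = pd.read_csv('names.csv')
lookup = names.set_index('Code')['Description']   # airport ID -> description

print(lookup.loc[flights['ORIGIN_AIRPORT_ID'].iloc[0]])
```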
###Code
## ANSWER
###Output
_____no_output_____
###Markdown
Question 2: Summarize the dataPrint out summary statistics that help you understand the data set.
###Code
###Output
_____no_output_____
###Markdown
Solving large sparse linear systemsSciPy provides efficient routines for solving large sparse systems, just as for dense matrices. We will illustrate by calculating the page rank for airports using data from the [Bureau of Transportation Statistics](http://www.transtats.bts.gov/DL_SelectFields.asp?Table_ID=236). The [PageRank](https://en.wikipedia.org/wiki/PageRank) algorithm is used to rank web pages for search results, but it can be used to rank any node in a directed graph (here we have airports instead of web pages). PageRank is fundamentally about finding the steady state in a Markov chain and can be solved as a linear system.The update at each time step for the page rank $PR$ of a node $u$ is$$PR(u) = \sum_{v \in B_u} \frac{PR(v)}{L(v)}$$In the above equation, $B_u$ is the set of all nodes $v$ that link to $u$, where each $v$ node contributes its page rank divided by its number of outgoing links $L(v)$. So a node $v$ with a high page rank contributes a large value to a linked node $u$ if $v$ has relatively few other links. The figure shows a network with four nodes, all of which start with a page rank of $1/4$; the values on the edges show how much of its page rank each node contributes to its linked nodes in the first step.By letting the sum of all page ranks equal one, we essentially have a probability distribution over the nodes of the graph. Since the state of the graph only depends on its previous state, we have a Markov chain. If we assume that every node can be reached from every other node, the system will have a steady state - which is what the PageRank algorithm seeks to find. To guard against the case where a node has out-degree 0, we allow every node a small random chance of transitioning to any other node using a damping factor $d$. Then we solve the linear system to find the pagerank score $R$.In matrix notation, this is$$R = d M R + \frac{1-d}{N}\mathbf{1}$$where $M$ is the (column-stochastic) transition matrix with $M_{uv} = 1/L(v)$ if $v$ links to $u$ and $0$ otherwise, $N$ is the number of nodes and $\mathbf{1}$ is a vector of ones.At steady state,$$(I - dM)\,R = \frac{1-d}{N}\mathbf{1}$$and we can rearrange terms to solve for $R$$$R = (I - dM)^{-1}\frac{1-d}{N}\mathbf{1}$$ Working with graphs We will use the Python package NetworkX for this example, but later in the week we will dig into this package.
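To make the linear-system formulation concrete, here is a minimal, self-contained sketch on a made-up 4-node graph (the graph is invented for illustration only; the airport data below follows the same recipe):

```python
# Minimal sketch on a made-up 4-node graph -- illustration only.
import numpy as np
from scipy import sparse
from scipy.sparse.linalg import spsolve

# adjacency: entry (u, v) = 1 if node v links to node u
adj = sparse.csr_matrix(np.array([[0, 1, 1, 0],
                                  [1, 0, 0, 1],
                                  [1, 0, 0, 1],
                                  [0, 1, 1, 0]]))
out_degree = np.asarray(adj.sum(axis=0)).ravel()   # L(v): outgoing links per node
M = adj @ sparse.diags(1.0 / out_degree)           # column-stochastic transition matrix

n, d = adj.shape[0], 0.85
A = (sparse.eye(n) - d * M).tocsc()                # (I - dM)
b = (1 - d) / n * np.ones(n)                       # teleportation term
R = spsolve(A, b)
print(R, R.sum())                                  # page ranks; they sum to ~1
```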
###Code
import networkx as nx
###Output
_____no_output_____
###Markdown
Question 3: create a matrix that represents a graphUse the following code to construct an [adjacency matrix](https://en.wikipedia.org/wiki/Adjacency_matrix), using the package NetworkX.

```python
g = nx.from_pandas_edgelist(data, source='ORIGIN_AIRPORT_ID', target='DEST_AIRPORT_ID')
airports = np.array(g.nodes())
adj_matrix = nx.to_scipy_sparse_matrix(g)
```

1. What kind of matrix are we working with?
2. Calculate the sparsity.
3. Also stage the adjacency matrix for future use (save it to disk).
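One way these three steps might look (a sketch, not necessarily the intended solution; the output filename is an arbitrary assumption):

```python
# Hedged sketch -- assumes `adj_matrix` from the snippet above; the filename is arbitrary.
print(type(adj_matrix))                                  # typically a scipy.sparse CSR matrix
sparsity = adj_matrix.nnz / np.prod(adj_matrix.shape)    # fraction of non-zero entries
print(f"sparsity (non-zero fraction): {sparsity:.4f}")
sparse.save_npz('adj_matrix.npz', adj_matrix)            # stage to disk for later use
```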
###Code
## Answer
###Output
_____no_output_____
###Markdown
Question 4: Construct the transition matrixThese are the necessary steps to construct a [transition matrix](https://en.wikipedia.org/wiki/Stochastic_matrix):

1. Determine the out degree (the number of outward edges connected to a node). The output will be a `np.array` with 294 elements, each corresponding to the number of outbound flights.
2. Create a diagonal matrix (`diag_matrix`) using `scipy.sparse.diags` that uses `1/out_degrees`, then transform it to a CSR matrix. The matrix should be 294 x 294.
3. Use matrix multiplication to multiply the diagonal matrix by the adjacency matrix, then transpose the result. The matrix should be 294 x 294.
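A sketch of what those steps could look like (one of several equivalent ways; it assumes `adj_matrix` from Question 3):

```python
# Hedged sketch -- assumes adj_matrix from Question 3.
out_degrees = np.asarray(adj_matrix.sum(axis=1)).ravel()   # step 1: out degree per node
D_inv = sparse.diags(1.0 / out_degrees).tocsr()            # step 2: diagonal of 1/out_degrees
M = (D_inv @ adj_matrix).T                                 # step 3: normalize rows, then transpose
print(M.shape)                                             # expected: (294, 294)
```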
###Code
## Answer
###Output
_____no_output_____
###Markdown
Question 5: Create the equation that we need to use to solve for steady stateUse the following code to create the equation:

```python
n = len(airports)
d = 0.85
I = sparse.eye(n, format='csc')
A = I - d * M
b = (1-d) / n * np.ones(n)  # so the sum of all page ranks is 1
```

For each line write a comment that explains the step in the process.
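If it helps to check your own comments, here is one possible annotated version (a sketch, in the notation of the PageRank discussion above):

```python
n = len(airports)                  # number of nodes (airports) in the graph
d = 0.85                           # damping factor: probability of following a link
I = sparse.eye(n, format='csc')    # sparse identity matrix
A = I - d * M                      # left-hand side (I - dM) of the steady-state system
b = (1-d) / n * np.ones(n)         # uniform teleportation term; makes the ranks sum to 1
```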
###Code
## Answer
###Output
_____no_output_____
###Markdown
Question 6: Solve linear system

1. Use [spsolve](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.linalg.spsolve.html) to solve the linear system. Each value represents the page rank for a specific airport. HINT: your result should sum to 1.0.
2. Use the sorted result to index back into your data to print Description, degree, and page rank. Print the top 10.
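A sketch of the first step and the start of the second (the metadata lookup depends on how you stored the names in Question 1, so that part is only indicated):

```python
# Hedged sketch -- assumes A, b from Question 5 and `airports` from Question 3.
r = spsolve(A, b)                  # page rank per airport
print(r.sum())                     # should be ~1.0
top10 = np.argsort(r)[::-1][:10]
for idx in top10:
    # look up the airport's Description (and degree) however you stored them in Question 1
    print(airports[idx], r[idx])
```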
###Code
from scipy.sparse.linalg import spsolve
## Answer
###Output
_____no_output_____
###Markdown
Question 7: Visualize the airport connections graph (Extra Credit)NetworkX has many [drawing capabilities](https://networkx.github.io/documentation/stable/reference/drawing.html).

1. Go through [this tutorial](https://networkx.github.io/documentation/stable/tutorial.html) to familiarize yourself with how to work with graphs. Hint: the variable `g` should still be a NetworkX Graph object.
2. Use the `Drawing Graphs` section to create a visualization of this network.
3. Create a visualization that labels the top 5 airports by pagerank.
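One possible starting point (a sketch; the layout, node sizes and label formatting are arbitrary choices, and it assumes `g`, `airports` and the page-rank vector `r` from the earlier questions):

```python
# Hedged sketch -- assumes g, airports and r from the earlier questions.
import matplotlib.pyplot as plt

pos = nx.spring_layout(g, seed=42)                          # force-directed layout
nx.draw(g, pos, node_size=20, edge_color='lightgray')
top5 = airports[np.argsort(r)[::-1][:5]]                    # top 5 airports by page rank
nx.draw_networkx_labels(g, pos, labels={a: str(a) for a in top5}, font_size=8)
plt.show()
```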
###Code
## Answer
###Output
_____no_output_____ |
03_03_skylines_reloaded.ipynb | ###Markdown
The divide and conquer way
###Code
def skyline(rects, i, f):
if i == f:
return toContour(rects[i])
else:
mid = (i + f) // 2
c1 = skyline(rects, i, mid)
c2 = skyline(rects, mid + 1, f)
return merge(c1, c2)
contour = skyline(rects, 0, len(rects) - 1)
print(contour)
drawContour(contour)
def contourDots(contour):
x, y = zip(*contour)
plt.scatter(x, y)
bbox = {'boxstyle': 'round',
'facecolor': 'pink',
'edgecolor': 'orange'}
for x, y in contour:
plt.text(x, y, f"{x}, {y}", position=(x, y+.5), bbox=bbox, rotation=30)
plt.subplots(figsize=(12, 6))
plt.subplot(1, 2, 1)
for rect in rects:
r = toRect(rect)
r.append(r[0])
x, y = zip(*r)
plt.plot(x, y)
plt.subplot(1, 2, 2)
for rect in rects:
r = toRect(rect)
r.append(r[0])
x, y = zip(*r)
plt.plot(x, y, color="lightgray", linestyle="dashed", zorder=0)
drawContour(contour)
contourDots(contour)
plt.show()
###Output
_____no_output_____
###Markdown
Skyline problemA classic problem when superimposing images is the removal of hidden lines. In the 2D case the goal is to draw the skyline of a city. To simplify, assume that all buildings correspond (by projection) to rectangles that all share the same base (i.e. the city is flat).A building is a triple `(g, h, d)`, $d \gt g \ge 0, h \ge 0$, which represents the rectangle `(g, 0),(g, h),(d, h),(d, 0)`. For example, for the 6 buildings given by `(3, 13, 9), (1, 11, 5), (19, 18, 22), (3, 6, 7), (16, 3, 25), (12, 7, 16)` (see figure 1a), the resulting skyline is the one shown in figure 1b.
###Code
rects = [(3, 13, 9), (1, 11, 5), (19, 18, 22), (3, 6, 7), (16, 3, 25), (12, 7, 16)]
def toRect(triple):
g, h, d = triple
return [(g, 0),(g, h),(d, h),(d, 0)]
import matplotlib.pyplot as plt
rect = toRect(rects[0])
print(rect)
x, y = zip(*rect)
print(x)
print(y)
###Output
[(3, 0), (3, 13), (9, 13), (9, 0)]
(3, 3, 9, 9)
(0, 13, 13, 0)
###Markdown
Fig 1a
###Code
for rect in rects:
r = toRect(rect)
r.append(r[0])
x, y = zip(*r)
plt.plot(x, y)
plt.axis("off")
plt.show()
def drawContour(contour):
points = [(0, 0)]
for x, y in contour:
points.append((x, points[-1][1]))
points.append((x, y))
x, y = zip(*points)
plt.plot(x, y)
###Output
_____no_output_____
###Markdown
Contour
###Code
def toContour(triple):
g, h, d = triple
return [(g, h), (d, 0)]
contour0 = toContour(rects[0])
contour1 = toContour(rects[1])
drawContour(contour0)
drawContour(contour1)
plt.show()
def addPoint(res, x, y):
if len(res) > 0:
if res[-1][1] == y:
return
elif res[-1][0] == x:
x0, y0 = res[-1]
res[-1] = (x, max(y, y0))
return
res.append((x, y))
def merge(c1, c2):
res = []
n1, n2 = len(c1), len(c2)
i1, i2 = 0, 0
y1, y2 = 0, 0
while i1 < n1 or i2 < n2:
if i2 >= n2 or i1 < n1 and c1[i1][0] < c2[i2][0]:
x, y1 = c1[i1]
i1 += 1
else:
x, y2 = c2[i2]
i2 += 1
y = max(y1, y2)
addPoint(res, x, y)
return res
###Output
_____no_output_____
###Markdown
1b
###Code
contour = toContour(rects[0])
for i in range(1, len(rects)):
contour0 = toContour(rects[i])
contour = merge(contour, contour0)
print(contour)
drawContour(contour)
###Output
_____no_output_____ |
notebooks/05 Generate GEI database.ipynb | ###Markdown
much better!!!
###Code
# get the indexes of database with 12 words
mat_data = os.path.join(mat_root, "database12_100x100.mat")
mat_data = sio.loadmat(mat_data, simplify_cells=True)
database = mat_data['database']
l = []
for data in database:
l.append(data['label'])
l = np.array(l)
index12 = np.unique(l)
index12
# lets check how the data is stored in .mat
mat_data['database']
# Let's generate our .mat containing the GEI features
from dataset.dataset import Dataset
from utils.image import generate_gei
from tqdm.notebook import tqdm
dataset = Dataset('../data/database_convertida')
body_parts=['RightHand',
'LeftHand',
'UpperArmLeft',
'UpperArmRight',
'LowerArmLeft',
'LowerArmRight',
'Head']
nb_words = 24
dim = 100
output_dir = os.path.join(mat_root, 'gei')
database = []
for video in tqdm(dataset, total=len(dataset)):
if nb_words == 12 and classes[video.word] not in index12:
continue
gei = generate_gei(video, output_dim=(dim, dim), body_parts=body_parts)
label = classes[video.word]
data = dict()
data['sample'] = gei
data['label'] = label
database.append(data)
dt = np.dtype([('label',(np.uint8,1)),('sample',np.float32,(dim,dim))])
arr = np.zeros((len(database), ), dtype=dt)
for idx, d in enumerate(database):
arr[idx]['label'] = d['label']
arr[idx]['sample'] = d['sample']
os.makedirs(output_dir, exist_ok=True)
mat_filename = f'database{nb_words}_gei_{dim}x{dim}.mat'
sio.savemat(os.path.join(output_dir, mat_filename), {'database': arr})
# The code above is encapsulated in the script ../tools/generate_database.py
mat_saved = sio.loadmat(os.path.join(output_dir, mat_filename), simplify_cells=True)
mat_saved
###Output
_____no_output_____ |
3_manuscript_figure_and_table_notebooks/Figures_3_6_9-auPRC_Dot_Plots.ipynb | ###Markdown
Load Test Set Predictions + Labels
###Code
def get_preds_file(tf, train_species, test_species):
preds_root = ROOT + "/model_out/"
return preds_root + tf + "_" + train_species + "-trained_" + test_species + "-test.preds.npy"
def load_all_test_set_preds(test_species):
# takes a while to run.
preds_dict = defaultdict(lambda : dict())
# loop over mouse-trained, human-trained models, and DA mouse-trained models
for train_species in all_trainspecies:
for tf in tfs:
print("=== " + tf + ", " + train_species + "-trained ===")
# load predictions for all 5 independent model runs
preds_file = get_preds_file(tf, train_species, test_species)
try:
preds_dict[train_species][tf] = np.load(preds_file)
except:
if not (test_species == "mm10" and train_species == "NS"):
print("Could not load preds file:", preds_file)
return preds_dict
### Functions for loading in the test set labels
def get_test_bed_file(tf, species):
# This function returns the path to a BED-format file
# containing the chromosome names, starts, ends, and
# binding labels for all examples to test the model with.
# This file is specific to each tf -- the last column
# should contain the binding label for each window
return(ROOT + "data/" + species + "/" + tf + "/chr2.bed")
def get_test_labels(tf, species):
# This function reads in the test-data bed file
# for a given species and TF and returns the binding labels
# for each example in that file.
labels_file = get_test_bed_file(tf, species)
with open(labels_file) as f:
return np.array([int(line.split()[-1]) for line in f])
###Output
_____no_output_____
###Markdown
Calculate auPRCs
###Code
def get_auPRCs(preds, labels):
# This function calculates the auPRC for each set of
# predictions passed in. The length of the 2nd axis
# of the predictions array passed in will be the # of
# auPRCs returned as a list. The length of the 1st axis
# of the predictions array should match the length
# of the labels array.
assert preds.shape[0] <= len(labels), (preds.shape, len(labels))
if preds.shape[0] < len(labels):
labels = labels[:preds.shape[0]]
return [average_precision_score(labels, model_preds) for model_preds in preds.T]
def get_auPRC_df(preds_dict, test_species):
# This function loads in binding labels for each TF for
# a given test species, and for each TF, calculates the auPRC
# using each set of predictions that is input in "preds_dict".
auPRC_dicts = defaultdict(lambda : dict())
for tf in tfs:
test_labels = get_test_labels(tf, test_species)
for species in all_trainspecies:
if species in preds_dict.keys():
auPRC_dicts[species][tf] = get_auPRCs(preds_dict[species][tf],
test_labels)
# before returning all the auPRCs in dictionaries,
# we just need to reformat how they are stored
# because seaborn expects particularly formatted input
return format_data_for_seaborn(auPRC_dicts)
### Plot data preprocessing
def format_data_for_seaborn(auPRC_dicts):
# This function re-formats the "auPRC_dicts" list of dicts
# into one pandas DataFrame that matches how seaborn expects
# data to be input for the plot we will be making
tf_col = []
species_col = []
auprc_col = []
# assumes reps are constant across training species and TFs
species_list = list(auPRC_dicts.keys())
reps = len(auPRC_dicts[species_list[0]][tfs[0]])
for tf in tfs:
tf_col.extend([tf] * reps * len(species_list))
for species in species_list:
species_col.extend([model_names_dict[species]] * reps)
auprc_col.extend(auPRC_dicts[species][tf])
return pd.DataFrame({"TF":tf_col, "Species":species_col, "auPRC":auprc_col})
###Output
_____no_output_____
###Markdown
Generate Plots
###Code
# Plotting code
# Constants to specify plot appearance details
DOT_SIZE = 5
FIG_SIZE_UNIT = 5
FIG_SIZE = (FIG_SIZE_UNIT + 1.5, FIG_SIZE_UNIT - 1)
FIG_SIZE_SMALL = (FIG_SIZE_UNIT, FIG_SIZE_UNIT - 1)
COLORS = ["#0062B8", "#A2FFB6", "#FF0145", "#FFA600"]
AX_FONTSIZE = 16
AXTICK_FONTSIZE = 13
TITLESIZE = 17
from matplotlib.lines import Line2D
def make_boxplot(df, species, save_files = False, include = [],
fig_size = FIG_SIZE, colors_to_use = COLORS,
dot_size = DOT_SIZE, titlesize = TITLESIZE,
ax_fontsize = AX_FONTSIZE,
axtick_fontsize = AXTICK_FONTSIZE):
# This function creates one boxplot using seaborn.
# The data plotted must be stored in a pandas DataFrame (input = "df"),
# including 3 columns: TF, Species, and auPRC (case-sensitive names).
# Use the argument save_files to toggle between saving plots
# and outputting them within the notebook.
# If you want to create a plot containing only a subset of the data
# in your input DataFrame, specify which training species / model types
# to include by listing the model types by name in a list and give
# to the argument "include" (see cell below for examples). Plotting
# will follow the order of the model types as they are listed in "include".
# determine y-axis upper limit of plots
# this is done before data is subsetted to keep axis consistent
# regardless of which subset of data is used
yax_max = max(df["auPRC"]) + 0.05
# include should have species to plot in order of how you want them sorted on plot
if len(include) > 0:
model_names_include = set([model_names_dict[species] for species in include])
df_to_use = df[[species in model_names_include for species in df["Species"]]]
cols_list = []
labels_list = []
for index, col in enumerate(colors_to_use):
if all_trainspecies[index] in include:
cols_list.append(col)
labels_list.append(model_names_dict[all_trainspecies[index]])
cols = sns.color_palette(cols_list)
else:
df_to_use = df
cols_list = colors_to_use
cols = sns.color_palette(colors_to_use)
sns.set(style = "white")
# plot individual dots
ax = sns.swarmplot(x = "TF", y = "auPRC", hue = "Species",
data = df_to_use,
dodge = True,
palette = cols,
size = dot_size,
#edgecolor = "0.0001",
linewidth = 1)
legend_elements = [Line2D([0], [0], marker='o', color='w', label=species,
markeredgecolor='k', markeredgewidth=1,
markerfacecolor=c, markersize=10) for c, species in zip(cols_list, labels_list)]
ax.legend(handles=legend_elements, loc = 'upper right', ncol = 1)
# add legend
#ax.legend(loc = 'upper right', ncol = 1, frameon = False)
# format and label axes
ax.set_xlabel("", fontsize = 0)
ax.set_ylabel("Area Under PRC", fontsize = ax_fontsize)
ax.set_xticklabels(labels = tfs_latex_names, fontsize = ax_fontsize)
ax.tick_params(axis='y', which='major', pad = -2, labelsize = axtick_fontsize)
plt.ylim(0, yax_max) # limit is hard-coded so that it's constant across all plots
plt.yticks([0, 0.2, 0.4, 0.6])
# modify font size if there isn't much to plot
if len(include) < 3:
titlesize = titlesize - 2
# use plot-acceptable version of test data species name
# e.g. "mm10" --> "Mouse"
title = "Model Performance, "
title += r"$\bf{" + model_names_dict[species].replace("-trained", "") + "}$"
title += " Test Data"
plt.title(title, fontsize = titlesize)
if include is None:
save_suffix = "all"
else:
save_suffix = ",".join(include)
if save_files:
plt.savefig(ROOT + "plots/dotplots_" + species + "_test_" + save_suffix + ".png",
bbox_inches='tight', pad_inches = 0.1, dpi = 300)
plt.savefig(ROOT + "plots/dotplots_" + species + "_test_" + save_suffix + ".pdf",
bbox_inches='tight', pad_inches = 0.1)
plt.show()
###Output
_____no_output_____
###Markdown
Human Test Set
###Code
all_preds_human_test = load_all_test_set_preds("hg38")
# process predictions into auPRCs for every model
human_test_df = get_auPRC_df(all_preds_human_test, "hg38")
# optionally, save results to file to save time
human_test_df.to_csv(ROOT + "plots/hg38_test_all_auPRCs.csv")
# listing out all the groups of model types (training species)
# to make plots with
include_list_all = [
["mm10", "hg38"],
["mm10", "DA", "hg38"],
["mm10", "NS", "hg38"]
]
for index, include_list in enumerate(include_list_all):
sns.set(rc = {'figure.figsize' : FIG_SIZE})
plt.figure(index)
make_boxplot(human_test_df, "hg38", save_files = True, include = include_list)
###Output
_____no_output_____
###Markdown
Mouse Test Set
###Code
all_preds_mouse_test = load_all_test_set_preds("mm10")
mouse_test_df = get_auPRC_df(all_preds_mouse_test, "mm10")
mouse_test_df.to_csv(ROOT + "plots/mm10_test_all_auPRCs.csv")
# Generate plots for performance on MOUSE test data
include_list_all = [
["mm10", "hg38"],
["mm10", "DA", "hg38"]
]
for index, include_list in enumerate(include_list_all):
sns.set(rc = {'figure.figsize' : FIG_SIZE})
plt.figure(index)
make_boxplot(mouse_test_df, "mm10", save_files = True, include = include_list)
###Output
_____no_output_____ |
notebooks/Data - Modeling.ipynb | ###Markdown
Predict Delayed Flights
###Code
import pandas as pd
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import xgboost as xgb
import os
df = pd.read_csv('../assets/data_prep_majority.txt', sep="\t", encoding="utf-8")
_df = df.copy()
_df.sample(5)
_df['Weekday'] = [str(datetime.strptime(date, '%Y-%m-%d').strftime('%A')) for date in _df['Date']]
selected_vars = _df[['ETD_Group', 'Weekday', 'Origin_City', 'Destination_City', 'Equipment_Type', 'Carrier_Name', 'Carrier_Class', 'Delayed']]
selected_vars.head()
from sklearn.preprocessing import LabelEncoder  # used below; otherwise only imported later in the Modeling section
for _ in list(selected_vars.columns):
selected_vars[_] = LabelEncoder().fit_transform(_df[_])
df_split = selected_vars.loc[np.random.choice(selected_vars[selected_vars['Delayed']==1].index, 20000, replace = True)]
df_split2 = selected_vars.loc[np.random.choice(selected_vars[selected_vars['Delayed']==0].index, 30000, replace = True)]
df_split = df_split.append(df_split2, ignore_index=True)
x = selected_vars.drop(['Delayed'], axis=1)
y = selected_vars['Delayed']
###Output
_____no_output_____
###Markdown
Modeling
###Code
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegressionCV
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score, roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from collections import defaultdict
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.metrics import mean_squared_error, accuracy_score
from sklearn.externals.six import StringIO
from IPython.display import Image
#import pydotplus
from sklearn.tree import export_graphviz
from sklearn.preprocessing import LabelEncoder
#Split data into 3: 60% train, 20% validation, 20% test
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=888)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.25, random_state=999)
gbm = xgb.XGBClassifier(
n_estimators=30000, #arbitrary large number
max_depth=3,
objective="reg:linear",
learning_rate=.3,
subsample=1,scale_pos_weight=5,
min_child_weight=1,
colsample_bytree=.8
)
eval_set=[(x_train,y_train),(x_val,y_val)] #tracking train/validation error as we go
fit_model = gbm.fit(
x_train, y_train,
eval_set=eval_set,
eval_metric='auc',
early_stopping_rounds=20,
verbose=True #gives output log as below
)
gbm_pred = gbm.predict(x_test, ntree_limit=gbm.best_ntree_limit)
gbm_accuracy = accuracy_score(y_test, gbm_pred)
gbm_f1 = f1_score(y_test, gbm_pred)
print('XGBoost Accuracy:', gbm_accuracy)
print('XGBoost F1:', gbm_f1)
xgb_confusion = confusion_matrix(y_test, gbm_pred)
plt.figure(figsize=(6,6))
labels = ['Delayed', 'Not Delayed']
sns.heatmap(xgb_confusion, cmap=plt.cm.Blues, annot=True, square=True, xticklabels=labels, yticklabels=labels, fmt='g')
plt.xlabel('\nPredicted delays')
plt.ylabel('Actual delays\n')
plt.title('XGBoost Confusion Matrix\n');
gbm_all = xgb.XGBClassifier(
n_estimators=30000, #arbitrary large number
max_depth=3,
objective="reg:linear",
learning_rate=.3,
subsample=1, scale_pos_weight=5,
min_child_weight=1,
colsample_bytree=.8
)
eval_set=[(x_train,y_train),(x_val,y_val)] #tracking train/validation error as we go
fit_model = gbm_all.fit(
x_train, y_train,
eval_set=eval_set,
eval_metric='auc',
early_stopping_rounds=20,
verbose=True #gives output log as below
)
gbm_all_pred = gbm_all.predict(x_test, ntree_limit=gbm_all.best_ntree_limit)
gbm_all_accuracy = accuracy_score(y_test, gbm_all_pred)
gbm_all_f1 = f1_score(y_test, gbm_all_pred)
print('XGBoost Test Accuracy:', gbm_all_accuracy)
print('XGBoost Test F1:', gbm_all_f1)
gbm_val_pred = gbm_all.predict(x_val, ntree_limit=gbm_all.best_ntree_limit)
gbm_val_accuracy = accuracy_score(y_val, gbm_val_pred)
gbm_val_f1 = f1_score(y_val, gbm_val_pred)
print('XGBoost Validation Accuracy:', gbm_val_accuracy)
print('XGBoost Validation F1:', gbm_val_f1)
xgb_confusion = confusion_matrix(y_test, gbm_all_pred)
plt.figure(figsize=(6,6))
labels = ['Delayed', 'Not Delayed']
sns.heatmap(xgb_confusion, cmap='PuBu', annot=True, square=True
, xticklabels=labels, yticklabels=labels, fmt='g')
plt.xlabel('\nPredicted delays')
plt.ylabel('Actual delays\n')
plt.title('XGBoost Confusion Matrix\n')
_df.Delayed.value_counts()
fig, ax = plt.subplots(figsize=(20,20))
# xgb.plot_importance(gbm)
xgb.plot_importance(gbm_all, importance_type='gain', ax=ax)
###Output
_____no_output_____ |
tutorials/005-EVI-with-Overloaded-Operators-Sentinel-2.ipynb | ###Markdown
Using Overloaded Operators to Compute the EVI (Sentinel-2) - GitHub Repo: [https://github.com/davemlz/eemont](https://github.com/davemlz/eemont)- PyPI link: [https://pypi.org/project/eemont/](https://pypi.org/project/eemont/)- Documentation: [https://eemont.readthedocs.io/en/0.1.7/index.html](https://eemont.readthedocs.io/en/0.1.7/index.html) Let's start! If required, please uncomment:
###Code
#!pip install eemont
#!pip install geemap
###Output
_____no_output_____
###Markdown
Import the required packages.
###Code
import ee, eemont, datetime, geemap
###Output
_____no_output_____
###Markdown
Authenticate and Initialize Earth Engine and geemap.
###Code
Map = geemap.Map()
###Output
_____no_output_____
###Markdown
Point of interest.
###Code
point = ee.Geometry.Point([-76.0269,2.92846])
###Output
_____no_output_____
###Markdown
Get, filter, mask clouds and scale the image collection.
###Code
S2 = (ee.ImageCollection('COPERNICUS/S2_SR')
.filterBounds(point)
.sort('CLOUDY_PIXEL_PERCENTAGE')
.first()
.maskClouds()
.scale()
.index('EVI')) # Let's compute the EVI using the index() method for comparison
###Output
_____no_output_____
###Markdown
Let's select the required bands for EVI:
###Code
N = S2.select('B8')
R = S2.select('B4')
B = S2.select('B2')
###Output
_____no_output_____
###Markdown
Overloaded Operators`eemont` has overloaded the binary operators, rich comparisons and unary operators in the following list for the `ee.Image` class: (+, -, \*, /, //, %, \*\*, <, >, <=, >=, &, |, -, ~). Therefore, you can now use them for image operations!
###Code
EVI = 2.5 * (N - R) / (N + 6.0 * R - 7.5 * B + 1.0)
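# (Added illustration -- not part of the original tutorial.) The comparison and
# logical operators listed above work the same way; for example, a simple
# vegetation mask with arbitrary, purely illustrative thresholds:
veg_mask = (EVI > 0.3) & (N > R)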
###Output
_____no_output_____
###Markdown
Visualization Let's define the EVI visualization parameters:
###Code
visEVI = {
'min':0,
'max':1,
'palette':[
'FFFFFF', 'CE7E45', 'DF923D', 'F1B555', 'FCD163', '99B718', '74A901', '66A000',
'529400', '3E8601', '207401', '056201', '004C00', '023B01', '012E01', '011D01', '011301'
]
}
###Output
_____no_output_____
###Markdown
And also the RGB visualization parameters:
###Code
visRGB = {
'min':0,
'max':0.3,
'bands':['B4', 'B3', 'B2']
}
###Output
_____no_output_____
###Markdown
Use `geemap` to display results:
###Code
Map.addLayer(S2,visRGB,'RGB')
Map.addLayer(EVI,visEVI,'EVI With Overloaded Operators')
Map.addLayer(S2.select('EVI'),visEVI,'EVI With index() Method')
Map.add_colorbar(visEVI['palette'], caption = 'EVI')
Map.centerObject(point,9)
Map
###Output
_____no_output_____ |
source/Physics-Informed NN_bond-poro-with_Pre-trained model.ipynb | ###Markdown
Imports
###Code
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop, Adadelta, Adagrad, Adam, Nadam, SGD
from keras.callbacks import EarlyStopping, TerminateOnNaN
from keras import backend as K
from keras.losses import mean_squared_error
from keras.models import load_model, Model
import tensorflow as tf
# Normalize the data.
from sklearn import preprocessing
from keras.regularizers import l1_l2
import random
def fix_seeds(seed):
random.seed(seed)
np.random.seed(seed)
tf.set_random_seed(seed)
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# fix_seeds(1)
###Output
Using TensorFlow backend.
C:\Users\berkc\Miniconda3\envs\Clone_Research_AM_2020\lib\site-packages\tensorflow\python\framework\dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
C:\Users\berkc\Miniconda3\envs\Clone_Research_AM_2020\lib\site-packages\tensorflow\python\framework\dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
C:\Users\berkc\Miniconda3\envs\Clone_Research_AM_2020\lib\site-packages\tensorflow\python\framework\dtypes.py:528: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
C:\Users\berkc\Miniconda3\envs\Clone_Research_AM_2020\lib\site-packages\tensorflow\python\framework\dtypes.py:529: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
C:\Users\berkc\Miniconda3\envs\Clone_Research_AM_2020\lib\site-packages\tensorflow\python\framework\dtypes.py:530: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
C:\Users\berkc\Miniconda3\envs\Clone_Research_AM_2020\lib\site-packages\tensorflow\python\framework\dtypes.py:535: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
###Markdown
Functions
###Code
import pickle
def save_obj(obj, name):
with open(name, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
# Compute the RMSE given the ground truth (y_true) and the predictions(y_pred)
def root_mean_squared_error(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
# Making sure dimensionless bond length is less than 1
def bond(bl):
# with tf.Session() as sess:
# init = tf.global_variables_initializer()
# sess.run(init)
# print("bl:",bl.eval())
# print("bl:",tf.negative(bl).eval())
# print("bl:",K.relu(tf.negative(bl)).eval())
# print(tf.add(K.relu(tf.negative(bl)), K.relu(bl-1.0)).eval())
# return bl-1.0
return tf.add(K.relu(tf.negative(bl)), K.relu(bl-1.0))
# Making sure final porosity is less than initial
def poros(poroi, porof):
# H0, w0 = nonscaled_unlabel_inp[:,2], nonscaled_unlabel_inp[:,3]
# # H0, w0 = H0[:,np.newaxis], w0[:,np.newaxis]
# pi = tf.constant(np.pi)
# a, b = w0/2, H0/2
# theta: bond angle, bl: bond length in mm
# theta = tf.math.asin(bl)
# poroi = (H0**2 - pi*b**2)/(H0*w0+H0**2)
# dela = b**2*(2*theta-np.sin(2*theta))
# wt = w0 + H0*theta/4 - H0*np.sin(2*theta)/8
# porof = (H0**2*np.cos(theta) - (pi*b**2 - dela))/(H0*wt+H0**2*np.cos(theta))
# with tf.Session() as sess:
# init = tf.global_variables_initializer()
# sess.run(init)
# print(K.relu(tf.negative(porof)).eval())
# print((K.relu(tf.negative(porof)) + K.relu(porof-poroi)).eval())
return K.relu(tf.negative(porof)) + K.relu(porof-poroi)
def strength1(bl, porof, nlayer=6):
sigma01, sigma02, C1s = 6, 31, 21
sigma_long = sigma01*(K.exp((1.0-porof)**(C1s*nlayer))-porof) + sigma02*(1.0-porof)
n = K.shape(sigma_long)[0]
sorted_strength, sortedIndices = tf.math.top_k(sigma_long, n, True)
sorted_bl = K.gather(bl, sortedIndices)
sorted_porof = K.gather(porof, sortedIndices)
argg = tf.argsort(sorted_bl,axis=-1,direction='DESCENDING',stable=False,name=None)
sorted_bl_corr = K.gather(sorted_bl, argg)
# with tf.Session() as sess:
# init = tf.global_variables_initializer()
# sess.run(init)
# print("strength,porof:", sorted_strength.eval(), sorted_porof.eval())
# print("sorted_bl-bl, str,porof:",sorted_bl.eval()-bl.eval(), sorted_strength.eval(), sorted_porof.eval())
# print(argg.eval())
# print("sorted_bl:",sorted_bl.eval())
# print("sorted_corrbl:",sorted_bl_corr.eval())
# print("sorted_corrbl-sortbl:",sorted_bl_corr.eval()-sorted_bl.eval())
return sorted_bl_corr-sorted_bl
def strength2(bl, porof, nlayer=6):
sigma01, sigma02, C1s = 6, 31, 21
sigma_long = sigma01*(K.exp((1.0-porof)**(C1s*nlayer))-porof) + sigma02*(1.0-porof)
n = K.shape(sigma_long)[0]
sorted_strength, sortedIndices = tf.math.top_k(sigma_long, n, True)
sorted_bl = K.gather(bl, sortedIndices)
n = K.cast(n, tf.float32)
rel = K.relu(sorted_bl[1:]-sorted_bl[0:-1])
num_vio = K.cast(tf.math.count_nonzero(rel), tf.float32)
# with tf.Session() as sess:
# init = tf.global_variables_initializer()
# sess.run(init)
# print("strength", sorted_strength.eval())
# print("sorted_bl:",sorted_bl.eval())
# print("diff:",(sorted_bl[1:]-sorted_bl[0:-1]).eval())
# print("Krelu:", K.relu(sorted_bl[1:]-sorted_bl[0:-1]).eval())
# print("greater than 0:", num_vio.eval())
# print("shape", n.eval())
# print("mean:", (num_vio/n).eval())
# return (sorted_bl[1:]-sorted_bl[0:-1])
return num_vio/n
def phy_loss_mean(params):
# useful for cross-checking training
loss1, loss2, loss3, loss4, lam1, lam2, lam3, lam4 = params
def loss(y_true,y_pred):
# with tf.Session() as sess:
# init = tf.global_variables_initializer()
# sess.run(init)
# print((lam2*K.mean(loss2)).eval())
# print( (lam1*K.mean(loss1) + lam2*K.mean(loss2) + lam3*K.mean(K.relu(loss3)) + lam4*loss4).eval())
return lam1*K.mean(loss1) + lam2*K.mean(loss2) + lam3*K.mean(K.relu(loss3)) + lam4*loss4
return loss
#function to calculate the combined loss = sum of rmse and phy based loss
def combined_loss(params):
loss1, loss2, loss3, loss4, lam1, lam2, lam3, lam4 = params
def loss(y_true,y_pred):
# X_scaled1 = (diff1 - K.min(diff1)) / (K.max(diff1) - K.min(diff1))
# X_scaled2 = (diff2 - K.min(diff2)) / (K.max(diff2) - K.min(diff2))
return mean_squared_error(y_true, y_pred) + lam1 * K.mean(loss1) + lam2 * K.mean(loss2) + lam3*K.mean(K.relu(loss3)) + lam4 * loss4
return loss
def PIML(optimizer_name, optimizer_val, hybrid, pre_train, tr_size, lamda, iteration, n_nodes, n_layers, drop_frac, reg, samp):
# fix_seeds(1)
# Hyper-parameters of the training process
# batch_size = int(tr_size/2)
batch_size = 2
# num_epochs = 600
num_epochs = 200
val_frac = 0.2
patience_val = 150
# Initializing results filename
exp_name = "fine-tuned_" + pre_train + optimizer_name + '_usePhy' + str(hybrid) + '_trsize' + str(tr_size) + '_lamda' + str(lamda) + '_iter' + str(iteration)
exp_name = exp_name.replace('.','pt')
# results_dir = '../results_unique/'
# results_dir = '../results_BK_v2/'
results_dir = '../results_BK_v2_cnstTest/'
model_name = results_dir + exp_name + '_model.h5' # storing the trained model
if reg==True and samp==25:
results_name = results_dir + exp_name + '_results_25_regularizer.dat' # storing the results of the model
elif reg==False and samp==25:
results_name = results_dir + exp_name + '_results_25.dat' # storing the results of the model
elif reg==True and samp==1519:
results_name = results_dir + exp_name + '_results_1519_regularizer.dat' # storing the results of the model
elif reg==False and samp==1519:
results_name = results_dir + exp_name + '_results_1519.dat' # storing the results of the model
# Load labeled data
data = np.loadtxt('../data/labeled_data.dat')
# data = np.loadtxt('../data/labeled_data_BK_constw_unique.dat')
# data = np.loadtxt('../data/labeled_data_BK_constw_v2.dat')
x_labeled = data[:, :-3]
y_labeled = data[:, -3:-1] # dimensionless bond length and porosity measurements
# data = np.loadtxt('../data/unlabeled_data_.dat')
# data = np.loadtxt('../data/unlabeled_data_25part.dat')
# data = np.loadtxt('../data/unlabeled_data_BK_constw.dat')
# data = np.loadtxt('../data/unlabeled_data_BK_constw_v2.dat'
if samp==25:
data = np.loadtxt('../data/unlabeled_data_BK_constw_v2_25.dat')
x_unlabeled = data[:, :]
elif samp==1519:
data = np.loadtxt('../data/unlabeled_data_BK_constw_v2_1525.dat')
# x_unlabeled = data[951:952, :]
x_unlabeled = data[:, :]
x_unlabeled1 = x_unlabeled[:1303, :]
x_unlabeled2 = x_unlabeled[-6:, :]
# print(x_unlabeled2)
x_unlabeled = np.vstack((x_unlabeled1,x_unlabeled2))
# print(x_unlabeled.shape, x_unlabeled[-10:,:])
# data = np.loadtxt('../data/unlabeled_data_BK_constw_v2_1519.dat')
# x_unlabeled = data[:1303, :]
# initial porosity
init_poro = x_unlabeled[:, -1]
# normalize dataset with MinMaxScaler
scaler = preprocessing.MinMaxScaler(feature_range=(0.0, 1.0))
# scaler = preprocessing.StandardScaler()
x_labeled[:,:-2] = scaler.fit_transform(x_labeled[:,:-2])
# y_labeled = scaler1.fit_transform(y_labeled)
x_unlabeled[:,:-3] = scaler.fit_transform(x_unlabeled[:,:-3])
# # scaled initial porosity
# init_poro = x_unlabeled[:, -1]
x_unlabeled = x_unlabeled[:, :-1] # for all but last column
# print(x_unlabeled[950:960])
# train and test data
trainX, trainY = x_labeled[:tr_size,:], y_labeled[:tr_size]
# testX, testY = x_labeled[tr_size:,:], y_labeled[tr_size:]
testX, testY = x_labeled[20:,:], y_labeled[20:]
if hybrid == 0:
# Removing the last column from x_unlabeled (corresponding to physics output)
x_unlabeled = x_unlabeled[:,:-1]
# dependencies = {
# 'root_mean_squared_error': root_mean_squared_error, 'loss1': phy_loss_mean([1.0, 1.0, lamda[0], lamda[1]]),
# 'loss2': combined_loss([1.0, 1.0, lamda[0], lamda[1]])
# }
dependencies = {
'root_mean_squared_error': root_mean_squared_error
}
# load the pre-trained model using non-calibrated physics-based model predictions (./data/unlabeled.dat)
loaded_model = load_model(results_dir + pre_train, custom_objects=dependencies)
# Creating the model
model = Sequential()
for layer in np.arange(n_layers):
if layer == 0:
model.add(Dense(n_nodes, activation='relu', input_shape=(np.shape(trainX)[1],)))
else:
if reg:
model.add(Dense(n_nodes, activation='relu', kernel_regularizer=l1_l2(l1=.001, l2=.001)))
else:
model.add(Dense(n_nodes, activation='relu'))
model.add(Dropout(rate=drop_frac))
model.add(Dense(2, activation='linear'))
# pass the weights to all layers but 1st input layer, whose dimensions are updated
for new_layer, layer in zip(model.layers[1:], loaded_model.layers[1:]):
new_layer.set_weights(layer.get_weights())
# physics-based regularization
uinp_sc = K.constant(value=x_unlabeled) # unlabeled input data
lam1 = K.constant(value=lamda[0]) # regularization hyper-parameter
lam2 = K.constant(value=lamda[1]) # regularization hyper-parameter
lam3 = K.constant(value=lamda[2]) # regularization hyper-parameter
lam4 = K.constant(value=lamda[3]) # regularization hyper-parameter
predictions = model(uinp_sc) # model output at depth i
phyloss1 = bond(predictions[:,0]) # physics loss 1
# uinp = K.constant(value=x_unlabeled_non) # unlabeled input data
phyloss2 = poros(init_poro, predictions[:,1]) # physics loss 1
phyloss3 = strength1(predictions[:,0], predictions[:,1])
phyloss4 = strength2(predictions[:,0], predictions[:,1])
totloss = combined_loss([phyloss1, phyloss2, phyloss3, phyloss4, lam1, lam2, lam3, lam4])
phyloss = phy_loss_mean([phyloss1, phyloss2, phyloss3, phyloss4, lam1, lam2, lam3, lam4])
model.compile(loss=totloss,
optimizer=optimizer_val,
metrics=[phyloss, root_mean_squared_error])
early_stopping = EarlyStopping(monitor='val_loss', patience=patience_val, verbose=1)
history = model.fit(trainX, trainY,
batch_size=batch_size,
epochs=num_epochs,
verbose=0,
validation_split=val_frac, callbacks=[early_stopping, TerminateOnNaN()])
test_score = model.evaluate(testX, testY, verbose=0)
print('iter: ' + str(iteration) + ' hybrid: ' + str(hybrid) +
' nL: ' + str(n_layers) + ' nN: ' + str(n_nodes) +
' lamda1: ' + str(lamda[0]) + ' lamda2: ' + str(lamda[1]) + ' trsize: ' + str(tr_size) +
' TestRMSE: ' + str(test_score[2]) + ' PhyLoss: ' + str(test_score[1]), "\n")
# model.save(model_name)
# save results
results = {'train_loss_1':history.history['loss_1'],
'val_loss_1':history.history['val_loss_1'],
'train_rmse':history.history['root_mean_squared_error'],
'val_rmse':history.history['val_root_mean_squared_error'],
'test_rmse':test_score[2],
'PhyLoss':test_score[1]}
# save_obj(results, results_name)
predictions = model.predict(testX)
return results, results_name, predictions, testY, test_score[2]
# f = open(results_name,"w")
# f.write( str(dictnry) )
# f.close()
# header = "X-Column, Y-Column\n"
# header += "Temperature, speed, layer height, layer width, # of layers, # of interface/layers, length, pred_dim_bl, pred_porosity"
# np.savetxt(results_name, data, header=header, fmt='%.14f')
# spio.savemat(results_name, {'train_loss_1':history.history['loss_1'],
# 'val_loss_1':history.history['val_loss_1'],
# 'train_rmse':history.history['root_mean_squared_error'],
# 'val_rmse':history.history['val_root_mean_squared_error'],
# 'test_rmse':test_score[2]})
# Main Function
if __name__ == '__main__':
# fix_seeds(1)
# List of optimizers to choose from
optimizer_names = ['Adagrad', 'Adadelta', 'Adam', 'Nadam', 'RMSprop', 'SGD', 'NSGD']
optimizer_vals = [Adagrad(clipnorm=1), Adadelta(clipnorm=1), Adam(clipnorm=1), Nadam(clipnorm=1), RMSprop(clipnorm=1), SGD(clipnorm=1.), SGD(clipnorm=1, nesterov=True)]
# selecting the optimizer
optimizer_num = 1
optimizer_name = optimizer_names[optimizer_num]
optimizer_val = optimizer_vals[optimizer_num]
# Selecting Other Hyper-parameters
drop_frac = 0 # Fraction of nodes to be dropped out
hybrid = 1 # Whether physics output is used as another feature in the NN model or not
n_layers = 2 # Number of hidden layers
n_nodes = 10 # Number of nodes per hidden layer
# pre-trained model
# pre_train = 'Pre-trainAdam_drop0pt05_nL2_nN20_trsize1000_iter0.h5'
# pre_train = 'Pre-train_wPhyLoss_Adam_drop0pt05_nL2_nN20_trsize1000_lamda[1, 1]_iter0.h5'
# pre_train = 'Pre-trainAdam_drop0_nL2_nN20_trsize1400_iter0.h5'
# pre_train = 'Pre-trainAdam_drop0_nL2_nN20_trsize1500_iter0.h5'
# 25 synthetic data
# pre_train = 'Pre-trainAdam_drop0_nL2_nN10_trsize24_iter0.h5'
# 1519 synthetic data
# pre_train = 'Pre-trainAdam_drop0_nL2_nN10_trsize1303_iter0.h5'
# 1519 synthetic data
# pre_train = 'Pre-trainAdam_drop0_nL2_nN10_trsize1515_iter0.h5'
# pre_train = 'Pre-trainAdadelta_drop0pt2_nL2_nN10_trsize1515_iter0.h5'
# pre_train = 'Pre-trainAdadelta_drop0_nL2_nN10_trsize1250_iter0.h5'
pre_train = 'Pre-trainAdadelta_drop0_nL2_nN10_trsize1300_iter0.h5'
# pre_train = 'Pre-trainAdadelta_drop0_nL2_nN10_trsize1200_iter0.h5'
pre_train = 'Pre-trainAdadelta_drop0_nL2_nN10_trsize1308_iter0.h5'
# pre_train = 'Pre-trainAdadelta_drop0_nL2_nN10_trsize1305_iter0.h5'
#set lamda=0 for pgnn0
# lamda = [.01, .01, .01, 0] # Physics-based regularization constant
# lamda = [.05, .05, .05, 0] # Physics-based regularization constant
lamda = [0.3, 0.15, 0.008, 0] # Physics-based regularization constant
# lamda = [.001, .001, .001, 0] # Physics-based regularization constant
# lamda = [0, 0, 0, 0] # Physics-based regularization constant
# lamda = [1.75, 1, 0.15, 0]
# Iterating over different training fractions and splitting indices for train-test splits
trsize_range = [4,6,8,10,20]
# training size: use the largest value in trsize_range
tr_size = trsize_range[-1]
# use regularizer
reg = True
# sample size used
samp = 1519
# samp = 25
# total number of runs
iter_range = np.arange(1)
testrmse = []
# iterating through all possible params
for iteration in iter_range:
results, result_file, pred, obs, rmse = PIML(optimizer_name, optimizer_val, hybrid,
pre_train, tr_size, lamda, iteration, n_nodes, n_layers, drop_frac, reg, samp)
testrmse.append(rmse)
from sklearn.metrics import r2_score
(r2_score(obs[:,1], pred[:,1]))
r2_score(obs, pred)
# from statistics import stdev
# print(sum(testrmse) / len(testrmse),stdev(testrmse))
pred
import matplotlib.pyplot as plt
# Plot training & validation accuracy values
plt.figure()
plt.plot(obs[:,0], obs[:,1], 'd')
plt.plot(pred[:,0], pred[:,1], '*')
# plt.title('Model accuracy')
# plt.ylabel('RMSE')
# plt.xlabel('Epoch')
# plt.legend(['Train', 'Validation'], loc='upper right')
plt.show()
save_obj(pred, "../results_BK_v2_cnstTest/pred_piml_pre.dat")
# save_obj(obs, "../results_BK_v2_cnstTest/obs.dat")
# import pickle
# # import glob
# # print(glob.glob("../results/*.dat"))
# def save_obj(obj, name ):
# with open(name, 'wb') as f:
# pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
# save_obj(results, result_file)
# # def load_obj(name):
# # with open(name, 'rb') as f:
# # return pickle.load(f)
# # results = load_obj(result_file)
dict_keys = []
# get key and value
print ("Dict keys are: ")
for key, value in results.items():
dict_keys.append(key)
print(dict_keys)
train_loss_Phy = results[dict_keys[0]]
val_loss_Phy = results[dict_keys[1]]
train_rmse = results[dict_keys[2]]
val_rmse = results[dict_keys[3]]
test_rmse = results[dict_keys[4]]
PhyLoss = results[dict_keys[5]]
print("Test RMSE: ",test_rmse,"PhyLoss: ", PhyLoss)
import matplotlib.pyplot as plt
# Plot training & validation accuracy values
plt.figure()
plt.plot(train_rmse)
plt.plot(val_rmse)
plt.title('Model accuracy')
plt.ylabel('RMSE')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper right')
plt.show()
# Plot training & validation loss values
plt.figure()
plt.plot(train_loss_Phy)
plt.plot(val_loss_Phy)
plt.title('Physical Inconsistency')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper right')
plt.show()
import matplotlib.pyplot as plt
import matplotlib
import os
# Adjust your matplotlib script by adding the following lines after import matplotlib
matplotlib.use("pdf")
# matplotlib.use("pgf")
matplotlib.rcParams.update({
"pgf.texsystem": "pdflatex",
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
})
# add LaTeX on python path
user_name = os.getlogin()
os.environ["PATH"] += os.pathsep + 'C:/Users/' + user_name + '/AppData/Local/Programs/MiKTeX 2.9/miktex/bin/x64'
#=========================== Using LaTeX compatible fonts =============================== #
# use LaTeX fonts in the plot
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# get the figure
fig = plt.figure()
colors = ["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd", "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf"]
# plt.plot(exp[:,0],exp[:,1], 'x', color=colors[1], markersize=7)
plt.plot(train_rmse, "-",color=colors[0])
plt.plot(val_rmse, "--", color=colors[1])
plt.ylabel('RMSE', fontsize=22)
plt.xlabel('Epoch', fontsize=22)
plt.ylim(0.0,0.5)
plt.legend(['Train', 'Validation'], loc='best', fontsize=12)
plt.tick_params(labelsize=20)
plt.xticks(np.arange(0, len(train_rmse)+1, 50.0))
plt.show()  # with the non-GUI 'pdf' backend selected above, show() cannot open a window (see the warning in the output below)
filename = '../figs/epoch_dnn_upd_hyb_loss'
# save as PDF
fig.savefig("{}.pdf".format(filename), bbox_inches='tight', dpi=300)
###Output
C:\Users\berkc\Miniconda3\envs\Clone_Research_AM_2020\lib\site-packages\ipykernel_launcher.py:38: UserWarning: Matplotlib is currently using pdf, which is a non-GUI backend, so cannot show the figure.
|
ethylene-conical/note.ipynb | ###Markdown
Searching for the conical intersection structure of ethylene. An explanatory article is available [here](https://yamnor.me/2203131203). Setup
###Code
from matplotlib import pyplot as plt
energy = {}
s_squared = {}
###Output
_____no_output_____
###Markdown
Searching for the minimum-energy structure of the S0 state
###Code
# Download the GAMESS output file
!wget "https://raw.githubusercontent.com/yamnor/note/main/ethylene-conical/optimize.log"
# Read the change in potential energy (ENERGY) during the geometry optimization from the output file
energy['optimize'] = [float(line.split()[3]) for line in open('optimize.log', 'r').readlines() if ' ENERGY= ' in line]
# Convert ENERGY from au to kcal/mol (relative to the first optimization step)
energy['optimize'] = [(x - energy['optimize'][0]) * 627.5094 for x in energy['optimize']]
# Plot the change in ENERGY during the geometry optimization
fig, ax = plt.subplots()
step = list(range(len(energy['optimize'])))
ax.plot(step, energy['optimize'])
ax.set_xlabel("Step")
ax.set_ylabel("Energy [kcal/mol]")
plt.show()
###Output
_____no_output_____
###Markdown
Searching for the minimum-energy structure of the S1 state
###Code
# Download the GAMESS output file
!wget "https://raw.githubusercontent.com/yamnor/note/main/ethylene-conical/optimize-excite.log"
# Read the change in potential energy (ENERGY) during the optimization for the ground state (S0) and two excited states (S1, S2)
energy['optimize-excite'] = []
for n in range(4):
energy['optimize-excite'].append([float(line.split()[2]) for line in open('optimize-excite.log', 'r').readlines() if ' %d A '%(n) in line])
# Plot the change in ENERGY during the optimization for the ground (S0) and first excited (S1) states
fig, ax = plt.subplots()
step = list(range(len(energy['optimize-excite'][0])))
ax.plot(step, energy['optimize-excite'][0], label="S0")
ax.plot(step, energy['optimize-excite'][1], label="S1")
ax.set_xlabel("Step")
ax.set_ylabel("Energy [au]")
ax.legend()
plt.show()
###Output
_____no_output_____
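###Markdown
(Added note, not part of the original notebook.) The cells below repeat the same log-parsing loop for every calculation. As a side remark, it could be collected into a small helper such as the sketch below; the function name and arguments are only an illustration.
###Code
# Minimal helper sketch: collect the per-step values of states 0..n_states-1
# from a GAMESS log file, using the same ' <n> A ' line pattern as the cells
# below (column 2 holds the energy, column 4 the <S**2> value).
def read_state_values(logfile, n_states=4, column=2):
    values = []
    for n in range(n_states):
        values.append([float(line.split()[column])
                       for line in open(logfile, 'r').readlines()
                       if ' %d A ' % (n) in line])
    return values
###Output
_____no_output_____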
###Markdown
Searching for the S0/S1 conical intersection structure (spin-flip TD-DFT). First run
###Code
!wget "https://raw.githubusercontent.com/yamnor/note/main/ethylene-conical/conical-spnflp.log"
energy['conical-1st'] = []
for n in range(4):
energy['conical-1st'].append([float(line.split()[2]) for line in open('conical-spnflp.log', 'r').readlines() if ' %d A '%(n) in line])
fig, ax = plt.subplots()
step = list(range(len(energy['conical-1st'][0])))
ax.plot(step, energy['conical-1st'][0], label="State 0")
ax.plot(step, energy['conical-1st'][1], label="State 1")
ax.plot(step, energy['conical-1st'][2], label="State 2")
ax.plot(step, energy['conical-1st'][3], label="State 3")
ax.set_xlabel("Step")
ax.set_ylabel("Energy [au]")
ax.legend()
plt.show()
s_squared['conical-1st'] = []
for n in range(1,4):
s_squared['conical-1st'].append([float(line.split()[4]) for line in open('conical-spnflp.log', 'r').readlines() if ' %d A '%(n) in line])
fig, ax = plt.subplots()
step = list(range(len(s_squared['conical-1st'][0])))
ax.plot(step, s_squared['conical-1st'][0], label="State 1")
ax.plot(step, s_squared['conical-1st'][1], label="State 2")
ax.plot(step, s_squared['conical-1st'][2], label="State 3")
ax.set_xlabel("Step")
ax.set_ylabel("$<S^2>$")
ax.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Second run
###Code
!wget "https://raw.githubusercontent.com/yamnor/note/main/ethylene-conical/conical-spnflp-2.log"
energy['conical-2nd'] = []
for n in range(4):
energy['conical-2nd'].append([float(line.split()[2]) for line in open('conical-spnflp-2.log', 'r').readlines() if ' %d A '%(n) in line])
step = list(range(len(energy['conical-2nd'][0])))
fig, ax = plt.subplots()
ax.plot(step, energy['conical-2nd'][0], label="State 0")
ax.plot(step, energy['conical-2nd'][1], label="State 1")
ax.plot(step, energy['conical-2nd'][2], label="State 2")
ax.plot(step, energy['conical-2nd'][3], label="State 3")
ax.set_xlabel("Step")
ax.set_ylabel("Energy [au]")
ax.legend()
plt.show()
s_squared['conical-2nd'] = []
for n in range(1,4):
s_squared['conical-2nd'].append([float(line.split()[4]) for line in open('conical-spnflp-2.log', 'r').readlines() if ' %d A '%(n) in line])
step = list(range(len(s_squared['conical-2nd'][0])))
fig, ax = plt.subplots()
ax.plot(step, s_squared['conical-2nd'][0], label="State 1")
ax.plot(step, s_squared['conical-2nd'][1], label="State 2")
ax.plot(step, s_squared['conical-2nd'][2], label="State 3")
ax.set_xlabel("Step")
ax.set_ylabel("$<S^2>$")
ax.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Searching for the S0/S1 conical intersection structure (conventional TD-DFT)
###Code
!wget "https://raw.githubusercontent.com/yamnor/note/main/ethylene-conical/conical-excite.log"
energy['conical-ntddft'] = []
for n in range(4):
energy['conical-ntddft'].append([float(line.split()[2]) for line in open('conical-excite.log', 'r').readlines() if ' %d A '%(n) in line])
fig, ax = plt.subplots()
step = list(range(len(energy['conical-ntddft'][0])))
ax.plot(step, energy['conical-ntddft'][0], label="S0")
ax.plot(step, energy['conical-ntddft'][1], label="S1")
ax.set_xlabel("Step")
ax.set_ylabel("Energy [au]")
ax.legend()
plt.show()
###Output
_____no_output_____ |
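###Markdown
(Added illustration, not part of the original notebook.) At a conical intersection the S0 and S1 energies become degenerate, so a quick sanity check is to plot the S0-S1 gap along the search and see whether it approaches zero. The sketch below reuses the energies read in the previous cell.
###Code
# Plot the S0-S1 energy gap along the conical-intersection search (conventional TD-DFT run).
gap = [e1 - e0 for e0, e1 in zip(energy['conical-ntddft'][0], energy['conical-ntddft'][1])]
fig, ax = plt.subplots()
ax.plot(list(range(len(gap))), gap)
ax.set_xlabel("Step")
ax.set_ylabel("S1 - S0 gap [au]")
plt.show()
###Output
_____no_output_____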
exercise_notebooks/unit_testing_exercise/unit_testing_data_engineering.ipynb | ###Markdown
Unit Testing ML Code: Hands-on Exercise (Data Engineering). In this notebook we will explore unit tests for data engineering. We will use a classic toy dataset: the Iris plants dataset, which comes included with scikit-learn. Dataset details: https://scikit-learn.org/stable/datasets/index.html#iris-plants-dataset. As we progress through the course, the complexity of examples will increase, but we will start with something basic. This notebook is designed so that it can be run in isolation, once the setup steps described below are complete. Setup. Let's begin by importing the dataset and the libraries we are going to use. Make sure you have run `pip install -r requirements.txt` on the requirements file located in the same directory as this notebook. We recommend doing this in a separate virtual environment (see dedicated setup lecture). If you need a refresher on jupyter, pandas or numpy, there are some links to resources in the section notes.
###Code
from sklearn import datasets
import pandas as pd
import numpy as np
# Access the iris dataset from sklearn
iris = datasets.load_iris()
# Load the iris data into a pandas dataframe. The `data` and `feature_names`
# attributes of the dataset are added by default by sklearn. We use them to
# specify the columns of our dataframes.
iris_frame = pd.DataFrame(iris.data, columns=iris.feature_names)
# Create a "target" column in our dataframe, and set the values to the correct
# classifications from the dataset.
iris_frame['target'] = iris.target
###Output
_____no_output_____
###Markdown
Add the `SimplePipeline` from the Test Input Values notebook (same as previous lecture, no changes here)
###Code
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
class SimplePipeline:
def __init__(self):
self.frame = None
# Shorthand to specify that each value should start out as
# None when the class is instantiated.
self.X_train, self.X_test, self.y_train, self.y_test = None, None, None, None
self.model = None
self.load_dataset()
def load_dataset(self):
"""Load the dataset and perform train test split."""
# fetch from sklearn
dataset = datasets.load_iris()
# remove units ' (cm)' from variable names
self.feature_names = [fn[:-5] for fn in dataset.feature_names]
self.frame = pd.DataFrame(dataset.data, columns=self.feature_names)
self.frame['target'] = dataset.target
# we divide the data set using the train_test_split function from sklearn,
# which takes as parameters, the dataframe with the predictor variables,
# then the target, then the percentage of data to assign to the test set,
# and finally the random_state to ensure reproducibility.
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.frame[self.feature_names], self.frame.target, test_size=0.65, random_state=42)
def train(self, algorithm=LogisticRegression):
# we set up a LogisticRegression classifier with default parameters
self.model = algorithm(solver='lbfgs', multi_class='auto')
self.model.fit(self.X_train, self.y_train)
def predict(self, input_data):
return self.model.predict(input_data)
def get_accuracy(self):
# use our X_test and y_test values generated when we used
# `train_test_split` to test accuracy.
# score is a method on the Logistic Regression that
# returns the accuracy by default, but can be changed to other metrics, see:
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression.score
return self.model.score(X=self.X_test, y=self.y_test)
def run_pipeline(self):
"""Helper method to run multiple pipeline methods with one call."""
self.load_dataset()
self.train()
###Output
_____no_output_____
###Markdown
Test Engineered Data (preprocessing). Below we create an updated pipeline which inherits from the SimplePipeline but has new functionality to preprocess the data by applying a scaler. Linear models are sensitive to the scale of the features. For example, features with bigger magnitudes tend to dominate if we do not apply a scaler.
###Code
from sklearn.preprocessing import StandardScaler
class PipelineWithDataEngineering(SimplePipeline):
def __init__(self):
# Call the inherited SimplePipeline __init__ method first.
super().__init__()
# scaler to standardize the variables in the dataset
self.scaler = StandardScaler()
# Train the scaler once upon pipeline instantiation:
# Compute the mean and standard deviation based on the training data
self.scaler.fit(self.X_train)
def apply_scaler(self):
# Scale the test and training data to be of mean 0 and of unit variance
self.X_train = self.scaler.transform(self.X_train)
self.X_test = self.scaler.transform(self.X_test)
def predict(self, input_data):
# apply scaler transform on inputs before predictions
scaled_input_data = self.scaler.transform(input_data)
return self.model.predict(scaled_input_data)
def run_pipeline(self):
"""Helper method to run multiple pipeline methods with one call."""
self.load_dataset()
self.apply_scaler() # updated in this class
self.train()
pipeline = PipelineWithDataEngineering()
pipeline.run_pipeline()
accuracy_score = pipeline.get_accuracy()
print(f'current model accuracy is: {accuracy_score}')
###Output
current model accuracy is: 0.9591836734693877
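###Markdown
(Added illustration, not part of the original exercise; the variable names below are only for this sketch.) StandardScaler simply stores the per-feature mean and standard deviation of the training data and applies z = (x - mean) / std, so the manual formula should reproduce `scaler.transform` exactly:
###Code
# Inspect what the scaler learned and reproduce its transform by hand.
fresh = PipelineWithDataEngineering()   # fresh pipeline: X_train still unscaled, scaler already fitted
print('per-feature mean:', fresh.scaler.mean_)
print('per-feature std :', fresh.scaler.scale_)
manual = (fresh.X_train - fresh.scaler.mean_) / fresh.scaler.scale_
print('matches scaler.transform:', np.allclose(manual, fresh.scaler.transform(fresh.X_train)))
###Output
_____no_output_____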
###Markdown
Now we Unit Test. We focus specifically on the feature engineering step.
###Code
import unittest
class TestIrisDataEngineering(unittest.TestCase):
def setUp(self):
self.pipeline = PipelineWithDataEngineering()
self.pipeline.load_dataset()
def test_scaler_preprocessing_brings_x_train_mean_near_zero(self):
# Given
# convert the dataframe to be a single column with pandas stack
original_mean = self.pipeline.X_train.stack().mean()
# When
self.pipeline.apply_scaler()
# Then
# The idea behind StandardScaler is that it will transform your data
# to center the distribution at 0 and scale the variance at 1.
# Therefore we test that the mean has shifted to be less than the original
# and close to 0 using assertAlmostEqual to check to 3 decimal places:
# https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual
self.assertTrue(original_mean > self.pipeline.X_train.mean()) # X_train is a numpy array at this point.
self.assertAlmostEqual(self.pipeline.X_train.mean(), 0.0, places=3)
print(f'Original X train mean: {original_mean}')
print(f'Transformed X train mean: {self.pipeline.X_train.mean()}')
def test_scaler_preprocessing_brings_x_train_std_near_one(self):
# When
self.pipeline.apply_scaler()
# Then
# We also check that the standard deviation is close to 1
self.assertAlmostEqual(self.pipeline.X_train.std(), 1.0, places=3)
print(f'Transformed X train standard deviation : {self.pipeline.X_train.std()}')
import sys
suite = unittest.TestLoader().loadTestsFromTestCase(TestIrisDataEngineering)
unittest.TextTestRunner(verbosity=1, stream=sys.stderr).run(suite)
###Output
..
###Markdown
Unit Testing ML Code: Hands-on Exercise (Data Engineering). In this notebook we will explore unit tests for data engineering. We will use a classic toy dataset: the Iris plants dataset, which comes included with scikit-learn. Dataset details: https://scikit-learn.org/stable/datasets/index.html#iris-plants-dataset. As we progress through the course, the complexity of examples will increase, but we will start with something basic. This notebook is designed so that it can be run in isolation, once the setup steps described below are complete. Setup. Let's begin by importing the dataset and the libraries we are going to use. Make sure you have run `pip install -r requirements.txt` on the requirements file located in the same directory as this notebook. We recommend doing this in a separate virtual environment (see dedicated setup lecture). If you need a refresher on jupyter, pandas or numpy, there are some links to resources in the section notes.
###Code
from sklearn import datasets
import pandas as pd
import numpy as np
# Access the iris dataset from sklearn
iris = datasets.load_iris()
# Load the iris data into a pandas dataframe. The `data` and `feature_names`
# attributes of the dataset are added by default by sklearn. We use them to
# specify the columns of our dataframes.
iris_frame = pd.DataFrame(iris.data, columns=iris.feature_names)
# Create a "target" column in our dataframe, and set the values to the correct
# classifications from the dataset.
iris_frame['target'] = iris.target
###Output
_____no_output_____
###Markdown
Add the `SimplePipeline` from the Test Input Values notebook (same as previous lecture, no changes here)
###Code
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
class SimplePipeline:
def __init__(self):
self.frame = None
# Shorthand to specify that each value should start out as
# None when the class is instantiated.
self.X_train, self.X_test, self.y_train, self.y_test = None, None, None, None
self.model = None
self.load_dataset()
def load_dataset(self):
"""Load the dataset and perform train test split."""
# fetch from sklearn
dataset = datasets.load_iris()
# remove units ' (cm)' from variable names
self.feature_names = [fn[:-5] for fn in dataset.feature_names]
self.frame = pd.DataFrame(dataset.data, columns=self.feature_names)
self.frame['target'] = dataset.target
# we divide the data set using the train_test_split function from sklearn,
# which takes as parameters, the dataframe with the predictor variables,
# then the target, then the percentage of data to assign to the test set,
# and finally the random_state to ensure reproducibility.
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.frame[self.feature_names], self.frame.target, test_size=0.65, random_state=42)
def train(self, algorithm=LogisticRegression):
# we set up a LogisticRegression classifier with default parameters
self.model = algorithm(solver='lbfgs', multi_class='auto')
self.model.fit(self.X_train, self.y_train)
def predict(self, input_data):
return self.model.predict(input_data)
def get_accuracy(self):
# use our X_test and y_test values generated when we used
# `train_test_split` to test accuracy.
# score is a method on the Logistic Regression that
# returns the accuracy by default, but can be changed to other metrics, see:
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression.score
return self.model.score(X=self.X_test, y=self.y_test)
def run_pipeline(self):
"""Helper method to run multiple pipeline methods with one call."""
self.load_dataset()
self.train()
###Output
_____no_output_____
###Markdown
Test Engineered Data (preprocessing). Below we create an updated pipeline which inherits from the SimplePipeline but has new functionality to preprocess the data by applying a scaler. Linear models are sensitive to the scale of the features. For example, features with bigger magnitudes tend to dominate if we do not apply a scaler.
###Code
from sklearn.preprocessing import StandardScaler
class PipelineWithDataEngineering(SimplePipeline):
def __init__(self):
# Call the inherited SimplePipeline __init__ method first.
super().__init__()
# scaler to standardize the variables in the dataset
self.scaler = StandardScaler()
# Train the scaler once upon pipeline instantiation:
# Compute the mean and standard deviation based on the training data
self.scaler.fit(self.X_train)
def apply_scaler(self):
# Scale the test and training data to be of mean 0 and of unit variance
# self.X_train = self.scaler.transform(self.X_train)
self.X_train = np.array(self.X_train)  # exercise change: X_train is converted to an array but never scaled
self.X_test = self.scaler.transform(self.X_test)
def predict(self, input_data):
# apply scaler transform on inputs before predictions
scaled_input_data = self.scaler.transform(input_data)
return self.model.predict(scaled_input_data)
def run_pipeline(self):
"""Helper method to run multiple pipeline methods with one call."""
self.load_dataset()
self.apply_scaler() # updated in this class
self.train()
pipeline = PipelineWithDataEngineering()
pipeline.run_pipeline()
accuracy_score = pipeline.get_accuracy()
print(f'current model accuracy is: {accuracy_score}')
###Output
current model accuracy is: 0.9591836734693877
###Markdown
Now we Unit Test. We focus specifically on the feature engineering step.
###Code
import unittest
class TestIrisDataEngineering(unittest.TestCase):
def setUp(self):
self.pipeline = PipelineWithDataEngineering()
self.pipeline.load_dataset()
def test_scaler_preprocessing_brings_x_train_mean_near_zero(self):
# Given
# convert the dataframe to be a single column with pandas stack
original_mean = self.pipeline.X_train.stack().mean()
# When
self.pipeline.apply_scaler()
# Then
# The idea behind StandardScaler is that it will transform your data
# to center the distribution at 0 and scale the variance at 1.
# Therefore we test that the mean has shifted to be less than the original
# and close to 0 using assertAlmostEqual to check to 3 decimal places:
# https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual
self.assertTrue(original_mean > self.pipeline.X_train.mean()) # X_train is a numpy array at this point.
self.assertAlmostEqual(self.pipeline.X_train.mean(), 0.0, places=3)
print(f'Original X train mean: {original_mean}')
print(f'Transformed X train mean: {self.pipeline.X_train.mean()}')
def test_scaler_preprocessing_brings_x_train_std_near_one(self):
# When
self.pipeline.apply_scaler()
# Then
# We also check that the standard deviation is close to 1
self.assertAlmostEqual(self.pipeline.X_train.std(), 1.0, places=3)
print(f'Transformed X train standard deviation : {self.pipeline.X_train.std()}')
import sys
suite = unittest.TestLoader().loadTestsFromTestCase(TestIrisDataEngineering)
unittest.TextTestRunner(verbosity=1, stream=sys.stderr).run(suite)
###Output
..
###Markdown
Data Engineering Test: Hands-on Exercise. Change the pipeline class preprocessing so that the test fails. Do you understand why the test is failing?
###Code
suite = unittest.TestLoader().loadTestsFromTestCase(TestIrisDataEngineering)
unittest.TextTestRunner(verbosity=1, stream=sys.stderr).run(suite)
###Output
FF
======================================================================
FAIL: test_scaler_preprocessing_brings_x_train_mean_near_zero (__main__.TestIrisDataEngineering)
----------------------------------------------------------------------
Traceback (most recent call last):
File "<ipython-input-30-67636ac9b09b>", line 23, in test_scaler_preprocessing_brings_x_train_mean_near_zero
self.assertTrue(original_mean > self.pipeline.X_train.mean()) # X_train is a numpy array at this point.
AssertionError: False is not true
======================================================================
FAIL: test_scaler_preprocessing_brings_x_train_std_near_one (__main__.TestIrisDataEngineering)
----------------------------------------------------------------------
Traceback (most recent call last):
File "<ipython-input-30-67636ac9b09b>", line 34, in test_scaler_preprocessing_brings_x_train_std_near_one
self.assertAlmostEqual(self.pipeline.X_train.std(), 1.0, places=3)
AssertionError: 1.9612895180453789 != 1.0 within 3 places (0.9612895180453789 difference)
----------------------------------------------------------------------
Ran 2 tests in 0.035s
FAILED (failures=2)
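###Markdown
(Added note, not part of the original exercise.) The two failures above come from the modified `apply_scaler`, which only converts `X_train` to a NumPy array and never applies the scaler: the training features keep their raw mean, so the assertion that the mean has dropped is False, and they keep their raw standard deviation of roughly 1.96 instead of 1. A quick check with the class as defined above:
###Code
# Re-run the modified preprocessing and look at the statistics the tests check.
broken = PipelineWithDataEngineering()
broken.load_dataset()
broken.apply_scaler()   # with the modification above, X_train is left unscaled
print('X_train mean:', broken.X_train.mean())
print('X_train std :', broken.X_train.std())
###Output
_____no_output_____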
###Markdown
Unit Testing ML Code: Hands-on Exercise (Data Engineering). In this notebook we will explore unit tests for data engineering. We will use a classic toy dataset: the Iris plants dataset, which comes included with scikit-learn. Dataset details: https://scikit-learn.org/stable/datasets/index.html#iris-plants-dataset. As we progress through the course, the complexity of examples will increase, but we will start with something basic. This notebook is designed so that it can be run in isolation, once the setup steps described below are complete. Setup. Let's begin by importing the dataset and the libraries we are going to use. Make sure you have run `pip install -r requirements.txt` on the requirements file located in the same directory as this notebook. We recommend doing this in a separate virtual environment (see dedicated setup lecture). If you need a refresher on jupyter, pandas or numpy, there are some links to resources in the section notes.
###Code
from sklearn import datasets
import pandas as pd
import numpy as np
# Access the iris dataset from sklearn
iris = datasets.load_iris()
# Load the iris data into a pandas dataframe. The `data` and `feature_names`
# attributes of the dataset are added by default by sklearn. We use them to
# specify the columns of our dataframes.
iris_frame = pd.DataFrame(iris.data, columns=iris.feature_names)
# Create a "target" column in our dataframe, and set the values to the correct
# classifications from the dataset.
iris_frame['target'] = iris.target
###Output
_____no_output_____
###Markdown
Add the `SimplePipeline` from the Test Input Values notebook (same as previous lecture, no changes here)
###Code
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
class SimplePipeline:
def __init__(self):
self.frame = None
# Shorthand to specify that each value should start out as
# None when the class is instantiated.
self.X_train, self.X_test, self.y_train, self.y_test = None, None, None, None
self.model = None
self.load_dataset()
def load_dataset(self):
"""Load the dataset and perform train test split."""
# fetch from sklearn
dataset = datasets.load_iris()
# remove units ' (cm)' from variable names
self.feature_names = [fn[:-5] for fn in dataset.feature_names]
self.frame = pd.DataFrame(dataset.data, columns=self.feature_names)
self.frame['target'] = dataset.target
# we divide the data set using the train_test_split function from sklearn,
# which takes as parameters, the dataframe with the predictor variables,
# then the target, then the percentage of data to assign to the test set,
# and finally the random_state to ensure reproducibility.
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.frame[self.feature_names], self.frame.target, test_size=0.65, random_state=42)
def train(self, algorithm=LogisticRegression):
# we set up a LogisticRegression classifier with default parameters
self.model = algorithm(solver='lbfgs', multi_class='auto')
self.model.fit(self.X_train, self.y_train)
def predict(self, input_data):
return self.model.predict(input_data)
def get_accuracy(self):
# use our X_test and y_test values generated when we used
# `train_test_split` to test accuracy.
# score is a method on the Logistic Regression that
# returns the accuracy by default, but can be changed to other metrics, see:
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression.score
return self.model.score(X=self.X_test, y=self.y_test)
def run_pipeline(self):
"""Helper method to run multiple pipeline methods with one call."""
self.load_dataset()
self.train()
###Output
_____no_output_____
###Markdown
Test Engineered Data (preprocessing). Below we create an updated pipeline which inherits from the SimplePipeline but has new functionality to preprocess the data by applying a scaler. Linear models are sensitive to the scale of the features. For example, features with bigger magnitudes tend to dominate if we do not apply a scaler.
###Code
from sklearn.preprocessing import StandardScaler
class PipelineWithDataEngineering(SimplePipeline):
def __init__(self):
# Call the inherited SimplePipeline __init__ method first.
super().__init__()
# scaler to standardize the variables in the dataset
self.scaler = StandardScaler()
# Train the scaler once upon pipeline instantiation:
# Compute the mean and standard deviation based on the training data
self.scaler.fit(self.X_train)
def apply_scaler(self):
# Scale the test and training data to be of mean 0 and of unit variance
self.X_train = self.scaler.transform(self.X_train)
self.X_test = self.scaler.transform(self.X_test)
def predict(self, input_data):
# apply scaler transform on inputs before predictions
scaled_input_data = self.scaler.transform(input_data)
return self.model.predict(scaled_input_data)
def run_pipeline(self):
"""Helper method to run multiple pipeline methods with one call."""
self.load_dataset()
self.apply_scaler() # updated in this class
self.train()
pipeline = PipelineWithDataEngineering()
pipeline.run_pipeline()
accuracy_score = pipeline.get_accuracy()
print(f'current model accuracy is: {accuracy_score}')
###Output
current model accuracy is: 0.9591836734693877
###Markdown
Now we Unit Test. We focus specifically on the feature engineering step.
###Code
import unittest
class TestIrisDataEngineering(unittest.TestCase):
def setUp(self):
self.pipeline = PipelineWithDataEngineering()
self.pipeline.load_dataset()
def test_scaler_preprocessing_brings_x_train_mean_near_zero(self):
# Given
# convert the dataframe to be a single column with pandas stack
original_mean = self.pipeline.X_train.stack().mean()
# When
self.pipeline.apply_scaler()
# Then
# The idea behind StandardScaler is that it will transform your data
# to center the distribution at 0 and scale the variance at 1.
# Therefore we test that the mean has shifted to be less than the original
# and close to 0 using assertAlmostEqual to check to 3 decimal places:
# https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual
self.assertTrue(original_mean > self.pipeline.X_train.mean()) # X_train is a numpy array at this point.
self.assertAlmostEqual(self.pipeline.X_train.mean(), 0.0, places=3)
print(f'Original X train mean: {original_mean}')
print(f'Transformed X train mean: {self.pipeline.X_train.mean()}')
def test_scaler_preprocessing_brings_x_train_std_near_one(self):
# When
self.pipeline.apply_scaler()
# Then
# We also check that the standard deviation is close to 1
self.assertAlmostEqual(self.pipeline.X_train.std(), 1.0, places=3)
print(f'Transformed X train standard deviation : {self.pipeline.X_train.std()}')
import sys
suite = unittest.TestLoader().loadTestsFromTestCase(TestIrisDataEngineering)
unittest.TextTestRunner(verbosity=1, stream=sys.stderr).run(suite)
###Output
..
###Markdown
Data Engineering Test: Hands-on Exercise. Change the pipeline class preprocessing so that the test fails. Do you understand why the test is failing?
###Code
class PipelineWithDataEngineering(SimplePipeline):
def __init__(self):
# Call the inherited SimplePipeline __init__ method first.
super().__init__()
# scaler to standardize the variables in the dataset
self.scaler = StandardScaler(with_mean=False)  # exercise change: with_mean=False stops the scaler from centering the data
# Train the scaler once upon pipeline instantiation:
# Compute the mean and standard deviation based on the training data
self.scaler.fit(self.X_train)
def apply_scaler(self):
# Scale the test and training data to be of mean 0 and of unit variance
self.X_train = self.scaler.transform(self.X_train)
self.X_test = self.scaler.transform(self.X_test)
def predict(self, input_data):
# apply scaler transform on inputs before predictions
scaled_input_data = self.scaler.transform(input_data)
return self.model.predict(scaled_input_data)
def run_pipeline(self):
"""Helper method to run multiple pipeline methods with one call."""
self.load_dataset()
self.apply_scaler() # updated in this class
self.train()
suite = unittest.TestLoader().loadTestsFromTestCase(TestIrisDataEngineering)
unittest.TextTestRunner(verbosity=1, stream=sys.stderr).run(suite)
###Output
FF
======================================================================
FAIL: test_scaler_preprocessing_brings_x_train_mean_near_zero (__main__.TestIrisDataEngineering)
----------------------------------------------------------------------
Traceback (most recent call last):
File "<ipython-input-5-67636ac9b09b>", line 23, in test_scaler_preprocessing_brings_x_train_mean_near_zero
self.assertTrue(original_mean > self.pipeline.X_train.mean()) # X_train is a numpy array at this point.
AssertionError: False is not true
======================================================================
FAIL: test_scaler_preprocessing_brings_x_train_std_near_one (__main__.TestIrisDataEngineering)
----------------------------------------------------------------------
Traceback (most recent call last):
File "<ipython-input-5-67636ac9b09b>", line 34, in test_scaler_preprocessing_brings_x_train_std_near_one
self.assertAlmostEqual(self.pipeline.X_train.std(), 1.0, places=3)
AssertionError: 2.9364176229455423 != 1.0 within 3 places
----------------------------------------------------------------------
Ran 2 tests in 0.019s
FAILED (failures=2)
###Markdown
Unit Testing ML Code: Hands-on Exercise (Data Engineering). In this notebook we will explore unit tests for data engineering. We will use a classic toy dataset: the Iris plants dataset, which comes included with scikit-learn. Dataset details: https://scikit-learn.org/stable/datasets/index.html#iris-plants-dataset. As we progress through the course, the complexity of examples will increase, but we will start with something basic. This notebook is designed so that it can be run in isolation, once the setup steps described below are complete. Setup. Let's begin by importing the dataset and the libraries we are going to use. Make sure you have run `pip install -r requirements.txt` on the requirements file located in the same directory as this notebook. We recommend doing this in a separate virtual environment (see dedicated setup lecture). If you need a refresher on jupyter, pandas or numpy, there are some links to resources in the section notes.
###Code
from sklearn import datasets
import pandas as pd
import numpy as np
# Access the iris dataset from sklearn
iris = datasets.load_iris()
# Load the iris data into a pandas dataframe. The `data` and `feature_names`
# attributes of the dataset are added by default by sklearn. We use them to
# specify the columns of our dataframes.
iris_frame = pd.DataFrame(iris.data, columns=iris.feature_names)
# Create a "target" column in our dataframe, and set the values to the correct
# classifications from the dataset.
iris_frame['target'] = iris.target
iris.feature_names
###Output
_____no_output_____
###Markdown
Add the `SimplePipeline` from the Test Input Values notebook (same as previous lecture, no changes here)
###Code
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
class SimplePipeline:
def __init__(self):
self.frame = None
# Shorthand to specify that each value should start out as
# None when the class is instantiated.
self.X_train, self.X_test, self.y_train, self.y_test = None, None, None, None
self.model = None
self.load_dataset()
def load_dataset(self):
"""Load the dataset and perform train test split."""
# fetch from sklearn
dataset = datasets.load_iris()
# remove units ' (cm)' from variable names
self.feature_names = [fn[:-5] for fn in dataset.feature_names]
self.frame = pd.DataFrame(dataset.data, columns=self.feature_names)
# exercise change: negate every feature, which makes the mean-comparison test below fail
for col in self.frame.columns:
self.frame[col] *= -1
self.frame['target'] = dataset.target
# we divide the data set using the train_test_split function from sklearn,
# which takes as parameters, the dataframe with the predictor variables,
# then the target, then the percentage of data to assign to the test set,
# and finally the random_state to ensure reproducibility.
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.frame[self.feature_names], self.frame.target, test_size=0.65, random_state=42)
def train(self, algorithm=LogisticRegression):
# we set up a LogisticRegression classifier with default parameters
self.model = algorithm(solver='lbfgs', multi_class='auto')
self.model.fit(self.X_train, self.y_train)
def predict(self, input_data):
return self.model.predict(input_data)
def get_accuracy(self):
# use our X_test and y_test values generated when we used
# `train_test_split` to test accuracy.
# score is a method on the Logistic Regression that
# returns the accuracy by default, but can be changed to other metrics, see:
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression.score
return self.model.score(X=self.X_test, y=self.y_test)
def run_pipeline(self):
"""Helper method to run multiple pipeline methods with one call."""
self.load_dataset()
self.train()
###Output
_____no_output_____
###Markdown
Test Engineered Data (preprocessing). Below we create an updated pipeline which inherits from the SimplePipeline but has new functionality to preprocess the data by applying a scaler. Linear models are sensitive to the scale of the features. For example, features with bigger magnitudes tend to dominate if we do not apply a scaler.
###Code
from sklearn.preprocessing import StandardScaler
class PipelineWithDataEngineering(SimplePipeline):
def __init__(self):
# Call the inherited SimplePipeline __init__ method first.
super().__init__()
# scaler to standardize the variables in the dataset
self.scaler = StandardScaler()
# Train the scaler once upon pipeline instantiation:
# Compute the mean and standard deviation based on the training data
self.scaler.fit(self.X_train)
def apply_scaler(self):
# Scale the test and training data to be of mean 0 and of unit variance
self.X_train = self.scaler.transform(self.X_train)
self.X_test = self.scaler.transform(self.X_test)
def predict(self, input_data):
# apply scaler transform on inputs before predictions
scaled_input_data = self.scaler.transform(input_data)
return self.model.predict(scaled_input_data)
def run_pipeline(self):
"""Helper method to run multiple pipeline methods with one call."""
self.load_dataset()
self.apply_scaler() # updated in this class
self.train()
pipeline = PipelineWithDataEngineering()
pipeline.run_pipeline()
accuracy_score = pipeline.get_accuracy()
print(f'current model accuracy is: {accuracy_score}')
###Output
current model accuracy is: 0.9591836734693877
###Markdown
Now we Unit Test. We focus specifically on the feature engineering step.
###Code
pipeline.load_dataset()
# pd.DataFrame(pipeline.X_train).stack().mean()
for col in pipeline.X_train.columns:
pipeline.X_train[col] *= -1
pipeline.X_train
import unittest
class TestIrisDataEngineering(unittest.TestCase):
def setUp(self):
"""Call the first method of the tested class after instantiating"""
self.pipeline = PipelineWithDataEngineering()
self.pipeline.load_dataset()
def test_scaler_preprocessing_brings_x_train_mean_near_zero(self):
""""""
# Given
# convert the dataframe to be a single column with pandas stack
original_mean = self.pipeline.X_train.stack().mean()
# When
self.pipeline.apply_scaler()
# Then
# The idea behind StandardScaler is that it will transform your data
# to center the distribution at 0 and scale the variance at 1.
# Therefore we test that the mean has shifted to be less than the original
# and close to 0 using assertAlmostEqual to check to 3 decimal places:
# https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual
self.assertTrue(original_mean > self.pipeline.X_train.mean()) # X_train is a numpy array at this point.
self.assertAlmostEqual(self.pipeline.X_train.mean(), 0.0, places=3)
print(f'Original X train mean: {original_mean}')
print(f'Transformed X train mean: {self.pipeline.X_train.mean()}')
def test_scaler_preprocessing_brings_x_train_std_near_one(self):
# When
self.pipeline.apply_scaler()
# Then
# We also check that the standard deviation is close to 1
self.assertAlmostEqual(self.pipeline.X_train.std(), 1.0, places=3)
print(f'Transformed X train standard deviation : {self.pipeline.X_train.std()}')
import sys
suite = unittest.TestLoader().loadTestsFromTestCase(TestIrisDataEngineering)
unittest.TextTestRunner(verbosity=1, stream=sys.stderr).run(suite)
###Output
F.
###Markdown
Unit Testing ML Code: Hands-on Exercise (Data Engineering). In this notebook we will explore unit tests for data engineering. We will use a classic toy dataset: the Iris plants dataset, which comes included with scikit-learn. Dataset details: https://scikit-learn.org/stable/datasets/index.html#iris-plants-dataset. As we progress through the course, the complexity of examples will increase, but we will start with something basic. This notebook is designed so that it can be run in isolation, once the setup steps described below are complete. Setup. Let's begin by importing the dataset and the libraries we are going to use. Make sure you have run `pip install -r requirements.txt` on the requirements file located in the same directory as this notebook. We recommend doing this in a separate virtual environment (see dedicated setup lecture). If you need a refresher on jupyter, pandas or numpy, there are some links to resources in the section notes.
###Code
from sklearn import datasets
import pandas as pd
import numpy as np
# Access the iris dataset from sklearn
iris = datasets.load_iris()
# Load the iris data into a pandas dataframe. The `data` and `feature_names`
# attributes of the dataset are added by default by sklearn. We use them to
# specify the columns of our dataframes.
iris_frame = pd.DataFrame(iris.data, columns=iris.feature_names)
# Create a "target" column in our dataframe, and set the values to the correct
# classifications from the dataset.
iris_frame['target'] = iris.target
###Output
_____no_output_____
###Markdown
Add the `SimplePipeline` from the Test Input Values notebook (same as previous lecture, no changes here)
###Code
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
class SimplePipeline:
def __init__(self):
self.frame = None
# Shorthand to specify that each value should start out as
# None when the class is instantiated.
self.X_train, self.X_test, self.y_train, self.y_test = None, None, None, None
self.model = None
self.load_dataset()
def load_dataset(self):
"""Load the dataset and perform train test split."""
# fetch from sklearn
dataset = datasets.load_iris()
# remove units ' (cm)' from variable names
self.feature_names = [fn[:-5] for fn in dataset.feature_names]
self.frame = pd.DataFrame(dataset.data, columns=self.feature_names)
self.frame['target'] = dataset.target
# we divide the data set using the train_test_split function from sklearn,
# which takes as parameters, the dataframe with the predictor variables,
# then the target, then the percentage of data to assign to the test set,
# and finally the random_state to ensure reproducibility.
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.frame[self.feature_names], self.frame.target, test_size=0.65, random_state=42)
def train(self, algorithm=LogisticRegression):
# we set up a LogisticRegression classifier with default parameters
self.model = algorithm(solver='lbfgs', multi_class='auto')
self.model.fit(self.X_train, self.y_train)
def predict(self, input_data):
return self.model.predict(input_data)
def get_accuracy(self):
# use our X_test and y_test values generated when we used
# `train_test_split` to test accuracy.
# score is a method on the Logisitic Regression that
# returns the accuracy by default, but can be changed to other metrics, see:
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression.score
return self.model.score(X=self.X_test, y=self.y_test)
def run_pipeline(self):
"""Helper method to run multiple pipeline methods with one call."""
self.load_dataset()
self.train()
###Output
_____no_output_____
###Markdown
Test Engineered Data (preprocessing). Below we create an updated pipeline which inherits from the SimplePipeline but has new functionality to preprocess the data by applying a scaler. Linear models are sensitive to the scale of the features. For example, features with bigger magnitudes tend to dominate if we do not apply a scaler.
###Code
from sklearn.preprocessing import StandardScaler
class PipelineWithDataEngineering(SimplePipeline):
def __init__(self):
# Call the inherited SimplePipeline __init__ method first.
super().__init__()
# scaler to standardize the variables in the dataset
self.scaler = StandardScaler()
# Train the scaler once upon pipeline instantiation:
# Compute the mean and standard deviation based on the training data
self.scaler.fit(self.X_train)
def apply_scaler(self):
# Scale the test and training data to be of mean 0 and of unit variance
self.X_train = self.scaler.transform(self.X_train)
self.X_test = self.scaler.transform(self.X_test)
def predict(self, input_data):
# apply scaler transform on inputs before predictions
scaled_input_data = self.scaler.transform(input_data)
return self.model.predict(scaled_input_data)
def run_pipeline(self):
"""Helper method to run multiple pipeline methods with one call."""
self.load_dataset()
self.apply_scaler() # updated in this class
self.train()
pipeline = PipelineWithDataEngineering()
pipeline.run_pipeline()
accuracy_score = pipeline.get_accuracy()
print(f'current model accuracy is: {accuracy_score}')
###Output
current model accuracy is: 0.9591836734693877
###Markdown
Now we Unit Test. We focus specifically on the feature engineering step.
###Code
import unittest
class TestIrisDataEngineering(unittest.TestCase):
def setUp(self):
self.pipeline = PipelineWithDataEngineering()
self.pipeline.load_dataset()
def test_scaler_preprocessing_brings_x_train_mean_near_zero(self):
# Given
# convert the dataframe to be a single column with pandas stack
original_mean = self.pipeline.X_train.stack().mean()
# When
self.pipeline.apply_scaler()
# Then
# The idea behind StandardScaler is that it will transform your data
# to center the distribution at 0 and scale the variance at 1.
# Therefore we test that the mean has shifted to be less than the original
# and close to 0 using assertAlmostEqual to check to 3 decimal places:
# https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual
self.assertTrue(original_mean > self.pipeline.X_train.mean()) # X_train is a numpy array at this point.
self.assertAlmostEqual(self.pipeline.X_train.mean(), 0.0, places=3)
print(f'Original X train mean: {original_mean}')
print(f'Transformed X train mean: {self.pipeline.X_train.mean()}')
def test_scaler_preprocessing_brings_x_train_std_near_one(self):
# When
self.pipeline.apply_scaler()
# Then
# We also check that the standard deviation is close to 1
self.assertAlmostEqual(self.pipeline.X_train.std(), 1.0, places=3)
print(f'Transformed X train standard deviation : {self.pipeline.X_train.std()}')
import sys
suite = unittest.TestLoader().loadTestsFromTestCase(TestIrisDataEngineering)
unittest.TextTestRunner(verbosity=1, stream=sys.stderr).run(suite)
###Output
..
###Markdown
Data Engineering Test: Hands-on Exercise. Change the pipeline class preprocessing so that the test fails. Do you understand why the test is failing?
###Code
from sklearn.preprocessing import StandardScaler
class PipelineWithDataEngineering(SimplePipeline):
def __init__(self):
# Call the inherited SimplePipeline __init__ method first.
super().__init__()
# scaler to standardize the variables in the dataset
self.scaler = StandardScaler()
# Train the scaler once upon pipeline instantiation:
# Compute the mean and standard deviation based on the training data
self.scaler.fit(self.X_train)
def apply_scaler(self):
# Scale the test and training data to be of mean 0 and of unit variance
# the part I changed to make the test fail
self.X_train = self.scaler.transform(self.X_train) + 1
self.X_test = self.scaler.transform(self.X_test)
def predict(self, input_data):
# apply scaler transform on inputs before predictions
scaled_input_data = self.scaler.transform(input_data)
return self.model.predict(scaled_input_data)
def run_pipeline(self):
"""Helper method to run multiple pipeline methods with one call."""
self.load_dataset()
self.apply_scaler() # updated in this class
self.train()
import sys
suite = unittest.TestLoader().loadTestsFromTestCase(TestIrisDataEngineering)
unittest.TextTestRunner(verbosity=1, stream=sys.stderr).run(suite)
###Output
F.
###Markdown
Unit Testing ML Code: Hands-on Exercise (Data Engineering). In this notebook we will explore unit tests for data engineering. We will use a classic toy dataset: the Iris plants dataset, which comes included with scikit-learn. Dataset details: https://scikit-learn.org/stable/datasets/index.html#iris-plants-dataset. As we progress through the course, the complexity of examples will increase, but we will start with something basic. This notebook is designed so that it can be run in isolation, once the setup steps described below are complete. Setup. Let's begin by importing the dataset and the libraries we are going to use. Make sure you have run `pip install -r requirements.txt` on the requirements file located in the same directory as this notebook. We recommend doing this in a separate virtual environment (see dedicated setup lecture). If you need a refresher on jupyter, pandas or numpy, there are some links to resources in the section notes.
###Code
from sklearn import datasets
import pandas as pd
import numpy as np
# Access the iris dataset from sklearn
iris = datasets.load_iris()
# Load the iris data into a pandas dataframe. The `data` and `feature_names`
# attributes of the dataset are added by default by sklearn. We use them to
# specify the columns of our dataframes.
iris_frame = pd.DataFrame(iris.data, columns=iris.feature_names)
# Create a "target" column in our dataframe, and set the values to the correct
# classifications from the dataset.
iris_frame['target'] = iris.target
###Output
_____no_output_____
###Markdown
Add the `SimplePipeline` from the Test Input Values notebook (same as previous lecture, no changes here)
###Code
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
class SimplePipeline:
def __init__(self):
self.frame = None
# Shorthand to specify that each value should start out as
# None when the class is instantiated.
self.X_train, self.X_test, self.y_train, self.y_test = None, None, None, None
self.model = None
self.load_dataset()
def load_dataset(self):
"""Load the dataset and perform train test split."""
# fetch from sklearn
dataset = datasets.load_iris()
# remove units ' (cm)' from variable names
self.feature_names = [fn[:-5] for fn in dataset.feature_names]
self.frame = pd.DataFrame(dataset.data, columns=self.feature_names)
self.frame['target'] = dataset.target
# we divide the data set using the train_test_split function from sklearn,
# which takes as parameters, the dataframe with the predictor variables,
# then the target, then the percentage of data to assign to the test set,
# and finally the random_state to ensure reproducibility.
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.frame[self.feature_names], self.frame.target, test_size=0.65, random_state=42)
def train(self, algorithm=LogisticRegression):
# we set up a LogisticRegression classifier with default parameters
self.model = algorithm(solver='lbfgs', multi_class='auto')
self.model.fit(self.X_train, self.y_train)
def predict(self, input_data):
return self.model.predict(input_data)
def get_accuracy(self):
# use our X_test and y_test values generated when we used
# `train_test_split` to test accuracy.
# score is a method on the Logisitic Regression that
# returns the accuracy by default, but can be changed to other metrics, see:
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression.score
return self.model.score(X=self.X_test, y=self.y_test)
def run_pipeline(self):
"""Helper method to run multiple pipeline methods with one call."""
self.load_dataset()
self.train()
###Output
_____no_output_____
###Markdown
Test Engineered Data (preprocessing). Below we create an updated pipeline which inherits from the SimplePipeline but has new functionality to preprocess the data by applying a scaler. Linear models are sensitive to the scale of the features. For example, features with bigger magnitudes tend to dominate if we do not apply a scaler.
###Code
from sklearn.preprocessing import StandardScaler
class PipelineWithDataEngineering(SimplePipeline):
def __init__(self):
# Call the inherited SimplePipeline __init__ method first.
super().__init__()
# scaler to standardize the variables in the dataset
self.scaler = StandardScaler()
# Train the scaler once upon pipeline instantiation:
# Compute the mean and standard deviation based on the training data
self.scaler.fit(self.X_train)
def apply_scaler(self):
# Scale the test and training data to be of mean 0 and of unit variance
self.X_train = self.scaler.transform(self.X_train)
self.X_test = self.scaler.transform(self.X_test)
def predict(self, input_data):
# apply scaler transform on inputs before predictions
scaled_input_data = self.scaler.transform(input_data)
return self.model.predict(scaled_input_data)
def run_pipeline(self):
"""Helper method to run multiple pipeline methods with one call."""
self.load_dataset()
self.apply_scaler() # updated in this class
self.train()
pipeline = PipelineWithDataEngineering()
pipeline.run_pipeline()
accuracy_score = pipeline.get_accuracy()
print(f'current model accuracy is: {accuracy_score}')
###Output
current model accuracy is: 0.9591836734693877
###Markdown
Now we Unit Test. We focus specifically on the feature engineering step.
###Code
import unittest
class TestIrisDataEngineering(unittest.TestCase):
def setUp(self):
self.pipeline = PipelineWithDataEngineering()
self.pipeline.load_dataset()
def test_scaler_preprocessing_brings_x_train_mean_near_zero(self):
# Given
# convert the dataframe to be a single column with pandas stack
original_mean = self.pipeline.X_train.stack().mean()
# When
self.pipeline.apply_scaler()
# Then
# The idea behind StandardScaler is that it will transform your data
# to center the distribution at 0 and scale the variance at 1.
# Therefore we test that the mean has shifted to be less than the original
# and close to 0 using assertAlmostEqual to check to 3 decimal places:
# https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual
self.assertTrue(original_mean > self.pipeline.X_train.mean()) # X_train is a numpy array at this point.
self.assertAlmostEqual(self.pipeline.X_train.mean(), 0.0, places=3)
print(f'Original X train mean: {original_mean}')
print(f'Transformed X train mean: {self.pipeline.X_train.mean()}')
def test_scaler_preprocessing_brings_x_train_std_near_one(self):
# When
self.pipeline.apply_scaler()
# Then
# We also check that the standard deviation is close to 1
self.assertAlmostEqual(self.pipeline.X_train.std(), 1.0, places=3)
print(f'Transformed X train standard deviation : {self.pipeline.X_train.std()}')
import sys
suite = unittest.TestLoader().loadTestsFromTestCase(TestIrisDataEngineering)
unittest.TextTestRunner(verbosity=1, stream=sys.stderr).run(suite)
###Output
..
###Markdown
Data Engineering Test: Hands-on Exercise. Change the pipeline class preprocessing so that the test fails. Do you understand why the test is failing?
###Code
from sklearn.preprocessing import StandardScaler
class PipelineWithDataEngineering(SimplePipeline):
def __init__(self):
# Call the inherited SimplePipeline __init__ method first.
super().__init__()
# scaler to standardize the variables in the dataset
self.scaler = StandardScaler()
# Train the scaler once upon pipeline instantiation:
# Compute the mean and standard deviation based on the training data
self.scaler.fit(self.X_train)
def apply_scaler(self):
# Scale the test and training data to be of mean 0 and of unit variance
self.X_train = self.scaler.transform(self.X_train)
self.X_test = self.scaler.transform(self.X_test)
# Add an erroneous data point to skew the data and fail tests
self.X_train[0 , 0] = 10.0
def predict(self, input_data):
# apply scaler transform on inputs before predictions
scaled_input_data = self.scaler.transform(input_data)
return self.model.predict(scaled_input_data)
def run_pipeline(self):
"""Helper method to run multiple pipeline methods with one call."""
self.load_dataset()
        self.apply_scaler() # updated in this class
self.train()
suite = unittest.TestLoader().loadTestsFromTestCase(TestIrisDataEngineering)
unittest.TextTestRunner(verbosity=1, stream=sys.stderr).run(suite)
###Output
FF
======================================================================
FAIL: test_scaler_preprocessing_brings_x_train_mean_near_zero (__main__.TestIrisDataEngineering)
----------------------------------------------------------------------
Traceback (most recent call last):
File "C:\Users\geoff\AppData\Local\Temp/ipykernel_6732/4151704491.py", line 24, in test_scaler_preprocessing_brings_x_train_mean_near_zero
self.assertAlmostEqual(self.pipeline.X_train.mean(), 0.0, places=3)
AssertionError: 0.04783808976632523 != 0.0 within 3 places (0.04783808976632523 difference)
======================================================================
FAIL: test_scaler_preprocessing_brings_x_train_std_near_one (__main__.TestIrisDataEngineering)
----------------------------------------------------------------------
Traceback (most recent call last):
File "C:\Users\geoff\AppData\Local\Temp/ipykernel_6732/4151704491.py", line 34, in test_scaler_preprocessing_brings_x_train_std_near_one
self.assertAlmostEqual(self.pipeline.X_train.std(), 1.0, places=3)
AssertionError: 1.21592305814796 != 1.0 within 3 places (0.2159230581479601 difference)
----------------------------------------------------------------------
Ran 2 tests in 0.051s
FAILED (failures=2)
###Markdown
Unit Testing ML Code: Hands-on Exercise (Data Engineering). In this notebook we will explore unit tests for data engineering. We will use a classic toy dataset: the Iris plants dataset, which comes included with scikit-learn. Dataset details: https://scikit-learn.org/stable/datasets/index.html#iris-plants-dataset As we progress through the course, the complexity of examples will increase, but we will start with something basic. This notebook is designed so that it can be run in isolation, once the setup steps described below are complete. Setup: Let's begin by importing the dataset and the libraries we are going to use. Make sure you have run `pip install -r requirements.txt` using the requirements file located in the same directory as this notebook. We recommend doing this in a separate virtual environment (see dedicated setup lecture). If you need a refresher on jupyter, pandas or numpy, there are some links to resources in the section notes.
###Code
from sklearn import datasets
import pandas as pd
import numpy as np
# Access the iris dataset from sklearn
iris = datasets.load_iris()
# Load the iris data into a pandas dataframe. The `data` and `feature_names`
# attributes of the dataset are added by default by sklearn. We use them to
# specify the columns of our dataframes.
iris_frame = pd.DataFrame(iris.data, columns=iris.feature_names)
# Create a "target" column in our dataframe, and set the values to the correct
# classifications from the dataset.
iris_frame['target'] = iris.target
###Output
_____no_output_____
###Markdown
Add the `SimplePipeline` from the Test Input Values notebook (same as previous lecture, no changes here)
###Code
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
class SimplePipeline:
def __init__(self):
self.frame = None
# Shorthand to specify that each value should start out as
# None when the class is instantiated.
        self.X_train, self.X_test, self.y_train, self.y_test = None, None, None, None
self.model = None
self.load_dataset()
def load_dataset(self):
"""Load the dataset and perform train test split."""
# fetch from sklearn
dataset = datasets.load_iris()
# remove units ' (cm)' from variable names
self.feature_names = [fn[:-5] for fn in dataset.feature_names]
self.frame = pd.DataFrame(dataset.data, columns=self.feature_names)
self.frame['target'] = dataset.target
# we divide the data set using the train_test_split function from sklearn,
# which takes as parameters, the dataframe with the predictor variables,
# then the target, then the percentage of data to assign to the test set,
# and finally the random_state to ensure reproducibility.
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.frame[self.feature_names], self.frame.target, test_size=0.65, random_state=42)
def train(self, algorithm=LogisticRegression):
# we set up a LogisticRegression classifier with default parameters
self.model = algorithm(solver='lbfgs', multi_class='auto')
self.model.fit(self.X_train, self.y_train)
def predict(self, input_data):
return self.model.predict(input_data)
def get_accuracy(self):
# use our X_test and y_test values generated when we used
# `train_test_split` to test accuracy.
        # score is a method on the Logistic Regression that
# returns the accuracy by default, but can be changed to other metrics, see:
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression.score
return self.model.score(X=self.X_test, y=self.y_test)
def run_pipeline(self):
"""Helper method to run multiple pipeline methods with one call."""
self.load_dataset()
self.train()
###Output
_____no_output_____
###Markdown
Test Engineered Data (preprocessing). Below we create an updated pipeline which inherits from the SimplePipeline but has new functionality to preprocess the data by applying a scaler. Linear models are sensitive to the scale of the features: for example, features with bigger magnitudes tend to dominate if we do not apply a scaler.
###Code
from sklearn.preprocessing import StandardScaler
class PipelineWithDataEngineering(SimplePipeline):
def __init__(self):
# Call the inherited SimplePipeline __init__ method first.
super().__init__()
# scaler to standardize the variables in the dataset
self.scaler = StandardScaler()
# Train the scaler once upon pipeline instantiation:
# Compute the mean and standard deviation based on the training data
self.scaler.fit(self.X_train)
def apply_scaler(self):
# Scale the test and training data to be of mean 0 and of unit variance
# self.X_train = self.scaler.transform(self.X_train)
self.X_train = np.ones_like(self.X_train)
self.X_test = self.scaler.transform(self.X_test)
def predict(self, input_data):
# apply scaler transform on inputs before predictions
scaled_input_data = self.scaler.transform(input_data)
return self.model.predict(scaled_input_data)
def run_pipeline(self):
"""Helper method to run multiple pipeline methods with one call."""
self.load_dataset()
        self.apply_scaler() # updated in this class
self.train()
pipeline = PipelineWithDataEngineering()
pipeline.run_pipeline()
accuracy_score = pipeline.get_accuracy()
print(f'current model accuracy is: {accuracy_score}')
###Output
_____no_output_____
###Markdown
Now we Unit Test. We focus specifically on the feature engineering step.
###Code
import unittest
class TestIrisDataEngineering(unittest.TestCase):
def setUp(self):
self.pipeline = PipelineWithDataEngineering()
self.pipeline.load_dataset()
def test_scaler_preprocessing_brings_x_train_mean_near_zero(self):
# Given
# convert the dataframe to be a single column with pandas stack
original_mean = self.pipeline.X_train.stack().mean()
# When
self.pipeline.apply_scaler()
# Then
# The idea behind StandardScaler is that it will transform your data
# to center the distribution at 0 and scale the variance at 1.
# Therefore we test that the mean has shifted to be less than the original
# and close to 0 using assertAlmostEqual to check to 3 decimal places:
# https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual
self.assertTrue(original_mean > self.pipeline.X_train.mean()) # X_train is a numpy array at this point.
self.assertAlmostEqual(self.pipeline.X_train.mean(), 0.0, places=3)
print(f'Original X train mean: {original_mean}')
print(f'Transformed X train mean: {self.pipeline.X_train.mean()}')
def test_scaler_preprocessing_brings_x_train_std_near_one(self):
# When
self.pipeline.apply_scaler()
# Then
# We also check that the standard deviation is close to 1
self.assertAlmostEqual(self.pipeline.X_train.std(), 1.0, places=3)
print(f'Transformed X train standard deviation : {self.pipeline.X_train.std()}')
import sys
suite = unittest.TestLoader().loadTestsFromTestCase(TestIrisDataEngineering)
unittest.TextTestRunner(verbosity=1, stream=sys.stderr).run(suite)
###Output
_____no_output_____ |
19 Geopandas/Homework 1.ipynb | ###Markdown
Homework 1 **Contents:** load geodata, transform coordinates, simple data analysis with heritage-protected objects, replicate food deserts with Swiss data. **Required skills:** geodata handling 1 and 2. Setup: import the required libraries: pandas, geopandas, shapely geometries. Import **Swiss municipalities**: a shapefile with the Swiss municipalities is stored under the following path: `'dataprojects/Projections/shp/g1g17.shp'`. Read it in, display the first rows and make a simple plot. **Denkmalschutz (heritage protection)**: a shapefile of all heritage-protected objects in the canton of Zurich is stored under the following path: `dataprojects/Denkmalschutz/Denkmalschutzobjekte/ARV_KAZ_DENKMALSCHUTZOBJEKTE_P.shp`. Read it in, display the first rows and make a simple plot. How many heritage-protected objects are there in the canton of Zurich? In which coordinate system are the geodata encoded? Transform the data into the Swiss coordinate system. A simple plot. Which canton number do the municipalities of the canton of Zurich have in the municipality geodata file? Filter out the Zurich municipalities and save them in a new GDF. Plot the Zurich municipality GDF together with the heritage-protected objects.
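A minimal sketch of the reading step, not a model solution (the variable names are just placeholders):

```python
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point  # shapely geometry type, used later when building point columns

# read the Swiss municipalities shapefile, look at the first rows, quick plot
gemeinden = gpd.read_file('dataprojects/Projections/shp/g1g17.shp')
gemeinden.head()
gemeinden.plot()

# same pattern for the heritage-protection shapefile
denkmal = gpd.read_file('dataprojects/Denkmalschutz/Denkmalschutzobjekte/ARV_KAZ_DENKMALSCHUTZOBJEKTE_P.shp')
denkmal.head()
```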
###Code
###Output
_____no_output_____
###Markdown
Analysis: Denkmalschutz. We want to know: how many heritage-protected objects are there in each municipality? Combining data 1: Add the matching geo-information from the municipality GDF to every row of the Denkmalschutz GDF. **Tip:** You need a spatial join for this. Save the combined information in a new GDF. Check with the first two objects whether it worked. Are there objects to which no municipality was assigned? If so, why not?
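A possible sketch for the spatial join, assuming the two GeoDataFrames are called `denkmal` and `gemeinden_zh` and are already in the same CRS (older geopandas versions use `op=`, newer ones `predicate=`):

```python
import geopandas as gpd

# attach the municipality attributes to every heritage-protected point
denkmal_mit_gemeinde = gpd.sjoin(denkmal, gemeinden_zh, how='left', op='within')
denkmal_mit_gemeinde.head(2)

# objects without a matching municipality polygon end up with NaN in the joined columns
denkmal_mit_gemeinde[denkmal_mit_gemeinde['index_right'].isna()]
```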
###Code
#
###Output
_____no_output_____
###Markdown
Based on the new GDF, create a list that contains the number of heritage-protection objects for each municipality. Save the list as a new dataframe. Which are the five municipalities with the most objects? Combining data 2: Now add the information on the number of heritage-protection objects from the DF you just created to the original Zurich municipality GDF. Check the result using the municipality of Zurich. Are there municipalities to which no information was added? If so, replace the NaN values in the corresponding column with zeros. Plot: We want to draw a choropleth map of the Zurich municipalities. The color coding of the municipalities is based on the number of objects.
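One way to sketch the count-and-merge step; `GMDNAME` is assumed to be the municipality-name column of the shapefile, adjust it to the actual column names:

```python
# number of heritage-protected objects per municipality
counts = (denkmal_mit_gemeinde
          .groupby('GMDNAME')
          .size()
          .reset_index(name='anzahl'))
counts.sort_values('anzahl', ascending=False).head(5)

# add the counts to the municipality GeoDataFrame, missing municipalities get 0
gemeinden_zh = gemeinden_zh.merge(counts, on='GMDNAME', how='left')
gemeinden_zh['anzahl'] = gemeinden_zh['anzahl'].fillna(0)

# choropleth map colored by the number of objects
gemeinden_zh.plot(column='anzahl', cmap='OrRd', legend=True, figsize=(12, 8))
```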
###Code
###Output
_____no_output_____
###Markdown
Analysis 2: Stores. Here we are interested in the locations of the Coop stores in the canton of Zurich. Import: a list of the Coop branches is stored at: `'dataprojects/Coop/geschaefte.csv'`. Which columns contain the relevant information? What kind of coordinates are they? Define geometry: create a GDF from the stores DF by creating a geometry column with points, creating a GDF using this column, initializing the coordinate system of this GDF, and converting the coordinates into the Swiss system. Select: we are only interested in the stores in the canton of Zurich. Filter them out and save them in a new GDF. To check whether it worked: a simple plot. Plot: plot the Coop stores in the canton of Zurich on top of the municipality map.
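A sketch of the geometry step, assuming the CSV has WGS84 longitude/latitude columns named `lng` and `lat` and that the municipality shapefile uses the Swiss LV03 grid (EPSG:21781); check the real column names and CRS first:

```python
import pandas as pd
import geopandas as gpd

geschaefte = pd.read_csv('dataprojects/Coop/geschaefte.csv')

# build point geometries from the coordinate columns and create a GeoDataFrame
geometry = gpd.points_from_xy(geschaefte['lng'], geschaefte['lat'])
gdf_geschaefte = gpd.GeoDataFrame(geschaefte, geometry=geometry, crs='EPSG:4326')

# reproject into the Swiss coordinate system used by the municipality shapefile
gdf_geschaefte = gdf_geschaefte.to_crs(epsg=21781)
```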
###Code
###Output
_____no_output_____
###Markdown
Filter 1: In which municipalities is there no Coop? As preparation: merge all Coop branches into a single shape. Now filter the Zurich municipalities: which of them have no point of contact with the Coop branch shape? Save the filtered list in a new GDF. Plot: all Zurich municipalities, all Coop branches in the canton of Zurich, and, in a distinct color, the municipalities without a Coop.
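A sketch for Filter 1, assuming `coop_zh` contains the Coop points inside the canton of Zurich and `gemeinden_zh` the municipalities, both in the same projected CRS:

```python
# merge all Coop branches into a single (multi)point shape
coop_shape = coop_zh.geometry.unary_union

# municipalities whose polygon contains no Coop point at all
ohne_coop = gemeinden_zh[~gemeinden_zh.geometry.intersects(coop_shape)]

# plot: all municipalities, the ones without a Coop highlighted, Coop branches on top
ax = gemeinden_zh.plot(color='lightgrey', edgecolor='white', figsize=(12, 8))
ohne_coop.plot(ax=ax, color='tomato')
coop_zh.plot(ax=ax, color='black', markersize=5)
```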
###Code
###Output
_____no_output_____
###Markdown
Filter 2: Same approach as Filter 1, but with a different cut-off: which municipalities are at least 2 km away from a Coop branch? Shape: Filter: Plot: all Zurich municipalities, all Coop branches in the canton of Zurich, a 2 km radius around the Coop branches, and, in a distinct color, the municipalities without a Coop.
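For Filter 2 the same idea works with a 2 km buffer (the buffer distance is in meters because the data are in a projected Swiss CRS); this reads "at least 2 km away" as "the municipality does not intersect any 2 km circle":

```python
# 2 km circles around every Coop branch, merged into one shape
coop_buffer = coop_zh.geometry.buffer(2000).unary_union

# municipalities lying completely outside that buffer
weit_weg = gemeinden_zh[~gemeinden_zh.geometry.intersects(coop_buffer)]

# plot: municipalities, the buffer, the far-away municipalities and the branches
ax = gemeinden_zh.plot(color='lightgrey', edgecolor='white', figsize=(12, 8))
gpd.GeoSeries([coop_buffer], crs=gemeinden_zh.crs).plot(ax=ax, color='lightblue', alpha=0.5)
weit_weg.plot(ax=ax, color='tomato')
coop_zh.plot(ax=ax, color='black', markersize=5)
```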
###Code
###Output
_____no_output_____
###Markdown
Analysis 3 - Bonus: Are there Coop stores in heritage-protected buildings? Think of a possible approach to check this with the available data sources. For example: are there points from the Coop GDF and the Denkmalschutz GDF that are less than 10 meters apart? (Note: depending on the approach, these checks can take a few minutes of computation time!)
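One hedged approach for the bonus question, reusing the assumed names from the sketches above: buffer the heritage-protected points by 10 m and check which Coop points fall inside.

```python
# 10 m circles around the heritage-protected objects, merged into one shape
denkmal_buffer = denkmal.geometry.buffer(10).unary_union

# Coop branches that lie within 10 m of a heritage-protected object
treffer = coop_zh[coop_zh.geometry.within(denkmal_buffer)]
len(treffer)
```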
###Code
#
###Output
_____no_output_____ |
two_moons.ipynb | ###Markdown
Create data
###Code
## number of samples
N = 100
np.random.seed(10)
x,y=make_moons(n_samples=(N-15,15),shuffle=False,noise=0.05)
plt.figure()
plt.scatter(x[:,0],x[:,1],c=y)
plt.show()
## optimization variables
k = 16 # number of centroids
r1 = .18 # radius constraint for SDP, can be slacked compared to r2
r2 = .18 # radius constraint for steps2-4
card_upper = N # slacked cardinality constraint for steps2-4
d = pairwise_distances(x)**2
###Output
_____no_output_____
###Markdown
Step1: Solve SDP
###Code
### variables and parameters
r = r1
D = cp.Parameter(shape=(N,N),symmetric=True,value=d)
M =[]
for a in range(k):
M.append(cp.Variable(shape=(N,N),symmetric=True))
b = cp.Variable(shape=(N,k))
## solving SDP with constant cardinality to avoid linear fractional
n = [N//k for a in range(k)]
### constraints
cons_b = [cp.sum(b,axis=1) == (2-k)*np.ones(N)]
cons_M = [cp.diag(M[a])==1 for a in range(k)] + \
[cp.vstack([cp.hstack([M[a],b[:,a][...,None]]),
cp.hstack([b[:,a][...,None].T,np.ones((1,1))])]) >> 0 for a in range(k)]
cons_radius = [cp.multiply(
D,M[a] + np.ones((N,N)) + b[:,a] @ np.ones(N).T + np.ones(N) @ b[:,a].T) <= 16*r**2 for a in range(k)]
## cardinality constraints, avoiding to simplify linear fractional
cons_card = [cp.sum(b[:,a]) == 2*n[a]-N for a in range(k)] +\
[cp.sum(M[a],axis=1) == (2*n[a]-N)*b[:,a] for a in range(k)]
## reform linearization constriants, can be avoided for N>=100
cons_reform_M = [M[a] + np.ones((N,N)) + b[:,a] @ np.ones(N).T + np.ones(N) @ b[:,a].T >= 0 for a in range(k)] + \
[M[a] - np.ones((N,N)) + b[:,a] @ np.ones(N).T - np.ones(N) @ b[:,a].T <=0 for a in range(k)] + \
[M[a] - np.ones((N,N)) - b[:,a] @ np.ones(N).T + np.ones(N) @ b[:,a].T <=0 for a in range(k)] + \
[M[a] + np.ones((N,N)) - b[:,a] @ np.ones(N).T - np.ones(N) @ b[:,a].T >=0 for a in range(k)]
cons = cons_b + cons_M + cons_radius
obj_f = 1/8*cp.sum(cp.multiply(D,1/n[a]*cp.sum([M[a] + np.ones((N,N)) + b[:,a] @ np.ones(N).T + np.ones(N) @ b[:,a].T \
for a in range(k)])))
obj = cp.Minimize(obj_f)
problem = cp.Problem(obj,cons)
problem.solve(verbose=True,solver=cp.MOSEK)
###Output
===============================================================================
CVXPY
v1.1.15
===============================================================================
(CVXPY) Feb 12 10:19:24 PM: Your problem has 161600 variables, 49 constraints, and 10000 parameters.
(CVXPY) Feb 12 10:19:24 PM: It is compliant with the following grammars: DCP, DQCP
(CVXPY) Feb 12 10:19:24 PM: CVXPY will first compile your problem; then, it will invoke a numerical solver to obtain a solution.
-------------------------------------------------------------------------------
Compilation
-------------------------------------------------------------------------------
(CVXPY) Feb 12 10:19:24 PM: Compiling problem (target solver=MOSEK).
(CVXPY) Feb 12 10:19:24 PM: Reduction chain: Dcp2Cone -> CvxAttr2Constr -> ConeMatrixStuffing -> MOSEK
(CVXPY) Feb 12 10:19:24 PM: Applying reduction Dcp2Cone
(CVXPY) Feb 12 10:19:24 PM: Applying reduction CvxAttr2Constr
(CVXPY) Feb 12 10:19:24 PM: Applying reduction ConeMatrixStuffing
(CVXPY) Feb 12 10:48:37 PM: Applying reduction MOSEK
(CVXPY) Feb 12 10:54:52 PM: Finished problem compilation (took 2.129e+03 seconds).
(CVXPY) Feb 12 10:54:52 PM: (Subsequent compilations of this problem, using the same arguments, should take less time.)
-------------------------------------------------------------------------------
Numerical solver
-------------------------------------------------------------------------------
(CVXPY) Feb 12 10:54:52 PM: Invoking solver MOSEK to obtain a solution.
Problem
Name :
Objective sense : max
Type : CONIC (conic optimization problem)
Constraints : 82400
Cones : 0
Scalar variables : 161700
Matrix variables : 16
Integer variables : 0
Optimizer started.
Problem
Name :
Objective sense : max
Type : CONIC (conic optimization problem)
Constraints : 82400
Cones : 0
Scalar variables : 161700
Matrix variables : 16
Integer variables : 0
Optimizer - threads : 48
Optimizer - solved problem : the primal
Optimizer - Constraints : 82400
Optimizer - Cones : 1
Optimizer - Scalar variables : 80901 conic : 1701
Optimizer - Semi-definite variables: 16 scalarized : 82416
Factor - setup time : 33.27 dense det. time : 0.00
Factor - ML order time : 17.19 GP order time : 0.00
Factor - nonzeros before factor : 2.12e+08 after factor : 2.13e+08
Factor - dense dim. : 2 flops : 7.33e+11
ITE PFEAS DFEAS GFEAS PRSTATUS POBJ DOBJ MU TIME
0 2.1e+01 3.4e+04 1.7e+01 0.00e+00 -1.600000000e+01 0.000000000e+00 1.0e+00 36.00
1 2.1e+01 3.4e+04 1.7e+01 -1.00e+00 -2.430296484e+01 -8.311739646e+00 9.9e-01 44.99
2 1.9e+01 3.1e+04 1.6e+01 -1.00e+00 -1.653397121e+02 -1.494223366e+02 9.2e-01 53.33
3 1.4e+01 2.3e+04 1.4e+01 -1.00e+00 -1.051415716e+03 -1.035872788e+03 6.9e-01 61.89
4 1.0e+01 1.6e+04 1.2e+01 -1.00e+00 -2.656473165e+03 -2.641572340e+03 4.8e-01 70.19
5 8.1e+00 1.3e+04 1.1e+01 -1.00e+00 -3.974175115e+03 -3.959775704e+03 3.8e-01 78.56
6 5.8e+00 9.3e+03 8.9e+00 -1.00e+00 -6.730311201e+03 -6.716959570e+03 2.7e-01 87.08
7 5.6e+00 9.0e+03 8.8e+00 -9.93e-01 -6.792104770e+03 -6.778877617e+03 2.7e-01 95.36
8 4.5e+00 7.3e+03 7.8e+00 -9.83e-01 -9.349518867e+03 -9.337228464e+03 2.1e-01 103.67
9 1.4e+00 2.3e+03 4.3e+00 -9.84e-01 -3.456685852e+04 -3.456397106e+04 6.9e-02 112.48
10 1.9e-01 3.0e+02 1.3e+00 -9.10e-01 -2.281337375e+05 -2.281968304e+05 8.9e-03 120.95
11 5.8e-02 9.3e+01 4.7e-01 -5.03e-01 -4.831979816e+05 -4.832892742e+05 2.7e-03 129.40
12 2.7e-02 4.4e+01 1.9e-01 3.28e-02 -6.492285317e+05 -6.492947739e+05 1.3e-03 137.73
13 4.4e-03 7.1e+00 1.5e-02 4.05e-01 -8.744337093e+05 -8.744508916e+05 2.1e-04 146.47
14 3.3e-05 5.4e-02 1.2e-05 8.88e-01 -9.347712910e+05 -9.347714681e+05 1.6e-06 154.90
15 5.7e-06 9.2e-03 8.1e-07 1.00e+00 -9.355771811e+05 -9.355772104e+05 2.7e-07 163.68
16 1.5e-06 2.4e-03 1.1e-07 1.00e+00 -9.359276453e+05 -9.359276527e+05 7.1e-08 172.01
17 5.7e-07 9.3e-04 2.5e-08 1.00e+00 -9.359979422e+05 -9.359979450e+05 2.7e-08 180.27
18 2.9e-08 4.7e-05 2.9e-10 1.00e+00 -9.360293509e+05 -9.360293510e+05 1.4e-09 189.12
19 1.8e-09 3.0e-06 4.6e-12 1.00e+00 -9.360301087e+05 -9.360301088e+05 8.8e-11 197.52
20 1.1e-10 7.2e-08 5.1e-14 1.00e+00 -9.360301495e+05 -9.360301495e+05 2.1e-12 206.41
Optimizer terminated. Time: 206.63
Interior-point solution summary
Problem status : PRIMAL_AND_DUAL_FEASIBLE
Solution status : OPTIMAL
Primal. obj: -9.3603014948e+05 nrm: 7e+02 Viol. con: 7e-07 var: 4e-05 barvar: 0e+00
Dual. obj: -9.3603014948e+05 nrm: 2e+03 Viol. con: 0e+00 var: 1e-08 barvar: 9e-10
-------------------------------------------------------------------------------
Summary
-------------------------------------------------------------------------------
(CVXPY) Feb 12 10:59:02 PM: Problem status: optimal
(CVXPY) Feb 12 10:59:02 PM: Optimal value: -9.307e+05
(CVXPY) Feb 12 10:59:02 PM: Compilation took 2.129e+03 seconds
(CVXPY) Feb 12 10:59:02 PM: Solver (including time spent in interface) took 2.492e+02 seconds
###Markdown
Step 2: Solve LP for assignment
###Code
model = gp.Model('cons_kmeans')
bf = b.value
p = []
for i in range(N):
for a in range(k):
p.append(model.addVar(vtype=GRB.BINARY,name="occupancy"))
p = np.stack(p).reshape(N,k)
r=r2
model.setObjective(sum([p[i,a]*bf[i,a] for i in range(N) for a in range(k)]),GRB.MAXIMIZE)
model.addConstrs(p[:,a] @ np.ones(N) <= card_upper for a in range(k))
model.addConstrs(p[:,a] @ np.ones(N) >= 1 for a in range(k))
model.addConstrs(p[i,:] @ np.ones(k) == 1 for i in range(N));
model.addConstrs(p[i,a]*d[i,j]*p[j,a] <= 4*r**2 for i in range(N) for j in range(N) for a in range(k));
model.optimize()
###Output
Academic license - for non-commercial use only - expires 2022-10-04
Using license file /home/ah73/gurobi.lic
Gurobi Optimizer version 9.1.1 build v9.1.1rc0 (linux64)
Thread count: 48 physical cores, 96 logical processors, using up to 32 threads
Optimize a model with 132 rows, 1600 columns and 4800 nonzeros
Model fingerprint: 0x5fa54c61
Model has 160000 quadratic constraints
Variable types: 0 continuous, 1600 integer (1600 binary)
Coefficient statistics:
Matrix range [1e+00, 1e+00]
QMatrix range [9e-06, 1e+01]
Objective range [9e-01, 9e-01]
Bounds range [1e+00, 1e+00]
RHS range [1e+00, 1e+02]
QRHS range [1e-01, 1e-01]
Presolve removed 16 rows and 0 columns
Presolve time: 2.53s
Presolved: 134900 rows, 68992 columns, 272768 nonzeros
Variable types: 0 continuous, 68992 integer (68992 binary)
Found heuristic solution: objective -87.5006016
Root simplex log...
Iteration Objective Primal Inf. Dual Inf. Time
6000 8.7505622e+01 2.083062e+02 0.000000e+00 5s
13025 8.7499587e+01 0.000000e+00 0.000000e+00 7s
Root relaxation: objective 8.749959e+01, 13025 iterations, 4.20 seconds
Total elapsed time = 7.40s
Nodes | Current Node | Objective Bounds | Work
Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time
0 0 -87.49959 0 528 -87.50060 -87.49959 0.00% - 7s
Explored 1 nodes (13025 simplex iterations) in 7.59 seconds
Thread count was 32 (of 96 available processors)
Solution count 1: -87.5006
Optimal solution found (tolerance 1.00e-04)
Best objective -8.750060164422e+01, best bound -8.749958692810e+01, gap 0.0012%
###Markdown
Step 3: Get partitions and centroids
###Code
# read out the binary assignment variables p and take the argmax to get one cluster label per sample
labels = np.argmax(np.stack([[n.x for n in each] for each in p]), axis=1)
labels = labelsort(x, labels)
# centroid of each cluster = mean of the points assigned to it
centroids = np.stack([np.mean(x[labels==each], 0) for each in np.unique(labels)])
ax = plot_partition_2D(x,centroids)
ax.set_xlim([-1.5,2.5])
ax.set_ylim([-.6,1.2])
ax.set_xticks([])
ax.set_yticks([])
plt.show()
###Output
_____no_output_____
###Markdown
Step 4: Minimize intra-cluster distance
###Code
model = gp.Model('cons_kmeans')
p = []
for i in range(N):
for a in range(k):
p.append(model.addVar(vtype=GRB.BINARY,name="occupancy"))
p = np.stack(p).reshape(N,k)
r=r2
model.setObjective(
sum(
[p[i,a]*np.linalg.norm(
x[i]-centroids[a]
) for i in range(N) for a in range(k)]
),GRB.MINIMIZE)
model.addConstrs(p[:,a] @ np.ones(N) <= card_upper for a in range(k))
model.addConstrs(p[:,a] @ np.ones(N) >= 1 for a in range(k))
model.addConstrs(p[i,:] @ np.ones(k) == 1 for i in range(N));
model.addConstrs(p[i,a]*d[i,j]*p[j,a] <= 4*r**2 for i in range(N) for j in range(N) for a in range(k));
model.optimize()
###Output
Gurobi Optimizer version 9.1.1 build v9.1.1rc0 (linux64)
Thread count: 48 physical cores, 96 logical processors, using up to 32 threads
Optimize a model with 132 rows, 1600 columns and 4800 nonzeros
Model fingerprint: 0x57cc53e3
Model has 160000 quadratic constraints
Variable types: 0 continuous, 1600 integer (1600 binary)
Coefficient statistics:
Matrix range [1e+00, 1e+00]
QMatrix range [9e-06, 1e+01]
Objective range [2e-02, 3e+00]
Bounds range [1e+00, 1e+00]
RHS range [1e+00, 1e+02]
QRHS range [1e-01, 1e-01]
Presolve removed 16 rows and 0 columns
Presolve time: 2.03s
Presolved: 134900 rows, 68992 columns, 272768 nonzeros
Variable types: 0 continuous, 68992 integer (68992 binary)
Found heuristic solution: objective 106.6289710
Root relaxation: objective 1.123025e+01, 125 iterations, 0.00 seconds
Nodes | Current Node | Objective Bounds | Work
Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time
* 0 0 0 11.2302460 11.23025 0.00% - 2s
Explored 1 nodes (125 simplex iterations) in 2.48 seconds
Thread count was 32 (of 96 available processors)
Solution count 2: 11.2302 106.629
Optimal solution found (tolerance 1.00e-04)
Best objective 1.123024597905e+01, best bound 1.123024597905e+01, gap 0.0000%
###Markdown
Plot
###Code
labels = np.argmax(np.stack([[n.x for n in each] for each in p]),axis=1)
labels = labelsort(x,labels)
centroids = np.stack([np.mean(x[labels==each],0)for each in np.unique(labels)])
ax = plot_partition_2D(x,centroids,grid_size=2000)
ax.set_xlim([-1.5,2.5])
ax.set_ylim([-.6,1.2])
ax.set_xticks([])
ax.set_yticks([])
plt.show()
partition_radius(x,centroids)
k_radius(x,centroids)
###Output
_____no_output_____
###Markdown
Baseline Methods
###Code
from sklearn.cluster import KMeans
from k_means_constrained import KMeansConstrained as KCardi
from sklearn_extra.cluster import KMedoids
from kcenter import KCenter
for KMethod in [KMeans,KMedoids,KCenter]:
## specify k-medoids++ as KMedoids init for paper reported results
kmethod = KMethod(n_clusters=16, random_state=0)
kmethod.fit(x)
labels = kmethod.labels_
labels = labelsort(x,labels)
centroids = np.stack([np.mean(x[labels==each],0)for each in np.unique(labels)])
ax = plot_partition_2D(x,centroids)
ax.set_xlim([-1.5,2.5])
ax.set_ylim([-.6,1.2])
ax.set_xticks([])
ax.set_yticks([])
plt.show()
size_min = 1
for size_max in [7,10,N]:
kmeans = KCardi(n_clusters=16,
size_max=size_max,
size_min = size_min,
).fit(x)
labels = kmeans.labels_
labels = labelsort(x,labels)
centroids = np.stack([np.mean(x[labels==each],0)for each in np.unique(labels)])
ax = plot_partition_2D(x,centroids)
ax.set_xlim([-1.5,2.5])
ax.set_ylim([-.6,1.2])
ax.set_xticks([])
ax.set_yticks([])
plt.show()
###Output
_____no_output_____ |
SLength/SLength.ipynb | ###Markdown
0. Imports
###Code
import pandas as pd
import numpy as np
import math
###Output
_____no_output_____
###Markdown
1. Load csv
###Code
df = pd.read_csv('/content/drive/MyDrive/Škola/WM/SLength/log_jeden_den.csv', ';', usecols=range(1,15))
df.tail()
###Output
_____no_output_____
###Markdown
2. Identify sessions based on estimation - 10 minutes (SLength)
###Code
df['SLength'] = ''
count = 1   # id of the current session
time = 0    # time accumulated in the current session (seconds)
C = 600     # session threshold: 10 minutes in seconds
for i, row in df.iterrows():
    if (i != 0):
        if (df.at[i,'userid'] == df.at[i-1,'userid']):
            # same user as the previous request: add the previous page's viewing time
            if not (math.isnan(df.at[i-1, 'length'])):
                time += df.at[i-1, 'length'].astype(int)
            else:
                # a missing length closes the current session
                count += 1
                time = 0
            # exceeding the 10-minute threshold starts a new session
            if (time > C):
                count += 1
                time = 0
        else:
            # a different user always starts a new session
            count += 1
            time = 0
    df.at[i, 'SLength'] = count
df.tail()
###Output
_____no_output_____
###Markdown
3. Save to csv
###Code
df.to_csv('Laca_slength.csv', sep=';')
###Output
_____no_output_____ |
nbs/80_timeseries_data.ipynb | ###Markdown
Timeseries Data> Basic functions to read timeseries files like `.arff` and `.ts` files.
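A sketch of the typical end-to-end use of the helpers defined below (the same calls appear in the examples further down in this notebook):

```python
# download a UCR dataset, load its train/test .arff files and plot one sample
path = unzip_data(URLs_TS.NATOPS)
fnames = [path/'NATOPS_TRAIN.arff', path/'NATOPS_TEST.arff']
data = TSData.from_arff(fnames)
ts, label = data.get_items()[0]
show_timeseries(ts, title=label)
```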
###Code
#export
class TSData():
"Class that loads .arff (soon .ts) files and returns a tuple (data.x , self.y)"
"self.x is a list of 2D array with a shape (n_samples, nb_channels, sequence_length) "
"self.y is a 1D array as y (i.e. label) with a shape (n_samples)"
"for the NATOPS_Train.arff file, the result will be : x(180, 24, 51) and y(180)"
# def __init__(self):
# self.x = self.y = self.dsname = self.fnames = [],[],[],[]
def __init__(self, fnames, has_targets=True, fill_missing='NaN'):
# self.x = self.y = self.dsname = [],[],[]
self.x = []
self.y = []
self.dsname = []
self.fnames = fnames
self.has_targets = has_targets
self.fill_missings = fill_missing
def __repr__(self): return f"{self.__class__.__name__}:\n Datasets names (concatenated): {self.dsname}\n Filenames: {self.fnames}\n Data shape: {self.x.shape}\n Targets shape: {self.y.shape}\n Nb Samples: {self.x.shape[0]}\n Nb Channels: {self.x.shape[1]}\n Sequence Length: {self.x.shape[2]}"
def get_x(self, as_list=True): return(list(self.x))
def get_y(self): return(self.y)
def get_items(self): return [(item, str(label)) for (item, label) in zip(list(self.x), self.y)]
def get_lists(self): return (list(self.x), self.y)
def __getitem__(self, i): return (self.x[i], str(self.y[i]))
def get_nb_samples(self): return self.x.shape[0]
def sample(self, cut):
n=self.x.shape[0]
rand_idx = L(int(i) for i in torch.randperm(n))
idxs = rand_idx[:cut]
return [(self.x[i], str(self.y[i])) for i in idxs]
@property
def sizes(self): return (self.x.shape, self.y.shape)
@property
def n_channels(self): return (self.x.shape[1])
def _load_arff(self, fname, has_targets=True, fill_missing='NaN'):
"load an .arff file and return a tupple of 2 numpy arrays: "
"x : array with a shape (n_samples, nb_channels, sequence_length)"
"y : array with a shape (n_samples)"
"for the NATOPS_Train.arff the result will be : x(180, 24, 51) and y(180)"
instance_list = []
class_val_list = []
data_started = False
is_multi_variate = False
is_first_case = True
with open(fname, 'r', encoding="utf8") as f:
for line in f:
if line.strip():
if is_multi_variate is False and "@attribute" in line.lower() and "relational" in line.lower():
is_multi_variate = True
if "@data" in line.lower():
data_started = True
continue
# if the 'data tag has been found, the header information has been cleared and now data can be loaded
if data_started:
line = line.replace("?", fill_missing)
if is_multi_variate:
if has_targets:
line, class_val = line.split("',")
class_val_list.append(class_val.strip())
dimensions = line.split("\\n")
dimensions[0] = dimensions[0].replace("'", "")
if is_first_case:
for d in range(len(dimensions)):
instance_list.append([])
is_first_case = False
for dim in range(len(dimensions)):
instance_list[dim].append(np.array(dimensions[dim].split(','), dtype=np.float32))
# instance_list[dim].append(np.fromiter(dimensions[dim].split(','), dtype=np.float32))
else:
if is_first_case:
instance_list.append([])
is_first_case = False
line_parts = line.split(",")
if has_targets:
instance_list[0].append(np.array(line_parts[:len(line_parts)-1], dtype=np.float32))
class_val_list.append(line_parts[-1].strip())
else:
instance_list[0].append(np.array(line_parts[:len(line_parts)-1], dtype=np.float32))
#instance_list has a shape of (dimensions, nb_samples, seq_lenght)
#for the NATOPS_Train.arff it would be (24, 180, 51)
#convert python list to numpy array and transpose the 2 first dimensions -> (180, 24, 51)
x = np.asarray(instance_list).transpose(1,0,2)
if has_targets:
y = np.asarray(class_val_list)
return x, y
else:
            return x, [None] * x.shape[0]
@classmethod
def from_arff(self, fnames, has_targets=True, fill_missing='NaN'):
"load an .arff file and return a tupple of 2 numpy arrays: "
"x : array with a shape (n_samples, nb_channels, sequence_length)"
"y : array with a shape (n_samples)"
"for the NATOPS_Train.arff the result will be : x(180, 24, 51) and y(180)"
data = self(fnames, has_targets=has_targets, fill_missing=fill_missing)
if isinstance(fnames, list):
data.x = []
data.y = []
data.dsname = []
data.fnames = []
xs,ys = [],[]
for i, fn in enumerate(fnames):
x,y = data._load_arff(fn, has_targets=has_targets, fill_missing=fill_missing)
xs.append(x)
ys.append(y)
data.fnames.append(fn)
data.dsname.append(fn.stem)
data.x = np.concatenate(xs)
data.y = np.concatenate(ys)
else:
data.fnames.append(fnames)
data.dsname.append(fnames.stem)
            data.x, data.y = data._load_arff(fnames, has_targets=has_targets, fill_missing=fill_missing)
return data
# add_docs(TSData,
# from_arff="read one or serveral arff files and concatenate them, and returns a TSData object")
_docs=dict(
        from_arff="read one or several arff files, concatenate them, and return a TSData object",
        get_items="return list of tuples. Each tuple corresponds to a timeserie (numpy.ndarray) and a label (string)",
get_x="return list of timeseries (no labels)",
get_y="return list of labels corresponding to each timeserie",
sizes="return timeseries shape and labels shape (labels list size)",
n_channels="return timeserie's number of channels. For `arff` files it is called `dimension`. In the case of NATOPS_Train.arff, it returns 24")
show_doc(TSData.from_arff)
show_doc(TSData.get_items)
show_doc(TSData.n_channels)
#export
def get_ts_items(fnames):
    'get_ts_items returns a list of tuples. Each tuple corresponds to a timeserie (numpy.ndarray) and a label (string)'
data = TSData.from_arff(fnames)
return data.get_items()
show_doc(get_ts_items)
# hide
def check_ext(fnames, ext):
if isinstance(fnames, list):
fnames = [fn if (fn.suffix!='') else f'{fn}.{ext}' for fn in fnames]
else:
fnames = fnames if (fnames.suffix!='') else f'{fnames}.{ext}'
###Output
_____no_output_____
###Markdown
Plot Timeseries
###Code
#export
def show_timeseries(ts, ctx=None, title=None, chs=None, leg=True, figsize=None, linewidth=3, linestyle='solid', color='orange', **kwargs):
"""
Plot a timeseries.
Args:
title : usually the class of the timeseries
ts : timeseries. It should have a shape of (nb_channels, sequence_length)
chs : array representing a list of channels to plot
leg : Display or not a legend
"""
fig = None
if ctx is None: fig, ctx = plt.subplots(figsize=figsize, **kwargs)
n_channels = ts.shape[0]
t = range(ts.shape[1])
chs_max = max(chs) if chs else 0
channels = chs if (chs and (chs_max < ts.shape[0])) else range(ts.shape[0])
for ch in channels:
if n_channels==1:
ctx.plot(t, ts[ch], label='ch'+str(ch), linewidth=linewidth, color=color, linestyle=linestyle)
else:
ctx.plot(t, ts[ch], label='ch'+str(ch), linewidth=linewidth, linestyle=linestyle)
if leg: ctx.legend(loc='upper right', ncol=2, framealpha=0.5)
if title: ctx.set_title(title)
return fig
# return ctx
show_doc(show_timeseries)
path_data = Config().data
path_data
# export
def file_extract_at_filename(fname, dest):
    "Extract `fname` to `dest`/`fname`.name folder using `tarfile` or `zipfile`"
dest = Path(dest)/Path(fname).with_suffix('').name
# tarfile.open(fname, 'r:gz').extractall(dest)
fname = str(fname)
if fname.endswith('gz'): tarfile.open(fname, 'r:gz').extractall(dest)
elif fname.endswith('zip'): zipfile.ZipFile(fname ).extractall(dest)
else: raise Exception(f'Unrecognized archive: {fname}')
###Output
_____no_output_____
###Markdown
`file_extract_at_filename` is used by default in `unzip_data` to decompress the downloaded file in a folder that has the same name as the zip filename.
###Code
# export
def unzip_data(url, fname=None, dest=None, c_key='data', force_download=False):
"Download `url` to `fname` if `dest` doesn't exist, and un-compress to `dest`/`fname`.name folder ."
return untar_data(url, fname=fname, c_key=c_key, force_download=force_download, extract_func=file_extract_at_filename)
###Output
_____no_output_____
###Markdown
`unzip_data` downloads the file at `url` (if it is not already present) and decompresses it into a folder that has the same name as the zip filename.
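For example, `unzip_data(URLs_TS.NATOPS)` (used further down in this notebook) downloads `NATOPS.zip` and extracts it into a `NATOPS` folder under the configured data path.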
###Code
show_doc(unzip_data)
# export
class URLs_TS():
"Global constants for dataset and model URLs."
LOCAL_PATH = Path.cwd()
URL = 'http://www.timeseriesclassification.com/Downloads/'
# UCR multivariate datasets - Current Naming
ARTICULARY_WORD_RECOGNITION = f'{URL}ArticularyWordRecognition.zip'
ATRIAL_FIBRILLATION = f'{URL}AtrialFibrillation.zip'
BASIC_MOTIONS = f'{URL}BasicMotions.zip'
CHARACTER_TRAJECTORIES = f'{URL}CharacterTrajectories.zip'
CRICKET = f'{URL}Cricket.zip'
DUCK_DUCK_GEESE = f'{URL}DuckDuckGeese.zip'
EIGEN_WORMS = f'{URL}EigenWorms.zip'
EPILEPSY = f'{URL}Epilepsy.zip'
ETHANOL_CONCENTRATION = f'{URL}EthanolConcentration.zip'
ERING = f'{URL}ERing.zip'
FACE_DETECTION = f'{URL}FaceDetection.zip'
FINGER_MOVEMENTS = f'{URL}FingerMovements.zip'
HAND_MOVEMENT_DIRECTION = f'{URL}HandMovementDirection.zip'
HANDWRITING = f'{URL}Handwriting.zip'
HEARTBEAT = f'{URL}Heartbeat.zip'
JAPANESE_VOWELS = f'{URL}JapaneseVowels.zip'
LIBRAS = f'{URL}Libras.zip'
LSST = f'{URL}LSST.zip'
INSECT_WINGBEAT = f'{URL}InsectWingbeat.zip'
MOTOR_IMAGERY = f'{URL}MotorImagery.zip'
NATOPS = f'{URL}NATOPS.zip'
PEN_DIGITS = f'{URL}PenDigits.zip'
PEMS_SF = f'{URL}PEMS-SF.zip'
PHONEME_SPECTRA = f'{URL}PhonemeSpectra.zip'
RACKET_SPORTS = f'{URL}RacketSports.zip'
SELF_REGULATION_SCP1 = f'{URL}SelfRegulationSCP1.zip'
SELF_REGULATION_SCP2 = f'{URL}SelfRegulationSCP2.zip'
SPOKEN_ARABIC_DIGITS = f'{URL}SpokenArabicDigits.zip'
STAND_WALK_JUMP = f'{URL}StandWalkJump.zip'
UWAVE_GESTURE_LIBRARY = f'{URL}UWaveGestureLibrary.zip'
# UCR multivariate datasets - New Naming
# MULTI_ARTICULARY_WORD_RECOGNITION = f'{URL}ArticularyWordRecognition.zip'
# MULTI_ATRIAL_FIBRILLATION = f'{URL}AtrialFibrillation.zip'
# MULTI_BASIC_MOTIONS = f'{URL}BasicMotions.zip'
# MULTI_CHARACTER_TRAJECTORIES = f'{URL}CharacterTrajectories.zip'
# MULTI_CRICKET = f'{URL}Cricket.zip'
# MULTI_DUCK_DUCK_GEESE = f'{URL}DuckDuckGeese.zip'
# MULTI_EIGEN_WORMS = f'{URL}EigenWorms.zip'
# MULTI_EPILEPSY = f'{URL}Epilepsy.zip'
# MULTI_ETHANOL_CONCENTRATION = f'{URL}EthanolConcentration.zip'
# MULTI_ERING = f'{URL}ERing.zip'
# MULTI_FACE_DETECTION = f'{URL}FaceDetection.zip'
# MULTI_FINGER_MOVEMENTS = f'{URL}FingerMovements.zip'
# MULTI_HAND_MOVEMENT_DIRECTION = f'{URL}HandMovementDirection.zip'
# MULTI_HANDWRITING = f'{URL}Handwriting.zip'
# MULTI_HEARTBEAT = f'{URL}Heartbeat.zip'
# MULTI_JAPANESE_VOWELS = f'{URL}JapaneseVowels.zip'
# MULTI_LIBRAS = f'{URL}Libras.zip'
# MULTI_LSST = f'{URL}LSST.zip'
# MULTI_INSECT_WINGBEAT = f'{URL}InsectWingbeat.zip'
# MULTI_MOTOR_IMAGERY = f'{URL}MotorImagery.zip'
# MULTI_NATOPS = f'{URL}NATOPS.zip'
# MULTI_PEN_DIGITS = f'{URL}PenDigits.zip'
# MULTI_PEMS_SF = f'{URL}PEMS-SF.zip'
# MULTI_PHONEME_SPECTRA = f'{URL}PhonemeSpectra.zip'
# MULTI_RACKET_SPORTS = f'{URL}RacketSports.zip'
# MULTI_SELF_REGULATION_SCP1 = f'{URL}SelfRegulationSCP1.zip'
# MULTI_SELF_REGULATION_SCP2 = f'{URL}SelfRegulationSCP2.zip'
# MULTI_SPOKEN_ARABIC_DIGITS = f'{URL}SpokenArabicDigits.zip'
# MULTI_STAND_WALK_JUMP = f'{URL}StandWalkJump.zip'
# MULTI_U_WAVE_GESTURE_LIBRARY = f'{URL}UWaveGestureLibrary'
# UCR univariate datasets
UNI_ACSF1 = f'{URL}ACSF1.zip'
UNI_ADIAC = f'{URL}Adiac.zip'
UNI_ALL_GESTURE_WIIMOTE_X = f'{URL}AllGestureWiimoteX.zip'
UNI_ALL_GESTURE_WIIMOTE_Y = f'{URL}AllGestureWiimoteY.zip'
UNI_ALL_GESTURE_WIIMOTE_Z = f'{URL}AllGestureWiimoteZ.zip'
UNI_ARROW_HEAD = f'{URL}ArrowHead.zip'
UNI_BEEF = f'{URL}Beef.zip'
UNI_BEETLE_FLY = f'{URL}BeetleFly.zip'
UNI_BIRD_CHICKEN = f'{URL}BirdChicken.zip'
UNI_BME = f'{URL}BME.zip'
UNI_CAR = f'{URL}Car.zip'
UNI_CBF = f'{URL}CBF.zip'
UNI_CHINATOWN = f'{URL}Chinatown.zip'
UNI_CHLORINE_CONCENTRATION = f'{URL}ChlorineConcentration.zip'
UNI_CIN_CEC_GTORSO = f'{URL}CinCECGtorso.zip'
UNI_COFFEE = f'{URL}Coffee.zip'
UNI_COMPUTERS = f'{URL}Computers.zip'
UNI_CRICKET_X = f'{URL}CricketX.zip'
UNI_CRICKET_Y = f'{URL}CricketY.zip'
UNI_CRICKET_Z = f'{URL}CricketZ.zip'
UNI_CROP = f'{URL}Crop.zip'
UNI_DIATOM_SIZE_REDUCTION = f'{URL}DiatomSizeReduction.zip'
UNI_DISTAL_PHALANX_OUTLINE_AGE_GROUP= f'{URL}DistalPhalanxOutlineAgeGroup.zip'
UNI_DISTAL_PHALANX_OUTLINE_CORRECT = f'{URL}DistalPhalanxOutlineCorrect.zip'
UNI_DISTAL_PHALANX_TW = f'{URL}DistalPhalanxTW.zip'
UNI_DODGER_LOOP_DAY = f'{URL}DodgerLoopDay.zip'
UNI_DODGER_LOOP_GAME = f'{URL}DodgerLoopGame.zip'
UNI_DODGER_LOOP_WEEKEND = f'{URL}DodgerLoopWeekend.zip'
UNI_EARTHQUAKES = f'{URL}Earthquakes.zip'
UNI_ECG200 = f'{URL}ECG200.zip'
UNI_ECG5000 = f'{URL}ECG5000.zip'
UNI_ECG_FIVE_DAYS = f'{URL}ECGFiveDays.zip'
UNI_ELECTRIC_DEVICES = f'{URL}ElectricDevices.zip'
UNI_EOG_HORIZONTAL_SIGNAL = f'{URL}EOGHorizontalSignal.zip'
UNI_EOG_VERTICAL_SIGNAL = f'{URL}EOGVerticalSignal.zip'
UNI_ETHANOL_LEVEL = f'{URL}EthanolLevel.zip'
UNI_FACE_ALL = f'{URL}FaceAll.zip'
UNI_FACE_FOUR = f'{URL}FaceFour.zip'
UNI_FACES_UCR = f'{URL}FacesUCR.zip'
UNI_FIFTY_WORDS = f'{URL}FiftyWords.zip'
UNI_FISH = f'{URL}Fish.zip'
UNI_FORD_A = f'{URL}FordA.zip'
UNI_FORD_B = f'{URL}FordB.zip'
UNI_FREEZER_REGULAR_TRAIN = f'{URL}FreezerRegularTrain.zip'
UNI_FREEZER_SMALL_TRAIN = f'{URL}FreezerSmallTrain.zip'
UNI_FUNGI = f'{URL}Fungi.zip'
UNI_GESTURE_MID_AIR_D1 = f'{URL}GestureMidAirD1.zip'
UNI_GESTURE_MID_AIR_D2 = f'{URL}GestureMidAirD2.zip'
UNI_GESTURE_MID_AIR_D3 = f'{URL}GestureMidAirD3.zip'
UNI_GESTURE_PEBBLE_Z1 = f'{URL}GesturePebbleZ1.zip'
UNI_GESTURE_PEBBLE_Z2 = f'{URL}GesturePebbleZ2.zip'
UNI_GUN_POINT = f'{URL}GunPoint.zip'
UNI_GUN_POINT_AGE_SPAN = f'{URL}GunPointAgeSpan.zip'
UNI_GUN_POINT_MALE_VERSUS_FEMALE = f'{URL}GunPointMaleVersusFemale.zip'
UNI_GUN_POINT_OLD_VERSUS_YOUNG = f'{URL}GunPointOldVersusYoung.zip'
UNI_HAM = f'{URL}Ham.zip'
UNI_HAND_OUTLINES = f'{URL}HandOutlines.zip'
UNI_HAPTICS = f'{URL}Haptics.zip'
UNI_HERRING = f'{URL}Herring.zip'
UNI_HOUSE_TWENTY = f'{URL}HouseTwenty.zip'
UNI_INLINE_SKATE = f'{URL}InlineSkate.zip'
UNI_INSECT_EPG_REGULAR_TRAIN = f'{URL}InsectEPGRegularTrain.zip'
UNI_INSECT_EPG_SMALL_TRAIN = f'{URL}InsectEPGSmallTrain.zip'
UNI_INSECT_WINGBEAT_SOUND = f'{URL}InsectWingbeatSound.zip'
UNI_ITALY_POWER_DEMAND = f'{URL}ItalyPowerDemand.zip'
UNI_LARGE_KITCHEN_APPLIANCES = f'{URL}LargeKitchenAppliances.zip'
UNI_LIGHTNING2 = f'{URL}Lightning2.zip'
UNI_LIGHTNING7 = f'{URL}Lightning7.zip'
UNI_MALLAT = f'{URL}Mallat.zip'
UNI_MEAT = f'{URL}Meat.zip'
UNI_MEDICAL_IMAGES = f'{URL}MedicalImages.zip'
UNI_MELBOURNE_PEDESTRIAN = f'{URL}MelbournePedestrian.zip'
UNI_MIDDLE_PHALANX_OUTLINE_AGE_GROUP= f'{URL}MiddlePhalanxOutlineAgeGroup.zip'
UNI_MIDDLE_PHALANX_OUTLINE_CORRECT = f'{URL}MiddlePhalanxOutlineCorrect.zip'
UNI_MIDDLE_PHALANX_TW = f'{URL}MiddlePhalanxTW.zip'
UNI_MIXED_SHAPES = f'{URL}MixedShapes.zip'
UNI_MIXED_SHAPES_SMALL_TRAIN = f'{URL}MixedShapesSmallTrain.zip'
UNI_MOTE_STRAIN = f'{URL}MoteStrain.zip'
UNI_NON_INVASIVE_FETAL_ECG_THORAX1 = f'{URL}NonInvasiveFetalECGThorax1.zip'
UNI_NON_INVASIVE_FETAL_ECG_THORAX2 = f'{URL}NonInvasiveFetalECGThorax2.zip'
UNI_OLIVE_OIL = f'{URL}OliveOil.zip'
UNI_OSU_LEAF = f'{URL}OSULeaf.zip'
UNI_PHALANGES_OUTLINES_CORRECT = f'{URL}PhalangesOutlinesCorrect.zip'
UNI_PHONEME = f'{URL}Phoneme.zip'
UNI_PICKUP_GESTURE_WIIMOTE_Z = f'{URL}PickupGestureWiimoteZ.zip'
UNI_PIG_AIRWAY_PRESSURE = f'{URL}PigAirwayPressure.zip'
UNI_PIG_ART_PRESSURE = f'{URL}PigArtPressure.zip'
UNI_PIG_CVP = f'{URL}PigCVP.zip'
UNI_PLAID = f'{URL}PLAID.zip'
UNI_PLANE = f'{URL}Plane.zip'
UNI_POWER_CONS = f'{URL}PowerCons.zip'
UNI_PROXIMAL_PHALANX_OUTLINE_AGE_GROUP= f'{URL}ProximalPhalanxOutlineAgeGroup.zip'
UNI_PROXIMAL_PHALANX_OUTLINE_CORRECT= f'{URL}ProximalPhalanxOutlineCorrect.zip'
UNI_PROXIMAL_PHALANX_TW = f'{URL}ProximalPhalanxTW.zip'
UNI_REFRIGERATION_DEVICES = f'{URL}RefrigerationDevices.zip'
UNI_ROCK = f'{URL}Rock.zip'
UNI_SCREEN_TYPE = f'{URL}ScreenType.zip'
UNI_SEMG_HAND_GENDER_CH2 = f'{URL}SemgHandGenderCh2.zip'
UNI_SEMG_HAND_MOVEMENT_CH2 = f'{URL}SemgHandMovementCh2.zip'
UNI_SEMG_HAND_SUBJECT_CH2 = f'{URL}SemgHandSubjectCh2.zip'
UNI_SHAKE_GESTURE_WIIMOTE_Z = f'{URL}ShakeGestureWiimoteZ.zip'
UNI_SHAPELET_SIM = f'{URL}ShapeletSim.zip'
UNI_SHAPES_ALL = f'{URL}ShapesAll.zip'
UNI_SMALL_KITCHEN_APPLIANCES = f'{URL}SmallKitchenAppliances.zip'
UNI_SMOOTH_SUBSPACE = f'{URL}SmoothSubspace.zip'
UNI_SONY_AIBO_ROBOT_SURFACE1 = f'{URL}SonyAIBORobotSurface1.zip'
UNI_SONY_AIBO_ROBOT_SURFACE2 = f'{URL}SonyAIBORobotSurface2.zip'
UNI_STARLIGHT_CURVES = f'{URL}StarLightCurves.zip'
UNI_STRAWBERRY = f'{URL}Strawberry.zip'
UNI_SWEDISH_LEAF = f'{URL}SwedishLeaf.zip'
UNI_SYMBOLS = f'{URL}Symbols.zip'
UNI_SYNTHETIC_CONTROL = f'{URL}SyntheticControl.zip'
UNI_TOE_SEGMENTATION1 = f'{URL}ToeSegmentation1.zip'
UNI_TOE_SEGMENTATION2 = f'{URL}ToeSegmentation2.zip'
UNI_TRACE = f'{URL}Trace.zip'
UNI_TWO_LEAD_ECG = f'{URL}TwoLeadECG.zip'
UNI_TWO_PATTERNS = f'{URL}TwoPatterns.zip'
UNI_UMD = f'{URL}UMD.zip'
UNI_U_WAVE_GESTURE_LIBRARY_ALL = f'{URL}UWaveGestureLibraryAll.zip'
UNI_U_WAVE_GESTURE_LIBRARY_X = f'{URL}UWaveGestureLibraryX.zip'
UNI_U_WAVE_GESTURE_LIBRARY_Y = f'{URL}UWaveGestureLibraryY.zip'
UNI_U_WAVE_GESTURE_LIBRARY_Z = f'{URL}UWaveGestureLibraryZ.zip'
UNI_WAFER = f'{URL}Wafer.zip'
UNI_WINE = f'{URL}Wine.zip'
UNI_WORD_SYNONYMS = f'{URL}WordSynonyms.zip'
UNI_WORMS = f'{URL}Worms.zip'
UNI_WORMS_TWO_CLASS = f'{URL}WormsTwoClass.zip'
UNI_YOGA = f'{URL}Yoga.zip'
def path(url='.', c_key='archive'):
fname = url.split('/')[-1]
local_path = URLs.LOCAL_PATH/('models' if c_key=='models' else 'data')/fname
if local_path.exists(): return local_path
return Config()[c_key]/fname
dsname = 'NATOPS' #'NATOPS', 'LSST', 'Wine', 'Epilepsy', 'HandMovementDirection'
# dsname = 'ECG200' #'NATOPS', 'LSST', 'Wine', 'Epilepsy', 'HandMovementDirection'
path = unzip_data(URLs_TS.NATOPS)
# path = unzip_data(URLs_TS.UNI_ECG200)
path
path.ls()
fname_train = f'{dsname}_TRAIN.arff'
fname_test = f'{dsname}_TEST.arff'
fnames = [path/fname_train, path/fname_test]
fnames
data = TSData.from_arff(fnames)
data
print(data)
data.dsname, data.fnames, data.n_channels, data.sizes, data.x.shape, data.y.shape
test_eq(data.dsname, ['NATOPS_TRAIN', 'NATOPS_TEST'])
test_eq(data.n_channels, 24)
test_eq(data.sizes, ((360, 24, 51), (360,)))
test_eq(data.x.shape, (360, 24, 51))
test_eq(data.y.shape, (360,))
type(data.get_items()[1][0]), data.get_items()[1][0]
type(data.get_y()[1]), data.get_y()[1]
test_eq(data.get_y()[1], '3.0')
idx = 4
ts, title = data.get_items()[idx]
ts
show_timeseries(ts, title=title, figsize=(8,6), linewidth=3)
# show_timeseries(ts, title=title, figsize=(8,6), linewidth=4, color='orange', linestyle='dotted')
# show_timeseries(ts, title=title, chs=range(0,24,3))
# hide
def load_from_tsfile_to_array(full_file_path_and_name, return_separate_X_and_y=True, replace_missing_vals_with='NaN'):
"""Loads data from a .ts file into a Pandas DataFrame.
Parameters
full_file_path_and_name: str
The full pathname of the .ts file to read.
return_separate_X_and_y: bool
true if X and Y values should be returned as separate Data Frames (X) and a numpy array (y), false otherwise.
        This is only relevant for data that has class labels.
replace_missing_vals_with: str
The value that missing values in the text file should be replaced with prior to parsing.
Returns
DataFrame, ndarray
If return_separate_X_and_y then a tuple containing a DataFrame and a numpy array containing the relevant time-series and corresponding class values.
DataFrame
If not return_separate_X_and_y then a single DataFrame containing all time-series and (if relevant) a column "class_vals" the associated class values.
"""
# Initialize flags and variables used when parsing the file
metadata_started = False
data_started = False
has_problem_name_tag = False
has_timestamps_tag = False
has_univariate_tag = False
has_class_labels_tag = False
has_data_tag = False
previous_timestamp_was_int = None
previous_timestamp_was_timestamp = None
num_dimensions = None
is_first_case = True
instance_list = []
class_val_list = []
line_num = 0
# Parse the file
# print(full_file_path_and_name)
with open(full_file_path_and_name, 'r',encoding='utf-8') as file:
for line in file:
# Strip white space from start/end of line and change to lowercase for use below
line = line.strip().lower()
# Empty lines are valid at any point in a file
if line:
# Check if this line contains metadata
# Please note that even though metadata is stored in this function it is not currently published externally
if line.startswith("@problemname"):
# Check that the data has not started
if data_started:
raise TsFileParseException("metadata must come before data")
# Check that the associated value is valid
tokens = line.split(' ')
token_len = len(tokens)
if token_len == 1:
raise TsFileParseException("problemname tag requires an associated value")
problem_name = line[len("@problemname") + 1:]
has_problem_name_tag = True
metadata_started = True
elif line.startswith("@timestamps"):
# Check that the data has not started
if data_started:
raise TsFileParseException("metadata must come before data")
# Check that the associated value is valid
tokens = line.split(' ')
token_len = len(tokens)
if token_len != 2:
raise TsFileParseException("timestamps tag requires an associated Boolean value")
elif tokens[1] == "true":
timestamps = True
elif tokens[1] == "false":
timestamps = False
else:
raise TsFileParseException("invalid timestamps value")
has_timestamps_tag = True
metadata_started = True
elif line.startswith("@univariate"):
# Check that the data has not started
if data_started:
raise TsFileParseException("metadata must come before data")
# Check that the associated value is valid
tokens = line.split(' ')
token_len = len(tokens)
if token_len != 2:
raise TsFileParseException("univariate tag requires an associated Boolean value")
elif tokens[1] == "true":
univariate = True
elif tokens[1] == "false":
univariate = False
else:
raise TsFileParseException("invalid univariate value")
has_univariate_tag = True
metadata_started = True
elif line.startswith("@classlabel"):
# Check that the data has not started
if data_started:
raise TsFileParseException("metadata must come before data")
# Check that the associated value is valid
tokens = line.split(' ')
token_len = len(tokens)
if token_len == 1:
raise TsFileParseException("classlabel tag requires an associated Boolean value")
if tokens[1] == "true":
class_labels = True
elif tokens[1] == "false":
class_labels = False
else:
raise TsFileParseException("invalid classLabel value")
# Check if we have any associated class values
if token_len == 2 and class_labels:
raise TsFileParseException("if the classlabel tag is true then class values must be supplied")
has_class_labels_tag = True
class_label_list = [token.strip() for token in tokens[2:]]
metadata_started = True
# Check if this line contains the start of data
elif line.startswith("@data"):
if line != "@data":
raise TsFileParseException("data tag should not have an associated value")
if data_started and not metadata_started:
raise TsFileParseException("metadata must come before data")
else:
has_data_tag = True
data_started = True
# If the 'data tag has been found then metadata has been parsed and data can be loaded
elif data_started:
# Check that a full set of metadata has been provided
if (not has_problem_name_tag or not has_timestamps_tag or not has_univariate_tag
or not has_class_labels_tag or not has_data_tag):
raise TsFileParseException("a full set of metadata has not been provided before the data")
# Replace any missing values with the value specified
line = line.replace("?", replace_missing_vals_with)
# Check if we dealing with data that has timestamps
if timestamps:
# We're dealing with timestamps so cannot just split line on ':' as timestamps may contain one
has_another_value = False
has_another_dimension = False
timestamps_for_dimension = []
values_for_dimension = []
this_line_num_dimensions = 0
line_len = len(line)
char_num = 0
while char_num < line_len:
# Move through any spaces
while char_num < line_len and str.isspace(line[char_num]):
char_num += 1
# See if there is any more data to read in or if we should validate that read thus far
if char_num < line_len:
# See if we have an empty dimension (i.e. no values)
if line[char_num] == ":":
if len(instance_list) < (this_line_num_dimensions + 1):
instance_list.append([])
instance_list[this_line_num_dimensions].append(pd.Series())
this_line_num_dimensions += 1
has_another_value = False
has_another_dimension = True
timestamps_for_dimension = []
values_for_dimension = []
char_num += 1
else:
# Check if we have reached a class label
if line[char_num] != "(" and class_labels:
class_val = line[char_num:].strip()
if class_val not in class_label_list:
raise TsFileParseException("the class value '" + class_val + "' on line " +
str(line_num + 1) + " is not valid")
class_val_list.append(class_val)
char_num = line_len
has_another_value = False
has_another_dimension = False
timestamps_for_dimension = []
values_for_dimension = []
else:
# Read in the data contained within the next tuple
if line[char_num] != "(" and not class_labels:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " does not start with a '('")
char_num += 1
tuple_data = ""
while char_num < line_len and line[char_num] != ")":
tuple_data += line[char_num]
char_num += 1
if char_num >= line_len or line[char_num] != ")":
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " does not end with a ')'")
# Read in any spaces immediately after the current tuple
char_num += 1
while char_num < line_len and str.isspace(line[char_num]):
char_num += 1
# Check if there is another value or dimension to process after this tuple
if char_num >= line_len:
has_another_value = False
has_another_dimension = False
elif line[char_num] == ",":
has_another_value = True
has_another_dimension = False
elif line[char_num] == ":":
has_another_value = False
has_another_dimension = True
char_num += 1
# Get the numeric value for the tuple by reading from the end of the tuple data backwards to the last comma
last_comma_index = tuple_data.rfind(',')
if last_comma_index == -1:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " contains a tuple that has no comma inside of it")
try:
value = tuple_data[last_comma_index + 1:]
value = float(value)
except ValueError:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) +
" contains a tuple that does not have a valid numeric value")
# Check the type of timestamp that we have
timestamp = tuple_data[0: last_comma_index]
try:
timestamp = int(timestamp)
timestamp_is_int = True
timestamp_is_timestamp = False
except ValueError:
timestamp_is_int = False
if not timestamp_is_int:
try:
timestamp = timestamp.strip()
timestamp_is_timestamp = True
except ValueError:
timestamp_is_timestamp = False
# Make sure that the timestamps in the file (not just this dimension or case) are consistent
if not timestamp_is_timestamp and not timestamp_is_int:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " contains a tuple that has an invalid timestamp '" +
timestamp + "'")
if previous_timestamp_was_int is not None and previous_timestamp_was_int and \
not timestamp_is_int:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) +
" contains tuples where the timestamp format is inconsistent")
if previous_timestamp_was_timestamp is not None and previous_timestamp_was_timestamp and \
not timestamp_is_timestamp:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) +
" contains tuples where the timestamp format is inconsistent")
# Store the values
timestamps_for_dimension += [timestamp]
values_for_dimension += [value]
# If this was our first tuple then we store the type of timestamp we had
if previous_timestamp_was_timestamp is None and timestamp_is_timestamp:
previous_timestamp_was_timestamp = True
previous_timestamp_was_int = False
if previous_timestamp_was_int is None and timestamp_is_int:
previous_timestamp_was_timestamp = False
previous_timestamp_was_int = True
# See if we should add the data for this dimension
if not has_another_value:
if len(instance_list) < (this_line_num_dimensions + 1):
instance_list.append([])
if timestamp_is_timestamp:
timestamps_for_dimension = pd.DatetimeIndex(timestamps_for_dimension)
instance_list[this_line_num_dimensions].append(pd.Series(index=timestamps_for_dimension
, data=values_for_dimension))
this_line_num_dimensions += 1
timestamps_for_dimension = []
values_for_dimension = []
elif has_another_value:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " ends with a ',' that is not followed by another tuple")
elif has_another_dimension and class_labels:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " ends with a ':' while it should list a class value")
elif has_another_dimension and not class_labels:
if len(instance_list) < (this_line_num_dimensions + 1):
instance_list.append([])
instance_list[this_line_num_dimensions].append(pd.Series(dtype=np.float32))
this_line_num_dimensions += 1
num_dimensions = this_line_num_dimensions
# If this is the 1st line of data we have seen then note the dimensions
if not has_another_value and not has_another_dimension:
if num_dimensions is None:
num_dimensions = this_line_num_dimensions
if num_dimensions != this_line_num_dimensions:
raise TsFileParseException("line " + str(line_num + 1) +
" does not have the same number of dimensions as the previous line of data")
                        # Check that we are not expecting any more data and, if not, store what has been processed above
if has_another_value:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " ends with a ',' that is not followed by another tuple")
elif has_another_dimension and class_labels:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " ends with a ':' while it should list a class value")
elif has_another_dimension and not class_labels:
if len(instance_list) < (this_line_num_dimensions + 1):
instance_list.append([])
instance_list[this_line_num_dimensions].append(pd.Series())
this_line_num_dimensions += 1
num_dimensions = this_line_num_dimensions
# If this is the 1st line of data we have seen then note the dimensions
if not has_another_value and num_dimensions != this_line_num_dimensions:
raise TsFileParseException("line " + str(line_num + 1) +
" does not have the same number of dimensions as the previous line of data")
# Check if we should have class values, and if so that they are contained in those listed in the metadata
if class_labels and len(class_val_list) == 0:
raise TsFileParseException("the cases have no associated class values")
else:
dimensions = line.split(":")
# If first row then note the number of dimensions (that must be the same for all cases)
if is_first_case:
num_dimensions = len(dimensions)
if class_labels:
num_dimensions -= 1
for dim in range(0, num_dimensions):
instance_list.append([])
is_first_case = False
                        # See how many dimensions the case represented in this line has
this_line_num_dimensions = len(dimensions)
if class_labels:
this_line_num_dimensions -= 1
# All dimensions should be included for all series, even if they are empty
if this_line_num_dimensions != num_dimensions:
raise TsFileParseException("inconsistent number of dimensions")
# Process the data for each dimension
for dim in range(0, num_dimensions):
dimension = dimensions[dim].strip()
if dimension:
# data_series = dimension.split(",")
# data_series = [float(i) for i in data_series]
# instance_list[dim].append(pd.Series(data_series))
# instance_list[dim].append(np.array(dimensions[dim].strip().split(','), dtype=np.float32))
instance_list[dim].append(np.array(dimensions[dim].split(','), dtype=np.float32))
# instance_list[dim].append(np.fromiter(dimensions[dim].strip().split(','), dtype=np.float32))
else:
# instance_list[dim].append(pd.Series())
instance_list[dim].append([])
if class_labels:
class_val_list.append(dimensions[num_dimensions].strip())
line_num += 1
# Check that the file was not empty
if line_num:
# Check that the file contained both metadata and data
if metadata_started and not (has_problem_name_tag and has_timestamps_tag and has_univariate_tag and
has_class_labels_tag and has_data_tag):
raise TsFileParseException("metadata incomplete")
elif metadata_started and not data_started:
raise TsFileParseException("file contained metadata but no data")
elif metadata_started and data_started and len(instance_list) == 0:
raise TsFileParseException("file contained metadata but no data")
# # Create a DataFrame from the data parsed above
# data = pd.DataFrame(dtype=np.float32)
# for dim in range(0, num_dimensions):
# data['dim_' + str(dim)] = instance_list[dim]
# # Check if we should return any associated class labels separately
# if class_labels:
# if return_separate_X_and_y:
# return data, np.asarray(class_val_list)
# else:
# data['class_vals'] = pd.Series(class_val_list)
# return data
# else:
# return data
# Create a numpy array
        #instance_list has a shape of (dimensions, nb_samples, seq_length)
        #for the NATOPS training set it would be (24, 180, 51)
        #convert the python list to a numpy array and transpose the first 2 dimensions -> (180, 24, 51)
data_array = np.asarray(instance_list).transpose(1,0,2)
y = np.asarray(class_val_list)
return data_array, y
else:
raise TsFileParseException("empty file")
fname_train = path_data/f'{dsname}/{dsname}_TRAIN.ts'
fname_train
train_x_ts, train_y_ts = load_from_tsfile_to_array(fname_train)
train_x_ts.shape, train_y_ts.shape
train_x_ts[1].shape
train_x_ts[10][0][30]
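# A further self-contained check of the parser on a tiny synthetic .ts file written to the
# working directory. The file contents below are an assumption of the expected .ts format
# (no timestamps, univariate, two labelled cases), not taken from a real UCR dataset.
_ts_txt = '\n'.join([
    '@problemName Toy', '@timeStamps false', '@univariate true',
    '@classLabel true a b', '@data',
    '1.0,2.0,3.0:a', '4.0,5.0,6.0:b'])
_tmp = Path('toy_parser_check.ts')
_tmp.write_text(_ts_txt)
_toy_x, _toy_y = load_from_tsfile_to_array(_tmp)
print(_toy_x.shape, _toy_y)  # expected: (2, 1, 3) ['a' 'b']
_tmp.unlink()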
#export
def get_UCR_univariate_list():
return [
'ACSF1', 'Adiac', 'AllGestureWiimoteX', 'AllGestureWiimoteY',
'AllGestureWiimoteZ', 'ArrowHead', 'Beef', 'BeetleFly', 'BirdChicken',
'BME', 'Car', 'CBF', 'Chinatown', 'ChlorineConcentration',
'CinCECGtorso', 'Coffee', 'Computers', 'CricketX', 'CricketY',
'CricketZ', 'Crop', 'DiatomSizeReduction',
'DistalPhalanxOutlineAgeGroup', 'DistalPhalanxOutlineCorrect',
'DistalPhalanxTW', 'DodgerLoopDay', 'DodgerLoopGame',
'DodgerLoopWeekend', 'Earthquakes', 'ECG200', 'ECG5000', 'ECGFiveDays',
'ElectricDevices', 'EOGHorizontalSignal', 'EOGVerticalSignal',
'EthanolLevel', 'FaceAll', 'FaceFour', 'FacesUCR', 'FiftyWords',
'Fish', 'FordA', 'FordB', 'FreezerRegularTrain', 'FreezerSmallTrain',
'Fungi', 'GestureMidAirD1', 'GestureMidAirD2', 'GestureMidAirD3',
'GesturePebbleZ1', 'GesturePebbleZ2', 'GunPoint', 'GunPointAgeSpan',
'GunPointMaleVersusFemale', 'GunPointOldVersusYoung', 'Ham',
'HandOutlines', 'Haptics', 'Herring', 'HouseTwenty', 'InlineSkate',
'InsectEPGRegularTrain', 'InsectEPGSmallTrain', 'InsectWingbeatSound',
'ItalyPowerDemand', 'LargeKitchenAppliances', 'Lightning2',
'Lightning7', 'Mallat', 'Meat', 'MedicalImages', 'MelbournePedestrian',
'MiddlePhalanxOutlineAgeGroup', 'MiddlePhalanxOutlineCorrect',
'MiddlePhalanxTW', 'MixedShapes', 'MixedShapesSmallTrain',
'MoteStrain', 'NonInvasiveFetalECGThorax1',
'NonInvasiveFetalECGThorax2', 'OliveOil', 'OSULeaf',
'PhalangesOutlinesCorrect', 'Phoneme', 'PickupGestureWiimoteZ',
'PigAirwayPressure', 'PigArtPressure', 'PigCVP', 'PLAID', 'Plane',
'PowerCons', 'ProximalPhalanxOutlineAgeGroup',
'ProximalPhalanxOutlineCorrect', 'ProximalPhalanxTW',
'RefrigerationDevices', 'Rock', 'ScreenType', 'SemgHandGenderCh2',
'SemgHandMovementCh2', 'SemgHandSubjectCh2', 'ShakeGestureWiimoteZ',
'ShapeletSim', 'ShapesAll', 'SmallKitchenAppliances', 'SmoothSubspace',
'SonyAIBORobotSurface1', 'SonyAIBORobotSurface2', 'StarlightCurves',
'Strawberry', 'SwedishLeaf', 'Symbols', 'SyntheticControl',
'ToeSegmentation1', 'ToeSegmentation2', 'Trace', 'TwoLeadECG',
'TwoPatterns', 'UMD', 'UWaveGestureLibraryAll', 'UWaveGestureLibraryX',
'UWaveGestureLibraryY', 'UWaveGestureLibraryZ', 'Wafer', 'Wine',
'WordSynonyms', 'Worms', 'WormsTwoClass', 'Yoga'
]
def get_UCR_multivariate_list():
return [
'ArticularyWordRecognition', 'AtrialFibrillation', 'BasicMotions',
'CharacterTrajectories', 'Cricket', 'DuckDuckGeese', 'EigenWorms',
'Epilepsy', 'EthanolConcentration', 'ERing', 'FaceDetection',
'FingerMovements', 'HandMovementDirection', 'Handwriting', 'Heartbeat',
'JapaneseVowels', 'Libras', 'LSST', 'InsectWingbeat', 'MotorImagery',
'NATOPS', 'PenDigits', 'PEMS-SF', 'PhonemeSpectra', 'RacketSports',
'SelfRegulationSCP1', 'SelfRegulationSCP2', 'SpokenArabicDigits',
'StandWalkJump', 'UWaveGestureLibrary'
]
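# Sanity checks on the name lists above: 30 multivariate UEA datasets are listed, and a couple
# of well-known names are present (counts and names taken from the lists themselves).
assert len(get_UCR_multivariate_list()) == 30
assert 'NATOPS' in get_UCR_multivariate_list()
assert 'ECG200' in get_UCR_univariate_list()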
# hide
_camel_re1 = re.compile('(.)([A-Z][a-z]+)')
_camel_re2 = re.compile('([a-z0-9])([A-Z])')
def camel2snake(name):
"Convert CamelCase to snake_case"
s1 = re.sub(_camel_re1, r'\1_\2', name)
return re.sub(_camel_re2, r'\1_\2', s1).lower()
def camel2capitalsnake(name):
return camel2snake(name).upper()
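# Illustrative checks of the two helpers above; the expected strings follow directly from the
# regexes used in camel2snake.
assert camel2snake('ArticularyWordRecognition') == 'articulary_word_recognition'
assert camel2capitalsnake('GunPointAgeSpan') == 'GUN_POINT_AGE_SPAN'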
# hide
# urls_ts = [f'{camel2capitalsnake(n)} = {n}.zip' for n in get_UCR_multivariate_list()]
# urls_ts
#hide
# MULTI_ARTICULARY_WORD_RECOGNITION = f'{URL}ArticularyWordRecognition.zip',
# MULTI_ATRIAL_FIBRILLATION = f'{URL}AtrialFibrillation.zip',
# MULTI_BASIC_MOTIONS = f'{URL}BasicMotions.zip',
# MULTI_CHARACTER_TRAJECTORIES = f'{URL}CharacterTrajectories.zip',
# MULTI_CRICKET = f'{URL}Cricket.zip',
# MULTI_DUCK_DUCK_GEESE = f'{URL}DuckDuckGeese.zip',
# MULTI_EIGEN_WORMS = f'{URL}EigenWorms.zip',
# MULTI_EPILEPSY = f'{URL}Epilepsy.zip',
# MULTI_ETHANOL_CONCENTRATION = f'{URL}EthanolConcentration.zip',
# MULTI_ERING = f'{URL}ERing.zip',
# MULTI_FACE_DETECTION = f'{URL}FaceDetection.zip',
# MULTI_FINGER_MOVEMENTS = f'{URL}FingerMovements.zip',
# MULTI_HAND_MOVEMENT_DIRECTION = f'{URL}HandMovementDirection.zip',
# MULTI_HANDWRITING = f'{URL}Handwriting.zip',
# MULTI_HEARTBEAT = f'{URL}Heartbeat.zip',
# MULTI_JAPANESE_VOWELS = f'{URL}JapaneseVowels.zip',
# MULTI_LIBRAS = f'{URL}Libras.zip',
# MULTI_LSST = f'{URL}LSST.zip',
# MULTI_INSECT_WINGBEAT = f'{URL}InsectWingbeat.zip',
# MULTI_MOTOR_IMAGERY = f'{URL}MotorImagery.zip',
# MULTI_NATOPS = f'{URL}NATOPS.zip',
# MULTI_PEN_DIGITS = f'{URL}PenDigits.zip',
# MULTI_PEMS-SF = f'{URL}PEMS-SF.zip',
# MULTI_PHONEME_SPECTRA = f'{URL}PhonemeSpectra.zip',
# MULTI_RACKET_SPORTS = f'{URL}RacketSports.zip',
# MULTI_SELF_REGULATION_SCP1 = f'{URL}SelfRegulationSCP1.zip',
# MULTI_SELF_REGULATION_SCP2 = f'{URL}SelfRegulationSCP2.zip',
# MULTI_SPOKEN_ARABIC_DIGITS = f'{URL}SpokenArabicDigits.zip',
# MULTI_STAND_WALK_JUMP = f'{URL}StandWalkJump.zip',
# MULTI_U_WAVE_GESTURE_LIBRARY = f'{URL}UWaveGestureLibrary'
# hide
# urls_ts = [f'{camel2capitalsnake(n)} = {n}.zip' for n in get_UCR_univariate_list()]
# urls_ts
# hide
# UNI_ACSF1 = f'{URL}ACSF1.zip,
# UNI_ADIAC = f'{URL}Adiac.zip,
# UNI_ALL_GESTURE_WIIMOTE_X = f'{URL}AllGestureWiimoteX.zip,
# UNI_ALL_GESTURE_WIIMOTE_Y = f'{URL}AllGestureWiimoteY.zip,
# UNI_ALL_GESTURE_WIIMOTE_Z = f'{URL}AllGestureWiimoteZ.zip,
# UNI_ARROW_HEAD = f'{URL}ArrowHead.zip,
# UNI_BEEF = f'{URL}Beef.zip,
# UNI_BEETLE_FLY = f'{URL}BeetleFly.zip,
# UNI_BIRD_CHICKEN = f'{URL}BirdChicken.zip,
# UNI_BME = f'{URL}BME.zip,
# UNI_CAR = f'{URL}Car.zip,
# UNI_CBF = f'{URL}CBF.zip,
# UNI_CHINATOWN = f'{URL}Chinatown.zip,
# UNI_CHLORINE_CONCENTRATION = f'{URL}ChlorineConcentration.zip,
# UNI_CIN_CEC_GTORSO = f'{URL}CinCECGtorso.zip,
# UNI_COFFEE = f'{URL}Coffee.zip,
# UNI_COMPUTERS = f'{URL}Computers.zip,
# UNI_CRICKET_X = f'{URL}CricketX.zip,
# UNI_CRICKET_Y = f'{URL}CricketY.zip,
# UNI_CRICKET_Z = f'{URL}CricketZ.zip,
# UNI_CROP = f'{URL}Crop.zip,
# UNI_DIATOM_SIZE_REDUCTION = f'{URL}DiatomSizeReduction.zip,
# UNI_DISTAL_PHALANX_OUTLINE_AGE_GROUP = f'{URL}DistalPhalanxOutlineAgeGroup.zip,
# UNI_DISTAL_PHALANX_OUTLINE_CORRECT = f'{URL}DistalPhalanxOutlineCorrect.zip,
# UNI_DISTAL_PHALANX_TW = f'{URL}DistalPhalanxTW.zip,
# UNI_DODGER_LOOP_DAY = f'{URL}DodgerLoopDay.zip,
# UNI_DODGER_LOOP_GAME = f'{URL}DodgerLoopGame.zip,
# UNI_DODGER_LOOP_WEEKEND = f'{URL}DodgerLoopWeekend.zip,
# UNI_EARTHQUAKES = f'{URL}Earthquakes.zip,
# UNI_ECG200 = f'{URL}ECG200.zip,
# UNI_ECG5000 = f'{URL}ECG5000.zip,
# UNI_ECG_FIVE_DAYS = f'{URL}ECGFiveDays.zip,
# UNI_ELECTRIC_DEVICES = f'{URL}ElectricDevices.zip,
# UNI_EOG_HORIZONTAL_SIGNAL = f'{URL}EOGHorizontalSignal.zip,
# UNI_EOG_VERTICAL_SIGNAL = f'{URL}EOGVerticalSignal.zip,
# UNI_ETHANOL_LEVEL = f'{URL}EthanolLevel.zip,
# UNI_FACE_ALL = f'{URL}FaceAll.zip,
# UNI_FACE_FOUR = f'{URL}FaceFour.zip,
# UNI_FACES_UCR = f'{URL}FacesUCR.zip,
# UNI_FIFTY_WORDS = f'{URL}FiftyWords.zip,
# UNI_FISH = f'{URL}Fish.zip,
# UNI_FORD_A = f'{URL}FordA.zip,
# UNI_FORD_B = f'{URL}FordB.zip,
# UNI_FREEZER_REGULAR_TRAIN = f'{URL}FreezerRegularTrain.zip,
# UNI_FREEZER_SMALL_TRAIN = f'{URL}FreezerSmallTrain.zip,
# UNI_FUNGI = f'{URL}Fungi.zip,
# UNI_GESTURE_MID_AIR_D1 = f'{URL}GestureMidAirD1.zip,
# UNI_GESTURE_MID_AIR_D2 = f'{URL}GestureMidAirD2.zip,
# UNI_GESTURE_MID_AIR_D3 = f'{URL}GestureMidAirD3.zip,
# UNI_GESTURE_PEBBLE_Z1 = f'{URL}GesturePebbleZ1.zip,
# UNI_GESTURE_PEBBLE_Z2 = f'{URL}GesturePebbleZ2.zip,
# UNI_GUN_POINT = f'{URL}GunPoint.zip,
# UNI_GUN_POINT_AGE_SPAN = f'{URL}GunPointAgeSpan.zip,
# UNI_GUN_POINT_MALE_VERSUS_FEMALE = f'{URL}GunPointMaleVersusFemale.zip,
# UNI_GUN_POINT_OLD_VERSUS_YOUNG = f'{URL}GunPointOldVersusYoung.zip,
# UNI_HAM = f'{URL}Ham.zip,
# UNI_HAND_OUTLINES = f'{URL}HandOutlines.zip,
# UNI_HAPTICS = f'{URL}Haptics.zip,
# UNI_HERRING = f'{URL}Herring.zip,
# UNI_HOUSE_TWENTY = f'{URL}HouseTwenty.zip,
# UNI_INLINE_SKATE = f'{URL}InlineSkate.zip,
# UNI_INSECT_EPG_REGULAR_TRAIN = f'{URL}InsectEPGRegularTrain.zip,
# UNI_INSECT_EPG_SMALL_TRAIN = f'{URL}InsectEPGSmallTrain.zip,
# UNI_INSECT_WINGBEAT_SOUND = f'{URL}InsectWingbeatSound.zip,
# UNI_ITALY_POWER_DEMAND = f'{URL}ItalyPowerDemand.zip,
# UNI_LARGE_KITCHEN_APPLIANCES = f'{URL}LargeKitchenAppliances.zip,
# UNI_LIGHTNING2 = f'{URL}Lightning2.zip,
# UNI_LIGHTNING7 = f'{URL}Lightning7.zip,
# UNI_MALLAT = f'{URL}Mallat.zip,
# UNI_MEAT = f'{URL}Meat.zip,
# UNI_MEDICAL_IMAGES = f'{URL}MedicalImages.zip,
# UNI_MELBOURNE_PEDESTRIAN = f'{URL}MelbournePedestrian.zip,
# UNI_MIDDLE_PHALANX_OUTLINE_AGE_GROUP = f'{URL}MiddlePhalanxOutlineAgeGroup.zip,
# UNI_MIDDLE_PHALANX_OUTLINE_CORRECT = f'{URL}MiddlePhalanxOutlineCorrect.zip,
# UNI_MIDDLE_PHALANX_TW = f'{URL}MiddlePhalanxTW.zip,
# UNI_MIXED_SHAPES = f'{URL}MixedShapes.zip,
# UNI_MIXED_SHAPES_SMALL_TRAIN = f'{URL}MixedShapesSmallTrain.zip,
# UNI_MOTE_STRAIN = f'{URL}MoteStrain.zip,
# UNI_NON_INVASIVE_FETAL_ECG_THORAX1 = f'{URL}NonInvasiveFetalECGThorax1.zip,
# UNI_NON_INVASIVE_FETAL_ECG_THORAX2 = f'{URL}NonInvasiveFetalECGThorax2.zip,
# UNI_OLIVE_OIL = f'{URL}OliveOil.zip,
# UNI_OSU_LEAF = f'{URL}OSULeaf.zip,
# UNI_PHALANGES_OUTLINES_CORRECT = f'{URL}PhalangesOutlinesCorrect.zip,
# UNI_PHONEME = f'{URL}Phoneme.zip,
# UNI_PICKUP_GESTURE_WIIMOTE_Z = f'{URL}PickupGestureWiimoteZ.zip,
# UNI_PIG_AIRWAY_PRESSURE = f'{URL}PigAirwayPressure.zip,
# UNI_PIG_ART_PRESSURE = f'{URL}PigArtPressure.zip,
# UNI_PIG_CVP = f'{URL}PigCVP.zip,
# UNI_PLAID = f'{URL}PLAID.zip,
# UNI_PLANE = f'{URL}Plane.zip,
# UNI_POWER_CONS = f'{URL}PowerCons.zip,
# UNI_PROXIMAL_PHALANX_OUTLINE_AGE_GROUP = f'{URL}ProximalPhalanxOutlineAgeGroup.zip,
# UNI_PROXIMAL_PHALANX_OUTLINE_CORRECT = f'{URL}ProximalPhalanxOutlineCorrect.zip,
# UNI_PROXIMAL_PHALANX_TW = f'{URL}ProximalPhalanxTW.zip,
# UNI_REFRIGERATION_DEVICES = f'{URL}RefrigerationDevices.zip,
# UNI_ROCK = f'{URL}Rock.zip,
# UNI_SCREEN_TYPE = f'{URL}ScreenType.zip,
# UNI_SEMG_HAND_GENDER_CH2 = f'{URL}SemgHandGenderCh2.zip,
# UNI_SEMG_HAND_MOVEMENT_CH2 = f'{URL}SemgHandMovementCh2.zip,
# UNI_SEMG_HAND_SUBJECT_CH2 = f'{URL}SemgHandSubjectCh2.zip,
# UNI_SHAKE_GESTURE_WIIMOTE_Z = f'{URL}ShakeGestureWiimoteZ.zip,
# UNI_SHAPELET_SIM = f'{URL}ShapeletSim.zip,
# UNI_SHAPES_ALL = f'{URL}ShapesAll.zip,
# UNI_SMALL_KITCHEN_APPLIANCES = f'{URL}SmallKitchenAppliances.zip,
# UNI_SMOOTH_SUBSPACE = f'{URL}SmoothSubspace.zip,
# UNI_SONY_AIBO_ROBOT_SURFACE1 = f'{URL}SonyAIBORobotSurface1.zip,
# UNI_SONY_AIBO_ROBOT_SURFACE2 = f'{URL}SonyAIBORobotSurface2.zip,
# UNI_STARLIGHT_CURVES = f'{URL}StarlightCurves.zip,
# UNI_STRAWBERRY = f'{URL}Strawberry.zip,
# UNI_SWEDISH_LEAF = f'{URL}SwedishLeaf.zip,
# UNI_SYMBOLS = f'{URL}Symbols.zip,
# UNI_SYNTHETIC_CONTROL = f'{URL}SyntheticControl.zip,
# UNI_TOE_SEGMENTATION1 = f'{URL}ToeSegmentation1.zip,
# UNI_TOE_SEGMENTATION2 = f'{URL}ToeSegmentation2.zip,
# UNI_TRACE = f'{URL}Trace.zip,
# UNI_TWO_LEAD_ECG = f'{URL}TwoLeadECG.zip,
# UNI_TWO_PATTERNS = f'{URL}TwoPatterns.zip,
# UNI_UMD = f'{URL}UMD.zip,
# UNI_U_WAVE_GESTURE_LIBRARY_ALL = f'{URL}UWaveGestureLibraryAll.zip,
# UNI_U_WAVE_GESTURE_LIBRARY_X = f'{URL}UWaveGestureLibraryX.zip,
# UNI_U_WAVE_GESTURE_LIBRARY_Y = f'{URL}UWaveGestureLibraryY.zip,
# UNI_U_WAVE_GESTURE_LIBRARY_Z = f'{URL}UWaveGestureLibraryZ.zip,
# UNI_WAFER = f'{URL}Wafer.zip,
# UNI_WINE = f'{URL}Wine.zip,
# UNI_WORD_SYNONYMS = f'{URL}WordSynonyms.zip,
# UNI_WORMS = f'{URL}Worms.zip,
# UNI_WORMS_TWO_CLASS = f'{URL}WormsTwoClass.zip,
# UNI_YOGA = f'{URL}Yoga.zip
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
# notebook2script()
notebook2script(fname='80_timeseries_data.ipynb')
###Output
Converted 80_timeseries_data.ipynb.
###Markdown
Timeseries Data> Basic functions to read timeseries files like `.arff` and `.ts` files.
###Code
#export
class TSData():
"Class that loads .arff (soon .ts) files and returns a tuple (data.x , self.y)"
"self.x is a list of 2D array with a shape (n_samples, nb_channels, sequence_length) "
"self.y is a 1D array as y (i.e. label) with a shape (n_samples)"
"for the NATOPS_Train.arff file, the result will be : x(180, 24, 51) and y(180)"
# def __init__(self):
# self.x = self.y = self.dsname = self.fnames = [],[],[],[]
def __init__(self, fnames, has_targets=True, fill_missing='NaN'):
# self.x = self.y = self.dsname = [],[],[]
self.x = []
self.y = []
self.dsname = []
self.fnames = fnames
self.has_targets = has_targets
self.fill_missings = fill_missing
def __repr__(self): return f"{self.__class__.__name__}:\n Datasets names (concatenated): {self.dsname}\n Filenames: {self.fnames}\n Data shape: {self.x.shape}\n Targets shape: {self.y.shape}\n Nb Samples: {self.x.shape[0]}\n Nb Channels: {self.x.shape[1]}\n Sequence Length: {self.x.shape[2]}"
def get_x(self, as_list=True): return(list(self.x))
def get_y(self): return(self.y)
def get_items(self): return [(item, str(label)) for (item, label) in zip(list(self.x), self.y)]
def get_lists(self): return (list(self.x), self.y)
def __getitem__(self, i): return (self.x[i], str(self.y[i]))
def get_nb_samples(self): return self.x.shape[0]
def sample(self, cut):
n=self.x.shape[0]
rand_idx = L(int(i) for i in torch.randperm(n))
idxs = rand_idx[:cut]
return [(self.x[i], str(self.y[i])) for i in idxs]
@property
def sizes(self): return (self.x.shape, self.y.shape)
@property
def n_channels(self): return (self.x.shape[1])
def _load_arff(self, fname, has_targets=True, fill_missing='NaN'):
"load an .arff file and return a tupple of 2 numpy arrays: "
"x : array with a shape (n_samples, nb_channels, sequence_length)"
"y : array with a shape (n_samples)"
"for the NATOPS_Train.arff the result will be : x(180, 24, 51) and y(180)"
instance_list = []
class_val_list = []
data_started = False
is_multi_variate = False
is_first_case = True
with open(fname, 'r', encoding="utf8") as f:
for line in f:
if line.strip():
if is_multi_variate is False and "@attribute" in line.lower() and "relational" in line.lower():
is_multi_variate = True
if "@data" in line.lower():
data_started = True
continue
                    # if the '@data' tag has been found, the header information has been cleared and now data can be loaded
if data_started:
line = line.replace("?", fill_missing)
if is_multi_variate:
if has_targets:
line, class_val = line.split("',")
class_val_list.append(class_val.strip())
dimensions = line.split("\\n")
dimensions[0] = dimensions[0].replace("'", "")
if is_first_case:
for d in range(len(dimensions)):
instance_list.append([])
is_first_case = False
for dim in range(len(dimensions)):
instance_list[dim].append(np.array(dimensions[dim].split(','), dtype=np.float32))
# instance_list[dim].append(np.fromiter(dimensions[dim].split(','), dtype=np.float32))
else:
if is_first_case:
instance_list.append([])
is_first_case = False
line_parts = line.split(",")
if has_targets:
instance_list[0].append(np.array(line_parts[:len(line_parts)-1], dtype=np.float32))
class_val_list.append(line_parts[-1].strip())
else:
instance_list[0].append(np.array(line_parts[:len(line_parts)-1], dtype=np.float32))
        #instance_list has a shape of (dimensions, nb_samples, seq_length)
        #for the NATOPS_Train.arff it would be (24, 180, 51)
        #convert the python list to a numpy array and transpose the first 2 dimensions -> (180, 24, 51)
x = np.asarray(instance_list).transpose(1,0,2)
if has_targets:
y = np.asarray(class_val_list)
return x, y
else:
            return x, [None] * x.shape[0]
@classmethod
def from_arff(self, fnames, has_targets=True, fill_missing='NaN'):
"load an .arff file and return a tupple of 2 numpy arrays: "
"x : array with a shape (n_samples, nb_channels, sequence_length)"
"y : array with a shape (n_samples)"
"for the NATOPS_Train.arff the result will be : x(180, 24, 51) and y(180)"
data = self(fnames, has_targets=has_targets, fill_missing=fill_missing)
if isinstance(fnames, list):
data.x = []
data.y = []
data.dsname = []
data.fnames = []
xs,ys = [],[]
for i, fn in enumerate(fnames):
x,y = data._load_arff(fn, has_targets=has_targets, fill_missing=fill_missing)
xs.append(x)
ys.append(y)
data.fnames.append(fn)
data.dsname.append(fn.stem)
data.x = np.concatenate(xs)
data.y = np.concatenate(ys)
else:
data.fnames.append(fnames)
data.dsname.append(fnames.stem)
            data.x, data.y = data._load_arff(fnames, has_targets=has_targets, fill_missing=fill_missing)
return data
# add_docs(TSData,
# from_arff="read one or serveral arff files and concatenate them, and returns a TSData object")
_docs=dict(
    from_arff="read one or several arff files, concatenate them, and return a TSData object",
    get_items="return a list of tuples; each tuple pairs a time series (numpy.ndarray) with its label (string)",
    get_x="return the list of time series (without labels)",
    get_y="return the list of labels corresponding to each time series",
    sizes="return the timeseries shape and the labels shape (labels list size)",
    n_channels="return the number of channels of the time series. For `arff` files this is called `dimension`. In the case of NATOPS_Train.arff, it returns 24")
show_doc(TSData.from_arff)
show_doc(TSData.get_items)
show_doc(TSData.n_channels)
#export
def get_ts_items(fnames):
    'get_ts_items returns a list of tuples; each tuple pairs a time series (numpy.ndarray) with its label (string)'
data = TSData.from_arff(fnames)
return data.get_items()
show_doc(get_ts_items)
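# Hedged usage sketch for `get_ts_items` (assumes the NATOPS arff files have already been
# downloaded, e.g. with `unzip_data(URLs_TS.NATOPS)` as shown later in this notebook, and
# extracted under the default data folder).
_natops = Config().data/'NATOPS'
if (_natops/'NATOPS_TRAIN.arff').exists():
    _items = get_ts_items([_natops/'NATOPS_TRAIN.arff', _natops/'NATOPS_TEST.arff'])
    print(len(_items), _items[0][0].shape, _items[0][1])  # e.g. 360 (24, 51) and a label such as '2.0'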
# hide
def check_ext(fnames, ext):
    "Append `.ext` to any entry of `fnames` that has no suffix, and return the result"
    if isinstance(fnames, list):
        fnames = [fn if (fn.suffix != '') else Path(f'{fn}.{ext}') for fn in fnames]
    else:
        fnames = fnames if (fnames.suffix != '') else Path(f'{fnames}.{ext}')
    return fnames
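# Quick checks of the (fixed) helper above: filenames without a suffix get the extension appended,
# filenames that already have one are returned unchanged.
assert check_ext(Path('NATOPS_TRAIN'), 'arff') == Path('NATOPS_TRAIN.arff')
assert check_ext(Path('NATOPS_TRAIN.arff'), 'arff') == Path('NATOPS_TRAIN.arff')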
###Output
_____no_output_____
###Markdown
Plot Timeseries
###Code
#export
def show_timeseries(ts, ctx=None, title=None, chs=None, leg=True, figsize=None, linewidth=3, linestyle='solid', color='orange', **kwargs):
"""
Plot a timeseries.
Args:
title : usually the class of the timeseries
ts : timeseries. It should have a shape of (nb_channels, sequence_length)
chs : array representing a list of channels to plot
leg : Display or not a legend
"""
fig = None
if ctx is None: fig, ctx = plt.subplots(figsize=figsize, **kwargs)
n_channels = ts.shape[0]
t = range(ts.shape[1])
chs_max = max(chs) if chs else 0
channels = chs if (chs and (chs_max < ts.shape[0])) else range(ts.shape[0])
for ch in channels:
if n_channels==1:
ctx.plot(t, ts[ch], label='ch'+str(ch), linewidth=linewidth, color=color, linestyle=linestyle)
else:
ctx.plot(t, ts[ch], label='ch'+str(ch), linewidth=linewidth, linestyle=linestyle)
if leg: ctx.legend(loc='upper right', ncol=2, framealpha=0.5)
if title: ctx.set_title(title)
return fig
# return ctx
show_doc(show_timeseries)
path_data = Config().data
path_data
# export
def file_extract_at_filename(fname, dest):
"Extract `fname` to `dest`/`fname`.name folder using `tarfile` or `zipfile"
dest = Path(dest)/Path(fname).with_suffix('').name
# tarfile.open(fname, 'r:gz').extractall(dest)
fname = str(fname)
if fname.endswith('gz'): tarfile.open(fname, 'r:gz').extractall(dest)
elif fname.endswith('zip'): zipfile.ZipFile(fname ).extractall(dest)
else: raise Exception(f'Unrecognized archive: {fname}')
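# Illustration of the destination naming rule used above: an archive is extracted into a folder
# named after the file with its suffix stripped (the paths here are purely illustrative).
print(Path('data')/Path('NATOPS.zip').with_suffix('').name)  # -> data/NATOPS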
###Output
_____no_output_____
###Markdown
`file_extract_at_filename` is used by default in `unzip_data` to decompress the downloaded file into a folder that has the same name as the zip filename.
###Code
# export
def unzip_data(url, fname=None, dest=None, c_key='data', force_download=False):
"Download `url` to `fname` if `dest` doesn't exist, and un-compress to `dest`/`fname`.name folder ."
return untar_data(url, fname=fname, c_key=c_key, force_download=force_download, extract_func=file_extract_at_filename)
###Output
_____no_output_____
###Markdown
`unzip_data` downloads the archive at `url` and decompresses it into a folder that has the same name as the zip filename.
###Code
show_doc(unzip_data)
# export
class URLs_TS():
"Global constants for dataset and model URLs."
LOCAL_PATH = Path.cwd()
URL = 'http://www.timeseriesclassification.com/Downloads/'
# UCR multivariate datasets - Current Naming
ARTICULARY_WORD_RECOGNITION = f'{URL}ArticularyWordRecognition.zip'
ATRIAL_FIBRILLATION = f'{URL}AtrialFibrillation.zip'
BASIC_MOTIONS = f'{URL}BasicMotions.zip'
CHARACTER_TRAJECTORIES = f'{URL}CharacterTrajectories.zip'
CRICKET = f'{URL}Cricket.zip'
DUCK_DUCK_GEESE = f'{URL}DuckDuckGeese.zip'
EIGEN_WORMS = f'{URL}EigenWorms.zip'
EPILEPSY = f'{URL}Epilepsy.zip'
ETHANOL_CONCENTRATION = f'{URL}EthanolConcentration.zip'
ERING = f'{URL}ERing.zip'
FACE_DETECTION = f'{URL}FaceDetection.zip'
FINGER_MOVEMENTS = f'{URL}FingerMovements.zip'
HAND_MOVEMENT_DIRECTION = f'{URL}HandMovementDirection.zip'
HANDWRITING = f'{URL}Handwriting.zip'
HEARTBEAT = f'{URL}Heartbeat.zip'
JAPANESE_VOWELS = f'{URL}JapaneseVowels.zip'
LIBRAS = f'{URL}Libras.zip'
LSST = f'{URL}LSST.zip'
INSECT_WINGBEAT = f'{URL}InsectWingbeat.zip'
MOTOR_IMAGERY = f'{URL}MotorImagery.zip'
NATOPS = f'{URL}NATOPS.zip'
PEN_DIGITS = f'{URL}PenDigits.zip'
PEMS_SF = f'{URL}PEMS-SF.zip'
PHONEME_SPECTRA = f'{URL}PhonemeSpectra.zip'
RACKET_SPORTS = f'{URL}RacketSports.zip'
SELF_REGULATION_SCP1 = f'{URL}SelfRegulationSCP1.zip'
SELF_REGULATION_SCP2 = f'{URL}SelfRegulationSCP2.zip'
SPOKEN_ARABIC_DIGITS = f'{URL}SpokenArabicDigits.zip'
STAND_WALK_JUMP = f'{URL}StandWalkJump.zip'
UWAVE_GESTURE_LIBRARY = f'{URL}UWaveGestureLibrary.zip'
# UCR multivariate datasets - New Naming
# MULTI_ARTICULARY_WORD_RECOGNITION = f'{URL}ArticularyWordRecognition.zip'
# MULTI_ATRIAL_FIBRILLATION = f'{URL}AtrialFibrillation.zip'
# MULTI_BASIC_MOTIONS = f'{URL}BasicMotions.zip'
# MULTI_CHARACTER_TRAJECTORIES = f'{URL}CharacterTrajectories.zip'
# MULTI_CRICKET = f'{URL}Cricket.zip'
# MULTI_DUCK_DUCK_GEESE = f'{URL}DuckDuckGeese.zip'
# MULTI_EIGEN_WORMS = f'{URL}EigenWorms.zip'
# MULTI_EPILEPSY = f'{URL}Epilepsy.zip'
# MULTI_ETHANOL_CONCENTRATION = f'{URL}EthanolConcentration.zip'
# MULTI_ERING = f'{URL}ERing.zip'
# MULTI_FACE_DETECTION = f'{URL}FaceDetection.zip'
# MULTI_FINGER_MOVEMENTS = f'{URL}FingerMovements.zip'
# MULTI_HAND_MOVEMENT_DIRECTION = f'{URL}HandMovementDirection.zip'
# MULTI_HANDWRITING = f'{URL}Handwriting.zip'
# MULTI_HEARTBEAT = f'{URL}Heartbeat.zip'
# MULTI_JAPANESE_VOWELS = f'{URL}JapaneseVowels.zip'
# MULTI_LIBRAS = f'{URL}Libras.zip'
# MULTI_LSST = f'{URL}LSST.zip'
# MULTI_INSECT_WINGBEAT = f'{URL}InsectWingbeat.zip'
# MULTI_MOTOR_IMAGERY = f'{URL}MotorImagery.zip'
# MULTI_NATOPS = f'{URL}NATOPS.zip'
# MULTI_PEN_DIGITS = f'{URL}PenDigits.zip'
# MULTI_PEMS_SF = f'{URL}PEMS-SF.zip'
# MULTI_PHONEME_SPECTRA = f'{URL}PhonemeSpectra.zip'
# MULTI_RACKET_SPORTS = f'{URL}RacketSports.zip'
# MULTI_SELF_REGULATION_SCP1 = f'{URL}SelfRegulationSCP1.zip'
# MULTI_SELF_REGULATION_SCP2 = f'{URL}SelfRegulationSCP2.zip'
# MULTI_SPOKEN_ARABIC_DIGITS = f'{URL}SpokenArabicDigits.zip'
# MULTI_STAND_WALK_JUMP = f'{URL}StandWalkJump.zip'
# MULTI_U_WAVE_GESTURE_LIBRARY = f'{URL}UWaveGestureLibrary'
# UCR univariate datasets
UNI_ACSF1 = f'{URL}ACSF1.zip'
UNI_ADIAC = f'{URL}Adiac.zip'
UNI_ALL_GESTURE_WIIMOTE_X = f'{URL}AllGestureWiimoteX.zip'
UNI_ALL_GESTURE_WIIMOTE_Y = f'{URL}AllGestureWiimoteY.zip'
UNI_ALL_GESTURE_WIIMOTE_Z = f'{URL}AllGestureWiimoteZ.zip'
UNI_ARROW_HEAD = f'{URL}ArrowHead.zip'
UNI_BEEF = f'{URL}Beef.zip'
UNI_BEETLE_FLY = f'{URL}BeetleFly.zip'
UNI_BIRD_CHICKEN = f'{URL}BirdChicken.zip'
UNI_BME = f'{URL}BME.zip'
UNI_CAR = f'{URL}Car.zip'
UNI_CBF = f'{URL}CBF.zip'
UNI_CHINATOWN = f'{URL}Chinatown.zip'
UNI_CHLORINE_CONCENTRATION = f'{URL}ChlorineConcentration.zip'
UNI_CIN_CEC_GTORSO = f'{URL}CinCECGtorso.zip'
UNI_COFFEE = f'{URL}Coffee.zip'
UNI_COMPUTERS = f'{URL}Computers.zip'
UNI_CRICKET_X = f'{URL}CricketX.zip'
UNI_CRICKET_Y = f'{URL}CricketY.zip'
UNI_CRICKET_Z = f'{URL}CricketZ.zip'
UNI_CROP = f'{URL}Crop.zip'
UNI_DIATOM_SIZE_REDUCTION = f'{URL}DiatomSizeReduction.zip'
UNI_DISTAL_PHALANX_OUTLINE_AGE_GROUP= f'{URL}DistalPhalanxOutlineAgeGroup.zip'
UNI_DISTAL_PHALANX_OUTLINE_CORRECT = f'{URL}DistalPhalanxOutlineCorrect.zip'
UNI_DISTAL_PHALANX_TW = f'{URL}DistalPhalanxTW.zip'
UNI_DODGER_LOOP_DAY = f'{URL}DodgerLoopDay.zip'
UNI_DODGER_LOOP_GAME = f'{URL}DodgerLoopGame.zip'
UNI_DODGER_LOOP_WEEKEND = f'{URL}DodgerLoopWeekend.zip'
UNI_EARTHQUAKES = f'{URL}Earthquakes.zip'
UNI_ECG200 = f'{URL}ECG200.zip'
UNI_ECG5000 = f'{URL}ECG5000.zip'
UNI_ECG_FIVE_DAYS = f'{URL}ECGFiveDays.zip'
UNI_ELECTRIC_DEVICES = f'{URL}ElectricDevices.zip'
UNI_EOG_HORIZONTAL_SIGNAL = f'{URL}EOGHorizontalSignal.zip'
UNI_EOG_VERTICAL_SIGNAL = f'{URL}EOGVerticalSignal.zip'
UNI_ETHANOL_LEVEL = f'{URL}EthanolLevel.zip'
UNI_FACE_ALL = f'{URL}FaceAll.zip'
UNI_FACE_FOUR = f'{URL}FaceFour.zip'
UNI_FACES_UCR = f'{URL}FacesUCR.zip'
UNI_FIFTY_WORDS = f'{URL}FiftyWords.zip'
UNI_FISH = f'{URL}Fish.zip'
UNI_FORD_A = f'{URL}FordA.zip'
UNI_FORD_B = f'{URL}FordB.zip'
UNI_FREEZER_REGULAR_TRAIN = f'{URL}FreezerRegularTrain.zip'
UNI_FREEZER_SMALL_TRAIN = f'{URL}FreezerSmallTrain.zip'
UNI_FUNGI = f'{URL}Fungi.zip'
UNI_GESTURE_MID_AIR_D1 = f'{URL}GestureMidAirD1.zip'
UNI_GESTURE_MID_AIR_D2 = f'{URL}GestureMidAirD2.zip'
UNI_GESTURE_MID_AIR_D3 = f'{URL}GestureMidAirD3.zip'
UNI_GESTURE_PEBBLE_Z1 = f'{URL}GesturePebbleZ1.zip'
UNI_GESTURE_PEBBLE_Z2 = f'{URL}GesturePebbleZ2.zip'
UNI_GUN_POINT = f'{URL}GunPoint.zip'
UNI_GUN_POINT_AGE_SPAN = f'{URL}GunPointAgeSpan.zip'
UNI_GUN_POINT_MALE_VERSUS_FEMALE = f'{URL}GunPointMaleVersusFemale.zip'
UNI_GUN_POINT_OLD_VERSUS_YOUNG = f'{URL}GunPointOldVersusYoung.zip'
UNI_HAM = f'{URL}Ham.zip'
UNI_HAND_OUTLINES = f'{URL}HandOutlines.zip'
UNI_HAPTICS = f'{URL}Haptics.zip'
UNI_HERRING = f'{URL}Herring.zip'
UNI_HOUSE_TWENTY = f'{URL}HouseTwenty.zip'
UNI_INLINE_SKATE = f'{URL}InlineSkate.zip'
UNI_INSECT_EPG_REGULAR_TRAIN = f'{URL}InsectEPGRegularTrain.zip'
UNI_INSECT_EPG_SMALL_TRAIN = f'{URL}InsectEPGSmallTrain.zip'
UNI_INSECT_WINGBEAT_SOUND = f'{URL}InsectWingbeatSound.zip'
UNI_ITALY_POWER_DEMAND = f'{URL}ItalyPowerDemand.zip'
UNI_LARGE_KITCHEN_APPLIANCES = f'{URL}LargeKitchenAppliances.zip'
UNI_LIGHTNING2 = f'{URL}Lightning2.zip'
UNI_LIGHTNING7 = f'{URL}Lightning7.zip'
UNI_MALLAT = f'{URL}Mallat.zip'
UNI_MEAT = f'{URL}Meat.zip'
UNI_MEDICAL_IMAGES = f'{URL}MedicalImages.zip'
UNI_MELBOURNE_PEDESTRIAN = f'{URL}MelbournePedestrian.zip'
UNI_MIDDLE_PHALANX_OUTLINE_AGE_GROUP= f'{URL}MiddlePhalanxOutlineAgeGroup.zip'
UNI_MIDDLE_PHALANX_OUTLINE_CORRECT = f'{URL}MiddlePhalanxOutlineCorrect.zip'
UNI_MIDDLE_PHALANX_TW = f'{URL}MiddlePhalanxTW.zip'
UNI_MIXED_SHAPES = f'{URL}MixedShapes.zip'
UNI_MIXED_SHAPES_SMALL_TRAIN = f'{URL}MixedShapesSmallTrain.zip'
UNI_MOTE_STRAIN = f'{URL}MoteStrain.zip'
UNI_NON_INVASIVE_FETAL_ECG_THORAX1 = f'{URL}NonInvasiveFetalECGThorax1.zip'
UNI_NON_INVASIVE_FETAL_ECG_THORAX2 = f'{URL}NonInvasiveFetalECGThorax2.zip'
UNI_OLIVE_OIL = f'{URL}OliveOil.zip'
UNI_OSU_LEAF = f'{URL}OSULeaf.zip'
UNI_PHALANGES_OUTLINES_CORRECT = f'{URL}PhalangesOutlinesCorrect.zip'
UNI_PHONEME = f'{URL}Phoneme.zip'
UNI_PICKUP_GESTURE_WIIMOTE_Z = f'{URL}PickupGestureWiimoteZ.zip'
UNI_PIG_AIRWAY_PRESSURE = f'{URL}PigAirwayPressure.zip'
UNI_PIG_ART_PRESSURE = f'{URL}PigArtPressure.zip'
UNI_PIG_CVP = f'{URL}PigCVP.zip'
UNI_PLAID = f'{URL}PLAID.zip'
UNI_PLANE = f'{URL}Plane.zip'
UNI_POWER_CONS = f'{URL}PowerCons.zip'
UNI_PROXIMAL_PHALANX_OUTLINE_AGE_GROUP= f'{URL}ProximalPhalanxOutlineAgeGroup.zip'
UNI_PROXIMAL_PHALANX_OUTLINE_CORRECT= f'{URL}ProximalPhalanxOutlineCorrect.zip'
UNI_PROXIMAL_PHALANX_TW = f'{URL}ProximalPhalanxTW.zip'
UNI_REFRIGERATION_DEVICES = f'{URL}RefrigerationDevices.zip'
UNI_ROCK = f'{URL}Rock.zip'
UNI_SCREEN_TYPE = f'{URL}ScreenType.zip'
UNI_SEMG_HAND_GENDER_CH2 = f'{URL}SemgHandGenderCh2.zip'
UNI_SEMG_HAND_MOVEMENT_CH2 = f'{URL}SemgHandMovementCh2.zip'
UNI_SEMG_HAND_SUBJECT_CH2 = f'{URL}SemgHandSubjectCh2.zip'
UNI_SHAKE_GESTURE_WIIMOTE_Z = f'{URL}ShakeGestureWiimoteZ.zip'
UNI_SHAPELET_SIM = f'{URL}ShapeletSim.zip'
UNI_SHAPES_ALL = f'{URL}ShapesAll.zip'
UNI_SMALL_KITCHEN_APPLIANCES = f'{URL}SmallKitchenAppliances.zip'
UNI_SMOOTH_SUBSPACE = f'{URL}SmoothSubspace.zip'
UNI_SONY_AIBO_ROBOT_SURFACE1 = f'{URL}SonyAIBORobotSurface1.zip'
UNI_SONY_AIBO_ROBOT_SURFACE2 = f'{URL}SonyAIBORobotSurface2.zip'
UNI_STARLIGHT_CURVES = f'{URL}StarLightCurves.zip'
UNI_STRAWBERRY = f'{URL}Strawberry.zip'
UNI_SWEDISH_LEAF = f'{URL}SwedishLeaf.zip'
UNI_SYMBOLS = f'{URL}Symbols.zip'
UNI_SYNTHETIC_CONTROL = f'{URL}SyntheticControl.zip'
UNI_TOE_SEGMENTATION1 = f'{URL}ToeSegmentation1.zip'
UNI_TOE_SEGMENTATION2 = f'{URL}ToeSegmentation2.zip'
UNI_TRACE = f'{URL}Trace.zip'
UNI_TWO_LEAD_ECG = f'{URL}TwoLeadECG.zip'
UNI_TWO_PATTERNS = f'{URL}TwoPatterns.zip'
UNI_UMD = f'{URL}UMD.zip'
UNI_U_WAVE_GESTURE_LIBRARY_ALL = f'{URL}UWaveGestureLibraryAll.zip'
UNI_U_WAVE_GESTURE_LIBRARY_X = f'{URL}UWaveGestureLibraryX.zip'
UNI_U_WAVE_GESTURE_LIBRARY_Y = f'{URL}UWaveGestureLibraryY.zip'
UNI_U_WAVE_GESTURE_LIBRARY_Z = f'{URL}UWaveGestureLibraryZ.zip'
UNI_WAFER = f'{URL}Wafer.zip'
UNI_WINE = f'{URL}Wine.zip'
UNI_WORD_SYNONYMS = f'{URL}WordSynonyms.zip'
UNI_WORMS = f'{URL}Worms.zip'
UNI_WORMS_TWO_CLASS = f'{URL}WormsTwoClass.zip'
UNI_YOGA = f'{URL}Yoga.zip'
def path(url='.', c_key='archive'):
fname = url.split('/')[-1]
        local_path = URLs_TS.LOCAL_PATH/('models' if c_key=='models' else 'data')/fname
if local_path.exists(): return local_path
return Config()[c_key]/fname
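# The attributes above are plain URL strings pointing at the timeseriesclassification.com archives.
test_eq(URLs_TS.NATOPS, f'{URLs_TS.URL}NATOPS.zip')
test_eq(URLs_TS.UNI_ECG200, 'http://www.timeseriesclassification.com/Downloads/ECG200.zip')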
dsname = 'NATOPS' #'NATOPS', 'LSST', 'Wine', 'Epilepsy', 'HandMovementDirection'
# dsname = 'ECG200' #'NATOPS', 'LSST', 'Wine', 'Epilepsy', 'HandMovementDirection'
path = unzip_data(URLs_TS.NATOPS)
# path = unzip_data(URLs_TS.UNI_ECG200)
path
path.ls()
fname_train = f'{dsname}_TRAIN.arff'
fname_test = f'{dsname}_TEST.arff'
fnames = [path/fname_train, path/fname_test]
fnames
data = TSData.from_arff(fnames)
data
print(data)
data.dsname, data.fnames, data.n_channels, data.sizes, data.x.shape, data.y.shape
test_eq(data.dsname, ['NATOPS_TRAIN', 'NATOPS_TEST'])
test_eq(data.n_channels, 24)
test_eq(data.sizes, ((360, 24, 51), (360,)))
test_eq(data.x.shape, (360, 24, 51))
test_eq(data.y.shape, (360,))
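# A few more checks built on the accessors documented above (same concatenated NATOPS data).
test_eq(data.get_nb_samples(), 360)
test_eq(len(data.get_items()), 360)
test_eq(len(data.get_x()), 360)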
type(data.get_items()[1][0]), data.get_items()[1][0]
type(data.get_y()[1]), data.get_y()[1]
test_eq(data.get_y()[1], '3.0')
idx = 4
ts, title = data.get_items()[idx]
ts
show_timeseries(ts, title=title, figsize=(8,6), linewidth=3)
# show_timeseries(ts, title=title, figsize=(8,6), linewidth=4, color='orange', linestyle='dotted')
# show_timeseries(ts, title=title, chs=range(0,24,3))
# hide
def load_from_tsfile_to_array(full_file_path_and_name, return_separate_X_and_y=True, replace_missing_vals_with='NaN'):
"""Loads data from a .ts file into a Pandas DataFrame.
Parameters
full_file_path_and_name: str
The full pathname of the .ts file to read.
return_separate_X_and_y: bool
true if X and Y values should be returned as separate Data Frames (X) and a numpy array (y), false otherwise.
This is only relevant for data that
replace_missing_vals_with: str
The value that missing values in the text file should be replaced with prior to parsing.
Returns
DataFrame, ndarray
If return_separate_X_and_y then a tuple containing a DataFrame and a numpy array containing the relevant time-series and corresponding class values.
DataFrame
If not return_separate_X_and_y then a single DataFrame containing all time-series and (if relevant) a column "class_vals" the associated class values.
"""
# Initialize flags and variables used when parsing the file
metadata_started = False
data_started = False
has_problem_name_tag = False
has_timestamps_tag = False
has_univariate_tag = False
has_class_labels_tag = False
has_data_tag = False
previous_timestamp_was_int = None
previous_timestamp_was_timestamp = None
num_dimensions = None
is_first_case = True
instance_list = []
class_val_list = []
line_num = 0
# Parse the file
# print(full_file_path_and_name)
with open(full_file_path_and_name, 'r',encoding='utf-8') as file:
for line in file:
# Strip white space from start/end of line and change to lowercase for use below
line = line.strip().lower()
# Empty lines are valid at any point in a file
if line:
# Check if this line contains metadata
# Please note that even though metadata is stored in this function it is not currently published externally
if line.startswith("@problemname"):
# Check that the data has not started
if data_started:
raise TsFileParseException("metadata must come before data")
# Check that the associated value is valid
tokens = line.split(' ')
token_len = len(tokens)
if token_len == 1:
raise TsFileParseException("problemname tag requires an associated value")
problem_name = line[len("@problemname") + 1:]
has_problem_name_tag = True
metadata_started = True
elif line.startswith("@timestamps"):
# Check that the data has not started
if data_started:
raise TsFileParseException("metadata must come before data")
# Check that the associated value is valid
tokens = line.split(' ')
token_len = len(tokens)
if token_len != 2:
raise TsFileParseException("timestamps tag requires an associated Boolean value")
elif tokens[1] == "true":
timestamps = True
elif tokens[1] == "false":
timestamps = False
else:
raise TsFileParseException("invalid timestamps value")
has_timestamps_tag = True
metadata_started = True
elif line.startswith("@univariate"):
# Check that the data has not started
if data_started:
raise TsFileParseException("metadata must come before data")
# Check that the associated value is valid
tokens = line.split(' ')
token_len = len(tokens)
if token_len != 2:
raise TsFileParseException("univariate tag requires an associated Boolean value")
elif tokens[1] == "true":
univariate = True
elif tokens[1] == "false":
univariate = False
else:
raise TsFileParseException("invalid univariate value")
has_univariate_tag = True
metadata_started = True
elif line.startswith("@classlabel"):
# Check that the data has not started
if data_started:
raise TsFileParseException("metadata must come before data")
# Check that the associated value is valid
tokens = line.split(' ')
token_len = len(tokens)
if token_len == 1:
raise TsFileParseException("classlabel tag requires an associated Boolean value")
if tokens[1] == "true":
class_labels = True
elif tokens[1] == "false":
class_labels = False
else:
raise TsFileParseException("invalid classLabel value")
# Check if we have any associated class values
if token_len == 2 and class_labels:
raise TsFileParseException("if the classlabel tag is true then class values must be supplied")
has_class_labels_tag = True
class_label_list = [token.strip() for token in tokens[2:]]
metadata_started = True
# Check if this line contains the start of data
elif line.startswith("@data"):
if line != "@data":
raise TsFileParseException("data tag should not have an associated value")
if data_started and not metadata_started:
raise TsFileParseException("metadata must come before data")
else:
has_data_tag = True
data_started = True
                # If the '@data' tag has been found then metadata has been parsed and data can be loaded
elif data_started:
# Check that a full set of metadata has been provided
if (not has_problem_name_tag or not has_timestamps_tag or not has_univariate_tag
or not has_class_labels_tag or not has_data_tag):
raise TsFileParseException("a full set of metadata has not been provided before the data")
# Replace any missing values with the value specified
line = line.replace("?", replace_missing_vals_with)
                    # Check if we are dealing with data that has timestamps
if timestamps:
# We're dealing with timestamps so cannot just split line on ':' as timestamps may contain one
has_another_value = False
has_another_dimension = False
timestamps_for_dimension = []
values_for_dimension = []
this_line_num_dimensions = 0
line_len = len(line)
char_num = 0
while char_num < line_len:
# Move through any spaces
while char_num < line_len and str.isspace(line[char_num]):
char_num += 1
# See if there is any more data to read in or if we should validate that read thus far
if char_num < line_len:
# See if we have an empty dimension (i.e. no values)
if line[char_num] == ":":
if len(instance_list) < (this_line_num_dimensions + 1):
instance_list.append([])
instance_list[this_line_num_dimensions].append(pd.Series())
this_line_num_dimensions += 1
has_another_value = False
has_another_dimension = True
timestamps_for_dimension = []
values_for_dimension = []
char_num += 1
else:
# Check if we have reached a class label
if line[char_num] != "(" and class_labels:
class_val = line[char_num:].strip()
if class_val not in class_label_list:
raise TsFileParseException("the class value '" + class_val + "' on line " +
str(line_num + 1) + " is not valid")
class_val_list.append(class_val)
char_num = line_len
has_another_value = False
has_another_dimension = False
timestamps_for_dimension = []
values_for_dimension = []
else:
# Read in the data contained within the next tuple
if line[char_num] != "(" and not class_labels:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " does not start with a '('")
char_num += 1
tuple_data = ""
while char_num < line_len and line[char_num] != ")":
tuple_data += line[char_num]
char_num += 1
if char_num >= line_len or line[char_num] != ")":
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " does not end with a ')'")
# Read in any spaces immediately after the current tuple
char_num += 1
while char_num < line_len and str.isspace(line[char_num]):
char_num += 1
# Check if there is another value or dimension to process after this tuple
if char_num >= line_len:
has_another_value = False
has_another_dimension = False
elif line[char_num] == ",":
has_another_value = True
has_another_dimension = False
elif line[char_num] == ":":
has_another_value = False
has_another_dimension = True
char_num += 1
# Get the numeric value for the tuple by reading from the end of the tuple data backwards to the last comma
last_comma_index = tuple_data.rfind(',')
if last_comma_index == -1:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " contains a tuple that has no comma inside of it")
try:
value = tuple_data[last_comma_index + 1:]
value = float(value)
except ValueError:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) +
" contains a tuple that does not have a valid numeric value")
# Check the type of timestamp that we have
timestamp = tuple_data[0: last_comma_index]
try:
timestamp = int(timestamp)
timestamp_is_int = True
timestamp_is_timestamp = False
except ValueError:
timestamp_is_int = False
if not timestamp_is_int:
try:
timestamp = timestamp.strip()
timestamp_is_timestamp = True
except ValueError:
timestamp_is_timestamp = False
# Make sure that the timestamps in the file (not just this dimension or case) are consistent
if not timestamp_is_timestamp and not timestamp_is_int:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " contains a tuple that has an invalid timestamp '" +
timestamp + "'")
if previous_timestamp_was_int is not None and previous_timestamp_was_int and \
not timestamp_is_int:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) +
" contains tuples where the timestamp format is inconsistent")
if previous_timestamp_was_timestamp is not None and previous_timestamp_was_timestamp and \
not timestamp_is_timestamp:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) +
" contains tuples where the timestamp format is inconsistent")
# Store the values
timestamps_for_dimension += [timestamp]
values_for_dimension += [value]
# If this was our first tuple then we store the type of timestamp we had
if previous_timestamp_was_timestamp is None and timestamp_is_timestamp:
previous_timestamp_was_timestamp = True
previous_timestamp_was_int = False
if previous_timestamp_was_int is None and timestamp_is_int:
previous_timestamp_was_timestamp = False
previous_timestamp_was_int = True
# See if we should add the data for this dimension
if not has_another_value:
if len(instance_list) < (this_line_num_dimensions + 1):
instance_list.append([])
if timestamp_is_timestamp:
timestamps_for_dimension = pd.DatetimeIndex(timestamps_for_dimension)
instance_list[this_line_num_dimensions].append(pd.Series(index=timestamps_for_dimension
, data=values_for_dimension))
this_line_num_dimensions += 1
timestamps_for_dimension = []
values_for_dimension = []
elif has_another_value:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " ends with a ',' that is not followed by another tuple")
elif has_another_dimension and class_labels:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " ends with a ':' while it should list a class value")
elif has_another_dimension and not class_labels:
if len(instance_list) < (this_line_num_dimensions + 1):
instance_list.append([])
instance_list[this_line_num_dimensions].append(pd.Series(dtype=np.float32))
this_line_num_dimensions += 1
num_dimensions = this_line_num_dimensions
# If this is the 1st line of data we have seen then note the dimensions
if not has_another_value and not has_another_dimension:
if num_dimensions is None:
num_dimensions = this_line_num_dimensions
if num_dimensions != this_line_num_dimensions:
raise TsFileParseException("line " + str(line_num + 1) +
" does not have the same number of dimensions as the previous line of data")
                        # Check that we are not expecting any more data and, if not, store what has been processed above
if has_another_value:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " ends with a ',' that is not followed by another tuple")
elif has_another_dimension and class_labels:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " ends with a ':' while it should list a class value")
elif has_another_dimension and not class_labels:
if len(instance_list) < (this_line_num_dimensions + 1):
instance_list.append([])
instance_list[this_line_num_dimensions].append(pd.Series())
this_line_num_dimensions += 1
num_dimensions = this_line_num_dimensions
# If this is the 1st line of data we have seen then note the dimensions
if not has_another_value and num_dimensions != this_line_num_dimensions:
raise TsFileParseException("line " + str(line_num + 1) +
" does not have the same number of dimensions as the previous line of data")
# Check if we should have class values, and if so that they are contained in those listed in the metadata
if class_labels and len(class_val_list) == 0:
raise TsFileParseException("the cases have no associated class values")
else:
dimensions = line.split(":")
# If first row then note the number of dimensions (that must be the same for all cases)
if is_first_case:
num_dimensions = len(dimensions)
if class_labels:
num_dimensions -= 1
for dim in range(0, num_dimensions):
instance_list.append([])
is_first_case = False
                        # See how many dimensions the case represented in this line has
this_line_num_dimensions = len(dimensions)
if class_labels:
this_line_num_dimensions -= 1
# All dimensions should be included for all series, even if they are empty
if this_line_num_dimensions != num_dimensions:
raise TsFileParseException("inconsistent number of dimensions")
# Process the data for each dimension
for dim in range(0, num_dimensions):
dimension = dimensions[dim].strip()
if dimension:
# data_series = dimension.split(",")
# data_series = [float(i) for i in data_series]
# instance_list[dim].append(pd.Series(data_series))
# instance_list[dim].append(np.array(dimensions[dim].strip().split(','), dtype=np.float32))
instance_list[dim].append(np.array(dimensions[dim].split(','), dtype=np.float32))
# instance_list[dim].append(np.fromiter(dimensions[dim].strip().split(','), dtype=np.float32))
else:
# instance_list[dim].append(pd.Series())
instance_list[dim].append([])
if class_labels:
class_val_list.append(dimensions[num_dimensions].strip())
line_num += 1
# Check that the file was not empty
if line_num:
# Check that the file contained both metadata and data
if metadata_started and not (has_problem_name_tag and has_timestamps_tag and has_univariate_tag and
has_class_labels_tag and has_data_tag):
raise TsFileParseException("metadata incomplete")
elif metadata_started and not data_started:
raise TsFileParseException("file contained metadata but no data")
elif metadata_started and data_started and len(instance_list) == 0:
raise TsFileParseException("file contained metadata but no data")
# # Create a DataFrame from the data parsed above
# data = pd.DataFrame(dtype=np.float32)
# for dim in range(0, num_dimensions):
# data['dim_' + str(dim)] = instance_list[dim]
# # Check if we should return any associated class labels separately
# if class_labels:
# if return_separate_X_and_y:
# return data, np.asarray(class_val_list)
# else:
# data['class_vals'] = pd.Series(class_val_list)
# return data
# else:
# return data
# Create a numpy array
        #instance_list has a shape of (dimensions, nb_samples, seq_length)
        #for the NATOPS training set it would be (24, 180, 51)
        #convert the python list to a numpy array and transpose the first 2 dimensions -> (180, 24, 51)
data_array = np.asarray(instance_list).transpose(1,0,2)
y = np.asarray(class_val_list)
return data_array, y
else:
raise TsFileParseException("empty file")
fname_train = path_data/f'{dsname}/{dsname}_TRAIN.ts'
fname_train
train_x_ts, train_y_ts = load_from_tsfile_to_array(fname_train)
train_x_ts.shape, train_y_ts.shape
train_x_ts[1].shape
train_x_ts[10][0][30]
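# Expected shapes for the NATOPS training split, assuming the .ts release matches the .arff one
# loaded earlier in this notebook: 180 samples, 24 channels, sequence length 51.
test_eq(train_x_ts.shape, (180, 24, 51))
test_eq(train_y_ts.shape, (180,))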
#export
def get_UCR_univariate_list():
return [
'ACSF1', 'Adiac', 'AllGestureWiimoteX', 'AllGestureWiimoteY',
'AllGestureWiimoteZ', 'ArrowHead', 'Beef', 'BeetleFly', 'BirdChicken',
'BME', 'Car', 'CBF', 'Chinatown', 'ChlorineConcentration',
'CinCECGtorso', 'Coffee', 'Computers', 'CricketX', 'CricketY',
'CricketZ', 'Crop', 'DiatomSizeReduction',
'DistalPhalanxOutlineAgeGroup', 'DistalPhalanxOutlineCorrect',
'DistalPhalanxTW', 'DodgerLoopDay', 'DodgerLoopGame',
'DodgerLoopWeekend', 'Earthquakes', 'ECG200', 'ECG5000', 'ECGFiveDays',
'ElectricDevices', 'EOGHorizontalSignal', 'EOGVerticalSignal',
'EthanolLevel', 'FaceAll', 'FaceFour', 'FacesUCR', 'FiftyWords',
'Fish', 'FordA', 'FordB', 'FreezerRegularTrain', 'FreezerSmallTrain',
'Fungi', 'GestureMidAirD1', 'GestureMidAirD2', 'GestureMidAirD3',
'GesturePebbleZ1', 'GesturePebbleZ2', 'GunPoint', 'GunPointAgeSpan',
'GunPointMaleVersusFemale', 'GunPointOldVersusYoung', 'Ham',
'HandOutlines', 'Haptics', 'Herring', 'HouseTwenty', 'InlineSkate',
'InsectEPGRegularTrain', 'InsectEPGSmallTrain', 'InsectWingbeatSound',
'ItalyPowerDemand', 'LargeKitchenAppliances', 'Lightning2',
'Lightning7', 'Mallat', 'Meat', 'MedicalImages', 'MelbournePedestrian',
'MiddlePhalanxOutlineAgeGroup', 'MiddlePhalanxOutlineCorrect',
'MiddlePhalanxTW', 'MixedShapes', 'MixedShapesSmallTrain',
'MoteStrain', 'NonInvasiveFetalECGThorax1',
'NonInvasiveFetalECGThorax2', 'OliveOil', 'OSULeaf',
'PhalangesOutlinesCorrect', 'Phoneme', 'PickupGestureWiimoteZ',
'PigAirwayPressure', 'PigArtPressure', 'PigCVP', 'PLAID', 'Plane',
'PowerCons', 'ProximalPhalanxOutlineAgeGroup',
'ProximalPhalanxOutlineCorrect', 'ProximalPhalanxTW',
'RefrigerationDevices', 'Rock', 'ScreenType', 'SemgHandGenderCh2',
'SemgHandMovementCh2', 'SemgHandSubjectCh2', 'ShakeGestureWiimoteZ',
'ShapeletSim', 'ShapesAll', 'SmallKitchenAppliances', 'SmoothSubspace',
'SonyAIBORobotSurface1', 'SonyAIBORobotSurface2', 'StarlightCurves',
'Strawberry', 'SwedishLeaf', 'Symbols', 'SyntheticControl',
'ToeSegmentation1', 'ToeSegmentation2', 'Trace', 'TwoLeadECG',
'TwoPatterns', 'UMD', 'UWaveGestureLibraryAll', 'UWaveGestureLibraryX',
'UWaveGestureLibraryY', 'UWaveGestureLibraryZ', 'Wafer', 'Wine',
'WordSynonyms', 'Worms', 'WormsTwoClass', 'Yoga'
]
def get_UCR_multivariate_list():
return [
'ArticularyWordRecognition', 'AtrialFibrillation', 'BasicMotions',
###Markdown
Timeseries Data> Basic functions to read timeseries files like `.arff` and `.ts` files.
###Code
#export
class TSData():
    "Class that loads .arff (soon .ts) files and returns a tuple (self.x, self.y)"
    "self.x is a 3D numpy array with shape (n_samples, nb_channels, sequence_length)"
    "self.y is a 1D array of labels with shape (n_samples,)"
    "for the NATOPS_Train.arff file, the result will be: x (180, 24, 51) and y (180,)"
# def __init__(self):
# self.x = self.y = self.dsname = self.fnames = [],[],[],[]
def __init__(self, fnames, has_targets=True, fill_missing='NaN'):
# self.x = self.y = self.dsname = [],[],[]
self.x = []
self.y = []
self.dsname = []
self.fnames = fnames
self.has_targets = has_targets
self.fill_missings = fill_missing
def __repr__(self): return f"{self.__class__.__name__}:\n Datasets names (concatenated): {self.dsname}\n Filenames: {self.fnames}\n Data shape: {self.x.shape}\n Targets shape: {self.y.shape}\n Nb Samples: {self.x.shape[0]}\n Nb Channels: {self.x.shape[1]}\n Sequence Length: {self.x.shape[2]}"
def get_x(self, as_list=True): return(list(self.x))
def get_y(self): return(self.y)
def get_items(self): return [(item, str(label)) for (item, label) in zip(list(self.x), self.y)]
def get_lists(self): return (list(self.x), self.y)
def __getitem__(self, i): return (self.x[i], str(self.y[i]))
def get_nb_samples(self): return self.x.shape[0]
def sample(self, cut):
n=self.x.shape[0]
rand_idx = L(int(i) for i in torch.randperm(n))
idxs = rand_idx[:cut]
return [(self.x[i], str(self.y[i])) for i in idxs]
@property
def sizes(self): return (self.x.shape, self.y.shape)
@property
def n_channels(self): return (self.x.shape[1])
    def _load_arff(self, fname, has_targets=True, fill_missing='NaN'):
        "load an .arff file and return a tuple of 2 numpy arrays: "
"x : array with a shape (n_samples, nb_channels, sequence_length)"
"y : array with a shape (n_samples)"
"for the NATOPS_Train.arff the result will be : x(180, 24, 51) and y(180)"
instance_list = []
class_val_list = []
data_started = False
is_multi_variate = False
is_first_case = True
with open(fname, 'r', encoding="utf8") as f:
for line in f:
if line.strip():
if is_multi_variate is False and "@attribute" in line.lower() and "relational" in line.lower():
is_multi_variate = True
if "@data" in line.lower():
data_started = True
continue
# if the 'data tag has been found, the header information has been cleared and now data can be loaded
if data_started:
line = line.replace("?", fill_missing)
if is_multi_variate:
if has_targets:
line, class_val = line.split("',")
class_val_list.append(class_val.strip())
dimensions = line.split("\\n")
dimensions[0] = dimensions[0].replace("'", "")
if is_first_case:
for d in range(len(dimensions)):
instance_list.append([])
is_first_case = False
for dim in range(len(dimensions)):
instance_list[dim].append(np.array(dimensions[dim].split(','), dtype=np.float32))
# instance_list[dim].append(np.fromiter(dimensions[dim].split(','), dtype=np.float32))
else:
if is_first_case:
instance_list.append([])
is_first_case = False
line_parts = line.split(",")
if has_targets:
instance_list[0].append(np.array(line_parts[:len(line_parts)-1], dtype=np.float32))
class_val_list.append(line_parts[-1].strip())
else:
instance_list[0].append(np.array(line_parts[:len(line_parts)-1], dtype=np.float32))
        # instance_list has a shape of (dimensions, n_samples, seq_length)
        # for the NATOPS_Train.arff it would be (24, 180, 51)
        # convert the python list to a numpy array and transpose the first 2 dimensions -> (180, 24, 51)
x = np.asarray(instance_list).transpose(1,0,2)
if has_targets:
y = np.asarray(class_val_list)
return x, y
else:
            return x, [None] * x.shape[0]
@classmethod
    def from_arff(self, fnames, has_targets=True, fill_missing='NaN'):
        "load one or several .arff files and return a tuple of 2 numpy arrays: "
"x : array with a shape (n_samples, nb_channels, sequence_length)"
"y : array with a shape (n_samples)"
"for the NATOPS_Train.arff the result will be : x(180, 24, 51) and y(180)"
data = self(fnames, has_targets=has_targets, fill_missing=fill_missing)
if isinstance(fnames, list):
data.x = []
data.y = []
data.dsname = []
data.fnames = []
xs,ys = [],[]
for i, fn in enumerate(fnames):
x,y = data._load_arff(fn, has_targets=has_targets, fill_missing=fill_missing)
xs.append(x)
ys.append(y)
data.fnames.append(fn)
data.dsname.append(fn.stem)
data.x = np.concatenate(xs)
data.y = np.concatenate(ys)
else:
data.fnames.append(fnames)
data.dsname.append(fnames.stem)
            data.x, data.y = data._load_arff(fnames, has_targets=has_targets, fill_missing=fill_missing)
return data
# add_docs(TSData,
# from_arff="read one or serveral arff files and concatenate them, and returns a TSData object")
    _docs=dict(
        from_arff="read one or several arff files, concatenate them, and return a TSData object",
        get_items="return a list of tuples. Each tuple corresponds to a timeseries (numpy.ndarray) and a label (string)",
        get_x="return the list of timeseries (no labels)",
        get_y="return the list of labels corresponding to each timeseries",
        sizes="return the timeseries shape and the labels shape (labels list size)",
        n_channels="return the number of channels of a timeseries. For `arff` files it is called `dimension`. In the case of NATOPS_Train.arff, it returns 24")
show_doc(TSData.from_arff)
show_doc(TSData.get_items)
show_doc(TSData.n_channels)
#export
def get_ts_items(fnames):
    'get_ts_items returns a list of tuples. Each tuple corresponds to a timeseries (numpy.ndarray) and a label (string)'
data = TSData.from_arff(fnames)
return data.get_items()
show_doc(get_ts_items)
# hide
def check_ext(fnames, ext):
    "Append the extension `ext` to any filename in `fnames` that has no suffix"
    if isinstance(fnames, list):
        fnames = [fn if (fn.suffix!='') else f'{fn}.{ext}' for fn in fnames]
    else:
        fnames = fnames if (fnames.suffix!='') else f'{fnames}.{ext}'
    return fnames
###Output
_____no_output_____
###Markdown
Plot Timeseries
###Code
#export
def show_timeseries(ts, ctx=None, title=None, chs=None, leg=True, figsize=None, linewidth=3, linestyle='solid', color='orange', **kwargs):
"""
Plot a timeseries.
Args:
title : usually the class of the timeseries
ts : timeseries. It should have a shape of (nb_channels, sequence_length)
chs : array representing a list of channels to plot
    leg : whether to display a legend
"""
fig = None
if ctx is None: fig, ctx = plt.subplots(figsize=figsize, **kwargs)
n_channels = ts.shape[0]
t = range(ts.shape[1])
chs_max = max(chs) if chs else 0
channels = chs if (chs and (chs_max < ts.shape[0])) else range(ts.shape[0])
for ch in channels:
if n_channels==1:
ctx.plot(t, ts[ch], label='ch'+str(ch), linewidth=linewidth, color=color, linestyle=linestyle)
else:
ctx.plot(t, ts[ch], label='ch'+str(ch), linewidth=linewidth, linestyle=linestyle)
if leg: ctx.legend(loc='upper right', ncol=2, framealpha=0.5)
if title: ctx.set_title(title)
return fig
# return ctx
show_doc(show_timeseries)
path_data = Config().data
path_data
# export
def file_extract_at_filename(fname, dest):
    "Extract `fname` to `dest`/`fname`.name folder using `tarfile` or `zipfile`"
dest = Path(dest)/Path(fname).with_suffix('').name
# tarfile.open(fname, 'r:gz').extractall(dest)
fname = str(fname)
if fname.endswith('gz'): tarfile.open(fname, 'r:gz').extractall(dest)
elif fname.endswith('zip'): zipfile.ZipFile(fname ).extractall(dest)
else: raise Exception(f'Unrecognized archive: {fname}')
###Output
_____no_output_____
###Markdown
`file_extract_at_filename` is used by default in `unzip_data` to decompress the downloaded file in a folder that has the same name as the zip filename.
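For example, a call along these lines (illustrative paths, assuming the archive is already on disk) extracts `NATOPS.zip` into a `NATOPS/` folder under `dest`:

```
# hypothetical usage of the helper defined above
file_extract_at_filename('data/NATOPS.zip', dest='data')   # extracts into data/NATOPS/
```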
###Code
# export
def unzip_data(url, fname=None, dest=None, c_key='data', force_download=False):
"Download `url` to `fname` if `dest` doesn't exist, and un-compress to `dest`/`fname`.name folder ."
return untar_data(url, fname=fname, c_key=c_key, force_download=force_download, extract_func=file_extract_at_filename)
###Output
_____no_output_____
###Markdown
`unzip_data` downloads the file at `url` (if needed) and decompresses it into a folder that has the same name as the zip filename.
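A minimal sketch of the intended usage (illustrative; it relies on the fastai `Config`/`untar_data` machinery and the `URLs_TS` constants defined below):

```
path = unzip_data(URLs_TS.NATOPS)   # downloads NATOPS.zip if needed and extracts it to <data>/NATOPS
path.ls()
```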
###Code
show_doc(unzip_data)
# export
class URLs_TS():
"Global constants for dataset and model URLs."
LOCAL_PATH = Path.cwd()
URL = 'http://www.timeseriesclassification.com/Downloads/'
# UCR multivariate datasets - Current Naming
ARTICULARY_WORD_RECOGNITION = f'{URL}ArticularyWordRecognition.zip'
ATRIAL_FIBRILLATION = f'{URL}AtrialFibrillation.zip'
BASIC_MOTIONS = f'{URL}BasicMotions.zip'
CHARACTER_TRAJECTORIES = f'{URL}CharacterTrajectories.zip'
CRICKET = f'{URL}Cricket.zip'
DUCK_DUCK_GEESE = f'{URL}DuckDuckGeese.zip'
EIGEN_WORMS = f'{URL}EigenWorms.zip'
EPILEPSY = f'{URL}Epilepsy.zip'
ETHANOL_CONCENTRATION = f'{URL}EthanolConcentration.zip'
ERING = f'{URL}ERing.zip'
FACE_DETECTION = f'{URL}FaceDetection.zip'
FINGER_MOVEMENTS = f'{URL}FingerMovements.zip'
HAND_MOVEMENT_DIRECTION = f'{URL}HandMovementDirection.zip'
HANDWRITING = f'{URL}Handwriting.zip'
HEARTBEAT = f'{URL}Heartbeat.zip'
JAPANESE_VOWELS = f'{URL}JapaneseVowels.zip'
LIBRAS = f'{URL}Libras.zip'
LSST = f'{URL}LSST.zip'
INSECT_WINGBEAT = f'{URL}InsectWingbeat.zip'
MOTOR_IMAGERY = f'{URL}MotorImagery.zip'
NATOPS = f'{URL}NATOPS.zip'
PEN_DIGITS = f'{URL}PenDigits.zip'
PEMS_SF = f'{URL}PEMS-SF.zip'
PHONEME_SPECTRA = f'{URL}PhonemeSpectra.zip'
RACKET_SPORTS = f'{URL}RacketSports.zip'
SELF_REGULATION_SCP1 = f'{URL}SelfRegulationSCP1.zip'
SELF_REGULATION_SCP2 = f'{URL}SelfRegulationSCP2.zip'
SPOKEN_ARABIC_DIGITS = f'{URL}SpokenArabicDigits.zip'
STAND_WALK_JUMP = f'{URL}StandWalkJump.zip'
UWAVE_GESTURE_LIBRARY = f'{URL}UWaveGestureLibrary.zip'
# UCR multivariate datasets - New Naming
# MULTI_ARTICULARY_WORD_RECOGNITION = f'{URL}ArticularyWordRecognition.zip'
# MULTI_ATRIAL_FIBRILLATION = f'{URL}AtrialFibrillation.zip'
# MULTI_BASIC_MOTIONS = f'{URL}BasicMotions.zip'
# MULTI_CHARACTER_TRAJECTORIES = f'{URL}CharacterTrajectories.zip'
# MULTI_CRICKET = f'{URL}Cricket.zip'
# MULTI_DUCK_DUCK_GEESE = f'{URL}DuckDuckGeese.zip'
# MULTI_EIGEN_WORMS = f'{URL}EigenWorms.zip'
# MULTI_EPILEPSY = f'{URL}Epilepsy.zip'
# MULTI_ETHANOL_CONCENTRATION = f'{URL}EthanolConcentration.zip'
# MULTI_ERING = f'{URL}ERing.zip'
# MULTI_FACE_DETECTION = f'{URL}FaceDetection.zip'
# MULTI_FINGER_MOVEMENTS = f'{URL}FingerMovements.zip'
# MULTI_HAND_MOVEMENT_DIRECTION = f'{URL}HandMovementDirection.zip'
# MULTI_HANDWRITING = f'{URL}Handwriting.zip'
# MULTI_HEARTBEAT = f'{URL}Heartbeat.zip'
# MULTI_JAPANESE_VOWELS = f'{URL}JapaneseVowels.zip'
# MULTI_LIBRAS = f'{URL}Libras.zip'
# MULTI_LSST = f'{URL}LSST.zip'
# MULTI_INSECT_WINGBEAT = f'{URL}InsectWingbeat.zip'
# MULTI_MOTOR_IMAGERY = f'{URL}MotorImagery.zip'
# MULTI_NATOPS = f'{URL}NATOPS.zip'
# MULTI_PEN_DIGITS = f'{URL}PenDigits.zip'
# MULTI_PEMS_SF = f'{URL}PEMS-SF.zip'
# MULTI_PHONEME_SPECTRA = f'{URL}PhonemeSpectra.zip'
# MULTI_RACKET_SPORTS = f'{URL}RacketSports.zip'
# MULTI_SELF_REGULATION_SCP1 = f'{URL}SelfRegulationSCP1.zip'
# MULTI_SELF_REGULATION_SCP2 = f'{URL}SelfRegulationSCP2.zip'
# MULTI_SPOKEN_ARABIC_DIGITS = f'{URL}SpokenArabicDigits.zip'
# MULTI_STAND_WALK_JUMP = f'{URL}StandWalkJump.zip'
# MULTI_U_WAVE_GESTURE_LIBRARY = f'{URL}UWaveGestureLibrary'
# UCR univariate datasets
UNI_ACSF1 = f'{URL}ACSF1.zip'
UNI_ADIAC = f'{URL}Adiac.zip'
UNI_ALL_GESTURE_WIIMOTE_X = f'{URL}AllGestureWiimoteX.zip'
UNI_ALL_GESTURE_WIIMOTE_Y = f'{URL}AllGestureWiimoteY.zip'
UNI_ALL_GESTURE_WIIMOTE_Z = f'{URL}AllGestureWiimoteZ.zip'
UNI_ARROW_HEAD = f'{URL}ArrowHead.zip'
UNI_BEEF = f'{URL}Beef.zip'
UNI_BEETLE_FLY = f'{URL}BeetleFly.zip'
UNI_BIRD_CHICKEN = f'{URL}BirdChicken.zip'
UNI_BME = f'{URL}BME.zip'
UNI_CAR = f'{URL}Car.zip'
UNI_CBF = f'{URL}CBF.zip'
UNI_CHINATOWN = f'{URL}Chinatown.zip'
UNI_CHLORINE_CONCENTRATION = f'{URL}ChlorineConcentration.zip'
UNI_CIN_CEC_GTORSO = f'{URL}CinCECGtorso.zip'
UNI_COFFEE = f'{URL}Coffee.zip'
UNI_COMPUTERS = f'{URL}Computers.zip'
UNI_CRICKET_X = f'{URL}CricketX.zip'
UNI_CRICKET_Y = f'{URL}CricketY.zip'
UNI_CRICKET_Z = f'{URL}CricketZ.zip'
UNI_CROP = f'{URL}Crop.zip'
UNI_DIATOM_SIZE_REDUCTION = f'{URL}DiatomSizeReduction.zip'
UNI_DISTAL_PHALANX_OUTLINE_AGE_GROUP= f'{URL}DistalPhalanxOutlineAgeGroup.zip'
UNI_DISTAL_PHALANX_OUTLINE_CORRECT = f'{URL}DistalPhalanxOutlineCorrect.zip'
UNI_DISTAL_PHALANX_TW = f'{URL}DistalPhalanxTW.zip'
UNI_DODGER_LOOP_DAY = f'{URL}DodgerLoopDay.zip'
UNI_DODGER_LOOP_GAME = f'{URL}DodgerLoopGame.zip'
UNI_DODGER_LOOP_WEEKEND = f'{URL}DodgerLoopWeekend.zip'
UNI_EARTHQUAKES = f'{URL}Earthquakes.zip'
UNI_ECG200 = f'{URL}ECG200.zip'
UNI_ECG5000 = f'{URL}ECG5000.zip'
UNI_ECG_FIVE_DAYS = f'{URL}ECGFiveDays.zip'
UNI_ELECTRIC_DEVICES = f'{URL}ElectricDevices.zip'
UNI_EOG_HORIZONTAL_SIGNAL = f'{URL}EOGHorizontalSignal.zip'
UNI_EOG_VERTICAL_SIGNAL = f'{URL}EOGVerticalSignal.zip'
UNI_ETHANOL_LEVEL = f'{URL}EthanolLevel.zip'
UNI_FACE_ALL = f'{URL}FaceAll.zip'
UNI_FACE_FOUR = f'{URL}FaceFour.zip'
UNI_FACES_UCR = f'{URL}FacesUCR.zip'
UNI_FIFTY_WORDS = f'{URL}FiftyWords.zip'
UNI_FISH = f'{URL}Fish.zip'
UNI_FORD_A = f'{URL}FordA.zip'
UNI_FORD_B = f'{URL}FordB.zip'
UNI_FREEZER_REGULAR_TRAIN = f'{URL}FreezerRegularTrain.zip'
UNI_FREEZER_SMALL_TRAIN = f'{URL}FreezerSmallTrain.zip'
UNI_FUNGI = f'{URL}Fungi.zip'
UNI_GESTURE_MID_AIR_D1 = f'{URL}GestureMidAirD1.zip'
UNI_GESTURE_MID_AIR_D2 = f'{URL}GestureMidAirD2.zip'
UNI_GESTURE_MID_AIR_D3 = f'{URL}GestureMidAirD3.zip'
UNI_GESTURE_PEBBLE_Z1 = f'{URL}GesturePebbleZ1.zip'
UNI_GESTURE_PEBBLE_Z2 = f'{URL}GesturePebbleZ2.zip'
UNI_GUN_POINT = f'{URL}GunPoint.zip'
UNI_GUN_POINT_AGE_SPAN = f'{URL}GunPointAgeSpan.zip'
UNI_GUN_POINT_MALE_VERSUS_FEMALE = f'{URL}GunPointMaleVersusFemale.zip'
UNI_GUN_POINT_OLD_VERSUS_YOUNG = f'{URL}GunPointOldVersusYoung.zip'
UNI_HAM = f'{URL}Ham.zip'
UNI_HAND_OUTLINES = f'{URL}HandOutlines.zip'
UNI_HAPTICS = f'{URL}Haptics.zip'
UNI_HERRING = f'{URL}Herring.zip'
UNI_HOUSE_TWENTY = f'{URL}HouseTwenty.zip'
UNI_INLINE_SKATE = f'{URL}InlineSkate.zip'
UNI_INSECT_EPG_REGULAR_TRAIN = f'{URL}InsectEPGRegularTrain.zip'
UNI_INSECT_EPG_SMALL_TRAIN = f'{URL}InsectEPGSmallTrain.zip'
UNI_INSECT_WINGBEAT_SOUND = f'{URL}InsectWingbeatSound.zip'
UNI_ITALY_POWER_DEMAND = f'{URL}ItalyPowerDemand.zip'
UNI_LARGE_KITCHEN_APPLIANCES = f'{URL}LargeKitchenAppliances.zip'
UNI_LIGHTNING2 = f'{URL}Lightning2.zip'
UNI_LIGHTNING7 = f'{URL}Lightning7.zip'
UNI_MALLAT = f'{URL}Mallat.zip'
UNI_MEAT = f'{URL}Meat.zip'
UNI_MEDICAL_IMAGES = f'{URL}MedicalImages.zip'
UNI_MELBOURNE_PEDESTRIAN = f'{URL}MelbournePedestrian.zip'
UNI_MIDDLE_PHALANX_OUTLINE_AGE_GROUP= f'{URL}MiddlePhalanxOutlineAgeGroup.zip'
UNI_MIDDLE_PHALANX_OUTLINE_CORRECT = f'{URL}MiddlePhalanxOutlineCorrect.zip'
UNI_MIDDLE_PHALANX_TW = f'{URL}MiddlePhalanxTW.zip'
UNI_MIXED_SHAPES = f'{URL}MixedShapes.zip'
UNI_MIXED_SHAPES_SMALL_TRAIN = f'{URL}MixedShapesSmallTrain.zip'
UNI_MOTE_STRAIN = f'{URL}MoteStrain.zip'
UNI_NON_INVASIVE_FETAL_ECG_THORAX1 = f'{URL}NonInvasiveFetalECGThorax1.zip'
UNI_NON_INVASIVE_FETAL_ECG_THORAX2 = f'{URL}NonInvasiveFetalECGThorax2.zip'
UNI_OLIVE_OIL = f'{URL}OliveOil.zip'
UNI_OSU_LEAF = f'{URL}OSULeaf.zip'
UNI_PHALANGES_OUTLINES_CORRECT = f'{URL}PhalangesOutlinesCorrect.zip'
UNI_PHONEME = f'{URL}Phoneme.zip'
UNI_PICKUP_GESTURE_WIIMOTE_Z = f'{URL}PickupGestureWiimoteZ.zip'
UNI_PIG_AIRWAY_PRESSURE = f'{URL}PigAirwayPressure.zip'
UNI_PIG_ART_PRESSURE = f'{URL}PigArtPressure.zip'
UNI_PIG_CVP = f'{URL}PigCVP.zip'
UNI_PLAID = f'{URL}PLAID.zip'
UNI_PLANE = f'{URL}Plane.zip'
UNI_POWER_CONS = f'{URL}PowerCons.zip'
UNI_PROXIMAL_PHALANX_OUTLINE_AGE_GROUP= f'{URL}ProximalPhalanxOutlineAgeGroup.zip'
UNI_PROXIMAL_PHALANX_OUTLINE_CORRECT= f'{URL}ProximalPhalanxOutlineCorrect.zip'
UNI_PROXIMAL_PHALANX_TW = f'{URL}ProximalPhalanxTW.zip'
UNI_REFRIGERATION_DEVICES = f'{URL}RefrigerationDevices.zip'
UNI_ROCK = f'{URL}Rock.zip'
UNI_SCREEN_TYPE = f'{URL}ScreenType.zip'
UNI_SEMG_HAND_GENDER_CH2 = f'{URL}SemgHandGenderCh2.zip'
UNI_SEMG_HAND_MOVEMENT_CH2 = f'{URL}SemgHandMovementCh2.zip'
UNI_SEMG_HAND_SUBJECT_CH2 = f'{URL}SemgHandSubjectCh2.zip'
UNI_SHAKE_GESTURE_WIIMOTE_Z = f'{URL}ShakeGestureWiimoteZ.zip'
UNI_SHAPELET_SIM = f'{URL}ShapeletSim.zip'
UNI_SHAPES_ALL = f'{URL}ShapesAll.zip'
UNI_SMALL_KITCHEN_APPLIANCES = f'{URL}SmallKitchenAppliances.zip'
UNI_SMOOTH_SUBSPACE = f'{URL}SmoothSubspace.zip'
UNI_SONY_AIBO_ROBOT_SURFACE1 = f'{URL}SonyAIBORobotSurface1.zip'
UNI_SONY_AIBO_ROBOT_SURFACE2 = f'{URL}SonyAIBORobotSurface2.zip'
UNI_STARLIGHT_CURVES = f'{URL}StarLightCurves.zip'
UNI_STRAWBERRY = f'{URL}Strawberry.zip'
UNI_SWEDISH_LEAF = f'{URL}SwedishLeaf.zip'
UNI_SYMBOLS = f'{URL}Symbols.zip'
UNI_SYNTHETIC_CONTROL = f'{URL}SyntheticControl.zip'
UNI_TOE_SEGMENTATION1 = f'{URL}ToeSegmentation1.zip'
UNI_TOE_SEGMENTATION2 = f'{URL}ToeSegmentation2.zip'
UNI_TRACE = f'{URL}Trace.zip'
UNI_TWO_LEAD_ECG = f'{URL}TwoLeadECG.zip'
UNI_TWO_PATTERNS = f'{URL}TwoPatterns.zip'
UNI_UMD = f'{URL}UMD.zip'
UNI_U_WAVE_GESTURE_LIBRARY_ALL = f'{URL}UWaveGestureLibraryAll.zip'
UNI_U_WAVE_GESTURE_LIBRARY_X = f'{URL}UWaveGestureLibraryX.zip'
UNI_U_WAVE_GESTURE_LIBRARY_Y = f'{URL}UWaveGestureLibraryY.zip'
UNI_U_WAVE_GESTURE_LIBRARY_Z = f'{URL}UWaveGestureLibraryZ.zip'
UNI_WAFER = f'{URL}Wafer.zip'
UNI_WINE = f'{URL}Wine.zip'
UNI_WORD_SYNONYMS = f'{URL}WordSynonyms.zip'
UNI_WORMS = f'{URL}Worms.zip'
UNI_WORMS_TWO_CLASS = f'{URL}WormsTwoClass.zip'
UNI_YOGA = f'{URL}Yoga.zip'
def path(url='.', c_key='archive'):
fname = url.split('/')[-1]
local_path = URLs.LOCAL_PATH/('models' if c_key=='models' else 'data')/fname
if local_path.exists(): return local_path
return Config()[c_key]/fname
dsname = 'NATOPS' #'NATOPS', 'LSST', 'Wine', 'Epilepsy', 'HandMovementDirection'
# dsname = 'ECG200' #'NATOPS', 'LSST', 'Wine', 'Epilepsy', 'HandMovementDirection'
path = unzip_data(URLs_TS.NATOPS)
# path = unzip_data(URLs_TS.UNI_ECG200)
path
path.ls()
fname_train = f'{dsname}_TRAIN.arff'
fname_test = f'{dsname}_TEST.arff'
fnames = [path/fname_train, path/fname_test]
fnames
data = TSData.from_arff(fnames)
data
print(data)
data.dsname, data.fnames, data.n_channels, data.sizes, data.x.shape, data.y.shape
test_eq(data.dsname, ['NATOPS_TRAIN', 'NATOPS_TEST'])
test_eq(data.n_channels, 24)
test_eq(data.sizes, ((360, 24, 51), (360,)))
test_eq(data.x.shape, (360, 24, 51))
test_eq(data.y.shape, (360,))
type(data.get_items()[1][0]), data.get_items()[1][0]
type(data.get_y()[1]), data.get_y()[1]
test_eq(data.get_y()[1], '3.0')
idx = 4
ts, title = data.get_items()[idx]
ts
show_timeseries(ts, title=title, figsize=(8,6), linewidth=3)
# show_timeseries(ts, title=title, figsize=(8,6), linewidth=4, color='orange', linestyle='dotted')
# show_timeseries(ts, title=title, chs=range(0,24,3))
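# Note (added sketch): the hidden `.ts` loader below expects a layout like the following,
# inferred from the tags it parses (values are illustrative, not taken from a real file):
#
#   @problemName NATOPS
#   @timeStamps false
#   @univariate false
#   @classLabel true 1.0 2.0 3.0 4.0 5.0 6.0
#   @data
#   -0.54,0.21,0.13:-0.18,0.37,0.05:3.0
#
# Each @data line holds one sample, with channels separated by ':' and the class value last.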
# hide
def load_from_tsfile_to_array(full_file_path_and_name, return_separate_X_and_y=True, replace_missing_vals_with='NaN'):
    """Loads data from a .ts file into numpy arrays.
    Parameters
    full_file_path_and_name: str
        The full pathname of the .ts file to read.
    return_separate_X_and_y: bool
        true if X and y values should be returned as separate structures, false otherwise.
        This is only relevant for data that has class/target values; this array-based adaptation
        currently always returns x and y separately.
    replace_missing_vals_with: str
        The value that missing values in the text file should be replaced with prior to parsing.
    Returns
    ndarray, ndarray
        A tuple (x, y) where x has shape (n_samples, nb_channels, sequence_length) and
        y has shape (n_samples,) containing the associated class values.
    """
# Initialize flags and variables used when parsing the file
metadata_started = False
data_started = False
has_problem_name_tag = False
has_timestamps_tag = False
has_univariate_tag = False
has_class_labels_tag = False
has_data_tag = False
previous_timestamp_was_int = None
previous_timestamp_was_timestamp = None
num_dimensions = None
is_first_case = True
instance_list = []
class_val_list = []
line_num = 0
# Parse the file
# print(full_file_path_and_name)
with open(full_file_path_and_name, 'r',encoding='utf-8') as file:
for line in file:
# Strip white space from start/end of line and change to lowercase for use below
line = line.strip().lower()
# Empty lines are valid at any point in a file
if line:
# Check if this line contains metadata
# Please note that even though metadata is stored in this function it is not currently published externally
if line.startswith("@problemname"):
# Check that the data has not started
if data_started:
raise TsFileParseException("metadata must come before data")
# Check that the associated value is valid
tokens = line.split(' ')
token_len = len(tokens)
if token_len == 1:
raise TsFileParseException("problemname tag requires an associated value")
problem_name = line[len("@problemname") + 1:]
has_problem_name_tag = True
metadata_started = True
elif line.startswith("@timestamps"):
# Check that the data has not started
if data_started:
raise TsFileParseException("metadata must come before data")
# Check that the associated value is valid
tokens = line.split(' ')
token_len = len(tokens)
if token_len != 2:
raise TsFileParseException("timestamps tag requires an associated Boolean value")
elif tokens[1] == "true":
timestamps = True
elif tokens[1] == "false":
timestamps = False
else:
raise TsFileParseException("invalid timestamps value")
has_timestamps_tag = True
metadata_started = True
elif line.startswith("@univariate"):
# Check that the data has not started
if data_started:
raise TsFileParseException("metadata must come before data")
# Check that the associated value is valid
tokens = line.split(' ')
token_len = len(tokens)
if token_len != 2:
raise TsFileParseException("univariate tag requires an associated Boolean value")
elif tokens[1] == "true":
univariate = True
elif tokens[1] == "false":
univariate = False
else:
raise TsFileParseException("invalid univariate value")
has_univariate_tag = True
metadata_started = True
elif line.startswith("@classlabel"):
# Check that the data has not started
if data_started:
raise TsFileParseException("metadata must come before data")
# Check that the associated value is valid
tokens = line.split(' ')
token_len = len(tokens)
if token_len == 1:
raise TsFileParseException("classlabel tag requires an associated Boolean value")
if tokens[1] == "true":
class_labels = True
elif tokens[1] == "false":
class_labels = False
else:
raise TsFileParseException("invalid classLabel value")
# Check if we have any associated class values
if token_len == 2 and class_labels:
raise TsFileParseException("if the classlabel tag is true then class values must be supplied")
has_class_labels_tag = True
class_label_list = [token.strip() for token in tokens[2:]]
metadata_started = True
# Check if this line contains the start of data
elif line.startswith("@data"):
if line != "@data":
raise TsFileParseException("data tag should not have an associated value")
if data_started and not metadata_started:
raise TsFileParseException("metadata must come before data")
else:
has_data_tag = True
data_started = True
# If the 'data tag has been found then metadata has been parsed and data can be loaded
elif data_started:
# Check that a full set of metadata has been provided
if (not has_problem_name_tag or not has_timestamps_tag or not has_univariate_tag
or not has_class_labels_tag or not has_data_tag):
raise TsFileParseException("a full set of metadata has not been provided before the data")
# Replace any missing values with the value specified
line = line.replace("?", replace_missing_vals_with)
# Check if we dealing with data that has timestamps
if timestamps:
# We're dealing with timestamps so cannot just split line on ':' as timestamps may contain one
has_another_value = False
has_another_dimension = False
timestamps_for_dimension = []
values_for_dimension = []
this_line_num_dimensions = 0
line_len = len(line)
char_num = 0
while char_num < line_len:
# Move through any spaces
while char_num < line_len and str.isspace(line[char_num]):
char_num += 1
# See if there is any more data to read in or if we should validate that read thus far
if char_num < line_len:
# See if we have an empty dimension (i.e. no values)
if line[char_num] == ":":
if len(instance_list) < (this_line_num_dimensions + 1):
instance_list.append([])
instance_list[this_line_num_dimensions].append(pd.Series())
this_line_num_dimensions += 1
has_another_value = False
has_another_dimension = True
timestamps_for_dimension = []
values_for_dimension = []
char_num += 1
else:
# Check if we have reached a class label
if line[char_num] != "(" and class_labels:
class_val = line[char_num:].strip()
if class_val not in class_label_list:
raise TsFileParseException("the class value '" + class_val + "' on line " +
str(line_num + 1) + " is not valid")
class_val_list.append(class_val)
char_num = line_len
has_another_value = False
has_another_dimension = False
timestamps_for_dimension = []
values_for_dimension = []
else:
# Read in the data contained within the next tuple
if line[char_num] != "(" and not class_labels:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " does not start with a '('")
char_num += 1
tuple_data = ""
while char_num < line_len and line[char_num] != ")":
tuple_data += line[char_num]
char_num += 1
if char_num >= line_len or line[char_num] != ")":
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " does not end with a ')'")
# Read in any spaces immediately after the current tuple
char_num += 1
while char_num < line_len and str.isspace(line[char_num]):
char_num += 1
# Check if there is another value or dimension to process after this tuple
if char_num >= line_len:
has_another_value = False
has_another_dimension = False
elif line[char_num] == ",":
has_another_value = True
has_another_dimension = False
elif line[char_num] == ":":
has_another_value = False
has_another_dimension = True
char_num += 1
# Get the numeric value for the tuple by reading from the end of the tuple data backwards to the last comma
last_comma_index = tuple_data.rfind(',')
if last_comma_index == -1:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " contains a tuple that has no comma inside of it")
try:
value = tuple_data[last_comma_index + 1:]
value = float(value)
except ValueError:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) +
" contains a tuple that does not have a valid numeric value")
# Check the type of timestamp that we have
timestamp = tuple_data[0: last_comma_index]
try:
timestamp = int(timestamp)
timestamp_is_int = True
timestamp_is_timestamp = False
except ValueError:
timestamp_is_int = False
if not timestamp_is_int:
try:
timestamp = timestamp.strip()
timestamp_is_timestamp = True
except ValueError:
timestamp_is_timestamp = False
# Make sure that the timestamps in the file (not just this dimension or case) are consistent
if not timestamp_is_timestamp and not timestamp_is_int:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " contains a tuple that has an invalid timestamp '" +
timestamp + "'")
if previous_timestamp_was_int is not None and previous_timestamp_was_int and \
not timestamp_is_int:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) +
" contains tuples where the timestamp format is inconsistent")
if previous_timestamp_was_timestamp is not None and previous_timestamp_was_timestamp and \
not timestamp_is_timestamp:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) +
" contains tuples where the timestamp format is inconsistent")
# Store the values
timestamps_for_dimension += [timestamp]
values_for_dimension += [value]
# If this was our first tuple then we store the type of timestamp we had
if previous_timestamp_was_timestamp is None and timestamp_is_timestamp:
previous_timestamp_was_timestamp = True
previous_timestamp_was_int = False
if previous_timestamp_was_int is None and timestamp_is_int:
previous_timestamp_was_timestamp = False
previous_timestamp_was_int = True
# See if we should add the data for this dimension
if not has_another_value:
if len(instance_list) < (this_line_num_dimensions + 1):
instance_list.append([])
if timestamp_is_timestamp:
timestamps_for_dimension = pd.DatetimeIndex(timestamps_for_dimension)
instance_list[this_line_num_dimensions].append(pd.Series(index=timestamps_for_dimension
, data=values_for_dimension))
this_line_num_dimensions += 1
timestamps_for_dimension = []
values_for_dimension = []
elif has_another_value:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " ends with a ',' that is not followed by another tuple")
elif has_another_dimension and class_labels:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " ends with a ':' while it should list a class value")
elif has_another_dimension and not class_labels:
if len(instance_list) < (this_line_num_dimensions + 1):
instance_list.append([])
instance_list[this_line_num_dimensions].append(pd.Series(dtype=np.float32))
this_line_num_dimensions += 1
num_dimensions = this_line_num_dimensions
# If this is the 1st line of data we have seen then note the dimensions
if not has_another_value and not has_another_dimension:
if num_dimensions is None:
num_dimensions = this_line_num_dimensions
if num_dimensions != this_line_num_dimensions:
raise TsFileParseException("line " + str(line_num + 1) +
" does not have the same number of dimensions as the previous line of data")
# Check that we are not expecting some more data, and if not, store that processed above
if has_another_value:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " ends with a ',' that is not followed by another tuple")
elif has_another_dimension and class_labels:
raise TsFileParseException("dimension " + str(this_line_num_dimensions + 1) +
" on line " + str(line_num + 1) + " ends with a ':' while it should list a class value")
elif has_another_dimension and not class_labels:
if len(instance_list) < (this_line_num_dimensions + 1):
instance_list.append([])
instance_list[this_line_num_dimensions].append(pd.Series())
this_line_num_dimensions += 1
num_dimensions = this_line_num_dimensions
# If this is the 1st line of data we have seen then note the dimensions
if not has_another_value and num_dimensions != this_line_num_dimensions:
raise TsFileParseException("line " + str(line_num + 1) +
" does not have the same number of dimensions as the previous line of data")
# Check if we should have class values, and if so that they are contained in those listed in the metadata
if class_labels and len(class_val_list) == 0:
raise TsFileParseException("the cases have no associated class values")
else:
dimensions = line.split(":")
# If first row then note the number of dimensions (that must be the same for all cases)
if is_first_case:
num_dimensions = len(dimensions)
if class_labels:
num_dimensions -= 1
for dim in range(0, num_dimensions):
instance_list.append([])
is_first_case = False
# See how many dimensions that the case whose data in represented in this line has
this_line_num_dimensions = len(dimensions)
if class_labels:
this_line_num_dimensions -= 1
# All dimensions should be included for all series, even if they are empty
if this_line_num_dimensions != num_dimensions:
raise TsFileParseException("inconsistent number of dimensions")
# Process the data for each dimension
for dim in range(0, num_dimensions):
dimension = dimensions[dim].strip()
if dimension:
# data_series = dimension.split(",")
# data_series = [float(i) for i in data_series]
# instance_list[dim].append(pd.Series(data_series))
# instance_list[dim].append(np.array(dimensions[dim].strip().split(','), dtype=np.float32))
instance_list[dim].append(np.array(dimensions[dim].split(','), dtype=np.float32))
# instance_list[dim].append(np.fromiter(dimensions[dim].strip().split(','), dtype=np.float32))
else:
# instance_list[dim].append(pd.Series())
instance_list[dim].append([])
if class_labels:
class_val_list.append(dimensions[num_dimensions].strip())
line_num += 1
# Check that the file was not empty
if line_num:
# Check that the file contained both metadata and data
if metadata_started and not (has_problem_name_tag and has_timestamps_tag and has_univariate_tag and
has_class_labels_tag and has_data_tag):
raise TsFileParseException("metadata incomplete")
elif metadata_started and not data_started:
raise TsFileParseException("file contained metadata but no data")
elif metadata_started and data_started and len(instance_list) == 0:
raise TsFileParseException("file contained metadata but no data")
# # Create a DataFrame from the data parsed above
# data = pd.DataFrame(dtype=np.float32)
# for dim in range(0, num_dimensions):
# data['dim_' + str(dim)] = instance_list[dim]
# # Check if we should return any associated class labels separately
# if class_labels:
# if return_separate_X_and_y:
# return data, np.asarray(class_val_list)
# else:
# data['class_vals'] = pd.Series(class_val_list)
# return data
# else:
# return data
# Create a numpy array
        # instance_list has a shape of (dimensions, n_samples, seq_length)
        # for the NATOPS_Train.arff it would be (24, 180, 51)
        # convert the python list to a numpy array and transpose the first 2 dimensions -> (180, 24, 51)
data_array = np.asarray(instance_list).transpose(1,0,2)
y = np.asarray(class_val_list)
return data_array, y
else:
raise TsFileParseException("empty file")
fname_train = path_data/f'{dsname}/{dsname}_TRAIN.ts'
fname_train
train_x_ts, train_y_ts = load_from_tsfile_to_array(fname_train)
train_x_ts.shape, train_y_ts.shape
train_x_ts[1].shape
train_x_ts[10][0][30]
#export
def get_UCR_univariate_list():
return [
'ACSF1', 'Adiac', 'AllGestureWiimoteX', 'AllGestureWiimoteY',
'AllGestureWiimoteZ', 'ArrowHead', 'Beef', 'BeetleFly', 'BirdChicken',
'BME', 'Car', 'CBF', 'Chinatown', 'ChlorineConcentration',
'CinCECGtorso', 'Coffee', 'Computers', 'CricketX', 'CricketY',
'CricketZ', 'Crop', 'DiatomSizeReduction',
'DistalPhalanxOutlineAgeGroup', 'DistalPhalanxOutlineCorrect',
'DistalPhalanxTW', 'DodgerLoopDay', 'DodgerLoopGame',
'DodgerLoopWeekend', 'Earthquakes', 'ECG200', 'ECG5000', 'ECGFiveDays',
'ElectricDevices', 'EOGHorizontalSignal', 'EOGVerticalSignal',
'EthanolLevel', 'FaceAll', 'FaceFour', 'FacesUCR', 'FiftyWords',
'Fish', 'FordA', 'FordB', 'FreezerRegularTrain', 'FreezerSmallTrain',
'Fungi', 'GestureMidAirD1', 'GestureMidAirD2', 'GestureMidAirD3',
'GesturePebbleZ1', 'GesturePebbleZ2', 'GunPoint', 'GunPointAgeSpan',
'GunPointMaleVersusFemale', 'GunPointOldVersusYoung', 'Ham',
'HandOutlines', 'Haptics', 'Herring', 'HouseTwenty', 'InlineSkate',
'InsectEPGRegularTrain', 'InsectEPGSmallTrain', 'InsectWingbeatSound',
'ItalyPowerDemand', 'LargeKitchenAppliances', 'Lightning2',
'Lightning7', 'Mallat', 'Meat', 'MedicalImages', 'MelbournePedestrian',
'MiddlePhalanxOutlineAgeGroup', 'MiddlePhalanxOutlineCorrect',
'MiddlePhalanxTW', 'MixedShapes', 'MixedShapesSmallTrain',
'MoteStrain', 'NonInvasiveFetalECGThorax1',
'NonInvasiveFetalECGThorax2', 'OliveOil', 'OSULeaf',
'PhalangesOutlinesCorrect', 'Phoneme', 'PickupGestureWiimoteZ',
'PigAirwayPressure', 'PigArtPressure', 'PigCVP', 'PLAID', 'Plane',
'PowerCons', 'ProximalPhalanxOutlineAgeGroup',
'ProximalPhalanxOutlineCorrect', 'ProximalPhalanxTW',
'RefrigerationDevices', 'Rock', 'ScreenType', 'SemgHandGenderCh2',
'SemgHandMovementCh2', 'SemgHandSubjectCh2', 'ShakeGestureWiimoteZ',
'ShapeletSim', 'ShapesAll', 'SmallKitchenAppliances', 'SmoothSubspace',
        'SonyAIBORobotSurface1', 'SonyAIBORobotSurface2', 'StarLightCurves',
'Strawberry', 'SwedishLeaf', 'Symbols', 'SyntheticControl',
'ToeSegmentation1', 'ToeSegmentation2', 'Trace', 'TwoLeadECG',
'TwoPatterns', 'UMD', 'UWaveGestureLibraryAll', 'UWaveGestureLibraryX',
'UWaveGestureLibraryY', 'UWaveGestureLibraryZ', 'Wafer', 'Wine',
'WordSynonyms', 'Worms', 'WormsTwoClass', 'Yoga'
]
def get_UCR_multivariate_list():
return [
'ArticularyWordRecognition', 'AtrialFibrillation', 'BasicMotions',
'CharacterTrajectories', 'Cricket', 'DuckDuckGeese', 'EigenWorms',
'Epilepsy', 'EthanolConcentration', 'ERing', 'FaceDetection',
'FingerMovements', 'HandMovementDirection', 'Handwriting', 'Heartbeat',
'JapaneseVowels', 'Libras', 'LSST', 'InsectWingbeat', 'MotorImagery',
'NATOPS', 'PenDigits', 'PEMS-SF', 'PhonemeSpectra', 'RacketSports',
'SelfRegulationSCP1', 'SelfRegulationSCP2', 'SpokenArabicDigits',
'StandWalkJump', 'UWaveGestureLibrary'
]
# hide
_camel_re1 = re.compile('(.)([A-Z][a-z]+)')
_camel_re2 = re.compile('([a-z0-9])([A-Z])')
def camel2snake(name):
"Convert CamelCase to snake_case"
s1 = re.sub(_camel_re1, r'\1_\2', name)
return re.sub(_camel_re2, r'\1_\2', s1).lower()
def camel2capitalsnake(name):
return camel2snake(name).upper()
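# For example (illustrative): camel2snake('StandWalkJump') returns 'stand_walk_jump'
# and camel2capitalsnake('StandWalkJump') returns 'STAND_WALK_JUMP'.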
# hide
# urls_ts = [f'{camel2capitalsnake(n)} = {n}.zip' for n in get_UCR_multivariate_list()]
# urls_ts
#hide
# MULTI_ARTICULARY_WORD_RECOGNITION = f'{URL}ArticularyWordRecognition.zip',
# MULTI_ATRIAL_FIBRILLATION = f'{URL}AtrialFibrillation.zip',
# MULTI_BASIC_MOTIONS = f'{URL}BasicMotions.zip',
# MULTI_CHARACTER_TRAJECTORIES = f'{URL}CharacterTrajectories.zip',
# MULTI_CRICKET = f'{URL}Cricket.zip',
# MULTI_DUCK_DUCK_GEESE = f'{URL}DuckDuckGeese.zip',
# MULTI_EIGEN_WORMS = f'{URL}EigenWorms.zip',
# MULTI_EPILEPSY = f'{URL}Epilepsy.zip',
# MULTI_ETHANOL_CONCENTRATION = f'{URL}EthanolConcentration.zip',
# MULTI_ERING = f'{URL}ERing.zip',
# MULTI_FACE_DETECTION = f'{URL}FaceDetection.zip',
# MULTI_FINGER_MOVEMENTS = f'{URL}FingerMovements.zip',
# MULTI_HAND_MOVEMENT_DIRECTION = f'{URL}HandMovementDirection.zip',
# MULTI_HANDWRITING = f'{URL}Handwriting.zip',
# MULTI_HEARTBEAT = f'{URL}Heartbeat.zip',
# MULTI_JAPANESE_VOWELS = f'{URL}JapaneseVowels.zip',
# MULTI_LIBRAS = f'{URL}Libras.zip',
# MULTI_LSST = f'{URL}LSST.zip',
# MULTI_INSECT_WINGBEAT = f'{URL}InsectWingbeat.zip',
# MULTI_MOTOR_IMAGERY = f'{URL}MotorImagery.zip',
# MULTI_NATOPS = f'{URL}NATOPS.zip',
# MULTI_PEN_DIGITS = f'{URL}PenDigits.zip',
# MULTI_PEMS-SF = f'{URL}PEMS-SF.zip',
# MULTI_PHONEME_SPECTRA = f'{URL}PhonemeSpectra.zip',
# MULTI_RACKET_SPORTS = f'{URL}RacketSports.zip',
# MULTI_SELF_REGULATION_SCP1 = f'{URL}SelfRegulationSCP1.zip',
# MULTI_SELF_REGULATION_SCP2 = f'{URL}SelfRegulationSCP2.zip',
# MULTI_SPOKEN_ARABIC_DIGITS = f'{URL}SpokenArabicDigits.zip',
# MULTI_STAND_WALK_JUMP = f'{URL}StandWalkJump.zip',
# MULTI_U_WAVE_GESTURE_LIBRARY = f'{URL}UWaveGestureLibrary'
# hide
# urls_ts = [f'{camel2capitalsnake(n)} = {n}.zip' for n in get_UCR_univariate_list()]
# urls_ts
# hide
# UNI_ACSF1 = f'{URL}ACSF1.zip,
# UNI_ADIAC = f'{URL}Adiac.zip,
# UNI_ALL_GESTURE_WIIMOTE_X = f'{URL}AllGestureWiimoteX.zip,
# UNI_ALL_GESTURE_WIIMOTE_Y = f'{URL}AllGestureWiimoteY.zip,
# UNI_ALL_GESTURE_WIIMOTE_Z = f'{URL}AllGestureWiimoteZ.zip,
# UNI_ARROW_HEAD = f'{URL}ArrowHead.zip,
# UNI_BEEF = f'{URL}Beef.zip,
# UNI_BEETLE_FLY = f'{URL}BeetleFly.zip,
# UNI_BIRD_CHICKEN = f'{URL}BirdChicken.zip,
# UNI_BME = f'{URL}BME.zip,
# UNI_CAR = f'{URL}Car.zip,
# UNI_CBF = f'{URL}CBF.zip,
# UNI_CHINATOWN = f'{URL}Chinatown.zip,
# UNI_CHLORINE_CONCENTRATION = f'{URL}ChlorineConcentration.zip,
# UNI_CIN_CEC_GTORSO = f'{URL}CinCECGtorso.zip,
# UNI_COFFEE = f'{URL}Coffee.zip,
# UNI_COMPUTERS = f'{URL}Computers.zip,
# UNI_CRICKET_X = f'{URL}CricketX.zip,
# UNI_CRICKET_Y = f'{URL}CricketY.zip,
# UNI_CRICKET_Z = f'{URL}CricketZ.zip,
# UNI_CROP = f'{URL}Crop.zip,
# UNI_DIATOM_SIZE_REDUCTION = f'{URL}DiatomSizeReduction.zip,
# UNI_DISTAL_PHALANX_OUTLINE_AGE_GROUP = f'{URL}DistalPhalanxOutlineAgeGroup.zip,
# UNI_DISTAL_PHALANX_OUTLINE_CORRECT = f'{URL}DistalPhalanxOutlineCorrect.zip,
# UNI_DISTAL_PHALANX_TW = f'{URL}DistalPhalanxTW.zip,
# UNI_DODGER_LOOP_DAY = f'{URL}DodgerLoopDay.zip,
# UNI_DODGER_LOOP_GAME = f'{URL}DodgerLoopGame.zip,
# UNI_DODGER_LOOP_WEEKEND = f'{URL}DodgerLoopWeekend.zip,
# UNI_EARTHQUAKES = f'{URL}Earthquakes.zip,
# UNI_ECG200 = f'{URL}ECG200.zip,
# UNI_ECG5000 = f'{URL}ECG5000.zip,
# UNI_ECG_FIVE_DAYS = f'{URL}ECGFiveDays.zip,
# UNI_ELECTRIC_DEVICES = f'{URL}ElectricDevices.zip,
# UNI_EOG_HORIZONTAL_SIGNAL = f'{URL}EOGHorizontalSignal.zip,
# UNI_EOG_VERTICAL_SIGNAL = f'{URL}EOGVerticalSignal.zip,
# UNI_ETHANOL_LEVEL = f'{URL}EthanolLevel.zip,
# UNI_FACE_ALL = f'{URL}FaceAll.zip,
# UNI_FACE_FOUR = f'{URL}FaceFour.zip,
# UNI_FACES_UCR = f'{URL}FacesUCR.zip,
# UNI_FIFTY_WORDS = f'{URL}FiftyWords.zip,
# UNI_FISH = f'{URL}Fish.zip,
# UNI_FORD_A = f'{URL}FordA.zip,
# UNI_FORD_B = f'{URL}FordB.zip,
# UNI_FREEZER_REGULAR_TRAIN = f'{URL}FreezerRegularTrain.zip,
# UNI_FREEZER_SMALL_TRAIN = f'{URL}FreezerSmallTrain.zip,
# UNI_FUNGI = f'{URL}Fungi.zip,
# UNI_GESTURE_MID_AIR_D1 = f'{URL}GestureMidAirD1.zip,
# UNI_GESTURE_MID_AIR_D2 = f'{URL}GestureMidAirD2.zip,
# UNI_GESTURE_MID_AIR_D3 = f'{URL}GestureMidAirD3.zip,
# UNI_GESTURE_PEBBLE_Z1 = f'{URL}GesturePebbleZ1.zip,
# UNI_GESTURE_PEBBLE_Z2 = f'{URL}GesturePebbleZ2.zip,
# UNI_GUN_POINT = f'{URL}GunPoint.zip,
# UNI_GUN_POINT_AGE_SPAN = f'{URL}GunPointAgeSpan.zip,
# UNI_GUN_POINT_MALE_VERSUS_FEMALE = f'{URL}GunPointMaleVersusFemale.zip,
# UNI_GUN_POINT_OLD_VERSUS_YOUNG = f'{URL}GunPointOldVersusYoung.zip,
# UNI_HAM = f'{URL}Ham.zip,
# UNI_HAND_OUTLINES = f'{URL}HandOutlines.zip,
# UNI_HAPTICS = f'{URL}Haptics.zip,
# UNI_HERRING = f'{URL}Herring.zip,
# UNI_HOUSE_TWENTY = f'{URL}HouseTwenty.zip,
# UNI_INLINE_SKATE = f'{URL}InlineSkate.zip,
# UNI_INSECT_EPG_REGULAR_TRAIN = f'{URL}InsectEPGRegularTrain.zip,
# UNI_INSECT_EPG_SMALL_TRAIN = f'{URL}InsectEPGSmallTrain.zip,
# UNI_INSECT_WINGBEAT_SOUND = f'{URL}InsectWingbeatSound.zip,
# UNI_ITALY_POWER_DEMAND = f'{URL}ItalyPowerDemand.zip,
# UNI_LARGE_KITCHEN_APPLIANCES = f'{URL}LargeKitchenAppliances.zip,
# UNI_LIGHTNING2 = f'{URL}Lightning2.zip,
# UNI_LIGHTNING7 = f'{URL}Lightning7.zip,
# UNI_MALLAT = f'{URL}Mallat.zip,
# UNI_MEAT = f'{URL}Meat.zip,
# UNI_MEDICAL_IMAGES = f'{URL}MedicalImages.zip,
# UNI_MELBOURNE_PEDESTRIAN = f'{URL}MelbournePedestrian.zip,
# UNI_MIDDLE_PHALANX_OUTLINE_AGE_GROUP = f'{URL}MiddlePhalanxOutlineAgeGroup.zip,
# UNI_MIDDLE_PHALANX_OUTLINE_CORRECT = f'{URL}MiddlePhalanxOutlineCorrect.zip,
# UNI_MIDDLE_PHALANX_TW = f'{URL}MiddlePhalanxTW.zip,
# UNI_MIXED_SHAPES = f'{URL}MixedShapes.zip,
# UNI_MIXED_SHAPES_SMALL_TRAIN = f'{URL}MixedShapesSmallTrain.zip,
# UNI_MOTE_STRAIN = f'{URL}MoteStrain.zip,
# UNI_NON_INVASIVE_FETAL_ECG_THORAX1 = f'{URL}NonInvasiveFetalECGThorax1.zip,
# UNI_NON_INVASIVE_FETAL_ECG_THORAX2 = f'{URL}NonInvasiveFetalECGThorax2.zip,
# UNI_OLIVE_OIL = f'{URL}OliveOil.zip,
# UNI_OSU_LEAF = f'{URL}OSULeaf.zip,
# UNI_PHALANGES_OUTLINES_CORRECT = f'{URL}PhalangesOutlinesCorrect.zip,
# UNI_PHONEME = f'{URL}Phoneme.zip,
# UNI_PICKUP_GESTURE_WIIMOTE_Z = f'{URL}PickupGestureWiimoteZ.zip,
# UNI_PIG_AIRWAY_PRESSURE = f'{URL}PigAirwayPressure.zip,
# UNI_PIG_ART_PRESSURE = f'{URL}PigArtPressure.zip,
# UNI_PIG_CVP = f'{URL}PigCVP.zip,
# UNI_PLAID = f'{URL}PLAID.zip,
# UNI_PLANE = f'{URL}Plane.zip,
# UNI_POWER_CONS = f'{URL}PowerCons.zip,
# UNI_PROXIMAL_PHALANX_OUTLINE_AGE_GROUP = f'{URL}ProximalPhalanxOutlineAgeGroup.zip,
# UNI_PROXIMAL_PHALANX_OUTLINE_CORRECT = f'{URL}ProximalPhalanxOutlineCorrect.zip,
# UNI_PROXIMAL_PHALANX_TW = f'{URL}ProximalPhalanxTW.zip,
# UNI_REFRIGERATION_DEVICES = f'{URL}RefrigerationDevices.zip,
# UNI_ROCK = f'{URL}Rock.zip,
# UNI_SCREEN_TYPE = f'{URL}ScreenType.zip,
# UNI_SEMG_HAND_GENDER_CH2 = f'{URL}SemgHandGenderCh2.zip,
# UNI_SEMG_HAND_MOVEMENT_CH2 = f'{URL}SemgHandMovementCh2.zip,
# UNI_SEMG_HAND_SUBJECT_CH2 = f'{URL}SemgHandSubjectCh2.zip,
# UNI_SHAKE_GESTURE_WIIMOTE_Z = f'{URL}ShakeGestureWiimoteZ.zip,
# UNI_SHAPELET_SIM = f'{URL}ShapeletSim.zip,
# UNI_SHAPES_ALL = f'{URL}ShapesAll.zip,
# UNI_SMALL_KITCHEN_APPLIANCES = f'{URL}SmallKitchenAppliances.zip,
# UNI_SMOOTH_SUBSPACE = f'{URL}SmoothSubspace.zip,
# UNI_SONY_AIBO_ROBOT_SURFACE1 = f'{URL}SonyAIBORobotSurface1.zip,
# UNI_SONY_AIBO_ROBOT_SURFACE2 = f'{URL}SonyAIBORobotSurface2.zip,
# UNI_STARLIGHT_CURVES = f'{URL}StarlightCurves.zip,
# UNI_STRAWBERRY = f'{URL}Strawberry.zip,
# UNI_SWEDISH_LEAF = f'{URL}SwedishLeaf.zip,
# UNI_SYMBOLS = f'{URL}Symbols.zip,
# UNI_SYNTHETIC_CONTROL = f'{URL}SyntheticControl.zip,
# UNI_TOE_SEGMENTATION1 = f'{URL}ToeSegmentation1.zip,
# UNI_TOE_SEGMENTATION2 = f'{URL}ToeSegmentation2.zip,
# UNI_TRACE = f'{URL}Trace.zip,
# UNI_TWO_LEAD_ECG = f'{URL}TwoLeadECG.zip,
# UNI_TWO_PATTERNS = f'{URL}TwoPatterns.zip,
# UNI_UMD = f'{URL}UMD.zip,
# UNI_U_WAVE_GESTURE_LIBRARY_ALL = f'{URL}UWaveGestureLibraryAll.zip,
# UNI_U_WAVE_GESTURE_LIBRARY_X = f'{URL}UWaveGestureLibraryX.zip,
# UNI_U_WAVE_GESTURE_LIBRARY_Y = f'{URL}UWaveGestureLibraryY.zip,
# UNI_U_WAVE_GESTURE_LIBRARY_Z = f'{URL}UWaveGestureLibraryZ.zip,
# UNI_WAFER = f'{URL}Wafer.zip,
# UNI_WINE = f'{URL}Wine.zip,
# UNI_WORD_SYNONYMS = f'{URL}WordSynonyms.zip,
# UNI_WORMS = f'{URL}Worms.zip,
# UNI_WORMS_TWO_CLASS = f'{URL}WormsTwoClass.zip,
# UNI_YOGA = f'{URL}Yoga.zipUNI_
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
# notebook2script()
notebook2script(fname='80_timeseries_data.ipynb')
###Output
Converted 80_timeseries_data.ipynb.
|
Mahfuzur_Rahman_py10_1_1d.ipynb | ###Markdown
**Python Module**
* modules are files that have python code in them
* and can be imported into other code files
* a module is a a file containing a set of functions you want to include in you application
* a module is the same as a code library
* to create a module, just save the code you want in a file with the file extension `.py` **Create a Module**
* make a file `mymodule.py` and put the following code in it
```
def greeting(name):
print("Hello " + name)
```
* then we can import the module we created using the import statement **Use a Module**
###Code
# import model
import mymodule
# use the function greeting from mymodule
mymodule.greeting("Jill")
###Output
Hello Jill
###Markdown
**Aliasing a Module**
* you can create an alias when you import a module, using the as keyword
###Code
# import mymodule with alias 'mm'
import mymodule as mm
# call the function greeting from alias 'mm'
mm.greeting('Jill')
###Output
Hello Jill
###Markdown
**Built-in Modules**
**Datetime module - Built-in Module**
* a date in Python is not a datatype of its own, but we can import a module named `datetime` to work with dates as date objects
***Python Date - .now()***
###Code
import datetime
time_now = datetime.datetime.now()
print(time_now)
print(time_now.year)
print(time_now.day)
print(time_now.strftime("%A"))
###Output
2020-12-23 15:51:30.733939
2020
23
Wednesday
###Markdown
***Create a Datetime Object***
###Code
new_date = datetime.datetime(2020, 12, 2)
print(new_date)
###Output
2020-12-02 00:00:00
###Markdown
***String format Time - `strftime`***
* the datetime object has a method for formatting date objects into readable strings
* the method is called `strftime()`, and takes one parameter, format, to specify the format of the returned string
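* for example, several format codes can be combined in one string (illustrative snippet, not part of the original lesson):

```
import datetime
x = datetime.datetime(2018, 6, 1, 14, 30, 0)
print(x.strftime("%d/%m/%Y %H:%M:%S")) # 01/06/2018 14:30:00
```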
###Code
# create data
x = datetime.datetime(2018,6,1)
# get the weekday name
print(x.strftime("%A"))
#month
print(x.strftime("%B"))
#weekday
print(x.strftime("%d"))
# Year
print(x.strftime("%Y"))
import math
sq_root = math.sqrt(64)
print(sq_root)
round_higher = math.ceil(1.4)
round_lower = math.floor(1.4)
print(round_higher)
print(round_lower)
pi_value = math.pi
print(pi_value)
import random
random_integer = random.randint(0,9)
print(random_integer)
random_number = random.randrange(9)
print(random_number)
###Output
_____no_output_____ |
day-14.ipynb | ###Markdown
Day 14 Time to re-fuel at Saturn. We can use the raw materials from Saturn's rings, as long as we follow the formulas that are given to us. Part 1 What's the minimum number of `ORE` needed to produce 1 `FUEL`?
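As a rough sketch of the data layout the `Nanofactory` class below builds while parsing (illustrative values, see the implementation for details):

```
# "7 A, 1 E => 1 FUEL" is stored in formula_dict as
#     {(1, 'FUEL'): [(7, 'A'), (1, 'E')]}
# materials_needed_dict starts as {..., 'FUEL': 1, 'ORE': 0} and ORE is tallied as it is consumed.
```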
###Code
import math
from typing import Dict, List, Union, Tuple
class Nanofactory:
def __init__(self, formulas: str):
self.formula_dict = self.make_formula_dict(formulas)
self.materials_count_dict = self.make_materials_count_dict(self.formula_dict)
self.materials_needed_dict = self.make_materials_needed_dict()
@staticmethod
def make_formula_dict(formulas: str) -> Dict[Tuple[int, str], List[Tuple[int, str]]]:
components = [component.strip() for formula in formulas.split("\n") for component in formula.split("=>")]
input_components = [component.split(", ") for component in components[0::2]]
output_components = [tuple(component.split(" ")) for component in components[1::2]]
# Convert output amounts to integers
output_components = [(int(component[0]), component[1]) for component in output_components]
formula_dict = dict(zip(output_components, input_components))
for output_component in formula_dict:
formula_dict[output_component] = [
tuple(component.split(" ")) for component in formula_dict[output_component]
]
formula_dict[output_component] = [
(int(component[0]), component[1]) for component in formula_dict[output_component]
]
return formula_dict
@staticmethod
def make_materials_count_dict(formula_dict: Dict[tuple, List[tuple]]) -> Dict[str, int]:
materials = [key[1] for key in formula_dict]
material_amounts = [0 for _ in range(len(materials))]
material_amount_dict = dict(zip(materials, material_amounts))
return material_amount_dict
def make_materials_needed_dict(self) -> Dict[str, int]:
materials_needed_dict = self.materials_count_dict.copy()
# Start out with one FUEL so we can work backwards
materials_needed_dict["FUEL"] = 1
# We'll need to know how much ORE we needed to get to 1 FUEL
# Doesn't need to be in materials_count_dict since it's a limitless resource
materials_needed_dict["ORE"] = 0
return materials_needed_dict
def find_needed_materials(self) -> List[Tuple[int, str]]:
return [
(count - self.materials_count_dict.get(material), material)
for material, count in self.materials_needed_dict.items()
if material != "ORE"
and self.materials_count_dict.get(material) < count
]
    def translate_material_formula(self, material: Tuple[int, str], debug: bool = False):
        """Given a material, translate it to its formula. Subtract the original formula
from the materials_needed_dict after translation. Add the formula requirements
to the materials_needed_dict. Add the total material after translation to the
materials_count_dict
"""
material_amount, material_name = material
for formula in self.formula_dict:
formula_amount, formula_name = formula
if formula_name == material_name:
formula_count = math.ceil(material_amount / formula_amount)
if debug:
print(f"****Using {formula} formula {formula_count} time(s) for {material} material")
print(f"****Adding {formula_amount * formula_count} {material_name} to the count_dict")
self.materials_count_dict[material_name] += formula_amount * formula_count
for material in self.formula_dict.get(formula):
material_amount, material_name = material
self.materials_needed_dict[material_name] += material_amount * formula_count
if debug:
print(f"****Adding {material_amount * formula_count} {material_name} to the needed_dict")
# Just want to do this for one material at a time
break
def get_ore_requirements(self, debug: bool = False) -> int:
"""Return the minimum number of ORE required to get 1 FUEL"""
needed_materials = self.find_needed_materials()
while needed_materials:
if debug:
print(f"Need materials to make {needed_materials}")
for material in needed_materials:
if debug:
print(f"**Get formula for {material}")
print(f"**Needed materials before: {self.materials_needed_dict}")
print(f"**Have materials before: {self.materials_count_dict}")
self.translate_material_formula(material)
if debug:
print(f"**Needed materials after: {self.materials_needed_dict}")
print(f"**Have materials after: {self.materials_count_dict}")
needed_materials = self.find_needed_materials()
return self.materials_needed_dict.get("ORE")
test_formulas = """10 ORE => 10 A
1 ORE => 1 B
7 A, 1 B => 1 C
7 A, 1 C => 1 D
7 A, 1 D => 1 E
7 A, 1 E => 1 FUEL
"""
# Test the nitty gritty operations
test_nanofactory = Nanofactory(test_formulas)
assert test_nanofactory.materials_needed_dict == {'A': 0, 'B': 0, 'C': 0, 'D': 0, 'E': 0, 'FUEL': 1, 'ORE': 0}
assert test_nanofactory.materials_count_dict == {'A': 0, 'B': 0, 'C': 0, 'D': 0, 'E': 0, 'FUEL': 0}
assert test_nanofactory.find_needed_materials() == [(1, 'FUEL')]
test_nanofactory.translate_material_formula((1, 'FUEL'))
assert test_nanofactory.materials_needed_dict == {'A': 7, 'B': 0, 'C': 0, 'D': 0, 'E': 1, 'FUEL': 1, 'ORE': 0}
assert test_nanofactory.materials_count_dict == {'A': 0, 'B': 0, 'C': 0, 'D': 0, 'E': 0, 'FUEL': 1}
assert test_nanofactory.find_needed_materials() == [(7, 'A'), (1, 'E')]
test_nanofactory.translate_material_formula((7, 'A'))
test_nanofactory.translate_material_formula((1, 'E'))
assert test_nanofactory.materials_needed_dict == {'A': 14, 'B': 0, 'C': 0, 'D': 1, 'E': 1, 'FUEL': 1, 'ORE': 10}
assert test_nanofactory.materials_count_dict == {'A': 10, 'B': 0, 'C': 0, 'D': 0, 'E': 1, 'FUEL': 1}
assert test_nanofactory.find_needed_materials() == [(4, 'A'), (1, 'D')]
test_nanofactory.translate_material_formula((4, 'A'))
test_nanofactory.translate_material_formula((1, 'D'))
assert test_nanofactory.materials_needed_dict == {'A': 21, 'B': 0, 'C': 1, 'D': 1, 'E': 1, 'FUEL': 1, 'ORE': 20}
assert test_nanofactory.materials_count_dict == {'A': 20, 'B': 0, 'C': 0, 'D': 1, 'E': 1, 'FUEL': 1}
assert test_nanofactory.find_needed_materials() == [(1, 'A'), (1, 'C')]
test_nanofactory.translate_material_formula((1, 'A'))
test_nanofactory.translate_material_formula((1, 'C'))
assert test_nanofactory.materials_needed_dict == {'A': 28, 'B': 1, 'C': 1, 'D': 1, 'E': 1, 'FUEL': 1, 'ORE': 30}
assert test_nanofactory.materials_count_dict == {'A': 30, 'B': 0, 'C': 1, 'D': 1, 'E': 1, 'FUEL': 1}
assert test_nanofactory.find_needed_materials() == [(1, 'B')]
test_nanofactory.translate_material_formula((1, 'B'))
assert test_nanofactory.materials_needed_dict == {'A': 28, 'B': 1, 'C': 1, 'D': 1, 'E': 1, 'FUEL': 1, 'ORE': 31}
assert test_nanofactory.materials_count_dict == {'A': 30, 'B': 1, 'C': 1, 'D': 1, 'E': 1, 'FUEL': 1}
assert test_nanofactory.find_needed_materials() == []
# Test the big picture
test_formulas = """10 ORE => 10 A
1 ORE => 1 B
7 A, 1 B => 1 C
7 A, 1 C => 1 D
7 A, 1 D => 1 E
7 A, 1 E => 1 FUEL"""
test_nanofactory = Nanofactory(test_formulas)
assert test_nanofactory.get_ore_requirements() == 31
test_formulas = """9 ORE => 2 A
8 ORE => 3 B
7 ORE => 5 C
3 A, 4 B => 1 AB
5 B, 7 C => 1 BC
4 C, 1 A => 1 CA
2 AB, 3 BC, 4 CA => 1 FUEL"""
test_nanofactory = Nanofactory(test_formulas)
assert test_nanofactory.get_ore_requirements() == 165
test_formulas = """157 ORE => 5 NZVS
165 ORE => 6 DCFZ
44 XJWVT, 5 KHKGT, 1 QDVJ, 29 NZVS, 9 GPVTF, 48 HKGWZ => 1 FUEL
12 HKGWZ, 1 GPVTF, 8 PSHF => 9 QDVJ
179 ORE => 7 PSHF
177 ORE => 5 HKGWZ
7 DCFZ, 7 PSHF => 2 XJWVT
165 ORE => 2 GPVTF
3 DCFZ, 7 NZVS, 5 HKGWZ, 10 PSHF => 8 KHKGT"""
test_nanofactory = Nanofactory(test_formulas)
assert test_nanofactory.get_ore_requirements() == 13312
test_formulas = """2 VPVL, 7 FWMGM, 2 CXFTF, 11 MNCFX => 1 STKFG
17 NVRVD, 3 JNWZP => 8 VPVL
53 STKFG, 6 MNCFX, 46 VJHF, 81 HVMC, 68 CXFTF, 25 GNMV => 1 FUEL
22 VJHF, 37 MNCFX => 5 FWMGM
139 ORE => 4 NVRVD
144 ORE => 7 JNWZP
5 MNCFX, 7 RFSQX, 2 FWMGM, 2 VPVL, 19 CXFTF => 3 HVMC
5 VJHF, 7 MNCFX, 9 VPVL, 37 CXFTF => 6 GNMV
145 ORE => 6 MNCFX
1 NVRVD => 8 CXFTF
1 VJHF, 6 MNCFX => 4 RFSQX
176 ORE => 6 VJHF"""
test_nanofactory = Nanofactory(test_formulas)
assert test_nanofactory.get_ore_requirements() == 180697
test_formulas = """171 ORE => 8 CNZTR
7 ZLQW, 3 BMBT, 9 XCVML, 26 XMNCP, 1 WPTQ, 2 MZWV, 1 RJRHP => 4 PLWSL
114 ORE => 4 BHXH
14 VRPVC => 6 BMBT
6 BHXH, 18 KTJDG, 12 WPTQ, 7 PLWSL, 31 FHTLT, 37 ZDVW => 1 FUEL
6 WPTQ, 2 BMBT, 8 ZLQW, 18 KTJDG, 1 XMNCP, 6 MZWV, 1 RJRHP => 6 FHTLT
15 XDBXC, 2 LTCX, 1 VRPVC => 6 ZLQW
13 WPTQ, 10 LTCX, 3 RJRHP, 14 XMNCP, 2 MZWV, 1 ZLQW => 1 ZDVW
5 BMBT => 4 WPTQ
189 ORE => 9 KTJDG
1 MZWV, 17 XDBXC, 3 XCVML => 2 XMNCP
12 VRPVC, 27 CNZTR => 2 XDBXC
15 KTJDG, 12 BHXH => 5 XCVML
3 BHXH, 2 VRPVC => 7 MZWV
121 ORE => 7 VRPVC
7 XCVML => 6 RJRHP
5 BHXH, 4 VRPVC => 5 LTCX"""
test_nanofactory = Nanofactory(test_formulas)
test_nanofactory.get_ore_requirements()
assert test_nanofactory.get_ore_requirements() == 2210736
# Puzzle input
formulas = """3 PTZH, 14 MHDKS, 9 MPBVZ => 4 BDRP
4 VHPGT, 12 JSPDJ, 1 WNSC => 2 XCTCF
174 ORE => 4 JVNH
7 JVNH => 4 BTZH
12 XLNZ, 1 CZLDF => 8 NDHSR
1 VDVQ, 1 PTZH => 7 LXVZ
1 ZDQRT => 5 KJCJL
2 SGDXK, 6 VDVQ, 1 RLFHL => 7 GFNQ
8 JFBD => 5 VDVQ
1 SGDXK => 6 ZNBSR
2 PNZD, 1 JFBD => 7 TVRMW
11 TRXG, 4 CVHR, 1 VKXL, 63 GFNQ, 1 MGNW, 59 PFKHV, 22 KFPT, 3 KFCJC => 1 FUEL
6 BTZH => 8 GTWKH
5 WHVKJ, 1 QMZJX => 6 XLNZ
18 JSPDJ, 11 QMZJX => 5 RWQC
2 WFHXK => 4 JSPDJ
2 GHZW => 3 RLFHL
4 WHVKJ, 2 RWQC, 2 PTZH => 8 WNSC
1 QPJVR => 2 VFXSL
1 NCMQC => 6 GDLFK
199 ORE => 5 PNZD
2 RZND, 1 GTWKH, 2 VFXSL => 1 WHVKJ
1 VDVQ => 8 WFHXK
2 VFXSL => 4 VHMT
21 SBLQ, 4 XLNZ => 6 MGNW
6 SGDXK, 13 VDVQ => 9 NBSMG
1 SLKRN => 5 VKXL
3 ZNBSR, 1 WNSC => 1 TKWH
2 KJCJL => 6 LNRX
3 HPSK, 4 KZQC, 6 BPQBR, 2 MHDKS, 5 VKXL, 13 NDHSR => 9 TRXG
1 TKWH, 36 BDRP => 5 BNQFL
2 BJSWZ => 7 RZND
2 SLKRN, 1 NDHSR, 11 PTZH, 1 HPSK, 1 NCMQC, 1 BNQFL, 10 GFNQ => 2 KFCJC
3 LXVZ, 9 RWQC, 2 KJCJL => 7 VHPGT
2 GTWKH, 1 LNRX, 2 RZND => 1 MHDKS
18 RZND, 2 VHPGT, 7 JSPDJ => 9 NCMQC
2 NBSMG, 3 KJCJL => 9 BPQBR
124 ORE => 1 JFBD
1 QPJVR, 2 QMZJX => 4 SGDXK
4 BPQBR, 1 LNRX => 2 KZQC
1 KJCJL, 15 GTWKH => 2 SBLQ
1 ZDQRT, 3 CZLDF, 10 GDLFK, 1 BDRP, 10 VHMT, 6 XGVF, 1 RLFHL => 7 CVHR
1 KZQC => 8 MPBVZ
27 GRXH, 3 LNRX, 1 BPQBR => 6 XGVF
1 XCTCF => 6 KFPT
7 JFBD => 4 GHZW
19 VHPGT => 2 SLKRN
9 JFBD, 1 TVRMW, 10 BTZH => 6 BJSWZ
6 ZNBSR => 4 PTZH
1 JSPDJ, 2 BHNV, 1 RLFHL => 3 QMZJX
2 RCWX, 1 WNSC => 4 GRXH
2 TKWH, 5 NCMQC, 9 GRXH => 3 HPSK
32 KZQC => 5 RCWX
4 GHZW, 1 TVRMW => 1 QPJVR
2 QPJVR, 8 GHZW => 5 ZDQRT
1 VDVQ, 1 WFHXK => 6 BHNV
1 ZNBSR, 6 TKWH => 8 CZLDF
1 MGNW => 5 PFKHV
"""
nanofactory = Nanofactory(formulas)
nanofactory.get_ore_requirements()
###Output
_____no_output_____
###Markdown
Part 2: Great, now that we know how much ore it takes to produce one fuel, we check our cargo hold to see how much ORE we can hold -- 1 trillion ORE (Dr. Evil smile :smiling_imp:). Once we've collected 1 trillion ORE, how much fuel can we make? So now, ORE is not an unlimited resource -- we only have one trillion of it. Given that constraint, how much fuel can we produce until we run out of ORE?
###Code
test_formulas = """171 ORE => 8 CNZTR
7 ZLQW, 3 BMBT, 9 XCVML, 26 XMNCP, 1 WPTQ, 2 MZWV, 1 RJRHP => 4 PLWSL
114 ORE => 4 BHXH
14 VRPVC => 6 BMBT
6 BHXH, 18 KTJDG, 12 WPTQ, 7 PLWSL, 31 FHTLT, 37 ZDVW => 1 FUEL
6 WPTQ, 2 BMBT, 8 ZLQW, 18 KTJDG, 1 XMNCP, 6 MZWV, 1 RJRHP => 6 FHTLT
15 XDBXC, 2 LTCX, 1 VRPVC => 6 ZLQW
13 WPTQ, 10 LTCX, 3 RJRHP, 14 XMNCP, 2 MZWV, 1 ZLQW => 1 ZDVW
5 BMBT => 4 WPTQ
189 ORE => 9 KTJDG
1 MZWV, 17 XDBXC, 3 XCVML => 2 XMNCP
12 VRPVC, 27 CNZTR => 2 XDBXC
15 KTJDG, 12 BHXH => 5 XCVML
3 BHXH, 2 VRPVC => 7 MZWV
121 ORE => 7 VRPVC
7 XCVML => 6 RJRHP
5 BHXH, 4 VRPVC => 5 LTCX"""
test_nanofactory = Nanofactory(test_formulas)
# This is a very inefficient way to do it but it gives the right answer :shrug:
fuel_generated = 0
while test_nanofactory.get_ore_requirements() < 1000000000000:
fuel_generated += 1
ore_requirements = test_nanofactory.get_ore_requirements()
# Increment another FUEL
test_nanofactory.materials_needed_dict["FUEL"] += 1
assert fuel_generated == 460664
# If this **never** runs, then we can try _incrementing_/_decrementing_ the FUEL needed
# until we hit an ORE requirement over/under 1000000000000
nanofactory = Nanofactory(formulas)
fuel_generated = 0
while nanofactory.get_ore_requirements() < 1000000000000:
fuel_generated += 1
ore_requirements = nanofactory.get_ore_requirements()
# Increment another FUEL
nanofactory.materials_needed_dict["FUEL"] += 1
fuel_generated
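# The linear scan above calls get_ore_requirements() once per unit of FUEL, which is
# slow when the answer is large. A faster sketch (an assumption, not the original
# solution): binary-search the FUEL amount, building a fresh Nanofactory per guess and
# setting its FUEL target directly, exactly as the loop above does one unit at a time.
def max_fuel_for_ore(formula_str: str, ore_budget: int = 1000000000000) -> int:
    lo, hi = 0, ore_budget  # FUEL can never exceed the ORE budget, so this bound is safe
    while lo < hi:
        mid = (lo + hi + 1) // 2
        factory = Nanofactory(formula_str)
        factory.materials_needed_dict["FUEL"] = mid
        if factory.get_ore_requirements() <= ore_budget:
            lo = mid  # mid FUEL is affordable, search higher
        else:
            hi = mid - 1  # too expensive, search lower
    return lo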
###Output
_____no_output_____ |
Python Functions, Packages, Input_Output, Exception Handling and Debugging/2_function_types.ipynb | ###Markdown
Types Of Functions 1. Built-in Functions2. User-defined Functions Built-in Functions 1. abs()
###Code
# find the absolute value
num = -100
print(abs(num))
###Output
100
###Markdown
2. all() Return value of the all() function: True if all elements in an iterable are true; False if any element is false. An empty iterable returns True.
###Code
lst = [1, 2, 3, 4]
print(all(lst))
lst = (0, 2, 3, 4) # 0 present in list
print(all(lst))
lst = [] #empty list always true
print(all(lst))
lst = [False, 1, 2] #False present in a list so all(lst) is False
print(all(lst))
###Output
False
###Markdown
dir() The dir() function tries to return a list of valid attributes of the object. If the object has a __dir__() method, the method will be called and must return the list of attributes. If the object doesn't have a __dir__() method, dir() tries to find information from the __dict__ attribute (if defined) and from the type object. In this case, the list returned from dir() may not be complete.
###Code
numbers = [1, 2, 3]
print(dir(numbers))
###Output
['__add__', '__class__', '__contains__', '__delattr__', '__delitem__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__gt__', '__hash__', '__iadd__', '__imul__', '__init__', '__init_subclass__', '__iter__', '__le__', '__len__', '__lt__', '__mul__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__reversed__', '__rmul__', '__setattr__', '__setitem__', '__sizeof__', '__str__', '__subclasshook__', 'append', 'clear', 'copy', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort']
###Markdown
divmod() The divmod() method takes two numbers and returns a pair of numbers (a tuple) consisting of their quotient and remainder.
###Code
print(divmod(9, 2)) #print quotient and remainder as a tuple
#try with other number
###Output
(4, 1)
###Markdown
enumerate() The enumerate() method adds counter to an iterable and returns it syntax: enumerate(iterable, start=0)
###Code
numbers = [10, 20, 30, 40]
for index, num in enumerate(numbers,1):
print("index {0} has value {1}".format(index, num))
###Output
index 1 has value 10
index 2 has value 20
index 3 has value 30
index 4 has value 40
###Markdown
filter() The filter() method constructs an iterator from elements of an iterable for which a function returns true. syntax: filter(function, iterable)
###Code
def find_positive_number(num):
"""
This function returns the positive number if num is positive
"""
if num > 0:
return num
number_list = range(-10, 10) #create a list with numbers from -10 to 10
print(list(number_list))
positive_num_lst = list(filter(find_positive_number, number_list))
print(positive_num_lst)
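# The same filter can also be written with a lambda instead of a named function:
positive_num_lst = list(filter(lambda num: num > 0, number_list))
print(positive_num_lst)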
###Output
[-10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
[1, 2, 3, 4, 5, 6, 7, 8, 9]
###Markdown
isinstance() The isinstance() function checks if the object (first argument) is an instance or subclass of classinfo class (second argument). syntax: isinstance(object, classinfo)
###Code
lst = [1, 2, 3, 4]
print(isinstance(lst, list))
#try with other datatypes tuple, set
t = (1,2,3,4)
print(isinstance(t, list))
###Output
True
False
###Markdown
map() Map applies a function to all the items in an input_list. syntax: map(function_to_apply, list_of_inputs)
###Code
numbers = [1, 2, 3, 4]
#normal method of computing num^2 for each element in the list.
squared = []
for num in numbers:
squared.append(num ** 2)
print(squared)
numbers = [1, 2, 3, 4]
def powerOfTwo(num):
return num ** 2
#using map() function
squared = list(map(powerOfTwo, numbers))
print(squared)
###Output
[1, 4, 9, 16]
###Markdown
reduce() reduce() function is for performing some computation on a list and returning the result. It applies a rolling computation to sequential pairs of values in a list.
###Code
#product of elemnts in a list
product = 1
lst = [1, 2, 3, 4]
# traditional program without reduce()
for num in lst:
product *= num
print(product)
#with reduce()
from functools import reduce # in Python 3.
def multiply(x,y):
return x*y;
product = reduce(multiply, lst)
print(product)
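# The same reduction can also be written with a lambda instead of a named function:
print(reduce(lambda x, y: x * y, lst))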
###Output
24
###Markdown
2. User-defined Functions Functions that we define ourselves to do a certain specific task are referred to as user-defined functions. If we use functions written by others in the form of a library, they can be termed library functions. Advantages 1. User-defined functions help to decompose a large program into small segments, which makes the program easy to understand, maintain and debug. 2. If repeated code occurs in a program, a function can be used to contain that code and execute it when needed by calling the function. 3. Programmers working on a large project can divide the workload by writing different functions. Example:
###Code
def product_numbers(a, b):
"""
this function returns the product of two numbers
"""
product = a * b
return product
num1 = 10
num2 = 20
print ("product of {0} and {1} is {2} ".format(num1, num2, product_numbers(num1, num2)))
###Output
product of 10 and 20 is 200
###Markdown
Python program to make a simple calculator that can add, subtract, multiply and divide
###Code
def add(a, b):
"""
This function adds two numbers
"""
return a + b
def multiply(a, b):
"""
This function multiply two numbers
"""
return a * b
def subtract(a, b):
"""
This function subtract two numbers
"""
return a - b
def division(a, b):
"""
This function divides two numbers
"""
return a / b
print("Select Option")
print("1. Addition")
print ("2. Subtraction")
print ("3. Multiplication")
print ("4. Division")
#take input from user
choice = int(input("Enter choice 1/2/3/4"))
num1 = float(input("Enter first number:"))
num2 = float(input("Enter second number:"))
if choice == 1:
print("Addition of {0} and {1} is {2}".format(num1, num2, add(num1, num2)))
elif choice == 2:
print("Subtraction of {0} and {1} is {2}".format(num1, num2, subtract(num1, num2)))
elif choice == 3:
print("Multiplication of {0} and {1} is {2}".format(num1, num2, multiply(num1, num2)))
elif choice == 4:
print("Division of {0} and {1} is {2}".format(num1, num2, division(num1, num2)))
else:
print("Invalid Choice")
###Output
Select Option
1. Addition
2. Subtraction
3. Multiplication
4. Division
Enter choice 1/2/3/41
Enter first number:6
Enter second number:8
Addition of 6.0 and 8.0 is 14.0
|
Classification/Gradient Boosting Machine/GradientBoostingClassifier.ipynb | ###Markdown
Gradient Boosting Classification
This Code template is for the Classification tasks using a simple GradientBoostingClassifier based on the Gradient Boosting Ensemble Learning Technique. Required Packages
###Code
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as se
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import classification_report,plot_confusion_matrix
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
InitializationFilepath of CSV file
###Code
#filepath
file_path= ""
###Output
_____no_output_____
###Markdown
List of features which are required for model training .
###Code
#x_values
features=[]
###Output
_____no_output_____
###Markdown
Target feature for prediction.
###Code
#y_value
target=''
###Output
_____no_output_____
###Markdown
Data Fetching Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools. We will use the pandas library to read the CSV file using its storage path, and we use the head function to display the initial rows.
###Code
df=pd.read_csv(file_path)
df.head()#displaying initial entries
###Output
_____no_output_____
###Markdown
Feature SelectionsIt is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.We will assign all the required input features to X and target/outcome to Y.
###Code
X = df[features]
Y = df[target]
###Output
_____no_output_____
###Markdown
Data Preprocessing Since the majority of the machine learning models in the Sklearn library don't handle string category data and null values, we have to explicitly remove or replace null values. The snippet below has functions which remove null values if any exist, and convert the string class data in the datasets by encoding them to integer classes.
###Code
def NullClearner(df):
if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])):
df.fillna(df.mean(),inplace=True)
return df
elif(isinstance(df, pd.Series)):
df.fillna(df.mode()[0],inplace=True)
return df
else:return df
def EncodeX(df):
return pd.get_dummies(df)
def EncodeY(df):
if len(df.unique())<=2:
return df
else:
un_EncodedT=np.sort(pd.unique(df), axis=-1, kind='mergesort')
df=LabelEncoder().fit_transform(df)
EncodedT=[xi for xi in range(len(un_EncodedT))]
print("Encoded Target: {} to {}".format(un_EncodedT,EncodedT))
return df
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=EncodeY(NullClearner(Y))
X.head()
###Output
_____no_output_____
###Markdown
Correlation MapIn order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.
###Code
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
###Output
_____no_output_____
###Markdown
Distribution Of Target Variable
###Code
plt.figure(figsize = (10,6))
se.countplot(Y)
###Output
_____no_output_____
###Markdown
Data SplittingThe train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
###Code
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 123)#performing datasplitting
###Output
_____no_output_____
###Markdown
Model
Gradient Boosting builds an additive model in a forward stage-wise fashion; it allows for the optimization of arbitrary differentiable loss functions.In each stage n_classes_ regression trees are fit on the negative gradient of the binomial or multinomial deviance loss function.
Model Tuning Parameters
1. loss : {‘deviance’, ‘exponential’}, default=’deviance’
> The loss function to be optimized. ‘deviance’ refers to deviance (= logistic regression) for classification with probabilistic outputs. For loss ‘exponential’ gradient boosting recovers the AdaBoost algorithm.
2. learning_rate : float, default=0.1
> Learning rate shrinks the contribution of each tree by learning_rate. There is a trade-off between learning_rate and n_estimators.
3. n_estimators : int, default=100
> The number of trees in the forest.
4. criterion : {‘friedman_mse’, ‘mse’, ‘mae’}, default=’friedman_mse’
> The function to measure the quality of a split. Supported criteria are ‘friedman_mse’ for the mean squared error with improvement score by Friedman, ‘mse’ for mean squared error, and ‘mae’ for the mean absolute error. The default value of ‘friedman_mse’ is generally the best as it can provide a better approximation in some cases.
5. max_depth : int, default=3
> The maximum depth of the individual regression estimators. The maximum depth limits the number of nodes in the tree. Tune this parameter for best performance; the best value depends on the interaction of the input variables.
6. max_features : {‘auto’, ‘sqrt’, ‘log2’}, int or float, default=None
> The number of features to consider when looking for the best split:
7. random_state : int, RandomState instance or None, default=None
> Controls both the randomness of the bootstrapping of the samples used when building trees (if bootstrap=True) and the sampling of the features to consider when looking for the best split at each node (if `max_features < n_features`).
8. verbose : int, default=0
> Controls the verbosity when fitting and predicting.
9. n_iter_no_change : int, default=None
> n_iter_no_change is used to decide if early stopping will be used to terminate training when validation score is not improving. By default it is set to None to disable early stopping. If set to a number, it will set aside validation_fraction size of the training data as validation and terminate training when validation score is not improving in all of the previous n_iter_no_change numbers of iterations. The split is stratified.
10. tol : float, default=1e-4
> Tolerance for the early stopping. When the loss is not improving by at least tol for n_iter_no_change iterations (if set to a number), the training stops.
###Code
model = GradientBoostingClassifier(random_state = 123)
model.fit(X_train, y_train)
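# Example (an addition, not part of the original template): the tuning parameters
# described above can be set explicitly when constructing the model, e.g.
tuned_model = GradientBoostingClassifier(learning_rate=0.05, n_estimators=200,
                                         max_depth=3, random_state=123)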
###Output
_____no_output_____
###Markdown
Model Accuracyscore() method return the mean accuracy on the given test data and labels.In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted.
###Code
print("Accuracy score {:.2f} %\n".format(model.score(X_test,y_test)*100))
###Output
Accuracy score 86.25 %
###Markdown
Confusion MatrixA confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known.
###Code
plot_confusion_matrix(model,X_test,y_test,cmap=plt.cm.Blues)
###Output
_____no_output_____
###Markdown
Classification ReportA Classification report is used to measure the quality of predictions from a classification algorithm. How many predictions are True, how many are False.* **where**: - Precision:- Accuracy of positive predictions. - Recall:- Fraction of positives that were correctly identified. - f1-score:- percent of positive predictions were correct - support:- Support is the number of actual occurrences of the class in the specified dataset.
###Code
print(classification_report(y_test,model.predict(X_test)))
###Output
precision recall f1-score support
0 0.90 0.88 0.89 50
1 0.81 0.83 0.82 30
accuracy 0.86 80
macro avg 0.85 0.86 0.85 80
weighted avg 0.86 0.86 0.86 80
###Markdown
Feature Importances.The Feature importance refers to techniques that assign a score to features based on how useful they are for making the prediction.
###Code
plt.figure(figsize=(8,6))
n_features = len(X.columns)
plt.barh(range(n_features), model.feature_importances_, align='center')
plt.yticks(np.arange(n_features), X.columns)
plt.xlabel("Feature importance")
plt.ylabel("Feature")
plt.ylim(-1, n_features)
###Output
_____no_output_____
###Markdown
Gradient Boosting Classification This Code template is for the Classification tasks using a simple GradientBoostingClassifier based on the Gradient Boosting Ensemble Learning Technique. Required Packages
###Code
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as se
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import classification_report,plot_confusion_matrix
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
InitializationFilepath of CSV file
###Code
#filepath
file_path= ""
###Output
_____no_output_____
###Markdown
List of features which are required for model training .
###Code
#x_values
features=[]
###Output
_____no_output_____
###Markdown
Target feature for prediction.
###Code
#y_value
target=''
###Output
_____no_output_____
###Markdown
Data Fetching Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools. We will use the pandas library to read the CSV file using its storage path, and we use the head function to display the initial rows.
###Code
df=pd.read_csv(file_path)
df.head()#displaying initial entries
###Output
_____no_output_____
###Markdown
Feature SelectionsIt is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.We will assign all the required input features to X and target/outcome to Y.
###Code
X = df[features]
Y = df[target]
###Output
_____no_output_____
###Markdown
Data Preprocessing Since the majority of the machine learning models in the Sklearn library don't handle string category data and null values, we have to explicitly remove or replace null values. The snippet below has functions which remove null values if any exist, and convert the string class data in the datasets by encoding them to integer classes.
###Code
def NullClearner(df):
if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])):
df.fillna(df.mean(),inplace=True)
return df
elif(isinstance(df, pd.Series)):
df.fillna(df.mode()[0],inplace=True)
return df
else:return df
def EncodeX(df):
return pd.get_dummies(df)
def EncodeY(df):
if len(df.unique())<=2:
return df
else:
un_EncodedT=np.sort(pd.unique(df), axis=-1, kind='mergesort')
df=LabelEncoder().fit_transform(df)
EncodedT=[xi for xi in range(len(un_EncodedT))]
print("Encoded Target: {} to {}".format(un_EncodedT,EncodedT))
return df
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=EncodeY(NullClearner(Y))
X.head()
###Output
_____no_output_____
###Markdown
Correlation MapIn order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.
###Code
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
###Output
_____no_output_____
###Markdown
Distribution Of Target Variable
###Code
plt.figure(figsize = (10,6))
se.countplot(Y)
###Output
_____no_output_____
###Markdown
Data SplittingThe train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
###Code
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 123)#performing datasplitting
###Output
_____no_output_____
###Markdown
Model
Gradient Boosting builds an additive model in a forward stage-wise fashion; it allows for the optimization of arbitrary differentiable loss functions.In each stage n_classes_ regression trees are fit on the negative gradient of the binomial or multinomial deviance loss function.
Model Tuning Parameters
1. loss : {‘deviance’, ‘exponential’}, default=’deviance’
> The loss function to be optimized. ‘deviance’ refers to deviance (= logistic regression) for classification with probabilistic outputs. For loss ‘exponential’ gradient boosting recovers the AdaBoost algorithm.
2. learning_rate : float, default=0.1
> Learning rate shrinks the contribution of each tree by learning_rate. There is a trade-off between learning_rate and n_estimators.
3. n_estimators : int, default=100
> The number of trees in the forest.
4. criterion : {‘friedman_mse’, ‘mse’, ‘mae’}, default=’friedman_mse’
> The function to measure the quality of a split. Supported criteria are ‘friedman_mse’ for the mean squared error with improvement score by Friedman, ‘mse’ for mean squared error, and ‘mae’ for the mean absolute error. The default value of ‘friedman_mse’ is generally the best as it can provide a better approximation in some cases.
5. max_depth : int, default=3
> The maximum depth of the individual regression estimators. The maximum depth limits the number of nodes in the tree. Tune this parameter for best performance; the best value depends on the interaction of the input variables.
6. max_features : {‘auto’, ‘sqrt’, ‘log2’}, int or float, default=None
> The number of features to consider when looking for the best split:
7. random_state : int, RandomState instance or None, default=None
> Controls both the randomness of the bootstrapping of the samples used when building trees (if bootstrap=True) and the sampling of the features to consider when looking for the best split at each node (if `max_features < n_features`).
8. verbose : int, default=0
> Controls the verbosity when fitting and predicting.
9. n_iter_no_change : int, default=None
> n_iter_no_change is used to decide if early stopping will be used to terminate training when validation score is not improving. By default it is set to None to disable early stopping. If set to a number, it will set aside validation_fraction size of the training data as validation and terminate training when validation score is not improving in all of the previous n_iter_no_change numbers of iterations. The split is stratified.
10. tol : float, default=1e-4
> Tolerance for the early stopping. When the loss is not improving by at least tol for n_iter_no_change iterations (if set to a number), the training stops.
###Code
model = GradientBoostingClassifier(random_state = 123)
model.fit(X_train, y_train)
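# Example (an addition, not part of the original template): the tuning parameters
# described above can be set explicitly when constructing the model, e.g.
tuned_model = GradientBoostingClassifier(learning_rate=0.05, n_estimators=200,
                                         max_depth=3, random_state=123)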
###Output
_____no_output_____
###Markdown
Model Accuracyscore() method return the mean accuracy on the given test data and labels.In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted.
###Code
print("Accuracy score {:.2f} %\n".format(model.score(X_test,y_test)*100))
###Output
Accuracy score 86.25 %
###Markdown
Confusion MatrixA confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known.
###Code
plot_confusion_matrix(model,X_test,y_test,cmap=plt.cm.Blues)
###Output
_____no_output_____
###Markdown
Classification ReportA Classification report is used to measure the quality of predictions from a classification algorithm. How many predictions are True, how many are False.* **where**: - Precision:- Accuracy of positive predictions. - Recall:- Fraction of positives that were correctly identified. - f1-score:- percent of positive predictions were correct - support:- Support is the number of actual occurrences of the class in the specified dataset.
###Code
print(classification_report(y_test,model.predict(X_test)))
###Output
precision recall f1-score support
0 0.90 0.88 0.89 50
1 0.81 0.83 0.82 30
accuracy 0.86 80
macro avg 0.85 0.86 0.85 80
weighted avg 0.86 0.86 0.86 80
###Markdown
Feature Importances.The Feature importance refers to techniques that assign a score to features based on how useful they are for making the prediction.
###Code
plt.figure(figsize=(8,6))
n_features = len(X.columns)
plt.barh(range(n_features), model.feature_importances_, align='center')
plt.yticks(np.arange(n_features), X.columns)
plt.xlabel("Feature importance")
plt.ylabel("Feature")
plt.ylim(-1, n_features)
###Output
_____no_output_____
###Markdown
Gradient Boosting Classification Required Packages
###Code
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as se
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import classification_report,plot_confusion_matrix
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
InitializationFilepath of CSV file
###Code
#filepath
file_path= ""
###Output
_____no_output_____
###Markdown
List of features which are required for model training .
###Code
#x_values
features=[]
###Output
_____no_output_____
###Markdown
Target feature for prediction.
###Code
#y_value
target=''
###Output
_____no_output_____
###Markdown
Data Fetching Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools. We will use the pandas library to read the CSV file using its storage path, and we use the head function to display the initial rows.
###Code
df=pd.read_csv(file_path)
df.head()#displaying initial entries
###Output
_____no_output_____
###Markdown
Feature SelectionsIt is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.We will assign all the required input features to X and target/outcome to Y.
###Code
X = df[features]
Y = df[target]
###Output
_____no_output_____
###Markdown
Data Preprocessing Since the majority of the machine learning models in the Sklearn library don't handle string category data and null values, we have to explicitly remove or replace null values. The snippet below has functions which remove null values if any exist, and convert the string class data in the datasets by encoding them to integer classes.
###Code
def NullClearner(df):
if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])):
df.fillna(df.mean(),inplace=True)
return df
elif(isinstance(df, pd.Series)):
df.fillna(df.mode()[0],inplace=True)
return df
else:return df
def EncodeX(df):
return pd.get_dummies(df)
def EncodeY(df):
if len(df.unique())<=2:
return df
else:
un_EncodedT=np.sort(pd.unique(df), axis=-1, kind='mergesort')
df=LabelEncoder().fit_transform(df)
EncodedT=[xi for xi in range(len(un_EncodedT))]
print("Encoded Target: {} to {}".format(un_EncodedT,EncodedT))
return df
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=EncodeY(NullClearner(Y))
X.head()
###Output
_____no_output_____
###Markdown
Correlation MapIn order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.
###Code
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
###Output
_____no_output_____
###Markdown
Distribution Of Target Variable
###Code
plt.figure(figsize = (10,6))
se.countplot(Y)
###Output
_____no_output_____
###Markdown
Data SplittingThe train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
###Code
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 123)#performing datasplitting
###Output
_____no_output_____
###Markdown
Model
Gradient Boosting builds an additive model in a forward stage-wise fashion; it allows for the optimization of arbitrary differentiable loss functions. In each stage n_classes_ regression trees are fit on the negative gradient of the binomial or multinomial deviance loss function.
Model Tuning Parameters
1. loss : {‘deviance’, ‘exponential’}, default=’deviance’
> The loss function to be optimized. ‘deviance’ refers to deviance (= logistic regression) for classification with probabilistic outputs. For loss ‘exponential’ gradient boosting recovers the AdaBoost algorithm.
2. learning_rate : float, default=0.1
> Learning rate shrinks the contribution of each tree by learning_rate. There is a trade-off between learning_rate and n_estimators.
3. n_estimators : int, default=100
> The number of trees in the forest.
4. criterion : {‘friedman_mse’, ‘mse’, ‘mae’}, default=’friedman_mse’
> The function to measure the quality of a split. Supported criteria are ‘friedman_mse’ for the mean squared error with improvement score by Friedman, ‘mse’ for mean squared error, and ‘mae’ for the mean absolute error. The default value of ‘friedman_mse’ is generally the best as it can provide a better approximation in some cases.
5. max_depth : int, default=3
> The maximum depth of the individual regression estimators. The maximum depth limits the number of nodes in the tree. Tune this parameter for best performance; the best value depends on the interaction of the input variables.
6. max_features : {‘auto’, ‘sqrt’, ‘log2’}, int or float, default=None
> The number of features to consider when looking for the best split:
7. random_state : int, RandomState instance or None, default=None
> Controls both the randomness of the bootstrapping of the samples used when building trees (if bootstrap=True) and the sampling of the features to consider when looking for the best split at each node (if `max_features < n_features`).
8. verbose : int, default=0
> Controls the verbosity when fitting and predicting.
9. n_iter_no_change : int, default=None
> n_iter_no_change is used to decide if early stopping will be used to terminate training when validation score is not improving. By default it is set to None to disable early stopping. If set to a number, it will set aside validation_fraction size of the training data as validation and terminate training when validation score is not improving in all of the previous n_iter_no_change numbers of iterations. The split is stratified.
10. tol : float, default=1e-4
> Tolerance for the early stopping. When the loss is not improving by at least tol for n_iter_no_change iterations (if set to a number), the training stops.
###Code
model = GradientBoostingClassifier(random_state = 123)
model.fit(X_train, y_train)
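# Example (an addition, not part of the original template): the tuning parameters
# described above can be set explicitly when constructing the model, e.g.
tuned_model = GradientBoostingClassifier(learning_rate=0.05, n_estimators=200,
                                         max_depth=3, random_state=123)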
###Output
_____no_output_____
###Markdown
Model Accuracyscore() method return the mean accuracy on the given test data and labels.In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted.
###Code
print("Accuracy score {:.2f} %\n".format(model.score(X_test,y_test)*100))
###Output
Accuracy score 86.25 %
###Markdown
Confusion MatrixA confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known.
###Code
plot_confusion_matrix(model,X_test,y_test,cmap=plt.cm.Blues)
###Output
_____no_output_____
###Markdown
Classification ReportA Classification report is used to measure the quality of predictions from a classification algorithm. How many predictions are True, how many are False.* **where**: - Precision:- Accuracy of positive predictions. - Recall:- Fraction of positives that were correctly identified. - f1-score:- percent of positive predictions were correct - support:- Support is the number of actual occurrences of the class in the specified dataset.
###Code
print(classification_report(y_test,model.predict(X_test)))
###Output
precision recall f1-score support
0 0.90 0.88 0.89 50
1 0.81 0.83 0.82 30
accuracy 0.86 80
macro avg 0.85 0.86 0.85 80
weighted avg 0.86 0.86 0.86 80
###Markdown
Feature Importances.The Feature importance refers to techniques that assign a score to features based on how useful they are for making the prediction.
###Code
plt.figure(figsize=(8,6))
n_features = len(X.columns)
plt.barh(range(n_features), model.feature_importances_, align='center')
plt.yticks(np.arange(n_features), X.columns)
plt.xlabel("Feature importance")
plt.ylabel("Feature")
plt.ylim(-1, n_features)
###Output
_____no_output_____ |
Assignment 7_ 17IM3FP10.ipynb | ###Markdown
Assignment 7_ 17IM3FP10 1. For a given n by n square matrix write a program for summing the principal and non-principal diagonal elements.
###Code
import numpy as np
n=int(input("Enter"))
a=np.random.randint(50,100,size=(n,n))
a
np.trace(a) # main diagonal elements
s= sum(a[i][n-i-1]for i in range(n))
s
###Output
_____no_output_____
###Markdown
2. Create a list of 10 students and 6 subjects, and generate random marks between 0 and 100 for each student for all subjects. Create a menu driven program to find out and display: i) Students with highest and lowest total marks ii) Subjects with the highest and lowest average score iii) Students with the highest score in each subject
###Code
import numpy as np
mks=np.random.randint(50,100,size=(10,6))
li=[]
avg=[]
print(mks)
name=["Sagun","Amitanshu","Debayan","Mihir","Sourabh","Abhishek","Noni","Keerthana","Shivam","Ayush"]
for i in range(0,10):
li.append(sum(mks[i]))
for i in range(0,10):
avg.append(li[i]/6)
avg
j=li.index(max(li))
k=li.index(min(li))
j_a=avg.index(max(avg))
k_a=avg.index(min(avg))
print("The student with the highest total score is",name[j],"with",li[j],"marks")
print("The student with the lowest total score is",name[k],"with",li[k],"marks")
print("The student with the highest average score is",name[j_a],"with",avg[j_a],"marks")
print("The student with the lowest average score is",name[k_a],"with",avg[k_a],"marks")
sub=["Maths","Physics","Chemistry","Biology","English","Computer"]
com=[]
for j in range(0,6):
for k in range(0,10):
com.append(mks[k,j])
l=com.index(max(com))
print("The student with the highest score in subject",sub[j],"is",name[l],"with",com[l],"marks")
com=[]
###Output
_____no_output_____
###Markdown
3. A=np.array([[17, 24, 1, 8, 15], [23, 5, 7, 14, 16], [ 4, 6, 13, 20, 22], [10, 12, 19, 21, 3], [11, 18, 25, 2, 9]]). Verify whether array A is a magic square.
###Code
A=np.array([[17, 24, 1, 8, 15], [23, 5, 7, 14, 16], [ 4, 6, 13, 20, 22], [10, 12, 19, 21, 3], [11, 18, 25, 2, 9]])
def ismagicsq(A):
s=np.trace(A)
for i in range(0,5):
r_sum=0
for j in range(0,5):
r_sum += A[i][j]
if(r_sum != s):
return False
for i in range(0,5):
c_sum=0
for j in range(0,5):
c_sum += A[j][i]
if(c_sum != s):
return False
sum=0
for i in range(0,5):
for j in range(0,5):
            if i+j==(len(A)-1):  # use the matrix size rather than the n from question 1
sum += A[i][j]
if(sum != s):
return False
return True
if(ismagicsq(A)):
print("magic sq")
else:
print("not magic sq")
###Output
_____no_output_____
###Markdown
4. Create an n dimensional array with random elements from 0 to 10. Count how many even indices have odd elements and how many odd indices have even elements.
###Code
import numpy as np
n=int(input("Enter the no. of elements: "))
rnd=np.random.randint(0,10,size=(n))
print(rnd)
ctr1=0
ctr2=0
for i in range(0,n):
if i%2==0:
if rnd[i]%2==1:
ctr1+=1
if i%2==1:
if rnd[i]%2==0:
ctr2+=1
print(ctr1)
print(ctr2)
###Output
_____no_output_____
###Markdown
5. Generate 100 random integers from 0 to 10 and make a bar chart for numbers versus its frequency. Also draw a pie chart for this.
###Code
pl=np.random.randint(0,10,size=(100))
pl
from matplotlib import pyplot as plt
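# Sketch for the requested charts (an addition to the original submission): count how
# often each value 0-9 appears, then draw the bar chart and the pie chart.
values, counts = np.unique(pl, return_counts=True)
plt.bar(values, counts)
plt.xlabel("Number")
plt.ylabel("Frequency")
plt.show()
plt.pie(counts, labels=values, autopct="%1.1f%%")
plt.show()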
###Output
_____no_output_____ |
nbs/audio.04_learner.ipynb | ###Markdown
Audio Learner> Learner which stacks tuples of `TensorSpec` or `TensorMelSpec`
###Code
#|export
from __future__ import annotations
from fastcore.dispatch import retain_type
from fastai.callback.core import Callback
from fastai.callback.fp16 import MixedPrecision
from fastai.learner import Learner, defaults
from fastai.optimizer import Adam
from fastxtend.audio.core import TensorSpec, TensorMelSpec
from fastxtend.audio.data import MelSpectrogram, Spectrogram
from fastxtend.imports import *
#|hide
from nbdev.showdoc import *
###Output
_____no_output_____
###Markdown
StackSpecCallback -
###Code
#|export
class StackSpecCallback(Callback):
"Stacks tuples of TensorSpec or TensorMelSpec. ToDo: add resizing"
order = MixedPrecision.order-1
def before_batch(self):
xb = L(self.xb)
idx = xb.argwhere(lambda x: isinstance(x, (TensorSpec, TensorMelSpec)))
ts = []
for i in idx:
ts.append(xb[i])
stacked = torch.stack(ts, dim=2)
xb = retain_type(torch.flatten(stacked, 1, 2), xb[i])
self.learn.xb = tuple(xb)
###Output
_____no_output_____
###Markdown
audio_learner -
###Code
#|export
def audio_learner(
dls,
model,
loss_func=None,
opt_func=Adam,
lr=defaults.lr,
splitter=trainable_params,
cbs=None,
metrics=None,
path=None,
model_dir='models',
wd=None,
wd_bn_bias=False,
train_bn=True,
moms=(0.95,0.85,0.95)
) -> Learner:
"An Audio specific Learner that stacks tuples of `TensorSpec` or `TensorMelSpec`"
detuple = False
for i in range(len(dls.train.after_batch.fs)):
if not detuple and isinstance(dls.train.after_batch[i], (Spectrogram, MelSpectrogram)):
detuple = is_listy(dls.train.after_batch[i].n_fft)
if detuple:
        if cbs is None: cbs = StackSpecCallback()
        else: cbs = L(cbs) + L(StackSpecCallback())
return Learner(dls=dls, model=model, loss_func=loss_func, opt_func=opt_func, lr=lr, splitter=splitter, cbs=cbs,
metrics=metrics, path=path, model_dir=model_dir, wd=wd, wd_bn_bias=wd_bn_bias, train_bn=train_bn,
moms=moms)
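# Hypothetical usage sketch (assumes `dls`, `model` and a metric already exist; not run here):
# learn = audio_learner(dls, model, metrics=accuracy)
# learn.fit_one_cycle(5, 3e-3)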
###Output
_____no_output_____ |
ch02git/12Remotes.ipynb | ###Markdown
Working with multiple remotes Distributed versus centralised
Older version control systems (cvs, svn) were "centralised"; the history was kept only on a server, and all commits required an internet connection.

Centralised | Distributed
-------------------------------|--------------------------
Server has history | Every user has full history
Your computer has one snapshot | Many local branches
To access history, need internet | History always available
You commit to remote server | Users synchronise histories
cvs, subversion(svn) | git, mercurial (hg), bazaar (bzr)

With modern distributed systems, we can add a second remote. This might be a personal *fork* on github:
###Code
import os
top_dir = os.getcwd()
git_dir = os.path.join(top_dir, 'learning_git')
working_dir=os.path.join(git_dir, 'git_example')
os.chdir(working_dir)
%%bash
git checkout master
git remote add jamespjh https://${GITHUB_TOKEN}@github.com/Giovanni1085/github-example.git
###Output
Already on 'master'
fatal: remote jamespjh already exists.
###Markdown
Check your remote branches:
```Python
%%bash
git remote -v
```
We can push to a named remote:
###Code
%%writefile Pennines.md
Mountains In the Pennines
========================
* Cross Fell
* Whernside
%%bash
git commit -am "Add Whernside"
%%bash
git push -uf jamespjh master
###Output
Branch master set up to track remote branch master from jamespjh.
###Markdown
Referencing remotesYou can always refer to commits on a remote like this:
###Code
%%bash
git fetch
git log --oneline --left-right jamespjh/master...origin/master
###Output
< 2ec725a Add Whernside
< a19da7c Add github pages YAML frontmatter
###Markdown
To see the differences between remotes, for example.To see what files you have changed that aren't updated on a particular remote, for example:
###Code
%%bash
git diff --name-only origin/master
###Output
Pennines.md
index.md
###Markdown
When you reference remotes like this, you're working with a cached copy of the last time you interacted with the remote. You can do `git fetch` to update local data with the remotes without actually pulling. You can also get useful information about whether tracking branches are ahead or behind the remote branches they track:
###Code
%%bash
git branch -vv
###Output
gh-pages fb21ea4 Add Whernside
* master 2ec725a [jamespjh/master] Add Whernside
###Markdown
Hosting Servers Hosting a local server
* Any repository can be a remote for pulls
* Can pull/push over shared folders or ssh
* Pushing to someone's working copy is dangerous
* Use `git init --bare` to make a copy for pushing
* You don't need to create a "server" as such, any 'bare' git repo will do.
###Code
bare_dir=os.path.join(git_dir, 'bare_repo')
os.chdir(git_dir)
%%bash
mkdir -p bare_repo
cd bare_repo
git init --bare
os.chdir(working_dir)
%%bash
git remote add local_bare ../bare_repo
git push -u local_bare master
###Output
fatal: remote local_bare already exists.
fatal: You are pushing to remote 'local_bare', which is not the upstream of
your current branch 'master', without telling me what to push
to update which remote branch.
###Markdown
Working with multiple remotes Distributed versus centralised
Older version control systems (cvs, svn) were "centralised"; the history was kept only on a server, and all commits required an internet connection.

Centralised | Distributed
-------------------------------|--------------------------
Server has history | Every user has full history
Your computer has one snapshot | Many local branches
To access history, need internet | History always available
You commit to remote server | Users synchronise histories
cvs, subversion(svn) | git, mercurial (hg), bazaar (bzr)

With modern distributed systems, we can add a second remote. This might be a personal *fork* on github:
###Code
import os
top_dir = os.getcwd()
git_dir = os.path.join(top_dir, 'learning_git')
working_dir=os.path.join(git_dir, 'git_example')
os.chdir(working_dir)
%%bash
git checkout master
git remote add jamespjh https://${GITHUB_TOKEN}@github.com/jack89roberts/github-example.git
###Output
Your branch is ahead of 'origin/master' by 1 commit.
(use "git push" to publish your local commits)
###Markdown
Check your remote branches:```Python%%bashgit remote -v``` We can push to a named remote:
###Code
%%writefile Pennines.md
Mountains In the Pennines
========================
* Cross Fell
* Whernside
%%bash
git add Pennines.md
git commit -am "Add Whernside"
%%bash
git push -uf jamespjh master
###Output
Branch 'master' set up to track remote branch 'master' from 'jamespjh'.
###Markdown
Referencing remotesYou can always refer to commits on a remote like this:
###Code
%%bash
git fetch
git log --oneline --left-right jamespjh/master...origin/master
###Output
< fd768dd Add Whernside
< 3f98dd2 Add github pages YAML frontmatter
###Markdown
To see the differences between remotes, for example.To see what files you have changed that aren't updated on a particular remote, for example:
###Code
%%bash
git diff --name-only origin/master
###Output
Pennines.md
index.md
###Markdown
When you reference remotes like this, you're working with a cached copy of the last time you interacted with the remote. You can do `git fetch` to update local data with the remotes without actually pulling. You can also get useful information about whether tracking branches are ahead or behind the remote branches they track:
###Code
%%bash
git branch -vv
###Output
gh-pages 3f98dd2 [origin/gh-pages] Add github pages YAML frontmatter
* master fd768dd [jamespjh/master] Add Whernside
###Markdown
Hosting Servers Hosting a local server
* Any repository can be a remote for pulls
* Can pull/push over shared folders or ssh
* Pushing to someone's working copy is dangerous
* Use `git init --bare` to make a copy for pushing
* You don't need to create a "server" as such, any 'bare' git repo will do.
###Code
bare_dir=os.path.join(git_dir, 'bare_repo')
os.chdir(git_dir)
%%bash
mkdir -p bare_repo
cd bare_repo
git init --bare
os.chdir(working_dir)
%%bash
git remote add local_bare ../bare_repo
git push -u local_bare master
###Output
Branch 'master' set up to track remote branch 'master' from 'local_bare'.
###Markdown
Working with multiple remotes Distributed versus centralised
Older version control systems (cvs, svn) were "centralised"; the history was kept only on a server, and all commits required an internet connection.

Centralised | Distributed
-------------------------------|--------------------------
Server has history | Every user has full history
Your computer has one snapshot | Many local branches
To access history, need internet | History always available
You commit to remote server | Users synchronise histories
cvs, subversion(svn) | git, mercurial (hg), bazaar (bzr)

With modern distributed systems, we can add a second remote. This might be a personal *fork* on github:
###Code
import os
top_dir = os.getcwd()
git_dir = os.path.join(top_dir, "learning_git")
working_dir = os.path.join(git_dir, "git_example")
os.chdir(working_dir)
%%bash
git checkout main
git remote add jack89roberts https://${GITHUB_TOKEN}@github.com/jack89roberts/github-example.git
###Output
Your branch is up to date with 'origin/main'.
###Markdown
Check your remote branches:
```bash
> git remote -v
jack89roberts https://${GITHUB_TOKEN}@github.com/jack89roberts/github-example.git (fetch)
jack89roberts https://${GITHUB_TOKEN}@github.com/jack89roberts/github-example.git (push)
origin https://${GITHUB_TOKEN}@github.com/alan-turing-institute/github-example.git (fetch)
origin https://${GITHUB_TOKEN}@github.com/alan-turing-institute/github-example.git (push)
```
We can push to a named remote:
###Code
%%writefile Pennines.md
Mountains In the Pennines
========================
* Cross Fell
* Whernside
%%bash
git add Pennines.md
git commit -am "Add Whernside"
%%bash
git push -uf jack89roberts main
###Output
Branch 'main' set up to track remote branch 'main' from 'jack89roberts'.
###Markdown
Referencing remotesYou can always refer to commits on a remote like this:
###Code
%%bash
git fetch
git log --oneline --left-right jack89roberts/main...origin/main
###Output
< 0dd20e3 Add Whernside
###Markdown
To see the differences between remotes, for example.To see what files you have changed that aren't updated on a particular remote, for example:
###Code
%%bash
git diff --name-only origin/main
###Output
Pennines.md
###Markdown
When you reference remotes like this, you're working with a cached copy of the last time you interacted with the remote. You can do `git fetch` to update local data with the remotes without actually pulling. You can also get useful information about whether tracking branches are ahead or behind the remote branches they track:
###Code
%%bash
git branch -vv
###Output
* main 0dd20e3 [jack89roberts/main] Add Whernside
###Markdown
Hosting Servers Hosting a local server
* Any repository can be a remote for pulls
* Can pull/push over shared folders or ssh
* Pushing to someone's working copy is dangerous
* Use `git init --bare` to make a copy for pushing
* You don't need to create a "server" as such, any 'bare' git repo will do.
###Code
bare_dir = os.path.join(git_dir, "bare_repo")
os.chdir(git_dir)
%%bash
mkdir -p bare_repo
cd bare_repo
git init --bare --initial-branch=main
os.chdir(working_dir)
%%bash
git remote add local_bare ../bare_repo
git push -u local_bare main
###Output
Branch 'main' set up to track remote branch 'main' from 'local_bare'.
###Markdown
Working with multiple remotes Distributed versus centralised
Older version control systems (cvs, svn) were "centralised"; the history was kept only on a server, and all commits required an internet connection.

Centralised | Distributed
-------------------------------|--------------------------
Server has history | Every user has full history
Your computer has one snapshot | Many local branches
To access history, need internet | History always available
You commit to remote server | Users synchronise histories
cvs, subversion(svn) | git, mercurial (hg), bazaar (bzr)

With modern distributed systems, we can add a second remote. This might be a personal *fork* on github:
###Code
import os
top_dir = os.getcwd()
git_dir = os.path.join(top_dir, 'learning_git')
working_dir = os.path.join(git_dir, 'git_example')
os.chdir(working_dir)
%%bash
git checkout master
git remote add rits [email protected]:ucl-rits/github-example.git
git remote -v
###Output
Your branch is ahead of 'origin/master' by 1 commit.
jamespjh [email protected]:jamespjh/github-example.git (fetch)
jamespjh [email protected]:jamespjh/github-example.git (push)
local_bare ../bare_repo (fetch)
local_bare ../bare_repo (push)
origin [email protected]:UCL/github-example.git (fetch)
origin [email protected]:UCL/github-example.git (push)
###Markdown
We can push to a named remote:
###Code
%%writefile Pennines.md
Mountains In the Pennines
========================
* Cross Fell
* Whernside
%%bash
git commit -am "Add Whernside"
%%bash
git push -uf rits master
###Output
Branch master set up to track remote branch master from jamespjh.
###Markdown
Referencing remotesYou can always refer to commits on a remote like this:
###Code
%%bash
git fetch
git log --oneline --left-right rits/master...origin/master
###Output
< 2ec725a Add Whernside
< a19da7c Add github pages YAML frontmatter
###Markdown
To see the differences between remotes, for example.To see what files you have changed that aren't updated on a particular remote, for example:
###Code
%%bash
git diff --name-only origin/master
###Output
Pennines.md
index.md
###Markdown
When you reference remotes like this, you're working with a cached copy of the last time you interacted with the remote. You can do `git fetch` to update local data with the remotes without actually pulling. You can also get useful information about whether tracking branches are ahead or behind the remote branches they track:
###Code
%%bash
git branch -vv
###Output
gh-pages fb21ea4 Add Whernside
* master 2ec725a [jamespjh/master] Add Whernside
###Markdown
Hosting Servers Hosting a local server* Any repository can be a remote for pulls* Can pull/push over shared folders or ssh* Pushing to someone's working copy is dangerous* Use `git init --bare` to make a copy for pushing* You don't need to create a "server" as such, any 'bare' git repo will do.
###Code
bare_dir = os.path.join(git_dir, 'bare_repo')
os.chdir(git_dir)
%%bash
mkdir -p bare_repo
cd bare_repo
git init --bare
os.chdir(working_dir)
%%bash
git remote add local_bare ../bare_repo
git push -u local_bare master
%%bash
git remote -v
###Output
jamespjh [email protected]:jamespjh/github-example.git (fetch)
jamespjh [email protected]:jamespjh/github-example.git (push)
local_bare ../bare_repo (fetch)
local_bare ../bare_repo (push)
origin [email protected]:UCL/github-example.git (fetch)
origin [email protected]:UCL/github-example.git (push)
###Markdown
Working with multiple remotes Distributed versus centralised
Older version control systems (cvs, svn) were "centralised"; the history was kept only on a server, and all commits required an internet connection.

Centralised | Distributed
-------------------------------|--------------------------
Server has history | Every user has full history
Your computer has one snapshot | Many local branches
To access history, need internet | History always available
You commit to remote server | Users synchronise histories
cvs, subversion(svn) | git, mercurial (hg), bazaar (bzr)

With modern distributed systems, we can add a second remote. This might be a personal *fork* on github:
###Code
import os
top_dir = os.getcwd()
git_dir = os.path.join(top_dir, 'learning_git')
working_dir=os.path.join(git_dir, 'git_example')
os.chdir(working_dir)
%%bash
git checkout master
git remote add jamespjh https://${GITHUB_TOKEN}@github.com/jack89roberts/github-example.git
###Output
Your branch is ahead of 'origin/master' by 1 commit.
(use "git push" to publish your local commits)
###Markdown
Check your remote branches:
```Python
%%bash
git remote -v
```
We can push to a named remote:
###Code
%%writefile Pennines.md
Mountains In the Pennines
========================
* Cross Fell
* Whernside
%%bash
git add Pennines.md
git commit -am "Add Whernside"
%%bash
git push -uf jamespjh master
###Output
Branch 'master' set up to track remote branch 'master' from 'jamespjh'.
###Markdown
Referencing remotesYou can always refer to commits on a remote like this:
###Code
%%bash
git fetch
git log --oneline --left-right jamespjh/master...origin/master
###Output
< fd768dd Add Whernside
< 3f98dd2 Add github pages YAML frontmatter
###Markdown
To see the differences between remotes, for example. To see what files you have changed that aren't updated on a particular remote:
###Code
%%bash
git diff --name-only origin/master
###Output
Pennines.md
test.md
###Markdown
When you reference remotes like this, you're working with a cached copy of the last time you interacted with the remote. You can do `git fetch` to update local data with the remotes without actually pulling. You can also get useful information about whether tracking branches are ahead or behind the remote branches they track:
###Code
%%bash
git branch -vv
###Output
gh-pages 3f98dd2 [origin/gh-pages] Add github pages YAML frontmatter
* master fd768dd [jamespjh/master] Add Whernside
###Markdown
Hosting Servers Hosting a local server* Any repository can be a remote for pulls* Can pull/push over shared folders or ssh* Pushing to someone's working copy is dangerous* Use `git init --bare` to make a copy for pushing* You don't need to create a "server" as such, any 'bare' git repo will do.
###Code
bare_dir=os.path.join(git_dir, 'bare_repo')
os.chdir(git_dir)
%%bash
mkdir -p bare_repo
cd bare_repo
git init --bare
os.chdir(working_dir)
%%bash
git remote add local_bare ../bare_repo
git push -u local_bare master
###Output
Branch 'master' set up to track remote branch 'master' from 'local_bare'.
###Markdown
Working with multiple remotes Distributed versus centralised
Older version control systems (cvs, svn) were "centralised"; the history was kept only on a server, and all commits required an internet connection.

Centralised | Distributed
-------------------------------|--------------------------
Server has history | Every user has full history
Your computer has one snapshot | Many local branches
To access history, need internet | History always available
You commit to remote server | Users synchronise histories
cvs, subversion(svn) | git, mercurial (hg), bazaar (bzr)

With modern distributed systems, we can add a second remote. This might be a personal *fork* on github:
###Code
import os
top_dir = os.getcwd()
git_dir = os.path.join(top_dir, 'learning_git')
working_dir=os.path.join(git_dir, 'git_example')
os.chdir(working_dir)
%%bash
git checkout master
git remote add jamespjh [email protected]:jamespjh/github-example.git
git remote -v
###Output
Your branch is ahead of 'origin/master' by 1 commit.
jamespjh [email protected]:jamespjh/github-example.git (fetch)
jamespjh [email protected]:jamespjh/github-example.git (push)
local_bare ../bare_repo (fetch)
local_bare ../bare_repo (push)
origin [email protected]:UCL/github-example.git (fetch)
origin [email protected]:UCL/github-example.git (push)
###Markdown
We can push to a named remote:
###Code
%%writefile Pennines.md
Mountains In the Pennines
========================
* Cross Fell
* Whernside
%%bash
git commit -am "Add Whernside"
%%bash
git push -uf jamespjh master
###Output
Branch master set up to track remote branch master from jamespjh.
###Markdown
Referencing remotesYou can always refer to commits on a remote like this:
###Code
%%bash
git fetch
git log --oneline --left-right jamespjh/master...origin/master
###Output
< 2ec725a Add Whernside
< a19da7c Add github pages YAML frontmatter
###Markdown
To see the differences between remotes, for example. To see what files you have changed that aren't updated on a particular remote:
###Code
%%bash
git diff --name-only origin/master
###Output
Pennines.md
index.md
###Markdown
When you reference remotes like this, you're working with a cached copy of the last time you interacted with the remote. You can do `git fetch` to update local data with the remotes without actually pulling. You can also get useful information about whether tracking branches are ahead or behind the remote branches they track:
###Code
%%bash
git branch -vv
###Output
gh-pages fb21ea4 Add Whernside
* master 2ec725a [jamespjh/master] Add Whernside
###Markdown
Hosting Servers Hosting a local server* Any repository can be a remote for pulls* Can pull/push over shared folders or ssh* Pushing to someone's working copy is dangerous* Use `git init --bare` to make a copy for pushing* You don't need to create a "server" as such, any 'bare' git repo will do.
###Code
bare_dir=os.path.join(git_dir, 'bare_repo')
os.chdir(git_dir)
%%bash
mkdir -p bare_repo
cd bare_repo
git init --bare
os.chdir(working_dir)
%%bash
git remote add local_bare ../bare_repo
git push -u local_bare master
%%bash
git remote -v
###Output
jamespjh [email protected]:jamespjh/github-example.git (fetch)
jamespjh [email protected]:jamespjh/github-example.git (push)
local_bare ../bare_repo (fetch)
local_bare ../bare_repo (push)
origin [email protected]:UCL/github-example.git (fetch)
origin [email protected]:UCL/github-example.git (push)
###Markdown
Working with multiple remotes Distributed versus centralised
Older version control systems (cvs, svn) were "centralised"; the history was kept only on a server, and all commits required an internet connection.

Centralised | Distributed
-------------------------------|--------------------------
Server has history | Every user has full history
Your computer has one snapshot | Many local branches
To access history, need internet | History always available
You commit to remote server | Users synchronise histories
cvs, subversion(svn) | git, mercurial (hg), bazaar (bzr)

With modern distributed systems, we can add a second remote. This might be a personal *fork* on github:
###Code
import os
top_dir = os.getcwd()
git_dir = os.path.join(top_dir, 'learning_git')
working_dir=os.path.join(git_dir, 'git_example')
os.chdir(working_dir)
%%bash
git checkout master
git remote add jamespjh [email protected]:jamespjh/github-example.git
git remote -v
###Output
Your branch is ahead of 'origin/master' by 1 commit.
jamespjh [email protected]:jamespjh/github-example.git (fetch)
jamespjh [email protected]:jamespjh/github-example.git (push)
local_bare ../bare_repo (fetch)
local_bare ../bare_repo (push)
origin [email protected]:UCL/github-example.git (fetch)
origin [email protected]:UCL/github-example.git (push)
###Markdown
We can push to a named remote:
###Code
%%writefile Pennines.md
Mountains In the Pennines
========================
* Cross Fell
* Whernside
%%bash
git commit -am "Add Whernside"
%%bash
git push -uf jamespjh master
###Output
Branch master set up to track remote branch master from jamespjh.
###Markdown
Referencing remotesYou can always refer to commits on a remote like this:
###Code
%%bash
git fetch
git log --oneline --left-right jamespjh/master...origin/master
###Output
< 2ec725a Add Whernside
< a19da7c Add github pages YAML frontmatter
###Markdown
To see the differences between remotes, for example. To see what files you have changed that aren't updated on a particular remote:
###Code
%%bash
git diff --name-only origin/master
###Output
Pennines.md
index.md
###Markdown
When you reference remotes like this, you're working with a cached copy of the last time you interacted with the remote. You can do `git fetch` to update local data with the remotes without actually pulling. You can also get useful information about whether tracking branches are ahead or behind the remote branches they track:
###Code
%%bash
git branch -vv
###Output
gh-pages fb21ea4 Add Whernside
* master 2ec725a [jamespjh/master] Add Whernside
###Markdown
Hosting Servers Hosting a local server* Any repository can be a remote for pulls* Can pull/push over shared folders or ssh* Pushing to someone's working copy is dangerous* Use `git init --bare` to make a copy for pushing* You don't need to create a "server" as such, any 'bare' git repo will do.
###Code
bare_dir=os.path.join(git_dir, 'bare_repo')
os.chdir(git_dir)
%%bash
mkdir -p bare_repo
cd bare_repo
git init --bare
os.chdir(working_dir)
%%bash
git remote add local_bare ../bare_repo
git push -u local_bare master
%%bash
git remote -v
###Output
jamespjh [email protected]:jamespjh/github-example.git (fetch)
jamespjh [email protected]:jamespjh/github-example.git (push)
local_bare ../bare_repo (fetch)
local_bare ../bare_repo (push)
origin [email protected]:UCL/github-example.git (fetch)
origin [email protected]:UCL/github-example.git (push)
|
notebooks/var_string_num.ipynb | ###Markdown
Variables, Strings, and Numbers===In this section, you will learn to store information in variables. You will learn about two types of data: strings, which are sets of characters, and numerical data types. [Previous: Introduction]() | [Next: Lists & Tuples]() Contents---- [Variables](Variables) - [Example](Example) - [Naming rules](Naming-rules) - [NameError](NameError) - [Exercises](Exercises-variables)- [Strings](Strings) - [Single and double quotes](Single-and-double-quotes) - [Changing case](Changing-case) - [Combining strings (concatenation)](Combining-strings-(concatenation)) - [Whitespace](Whitespace) - [Exercises](Exercises-strings)- [Numbers](Numbers) - [Integer operations](Integer-operations) - [Floating-Point numbers](Floating-Point-numbers) - [Division in Python 3](Division-in-Python-3) - [Exercises](Exercises-numbers) - [Challenges](Challenges-numbers)- [Comments](Comments) - [What makes a good comment?](What-makes-a-good-comment?) - [When should you write comments?](When-should-you-write-comments?) - [Exercises](Exercises-comments)- [Zen of Python](Zen-of-Python)- [Overall Challenges](Overall-Challenges) Variables===A variable holds a value. In Python we can use a meaningful name, such as 'message', to store a variable. Variables can be strings of characters, whole numbers or real numbers. Example---In the example below, the variable **message** stores a string of characters. Double click in this code block and press **Ctrl-Return** to run it. As you work through the exercises here, double click in each code block to run the code. Remember that you can edit the code before running it - or run it, edit it, and then run it again to see how your changes have affected the output!
###Code
message = "Hello Python world!"
print(message)
###Output
_____no_output_____
###Markdown
A variable holds a value. You can change the value of a variable at any point.
###Code
message = "Hello Python world!"
print(message)
message = "Python is my favorite language!"
print(message)
###Output
_____no_output_____
###Markdown
Naming rules---- Variable names can only contain letters, numbers, and underscores. Variable names can start with a letter or an underscore, but can not start with a number.- Spaces are not allowed in variable names, so we use underscores instead of spaces. For example, use student_name instead of "student name".- You cannot use [Python keywords](http://docs.python.org/3/reference/lexical_analysis.html#keywords) as variable names.- Variable names should be descriptive, without being too long. For example, mc_wheels is better than just "wheels", and better than number_of_wheels_on_a_motorcycle.- Be careful about using the lowercase letter l and the uppercase letter O in places where they could be confused with the numbers 1 and 0. (A short example cell illustrating these rules appears just after the NameError example below.) NameError---There is one common error when using variables, that you will almost certainly encounter at some point. Take a look at this code, and see if you can figure out why it causes an error.
###Code
message = "Thank you for sharing Python with the world, Guido!"
print(mesage)
###Output
_____no_output_____
###Markdown
Let's look through this error message. First, we see it is a NameError. Then we see the file that caused the error, and a green arrow shows us what line in that file caused the error. Then we get some more specific feedback, that "name 'mesage' is not defined".You may have already spotted the source of the error. We spelled message two different ways. Python does not care whether we use the variable name "message" or "mesage". Python only cares that the spellings of our variable names match every time we use them.This is pretty important, because it allows us to have a variable "name" with a single name in it, and then another variable "names" with a bunch of names in it.We can fix NameErrors by making sure all of our variable names are spelled consistently.
###Code
message = "Thank you for sharing Python with the world, Guido!"
print(message)
###Output
_____no_output_____
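###Markdown
Here is a short illustrative cell for the naming rules above. The names are made up for the example and are not used elsewhere in this notebook.
###Code
student_name = "Ada"     # letters and an underscore: fine
_count = 3               # starting with an underscore: fine
wheel_count_2 = 4        # digits are fine, as long as the name does not start with one
# 2nd_wheel = 4          # would be a SyntaxError: names cannot start with a number
# student name = "Ada"   # would be a SyntaxError: no spaces allowed in names
print(student_name, _count, wheel_count_2)
###Output
_____no_output_____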
###Markdown
In case you didn't know [Guido](http://en.wikipedia.org/wiki/Guido_van_Rossum) [van Rossum](http://www.python.org/~guido/) created the Python language over 20 years ago, and he is considered Python's [Benevolent Dictator for Life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life). Guido still signs off on all major changes to the core Python language. Exercises--- Hello World - variable- Store your own version of the message "Hello World" in a variable, and print it. One Variable, Two Messages:- Store a message in a variable, and then print that message.- Store a new message in the same variable, and then print that new message. Strings===Strings are sets of characters. Strings are easier to understand by looking at some examples. Single and double quotes---Strings are contained by either single or double quotes.
###Code
my_string = "This is a double-quoted string."
my_string = 'This is a single-quoted string.'
###Output
_____no_output_____
###Markdown
This lets us make strings that contain quotations.
###Code
quote = "Linus Torvalds once said, 'Any program is only as good as it is useful.'"
###Output
_____no_output_____
###Markdown
Changing case---You can easily change the case of a string, to present it the way you want it to look.
###Code
first_name = 'eric'
print(first_name)
print(first_name.title())
###Output
_____no_output_____
###Markdown
It is often good to store data in lower case, and then change the case as you want to for presentation. This catches some typos. It also makes sure that 'eric', 'Eric', and 'ERIC' are not considered three different people. Some of the most common cases are lower, title, and upper.
###Code
###highlight=[6,8,9]
first_name = 'eric'
print(first_name)
print(first_name.title())
print(first_name.upper())
first_name = 'Eric'
print(first_name.lower())
###Output
_____no_output_____
###Markdown
You will see this syntax quite often, where a variable name is followed by a dot and then the name of an action, followed by a set of parentheses. The parentheses may be empty, or they may contain some values.variable_name.action()In this example, the word "action" is the name of a method. A method is something that can be done to a variable. The methods 'lower', 'title', and 'upper' are all functions that have been written into the Python language, which do something to strings. Later on, you will learn to write your own methods. Combining strings (concatenation)---It is often very useful to be able to combine strings into a message or page element that we want to display. Again, this is easier to understand through an example.
###Code
first_name = 'ada'
last_name = 'lovelace'
full_name = first_name + ' ' + last_name
print(full_name.title())
###Output
_____no_output_____
###Markdown
The plus sign combines two strings into one, which is called "concatenation". You can use as many plus signs as you want in composing messages. In fact, many web pages are written as giant strings which are put together through a long series of string concatenations.
###Code
###highlight=[6,7,8]
first_name = 'ada'
last_name = 'lovelace'
full_name = first_name + ' ' + last_name
message = full_name.title() + ' ' + "was considered the world's first computer programmer."
print(message)
###Output
_____no_output_____
###Markdown
If you don't know who Ada Lovelace is, you might want to go read what [Wikipedia](http://en.wikipedia.org/wiki/Ada_Lovelace) or the [Computer History Museum](http://www.computerhistory.org/babbage/adalovelace/) have to say about her. Her life and her work are also the inspiration for the [Ada Initiative](http://adainitiative.org/faq/about-ada-lovelace/), which supports women who are involved in technical fields. Inside the brackets of a print command, it is also possible to concatenate strings using a comma - this will automatically add a space between strings. However, this will not work as you might wish if you try this when creating a variable!
###Code
print('Once','upon', 'a', 'time')
message = ('Once','upon', 'a', 'time')
print(message)
###Output
_____no_output_____
###Markdown
Whitespace---The term "whitespace" refers to characters that the computer is aware of, but are invisible to readers. The most common whitespace characters are spaces, tabs, and newlines.Spaces are easy to create, because you have been using them as long as you have been using computers. Tabs and newlines are represented by special character combinations.The two-character combination "\t" makes a tab appear in a string. Tabs can be used anywhere you like in a string.
###Code
print("Hello everyone!")
print("\tHello everyone!")
print("Hello \teveryone!")
###Output
_____no_output_____
###Markdown
The combination "\n" makes a newline appear in a string. You can use newlines anywhere you like in a string.
###Code
print("Hello everyone!")
print("\nHello everyone!")
print("Hello \neveryone!")
print("\n\n\nHello everyone!")
###Output
_____no_output_____
###Markdown
Stripping whitespaceMany times you will allow users to enter text into a box, and then you will read that text and use it. It is really easy for people to include extra whitespace at the beginning or end of their text. Whitespace includes spaces, tabs, and newlines.It is often a good idea to strip this whitespace from strings before you start working with them. For example, you might want to let people log in, and you probably want to treat 'eric ' as 'eric' when you are trying to see if I exist on your system.You can strip whitespace from the left side, the right side, or both sides of a string.
###Code
name = ' eric '
print(name.lstrip())
print(name.rstrip())
print(name.strip())
###Output
_____no_output_____
###Markdown
It's hard to see exactly what is happening, so maybe the following will make it a little more clear:
###Code
name = ' eric '
print('-' + name.lstrip() + '-')
print('-' + name.rstrip() + '-')
print('-' + name.strip() + '-')
###Output
_____no_output_____
###Markdown
Exercises--- Someone Said- Find a quote that you like. Store the quote in a variable, with an appropriate introduction such as "Ken Thompson once said, 'One of my most productive days was throwing away 1000 lines of code'". Print the quote. First Name Cases- Store your first name, in lowercase, in a variable.- Using that one variable, print your name in lowercase, Titlecase, and UPPERCASE. Full Name- Store your first name and last name in separate variables, and then combine them to print out your full name. About This Person- Choose a person you look up to. Store their first and last names in separate variables.- Use concatenation to make a sentence about this person, and store that sentence in a variable.-- Print the sentence. Name Strip- Store your first name in a variable, but include at least two kinds of whitespace on each side of your name.- Print your name as it is stored.- Print your name with whitespace stripped from the left side, then from the right side, then from both sides. Numbers===Dealing with simple numerical data is fairly straightforward in Python, but there are a few things you should know about. Integers---You can do all of the basic operations with integers, and everything should behave as you expect. Addition and subtraction use the standard plus and minus symbols. Multiplication uses the asterisk, and division uses a forward slash. Exponents use two asterisks.
###Code
print(3+2)
print(3-2)
print(3*2)
print(3/2)
print(3**2)
###Output
_____no_output_____
###Markdown
You can use parenthesis to modify the standard order of operations.
###Code
standard_order = 2+3*4
print(standard_order)
my_order = (2+3)*4
print(my_order)
###Output
_____no_output_____
###Markdown
Floating-Point numbers---Floating-point numbers refer to any number with a decimal point. Most of the time, you can think of floating point numbers as decimals, and they will behave as you expect them to.
###Code
print(0.1+0.1)
###Output
_____no_output_____
###Markdown
However, sometimes you will get an answer with an unexpectedly long decimal part:
###Code
print(0.1+0.2)
###Output
_____no_output_____
###Markdown
This happens because of the way computers represent numbers internally; this has nothing to do with Python itself. Basically, we are used to working in powers of ten, where one tenth plus two tenths is just three tenths. But computers work in powers of two. So your computer has to represent 0.1 in a power of two, and then 0.2 as a power of two, and express their sum as a power of two. There is no exact representation for 0.3 in powers of two, and we see that in the answer to 0.1+0.2.Python tries to hide this kind of stuff when possible. Don't worry about it much for now; just don't be surprised by it, and know that we will learn to clean up our results a little later on.You can also get the same kind of result with other operations.
###Code
print(3*0.1)
###Output
_____no_output_____
###Markdown
Division in Python 3---Dividing two integers in Python 3 always returns a float, while in Python 2 dividing two integers results in another integer. If you are getting numerical results that you don't expect, or that don't make sense, check if the version of Python you are using is treating integers differently than you expect.
###Code
print(4/2)
print(3/2)
###Output
_____no_output_____
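###Markdown
As a small supplementary sketch (not part of the original lesson): if you do want the whole-number result in Python 3, the floor-division operator // gives it, mirroring what / does for integers in Python 2.
###Code
print(4 // 2)   # 2
print(3 // 2)   # 1, the fractional part is discarded
print(-3 // 2)  # -2, floor division rounds toward negative infinity
###Output
_____no_output_____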
###Markdown
Exercises--- Arithmetic- Write a program that prints out the results of at least one calculation for each of the basic operations: addition, subtraction, multiplication, division, and exponents. Order of Operations- Find a calculation whose result depends on the order of operations.- Print the result of this calculation using the standard order of operations.- Use parentheses to force a nonstandard order of operations. Print the result of this calculation. Long Decimals- On paper, 0.1+0.2=0.3. But you have seen that in Python, 0.1+0.2=0.30000000000000004.- Find at least one other calculation that results in a long decimal like this. Python 2 or Python 3?- Use integer division to show whether your Python interpreter is using Python 2 or Python 3. Challenges--- Neat Arithmetic- Store the results of at least 5 different calculations in separate variables. Make sure you use each operation at least once.- Print a series of informative statements, such as "The result of the calculation 5+7 is 12." Neat Order of Operations- Take your work for "Order of Operations" above.- Instead of just printing the results, print an informative summary of the results. Show each calculation that is being done and the result of that calculation. Explain how you modified the result using parentheses. Long Decimals - Pattern- On paper, 0.1+0.2=0.3. But you have seen that in Python, 0.1+0.2=0.30000000000000004.- Find a number of other calculations that result in a long decimal like this. Try to find a pattern in what kinds of numbers will result in long decimals. Try Some Python 2- If you just want to play around with Python 2 and Python 3, you can easily do so on [pythontutor.com](http://pythontutor.com/). Click "Start using Online Python Tutor now", and then delete the sample code in the text box so you can enter your own code. On that page, there is a drop-down list just below the text box that lets you select different versions of Python. Click "Visualize Execution" to run your code. On the next page, you can either click "Forward" to step through your code one line at a time, or click "Last" to run your entire program. Comments===As you begin to write more complicated code, you will have to spend more time thinking about how to code solutions to the problems you want to solve. Once you come up with an idea, you will spend a fair amount of time troubleshooting your code, and revising your overall approach. Comments allow you to write in English, within your program. In Python, any line that starts with a pound (#) symbol is ignored by the Python interpreter.
###Code
# This line is a comment.
print("This line is not a comment, it is code.")
###Output
_____no_output_____
###Markdown
What makes a good comment?---- It is short and to the point, but a complete thought. Most comments should be written in complete sentences.- It explains your thinking, so that when you return to the code later you will understand how you were approaching the problem.- It explains your thinking, so that others who work with your code will understand your overall approach to a problem.- It explains particularly difficult sections of code in detail.When should you write comments?---- When you have to think about code before writing it.- When you are likely to forget later exactly how you were approaching a problem.- When there is more than one way to solve a problem.- When others are unlikely to anticipate your way of thinking about a problem.Writing good comments is one of the clear signs of a good programmer. If you have any real interest in taking programming seriously, start using comments now. You will see them throughout the examples in these notebooks. Exercises--- First Comments- Choose the longest, most difficult, or most interesting program you have written so far. Write at least one comment in your program. Zen of Python===The Python community is incredibly large and diverse. People are using Python in science, in medicine, in robotics, on the internet, and in any other field you can imagine. This diverse group of thinkers has developed a collective mindset about how programs should be written. If you want to understand Python and the community of Python programmers, it is a good idea to learn the ways Python programmers think.You can easily see a set of guiding principles that is written right into the language, when you run the program below:
###Code
import this
###Output
_____no_output_____
###Markdown
There is a lot here. Let's just take a few lines, and see what they mean for you as a new programmer. Beautiful is better than ugly.Python programmers recognize that good code can actually be beautiful. If you come up with a particularly elegant or efficient way to solve a problem, especially a difficult problem, other Python programmers will respect your work and may even call it beautiful. There is beauty in high-level technical work. Explicit is better than implicit.It is better to be clear about what you are doing, than come up with some shorter way to do something that is difficult to understand. Simple is better than complex. Complex is better than complicated.Keep your code simple whenever possible, but recognize that we sometimes take on really difficult problems for which there are no easy solutions. In those cases, accept the complexity but avoid complication. Readability counts.There are very few interesting and useful programs these days that are written and maintained entirely by one person. Write your code in a way that others can read it as easily as possible, and in a way that you will be able to read and understand it 6 months from now. This includes writing good comments in your code. There should be one-- and preferably only one --obvious way to do it.There are many ways to solve most problems that come up in programming. However, most problems have a standard, well-established approach. Save complexity for when it is needed, and solve problems in the most straightforward way possible. Now is better than never.No one ever writes perfect code. If you have an idea you want to implement it, write some code that works. Release it, let it be used by others, and then steadily improve it. Overall Challenges===We have learned quite a bit so far about programming, but we haven't learned enough yet for you to go create something. In the next notebook, things will get much more interesting, and there will be a longer list of overall challenges. What I've Learned- Write a program that uses everything you have learned in this notebook at least once.- Write comments that label each section of your program.- For each thing your program does, write at least one line of output that explains what your program did.- For example, you might have one line that stores your name with some whitespace in a variable, and a second line that strips that whitespace from your name:
###Code
# I learned how to strip whitespace from strings.
name = '\t\teric'
print("I can strip tabs from my name: " + name.strip())
###Output
_____no_output_____
###Markdown
Variables, Strings, and Numbers===In this section, you will learn to store information in variables. You will learn about two types of data: strings, which are sets of characters, and numerical data types. [Previous: Hello World](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/hello_world.ipynb) | [Home](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/index.ipynb) | [Next: Lists and Tuples](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/lists_tuples.ipynb) Contents---- [Variables](Variables) - [Example](Example) - [Naming rules](Naming-rules) - [NameError](NameError) - [Exercises](Exercises-variables)- [Strings](Strings) - [Single and double quotes](Single-and-double-quotes) - [Changing case](Changing-case) - [Combining strings (concatenation)](Combining-strings-(concatenation)) - [Whitespace](Whitespace) - [Exercises](Exercises-strings)- [Numbers](Numbers) - [Integer operations](Integer-operations) - [Floating-point numbers](Floating-point-numbers) - [Integers in Python 2.7](Integers-in-Python-2.7) - [Exercises](Exercises-numbers) - [Challenges](Challenges-numbers)- [Comments](Comments) - [What makes a good comment?](What-makes-a-good-comment?) - [When should you write comments?](When-should-you-write-comments?) - [Exercises](Exercises-comments)- [Zen of Python](Zen-of-Python)- [Overall Challenges](Overall-Challenges) Variables===A variable holds a value. Example---
###Code
message = "Hello Python world!"
print(message)
###Output
Hello Python world!
###Markdown
A variable holds a value. You can change the value of a variable at any point.
###Code
###highlight=[5,6]
message = "Hello Python world!"
print(message)
message = "Python is my favorite language!"
print(message)
###Output
Hello Python world!
Python is my favorite language!
###Markdown
Naming rules---- Variables can only contain letters, numbers, and underscores. Variable names can start with a letter or an underscore, but can not start with a number.- Spaces are not allowed in variable names, so we use underscores instead of spaces. For example, use student_name instead of "student name".- You cannot use [Python keywords](http://docs.python.org/3/reference/lexical_analysis.html#keywords) as variable names.- Variable names should be descriptive, without being too long. For example, mc_wheels is better than just "wheels", and better than number_of_wheels_on_a_motorcycle.- Be careful about using the lowercase letter l and the uppercase letter O in places where they could be confused with the numbers 1 and 0. NameError---There is one common error when using variables, that you will almost certainly encounter at some point. Take a look at this code, and see if you can figure out why it causes an error.
###Code
message = "Thank you for sharing Python with the world, Guido!"
print(mesage)
###Output
_____no_output_____
###Markdown
Let's look through this error message. First, we see it is a NameError. Then we see the file that caused the error, and a green arrow shows us what line in that file caused the error. Then we get some more specific feedback, that "name 'mesage' is not defined".You may have already spotted the source of the error. We spelled message two different ways. Python does not care whether we use the variable name "message" or "mesage". Python only cares that the spellings of our variable names match every time we use them.This is pretty important, because it allows us to have a variable "name" with a single name in it, and then another variable "names" with a bunch of names in it.We can fix NameErrors by making sure all of our variable names are spelled consistently.
###Code
###highlight=[3]
message = "Thank you for sharing Python with the world, Guido!"
print(message)
###Output
Thank you for sharing Python with the world, Guido!
###Markdown
In case you didn't know [Guido](http://en.wikipedia.org/wiki/Guido_van_Rossum) [van Rossum](http://www.python.org/~guido/) created the Python language over 20 years ago, and he is considered Python's [Benevolent Dictator for Life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life). Guido still signs off on all major changes to the core Python language. Exercises--- Hello World - variable- Store your own version of the message "Hello World" in a variable, and print it. One Variable, Two Messages:- Store a message in a variable, and then print that message.- Store a new message in the same variable, and then print that new message. [top]() Strings===Strings are sets of characters. Strings are easier to understand by looking at some examples. Single and double quotes---Strings are contained by either single or double quotes.
###Code
my_string = "This is a double-quoted string."
my_string = 'This is a single-quoted string.'
###Output
_____no_output_____
###Markdown
This lets us make strings that contain quotations.
###Code
quote = "Linus Torvalds once said, 'Any program is only as good as it is useful.'"
###Output
_____no_output_____
###Markdown
Changing case---You can easily change the case of a string, to present it the way you want it to look.
###Code
###highlight=[6,8,9]
first_name = 'eric'
print(first_name)
print(first_name.title())
print(first_name.upper())
first_name = 'Eric'
print(first_name.lower())
###Output
eric
Eric
ERIC
eric
###Markdown
It is often good to store data in lower case, and then change the case as you want to for presentation. This catches some typos. It also makes sure that 'eric', 'Eric', and 'ERIC' are not considered three different people. Some of the most common cases are lower, title, and upper. You will see this syntax quite often, where a variable name is followed by a dot and then the name of an action, followed by a set of parentheses. The parentheses may be empty, or they may contain some values: variable_name.action(). In this example, the word "action" is the name of a method. A method is something that can be done to a variable. The methods 'lower', 'title', and 'upper' are all functions that have been written into the Python language, which do something to strings. Later on, you will learn to write your own methods. Combining strings (concatenation)---It is often very useful to be able to combine strings into a message or page element that we want to display. Again, this is easier to understand through an example.
###Code
first_name = 'ada'
last_name = 'lovelace'
full_name = first_name + ' ' + last_name
print(full_name.title())
###Output
Ada Lovelace
###Markdown
The plus sign combines two strings into one, which is called "concatenation". You can use as many plus signs as you want in composing messages. In fact, many web pages are written as giant strings which are put together through a long series of string concatenations.
###Code
###highlight=[6,7,8]
first_name = 'ada'
last_name = 'lovelace'
full_name = first_name + ' ' + last_name
message = full_name.title() + ' ' + "was considered the world's first computer programmer."
print(message)
###Output
Ada Lovelace was considered the world's first computer programmer.
###Markdown
If you don't know who Ada Lovelace is, you might want to go read what [Wikipedia](http://en.wikipedia.org/wiki/Ada_Lovelace) or the [Computer History Museum](http://www.computerhistory.org/babbage/adalovelace/) have to say about her. Her life and her work are also the inspiration for the [Ada Initiative](http://adainitiative.org/faq/about-ada-lovelace/), which supports women who are involved in technical fields. Whitespace---The term "whitespace" refers to characters that the computer is aware of, but are invisible to readers. The most common whitespace characters are spaces, tabs, and newlines.Spaces are easy to create, because you have been using them as long as you have been using computers. Tabs and newlines are represented by special character combinations.The two-character combination "\t" makes a tab appear in a string. Tabs can be used anywhere you like in a string.
###Code
print("Hello everyone!")
print("\tHello everyone!")
print("Hello \teveryone!")
###Output
Hello everyone!
###Markdown
The combination "\n" makes a newline appear in a string. You can use newlines anywhere you like in a string.
###Code
print("Hello everyone!")
print("\nHello everyone!")
print("Hello \neveryone!")
print("\n\n\nHello everyone!")
###Output
Hello everyone!
###Markdown
Stripping whitespaceMany times you will allow users to enter text into a box, and then you will read that text and use it. It is really easy for people to include extra whitespace at the beginning or end of their text. Whitespace includes spaces, tabs, and newlines.It is often a good idea to strip this whitespace from strings before you start working with them. For example, you might want to let people log in, and you probably want to treat 'eric ' as 'eric' when you are trying to see if I exist on your system.You can strip whitespace from the left side, the right side, or both sides of a string.
###Code
name = ' eric '
print(name.lstrip())
print(name.rstrip())
print(name.strip())
###Output
eric
eric
eric
###Markdown
It's hard to see exactly what is happening, so maybe the following will make it a little more clear:
###Code
name = ' eric '
print('-' + name.lstrip() + '-')
print('-' + name.rstrip() + '-')
print('-' + name.strip() + '-')
###Output
-eric -
- eric-
-eric-
###Markdown
Exercises--- Someone Said- Find a quote that you like. Store the quote in a variable, with an appropriate introduction such as "Ken Thompson once said, 'One of my most productive days was throwing away 1000 lines of code'". Print the quote. First Name Cases- Store your first name, in lowercase, in a variable.- Using that one variable, print your name in lowercase, Titlecase, and UPPERCASE. Full Name- Store your first name and last name in separate variables, and then combine them to print out your full name. ___Optional:___ About This Person- Choose a person you look up to. Store their first and last names in separate variables.- Use concatenation to make a sentence about this person, and store that sentence in a variable.-- Print the sentence. Name Strip- Store your first name in a variable, but include at least two kinds of whitespace on each side of your name.- Print your name as it is stored.- Print your name with whitespace stripped from the left side, then from the right side, then from both sides. [top]() Numbers===Dealing with simple numerical data is fairly straightforward in Python, but there are a few things you should know about. Integers---You can do all of the basic operations with integers, and everything should behave as you expect. Addition and subtraction use the standard plus and minus symbols. Multiplication uses the asterisk, and division uses a forward slash. Exponents use two asterisks.
###Code
print(3+2)
print(3-2)
print(3*2)
print(3/2)
print(3**2)
###Output
9
###Markdown
You can use parenthesis to modify the standard order of operations.
###Code
standard_order = 2+3*4
print(standard_order)
my_order = (2+3)*4
print(my_order)
###Output
20
###Markdown
Floating-Point numbers---Floating-point numbers refer to any number with a decimal point. Most of the time, you can think of floating point numbers as decimals, and they will behave as you expect them to.
###Code
print(0.1+0.1)
###Output
0.2
###Markdown
However, sometimes you will get an answer with an unexpectedly long decimal part:
###Code
print(0.1+0.2)
###Output
0.30000000000000004
###Markdown
This happens because of the way computers represent numbers internally; this has nothing to do with Python itself. Basically, we are used to working in powers of ten, where one tenth plus two tenths is just three tenths. But computers work in powers of two. So your computer has to represent 0.1 in a power of two, and then 0.2 as a power of two, and express their sum as a power of two. There is no exact representation for 0.3 in powers of two, and we see that in the answer to 0.1+0.2.Python tries to hide this kind of stuff when possible. Don't worry about it much for now; just don't be surprised by it, and know that we will learn to clean up our results a little later on.You can also get the same kind of result with other operations.
###Code
print(3*0.1)
###Output
0.30000000000000004
###Markdown
Exercises--- Arithmetic- Write a program that prints out the results of at least one calculation for each of the basic operations: addition, subtraction, multiplication, division, and exponents. Order of Operations- Find a calculation whose result depends on the order of operations.- Print the result of this calculation using the standard order of operations.- Use parentheses to force a nonstandard order of operations. Print the result of this calculation. Long Decimals- On paper, 0.1+0.2=0.3. But you have seen that in Python, 0.1+0.2=0.30000000000000004.- Find at least one other calculation that results in a long decimal like this. Optional Challenges--- Neat Arithmetic- Store the results of at least 5 different calculations in separate variables. Make sure you use each operation at least once.- Print a series of informative statements, such as "The result of the calculation 5+7 is 12." Neat Order of Operations- Take your work for "Order of Operations" above.- Instead of just printing the results, print an informative summary of the results. Show each calculation that is being done and the result of that calculation. Explain how you modified the result using parentheses. Long Decimals - Pattern- On paper, 0.1+0.2=0.3. But you have seen that in Python, 0.1+0.2=0.30000000000000004.- Find a number of other calculations that result in a long decimal like this. Try to find a pattern in what kinds of numbers will result in long decimals. [top]() Comments===As you begin to write more complicated code, you will have to spend more time thinking about how to code solutions to the problems you want to solve. Once you come up with an idea, you will spend a fair amount of time troubleshooting your code, and revising your overall approach. Comments allow you to write in English, within your program. In Python, any line that starts with a pound (#) symbol is ignored by the Python interpreter.
###Code
# This line is a comment.
print("This line is not a comment, it is code.")
###Output
This line is not a comment, it is code.
###Markdown
What makes a good comment?---- It is short and to the point, but a complete thought. Most comments should be written in complete sentences.- It explains your thinking, so that when you return to the code later you will understand how you were approaching the problem.- It explains your thinking, so that others who work with your code will understand your overall approach to a problem.- It explains particularly difficult sections of code in detail.When should you write comments?---- When you have to think about code before writing it.- When you are likely to forget later exactly how you were approaching a problem.- When there is more than one way to solve a problem.- When others are unlikely to anticipate your way of thinking about a problem.Writing good comments is one of the clear signs of a good programmer. If you have any real interest in taking programming seriously, start using comments now. You will see them throughout the examples in these notebooks. Exercises--- First Comments- Choose the longest, most difficult, or most interesting program you have written so far. Write at least one comment in your program. [top]() Optional: Zen of Python===The Python community is incredibly large and diverse. People are using Python in science, in medicine, in robotics, on the internet, and in any other field you can imagine. This diverse group of thinkers has developed a collective mindset about how programs should be written. If you want to understand Python and the community of Python programmers, it is a good idea to learn the ways Python programmers think.You can easily see a set of guiding principles that is written right into the language:
###Code
import this
###Output
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
###Markdown
There is a lot here. Let's just take a few lines, and see what they mean for you as a new programmer. Beautiful is better than ugly.Python programmers recognize that good code can actually be beautiful. If you come up with a particularly elegant or efficient way to solve a problem, especially a difficult problem, other Python programmers will respect your work and may even call it beautiful. There is beauty in high-level technical work. Explicit is better than implicit.It is better to be clear about what you are doing, than come up with some shorter way to do something that is difficult to understand. Simple is better than complex. Complex is better than complicated.Keep your code simple whenever possible, but recognize that we sometimes take on really difficult problems for which there are no easy solutions. In those cases, accept the complexity but avoid complication. Readability counts.There are very few interesting and useful programs these days that are written and maintained entirely by one person. Write your code in a way that others can read it as easily as possible, and in a way that you will be able to read and understand it 6 months from now. This includes writing good comments in your code. There should be one-- and preferably only one --obvious way to do it.There are many ways to solve most problems that come up in programming. However, most problems have a standard, well-established approach. Save complexity for when it is needed, and solve problems in the most straightforward way possible. Now is better than never.No one ever writes perfect code. If you have an idea you want to implement it, write some code that works. Release it, let it be used by others, and then steadily improve it. [top]() Optional: General Challenges===We have learned quite a bit so far about programming, but we haven't learned enough yet for you to go create something. In the next notebook, things will get much more interesting, and there will be a longer list of overall challenges. What I've Learned- Write a program that uses everything you have learned in this notebook at least once.- Write comments that label each section of your program.- For each thing your program does, write at least one line of output that explains what your program did.- For example, you might have one line that stores your name with some whitespace in a variable, and a second line that strips that whitespace from your name:
###Code
# I learned how to strip whitespace from strings.
name = '\t\teric'
print("I can strip tabs from my name: " + name.strip())
###Output
I can strip tabs from my name: eric
###Markdown
Table of Contents: 1 Variables, Strings, and Numbers; 2 Variables; 2.1 Example; 2.2 Naming rules; 2.3 NameError; 3 Strings; 3.1 Single and double quotes; 3.2 Getting a string's length and checking for character; 3.3 Changing case; 3.4 Combining strings (concatenation); 3.5 Whitespace; 3.5.1 Stripping whitespace; 4 Numbers; 4.1 Floating-Point numbers; 4.2 Integers in Python 2.7; 4.2.1 Division in Python 2.7; 4.2.2 Division in Python 3.3; 5 Comments; 5.1 What makes a good comment?; 5.2 When should you write comments?; 6 Zen of Python Variables, Strings, and Numbers===In this section, you will learn to store information in variables. You will learn about three types of data: strings, which are lists of characters, integers, which are numbers like 2 and 3, and floats, which are numbers like 2.0 and 2.5. Variables===A variable holds a value. Example---
###Code
message = "Hello world!"
print(message)
###Output
Hello world!
###Markdown
A variable holds a value. You can change the value of a variable at any point. When you do, the original value is overwritten.
###Code
message = "Hello world!"
print(message)
message = "I'm learning to program"
print(message)
###Output
Hello world!
I'm learning to program
###Markdown
Naming rules---- Variable names can only have letters, numbers, and underscores. Variable names can start with a letter or an underscore, but cannot start with a number.- Spaces are not allowed in variable names, so we use underscores instead of spaces. For example, use student_name instead of "student name".- You cannot use [Python keywords](http://docs.python.org/2/reference/lexical_analysis.htmlkeywords) as variable names.- Variable names should be descriptive, without being too long. For example, `cur_trial` is better than just `trial`, and `my_current_trial`.- Be careful about using the lowercase letter l and the uppercase letter O in places where they could be confused with the numbers 1 and 0.- Python conventions dictate separating naming clauses with underscores (e.g., `cur_trial`). An alternate naming convention, and one that I will use in the class because of old habits is *mixed case* (e.g., `curTrial`) NameError---There is one common error when using variables, that you will almost certainly encounter at some point. Take a look at this code, and see if you can figure out why it causes an error.
###Code
message = "Thank you for sharing Python with the world, Guido!"
print(mesage)
###Output
_____no_output_____
###Markdown
Let's look through this error message. First, we see it is a NameError. Then we see the file that caused the error, and a green arrow shows us what line in that file caused the error. Then we get some more specific feedback, that "name 'mesage' is not defined".You may have already spotted the source of the error. We spelled message two different ways. Python does not care whether we use the variable name "message" or "mesage". Python only cares that the spellings of our variable names match every time we use them.This is pretty important, because it allows us to have a variable "name" with a single name in it, and then another variable "names" with a bunch of names in it.We can fix NameErrors by making sure all of our variable names are spelled consistently.Nearly in every case, capitalization matters! The variable names `message`, `Message`, and `messagE` are -- to Python -- as different from one another as `message` and `rhinoceros` Of course to *humans* `message` and `Message` are quite similar and liable to get confused, which is a reason to not use variable names that differ only in capitalization!
###Code
message = "Thank you for making Python, Guido!"
print(message)
###Output
Thank you for making Python, Guido!
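###Markdown
A tiny illustration of the capitalization point above (the values are made up): differently capitalized names are completely separate variables.
###Code
message = "hello"
Message = "HELLO"   # a different variable, despite the similar-looking name
print(message)
print(Message)
###Output
hello
HELLO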
###Markdown
[Guido](http://en.wikipedia.org/wiki/Guido_van_Rossum) [van Rossum](http://www.python.org/~guido/) created the Python language over 20 years ago, and he is considered Python's [Benevolent Dictator for Life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life). Guido still signs off on all major changes to the core Python language. [top]() Strings===Strings are lists of characters. Let's look at some examples. Single and double quotes---Strings are contained by either single or double quotes.
###Code
my_string = "This is a double-quoted string."
my_string = 'This is a single-quoted string.'
###Output
_____no_output_____
###Markdown
This lets us make strings that contain quotations.
###Code
quote = "Linus Torvalds once said, 'Any program is only as good as it is useful.'"
###Output
_____no_output_____
###Markdown
What if we want to have a string with both single and double quotes in it? The following won't work. Can you see why?
###Code
my_string = "Here's a string with a "quote in it""
print my_string
###Output
_____no_output_____
###Markdown
To make it work we have to "escape" the quotes that would otherwise tell the Python interpreter that ths string is to be terminated.
###Code
my_string = "Here's a string with a \"quote in it\""
print my_string
###Output
Here's a string with a "quote in it"
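###Markdown
Another option, shown here as a supplementary sketch rather than part of the original notebook: enclose the string in the quote type you use less of, and escape only the quotes that match the enclosing ones.
###Code
# Single quotes enclose the string, so only the apostrophe needs escaping.
my_string = 'Here\'s a string with a "quote in it"'
print my_string
###Output
Here's a string with a "quote in it"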
###Markdown
Getting a string's length and checking if something exists---Remember how we said that strings are lists of characters? We'll learn more about lists in the next section, but for now, just note that because they're lists, you can do the following:
###Code
print "There are",len('aeiou'), "vowels in English"
if 'e' in 'aeiou':
print "yep, there's an e"
###Output
There are 5 vowels in English
yep, there's an e
###Markdown
We'll cover more complex string searches later in the semester.
###Code
print 'f' in 'aeiou'
###Output
_____no_output_____
###Markdown
Changing case---You can easily change the case of a string, to present it the way you want it to look.
###Code
first_name = 'eric'
print(first_name)
print(first_name.title())
###Output
eric
Eric
###Markdown
It is often good to store data in lower case, and then change the case as you want to for presentation. This catches some typos. It also makes sure that 'eric', 'Eric', and 'ERIC' are not considered three different people.Some of the most common cases are lower, title, and upper.
###Code
first_name = 'eric'
print(first_name)
print(first_name.title())
print(first_name.upper())
first_name = 'Eric'
print(first_name.lower())
###Output
eric
Eric
ERIC
eric
###Markdown
You will see this syntax quite often, where a variable name is followed by a dot and then the name of an action, followed by a set of parentheses. The parentheses may be empty, or they may contain some values.variable_name.action()In this example, the word "action" is the name of a method. A method is something that can be done to a variable. The methods 'lower', 'title', and 'upper' are all functions that have been written into the Python language, which do something to strings. Later on, you will learn to write your own methods. Combining strings (concatenation)---It is often very useful to be able to combine strings into a message or page element that we want to display. Again, this is easier to understand through an example.
###Code
first_name = 'ada'
last_name = 'lovelace'
full_name = first_name + ' ' + last_name
print(full_name.title())
###Output
Ada Lovelace
###Markdown
The plus sign combines two strings into one, which is called "concatenation". You can use as many plus signs as you want in composing messages. In fact, many web pages are written as giant strings which are put together through a long series of string concatenations.
###Code
first_name = 'ada'
last_name = 'lovelace'
full_name = first_name + ' ' + last_name
message = full_name.title() + ' ' + "was considered the world's first computer programmer."
print(message)
###Output
Ada Lovelace was considered the world's first computer programmer.
###Markdown
If you don't know who Ada Lovelace is, you might want to go read what [Wikipedia](http://en.wikipedia.org/wiki/Ada_Lovelace) or the [Computer History Museum](http://www.computerhistory.org/babbage/adalovelace/) have to say about her. Her life and her work are also the inspiration for the [Ada Initiative](http://adainitiative.org/faq/about-ada-lovelace/). Whitespace---The term "whitespace" refers to characters that the computer is aware of, but are invisible to readers. The most common whitespace characters are spaces, tabs, and newlines.Spaces are easy to create, because you have been using them as long as you have been using computers. Tabs and newlines are represented by special character combinations.The two-character combination "\t" makes a tab appear in a string. Tabs can be used anywhere you like in a string.
###Code
print("Hello everyone!")
print("\tHello everyone!")
print("Hello \teveryone!")
###Output
Hello everyone!
###Markdown
The combination "\n" makes a newline appear in a string. You can use newlines anywhere you like in a string.
###Code
print("Hello everyone!")
print("\nHello everyone!")
print("Hello \neveryone!")
print("\n\n\nHello everyone!")
###Output
Hello everyone!
###Markdown
We'll talk again about newlines when we talk about writing to files. Stripping whitespaceSometimes you'll want to get rid of whitespace characters (spaces, tabs, and newlines) that precede or follow the string (for example, when reading from files or when accepting free-response from users). You can strip whitespace from the left side, the right side, or both sides of a string.
###Code
name = ' eric '
print(name.lstrip())
print(name.rstrip())
print(name.strip())
###Output
eric
eric
eric
###Markdown
It's hard to see exactly what is happening, so maybe the following will make it a little more clear:
###Code
name = ' eric '
print('-' + name.lstrip() + '-')
print('-' + name.rstrip() + '-')
print('-' + name.strip() + '-')
###Output
-eric -
- eric-
-eric-
###Markdown
[top]() Numbers=== Integers---You can do all of the basic operations with integers, and everything should behave as you expect. Addition and subtraction use the standard plus and minus symbols. Multiplication uses the asterisk, and division uses a forward slash. Exponents use two asterisks.
###Code
print 3+2
print 3-2
print 3**2
print 3 % 2
###Output
_____no_output_____
###Markdown
This last one (`%`) is a modulus operator. It returns the remainder after division: 3 mod 2 is 1 because when we divide 3 by 2, the remainder is 1. Remember this. It comes in handy!
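For example, one classic use of the modulus (an added sketch, not from the original notes) is checking whether a number is even or odd:
###Code
number = 7
# A number is even exactly when dividing it by 2 leaves no remainder.
if number % 2 == 0:
    print number, "is even"
else:
    print number, "is odd"
###Output
7 is odd
###Markdown
You can use parentheses to modify the standard order of operations.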
###Code
standard_order = 2+3*4
print(standard_order)
my_order = (2+3)*4
print(my_order)
###Output
20
###Markdown
Floating-Point numbers---Floating-point numbers refer to any number with a decimal point. Most of the time, you can think of floating point numbers as decimals, and they will behave as you expect them to.
###Code
print(0.1+0.1)
###Output
0.2
###Markdown
However, sometimes you will get an answer with an unexpectedly long decimal part:
###Code
print(0.1+0.2)
###Output
0.30000000000000004
###Markdown
This happens because of the way computers represent numbers internally; this has nothing to do with Python itself. Basically, we are used to working in powers of ten, where one tenth plus two tenths is just three tenths. But computers work in powers of two. So your computer has to represent 0.1 in a power of two, and then 0.2 as a power of two, and express their sum as a power of two. There is no exact representation for 0.3 in powers of two, and we see that in the answer to 0.1+0.2.Python tries to hide this kind of stuff when possible. Don't worry about it much for now; just don't be surprised by it, and know that we will learn to clean up our results a little later on.You can also get the same kind of result with other operations.
###Code
print(3*0.1)
###Output
0.30000000000000004
###Markdown
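As an aside (an added sketch, not part of the original notes), the built-in round() function is one simple way to tidy such results for display; we will meet nicer formatting tools later:
###Code
print(round(0.1 + 0.2, 2))
###Output
0.3
###Markdown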
Integers in Python 2.7---There are a couple differences in the way Python 2 and Python 3 handle numbers. In Python 2, dividing two integers always results in an integer, while Python 3 always returns a float. This is fine when the result of your integer division is an integer, but it leads to quite different results when the answer is a decimal. Division in Python 2.7
###Code
# Python 2.7
print 4/2
# Python 2.7
print 3/2
###Output
1
###Markdown
You can force Python 2 to use decimal-point (float) division by making one or both of the operands a float. Both of these methods work:
###Code
print 3.0/2
print float(3)/2
###Output
1.5
###Markdown
Division in Python 3.3 Python 3 does float division by default:
###Code
# Python 3.3
print(4/2)
# Python 3.3
print(3/2)
###Output
1.5
###Markdown
If you are getting numerical results that you don't expect, or that don't make sense, check if the version of Python you are using is treating integers differently than you expect. Combining data-types Python is a dynamically typed language: every value has a definite type, and a variable takes on the type of whatever value it currently holds. You can check a value's type like this:
###Code
type(4)
type(3.0)
type('asd')
type(False)
###Output
_____no_output_____
###Markdown
If you want to combine types, you need to convert them appropriately. Sometimes the conversions happen behind the scenes:
###Code
a=3
b='s'
print a,b
###Output
3 s
###Markdown
Other times they don't:
###Code
a=3
b='s'
print a+b
###Output
_____no_output_____
###Markdown
Some of Python's operators are "overloaded" meaning that what they do depends on the type of the variables the operator is working with:
###Code
a=3
b=5
print a*b
a='q'
b=3
print a*b
a='q'
b='m'
print a*b
a='q'
b='m'
print a+b
###Output
qm
###Markdown
The '+' operator is overloaded: when given integers or floats, it does normal addition (that's what you would expect). Adding a string and an integer throws a `TypeError` because it doesn't make sense... how would you add a number to a string? *Multiplying* a string by an integer *does* have a sensible and unambiguous interpretation (to programmers anyway): just repeat the string that number of times (note that multiplying a string by a float will *not* work). Finally, multiplying two strings isn't defined (what's 's' times 'q'??). But *adding* two strings does have an unambiguous interpretation: concatenation!
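If you do want to combine a number with a string, convert the number explicitly first. Here is a small added example (not in the original notes) using the built-in str() function:
###Code
a = 3
b = ' is my favorite number'
# str() converts the integer to a string, so '+' means concatenation.
print str(a) + b
###Output
3 is my favorite number
###Markdown
Commenting your code===As you begin to write more complicated code, you will have to spend more time thinking about how to code solutions to the problems you want to solve. Once you come up with an idea, you will spend a fair amount of time troubleshooting your code, and revising your overall approach.Comments allow you to write in English, within your program. In Python, any line that starts with a pound (#) symbol is ignored by the Python interpreter.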
###Code
# This line is a comment.
print("This line is not a comment, it is code.")
###Output
This line is not a comment, it is code.
###Markdown
For multi-line comments (e.g., for explaining what a function does), you can use a triple single quote `'''` but this is generally reserved for documenting code rather than writing simple comments. The triple quote can be useful for *temporarily* commenting out a chunk of your code during debugging. ''' for i in range(10): print "blah" ''' In Sublime Text, you can highlight code you want to comment and press ⌘-/ to prefix it with the `#` character, thereby commenting it out. What makes a good comment?---- It is short and to the point, but a complete thought. Most comments should be written in complete sentences.- It explains your thinking, so that when you return to the code later you will understand how you were approaching the problem.- It explains your thinking, so that others who work with your code will understand your overall approach to a problem.- It explains particularly difficult sections of code in detail.When should you write comments?---- When you have to think about code before writing it.- When you are likely to forget later exactly how you were approaching a problem.- When there is more than one way to solve a problem.- When others are unlikely to anticipate your way of thinking about a problem.Writing good comments is one of the clear signs of a good programmer. If you have any real interest in taking programming seriously, start using comments now. You will see them throughout the examples in these notebooks. [top]() Zen of Python===The Python community is incredibly large and diverse. People are using Python in science, in medicine, in robotics, on the internet, and in any other field you can imagine. This diverse group of thinkers has developed a collective mindset about how programs should be written. If you want to understand Python and the community of Python programmers, it is a good idea to learn the ways Python programmers think.A set of guiding principles is written right into the language:
###Code
import this
###Output
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
###Markdown
Variables, Strings, and Numbers===In this section, you will learn to store information in variables. You will learn about two types of data: strings, which are sets of characters, and numerical data types. [Previous: Hello World](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/hello_world.ipynb) | [Home](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/index.ipynb) | [Next: Lists and Tuples](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/lists_tuples.ipynb) Contents---- [Variables](Variables) - [Example](Example) - [Naming rules](Naming-rules) - [NameError](NameError) - [Exercises](Exercises-variables)- [Strings](Strings) - [Single and double quotes](Single-and-double-quotes) - [Changing case](Changing-case) - [Combining strings (concatenation)](Combining-strings-(concatenation)) - [Whitespace](Whitespace) - [Exercises](Exercises-strings)- [Numbers](Numbers) - [Integer operations](Integer-operations) - [Floating-point numbers](Floating-point-numbers) - [Integers in Python 2.7](Integers-in-Python-2.7) - [Exercises](Exercises-numbers) - [Challenges](Challenges-numbers)- [Comments](Comments) - [What makes a good comment?](What-makes-a-good-comment?) - [When should you write comments?](When-should-you-write-comments?) - [Exercises](Exercises-comments)- [Zen of Python](Zen-of-Python)- [Overall Challenges](Overall-Challenges) Variables===A variable holds a value. Example---
###Code
message = "Hello Python world!"
print(message)
###Output
Hello Python world!
###Markdown
A variable holds a value. You can change the value of a variable at any point.
###Code
###highlight=[5,6]
message = "Hello Python world!"
print(message)
message = "Python is my favorite language!"
print(message)
###Output
Hello Python world!
Python is my favorite language!
###Markdown
Naming rules---- Variables can only contain letters, numbers, and underscores. Variable names can start with a letter or an underscore, but cannot start with a number.- Spaces are not allowed in variable names, so we use underscores instead of spaces. For example, use student_name instead of "student name".- You cannot use [Python keywords](http://docs.python.org/3/reference/lexical_analysis.html#keywords) as variable names.- Variable names should be descriptive, without being too long. For example, mc_wheels is better than just "wheels", and shorter than number_of_wheels_on_a_motorcycle.- Be careful about using the lowercase letter l and the uppercase letter O in places where they could be confused with the numbers 1 and 0. NameError---There is one common error when using variables, that you will almost certainly encounter at some point. Take a look at this code, and see if you can figure out why it causes an error.
###Code
message = "Thank you for sharing Python with the world, Guido!"
print(mesage)
###Output
_____no_output_____
###Markdown
Let's look through this error message. First, we see it is a NameError. Then we see the file that caused the error, and a green arrow shows us what line in that file caused the error. Then we get some more specific feedback, that "name 'mesage' is not defined".You may have already spotted the source of the error. We spelled message two different ways. Python does not care whether we use the variable name "message" or "mesage". Python only cares that the spellings of our variable names match every time we use them.This is pretty important, because it allows us to have a variable "name" with a single name in it, and then another variable "names" with a bunch of names in it.We can fix NameErrors by making sure all of our variable names are spelled consistently.
###Code
###highlight=[3]
message = "Thank you for sharing Python with the world, Guido!"
print(message)
###Output
Thank you for sharing Python with the world, Guido!
###Markdown
In case you didn't know [Guido](http://en.wikipedia.org/wiki/Guido_van_Rossum) [van Rossum](http://www.python.org/~guido/) created the Python language over 20 years ago, and he is considered Python's [Benevolent Dictator for Life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life). Guido still signs off on all major changes to the core Python language. Exercises--- Hello World - variable- Store your own version of the message "Hello World" in a variable, and print it. One Variable, Two Messages:- Store a message in a variable, and then print that message.- Store a new message in the same variable, and then print that new message. [top]() Strings===Strings are sets of characters. Strings are easier to understand by looking at some examples. Single and double quotes---Strings are contained by either single or double quotes.
###Code
my_string = "This is a double-quoted string."
my_string = 'This is a single-quoted string.'
###Output
_____no_output_____
###Markdown
This lets us make strings that contain quotations.
###Code
quote = "Linus Torvalds once said, 'Any program is only as good as it is useful.'"
###Output
_____no_output_____
###Markdown
Changing case---You can easily change the case of a string, to present it the way you want it to look.
###Code
first_name = 'eric'
print(first_name)
print(first_name.title())
###Output
eric
Eric
###Markdown
It is often good to store data in lower case, and then change the case as you want to for presentation. This catches some typos. It also makes sure that 'eric', 'Eric', and 'ERIC' are not considered three different people.Some of the most common cases are lower, title, and upper.
###Code
###highlight=[6,8,9]
first_name = 'eric'
print(first_name)
print(first_name.title())
print(first_name.upper())
first_name = 'Eric'
print(first_name.lower())
###Output
eric
Eric
ERIC
eric
###Markdown
You will see this syntax quite often, where a variable name is followed by a dot and then the name of an action, followed by a set of parentheses. The parentheses may be empty, or they may contain some values.variable_name.action()In this example, the word "action" is the name of a method. A method is something that can be done to a variable. The methods 'lower', 'title', and 'upper' are all functions that have been written into the Python language, which do something to strings. Later on, you will learn to write your own methods. Combining strings (concatenation)---It is often very useful to be able to combine strings into a message or page element that we want to display. Again, this is easier to understand through an example.
###Code
first_name = 'ada'
last_name = 'lovelace'
full_name = first_name + ' ' + last_name
print(full_name.title())
###Output
Ada Lovelace
###Markdown
The plus sign combines two strings into one, which is called "concatenation". You can use as many plus signs as you want in composing messages. In fact, many web pages are written as giant strings which are put together through a long series of string concatenations.
###Code
###highlight=[6,7,8]
first_name = 'ada'
last_name = 'lovelace'
full_name = first_name + ' ' + last_name
message = full_name.title() + ' ' + "was considered the world's first computer programmer."
print(message)
###Output
Ada Lovelace was considered the world's first computer programmer.
###Markdown
If you don't know who Ada Lovelace is, you might want to go read what [Wikipedia](http://en.wikipedia.org/wiki/Ada_Lovelace) or the [Computer History Museum](http://www.computerhistory.org/babbage/adalovelace/) have to say about her. Her life and her work are also the inspiration for the [Ada Initiative](http://adainitiative.org/faq/about-ada-lovelace/), which supports women who are involved in technical fields. Whitespace---The term "whitespace" refers to characters that the computer is aware of, but are invisible to readers. The most common whitespace characters are spaces, tabs, and newlines.Spaces are easy to create, because you have been using them as long as you have been using computers. Tabs and newlines are represented by special character combinations.The two-character combination "\t" makes a tab appear in a string. Tabs can be used anywhere you like in a string.
###Code
print("Hello everyone!")
print("\tHello everyone!")
print("Hello \teveryone!")
###Output
Hello everyone!
###Markdown
The combination "\n" makes a newline appear in a string. You can use newlines anywhere you like in a string.
###Code
print("Hello everyone!")
print("\nHello everyone!")
print("Hello \neveryone!")
print("\n\n\nHello everyone!")
###Output
Hello everyone!
###Markdown
Stripping whitespaceMany times you will allow users to enter text into a box, and then you will read that text and use it. It is really easy for people to include extra whitespace at the beginning or end of their text. Whitespace includes spaces, tabs, and newlines.It is often a good idea to strip this whitespace from strings before you start working with them. For example, you might want to let people log in, and you probably want to treat 'eric ' as 'eric' when you are trying to see if I exist on your system.You can strip whitespace from the left side, the right side, or both sides of a string.
###Code
name = ' eric '
print(name.lstrip())
print(name.rstrip())
print(name.strip())
###Output
eric
eric
eric
###Markdown
It's hard to see exactly what is happening, so maybe the following will make it a little more clear:
###Code
name = ' eric '
print('-' + name.lstrip() + '-')
print('-' + name.rstrip() + '-')
print('-' + name.strip() + '-')
###Output
-eric -
- eric-
-eric-
###Markdown
Exercises--- Someone Said- Find a quote that you like. Store the quote in a variable, with an appropriate introduction such as "Ken Thompson once said, 'One of my most productive days was throwing away 1000 lines of code'". Print the quote. First Name Cases- Store your first name, in lowercase, in a variable.- Using that one variable, print your name in lowercase, Titlecase, and UPPERCASE. Full Name- Store your first name and last name in separate variables, and then combine them to print out your full name. About This Person- Choose a person you look up to. Store their first and last names in separate variables.- Use concatenation to make a sentence about this person, and store that sentence in a variable.-- Print the sentence. Name Strip- Store your first name in a variable, but include at least two kinds of whitespace on each side of your name.- Print your name as it is stored.- Print your name with whitespace stripped from the left side, then from the right side, then from both sides. [top]() Numbers===Dealing with simple numerical data is fairly straightforward in Python, but there are a few things you should know about. Integers---You can do all of the basic operations with integers, and everything should behave as you expect. Addition and subtraction use the standard plus and minus symbols. Multiplication uses the asterisk, and division uses a forward slash. Exponents use two asterisks.
###Code
print(3+2)
print(3-2)
print(3*2)
print(3/2)
print(3**2)
###Output
9
###Markdown
You can use parentheses to modify the standard order of operations.
###Code
standard_order = 2+3*4
print(standard_order)
my_order = (2+3)*4
print(my_order)
###Output
20
###Markdown
Floating-Point numbers---Floating-point numbers refer to any number with a decimal point. Most of the time, you can think of floating point numbers as decimals, and they will behave as you expect them to.
###Code
print(0.1+0.1)
###Output
0.2
###Markdown
However, sometimes you will get an answer with an unexpectedly long decimal part:
###Code
print(0.1+0.2)
###Output
0.30000000000000004
###Markdown
This happens because of the way computers represent numbers internally; this has nothing to do with Python itself. Basically, we are used to working in powers of ten, where one tenth plus two tenths is just three tenths. But computers work in powers of two. So your computer has to represent 0.1 in a power of two, and then 0.2 as a power of two, and express their sum as a power of two. There is no exact representation for 0.3 in powers of two, and we see that in the answer to 0.1+0.2.Python tries to hide this kind of stuff when possible. Don't worry about it much for now; just don't be surprised by it, and know that we will learn to clean up our results a little later on.You can also get the same kind of result with other operations.
###Code
print(3*0.1)
###Output
0.30000000000000004
###Markdown
Integers in Python 2.7---There are a couple differences in the way Python 2 and Python 3 handle numbers. In Python 2, dividing two integers always results in an integer, while Python 3 always returns a float. This is fine when the result of your integer division is an integer, but it leads to quite different results when the answer is a decimal. Division in Python 2.7
###Code
# Python 2.7
print 4/2
# Python 2.7
print 3/2
###Output
1
###Markdown
Division in Python 3.3
###Code
# Python 3.3
print(4/2)
# Python 3.3
print(3/2)
###Output
1.5
###Markdown
If you are getting numerical results that you don't expect, or that don't make sense, check if the version of Python you are using is treating integers differently than you expect. Exercises--- Arithmetic- Write a program that prints out the results of at least one calculation for each of the basic operations: addition, subtraction, multiplication, division, and exponents. Order of Operations- Find a calculation whose result depends on the order of operations.- Print the result of this calculation using the standard order of operations.- Use parentheses to force a nonstandard order of operations. Print the result of this calculation. Long Decimals- On paper, 0.1+0.2=0.3. But you have seen that in Python, 0.1+0.2=0.30000000000000004.- Find at least one other calculation that results in a long decimal like this. Python 2 or Python 3?- Use integer division to show whether your Python interpreter is using Python 2 or Python 3. Challenges--- Neat Arithmetic- Store the results of at least 5 different calculations in separate variables. Make sure you use each operation at least once.- Print a series of informative statements, such as "The result of the calculation 5+7 is 12." Neat Order of Operations- Take your work for "Order of Operations" above.- Instead of just printing the results, print an informative summary of the results. Show each calculation that is being done and the result of that calculation. Explain how you modified the result using parentheses. Long Decimals - Pattern- On paper, 0.1+0.2=0.3. But you have seen that in Python, 0.1+0.2=0.30000000000000004.- Find a number of other calculations that result in a long decimal like this. Try to find a pattern in what kinds of numbers will result in long decimals. Python 2 and Python 3- Find a way to make your computer interpret 3/2 once in Python 2, and once in Python 3.- (HINT) Don't spend too much time on this, unless you like reading on forums about installing from packages and what not. If you just want to play around with Python 2 and Python 3, you can easily do so on [pythontutor.com](http://pythontutor.com/). Click "Start using Online Python Tutor now", and then delete the sample code in the text box so you can enter your own code. On that page, there is a drop-down list just below the text box that lets you select different versions of Python. Click "Visualize Execution" to run your code. On the next page, you can either click "Forward" to step through your code one line at a time, or click "Last" to run your entire program. [top]() Comments===As you begin to write more complicated code, you will have to spend more time thinking about how to code solutions to the problems you want to solve. Once you come up with an idea, you will spend a fair amount of time troubleshooting your code, and revising your overall approach.Comments allow you to write in English, within your program. In Python, any line that starts with a pound (#) symbol is ignored by the Python interpreter.
###Code
# This line is a comment.
print("This line is not a comment, it is code.")
###Output
This line is not a comment, it is code.
###Markdown
What makes a good comment?---- It is short and to the point, but a complete thought. Most comments should be written in complete sentences.- It explains your thinking, so that when you return to the code later you will understand how you were approaching the problem.- It explains your thinking, so that others who work with your code will understand your overall approach to a problem.- It explains particularly difficult sections of code in detail.When should you write comments?---- When you have to think about code before writing it.- When you are likely to forget later exactly how you were approaching a problem.- When there is more than one way to solve a problem.- When others are unlikely to anticipate your way of thinking about a problem.Writing good comments is one of the clear signs of a good programmer. If you have any real interest in taking programming seriously, start using comments now. You will see them throughout the examples in these notebooks. Exercises--- First Comments- Choose the longest, most difficult, or most interesting program you have written so far. Write at least one comment in your program. [top]() Zen of Python===The Python community is incredibly large and diverse. People are using Python in science, in medicine, in robotics, on the internet, and in any other field you can imagine. This diverse group of thinkers has developed a collective mindset about how programs should be written. If you want to understand Python and the community of Python programmers, it is a good idea to learn the ways Python programmers think.You can easily see a set of guiding principles that is written right into the language:
###Code
import this
###Output
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
###Markdown
There is a lot here. Let's just take a few lines, and see what they mean for you as a new programmer. Beautiful is better than ugly.Python programmers recognize that good code can actually be beautiful. If you come up with a particularly elegant or efficient way to solve a problem, especially a difficult problem, other Python programmers will respect your work and may even call it beautiful. There is beauty in high-level technical work. Explicit is better than implicit.It is better to be clear about what you are doing, than come up with some shorter way to do something that is difficult to understand. Simple is better than complex. Complex is better than complicated.Keep your code simple whenever possible, but recognize that we sometimes take on really difficult problems for which there are no easy solutions. In those cases, accept the complexity but avoid complication. Readability counts.There are very few interesting and useful programs these days that are written and maintained entirely by one person. Write your code in a way that others can read it as easily as possible, and in a way that you will be able to read and understand it 6 months from now. This includes writing good comments in your code. There should be one-- and preferably only one --obvious way to do it.There are many ways to solve most problems that come up in programming. However, most problems have a standard, well-established approach. Save complexity for when it is needed, and solve problems in the most straightforward way possible. Now is better than never.No one ever writes perfect code. If you have an idea you want to implement it, write some code that works. Release it, let it be used by others, and then steadily improve it. [top]() Overall Challenges===We have learned quite a bit so far about programming, but we haven't learned enough yet for you to go create something. In the next notebook, things will get much more interesting, and there will be a longer list of overall challenges. What I've Learned- Write a program that uses everything you have learned in this notebook at least once.- Write comments that label each section of your program.- For each thing your program does, write at least one line of output that explains what your program did.- For example, you might have one line that stores your name with some whitespace in a variable, and a second line that strips that whitespace from your name:
###Code
# I learned how to strip whitespace from strings.
name = '\t\teric'
print("I can strip tabs from my name: " + name.strip())
###Output
I can strip tabs from my name: eric
###Markdown
|
tfx/penguin-tfx-simple/.ipynb_checkpoints/vertex_pipelines_simple-checkpoint.ipynb | ###Markdown
Copyright 2021 The TensorFlow Authors. Simple TFX Pipeline for Vertex Pipelines This notebook-based tutorial will create a simple TFX pipeline and run it using Google Cloud Vertex Pipelines. This notebook is based on the TFX pipeline we built in [Simple TFX Pipeline Tutorial](https://www.tensorflow.org/tfx/tutorials/tfx/penguin_simple). If you are not familiar with TFX and you have not read that tutorial yet, you should read it before proceeding with this notebook.Google Cloud Vertex Pipelines helps you to automate, monitor, and govern your ML systems by orchestrating your ML workflow in a serverless manner. You can define your ML pipelines using Python with TFX, and then execute your pipelines on Google Cloud. See [Vertex Pipelines introduction](https://cloud.google.com/vertex-ai/docs/pipelines/introduction) to learn more about Vertex Pipelines. This notebook is intended to be run on [Google Colab](https://colab.research.google.com/notebooks/intro.ipynb) or on [AI Platform Notebooks](https://cloud.google.com/ai-platform-notebooks). If you are not using one of these, you can simply click the "Run in Google Colab" button above. Set up Before you run this notebook, ensure that you have the following:- A [Google Cloud Platform](http://cloud.google.com/) project.- A [Google Cloud Storage](https://cloud.google.com/storage) bucket. See [the guide for creating buckets](https://cloud.google.com/storage/docs/creating-buckets).- Enable [Vertex AI and Cloud Storage API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,storage-component.googleapis.com).Please see [Vertex documentation](https://cloud.google.com/vertex-ai/docs/pipelines/configure-project) to configure your GCP project further. The two required APIs can also be enabled from the command line, as sketched below.
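As a convenience (an added sketch, not part of the original tutorial), the two APIs listed above can be enabled with the gcloud CLI from a notebook cell, assuming the gcloud SDK is installed and authenticated against your project:
###Code
# Enable the Vertex AI and Cloud Storage APIs for the current project.
# (Added example; assumes the gcloud SDK is configured for your GCP project.)
!gcloud services enable aiplatform.googleapis.com storage-component.googleapis.com
###Output
_____no_output_____
###Markdown
Install python packages We will install required Python packages including TFX and KFP to author ML pipelines and submit jobs to Vertex Pipelines.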
###Code
# Use the latest version of pip.
#!pip install --upgrade pip
!pip install tfx
pip list
###Output
WARNING: Ignoring invalid distribution -oogle-cloud-datastore (/opt/conda/lib/python3.7/site-packages)
Package Version
-------------------------------- -------------------
absl-py 0.11.0
adal 1.2.7
aiohttp 3.7.4
ansiwrap 0.8.4
anyio 2.2.0
appdirs 1.4.4
argon2-cffi 20.1.0
arrow 1.1.0
asn1crypto 1.4.0
astunparse 1.6.3
async-generator 1.10
async-timeout 3.0.1
attrs 20.3.0
avro-python3 1.9.2.1
backcall 0.2.0
backports.functools-lru-cache 1.6.4
binaryornot 0.4.4
black 21.5b0
bleach 3.3.0
blinker 1.4
Bottleneck 1.3.2
brotlipy 0.7.0
cachetools 4.2.2
caip-notebooks-serverextension 1.0.0
certifi 2020.12.5
cffi 1.14.5
chardet 4.0.0
click 7.1.2
cloudpickle 1.6.0
colorama 0.4.4
conda 4.9.2
conda-package-handling 1.7.3
confuse 1.4.0
cookiecutter 1.7.2
crcmod 1.7
cryptography 3.4.7
cycler 0.10.0
decorator 5.0.7
defusedxml 0.7.1
Deprecated 1.2.12
deprecation 2.1.0
dill 0.3.1.1
docker 5.0.0
docker-pycreds 0.4.0
docopt 0.6.2
docstring-parser 0.8.1
entrypoints 0.3
fastavro 1.4.1
fasteners 0.16
fire 0.4.0
flatbuffers 1.12
fsspec 2021.4.0
future 0.18.2
gast 0.3.3
gcsfs 2021.4.0
gitdb 4.0.7
GitPython 3.1.15
google-api-core 1.26.3
google-api-python-client 1.12.8
google-auth 1.30.0
google-auth-httplib2 0.1.0
google-auth-oauthlib 0.4.4
google-cloud-aiplatform 1.0.1
google-cloud-bigquery 2.16.0
google-cloud-bigquery-storage 2.4.0
google-cloud-bigtable 2.2.0
google-cloud-core 1.6.0
google-cloud-dataproc 2.3.1
google-cloud-datastore 1.15.3
google-cloud-dlp 1.0.0
google-cloud-firestore 2.1.1
google-cloud-kms 2.2.0
google-cloud-language 1.3.0
google-cloud-logging 2.3.1
google-cloud-monitoring 2.2.1
google-cloud-pipeline-components 0.1.1
google-cloud-pubsub 1.7.0
google-cloud-scheduler 2.2.0
google-cloud-spanner 1.19.1
google-cloud-speech 2.3.0
google-cloud-storage 1.38.0
google-cloud-tasks 2.2.0
google-cloud-translate 3.1.0
google-cloud-videointelligence 1.16.1
google-cloud-vision 1.0.0
google-crc32c 1.1.2
google-pasta 0.2.0
google-resumable-media 1.2.0
googleapis-common-protos 1.53.0
greenlet 1.1.0
grpc-google-iam-v1 0.12.3
grpcio 1.32.0
grpcio-gcp 0.2.2
h5py 2.10.0
hdfs 2.6.0
htmlmin 0.1.12
httplib2 0.17.4
idna 2.10
ImageHash 4.2.0
importlib-metadata 4.0.1
ipykernel 5.5.4
ipython 7.23.1
ipython-genutils 0.2.0
ipython-sql 0.3.9
ipywidgets 7.6.3
jedi 0.18.0
Jinja2 2.11.3
jinja2-time 0.2.0
joblib 1.0.1
json5 0.9.5
jsonschema 3.2.0
jupyter-client 6.1.12
jupyter-core 4.7.1
jupyter-http-over-ws 0.0.8
jupyter-packaging 0.10.1
jupyter-server 1.6.4
jupyter-server-mathjax 0.2.2
jupyterlab 1.2.16
jupyterlab-executor 0.9.3
jupyterlab-git 0.11.0
jupyterlab-pygments 0.1.2
jupyterlab-server 1.2.0
jupyterlab-widgets 1.0.0
Keras-Preprocessing 1.1.2
kfp 1.6.2
kfp-pipeline-spec 0.1.7
kfp-server-api 1.6.0
kiwisolver 1.3.1
kubernetes 12.0.1
libcst 0.3.18
llvmlite 0.36.0
Markdown 3.3.4
MarkupSafe 1.1.1
matplotlib 3.4.2
matplotlib-inline 0.1.2
missingno 0.4.2
mistune 0.8.4
multidict 5.1.0
multimethod 1.4
mypy-extensions 0.4.3
nb-conda 2.2.1
nb-conda-kernels 2.3.1
nbclient 0.5.3
nbconvert 6.0.7
nbdime 3.0.0
nbformat 5.1.3
nest-asyncio 1.5.1
networkx 2.5
notebook 6.3.0
notebook-executor 0.2
numba 0.53.1
numpy 1.19.5
oauth2client 4.1.3
oauthlib 3.0.1
olefile 0.46
opt-einsum 3.3.0
packaging 20.9
pandas 1.2.4
pandas-profiling 3.0.0
pandocfilters 1.4.2
papermill 2.3.3
parso 0.8.2
pathspec 0.8.1
patsy 0.5.1
pexpect 4.8.0
phik 0.11.2
pickleshare 0.7.5
Pillow 8.1.2
pip 21.1.2
poyo 0.5.0
prettytable 2.1.0
prometheus-client 0.10.1
prompt-toolkit 3.0.18
proto-plus 1.18.1
protobuf 3.16.0
psutil 5.8.0
ptyprocess 0.7.0
pyarrow 2.0.0
pyasn1 0.4.8
pyasn1-modules 0.2.7
pycosat 0.6.3
pycparser 2.20
pydantic 1.8.1
pydot 1.4.2
Pygments 2.9.0
PyJWT 2.1.0
pymongo 3.11.4
pyOpenSSL 20.0.1
pyparsing 2.4.7
pyrsistent 0.17.3
PySocks 1.7.1
python-dateutil 2.8.1
python-slugify 5.0.2
pytz 2021.1
PyWavelets 1.1.1
PyYAML 5.4.1
pyzmq 22.0.3
regex 2021.4.4
requests 2.25.1
requests-oauthlib 1.3.0
requests-toolbelt 0.9.1
retrying 1.3.3
rsa 4.7.2
ruamel-yaml-conda 0.15.80
scikit-learn 0.24.2
scipy 1.6.3
seaborn 0.11.1
Send2Trash 1.5.0
setuptools 49.6.0.post20210108
simplejson 3.17.2
six 1.15.0
smmap 3.0.5
sniffio 1.2.0
SQLAlchemy 1.4.15
sqlparse 0.4.1
statsmodels 0.12.2
strip-hints 0.1.9
tabulate 0.8.9
tangled-up-in-unicode 0.1.0
tenacity 7.0.0
tensorboard 2.5.0
tensorboard-data-server 0.6.1
tensorboard-plugin-wit 1.8.0
tensorflow 2.4.1
tensorflow-estimator 2.4.0
termcolor 1.1.0
terminado 0.9.4
testpath 0.4.4
text-unidecode 1.3
textwrap3 0.9.2
threadpoolctl 2.1.0
toml 0.10.2
tomlkit 0.7.0
tornado 6.1
tqdm 4.60.0
traitlets 5.0.5
typed-ast 1.4.3
typing-extensions 3.7.4.3
typing-inspect 0.6.0
ujson 4.0.2
Unidecode 1.2.0
uritemplate 3.0.1
urllib3 1.26.4
visions 0.7.1
wcwidth 0.2.5
webencodings 0.5.1
websocket-client 0.57.0
Werkzeug 2.0.1
wheel 0.36.2
whichcraft 0.6.1
widgetsnbextension 3.5.1
wrapt 1.12.1
yarl 1.6.3
zipp 3.4.1
[33mWARNING: Ignoring invalid distribution -oogle-cloud-datastore (/opt/conda/lib/python3.7/site-packages)[0m
[33mWARNING: Ignoring invalid distribution -oogle-cloud-datastore (/opt/conda/lib/python3.7/site-packages)[0m
Note: you may need to restart the kernel to use updated packages.
###Markdown
Did you restart the runtime? If you are using Google Colab, the first time that you run the cell above, you must restart the runtime by clicking the "RESTART RUNTIME" button above or using the "Runtime > Restart runtime ..." menu. This is because of the way that Colab loads packages. If you are not on Colab, you can restart the runtime with the following cell. Log in to Google for this notebook. If you are running this notebook on Colab, authenticate with your user account. **If you are on AI Platform Notebooks**, authenticate with Google Cloud before running the next section, by running `gcloud auth login` **in the Terminal window** (which you can open via **File** > **New** in the menu). You only need to do this once per notebook instance.
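The helper cells for those two steps are not present in this checkpoint; the following is a minimal sketch of what they would look like, based only on the text above (an assumption, not the original cells — the `google.colab` module only exists on Colab, and the kernel shutdown simply forces a restart so newly installed packages are picked up):

```python
# Sketch only (not part of the original checkpoint).
import sys

if 'google.colab' in sys.modules:
    # On Colab: authenticate with your user account.
    from google.colab import auth
    auth.authenticate_user()
else:
    # On other Jupyter environments: restart the kernel after the installs above.
    import IPython
    IPython.Application.instance().kernel.do_shutdown(restart=True)
```

Check the package versions.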
###Code
import tensorflow as tf
print('TensorFlow version: {}'.format(tf.__version__))
from tfx import v1 as tfx
print('TFX version: {}'.format(tfx.__version__))
import kfp
print('KFP version: {}'.format(kfp.__version__))
###Output
TensorFlow version: 2.4.1
TFX version: 0.30.0
KFP version: 1.6.2
###Markdown
Set up variables. We will set up some variables used to customize the pipelines below. The following information is required: * GCP Project id. See [Identifying your project id](https://cloud.google.com/resource-manager/docs/creating-managing-projectsidentifying_projects). * GCP Region to run pipelines. For more information about the regions that Vertex Pipelines is available in, see the [Vertex AI locations guide](https://cloud.google.com/vertex-ai/docs/general/locationsfeature-availability). * Google Cloud Storage Bucket to store pipeline outputs. **Enter required values in the cell below before running it**.
###Code
GOOGLE_CLOUD_PROJECT = "feature-store-mars21" # <--- ENTER THIS
GOOGLE_CLOUD_REGION = "us-central1" # <--- ENTER THIS
GCS_BUCKET_NAME = "feature-store-mars21" # <--- ENTER THIS
if not (GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_REGION and GCS_BUCKET_NAME):
from absl import logging
logging.error('Please set all required parameters.')
###Output
_____no_output_____
###Markdown
Set `gcloud` to use your project.
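The corresponding `gcloud` call is not included in this checkpoint; a minimal sketch (assuming the `gcloud` CLI is available in the notebook environment):

```python
# Sketch: point the gcloud CLI at the project configured above.
!gcloud config set project {GOOGLE_CLOUD_PROJECT}
```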
###Code
PIPELINE_NAME = 'penguin-vertex-pipelines'
# Path to various pipeline artifact.
PIPELINE_ROOT = 'gs://{}/pipeline_root/{}'.format(
GCS_BUCKET_NAME, PIPELINE_NAME)
# Paths for users' Python module.
MODULE_ROOT = 'gs://{}/pipeline_root/{}/module'.format(
GCS_BUCKET_NAME, PIPELINE_NAME)
# Paths for input data.
DATA_ROOT = 'gs://{}/data/{}'.format(GCS_BUCKET_NAME, PIPELINE_NAME)
# This is the path where your model will be pushed for serving.
SERVING_MODEL_DIR = 'gs://{}/pipeline_root/{}/serving_model'.format(
GCS_BUCKET_NAME, PIPELINE_NAME)
print('PIPELINE_ROOT: {}'.format(PIPELINE_ROOT))
###Output
PIPELINE_ROOT: gs://feature-store-mars21/pipeline_root/penguin-vertex-pipelines
###Markdown
Prepare example data. We will use the same [Palmer Penguins dataset](https://allisonhorst.github.io/palmerpenguins/articles/intro.html) as the [Simple TFX Pipeline Tutorial](https://www.tensorflow.org/tfx/tutorials/tfx/penguin_simple). There are four numeric features in this dataset which were already normalized to have range [0,1]. We will build a classification model which predicts the `species` of penguins. We need to make our own copy of the dataset. Because TFX ExampleGen reads inputs from a directory, we need to create a directory and copy the dataset to it on GCS.
###Code
!gsutil cp gs://download.tensorflow.org/data/palmer_penguins/penguins_processed.csv {DATA_ROOT}/
###Output
Copying gs://download.tensorflow.org/data/palmer_penguins/penguins_processed.csv [Content-Type=application/octet-stream]...
/ [1 files][ 25.0 KiB/ 25.0 KiB]
Operation completed over 1 objects/25.0 KiB.
###Markdown
Take a quick look at the CSV file.
###Code
!gsutil cat {DATA_ROOT}/penguins_processed.csv | head
###Output
species,culmen_length_mm,culmen_depth_mm,flipper_length_mm,body_mass_g
0,0.2545454545454545,0.6666666666666666,0.15254237288135594,0.2916666666666667
0,0.26909090909090905,0.5119047619047618,0.23728813559322035,0.3055555555555556
0,0.29818181818181805,0.5833333333333334,0.3898305084745763,0.1527777777777778
0,0.16727272727272732,0.7380952380952381,0.3559322033898305,0.20833333333333334
0,0.26181818181818167,0.892857142857143,0.3050847457627119,0.2638888888888889
0,0.24727272727272717,0.5595238095238096,0.15254237288135594,0.2569444444444444
0,0.25818181818181823,0.773809523809524,0.3898305084745763,0.5486111111111112
0,0.32727272727272727,0.5357142857142859,0.1694915254237288,0.1388888888888889
0,0.23636363636363636,0.9642857142857142,0.3220338983050847,0.3055555555555556
###Markdown
Create a pipeline. TFX pipelines are defined using Python APIs. We will define a pipeline which consists of three components: an ExampleGen (a BigQueryExampleGen in this modified notebook, in place of the original CsvExampleGen), Trainer, and Pusher. The pipeline and model definition is almost the same as in the [Simple TFX Pipeline Tutorial](https://www.tensorflow.org/tfx/tutorials/tfx/penguin_simple). The only difference is that we don't need to set `metadata_connection_config`, which is used to locate the [ML Metadata](https://www.tensorflow.org/tfx/guide/mlmd) database. Because Vertex Pipelines uses a managed metadata service, users don't need to care about it, and we don't need to specify the parameter. Before actually defining the pipeline, we need to write the model code for the Trainer component first. Write model code. We will use the same model code as in the [Simple TFX Pipeline Tutorial](https://www.tensorflow.org/tfx/tutorials/tfx/penguin_simple).
###Code
_trainer_module_file = 'penguin_trainer.py'
%%writefile {_trainer_module_file}
# Copied from https://www.tensorflow.org/tfx/tutorials/tfx/penguin_simple
from typing import List
from absl import logging
import tensorflow as tf
from tensorflow import keras
from tensorflow_transform.tf_metadata import schema_utils
from tfx import v1 as tfx
from tfx_bsl.public import tfxio
from tensorflow_metadata.proto.v0 import schema_pb2
_FEATURE_KEYS = [
'culmen_length_mm', 'culmen_depth_mm', 'flipper_length_mm', 'body_mass_g'
]
_LABEL_KEY = 'species'
_TRAIN_BATCH_SIZE = 20
_EVAL_BATCH_SIZE = 10
# Since we're not generating or creating a schema, we will instead create
# a feature spec. Since there are a fairly small number of features this is
# manageable for this dataset.
_FEATURE_SPEC = {
**{
feature: tf.io.FixedLenFeature(shape=[1], dtype=tf.float32)
for feature in _FEATURE_KEYS
},
_LABEL_KEY: tf.io.FixedLenFeature(shape=[1], dtype=tf.int64)
}
def _input_fn(file_pattern: List[str],
data_accessor: tfx.components.DataAccessor,
schema: schema_pb2.Schema,
batch_size: int) -> tf.data.Dataset:
"""Generates features and label for training.
Args:
file_pattern: List of paths or patterns of input tfrecord files.
data_accessor: DataAccessor for converting input to RecordBatch.
schema: schema of the input data.
batch_size: representing the number of consecutive elements of returned
dataset to combine in a single batch
Returns:
A dataset that contains (features, indices) tuple where features is a
dictionary of Tensors, and indices is a single Tensor of label indices.
"""
return data_accessor.tf_dataset_factory(
file_pattern,
tfxio.TensorFlowDatasetOptions(
batch_size=batch_size, label_key=_LABEL_KEY),
schema=schema).repeat()
def _make_keras_model() -> tf.keras.Model:
"""Creates a DNN Keras model for classifying penguin data.
Returns:
A Keras Model.
"""
# The model below is built with Functional API, please refer to
# https://www.tensorflow.org/guide/keras/overview for all API options.
inputs = [keras.layers.Input(shape=(1,), name=f) for f in _FEATURE_KEYS]
d = keras.layers.concatenate(inputs)
for _ in range(2):
d = keras.layers.Dense(8, activation='relu')(d)
outputs = keras.layers.Dense(3)(d)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
optimizer=keras.optimizers.Adam(1e-2),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy()])
model.summary(print_fn=logging.info)
return model
# TFX Trainer will call this function.
def run_fn(fn_args: tfx.components.FnArgs):
"""Train the model based on given args.
Args:
fn_args: Holds args used to train the model as name/value pairs.
"""
# This schema is usually either an output of SchemaGen or a manually-curated
  # version provided by the pipeline author. A schema can also be derived from a TFT
# graph if a Transform component is used. In the case when either is missing,
# `schema_from_feature_spec` could be used to generate schema from very simple
# feature_spec, but the schema returned would be very primitive.
schema = schema_utils.schema_from_feature_spec(_FEATURE_SPEC)
train_dataset = _input_fn(
fn_args.train_files,
fn_args.data_accessor,
schema,
batch_size=_TRAIN_BATCH_SIZE)
eval_dataset = _input_fn(
fn_args.eval_files,
fn_args.data_accessor,
schema,
batch_size=_EVAL_BATCH_SIZE)
model = _make_keras_model()
model.fit(
train_dataset,
steps_per_epoch=fn_args.train_steps,
validation_data=eval_dataset,
validation_steps=fn_args.eval_steps)
# The result of the training should be saved in `fn_args.serving_model_dir`
# directory.
model.save(fn_args.serving_model_dir, save_format='tf')
###Output
Overwriting penguin_trainer.py
###Markdown
Copy the module file to GCS, where it can be accessed by the pipeline components. Because model training happens on GCP, we need to upload this model definition. Otherwise, you might want to build a container image that includes the module file and use that image to run the pipeline.
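That container-image alternative is not shown in this notebook; a minimal sketch might look like the following (the base-image tag and the `default_image` runner option are assumptions — check them against your installed TFX version):

```python
# Sketch only: bundle the trainer module into a custom image instead of copying it to GCS.
#
# Dockerfile (hypothetical):
#   FROM tensorflow/tfx:1.0.0        # assumption: use the tag matching your TFX version
#   COPY penguin_trainer.py /pipeline/penguin_trainer.py
#
# Then point the Kubeflow V2 runner at that image, for example:
#   tfx.orchestration.experimental.KubeflowV2DagRunnerConfig(
#       default_image='gcr.io/<your-project>/penguin-tfx-image')
```

The next cell takes the simpler route and just copies the module file to GCS.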
###Code
!gsutil cp {_trainer_module_file} {MODULE_ROOT}/
###Output
Copying file://penguin_trainer.py [Content-Type=text/x-python]...
/ [1 files][ 3.8 KiB/ 3.8 KiB]
Operation completed over 1 objects/3.8 KiB.
###Markdown
Write a pipeline definition. We will define a function to create a TFX pipeline.
###Code
# Copied from https://www.tensorflow.org/tfx/tutorials/tfx/penguin_simple and
# slightly modified because we don't need the `metadata_path` argument.
from typing import List, Optional  # required for the type annotations in the signature below
def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str,
module_file: str, serving_model_dir: str,
query: str,
beam_pipeline_args: Optional[List[str]],
) -> tfx.dsl.Pipeline:
"""Creates a three component penguin pipeline with TFX."""
# Brings data into the pipeline.
#example_gen = tfx.components.CsvExampleGen(input_base=data_root)
#NEW: Query data in BigQuery as a data source.
example_gen = tfx.extensions.google_cloud_big_query.BigQueryExampleGen(query=query)
# Uses user-provided Python function that trains a model.
trainer = tfx.components.Trainer(
module_file=module_file,
examples=example_gen.outputs['examples'],
train_args=tfx.proto.TrainArgs(num_steps=100),
eval_args=tfx.proto.EvalArgs(num_steps=5))
# Pushes the model to a filesystem destination.
pusher = tfx.components.Pusher(
model=trainer.outputs['model'],
push_destination=tfx.proto.PushDestination(
filesystem=tfx.proto.PushDestination.Filesystem(
base_directory=serving_model_dir)))
# Following three components will be included in the pipeline.
components = [
example_gen,
trainer,
pusher,
]
return tfx.dsl.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=components,
beam_pipeline_args=beam_pipeline_args)
###Output
_____no_output_____
###Markdown
Run the pipeline on Vertex Pipelines. We used `LocalDagRunner`, which runs in a local environment, in the [Simple TFX Pipeline Tutorial](https://www.tensorflow.org/tfx/tutorials/tfx/penguin_simple). TFX provides multiple orchestrators to run your pipeline. In this tutorial we will use Vertex Pipelines together with the Kubeflow V2 dag runner. We need to define a runner to actually run the pipeline. You will compile your pipeline into the pipeline definition format using TFX APIs.
###Code
import os
PIPELINE_DEFINITION_FILE = PIPELINE_NAME + '_pipeline.json'
QUERY = "SELECT * FROM `tfx-oss-public.palmer_penguins.palmer_penguins`"
BIG_QUERY_WITH_DIRECT_RUNNER_BEAM_PIPELINE_ARGS = [
'--project=' + GOOGLE_CLOUD_PROJECT,
'--temp_location=' + os.path.join('gs://', GCS_BUCKET_NAME, 'tmp'),
]
runner = tfx.orchestration.experimental.KubeflowV2DagRunner(
config=tfx.orchestration.experimental.KubeflowV2DagRunnerConfig(),
output_filename=PIPELINE_DEFINITION_FILE)
# Following function will write the pipeline definition to PIPELINE_DEFINITION_FILE.
_ = runner.run(
_create_pipeline(
pipeline_name=PIPELINE_NAME,
pipeline_root=PIPELINE_ROOT,
data_root=DATA_ROOT,
module_file=os.path.join(MODULE_ROOT, _trainer_module_file),
serving_model_dir=SERVING_MODEL_DIR,
query=QUERY,
beam_pipeline_args=BIG_QUERY_WITH_DIRECT_RUNNER_BEAM_PIPELINE_ARGS))
###Output
_____no_output_____
###Markdown
The generated definition file can be submitted using the kfp client.
###Code
# docs_infra: no_execute
from kfp.v2.google import client
pipelines_client = client.AIPlatformClient(
project_id=GOOGLE_CLOUD_PROJECT,
region=GOOGLE_CLOUD_REGION,
)
_ = pipelines_client.create_run_from_job_spec(PIPELINE_DEFINITION_FILE)
###Output
_____no_output_____ |
01_mysteries_of_neural_networks/04_optimizers/Playing with learning rate.ipynb | ###Markdown
Playing with learning rate--- Visualization of the effects of wrong choice of learning rate ***Author: Piotr Skalski*** Imports
###Code
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
###Output
_____no_output_____
###Markdown
Settings
###Code
# parameters a and b of the real function
REAL_PARAMS = [1, 1]
# starting point for gradient descent
INIT_PARAMS = [-0.5, -1]
# output directory (the folder must be created on the drive)
OUTPUT_DIR = "playing_with_learning_rate"
###Output
_____no_output_____
###Markdown
Performing the simulation
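The simulation applies plain gradient descent to the parameters $(x, y)$; at every iteration the update is

$$ (x, y)_{t+1} = (x, y)_t - \eta \, \nabla f(x_t, y_t), $$

where $\eta$ is the learning rate passed to the function below.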
###Code
def find_optimization_path(tf_function, init_point, iterations, learning_rate):
x, y = [tf.Variable(initial_value=p, dtype=tf.float32) for p in init_point]
function = tf_function(x, y)
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(function)
x_list, y_list, cost_list = [], [], []
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for t in range(iterations):
x_, y_, function_ = sess.run([x, y, function])
x_list.append(x_); y_list.append(y_); cost_list.append(function_)
result, _ = sess.run([function, train_op])
return x_list, y_list, cost_list
###Output
_____no_output_____
###Markdown
Create a blank chart
###Code
def create_blank_chart_with_styling(plot_size):
# my favorite styling kit
plt.style.use('dark_background')
# determining the size of the graph
fig = plt.figure(figsize=plot_size)
# 3D mode
ax = Axes3D(fig)
# transparent axis pane background
ax.xaxis.pane.fill = False
ax.yaxis.pane.fill = False
ax.zaxis.pane.fill = False
# setting chart axis names
ax.set(xlabel="$x$", ylabel="$y$")
return (fig, ax)
###Output
_____no_output_____
###Markdown
Create animation
###Code
def create_animation(tf_function, np_function, init_point, iterations, learning_rate, plot_name, file_name, dir_name):
# 3D cost figure
for angle in range(iterations):
fix, ax = create_blank_chart_with_styling((6, 6))
x_list, y_list, cost_list = find_optimization_path(tf_function, init_point, iterations, learning_rate)
# parameter space
a3D, b3D = np.meshgrid(np.linspace(-4, 4, 100), np.linspace(-4, 4, 100))
cost3D = np.array([np_function(x_, y_) for x_, y_ in zip(a3D.flatten(), b3D.flatten())]).reshape(a3D.shape)
ax.plot_surface(a3D, b3D, cost3D, rstride=1, cstride=1, cmap=plt.get_cmap('rainbow'), alpha=1.0)
# plot 3D gradient descent
if angle < 10:
ax.plot(x_list[:angle], y_list[:angle], zs=cost_list[:angle], zdir='z', c='r', lw=2)
else:
ax.plot(x_list[angle-10:angle], y_list[angle-10:angle], zs=cost_list[angle-10:angle], zdir='z', c='r', lw=2)
# graph rotation
ax.view_init(30, 225 + angle*2)
# addition of a title
ax.set_title(plot_name, fontsize=20)
# saving a file
plt.savefig("./{}/{}_{:05}.png".format(dir_name, file_name, angle))
plt.close()
tf_fun = lambda x, y: 3*(1-x)**2*tf.exp(-(x**2) - (y+1)**2) - 10*(x/5 - x**3 - y**5)*tf.exp(-x**2-y**2) - 1/3*tf.exp(-(x+1)**2 - y**2)
np_fun = lambda x, y: 3*(1-x)**2*np.exp(-(x**2) - (y+1)**2) - 10*(x/5 - x**3 - y**5)*np.exp(-x**2-y**2) - 1/3*np.exp(-(x+1)**2 - y**2)
create_animation(tf_fun, np_fun, INIT_PARAMS, 180, 0.15, "Big learning rate", "big_rate", OUTPUT_DIR)
create_animation(tf_fun, np_fun, INIT_PARAMS, 180, 0.01, "Small learning rate", "small_rate", OUTPUT_DIR)
###Output
_____no_output_____ |
sandbox/density_recovery_from_projections.ipynb | ###Markdown
Say we have the ability to sample points (with bias) via random sampling in an ambient space followed by projection. We'd like to do that to get lots of points in our constraint set, and then evaluate our density at those points to reconstruct the underlying distribution intersected with the constraint set.
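For this particular constraint set (the unit disk $\{x : x^\top x \le 1\}$), the Euclidean projection also has a simple closed form,

$$ \Pi(z) = \begin{cases} z, & \lVert z \rVert \le 1 \\ z / \lVert z \rVert, & \text{otherwise,} \end{cases} $$

which can serve as a sanity check for the optimization-based projection defined in the next cell. A tiny sketch (an addition, not part of the original notebook):

```python
import numpy as np

def project_unit_disk(z):
    # Closed-form Euclidean projection onto the unit disk (sanity check only).
    norm = np.linalg.norm(z)
    return z if norm <= 1.0 else z / norm
```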
###Code
# Imports assumed by this notebook but not shown in its visible cells: numpy, torch,
# matplotlib, and (assumption) Drake's MathematicalProgram/Solve for the projection QP.
import numpy as np
import torch
from torch.distributions import MultivariateNormal
import matplotlib.pyplot as plt
from pydrake.all import MathematicalProgram, Solve

def do_projection(x_targ):
prog = MathematicalProgram()
x = prog.NewContinuousVariables(2, "x")
prog.AddConstraint(x.T.dot(x) <= 1.)
prog.AddCost( np.sum((x - x_targ).T.dot(x - x_targ)))
result = Solve(prog)
assert result.is_success()
return result.GetSolution(x)
zs = np.random.normal(0., 0.25, (100, 2))
xs = np.stack([do_projection(z) for z in zs])
plt.scatter(zs[:, 0], zs[:, 1], color="r", alpha=0.1, label="Unprojected")
plt.scatter(xs[:, 0], xs[:, 1], color="b", alpha=0.25, label="Projected")
plt.legend()
true_mean = torch.tensor([0.25, -0.25])
true_var = torch.eye(2)*0.25
xs_torch = torch.tensor(xs, dtype=torch.float64)
p = MultivariateNormal(true_mean, true_var)
###Output
_____no_output_____
###Markdown
http://proceedings.mlr.press/v54/liu17b/liu17b.pdf
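For reference, the quantity assembled below is the Stein kernel from the linked paper: for a base kernel $k$ and target density $p$,

$$ \kappa_p(x, x') = s_p(x)^\top s_p(x')\, k(x, x') + s_p(x)^\top \nabla_{x'} k(x, x') + s_p(x')^\top \nabla_{x} k(x, x') + \operatorname{tr}\!\left(\nabla_x \nabla_{x'} k(x, x')\right), $$

with score $s_p(x) = \nabla_x \log p(x)$ (these are the four terms `p1`–`p4` in the code below). The importance weights are then obtained by minimizing the resulting kernelized Stein discrepancy over the probability simplex,

$$ \min_{w}\; w^\top K_p\, w \quad \text{s.t.}\quad \sum_i w_i = 1,\; w_i \ge 0, \qquad (K_p)_{ij} = \kappa_p(x_i, x_j). $$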
###Code
# Copied from https://github.com/singhalrk/stein_ksd/blob/master/kernels.py
class RBFKernel():
def __init__(self, bw):
self.beta = 1./bw
def value(self, x, y):
r = (x - y).square().sum(axis=-1)
return torch.exp(-self.beta * r)
def grad_x(self, x, y):
r = (x - y).square().sum(axis=-1, keepdim=True).repeat(1, x.shape[-1])
return - 2 * self.beta * (x - y) * torch.exp(-self.beta * r)
def grad_y(self, x, y):
r = (x - y).square().sum(axis=-1, keepdim=True).repeat(1, x.shape[-1])
return 2 * self.beta * (x - y) * torch.exp(-self.beta * r)
def grad_xy(self, x, y):
r = (x - y).square().sum(axis=-1)
_y = 2 * self.beta * torch.exp(-self.beta * r)
_xy = -4 * self.beta**2 * r * torch.exp(-self.beta * r)
return _y + _xy
def stein_kernel(x, y, p, kernel):
x.requires_grad = True
x.grad = None
log_px = p.log_prob(x)
log_px.backward(gradient=torch.ones_like(log_px))
grad_log_px = x.grad
y.requires_grad = True
y.grad = None
log_py = p.log_prob(y)
log_py.backward(gradient=torch.ones_like(log_py))
grad_log_py = y.grad
x.grad = None
y.grad = None
kernel_val = kernel.value(x, y)
kernel_val.backward(gradient=torch.ones_like(kernel_val))
kernel_grad_y = y.grad
kernel_grad_x = x.grad
assert torch.allclose(kernel_grad_x, kernel.grad_x(x, y))
assert torch.allclose(kernel_grad_y, kernel.grad_y(x, y))
p1 = (grad_log_px * grad_log_py).sum(axis=-1) * kernel.value(x, y)
p2 = (grad_log_px * kernel.grad_y(x, y)).sum(axis=-1)
p3 = (grad_log_py * kernel.grad_x(x, y)).sum(axis=-1)
p4 = kernel.grad_xy(x, y)
return p1 + p2 + p3 + p4
N = len(xs_torch)
# Make two vectors of all pairs
x1 = xs_torch.unsqueeze(1).repeat(1, N, 1).reshape(N**2, 2)
x2 = xs_torch.unsqueeze(0).repeat(N, 1, 1).reshape(N**2, 2)
# Bandwidth as median of pairwise square distance
pairwise_dists = ((x1 - x2).square()).sum(axis=-1).sqrt()
median_dist = torch.median(pairwise_dists)
print("Median dist: %f" % median_dist.item())
kernel = RBFKernel(bw=median_dist)
Kp = stein_kernel(x1, x2, p, kernel).reshape(N, N)
Kp = Kp.detach().numpy()
prog = MathematicalProgram()
w = prog.NewContinuousVariables(N, 1, "w")
prog.AddLinearConstraint(w.sum() == 1)
prog.AddBoundingBoxConstraint(np.zeros(N), np.ones(N), w)
prog.AddQuadraticCost(w.transpose().dot(Kp).dot(w)[0, 0])
result = Solve(prog)
assert result.is_success()
w = result.GetSolution(w)
plt.hist(w)
print(w)
x = np.linspace(-1, 1, 100)
y = np.linspace(-1, 1, 100)
xx, yy = np.meshgrid(x,y)
# evaluate kernels at grid points
xxyy = np.c_[xx.ravel(), yy.ravel()]
zz = np.exp(p.log_prob(torch.tensor(xxyy)).numpy())
# reshape and plot image
img = zz.reshape((100,100))
plt.imshow(img, extent=(-1, 1, -1, 1), origin='lower')
plt.scatter(true_mean[0], true_mean[1], c="red", marker="x")
# Show probe point weights with color mapping
plt.scatter(xs[:, 0], xs[:, 1], c=w)
print("Expected value of approximated dist (should be %s): %s" % (
true_mean, (w * xs.T).sum(axis=-1)
))
###Output
Expected value of approximated dist (should be tensor([ 0.2500, -0.2500])): [ 0.14661741 -0.20284873]
|
Web_Crawling/naver-bs4.ipynb | ###Markdown
Syntax: `driver.find_element_by_css_selector("CSS selector")`. Example: given the HTML `<p class="content">Site content goes here.</p>`, the element can be located with `content = driver.find_element_by_css_selector('p.content')`.
###Code
driver.find_element_by_css_selector('.oTravelBox>.boxList>.moreBtnWrap>.moreBtn').click()
###Output
_____no_output_____
###Markdown
Use the `find_elements` method, which returns every matching element. CSS selector syntax also lets you select attributes that start with a particular string, e.g. `findings = driver.find_elements_by_css_selector("*[id^='custom']")`, and then for each `finding in findings`: `finding.find_element_by_css_selector('div > p > name')`.
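The code cells below also rely on objects that were set up earlier in the original project and are not shown in this notebook: a Selenium `driver`, a `TourInfo` record type, a `tour_list` accumulator, and the `time` module. A minimal sketch of that assumed setup (the `TourInfo` field names are inferred from how it is constructed below, and the target URL is a placeholder):

```python
# Assumed setup (not part of this notebook's visible cells).
import time
from collections import namedtuple
from selenium import webdriver

# Field names inferred from the TourInfo(...) call in the crawling loop below.
TourInfo = namedtuple('TourInfo', ['title', 'price', 'period', 'link', 'img'])

tour_list = []                       # filled by the crawling loop
driver = webdriver.Chrome()          # assumes chromedriver is on PATH
driver.get('<travel-site search results URL>')  # placeholder: the real URL is not shown here
```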
###Code
# Run the searchModule.SetCategoryList(1, '') script for each page
# (also to check what happens when we page past the available posts)
for page in range(1, 4): # 2):
try:
        # Run the JavaScript
driver.execute_script("searchModule.SetCategoryList(%s, '')" % page)
time.sleep(2)
print("%s 페이지 이동" % page)
#############################################################
        # When collecting data from several sites, a common information schema needs to be defined:
        # product name, comment, period 1, period 2, price, rating, thumbnail, link (product detail page)
boxItems = driver.find_elements_by_css_selector('.oTravelBox>.boxList>li')
        # Visit each product one by one
for li in boxItems:
            # Should we use the image URL as-is,
            # or download it and upload it to our own server (via FTP)?
print( '썸네임', li.find_element_by_css_selector('img').get_attribute('src') )
print( '링크', li.find_element_by_css_selector('a').get_attribute('onclick') )
print( '상품명', li.find_element_by_css_selector('h5.proTit').text )
print( '코멘트', li.find_element_by_css_selector('.proSub').text )
print( '가격', li.find_element_by_css_selector('.proPrice').text )
area = ''
for info in li.find_elements_by_css_selector('.info-row .proInfo'):
print( info.text )
print('='*100)
            # Collect the data
            # li.find_elements_by_css_selector('.info-row .proInfo')[1].text
            # Data may be missing or incomplete, so indexing directly is risky
obj = TourInfo(
li.find_element_by_css_selector('h5.proTit').text,
li.find_element_by_css_selector('.proPrice').text,
li.find_elements_by_css_selector('.info-row .proInfo')[1].text,
li.find_element_by_css_selector('a').get_attribute('onclick'),
li.find_element_by_css_selector('img').get_attribute('src')
)
tour_list.append( obj )
except Exception as e1:
print( '오류', e1 )
print( tour_list, len(tour_list) )
driver.close()
###Output
_____no_output_____ |
ABTesting/L2_Experiment_Size.ipynb | ###Markdown
Experiment SizeWe can use the knowledge of our desired practical significance boundary to plan out our experiment. By knowing how many observations we need in order to detect our desired effect to our desired level of reliability, we can see how long we would need to run our experiment and whether or not it is feasible.Let's use the example from the video, where we have a baseline click-through rate of 10% and want to see a manipulation increase this baseline to 12%. How many observations would we need in each group in order to detect this change with power $1-\beta = .80$ (i.e. detect the 2% absolute increase 80% of the time), at a Type I error rate of $\alpha = .05$?
###Code
# import packages
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Method 1: Trial and ErrorOne way we could solve this is through trial and error. Every sample size will have a level of power associated with it; testing multiple sample sizes will gradually allow us to narrow down the minimum sample size required to obtain our desired power level. This isn't a particularly efficient method, but it can provide an intuition for how experiment sizing works.Fill in the `power()` function below following these steps:1. Under the null hypothesis, we should have a critical value for which the Type I error rate is at our desired alpha level. - `se_null`: Compute the standard deviation for the difference in proportions under the null hypothesis for our two groups. The base probability is given by `p_null`. Remember that the variance of the difference distribution is the sum of the variances for the individual distributions, and that _each_ group is assigned `n` observations. - `null_dist`: To assist in re-use, this should be a [scipy norm object](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.norm.html). Specify the center and standard deviation of the normal distribution using the "loc" and "scale" arguments, respectively. - `p_crit`: Compute the critical value of the distribution that would cause us to reject the null hypothesis. One of the methods of the `null_dist` object will help you obtain this value (passing in some function of our desired error rate `alpha`).2. The power is the proportion of the distribution under the alternative hypothesis that is past that previously-obtained critical value. - `se_alt`: Now it's time to make computations in the other direction. This will be standard deviation of differences under the desired detectable difference. Note that the individual distributions will have different variances now: one with `p_null` probability of success, and the other with `p_alt` probability of success. - `alt_dist`: This will be a scipy norm object like above. Be careful of the "loc" argument in this one. The way the `power` function is set up, it expects `p_alt` to be greater than `p_null`, for a positive difference. - `beta`: Beta is the probability of a Type-II error, or the probability of failing to reject the null for a particular non-null state. That means you should make use of `alt_dist` and `p_crit` here!The second half of the function has already been completed for you, which creates a visualization of the distribution of differences for the null case and for the desired detectable difference. Use the cells that follow to run the function and observe the visualizations, and to test your code against a few assertion statements. Check the following page if you need help coming up with the solution.
###Code
def power(p_null, p_alt, n, alpha = .05, plot = True):
"""
Compute the power of detecting the difference in two populations with
different proportion parameters, given a desired alpha rate.
Input parameters:
p_null: base success rate under null hypothesis
p_alt : desired success rate to be detected, must be larger than
p_null
n : number of observations made in each group
alpha : Type-I error rate
plot : boolean for whether or not a plot of distributions will be
created
Output value:
power : Power to detect the desired difference, under the null.
"""
# Compute the power
se_null = np.sqrt((p_null * (1-p_null) + p_null * (1-p_null)) / n)
null_dist = stats.norm(loc = 0, scale = se_null)
p_crit = null_dist.ppf(1 - alpha)
se_alt = np.sqrt((p_null * (1-p_null) + p_alt * (1-p_alt) ) / n)
alt_dist = stats.norm(loc = p_alt - p_null, scale = se_alt)
beta = alt_dist.cdf(p_crit)
if plot:
# Compute distribution heights
low_bound = null_dist.ppf(.01)
high_bound = alt_dist.ppf(.99)
x = np.linspace(low_bound, high_bound, 201)
y_null = null_dist.pdf(x)
y_alt = alt_dist.pdf(x)
# Plot the distributions
plt.plot(x, y_null)
plt.plot(x, y_alt)
plt.vlines(p_crit, 0, np.amax([null_dist.pdf(p_crit), alt_dist.pdf(p_crit)]),
linestyles = '--')
plt.fill_between(x, y_null, 0, where = (x >= p_crit), alpha = .5)
plt.fill_between(x, y_alt , 0, where = (x <= p_crit), alpha = .5)
plt.legend(['null','alt'])
plt.xlabel('difference')
plt.ylabel('density')
plt.show()
# return power
return (1 - beta)
power(.1, .12, 1000)
assert np.isclose(power(.1, .12, 1000, plot = False), 0.4412, atol = 1e-4)
assert np.isclose(power(.1, .12, 3000, plot = False), 0.8157, atol = 1e-4)
assert np.isclose(power(.1, .12, 5000, plot = False), 0.9474, atol = 1e-4)
print('You should see this message if all the assertions passed!')
###Output
You should see this message if all the assertions passed!
###Markdown
Method 2: Analytic SolutionNow that we've got some intuition for power by using trial and error, we can now approach a closed-form solution for computing a minimum experiment size. The key point to notice is that, for an $\alpha$ and $\beta$ both < .5, the critical value for determining statistical significance will fall between our null click-through rate and our alternative, desired click-through rate. So, the difference between $p_0$ and $p_1$ can be subdivided into the distance from $p_0$ to the critical value $p^*$ and the distance from $p^*$ to $p_1$.Those subdivisions can be expressed in terms of the standard error and the z-scores:$$p^* - p_0 = z_{1-\alpha} SE_{0},$$$$p_1 - p^* = -z_{\beta} SE_{1};$$$$p_1 - p_0 = z_{1-\alpha} SE_{0} - z_{\beta} SE_{1}$$In turn, the standard errors can be expressed in terms of the standard deviations of the distributions, divided by the square root of the number of samples in each group:$$SE_{0} = \frac{s_{0}}{\sqrt{n}},$$$$SE_{1} = \frac{s_{1}}{\sqrt{n}}$$Substituting these values in and solving for $n$ will give us a formula for computing a minimum sample size to detect a specified difference, at the desired level of power:$$n = \lceil \big(\frac{z_{\alpha} s_{0} - z_{\beta} s_{1}}{p_1 - p_0}\big)^2 \rceil$$where $\lceil ... \rceil$ represents the ceiling function, rounding up decimal values to the next-higher integer. Implement the necessary variables in the function below, and test them with the cells that follow.
###Code
def experiment_size(p_null, p_alt, alpha = .05, beta = .20):
"""
Compute the minimum number of samples needed to achieve a desired power
level for a given effect size.
Input parameters:
p_null: base success rate under null hypothesis
p_alt : desired success rate to be detected
alpha : Type-I error rate
beta : Type-II error rate
Output value:
n : Number of samples required for each group to obtain desired power
"""
# Get necessary z-scores and standard deviations (@ 1 obs per group)
z_null = stats.norm.ppf(1 - alpha)
z_alt = stats.norm.ppf(beta)
sd_null = np.sqrt(p_null * (1-p_null) + p_null * (1-p_null))
sd_alt = np.sqrt(p_null * (1-p_null) + p_alt * (1-p_alt) )
# Compute and return minimum sample size
p_diff = p_alt - p_null
n = ((z_null*sd_null - z_alt*sd_alt) / p_diff) ** 2
return np.ceil(n)
experiment_size(.1, .12)
assert np.isclose(experiment_size(.1, .12), 2863)
print('You should see this message if the assertion passed!')
###Output
You should see this message if the assertion passed!
###Markdown
Notes on InterpretationThe example explored above is a one-tailed test, with the alternative value greater than the null. The power computations performed in the first part will _not_ work if the alternative proportion is greater than the null, e.g. detecting a proportion parameter of 0.88 against a null of 0.9. You might want to try to rewrite the code to handle that case! The same issue should not show up for the second approach, where we directly compute the sample size.If you find that you need to do a two-tailed test, you should pay attention to two main things. First of all, the "alpha" parameter needs to account for the fact that the rejection region is divided into two areas. Secondly, you should perform the computation based on the worst-case scenario, the alternative case with the highest variability. Since, for the binomial, variance is highest when $p = .5$, decreasing as $p$ approaches 0 or 1, you should choose the alternative value that is closest to .5 as your reference when computing the necessary sample size.Note as well that the above methods only perform sizing for _statistical significance_, and do not take into account _practical significance_. One thing to realize is that if the true size of the experimental effect is the same as the desired practical significance level, then it's a coin flip whether the mean will be above or below the practical significance bound. This also doesn't even consider how a confidence interval might interact with that bound. In a way, experiment sizing is a way of checking on whether or not you'll be able to get what you _want_ from running an experiment, rather than checking if you'll get what you _need_. Alternative ApproachesThere are also tools and Python packages that can also help with sample sizing decisions, so you don't need to solve for every case on your own. The sample size calculator [here](http://www.evanmiller.org/ab-testing/sample-size.html) is applicable for proportions, and provides the same results as the methods explored above. (Note that the calculator assumes a two-tailed test, however.) Python package "statsmodels" has a number of functions in its [`power` module](https://www.statsmodels.org/stable/stats.htmlpower-and-sample-size-calculations) that perform power and sample size calculations. Unlike previously shown methods, differences between null and alternative are parameterized as an effect size (standardized difference between group means divided by the standard deviation). Thus, we can use these functions for more than just tests of proportions. If we want to do the same tests as before, the [`proportion_effectsize`](http://www.statsmodels.org/stable/generated/statsmodels.stats.proportion.proportion_effectsize.html) function computes [Cohen's h](https://en.wikipedia.org/wiki/Cohen%27s_h) as a measure of effect size. As a result, the output of the statsmodel functions will be different from the result expected above. This shouldn't be a major concern since in most cases, you're not going to be stopping based on an exact number of observations. You'll just use the value to make general design decisions.
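Before the statsmodels example, here is a quick sketch of the two-tailed adjustment described above, reusing the `experiment_size` function defined earlier (an addition, not part of the original notebook):

```python
# Two-tailed sizing: split the Type-I error rate across both tails.
experiment_size(.1, .12, alpha = .05 / 2)
```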
###Code
# example of using statsmodels for sample size calculation
from statsmodels.stats.power import NormalIndPower
from statsmodels.stats.proportion import proportion_effectsize
# leave out the "nobs" parameter to solve for it
NormalIndPower().solve_power(effect_size = proportion_effectsize(.12, .1),
alpha = .05,
power = 0.8,
alternative = 'larger')
###Output
_____no_output_____ |
homeworks/hw02/DL_HW2_valentinaBlasone.ipynb | ###Markdown
Homework 02 - Valentina Blasone Deep Learning - A.A. 2020/2021 >Reconstruct in PyTorch the first experiment in [Learning representations by back-propagating errors](https://www.nature.com/articles/323533a0) with learning rule in eq.8 (gradient descent without momentum)> - Try to be as close as possible to the original protocol, except for what regards the learning rule> - Read the paper, if you did not do it yet (don’t worry if you don’t understand the other experiments in detail)> - Create the data, the model and everything is needed (do not use dataloaders if you don’t know how yet how they work)> - Train the model> - Inspect the weights you obtained and check if they provide a solution to the problem> - Compare the solution to the solution reported in the paper **Problem statement**The objective is to implement a network able to detect mirror symmetry about the centre point in the input vectors. More specifically, input vectors are 6-values binary 1D-arrays, i.e., each value of the vectors can wither be $0$ or $1$. **Designing the MLP**
From the specifics contained in the paper, we need to design a MLP with:
* Input layer with 6 units
* 1 hidden layer with 2 units
* Output layer with 1 unit
* ReLU as activation function for the hidden layer and softmax for the output layer
* weights initialized as random and uniformly distributed between -0.3 and 0.3
Additionally, we choose to use the negative log-likelihood loss function.
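For reference, pairing a log-softmax output with this loss amounts to minimizing the average negative log-likelihood over the $N$ training examples,

$$ \mathcal{L}(\theta) = -\frac{1}{N} \sum_{i=1}^{N} \log p_\theta\!\left(y_i \mid x_i\right). $$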
**Creating the data** The dataset will consist of all the possible configurations that the 6-value binary vectors can assume, thus $2^6=64$ configurations. To do so we can rely on a function in the standard library, called `itertools.product`, used in the following way:
###Code
import itertools
import torch
import random
dataset = torch.Tensor(list(itertools.product([0.,1.],repeat=6)))
dataset.shape
###Output
_____no_output_____
###Markdown
We also need to create the corresponding y vector, in which we identify a "mirror-symmetric" vector with the value $1$ and a "non mirror-symmetric" vector with the value $0$. We can implement a function to do so.
###Code
def flip_tensor(x):
return torch.Tensor([x[i-1] for i in range(len(x), 0, -1)])
def check_mirror_symmetry(x):
assert len(x) == 6
x1 = x[0:len(x)//2]
x2 = x[len(x)//2:len(x)]
if torch.all(x1.eq(flip_tensor(x2))):
y = 1
#print(f'x: {x}, y: {y}') # to check it
else:
y = 0
return y
y = torch.Tensor([[check_mirror_symmetry(x)] for x in dataset])
###Output
_____no_output_____
###Markdown
**Create the model**
###Code
# class for our MLP
class MLP(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer1 = torch.nn.Linear(in_features=6, out_features=2, bias=True)
self.layer2 = torch.nn.Linear(in_features=2, out_features=2, bias=True)
def forward(self, X):
out = self.layer1(X)
out = torch.nn.functional.relu(out)
out = self.layer2(out)
out = torch.nn.functional.log_softmax(out)
return out
model = MLP()
model
from torchsummary import summary
_ = summary(model)
###Output
=================================================================
Layer (type:depth-idx) Param #
=================================================================
├─Linear: 1-1 14
├─Linear: 1-2 6
=================================================================
Total params: 20
Trainable params: 20
Non-trainable params: 0
=================================================================
###Markdown
**Initialize the weights**
###Code
bound = 0.3
for param in model.parameters():
torch.nn.init.uniform_(param, a=-bound, b=bound)
###Output
_____no_output_____
###Markdown
**Performance Measures**
###Code
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(y_hat, y):
'''
y_hat is the model output - a Tensor of shape (n x num_classes)
y is the ground truth
'''
classes_prediction = y_hat.argmax(dim=1)
match_ground_truth = classes_prediction == y # -> tensor of booleans
correct_matches = match_ground_truth.sum()
return (correct_matches / y_hat.shape[0]).item()
###Output
_____no_output_____
###Markdown
**Training Loop**
###Code
def train_epoch(model, x, y, loss_fn, optimizer, loss_meter, accuracy_meter, loss_vec, acc_vec):
rand_idx = torch.randperm(64)
x = x[rand_idx]
y = y[rand_idx]
optimizer.zero_grad() # 1. reset the gradients previously accumulated by the optimizer
y_hat = model(x) # 2. get the predictions from the current state of the model (forward pass)
loss = loss_fn(y_hat, y) # 3. calculate the loss on the current mini-batch
loss.backward() # 4. execute the backward pass given the current loss
optimizer.step() # 5. update the value of the params
acc = accuracy(y_hat, y) # 6. calculate the accuracy for this mini-batch
loss_meter.update(val=loss.item(), n=x.shape[0]) # 7. update the loss and accuracy AverageMeter
accuracy_meter.update(val=acc, n=x.shape[0])
loss_vec.append(loss.item())
acc_vec.append(acc)
def train_model(model, x, y, loss_fn, optimizer, num_epochs, loss_vec, acc_vec):
model.train()
for epoch in range(num_epochs):
loss_meter = AverageMeter()
accuracy_meter = AverageMeter()
train_epoch(model, x, y, loss_fn, optimizer, loss_meter, accuracy_meter, loss_vec, acc_vec)
return loss_meter.sum, accuracy_meter.avg, loss_vec, acc_vec
model = MLP()
learn_rate = 0.1 # for SGD
num_epochs = 10000 #1425
loss_fn = torch.nn.NLLLoss() # torch.nn.functional.mse_loss
optimizer = torch.optim.SGD(model.parameters(), lr=learn_rate)
loss_vec = []
acc_vec = []
data = torch.cat((dataset,y),1)
y_truth = y.reshape(64)
print(y_truth.shape)
loss, acc, loss_vec, acc_vec = train_model(model, dataset, y_truth.long(), loss_fn,optimizer, num_epochs, loss_vec, acc_vec)
print(f"Training completed - final accuracy {acc} and loss {loss}")
import matplotlib.pyplot as plt
# plot
fig, axes = plt.subplots(1,2, figsize=(12,4))
_ = axes[0].plot(loss_vec, 'r')
_ = axes[0].set_xlabel("Epoch")
_ = axes[0].set_ylabel("Loss")
_ = axes[0].set_title("Loss")
_ = axes[1].plot(acc_vec[10:],'r')
_ = axes[1].set_xlabel("Epoch")
_ = axes[1].set_ylabel("Accuracy")
_ = axes[1].set_title("Accuracy")
for param_name, param in model.state_dict().items():
print(param_name, param)
###Output
layer1.weight tensor([[-1.5731, 0.7933, -3.1037, 3.1038, -0.7925, 1.5735],
[-2.3407, 1.1806, -4.6229, 4.6249, -1.1778, 2.3418]])
layer1.bias tensor([1.1398, 0.0020])
layer2.weight tensor([[-3.6358, 5.4006],
[ 3.6268, -5.0612]])
layer2.bias tensor([ 3.0788, -2.5044])
|
examples/design/multi-tube-design-advanced.ipynb | ###Markdown
Multi-tube design example (advanced) Design small conditional RNAs (scRNAs) for conditional Dicer substrate formation (Hochrein et al., J Am Chem Soc, 2013; see Figure 3). See the accompanying design specification (PDF files). See also the LaTeX spec file that you can edit to make your own design specs in a standardized format. This is a 2-step reaction. To design one scRNA system, there are 3 elementary step tubes plus 1 global crosstalk tube. Target test tubes: - Step 0: Reactants - Step 1: Intermediates- Step 2: Products- Global crosstalk Material: RNA Temperature: 23 C
###Code
# Import Python NUPACK module
from nupack import *
# Define physical model
my_model = Model(material='rna', celsius=37)
# Define sequence domains
a = Domain('N6', name='a')
c = Domain('N8', name='c')
b = Domain('N4', name='b')
w = Domain('N2', name='w')
y = Domain('N4', name='y')
x = Domain('N12', name='x')
z = Domain('N3', name='z')
s = Domain('N5', name='s')
# Define strands containing these domains
sC_out = TargetStrand([w, x, y, s], name='sC_out')
sA = TargetStrand([~c, ~b, ~a, ~z, ~y], name='sA')
sA_toe = TargetStrand([~c], name='sA_toe')
sC = TargetStrand([w, x, y, s, ~a, ~z, ~y, ~x, ~w], name='sC')
sC_loop = TargetStrand([s, ~a, ~z], name='sC_loop')
sB = TargetStrand([x, y, z, a, b], name='sB')
sX = TargetStrand([a, b, c], name='sX')
# Define target complexes
C = TargetComplex([sC], 'D2 D12 D4( U5 U6 U3 )', name='C')
B = TargetComplex([sB], 'U12 U4 U3 U6 U4', name='B')
C_loop = TargetComplex([sC_loop], 'U14', name='C_loop')
AB = TargetComplex([sA, sB], 'U8 D4 D6 D3 D4(+ U12)', name='AB')
X = TargetComplex([sX], 'U18', name='X')
XA = TargetComplex([sX, sA], 'D6 D4 D8(+) U3 U4', name='XA')
C_out = TargetComplex([sC_out], 'U23', name='C_out')
BC = TargetComplex([sB, sC], 'D12 D4 D3 D6 (U4 + U2 U12 U4 U5) U2', name='BC')
A_toe = TargetComplex([sA_toe], 'U8', name='A_toe')
# Define elementary step tubes
Step_0 = TargetTube(on_targets={C: 1e-08, X: 1e-08, AB: 1e-08},
off_targets=SetSpec(max_size=2, include=[[sA], BC], exclude=[XA]), name='Step_0')
Step_1 = TargetTube(on_targets={XA: 1e-08, B: 1e-08},
off_targets=SetSpec(max_size=2, include=[X, AB]), name='Step_1')
Step_2 = TargetTube(on_targets={BC: 1e-08},
off_targets=SetSpec(max_size=2, include=[B, C]), name='Step_2')
# Define global crosstalk tube
Crosstalk = TargetTube(on_targets={
AB: 1e-08,
C: 1e-08,
X: 1e-08,
B: 1e-08,
C_out: 1e-08,
C_loop: 1e-08,
A_toe: 1e-08,
}, off_targets=SetSpec(max_size=2, exclude=[XA, BC, [sX, sA_toe], [sB, sC_loop]]), name='Crosstalk')
# Define hard sequence constraints
# GC content constraints
hard = [
Similarity(d, 'S'*d.nt(), limits=(0.45, 0.55))
for d in [sC_out, sA, sC, sC_loop, sB, sX]
]
# Biological sequences
# Note: biological sequence constraints often require a higher stop condition
tpm3 = 'GAACACTATTAGCTATTTGTAGTACTCTAAAGAGGACTGCAGAACGCATCGCAGTAGTGGTGAAAAGCCGTGCGTGCGCGTGAAACATCTGATCCTCACGTTACTTCCACTCGCTCTGCGTTTGACTTGTTGGCGGGGCGTTGGTGCCTTGGACTTTTTTTTCCTCCTTCTCTTCTTCGCGGCTCGGTCCACTACGCTGCTCGAGAGGAATCTGCTTTATTCGACCACACTACTCCTAAAGTAACACATTAAAATGGCCGGATCAAACAGCATCGATGCAGTTAAGAGAAAAATCAAAGTTTTACAACAGCAAGCAGATGAGGCAGAAGAAAGAGCCGAGATTTTGCAGAGACAGGTCGAGGAGGAGAAGCGTGCCAGGGAGCAGGCTGAGGCAGAGGTGGCTTCTCTGAACAGGCGTATCCAGCTGGTTGAGGAGGAGTTGGATCGTGCTCAGGAGAGACTGGCCACAGCCCTGCAAAAGCTGGAGGAAGCCGAGAAGGCCGCAGATGAGAGCGAGAGAGGGATGAAGGTGATTGAGAACAGGGCTCTGAAGGATGAGGAGAAGATGGAGCTGCAGGAGATCCAGCTTAAGGAGGCCAA'
desm = 'CATTTACACAGCGTACAAACCCAACAGGCCCAGTCATGAGCACGAAATATTCAGCCTCCGCCGAGTCGGCGTCCTCTTACCGCCGCACCTTTGGCTCAGGTTTGGGCTCCTCTATTTTCGCCGGCCACGGTTCCTCAGGTTCCTCTGGCTCCTCAAGACTGACCTCCAGAGTTTACGAGGTGACCAAGAGCTCCGCTTCTCCCCATTTTTCCAGCCACCGTGCGTCCGGCTCTTTCGGAGGTGGCTCGGTGGTCCGTTCCTACGCTGGCCTTGGTGAGAAGCTGGATTTCAATCTGGCTGATGCCATAAACCAGGACTTCCTCAACACGCGTACTAATGAGAAGGCCGAGCTCCAGCACCTCAATGACCGCTTCGCCAGCTACATCGAGAAGGTGCGCTTCCTCGAGCAGCAGAACTCTGCCCTGACGGTGGAGATTGAGCGTCTGCGGGGTCGCGAGCCCACCCGTATTGCAGAGCTGTACGAGGAGGAGATGAGAGAGCTGCGCGGACAGGTGGAGGCACTGACCAATCAGAGATCCCGTGTGGAGATCGAGAGGGACAACCTAGTCGATGACCTACAGAAACTAAAGCTCAGACTTC'
# Window constraints on detection target 'a-b-c' and silencing target 'w-x-y-z'
hard += [Window([a, b, c], [tpm3])]
hard += [Window([w, x, y, z], [desm])]
# Diversity constraint
hard += [Diversity(word=4, types=2)]
# Set a stop condition of 8%
# Set seed for random number generation to get a reproducible result for this demo
my_options = DesignOptions(f_stop=0.08, seed=93)
# Define and run the test tube design job
des = tube_design(tubes=[Step_0, Step_1, Step_2, Crosstalk], model=my_model, hard_constraints=hard, options=my_options)
result = des.run(trials=1)[0]
# Display the result
result
###Output
_____no_output_____ |
examples/Compute Latency/Core Functions.ipynb | ###Markdown
TAT-C: Collect Observations Core Functions Example. Dr. Paul Grogan, I. Josue Tapia-Tamayo, Isaac Feldman. Collective Design (CoDe) Lab, Stevens Institute of Technology, School of Systems and Enterprises. This example demonstrates how to use direct function calls of the low-level TAT-C library to model observations of a single point by a single satellite using an orbit derived from an existing Two Line Element (TLE) set. Dependencies. Standard Python Libraries. This example is compatible with Python 3.8.10 and makes use of the standard `datetime` library and the `pandas` package.
###Code
import pandas as pd
from datetime import datetime, timezone, timedelta
###Output
_____no_output_____
###Markdown
TAT-C LibraryAdditionally, this example makes use of the low-level `tatc` library. If you do not have this installed, navigate to the main `tatc-v3` file and make sure you have followed all the instructions for installation of TAT-C.
###Code
from tatc.analysis.coverage import collect_observations
from tatc.schemas.satellite import Satellite
from tatc.schemas.instrument import Instrument
from tatc.schemas.orbit import TwoLineElements
from tatc.schemas.point import Point
###Output
_____no_output_____
###Markdown
Establish the Mission Architecture and Key InformationThe first step in modeling a mission with TAT-C is to establish the key mission parameters -- specifically the satellites, the mission time frame and points on the ground for observation. Modeling Satellite ArchitectureFirst we define the satellites for a mission. In this example we define one satellite (NOAA-1) with a single instrument and an orbit derived from a known two-line element set (TLE). This uses the `TwoLineElements` class from `tatc.schemas.orbit` and the `Instrument` class from `tatc.schemas.instrument`.
###Code
# Save the NOAA-1 TLE as a list where the first element of the list is the first line
# of the TLE and the second list element is the second line
tle = ['1 04793U 70106A 22044.41526573 -.00000035 00000+0 54783-4 0 9991',
'2 04793 101.5750 111.2777 0031771 203.0437 167.8944 12.54003052342042']
# Derive the satellite orbit from the TLE
orbit = TwoLineElements(type='tle',
tle=tle)
# Initialize the instrument for the satellite to make observations
instrument = Instrument(name='Lead Instrument', field_of_regard=180)
# Define the satellite from the orbit and instrument
sat = Satellite(name='NOAA1',
type='satellite',
orbit=orbit,
instruments=[instrument])
###Output
_____no_output_____
###Markdown
Modeling Mission ParametersNext we define the mission parameters, specifically the target point -- using the `Point` class from `tatc.schemas.point` -- and the start and end time of the mission window -- using the `datetime` and `timedelta` objects from the python `datetime` module.
###Code
# Define a target points for observations
point = Point(id=0, latitude=40.74259, longitude=-74.02686)
# Set the start date to be January, 1, 2021 at noon (12 pm) UTC
start = datetime(year=2021, month=1, day=1, hour=12, minute=0, second=0, tzinfo=timezone.utc)
# Set the end date to be 30 days after the start date
end = start + timedelta(days=30)
###Output
_____no_output_____
###Markdown
Collecting ObservationsFinally we utilize the `collect_observations` function from `tatc.analysis.coverage` to determine all the opportunities for the satellite to observe the point with the specified instrument.
###Code
results = collect_observations(point, sat, instrument, start, end, omit_solar=False)
results
###Output
_____no_output_____ |
random.seed0.ipynb | ###Markdown
Random & Seed
###Code
import random
dir(random)
# random.seed seeds Python's pseudo-random number generator
from random import randint,seed
for i in range(10): # loop will execute 10 times.
print(randint(0,10))
# After seeding the generator, we always get the same sequence of random numbers, i.e. reproducible results.
seed(123) # set a certain starting point
for i in range(10): # loop will execute 10 times.
print(randint(0,10))
seed(12)
for i in range(10):
print(randint(0,10))
seed(0)
for i in range(10):
print(randint(0,10))
seed(1)
for i in range(10):
print(randint(0,10))
###Output
2
9
1
4
1
7
7
7
10
6
|
.ipynb_checkpoints/multiple-linear-regression-completed-checkpoint.ipynb | ###Markdown
Multiple Linear Regression Linear Regression is a useful tool for predicting a quantitative response. We have an input vector $X^T = (X_1, X_2,...,X_p)$, and want to predict a real-valued output $Y$. The linear regression model has the form $f(x) = \beta_0 + \sum_{j=1}^p X_j \beta_j$. The linear model either assumes that the regression function $E(Y|X)$ is linear, or that the linear model is a reasonable approximation.Here the $\beta_j$'s are unknown parameters or coefficients, and the variables $X_j$ can come from different sources. No matter the source of $X_j$, the model is linear in the parameters. **Simple Linear Regression**: $$Y = \beta_0 + \beta_1 X + \epsilon$$ **Multiple Linear Regression**: $$Y = \beta_0 + \beta_1 X_1 + \beta_2 X_2 +...+ \beta_p X_p + \epsilon$$ $$sales = \beta_0 + \beta_1 \times TV + \beta_2 \times radio + \beta_3 \times newspaper + \epsilon$$ Task 1: Importing Libraries
###Code
import pandas as pd
import numpy as np
import seaborn as sns
from scipy.stats import skew
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use("ggplot")
plt.rcParams['figure.figsize'] = (12, 8)
###Output
_____no_output_____
###Markdown
Task 2: Load the Data The advertising dataset captures sales revenue generated with respect to advertisement spend across multiple channels like radio, TV and newspaper.
###Code
advert = pd.read_csv('Advertising.csv')
advert.head()
advert.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 200 entries, 0 to 199
Data columns (total 4 columns):
TV 200 non-null float64
radio 200 non-null float64
newspaper 200 non-null float64
sales 200 non-null float64
dtypes: float64(4)
memory usage: 6.3 KB
###Markdown
Task 3: Relationship between Features and Response
###Code
sns.pairplot(advert, x_vars=['TV','radio','newspaper'], y_vars='sales', height=7, aspect=0.7);
###Output
_____no_output_____
###Markdown
Task 4: Multiple Linear Regression - Estimating Coefficients
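For reference, `LinearRegression` fits the coefficients by ordinary least squares; with design matrix $X$ (augmented with a column of ones for the intercept), the closed-form OLS estimate is

$$ \hat{\beta} = (X^T X)^{-1} X^T y. $$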
###Code
from sklearn.linear_model import LinearRegression
# create X and y
feature_cols = ['TV', 'radio', 'newspaper']
X = advert[feature_cols]
y = advert.sales
# instantiate and fit
lm1 = LinearRegression()
lm1.fit(X, y)
# print the coefficients
print(lm1.intercept_)
print(lm1.coef_)
# pair the feature names with the coefficients
list(zip(feature_cols, lm1.coef_))
sns.heatmap(advert.corr(), annot=True)
###Output
_____no_output_____
###Markdown
Task 5: Feature Selection
###Code
from sklearn.metrics import r2_score
lm2 = LinearRegression().fit(X[['TV', 'radio']], y)
lm2_preds = lm2.predict(X[['TV', 'radio']])
print("R^2: ", r2_score(y, lm2_preds))
lm3 = LinearRegression().fit(X[['TV', 'radio', 'newspaper']], y)
lm3_preds = lm3.predict(X[['TV', 'radio', 'newspaper']])
print("R^2: ", r2_score(y, lm3_preds))
###Output
R^2: 0.8972106381789521
###Markdown
Task 6: Model Evaluation Using Train/Test Split and Metrics **Mean Absolute Error** (MAE) is the mean of the absolute value of the errors: $$\frac{1}{n}\sum_{i=1}^{n} \left |y_i - \hat{y_i} \right |$$ **Mean Squared Error** (MSE) is the mean of the squared errors: $$\frac{1}{n}\sum_{i=1}^{n} (y_i - \hat{y_i})^2$$ **Root Mean Squared Error** (RMSE) is the square root of the mean of the squared errors: $$\sqrt{\frac{1}{n}\sum_{i=1}^{n} (y_i - \hat{y_i})^2}$$
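MAE, defined above, is not computed in the following cells; a quick sketch with scikit-learn (an addition, not part of the original notebook):

```python
from sklearn.metrics import mean_absolute_error

# Hypothetical usage, given ground truth y_test and model predictions preds:
# mae = mean_absolute_error(y_test, preds)
```

Let's use train/test split with RMSE to see whether newspaper should be kept in the model: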
###Code
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
X = advert[['TV', 'radio', 'newspaper']]
y = advert.sales
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 1)
lm4 = LinearRegression()
lm4.fit(X_train, y_train)
lm4_preds = lm4.predict(X_test)
print("RMSE :", np.sqrt(mean_squared_error(y_test, lm4_preds)))
print("R^2: ", r2_score(y_test, lm4_preds))
X = advert[['TV', 'radio']]
y = advert.sales
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 1)
lm5 = LinearRegression()
lm5.fit(X_train, y_train)
lm5_preds = lm5.predict(X_test)
print("RMSE :", np.sqrt(mean_squared_error(y_test, lm5_preds)))
print("R^2: ", r2_score(y_test, lm5_preds))
from yellowbrick.regressor import PredictionError, ResidualsPlot
visualizer = PredictionError(lm5)
visualizer.fit(X_train, y_train) # Fit the training data to the visualizer
visualizer.score(X_test, y_test) # Evaluate the model on the test data
visualizer.poof()
visualizer = ResidualsPlot(lm5)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.poof()
###Output
_____no_output_____
###Markdown
Task 7: Interaction Effect (Synergy)
###Code
advert['interaction'] = advert['TV'] * advert['radio']
X = advert[['TV', 'radio', 'interaction']]
y = advert.sales
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 1)
lm6 = LinearRegression()
lm6.fit(X_train, y_train)
lm6_preds = lm6.predict(X_test)
print("RMSE :", np.sqrt(mean_squared_error(y_test, lm6_preds)))
print("R^2: ", r2_score(y_test, lm6_preds))
visualizer = PredictionError(lm6)
visualizer.fit(X_train, y_train) # Fit the training data to the visualizer
visualizer.score(X_test, y_test) # Evaluate the model on the test data
visualizer.poof()
###Output
_____no_output_____ |
site/ko/r2/guide/keras/overview.ipynb | ###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
케라스: 빠르게 훑어보기 TensorFlow.org에서 보기 구글 코랩(Colab)에서 실행하기 깃허브(GitHub) 소스 보기 Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도불구하고 [공식 영문 문서](https://www.tensorflow.org/?hl=en)의 내용과 일치하지 않을 수 있습니다.이 번역에 개선할 부분이 있다면[tensorflow/docs](https://github.com/tensorflow/docs) 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다.문서 번역이나 리뷰에 지원하려면 [이 양식](https://bit.ly/tf-translate)을작성하거나[[email protected]](https://groups.google.com/a/tensorflow.org/forum/!forum/docs)로메일을 보내주시기 바랍니다. tf.keras 임포트`tf.keras`는 [케라스 API 명세](https://keras.io){:.external}의 텐서플로 구현입니다. `tf.keras`는 머신러닝 모델을 만들고 훈련하기 위한 고수준 API로서 텐서플로의 특수 기능을 모두 지원합니다. 여기에는 [즉시 실행](eager_execution), `tf.data` 파이프라인(pipeline), [Estimators](./estimators.md)가 포함됩니다. `tf.keras`를 이용하면 유연성과 성능을 손해보지 않고 텐서플로를 쉽게 사용할 수 있습니다.`tf.keras`를 임포트하여 텐서플로 프로그램을 시작합니다:
###Code
!pip install -q pyyaml # pyyaml은 선택사항입니다.
from __future__ import absolute_import, division, print_function, unicode_literals
!pip install tf-nightly-gpu-2.0-preview
import tensorflow as tf
from tensorflow import keras
###Output
_____no_output_____
###Markdown
`tf.keras`는 케라스 API와 호환되는 어떤 코드라도 실행시킬 수 있지만 다음 사항을 유념하세요:* 최신 텐서플로 릴리스에 포함된 `tf.keras` 버전은 PyPI에 있는 최신 `keras` 버전과 같지 않을 수 있습니다. `tf.keras.__version__`을 확인해 보세요.* [모델의 가중치를 저장](weights_only)할 때 `tf.keras`는 기본적으로 [체크포인트 포맷](./checkpoints.md)을 사용합니다. HDF5를 사용하려면 `save_format='h5'`로 설정하세요. 간단한 모델 만들기 Sequential 모델케라스에서는 *층(layer)*을 조합하여 *모델(model)*을 만듭니다. 모델은 (일반적으로) 층의 그래프입니다. 가장 흔한 모델 구조는 층을 차례대로 쌓은 `tf.keras.Sequential` 모델입니다.간단한 완전 연결(fully-connected) 네트워크(즉, 다층 퍼셉트론(multi-layer perceptron))를 만들어 보겠습니다.
###Code
from tensorflow.keras import layers
model = tf.keras.Sequential()
# 64개의 유닛을 가진 완전 연결 층을 모델에 추가합니다:
model.add(layers.Dense(64, activation='relu'))
# 또 하나를 추가합니다:
model.add(layers.Dense(64, activation='relu'))
# 10개의 출력 유닛을 가진 소프트맥스 층을 추가합니다:
model.add(layers.Dense(10, activation='softmax'))
###Output
_____no_output_____
###Markdown
[여기](https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/quickstart/beginner.ipynb)에서 `Sequential` 모델을 어떻게 사용하는지 간단하지만 완전한 예제를 볼 수 있습니다.`Sequential`보다 더 고수준의 모델을 구성하는 방법을 배우려면 다음을 참고하세요:- [케라스 함수형 API 가이드](./functional.ipynb)- [클래스 상속을 통하여 층과 모델을 밑바닥부터 만드는 방법](./custom_layers_and_models.ipynb) 층 설정`tf.keras.layers` 아래의 클래스들은 일부 생성자 매개변수를 공통으로 가지고 있습니다:* `activation`: 층의 활성화 함수를 설정합니다. 이 매개변수에는 기본으로 제공되는 함수의 이름을 쓰거나 호출 가능한 객체를 지정할 수 있습니다. 기본값은 활성화 함수를 적용하지 않는 것입니다.* `kernel_initializer`와 `bias_initializer`: 층의 가중치(weight)(커널(kernel)과 절편(bias))를 초기화하는 방법입니다. 내장 함수나 호출 가능한 객체를 지정합니다. 기본값은 `"glorot_uniform"` 초기화입니다.* `kernel_regularizer`와 `bias_regularizer`: L1 또는 L2 규제(regularization)와 같이 층의 가중치(커널과 절편)에 적용할 규제 방법을 지정합니다. 기본값은 규제를 적용하지 않는 것입니다.다음 코드는 여러가지 생성자 매개변수를 사용하여 `tf.keras.layers.Dense` 층의 객체를 만드는 예입니다:
###Code
# 시그모이드 활성화 층을 만듭니다:
layers.Dense(64, activation='sigmoid')
# 또는 다음도 가능합니다:
layers.Dense(64, activation=tf.keras.activations.sigmoid)
# 커널 행렬에 L1 규제가 적용된 선형 활성화 층. 하이퍼파라미터 0.01은 규제의 양을 조절합니다:
layers.Dense(64, kernel_regularizer=tf.keras.regularizers.l1(0.01))
# 절편 벡터에 L1 규제가 적용된 선형 활성화 층. 하이퍼파라미터 0.01은 규제의 양을 조절합니다:
layers.Dense(64, bias_regularizer=tf.keras.regularizers.l2(0.01))
# 커널을 랜덤한 직교 행렬로 초기화한 선형 활성화 층:
layers.Dense(64, kernel_initializer='orthogonal')
# 절편 벡터를 상수 2.0으로 설정한 선형 활성화 층:
layers.Dense(64, bias_initializer=tf.keras.initializers.Constant(2.0))
###Output
_____no_output_____
###Markdown
훈련과 평가 훈련 준비모델을 구성한 후 `compile` 메서드를 호출하여 학습 과정을 설정합니다:
###Code
model = tf.keras.Sequential([
# 64개의 유닛을 가진 완전 연결 층을 모델에 추가합니다:
layers.Dense(64, activation='relu', input_shape=(32,)),
# 또 하나를 추가합니다:
layers.Dense(64, activation='relu'),
# 10개의 출력 유닛을 가진 소프트맥스 층을 추가합니다:
layers.Dense(10, activation='softmax')])
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
`tf.keras.Model.compile`에는 세 개의 중요한 매개변수가 있습니다:* `optimizer`: 훈련 과정을 설정합니다. `tf.keras.optimizers.Adam`이나 `tf.keras.optimizers.SGD`와 같은 `tf.keras.optimizers` 아래의 옵티마이저 객체를 전달합니다. 기본 매개변수를 사용할 경우 `'adam'`이나 `'sgd'`와 같이 문자열로 지정할 수도 있습니다.* `loss`: 최적화 과정에서 최소화될 손실 함수(loss function)를 설정합니다. 평균 제곱 오차(`mse`)와 `categorical_crossentropy`, `binary_crossentropy` 등이 자주 사용됩니다. 손실 함수의 이름을 지정하거나 `tf.keras.losses` 모듈 아래의 호출 가능한 객체를 전달할 수 있습니다.* `metrics`: 훈련을 모니터링하기 위해 사용됩니다. 이름이나 `tf.keras.metrics` 모듈 아래의 호출 가능한 객체입니다.* 추가적으로 모델의 훈련과 평가를 즉시 실행하려면 `run_eagerly=True` 매개변수를 전달할 수 있습니다.다음 코드는 모델 훈련을 설정하는 몇 가지 예를 보여줍니다:
###Code
# 평균 제곱 오차로 회귀 모델을 설정합니다.
model.compile(optimizer=tf.keras.optimizers.Adam(0.01),
loss='mse', # 평균 제곱 오차
metrics=['mae']) # 평균 절댓값 오차
# 크로스엔트로피 손실 함수로 분류 모델을 설정합니다.
model.compile(optimizer=tf.keras.optimizers.RMSprop(0.01),
loss=tf.keras.losses.CategoricalCrossentropy(),
metrics=[tf.keras.metrics.CategoricalAccuracy()])
###Output
_____no_output_____
###Markdown
넘파이 데이터를 사용한 훈련데이터셋이 작은 경우 [넘파이](https://www.numpy.org/){:.external}(NumPy) 배열을 메모리에 적재하여 모델을 훈련하고 평가합니다. 모델은 `fit` 메서드를 통해서 훈련 데이터를 학습합니다:
###Code
import numpy as np
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
model.fit(data, labels, epochs=10, batch_size=32)
###Output
_____no_output_____
###Markdown
`tf.keras.Model.fit`에는 세 개의 중요한 매개변수가 있습니다:* `epochs`: 훈련은 *에포크*(epoch)로 구성됩니다. 한 에포크는 전체 입력 데이터를 한번 순회하는 것입니다(작은 배치로 나누어 수행됩니다).* `batch_size`: 넘파이 데이터를 전달하면 모델은 데이터를 작은 배치로 나누고 훈련 과정에서 이 배치를 순회합니다. 이 정수 값은 배치의 크기를 지정합니다. 전체 샘플 개수가 배치 크기로 나누어 떨어지지 않으면 마지막 배치의 크기는 더 작을 수 있습니다.* `validation_data`: 모델의 프로토타입(prototype)을 만들 때는 검증 데이터(validation data)에서 간편하게 성능을 모니터링해야 합니다. 입력과 레이블(label)의 튜플을 이 매개변수로 전달하면 에포크가 끝날 때마다 추론 모드(inference mode)에서 전달된 데이터의 손실과 측정 지표를 출력합니다.다음이 `validation_data`를 사용하는 예입니다:
###Code
import numpy as np
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
val_data = np.random.random((100, 32))
val_labels = np.random.random((100, 10))
model.fit(data, labels, epochs=10, batch_size=32,
validation_data=(val_data, val_labels))
###Output
_____no_output_____
###Markdown
tf.data 데이터셋을 사용한 훈련[데이터셋 API](./datasets.md)를 사용하여 대규모 데이터셋이나 복수의 장치로 확장시킬 수 있습니다. `fit` 메서드에 `tf.data.Dataset` 객체를 전달합니다:
###Code
# 예제 `Dataset` 객체를 만듭니다:
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(32)
# Dataset에서 `fit` 메서드를 호출할 때 `steps_per_epoch` 설정을 잊지 마세요.
model.fit(dataset, epochs=10, steps_per_epoch=30)
###Output
_____no_output_____
###Markdown
여기에서 `fit` 메서드는 `steps_per_epoch` 매개변수를 사용합니다. 다음 에포크로 넘어가기 전에 모델이 수행할 훈련 단계 횟수입니다. `Dataset`이 배치 데이터를 생성하기 때문에 `batch_size`가 필요하지 않습니다.`Dataset`은 검증 데이터에도 사용할 수 있습니다:
###Code
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(32)
val_dataset = tf.data.Dataset.from_tensor_slices((val_data, val_labels))
val_dataset = val_dataset.batch(32)
model.fit(dataset, epochs=10,
validation_data=val_dataset)
###Output
_____no_output_____
###Markdown
평가와 예측`tf.keras.Model.evaluate`와 `tf.keras.Model.predict` 메서드에는 넘파이 배열이나 `tf.data.Dataset`을 사용할 수 있습니다.주어진 데이터로 추론 모드의 손실이나 지표를 *평가*합니다:
###Code
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
model.evaluate(data, labels, batch_size=32)
model.evaluate(dataset, steps=30)
###Output
_____no_output_____
###Markdown
주어진 데이터로 추론 모드에서 마지막 층의 출력을 *예측*하여 넘파이 배열로 반환합니다:
###Code
result = model.predict(data, batch_size=32)
print(result.shape)
###Output
_____no_output_____
###Markdown
맞춤형 훈련 반복을 밑바닥부터 작성하는 방법을 포함하여 훈련과 평가에 대한 완전한 설명은 [훈련과 평가 가이드](./training_and_evaluation.ipynb)를 참고하세요. 고급 모델 만들기 함수형 API`tf.keras.Sequential` 모델은 단순히 층을 쌓은 것으로 임의의 구조를 표현할 수 없습니다. [케라스 함수형 API](./functional.ipynb)를 사용하면 다음과 같은 복잡한 모델 구조를 만들 수 있습니다:* 다중 입력 모델,* 다중 출력 모델,* 층을 공유하는 모델 (동일한 층을 여러번 호출합니다),* 데이터 흐름이 차례대로 진행되지 않는 모델 (예를 들면 잔차 연결(residual connections)).함수형 API로 모델을 만드는 방식은 다음과 같습니다:1. 하나의 층 객체는 호출 가능하고 텐서를 반환합니다.2. `tf.keras.Model` 객체를 정의하기 위해 입력 텐서와 출력 텐서를 사용합니다.3. 이 모델은 `Sequential` 모델과 동일한 방식으로 훈련됩니다.다음 코드는 함수형 API를 사용하여 간단한 완전 연결 네트워크를 만드는 예입니다:
###Code
inputs = tf.keras.Input(shape=(32,)) # 입력 플레이스홀더를 반환합니다.
# 층 객체는 텐서를 사용하여 호출되고 텐서를 반환합니다.
x = layers.Dense(64, activation='relu')(inputs)
x = layers.Dense(64, activation='relu')(x)
predictions = layers.Dense(10, activation='softmax')(x)
###Output
_____no_output_____
###Markdown
입력과 출력을 사용해 모델의 객체를 만듭니다.
###Code
model = tf.keras.Model(inputs=inputs, outputs=predictions)
# 컴파일 단계는 훈련 과정을 설정합니다.
model.compile(optimizer=tf.keras.optimizers.RMSprop(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# 5번의 에포크 동안 훈련합니다.
model.fit(data, labels, batch_size=32, epochs=5)
###Output
_____no_output_____
###Markdown
모델 클래스 상속`tf.keras.Model` 클래스를 상속하고 자신만의 정방향 패스(forward pass)을 정의하여 완전히 커스터마이징된 모델을 만들 수 있습니다. `__init__` 메서드에서 층을 만들어 클래스 객체의 속성으로 지정합니다. 정방향 패스는 `call` 메서드에 정의합니다.[즉시 실행](./eager.md)이 활성화되어 있을 때 정방향 패스를 명령형 프로그래밍 방식으로 작성할 수 있기 때문에 모델 클래스 상속이 매우 유용합니다.노트: 정방향 패스를 *항상* 명령형 프로그래밍 방식으로 실행하려면 `super` 객체의 생성자를 호출할 때 `dynamic=True`를 지정하세요.중요 포인트: 작업에 맞는 API를 사용하세요. 모델 클래스 상속은 유연성을 제공하지만 복잡도가 증가하고 사용자 오류가 발생할 가능성이 높아집니다. 가능한한 함수형 API를 사용하세요.다음 코드는 `tf.keras.Model`의 클래스를 상속하여 명령형 프로그래밍 방식으로 실행할 필요가 없는 정방향 패스를 구현한 예입니다:
###Code
class MyModel(tf.keras.Model):
def __init__(self, num_classes=10):
super(MyModel, self).__init__(name='my_model')
self.num_classes = num_classes
# 층을 정의합니다.
self.dense_1 = layers.Dense(32, activation='relu')
self.dense_2 = layers.Dense(num_classes, activation='sigmoid')
def call(self, inputs):
# 정방향 패스를 정의합니다.
# `__init__` 메서드에서 정의한 층을 사용합니다.
x = self.dense_1(inputs)
return self.dense_2(x)
###Output
_____no_output_____
###Markdown
새 모델 클래스의 객체를 만듭니다:
###Code
model = MyModel(num_classes=10)
# 컴파일 단계는 훈련 과정을 설정합니다.
model.compile(optimizer=tf.keras.optimizers.RMSprop(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# 5번의 에포크 동안 훈련합니다.
model.fit(data, labels, batch_size=32, epochs=5)
###Output
_____no_output_____
###Markdown
맞춤형 층맞춤형 층(custom layer)을 만들려면 `tf.keras.layers.Layer` 클래스를 상속하고 다음 메서드를 구현합니다:* `__init__`: 이 층에서 사용되는 하위 층을 정의할 수 있습니다.* `build`: 층의 가중치를 만듭니다. `add_weight` 메서드를 사용해 가중치를 추가합니다.* `call`: 정방향 패스를 구현합니다.다음 코드는 입력과 커널 행렬의 `matmul` 계산을 구현한 맞춤형 층의 예입니다:
###Code
class MyLayer(layers.Layer):
def __init__(self, output_dim, **kwargs):
self.output_dim = output_dim
super(MyLayer, self).__init__(**kwargs)
def build(self, input_shape):
# 이 층에서 훈련할 가중치 변수를 만듭니다.
self.kernel = self.add_weight(name='kernel',
shape=(input_shape[1], self.output_dim),
initializer='uniform',
trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.kernel)
def get_config(self):
base_config = super(MyLayer, self).get_config()
base_config['output_dim'] = self.output_dim
return base_config
@classmethod
def from_config(cls, config):
return cls(**config)
###Output
_____no_output_____
###Markdown
맞춤형 층을 사용하여 모델을 만듭니다:
###Code
model = tf.keras.Sequential([
MyLayer(10),
layers.Activation('softmax')])
# 컴파일 단계는 훈련 과정을 설정합니다.
model.compile(optimizer=tf.keras.optimizers.RMSprop(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# 5번의 에포크 동안 훈련합니다.
model.fit(data, labels, batch_size=32, epochs=5)
###Output
_____no_output_____
###Markdown
클래스 상속을 통해 맞춤형 층과 모델을 만드는 더 자세한 정보는 [맞춤형 층과 모델을 만드는 방법](./custom_layers_and_models.ipynb)을 참고하세요. 콜백콜백(callback)은 훈련하는 동안 모델의 동작을 변경하고 확장하기 위해 전달하는 객체입니다. 자신만의 콜백을 작성하거나 다음과 같은 내장 `tf.keras.callbacks`을 사용할 수 있습니다:* `tf.keras.callbacks.ModelCheckpoint`: 일정 간격으로 모델의 체크포인트를 저장합니다.* `tf.keras.callbacks.LearningRateScheduler`: 학습률(learning rate)을 동적으로 변경합니다.* `tf.keras.callbacks.EarlyStopping`: 검증 성능이 향상되지 않으면 훈련을 중지합니다.* `tf.keras.callbacks.TensorBoard`: [텐서보드](https://tensorflow.org/tensorboard)를 사용하여 모델을 모니터링합니다.`tf.keras.callbacks.Callback`을 사용하려면 모델의 `fit` 메서드에 전달합니다:
###Code
callbacks = [
# `val_loss`가 2번의 에포크에 걸쳐 향상되지 않으면 훈련을 멈춥니다.
tf.keras.callbacks.EarlyStopping(patience=2, monitor='val_loss'),
    # `./logs` 디렉토리에 텐서보드 로그를 기록합니다.
tf.keras.callbacks.TensorBoard(log_dir='./logs')
]
model.fit(data, labels, batch_size=32, epochs=5, callbacks=callbacks,
validation_data=(val_data, val_labels))
###Output
_____no_output_____
###Markdown
저장과 복원 가중치`tf.keras.Model.save_weights`를 사용하여 모델의 가중치를 저장하고 복원합니다:
###Code
model = tf.keras.Sequential([
layers.Dense(64, activation='relu', input_shape=(32,)),
layers.Dense(10, activation='softmax')])
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# 가중치를 텐서플로의 체크포인트 파일로 저장합니다.
model.save_weights('./weights/my_model')
# 모델의 상태를 복원합니다.
# 모델의 구조가 동일해야 합니다.
model.load_weights('./weights/my_model')
###Output
_____no_output_____
###Markdown
기본적으로 모델의 가중치는 [텐서플로 체크포인트](../checkpoints.md) 파일 포맷으로 저장됩니다. 케라스의 HDF5 포맷으로 가중치를 저장할 수도 있습니다(다양한 백엔드를 지원하는 케라스 구현에서는 HDF5가 기본 설정입니다):
###Code
# 가중치를 HDF5 파일로 저장합니다.
model.save_weights('my_model.h5', save_format='h5')
# 모델의 상태를 복원합니다.
model.load_weights('my_model.h5')
###Output
_____no_output_____
###Markdown
설정모델 설정을 저장하면 가중치는 제외하고 모델의 구조를 직렬화합니다. 원본 모델을 정의한 코드가 없어도 저장된 설정을 사용하여 동일한 구조를 만들고 초기화할 수 있습니다. 케라스는 JSON과 YAML 직렬화 포맷을 지원합니다:
###Code
# 모델을 JSON 포맷으로 직렬화합니다.
json_string = model.to_json()
json_string
import json
import pprint
pprint.pprint(json.loads(json_string))
###Output
_____no_output_____
###Markdown
JSON 파일로부터 (완전히 새로 초기화된) 모델을 만듭니다.
###Code
fresh_model = tf.keras.models.model_from_json(json_string)
###Output
_____no_output_____
###Markdown
YAML 포맷으로 직렬화하려면 *텐서플로를 임포트하기 전에* `pyyaml`을 설치해야 합니다:
###Code
yaml_string = model.to_yaml()
print(yaml_string)
###Output
_____no_output_____
###Markdown
YAML 파일로부터 모델을 다시 만듭니다.
###Code
fresh_model = tf.keras.models.model_from_yaml(yaml_string)
###Output
_____no_output_____
###Markdown
주의: Model 클래스를 상속하여 만든 모델은 `call` 메서드의 본문에 파이썬 코드로 구조가 정의되어 있기 때문에 직렬화되지 않습니다. 전체 모델가중치와 모델 설정, 심지어 옵티마이저 설정까지 포함된 전체 모델을 파일에 저장할 수 있습니다. 모델의 중간 상태를 저장하고 나중에 원본 코드가 없어도 정확히 동일한 상태에서 훈련을 재개할 수 있습니다.
###Code
# 간단한 모델을 만듭니다.
model = tf.keras.Sequential([
layers.Dense(10, activation='softmax', input_shape=(32,)),
layers.Dense(10, activation='softmax')
])
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, batch_size=32, epochs=5)
# 전체 모델을 HDF5 파일로 저장합니다.
model.save('my_model.h5')
# 가중치와 옵티마이저를 포함하여 정확히 같은 모델을 다시 만듭니다.
model = tf.keras.models.load_model('my_model.h5')
###Output
_____no_output_____
###Markdown
케라스 모델의 저장과 직렬화에 대한 더 자세한 내용은 [모델 저장과 직렬화 가이드](./saving_and_serializing.ipynb)를 참고하세요. 즉시 실행[즉시 실행](./eager.md)은 연산을 즉각 평가하는 명령형 프로그래밍(imperative programming) 환경입니다. 케라스에서는 즉시 실행이 필수가 아니지만 `tf.keras`는 이를 지원합니다. 이 기능은 프로그램을 검사하고 디버깅하는데 유용합니다.모든 `tf.keras` 모델링 API는 즉시 실행과 호환됩니다. `Sequential`이나 함수형 API와 사용할 수 있지만 즉시 실행은 특히 *모델 상속*과 *맞춤형 층*을 만들 때 장점이 나타납니다. 이런 API는 (기존의 층을 조합하여 모델을 만드는 대신) 직접 정방향 패스의 코드를 작성하기 때문입니다.[즉시 실행 가이드](./eager.ipynbbuild_a_model)에서 맞춤형 훈련 반복과 `tf.GradientTape`를 케라스 모델에 같이 사용하는 예를 참고하세요. 또한 간단하지만 완전한 예제를 [여기](https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/quickstart/advanced.ipynb)에서 볼 수 있습니다. 분산 처리 다중 GPU`tf.keras` 모델은 `tf.distribute.Strategy`를 사용하여 다중 GPU에서 실행할 수 있습니다. 이 API는 기존 코드를 거의 수정하지 않고 다중 GPU에서 훈련을 분산시킬 수 있습니다.현재는 `tf.distribute.MirroredStrategy`가 유일하게 지원되는 분산 전략입니다. `MirroredStrategy`는 한 대의 장치에서 계산 결과를 모두 수집하는 방식인 그래프 내 복제(in-graph replication)를 수행합니다. `distribute.Strategy`를 사용하려면 `Strategy`의 `.scope()` 안에 옵티마이저 객체 생성, 모델 구성, 컴파일 단계를 포함시킨 다음 모델을 훈련합니다.다음 코드는 한 대의 컴퓨터에서 다중 GPU를 사용해 `tf.keras.Model`을 분산 처리하는 예입니다.먼저, `MirroredStrategy`의 `scope()` 안에서 모델을 정의합니다:
###Code
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = tf.keras.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10,)))
model.add(layers.Dense(1, activation='sigmoid'))
optimizer = tf.keras.optimizers.SGD(0.2)
model.compile(loss='binary_crossentropy', optimizer=optimizer)
model.summary()
###Output
_____no_output_____
###Markdown
그다음, 보통 때와 같은 데이터로 모델을 훈련합니다:
###Code
x = np.random.random((1024, 10))
y = np.random.randint(2, size=(1024, 1))
x = tf.cast(x, tf.float32)
dataset = tf.data.Dataset.from_tensor_slices((x, y))
dataset = dataset.shuffle(buffer_size=1024).batch(32)
model.fit(dataset, epochs=1)
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
케라스: 빠르게 훑어보기 TensorFlow.org에서 보기 구글 코랩(Colab)에서 실행하기 깃허브(GitHub) 소스 보기 Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도불구하고 [공식 영문 문서](https://www.tensorflow.org/?hl=en)의 내용과 일치하지 않을 수 있습니다.이 번역에 개선할 부분이 있다면[tensorflow/docs](https://github.com/tensorflow/docs) 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다.문서 번역이나 리뷰에 지원하려면 [이 양식](https://bit.ly/tf-translate)을작성하거나[[email protected]](https://groups.google.com/a/tensorflow.org/forum/!forum/docs)로메일을 보내주시기 바랍니다. tf.keras 임포트`tf.keras`는 [케라스 API 명세](https://keras.io){:.external}의 텐서플로 구현입니다. `tf.keras`는 머신러닝 모델을 만들고 훈련하기 위한 고수준 API로서 텐서플로의 특수 기능을 모두 지원합니다. 여기에는 [즉시 실행](eager_execution), `tf.data` 파이프라인(pipeline), [Estimators](./estimators.md)가 포함됩니다. `tf.keras`를 이용하면 유연성과 성능을 손해보지 않고 텐서플로를 쉽게 사용할 수 있습니다.`tf.keras`를 임포트하여 텐서플로 프로그램을 시작합니다:
###Code
!pip install -q pyyaml # pyyaml은 선택사항입니다.
from __future__ import absolute_import, division, print_function, unicode_literals
!pip install tensorflow-gpu==2.0.0-alpha0
import tensorflow as tf
from tensorflow import keras
###Output
_____no_output_____
###Markdown
`tf.keras`는 케라스 API와 호환되는 어떤 코드라도 실행시킬 수 있지만 다음 사항을 유념하세요:* 최신 텐서플로 릴리스에 포함된 `tf.keras` 버전은 PyPI에 있는 최신 `keras` 버전과 같지 않을 수 있습니다. `tf.keras.__version__`을 확인해 보세요.* [모델의 가중치를 저장](weights_only)할 때 `tf.keras`는 기본적으로 [체크포인트 포맷](./checkpoints.md)을 사용합니다. HDF5를 사용하려면 `save_format='h5'`로 설정하세요. 간단한 모델 만들기 Sequential 모델케라스에서는 *층(layer)*을 조합하여 *모델(model)*을 만듭니다. 모델은 (일반적으로) 층의 그래프입니다. 가장 흔한 모델 구조는 층을 차례대로 쌓은 `tf.keras.Sequential` 모델입니다.간단한 완전 연결(fully-connected) 네트워크(즉, 다층 퍼셉트론(multi-layer perceptron))를 만들어 보겠습니다.
###Code
from tensorflow.keras import layers
model = tf.keras.Sequential()
# 64개의 유닛을 가진 완전 연결 층을 모델에 추가합니다:
model.add(layers.Dense(64, activation='relu'))
# 또 하나를 추가합니다:
model.add(layers.Dense(64, activation='relu'))
# 10개의 출력 유닛을 가진 소프트맥스 층을 추가합니다:
model.add(layers.Dense(10, activation='softmax'))
###Output
_____no_output_____
###Markdown
[여기](https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/quickstart/beginner.ipynb)에서 `Sequential` 모델을 어떻게 사용하는지 간단하지만 완전한 예제를 볼 수 있습니다.`Sequential`보다 더 고수준의 모델을 구성하는 방법을 배우려면 다음을 참고하세요:- [케라스 함수형 API 가이드](./functional.ipynb)- [클래스 상속을 통하여 층과 모델을 밑바닥부터 만드는 방법](./custom_layers_and_models.ipynb) 층 설정`tf.keras.layers` 아래의 클래스들은 일부 생성자 매개변수를 공통으로 가지고 있습니다:* `activation`: 층의 활성화 함수를 설정합니다. 이 매개변수에는 기본으로 제공되는 함수의 이름을 쓰거나 호출 가능한 객체를 지정할 수 있습니다. 기본값은 활성화 함수를 적용하지 않는 것입니다.* `kernel_initializer`와 `bias_initializer`: 층의 가중치(weight)(커널(kernel)과 절편(bias))를 초기화하는 방법입니다. 내장 함수나 호출 가능한 객체를 지정합니다. 기본값은 `"glorot_uniform"` 초기화입니다.* `kernel_regularizer`와 `bias_regularizer`: L1 또는 L2 규제(regularization)와 같이 층의 가중치(커널과 절편)에 적용할 규제 방법을 지정합니다. 기본값은 규제를 적용하지 않는 것입니다.다음 코드는 여러가지 생성자 매개변수를 사용하여 `tf.keras.layers.Dense` 층의 객체를 만드는 예입니다:
###Code
# 시그모이드 활성화 층을 만듭니다:
layers.Dense(64, activation='sigmoid')
# 또는 다음도 가능합니다:
layers.Dense(64, activation=tf.keras.activations.sigmoid)
# 커널 행렬에 L1 규제가 적용된 선형 활성화 층. 하이퍼파라미터 0.01은 규제의 양을 조절합니다:
layers.Dense(64, kernel_regularizer=tf.keras.regularizers.l1(0.01))
# 절편 벡터에 L1 규제가 적용된 선형 활성화 층. 하이퍼파라미터 0.01은 규제의 양을 조절합니다:
layers.Dense(64, bias_regularizer=tf.keras.regularizers.l2(0.01))
# 커널을 랜덤한 직교 행렬로 초기화한 선형 활성화 층:
layers.Dense(64, kernel_initializer='orthogonal')
# 절편 벡터를 상수 2.0으로 설정한 선형 활성화 층:
layers.Dense(64, bias_initializer=tf.keras.initializers.Constant(2.0))
###Output
_____no_output_____
###Markdown
훈련과 평가 훈련 준비모델을 구성한 후 `compile` 메서드를 호출하여 학습 과정을 설정합니다:
###Code
model = tf.keras.Sequential([
# 64개의 유닛을 가진 완전 연결 층을 모델에 추가합니다:
layers.Dense(64, activation='relu', input_shape=(32,)),
# 또 하나를 추가합니다:
layers.Dense(64, activation='relu'),
# 10개의 출력 유닛을 가진 소프트맥스 층을 추가합니다:
layers.Dense(10, activation='softmax')])
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
`tf.keras.Model.compile`에는 세 개의 중요한 매개변수가 있습니다:* `optimizer`: 훈련 과정을 설정합니다. `tf.keras.optimizers.Adam`이나 `tf.keras.optimizers.SGD`와 같은 `tf.keras.optimizers` 아래의 옵티마이저 객체를 전달합니다. 기본 매개변수를 사용할 경우 `'adam'`이나 `'sgd'`와 같이 문자열로 지정할 수도 있습니다.* `loss`: 최적화 과정에서 최소화될 손실 함수(loss function)를 설정합니다. 평균 제곱 오차(`mse`)와 `categorical_crossentropy`, `binary_crossentropy` 등이 자주 사용됩니다. 손실 함수의 이름을 지정하거나 `tf.keras.losses` 모듈 아래의 호출 가능한 객체를 전달할 수 있습니다.* `metrics`: 훈련을 모니터링하기 위해 사용됩니다. 이름이나 `tf.keras.metrics` 모듈 아래의 호출 가능한 객체입니다.* 추가적으로 모델의 훈련과 평가를 즉시 실행하려면 `run_eagerly=True` 매개변수를 전달할 수 있습니다.다음 코드는 모델 훈련을 설정하는 몇 가지 예를 보여줍니다:
###Code
# 평균 제곱 오차로 회귀 모델을 설정합니다.
model.compile(optimizer=tf.keras.optimizers.Adam(0.01),
loss='mse', # 평균 제곱 오차
metrics=['mae']) # 평균 절댓값 오차
# 크로스엔트로피 손실 함수로 분류 모델을 설정합니다.
model.compile(optimizer=tf.keras.optimizers.RMSprop(0.01),
loss=tf.keras.losses.CategoricalCrossentropy(),
metrics=[tf.keras.metrics.CategoricalAccuracy()])
###Output
_____no_output_____
###Markdown
넘파이 데이터를 사용한 훈련데이터셋이 작은 경우 [넘파이](https://www.numpy.org/){:.external}(NumPy) 배열을 메모리에 적재하여 모델을 훈련하고 평가합니다. 모델은 `fit` 메서드를 통해서 훈련 데이터를 학습합니다:
###Code
import numpy as np
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
model.fit(data, labels, epochs=10, batch_size=32)
###Output
_____no_output_____
###Markdown
`tf.keras.Model.fit`에는 세 개의 중요한 매개변수가 있습니다:* `epochs`: 훈련은 *에포크*(epoch)로 구성됩니다. 한 에포크는 전체 입력 데이터를 한번 순회하는 것입니다(작은 배치로 나누어 수행됩니다).* `batch_size`: 넘파이 데이터를 전달하면 모델은 데이터를 작은 배치로 나누고 훈련 과정에서 이 배치를 순회합니다. 이 정수 값은 배치의 크기를 지정합니다. 전체 샘플 개수가 배치 크기로 나누어 떨어지지 않으면 마지막 배치의 크기는 더 작을 수 있습니다.* `validation_data`: 모델의 프로토타입(prototype)을 만들 때는 검증 데이터(validation data)에서 간편하게 성능을 모니터링해야 합니다. 입력과 레이블(label)의 튜플을 이 매개변수로 전달하면 에포크가 끝날 때마다 추론 모드(inference mode)에서 전달된 데이터의 손실과 측정 지표를 출력합니다.다음이 `validation_data`를 사용하는 예입니다:
###Code
import numpy as np
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
val_data = np.random.random((100, 32))
val_labels = np.random.random((100, 10))
model.fit(data, labels, epochs=10, batch_size=32,
validation_data=(val_data, val_labels))
###Output
_____no_output_____
###Markdown
tf.data 데이터셋을 사용한 훈련[데이터셋 API](./datasets.md)를 사용하여 대규모 데이터셋이나 복수의 장치로 확장시킬 수 있습니다. `fit` 메서드에 `tf.data.Dataset` 객체를 전달합니다:
###Code
# 예제 `Dataset` 객체를 만듭니다:
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(32)
# Dataset에서 `fit` 메서드를 호출할 때 `steps_per_epoch` 설정을 잊지 마세요.
model.fit(dataset, epochs=10, steps_per_epoch=30)
###Output
_____no_output_____
###Markdown
여기에서 `fit` 메서드는 `steps_per_epoch` 매개변수를 사용합니다. 다음 에포크로 넘어가기 전에 모델이 수행할 훈련 단계 횟수입니다. `Dataset`이 배치 데이터를 생성하기 때문에 `batch_size`가 필요하지 않습니다.`Dataset`은 검증 데이터에도 사용할 수 있습니다:
###Code
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(32)
val_dataset = tf.data.Dataset.from_tensor_slices((val_data, val_labels))
val_dataset = val_dataset.batch(32)
model.fit(dataset, epochs=10,
validation_data=val_dataset)
###Output
_____no_output_____
###Markdown
평가와 예측`tf.keras.Model.evaluate`와 `tf.keras.Model.predict` 메서드에는 넘파이 배열이나 `tf.data.Dataset`을 사용할 수 있습니다.주어진 데이터로 추론 모드의 손실이나 지표를 *평가*합니다:
###Code
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
model.evaluate(data, labels, batch_size=32)
model.evaluate(dataset, steps=30)
###Output
_____no_output_____
###Markdown
주어진 데이터로 추론 모드에서 마지막 층의 출력을 *예측*하여 넘파이 배열로 반환합니다:
###Code
result = model.predict(data, batch_size=32)
print(result.shape)
###Output
_____no_output_____
###Markdown
맞춤형 훈련 반복을 밑바닥부터 작성하는 방법을 포함하여 훈련과 평가에 대한 완전한 설명은 [훈련과 평가 가이드](./training_and_evaluation.ipynb)를 참고하세요. 고급 모델 만들기 함수형 API`tf.keras.Sequential` 모델은 단순히 층을 쌓은 것으로 임의의 구조를 표현할 수 없습니다. [케라스 함수형 API](./functional.ipynb)를 사용하면 다음과 같은 복잡한 모델 구조를 만들 수 있습니다:* 다중 입력 모델,* 다중 출력 모델,* 층을 공유하는 모델 (동일한 층을 여러번 호출합니다),* 데이터 흐름이 차례대로 진행되지 않는 모델 (예를 들면 잔차 연결(residual connections)).함수형 API로 모델을 만드는 방식은 다음과 같습니다:1. 하나의 층 객체는 호출 가능하고 텐서를 반환합니다.2. `tf.keras.Model` 객체를 정의하기 위해 입력 텐서와 출력 텐서를 사용합니다.3. 이 모델은 `Sequential` 모델과 동일한 방식으로 훈련됩니다.다음 코드는 함수형 API를 사용하여 간단한 완전 연결 네트워크를 만드는 예입니다:
###Code
inputs = tf.keras.Input(shape=(32,)) # 입력 플레이스홀더를 반환합니다.
# 층 객체는 텐서를 사용하여 호출되고 텐서를 반환합니다.
x = layers.Dense(64, activation='relu')(inputs)
x = layers.Dense(64, activation='relu')(x)
predictions = layers.Dense(10, activation='softmax')(x)
###Output
_____no_output_____
###Markdown
입력과 출력을 사용해 모델의 객체를 만듭니다.
###Code
model = tf.keras.Model(inputs=inputs, outputs=predictions)
# 컴파일 단계는 훈련 과정을 설정합니다.
model.compile(optimizer=tf.keras.optimizers.RMSprop(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# 5번의 에포크 동안 훈련합니다.
model.fit(data, labels, batch_size=32, epochs=5)
###Output
_____no_output_____
###Markdown
모델 클래스 상속`tf.keras.Model` 클래스를 상속하고 자신만의 정방향 패스(forward pass)을 정의하여 완전히 커스터마이징된 모델을 만들 수 있습니다. `__init__` 메서드에서 층을 만들어 클래스 객체의 속성으로 지정합니다. 정방향 패스는 `call` 메서드에 정의합니다.[즉시 실행](./eager.md)이 활성화되어 있을 때 정방향 패스를 명령형 프로그래밍 방식으로 작성할 수 있기 때문에 모델 클래스 상속이 매우 유용합니다.노트: 정방향 패스를 *항상* 명령형 프로그래밍 방식으로 실행하려면 `super` 객체의 생성자를 호출할 때 `dynamic=True`를 지정하세요.중요 포인트: 작업에 맞는 API를 사용하세요. 모델 클래스 상속은 유연성을 제공하지만 복잡도가 증가하고 사용자 오류가 발생할 가능성이 높아집니다. 가능한한 함수형 API를 사용하세요.다음 코드는 `tf.keras.Model`의 클래스를 상속하여 명령형 프로그래밍 방식으로 실행할 필요가 없는 정방향 패스를 구현한 예입니다:
###Code
class MyModel(tf.keras.Model):
def __init__(self, num_classes=10):
super(MyModel, self).__init__(name='my_model')
self.num_classes = num_classes
# 층을 정의합니다.
self.dense_1 = layers.Dense(32, activation='relu')
self.dense_2 = layers.Dense(num_classes, activation='sigmoid')
def call(self, inputs):
# 정방향 패스를 정의합니다.
# `__init__` 메서드에서 정의한 층을 사용합니다.
x = self.dense_1(inputs)
return self.dense_2(x)
###Output
_____no_output_____
###Markdown
새 모델 클래스의 객체를 만듭니다:
###Code
model = MyModel(num_classes=10)
# 컴파일 단계는 훈련 과정을 설정합니다.
model.compile(optimizer=tf.keras.optimizers.RMSprop(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# 5번의 에포크 동안 훈련합니다.
model.fit(data, labels, batch_size=32, epochs=5)
###Output
_____no_output_____
###Markdown
맞춤형 층맞춤형 층(custom layer)을 만들려면 `tf.keras.layers.Layer` 클래스를 상속하고 다음 메서드를 구현합니다:* `__init__`: 이 층에서 사용되는 하위 층을 정의할 수 있습니다.* `build`: 층의 가중치를 만듭니다. `add_weight` 메서드를 사용해 가중치를 추가합니다.* `call`: 정방향 패스를 구현합니다.다음 코드는 입력과 커널 행렬의 `matmul` 계산을 구현한 맞춤형 층의 예입니다:
###Code
class MyLayer(layers.Layer):
def __init__(self, output_dim, **kwargs):
self.output_dim = output_dim
super(MyLayer, self).__init__(**kwargs)
def build(self, input_shape):
# 이 층에서 훈련할 가중치 변수를 만듭니다.
self.kernel = self.add_weight(name='kernel',
shape=(input_shape[1], self.output_dim),
initializer='uniform',
trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.kernel)
def get_config(self):
base_config = super(MyLayer, self).get_config()
base_config['output_dim'] = self.output_dim
return base_config
@classmethod
def from_config(cls, config):
return cls(**config)
###Output
_____no_output_____
###Markdown
맞춤형 층을 사용하여 모델을 만듭니다:
###Code
model = tf.keras.Sequential([
MyLayer(10),
layers.Activation('softmax')])
# 컴파일 단계는 훈련 과정을 설정합니다.
model.compile(optimizer=tf.keras.optimizers.RMSprop(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# 5번의 에포크 동안 훈련합니다.
model.fit(data, labels, batch_size=32, epochs=5)
###Output
_____no_output_____
###Markdown
클래스 상속을 통해 맞춤형 층과 모델을 만드는 더 자세한 정보는 [맞춤형 층과 모델을 만드는 방법](./custom_layers_and_models.ipynb)을 참고하세요. 콜백콜백(callback)은 훈련하는 동안 모델의 동작을 변경하고 확장하기 위해 전달하는 객체입니다. 자신만의 콜백을 작성하거나 다음과 같은 내장 `tf.keras.callbacks`을 사용할 수 있습니다:* `tf.keras.callbacks.ModelCheckpoint`: 일정 간격으로 모델의 체크포인트를 저장합니다.* `tf.keras.callbacks.LearningRateScheduler`: 학습률(learning rate)을 동적으로 변경합니다.* `tf.keras.callbacks.EarlyStopping`: 검증 성능이 향상되지 않으면 훈련을 중지합니다.* `tf.keras.callbacks.TensorBoard`: [텐서보드](https://tensorflow.org/tensorboard)를 사용하여 모델을 모니터링합니다.`tf.keras.callbacks.Callback`을 사용하려면 모델의 `fit` 메서드에 전달합니다:
###Code
callbacks = [
# `val_loss`가 2번의 에포크에 걸쳐 향상되지 않으면 훈련을 멈춥니다.
tf.keras.callbacks.EarlyStopping(patience=2, monitor='val_loss'),
    # `./logs` 디렉토리에 텐서보드 로그를 기록합니다.
tf.keras.callbacks.TensorBoard(log_dir='./logs')
]
model.fit(data, labels, batch_size=32, epochs=5, callbacks=callbacks,
validation_data=(val_data, val_labels))
###Output
_____no_output_____
###Markdown
저장과 복원 가중치`tf.keras.Model.save_weights`를 사용하여 모델의 가중치를 저장하고 복원합니다:
###Code
model = tf.keras.Sequential([
layers.Dense(64, activation='relu', input_shape=(32,)),
layers.Dense(10, activation='softmax')])
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# 가중치를 텐서플로의 체크포인트 파일로 저장합니다.
model.save_weights('./weights/my_model')
# 모델의 상태를 복원합니다.
# 모델의 구조가 동일해야 합니다.
model.load_weights('./weights/my_model')
###Output
_____no_output_____
###Markdown
기본적으로 모델의 가중치는 [텐서플로 체크포인트](../checkpoints.md) 파일 포맷으로 저장됩니다. 케라스의 HDF5 포맷으로 가중치를 저장할 수도 있습니다(다양한 백엔드를 지원하는 케라스 구현에서는 HDF5가 기본 설정입니다):
###Code
# 가중치를 HDF5 파일로 저장합니다.
model.save_weights('my_model.h5', save_format='h5')
# 모델의 상태를 복원합니다.
model.load_weights('my_model.h5')
###Output
_____no_output_____
###Markdown
설정모델 설정을 저장하면 가중치는 제외하고 모델의 구조를 직렬화합니다. 원본 모델을 정의한 코드가 없어도 저장된 설정을 사용하여 동일한 구조를 만들고 초기화할 수 있습니다. 케라스는 JSON과 YAML 직렬화 포맷을 지원합니다:
###Code
# 모델을 JSON 포맷으로 직렬화합니다.
json_string = model.to_json()
json_string
import json
import pprint
pprint.pprint(json.loads(json_string))
###Output
_____no_output_____
###Markdown
JSON 파일로부터 (완전히 새로 초기화된) 모델을 만듭니다.
###Code
fresh_model = tf.keras.models.model_from_json(json_string)
###Output
_____no_output_____
###Markdown
YAML 포맷으로 직렬화하려면 *텐서플로를 임포트하기 전에* `pyyaml`을 설치해야 합니다:
###Code
yaml_string = model.to_yaml()
print(yaml_string)
###Output
_____no_output_____
###Markdown
YAML 파일로부터 모델을 다시 만듭니다.
###Code
fresh_model = tf.keras.models.model_from_yaml(yaml_string)
###Output
_____no_output_____
###Markdown
주의: Model 클래스를 상속하여 만든 모델은 `call` 메서드의 본문에 파이썬 코드로 구조가 정의되어 있기 때문에 직렬화되지 않습니다. 전체 모델가중치와 모델 설정, 심지어 옵티마이저 설정까지 포함된 전체 모델을 파일에 저장할 수 있습니다. 모델의 중간 상태를 저장하고 나중에 원본 코드가 없어도 정확히 동일한 상태에서 훈련을 재개할 수 있습니다.
###Code
# 간단한 모델을 만듭니다.
model = tf.keras.Sequential([
layers.Dense(10, activation='softmax', input_shape=(32,)),
layers.Dense(10, activation='softmax')
])
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, batch_size=32, epochs=5)
# 전체 모델을 HDF5 파일로 저장합니다.
model.save('my_model.h5')
# 가중치와 옵티마이저를 포함하여 정확히 같은 모델을 다시 만듭니다.
model = tf.keras.models.load_model('my_model.h5')
###Output
_____no_output_____
###Markdown
케라스 모델의 저장과 직렬화에 대한 더 자세한 내용은 [모델 저장과 직렬화 가이드](./saving_and_serializing.ipynb)를 참고하세요. 즉시 실행[즉시 실행](./eager.md)은 연산을 즉각 평가하는 명령형 프로그래밍(imperative programming) 환경입니다. 케라스에서는 즉시 실행이 필수가 아니지만 `tf.keras`는 이를 지원합니다. 이 기능은 프로그램을 검사하고 디버깅하는데 유용합니다.모든 `tf.keras` 모델링 API는 즉시 실행과 호환됩니다. `Sequential`이나 함수형 API와 사용할 수 있지만 즉시 실행은 특히 *모델 상속*과 *맞춤형 층*을 만들 때 장점이 나타납니다. 이런 API는 (기존의 층을 조합하여 모델을 만드는 대신) 직접 정방향 패스의 코드를 작성하기 때문입니다.[즉시 실행 가이드](./eager.ipynbbuild_a_model)에서 맞춤형 훈련 반복과 `tf.GradientTape`를 케라스 모델에 같이 사용하는 예를 참고하세요. 또한 간단하지만 완전한 예제를 [여기](https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/quickstart/advanced.ipynb)에서 볼 수 있습니다. 분산 처리 다중 GPU`tf.keras` 모델은 `tf.distribute.Strategy`를 사용하여 다중 GPU에서 실행할 수 있습니다. 이 API는 기존 코드를 거의 수정하지 않고 다중 GPU에서 훈련을 분산시킬 수 있습니다.현재는 `tf.distribute.MirroredStrategy`가 유일하게 지원되는 분산 전략입니다. `MirroredStrategy`는 한 대의 장치에서 계산 결과를 모두 수집하는 방식인 그래프 내 복제(in-graph replication)를 수행합니다. `distribute.Strategy`를 사용하려면 `Strategy`의 `.scope()` 안에 옵티마이저 객체 생성, 모델 구성, 컴파일 단계를 포함시킨 다음 모델을 훈련합니다.다음 코드는 한 대의 컴퓨터에서 다중 GPU를 사용해 `tf.keras.Model`을 분산 처리하는 예입니다.먼저, `MirroredStrategy`의 `scope()` 안에서 모델을 정의합니다:
###Code
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = tf.keras.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10,)))
model.add(layers.Dense(1, activation='sigmoid'))
optimizer = tf.keras.optimizers.SGD(0.2)
model.compile(loss='binary_crossentropy', optimizer=optimizer)
model.summary()
###Output
_____no_output_____
###Markdown
그다음, 보통 때와 같은 데이터로 모델을 훈련합니다:
###Code
x = np.random.random((1024, 10))
y = np.random.randint(2, size=(1024, 1))
x = tf.cast(x, tf.float32)
dataset = tf.data.Dataset.from_tensor_slices((x, y))
dataset = dataset.shuffle(buffer_size=1024).batch(32)
model.fit(dataset, epochs=1)
###Output
_____no_output_____ |
AssignmentDay05.ipynb | ###Markdown
1: Write a Python program to find the first 20 non-even prime natural numbers.
###Code
# start from 3 so that the even prime 2 is excluded; 3..73 gives exactly the first 20 non-even primes
for n in range(3,74):
    for i in range(2,n):
        if n%i==0:
            break
    else:
        print(n)
###Output
3
5
7
11
13
17
19
23
29
31
37
41
43
47
53
59
61
67
71
73
###Markdown
2: Write a Python program to implement 15 string functions. 1. The capitalize() function is used to convert the first letter of the string to uppercase.
###Code
str="raji"
x=str.capitalize()
print(x)
###Output
Raji
###Markdown
2. The casefold() function is used to convert the uppercase letters of the string to lowercase.
###Code
str="RAJI"
x=str.casefold()
print(x)
###Output
raji
###Markdown
3. rfind() is one of the string functions; it returns the highest index at which the given substring occurs (i.e. the starting index of its last occurrence). If the substring is not found, it returns -1.
###Code
str="Lets upgrade, our selves. "
x=str.rfind("our")
print(x)
str="Lets upgrade, our selves. "
x=str.rfind("is")
print(x)
###Output
-1
###Markdown
4. The swapcase() string function is used to convert uppercase letters to lowercase and lowercase letters to uppercase.
###Code
str="LetsUpGrAde"
x=str.swapcase()
print(x)
###Output
lETSuPgRaDE
###Markdown
5. The isdigit() function returns True if all the characters in the given string are digits.
###Code
str="2848"
x=str.isdigit()
x
###Output
_____no_output_____
###Markdown
6. The rsplit() function is used to split the given string into a list of items, splitting from the right.
###Code
str="Lets upgrade together"
x=str.rsplit()
x
###Output
_____no_output_____
###Markdown
7. The strip() function is used to remove the leading and trailing whitespace from the given input.
###Code
str=" green "
x=str.strip()
print(x,"is my favourite colour")
###Output
green is my favourite colour
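A small extra example (not part of the original answer) showing that strip() only removes the whitespace at the ends and leaves the spaces between words untouched:
###Code
str="  lets upgrade  "
print(str.strip())   # 'lets upgrade' - the inner space is preserved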
###Markdown
8. The isalpha() function is used to check whether the given string contains only alphabetic characters.
###Code
str="Letssupgrde"
x=str.isalpha()
x
###Output
_____no_output_____
###Markdown
9. The isalnum() function is used to check whether the given string contains only alphanumeric characters.
###Code
str="Letsupgrade2020"
x=str.isalnum()
x
###Output
_____no_output_____
###Markdown
10. The find() function is used to find the lowest index of the given substring; if there is more than one match, it returns the index of the first occurrence. If we pass a word as the parameter, the returned index is that of the word's first letter.
###Code
str="Lets upgrade together"
x=str.find("t")
x
###Output
_____no_output_____
###Markdown
11. The isidentifier() method returns True if the string is a valid identifier, i.e. it contains only alphanumeric characters (a-z, A-Z, 0-9) or underscores (_) and does not start with a digit. Otherwise it returns False.
###Code
str="Letsupgrade"
x=str.isidentifier()
x
###Output
_____no_output_____
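An extra example (not part of the original answer) showing that a string starting with a digit is not a valid identifier:
###Code
str="2020Letsupgrade"
x=str.isidentifier()
x   # False, because an identifier cannot start with a digit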
###Markdown
12. The isspace() function is used to check whether the given input contains only whitespace. If so, it returns True; otherwise it returns False.
###Code
str=" "
x=str.isspace()
x
###Output
_____no_output_____
###Markdown
13. The partition() function searches for the specified string and splits the original string around it. The output is a tuple of the part before the match, the match itself, and the part after it.
###Code
str="I started to learn python"
x=str.partition("python")
x
###Output
_____no_output_____
###Markdown
14. The zfill() method adds zeros (0) at the beginning of the string until it reaches the specified length. If the given length is less than the length of the string, no filling is done.
###Code
str="200"
x=str.zfill(20)
x
###Output
_____no_output_____
###Markdown
15. The startswith() method returns True if the string starts with the given parameter.
###Code
str="Welcome to Letsupgrade"
x=str.startswith("Welcome")
x
###Output
_____no_output_____
###Markdown
3: Write a Python program to check if the given string is a Palindrome or an Anagram or none of them. Display the corresponding message to the user.
###Code
str=input("Enter your string")
if list(reversed(str))==list(str):
print("palindrome")
else:
print("not a palindrome")
str1=input("Enter first string:")
str2=input("Enter second string:")
if(sorted(str1)==sorted(str2)):
print("The given strings are anagrams.")
else:
print("The given strings aren't anagrams.")
###Output
Enter first string:fired
Enter second string:fried
The given strings are anagrams.
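The two cells above check the palindrome and anagram conditions separately; the sketch below (with hypothetical example strings) combines them into a single check that reports palindrome, anagram, or none of them, as the task statement asks:
###Code
def classify(str1, str2):
    # A string is a palindrome if it reads the same backwards.
    if str1 == str1[::-1]:
        return "palindrome"
    # Two strings are anagrams if they contain exactly the same letters.
    if sorted(str1) == sorted(str2):
        return "anagram"
    return "none of them"

print(classify("malayalam", "fried"))   # palindrome
print(classify("fired", "fried"))       # anagram
print(classify("hello", "world"))       # none of them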
###Markdown
4: Write a Python user-defined function that removes all the additional characters from the string and finally converts it to lower case using the built-in lower(). e.g. If the string is "Dr. Darshan Ingle @AIML Trainer", then the output should be "drdarshaningleaimltrainer".
###Code
unwanted=[".","!","@","%","&","-"]
str="K.Raji@Student&Explorer!"
print("The given string is :",str)
for i in unwanted:
str=str.replace(i,'')
str=str.lower()
print("The output is",str)
###Output
The given string is : K.Raji@Student&Explorer!
The output is krajistudentexplorer
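Since the task asks for a user-defined function, here is a sketch that wraps the same logic in one; the character list extends the one above with a space so that the example string from the task statement also works:
###Code
def clean_and_lower(text, unwanted=(".", "!", "@", "%", "&", "-", " ")):
    # Remove every unwanted character, then convert the result to lowercase.
    for ch in unwanted:
        text = text.replace(ch, '')
    return text.lower()

print(clean_and_lower("Dr. Darshan Ingle @AIML Trainer"))   # drdarshaningleaimltrainer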
|
experiments/snar_benchmark/visualization.ipynb | ###Markdown
Multiobjective Transforms ChimeraChimera is a scalarisation technique created by Hase et al.
###Code
def plot_chimera(hierarchy, ax):
# Setup
exp = SnarBenchmark()
chim = Chimera(exp.domain, hierarchy)
r = Random(exp.domain,
random_state=np.random.RandomState(100))
# Get data and run chimera transformation
experiments = r.suggest_experiments(50)
results =exp.run_experiments(experiments)
inputs, outputs = chim.transform_inputs_outputs(results)
# Plot
x, y, z = results['sty'], results['e_factor'], outputs['chimera']
ax.scatter(x, y, z,
s=100, marker='o')
base = np.min(z)
for xi, yi, zi in zip(x, y, z):
line=art3d.Line3D(*zip((xi, yi, base), (xi, yi, zi)), markevery=(1, 1))
ax.add_line(line)
ax.set_xlabel('sty')
ax.set_ylabel('E-factor')
ax.set_zlabel('Chimera')
ax.view_init(10, 60)
return ax
# Compare different tolerances
hierarchies = [{'sty': {'hierarchy': 0, 'tolerance': 1},
'e_factor': {'hierarchy': 1, 'tolerance': 1}},
{'sty': {'hierarchy': 0, 'tolerance': 0.5},
'e_factor': {'hierarchy': 1, 'tolerance': 0.5}},
{'sty': {'hierarchy': 0, 'tolerance': 1.0},
'e_factor': {'hierarchy': 1, 'tolerance': 0.5}},
{'sty': {'hierarchy': 0, 'tolerance': 0.5},
'e_factor': {'hierarchy': 1, 'tolerance': 1.0}}
]
fig = plt.figure(figsize=(10,10))
for i, hierarchy in enumerate(hierarchies):
# First plot
ax = fig.add_subplot(2,2, i+1, projection='3d')
ax.xaxis.set_major_locator(ticker.MultipleLocator(2000))
ax.yaxis.set_major_locator(ticker.MultipleLocator(20))
plot_chimera(hierarchy, ax)
ax.set_title(f"STY_tol={hierarchy['sty']['tolerance']}, E-factor_tol={hierarchy['e_factor']['tolerance']}")
fig.tight_layout()
fig.savefig('figures/chimera_comparison.png', dpi=300)
###Output
_____no_output_____
###Markdown
Note that Chimera turns everything into a minimization problem. Tolerance is probably not the best descriptor of how this works; a better term would be weighting. Higher tolerance values mean you weight an objective more, and only the relative weighting matters: (1,1) and (0.5,0.5) are identical (a quick numerical check of this is sketched below). However, decreasing the weighting of E-factor with minimal change in STY results in STY being weighted much more heavily. Custom ASF
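A quick numerical check of the tolerance claim above, reusing only the calls already used in plot_chimera (SnarBenchmark, Random, Chimera and transform_inputs_outputs); this is a sketch, so the exact column access may need adjusting to the installed Summit version:
###Code
exp = SnarBenchmark()
r = Random(exp.domain, random_state=np.random.RandomState(100))
results = exp.run_experiments(r.suggest_experiments(50))

chim_a = Chimera(exp.domain, {'sty': {'hierarchy': 0, 'tolerance': 1.0},
                              'e_factor': {'hierarchy': 1, 'tolerance': 1.0}})
chim_b = Chimera(exp.domain, {'sty': {'hierarchy': 0, 'tolerance': 0.5},
                              'e_factor': {'hierarchy': 1, 'tolerance': 0.5}})
_, out_a = chim_a.transform_inputs_outputs(results)
_, out_b = chim_b.transform_inputs_outputs(results)
# If only the relative weighting matters, the two transformed objectives should match.
print(np.allclose(out_a['chimera'].to_numpy().astype(float),
                  out_b['chimera'].to_numpy().astype(float)))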
###Code
def plot_custom_asf(ax):
# Setup
exp = SnarBenchmark()
asf = MultitoSingleObjective(exp.domain,expression='-sty/1e4+e_factor/100',maximize=False)
r = Random(exp.domain,
random_state=np.random.RandomState(100))
# Get data and run chimera transformation
experiments = r.suggest_experiments(50)
results =exp.run_experiments(experiments)
inputs, outputs = asf.transform_inputs_outputs(results)
# Plot
x, y, z = results['sty'], results['e_factor'], outputs['scalar_objective']
ax.scatter(x, y, z,
s=100, marker='o')
base = np.min(z)
for xi, yi, zi in zip(x, y, z):
line=art3d.Line3D(*zip((xi, yi, base), (xi, yi, zi)), markevery=(1, 1))
ax.add_line(line)
ax.set_xlabel('sty')
ax.set_ylabel('E-factor')
ax.set_zlabel('Custom ASF')
ax.view_init(10, 60)
return ax
fig =plt.figure(figsize=(5,5))
ax = fig.add_subplot(1,1,1, projection='3d')
ax.xaxis.set_major_locator(ticker.MultipleLocator(2000))
ax.yaxis.set_major_locator(ticker.MultipleLocator(20))
plot_custom_asf(ax)
fig.tight_layout()
fig.savefig('figures/custom_asf.png', dpi=300)
###Output
_____no_output_____
###Markdown
Hypervolume Improvement
###Code
from summit.utils.multiobjective import hypervolume, pareto_efficient
def plot_hvi(ax):
# Setup
exp = SnarBenchmark()
tsemo = TSEMO2(exp.domain)
r = Random(exp.domain,
random_state=np.random.RandomState(100))
# Get data and run chimera transformation
experiments = r.suggest_experiments(50)
results = exp.run_experiments(experiments)
inputs, outputs = tsemo.transform.transform_inputs_outputs(results)
# Extra points for evaluation
new_experiments = r.suggest_experiments(50)
samples = exp.run_experiments(new_experiments)
#Make STY negative for minimization
y = outputs.to_numpy()
y[:, 0] = -1.0*y[:,0]
samples_inputs, samples_outputs = tsemo.transform.transform_inputs_outputs(samples)
y_samples = samples_outputs.to_numpy()
y_samples[:,0] = -1.0*y_samples[:,0]
# Initial pareto front
y_front, _ = pareto_efficient(y, maximize=False)
#Reference point
r = np.max(y_front, axis=0)+0.01*(np.max(y_front, axis=0)-np.min(y_front,axis=0))
# Base hypervolume
    hv_base = hypervolume(y_front, r)  # hypervolume() is imported from summit.utils.multiobjective above
# Hypervolume Improvement
hypervolumes = []
hvi = []
for sample in y_samples:
sample = np.atleast_2d(sample)
y_new = np.append(y_front,sample, axis=0)
y_front_new, _ = pareto_efficient(y_new, maximize=False)
        hv = hypervolume(y_front_new, r)
hypervolumes.append(hv)
hvi.append(hv-hv_base)
# Plot
x, y, z = y_samples[:,0], y_samples[:,1], hvi
ax.scatter(x, y, z,
s=100, marker='o')
for xi, yi, zi in zip(x, y, z):
line=art3d.Line3D(*zip((xi, yi, 0), (xi, yi, zi)), markevery=(1, 1))
ax.add_line(line)
ax.set_xlabel('sty')
ax.set_ylabel('E-factor')
ax.set_zlabel('Hypervolume Improvement')
ax.view_init(0, 60)
return ax, hypervolumes, hvi
fig = plt.figure(figsize=(5,5))
ax = fig.add_subplot(111, projection='3d')
ax, hv, hvi = plot_hvi(ax)
fig.savefig('hypervolume')
###Output
_____no_output_____
###Markdown
Snar Experiments
###Code
# IDs of Neptune Experiments
ids = [f'SUM-{i}' for i in range(4166,4266)] + \
[f'SUM-{i}' for i in range(3916,4016)] + \
[f'SUM-{i}' for i in range(2600,3262)] + \
[f'SUM-{i}' for i in range(4026,4029)]
# Gather experiments from Neptune. This can be slow.
results = PlotExperiments("sustainable-processes/summit",
trajectory_length=50,
experiment_ids=ids,
tag=['snar_experiment'],
state=['succeeded'])
# Save to csv
# results.to_csv("data/snar_results.csv")
###Output
WARNING: There is a new version of neptune-client 0.4.125 (installed: 0.4.118).
###Markdown
Let's make an interactive plot of the hypervolume trajectories, for exploration's sake.
###Code
fig = results.plot_hv_trajectories(min_terminal_hv_avg=1000,
include_experiment_ids=True,plot_type='plotly')
###Output
_____no_output_____
###Markdown
You can double click on an item in the legend to solo it. I'm going to increase the threshold for the publication figure, so I only include the top three strategies.
###Code
# Create figure
fig = plt.figure(figsize=(15, 5))
fig.subplots_adjust(wspace=0.35,)
# Hypervolume, Time bar plot
ax = fig.add_subplot(1, 2, 1)
results.time_hv_bar_plot(ax)
ax.text(0.95,0.95, '(a)',
horizontalalignment='center',
verticalalignment='center',
transform = ax.transAxes,
fontdict={'fontsize': 12})
# Hypervolume trajectory plot
ax = fig.add_subplot(1, 2, 2)
ax, legend = results.plot_hv_trajectories(min_terminal_hv_avg=1900,
include_experiment_ids=False,plot_type='matplotlib',
ax=ax)
ax.legend(loc="upper left")
ax.text(0.95,0.95, '(b)',
horizontalalignment='center',
verticalalignment='center',
transform = ax.transAxes,
fontdict={'fontsize': 12})
# Save figure
fig.savefig("figures/snar_hv_time_tradeoff.png", dpi=300, bbox_inches='tight')
# TODO: print out caption with transform settings.
###Output
_____no_output_____
###Markdown
Above is a figure to examine the pros/cons of different optimisation strategies. What you see is that the Bayesian optimisation strategies perform best, finding the most optimal points in the allotted number of iterations. However, that comes at the price of 3 orders of magnitude greater computation cost. Furthermore, the experiments were run on an HPC with up to 32 threads dedicated to each strategy, so our experience is that the runtime increases by ~10x on consumer hardware. The longer computation time is likely acceptable in the case of TSEMO given the significant improvement in performance, but it might not make sense in the case of the GRYFFIN or SOBO (GPyOpt) strategies.
###Code
results.parallel_plot("SUM-2688")
###Output
_____no_output_____
###Markdown
I made the above parallel plot to show how the decision variables correspond with the objectives. However, I don't think it's clean enough for a publication. Below are the Pareto plots for the run from each combination of strategy and transform that had the highest terminal hypervolume.
###Code
fig = results.best_pareto_grid(ncols=4, figsize=(15, 30))
fig.savefig('figures/snar_pareto_fronts.png', dpi=300, bbox_inches='tight')
###Output
_____no_output_____
###Markdown
In the publication, I would like to have one figure that gently introduces multiobjective optimisation and hypervolume.
###Code
COLORS = [
(165, 0, 38),
(215, 48, 39),
(244, 109, 67),
(253, 174, 97),
(254, 224, 144),
(255, 255, 191),
(224, 243, 248),
(171, 217, 233),
(116, 173, 209),
(69, 117, 180),
(49, 54, 149),
]
COLORS = np.array(COLORS) / 256
CMAP = mpl.colors.ListedColormap(COLORS)
fontsize = 13
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
# Find run with maximum terminal hypervolume
df = results.df
max_index = df["terminal_hypervolume"].argmax()
experiment_id = df.iloc[max_index]['experiment_id']
r = results.runners[experiment_id]
# Pareto plot
ax, im = r.experiment.pareto_plot(colorbar=True, ax=axes[0])
cb = fig.colorbar(im, ax=ax, )
cb.set_label(label='Number of Experiments', size='large')
ax.set_ylim(0, 300)
ax.set_xlabel(r"Space Time Yield / $kg \; m^{-3} h^{-1}$", fontsize=fontsize)
ax.set_ylabel("E-factor", fontsize=fontsize)
axes[0].tick_params(direction='in')
axes[0].text(0.05,0.95, '(a)',
horizontalalignment='center',
verticalalignment='center',
transform = axes[0].transAxes,
fontdict={'fontsize': 12})
# Hypervolume plot
data = r.experiment.data[['sty', 'e_factor']].to_numpy()
data[:, 0] *= -1 # make it a minimzation problem
hv_trajectory = np.zeros([results.trajectory_length])
for i in range(results.trajectory_length):
y_front, _ = pareto_efficient(data[0 : i + 1, :], maximize=False)
hv_trajectory[i] = hypervolume(y_front, ref=[-2957, 10.7])
t = np.arange(1, results.trajectory_length + 1)
axes[1].plot(t, hv_trajectory, c=COLORS[-1])
axes[1].set_xlabel('Experiments', fontsize=fontsize)
axes[1].set_ylabel('Hypervolume', fontsize=fontsize)
axes[1].tick_params(direction='in')
axes[1].text(0.05,0.95, '(b)',
horizontalalignment='center',
verticalalignment='center',
transform = axes[1].transAxes,
fontdict={'fontsize': 12})
# Save plot
fig.savefig('figures/snar_tsemo_pareto_hv.png', dpi=300)
###Output
_____no_output_____
###Markdown
Finally, I want to make a table that summarises all the combinations of strategies and multiobjective transforms.
###Code
df = results.df.copy()
df["computation_t"] = df["computation_t"]/results.trajectory_length
# Group repeats
df = df.drop(columns=["expression", "maximize", "noise_level"])
by=[
"strategy_name",
"transform_name",
"sty_tolerance",
"e_factor_tolerance",
"batch_size",
"num_initial_experiments",
]
grouped_df = df.groupby(
by=by,
dropna=False,
).head(20).groupby(by, dropna=False)
# Take mean and standard deviation
stds = grouped_df.std().round(0)
counts = grouped_df.count()['experiment_id']
grouped_df = grouped_df.mean().round(0)
# Count the number of repeats
for col in ["terminal_hypervolume", 'computation_t']:
grouped_df[col] = [f"{m}$\pm${s}"
for m, s in zip(grouped_df[col].values,
stds[col].values)
]
grouped_df['Repeats'] = counts
# Rename column headers and clean up index
grouped_df.rename(columns={"terminal_hypervolume": "Terminal hypervolume",
"computation_t": "Time per iter. (s)"},
inplace=True)
grouped_df.index.rename(["Strategy", "Transform","STY tolerance",
"E-factor tolerance", "batch_size", "num_initial_experiments"],
inplace=True)
grouped_df.index = grouped_df.index.droplevel(["batch_size", "num_initial_experiments"])
#Replace Nas
grouped_df = grouped_df.fillna('-').round(0)
# Disiplay
grouped_df
print("Number of experiments for this benchmark:", len(grouped_df)*20*50)
# Convert the table to Latex format
latex_str = grouped_df.to_latex(index=True, escape=False, bold_rows=False)
latex_str = latex_str.replace("MultitoSingleObjective", "Custom")
latex_str = latex_str.replace('NaN', '-')
print(latex_str)
###Output
\begin{tabular}{llllllr}
\toprule
& & & & Terminal hypervolume & Time per iter. (s) & Repeats \\
Strategy & Transform & STY tolerance & E-factor tolerance & & & \\
\midrule
DRO & Chimera & 0.5 & 0.5 & 10.0$\pm$29.0 & 0.0$\pm$0.0 & 20 \\
& & & 1.0 & 2.0$\pm$7.0 & 0.0$\pm$0.0 & 20 \\
& & 1.0 & 0.5 & 0.0$\pm$2.0 & 0.0$\pm$0.0 & 20 \\
& & & 1.0 & 7.0$\pm$29.0 & 0.0$\pm$0.0 & 20 \\
& Custom & - & - & 0.0$\pm$0.0 & 0.0$\pm$0.0 & 20 \\
GRYFFIN & Chimera & 0.5 & 0.5 & 669.0$\pm$1132.0 & 79.0$\pm$11.0 & 20 \\
& & & 1.0 & 1449.0$\pm$2243.0 & 78.0$\pm$11.0 & 20 \\
& & 1.0 & 0.5 & 1715.0$\pm$1766.0 & 106.0$\pm$19.0 & 20 \\
& & & 1.0 & 1959.0$\pm$1545.0 & 87.0$\pm$12.0 & 20 \\
& Custom & - & - & 528.0$\pm$1048.0 & 89.0$\pm$10.0 & 20 \\
NelderMead & Chimera & 0.5 & 0.5 & 0.0$\pm$0.0 & 0.0$\pm$0.0 & 20 \\
& & & 1.0 & 0.0$\pm$0.0 & 0.0$\pm$0.0 & 20 \\
& & 1.0 & 0.5 & 0.0$\pm$0.0 & 0.0$\pm$0.0 & 20 \\
& & & 1.0 & 0.0$\pm$0.0 & 0.0$\pm$0.0 & 20 \\
& Custom & - & - & 43.0$\pm$108.0 & 0.0$\pm$0.0 & 20 \\
Random & Transform & - & - & 1032.0$\pm$1315.0 & 0.0$\pm$0.0 & 20 \\
SNOBFIT & Chimera & 0.5 & 0.5 & 1095.0$\pm$0.0 & 0.0$\pm$0.0 & 20 \\
& & & 1.0 & 1095.0$\pm$0.0 & 0.0$\pm$0.0 & 20 \\
& & 1.0 & 0.5 & 1095.0$\pm$0.0 & 0.0$\pm$0.0 & 20 \\
& & & 1.0 & 1095.0$\pm$0.0 & 0.0$\pm$0.0 & 20 \\
& Custom & - & - & 0.0$\pm$0.0 & 0.0$\pm$0.0 & 20 \\
SOBO & Chimera & 0.5 & 0.5 & 634.0$\pm$1049.0 & 0.0$\pm$0.0 & 20 \\
& & & 1.0 & 593.0$\pm$876.0 & 0.0$\pm$0.0 & 20 \\
& & 1.0 & 0.5 & 1414.0$\pm$1599.0 & 0.0$\pm$0.0 & 20 \\
& & & 1.0 & 786.0$\pm$1325.0 & 0.0$\pm$0.0 & 20 \\
& Custom & - & - & 2013.0$\pm$2155.0 & 0.0$\pm$0.0 & 20 \\
TSEMO & Transform & - & - & 5803.0$\pm$2659.0 & 42.0$\pm$1.0 & 20 \\
\bottomrule
\end{tabular}
###Markdown
SnAr Benchmark Visualization
###Code
%load_ext autoreload
%autoreload 2
from pareto_front import NSGAII, DomainWrapper
from snar_experiment_visualization import PlotExperiments
from summit.benchmarks import SnarBenchmark
from summit.utils.dataset import DataSet
from summit.utils.multiobjective import hypervolume, pareto_efficient
from summit.strategies import Random
from summit.strategies import Chimera, MultitoSingleObjective
from pymoo.model.problem import Problem
from pymoo.algorithms.nsga2 import NSGA2
from pymoo.factory import get_sampling, get_crossover, get_mutation
from pymoo.optimize import minimize
from pymoo.factory import get_termination
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import mpl_toolkits.mplot3d.art3d as art3d
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
from hiplot import Experiment
import plotly
import plotly.express as px
import numpy as np
from math import log
import numpy as np
import json
from pandas.plotting import parallel_coordinates
import pandas as pd
###Output
_____no_output_____
###Markdown
Pareto Front RandomHere, I am trying to use random search to determine the Pareto front. I really do not think that works well though.
###Code
def determine_pareto_front(n_points=5000, random_seed=100):
exp = SnarBenchmark()
rand = Random(exp.domain,
random_state=np.random.RandomState(random_seed))
experiments = rand.suggest_experiments(n_points)
exp.run_experiments(experiments)
return exp
exp = determine_pareto_front(n_points=int(2e4))
fig, ax = exp.pareto_plot()
fig, ax = exp.pareto_plot()
names = [r'Space Time Yield ($kg \; m^{-3} h^{-1}$)', 'E-factor']
ax.set_xlabel(names[0])
ax.set_ylabel(names[1])
ax.set_ylim(9, 11)
ax.tick_params(direction='in')
###Output
_____no_output_____
###Markdown
NSGAII PlatypusHere, I am using NSGA to determine the pareto front.
###Code
exp = SnarBenchmark()
exp.reset()
optimizer = NSGAII(exp)
r = optimizer.optimize(iterations=int(1e4))
fig, ax = exp.pareto_plot()
names = [r'Space Time Yield ($kg \; m^{-3} h^{-1}$)', 'E-factor']
ax.set_xlabel(names[0], fontsize=12)
ax.set_ylabel(names[1], fontsize=12)
_ = ax.set_ylim(9.3, 10.0)
print("Size of the pareto front:", len(exp.pareto_data))
data = exp.pareto_data[['sty', 'e_factor']].to_numpy()
data[:, 0] = -1.0*data[:, 0]
means = np.mean(data, axis=0)
stds = np.std(data, axis=0)
data_std = (data-means)/stds
plt.scatter(data_std[:,0], data_std[:,1])
hypervolume(data_std, ref=[1.5,2])
###Output
_____no_output_____
###Markdown
NSGA-II Pymoo
###Code
# Set up problem
exp = SnarBenchmark()
exp.reset()
problem = DomainWrapper(exp)
# Set up optimisation
generations = 100
pop_size = 100
optimizer = NSGA2(pop_size=pop_size)
termination = get_termination("n_gen", generations)
# Run optimisation
res = minimize(
problem, optimizer, termination, seed=1, verbose=True
)
exp.data.to_csv('pareto_front_snar.csv')
fig, ax = plt.subplots()
exp.pareto_plot(ax=ax)
ax.set_xlim (2000, 1.2e4)
ax.set_ylim(9.2, 11)
names = [r'Space Time Yield ($kg \; m^{-3} h^{-1}$)', 'E-factor']
ax.set_xlabel(names[0])
ax.set_ylabel(names[1])
ax.tick_params(direction='in')
fig.savefig('figures/pareto_front_snar.png', dpi=300)
fig.savefig('figures/pareto_front_snar.svg')
pareto_data = exp.pareto_data[['sty', 'e_factor']].to_numpy()
pareto_data[:, 0] *= -1.0 #Convert to minimization
nadir = np.max(pareto_data, axis=0)
print("Reference point:", nadir)
hypervolume(pareto_data, nadir)
###Output
_____no_output_____
###Markdown
Visualize Sensitivity
###Code
ds = DataSet.read_csv('pareto_front_snar.csv')
data_std_arr = ds.standardize()
data = pd.DataFrame(data_std_arr, columns=ds.data_columns)
# parallel_coordinates(data.iloc[9000:int(1e4)], 'strategy', color=("#dcdfe6"), alpha=0.05)
data_log = ds[ds.data_columns].copy()
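# Note (added): the log transform of sty and e_factor below is presumably meant
# to compress their heavy right tails before standardising, so the
# parallel-coordinates axes spread out more evenly.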
data_log[('sty', 'DATA')] = data_log['sty'].apply(log)
data_log[('e_factor', 'DATA')] = data_log['e_factor'].apply(log)
data_std_arr = data_log.standardize()
data_log = pd.DataFrame(data_std_arr, columns=ds.data_columns)
Experiment.from_iterable(data_log.to_dict(orient='records')).display()
data_log_new = data_log.copy()
data_log_new['strategy'] = 'NSGAII'
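# Note (added): the filter data_log['e_factor'] < -0.5 below keeps only rows
# whose standardised log E-factor is well below the mean, i.e. the
# better-performing region of the sampled conditions, before plotting.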
parallel_coordinates(data_log_new[data_log['e_factor']<-0.5],
"strategy",
color=("#dcdfe6"),
alpha=0.05,
xticks=[r'$\tau$', 'Equiv 1', 'Conc 2', 'Temperature', 'STY'])
###Output
_____no_output_____ |
tests/creator/creator_ml.ipynb | ###Markdown
Normalizer
###Code
from optimus.ml import feature as fe
t.create(fe, "normalizer", None, 'df', None, source_df, input_cols=["features"], p=2.0)
# Problems with precision
t.delete("normalizer")
t.run()
###Output
Creating file ../test_df_ml_2.py
Done
###Markdown
Vector Assembler
###Code
t.create(fe, "vector_assembler", None, 'df', None, source_df, input_cols=["id", "x", "y"])
###Output
Creating test_vector_assembler() test function...
###Markdown
Onehot encoder
###Code
# Creating DataFrame
data = [
(0, "a"),
(1, "b"),
(2, "c"),
(3, "a"),
(4, "a"),
(5, "c")
]
df = op.spark.createDataFrame(data,["id", "category"])
t.create(fe, "one_hot_encoder", None, 'df', None, source_df, input_cols=["id"])
t.run()
###Output
Creating file ../test_df_ml_2.py
Done
|
code/import food.ipynb | ###Markdown
https://www.google.com/search?sxsrf=APq-WBvDdFmieoxx07YBWdTETBH6j2O3Cw:1646563899712&q=food+emissions+dataset&sa=X&ved=2ahUKEwjCtt3CqLH2AhXmSfEDHaczAm0QgwN6BAgBEAE&biw=960&bih=919&dpr=1
https://ourworldindata.org/explorers/food-footprints?facet=none&country=Beef+%28beef+herd%29~Lamb+%26+Mutton~Beef+%28dairy+herd%29~Prawns+%28farmed%29~Cheese~Pig+Meat~Poultry+Meat~Eggs~Rice~Tofu+%28soybeans%29~Milk~Tomatoes~Maize~Wheat+%26+Rye~Peas~Bananas~Potatoes~Nuts&Environmental+impact=Carbon+footprint&Metric=Per+kilogram&By+stage+of+supply+chain=false
https://figshare.com/articles/dataset/EDGAR-FOOD_emission_data/13476666
https://myemissions.green/food-carbon-footprint-calculator/
https://www.kaggle.com/selfvivek/choose-your-food-wisely/data
https://www.kaggle.com/amandaroseknudsen/foodproductemissions/version/1
https://eaternity.org/foodprint/database
https://www.nature.com/articles/s41597-021-00909-8
https://figshare.com/articles/dataset/SU-EATABLE_LIFE_a_comprehensive_database_of_carbon_and_water_footprints_of_food_commodities/13271111
###Code
food = [{"food_id":"504","food_category":"Cupboard","food":"Allspice","serving_weight":"3","serving_desc":"One teaspoon"},{"food_id":"482","food_category":"Cupboard","food":"Almond extract","serving_weight":"5","serving_desc":"1 teaspoon"},{"food_id":"190","food_category":"Nuts and seeds","food":"Almonds","serving_weight":"20","serving_desc":"About the amount that fits in your palm"},{"food_id":"291","food_category":"Drinks","food":"Americano","serving_weight":"120","serving_desc":"Small Americano"},{"food_id":"58","food_category":"Fish and seafood","food":"Anchovies","serving_weight":"80","serving_desc":"About 4 tablespoons"},{"food_id":"41","food_category":"Drinks","food":"Apple juice","serving_weight":"150","serving_desc":"One small glass"},{"food_id":"513","food_category":"Sauces and dressings","food":"Apple sauce","serving_weight":"40","serving_desc":"2 tablespoons"},{"food_id":"99","food_category":"Fruit","food":"Apples","serving_weight":"80","serving_desc":"One apple"},{"food_id":"100","food_category":"Fruit","food":"Apples - dried","serving_weight":"30","serving_desc":"One heaped serving spoon"},{"food_id":"101","food_category":"Fruit","food":"Apples - frozen","serving_weight":"80","serving_desc":"One apple"},{"food_id":"499","food_category":"Fruit","food":"Apricot","serving_weight":"0","serving_desc":""},{"food_id":"534","food_category":"Vegetables","food":"Artichoke","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"225","food_category":"Vegetables","food":"Asparagus","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"226","food_category":"Vegetables","food":"Asparagus - canned","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"227","food_category":"Vegetables","food":"Asparagus - frozen","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"228","food_category":"Vegetables","food":"Aubergine","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"102","food_category":"Fruit","food":"Avocados","serving_weight":"80","serving_desc":"Half an avocado"},{"food_id":"168","food_category":"Meat and alternatives","food":"Bacon","serving_weight":"75","serving_desc":"2 slices"},{"food_id":"1","food_category":"Bakery","food":"Bagel","serving_weight":"85","serving_desc":"1 bagel"},{"food_id":"2","food_category":"Bakery","food":"Baguette","serving_weight":"70","serving_desc":"1\/2 small baguette"},{"food_id":"212","food_category":"Processed food","food":"Baked beans","serving_weight":"200","serving_desc":"Half a standard 400g can"},{"food_id":"425","food_category":"Cupboard","food":"Baking powder","serving_weight":"4","serving_desc":"1 teaspoon"},{"food_id":"103","food_category":"Fruit","food":"Bananas","serving_weight":"80","serving_desc":"One banana"},{"food_id":"104","food_category":"Fruit","food":"Bananas - chipped","serving_weight":"30","serving_desc":"One handful "},{"food_id":"105","food_category":"Fruit","food":"Bananas - dried","serving_weight":"0","serving_desc":""},{"food_id":"354","food_category":"Sauces and dressings","food":"Barbeque sauce","serving_weight":"18","serving_desc":"1 tablespoon"},{"food_id":"146","food_category":"Grains, pulses, and legumes","food":"Barley","serving_weight":"0","serving_desc":""},{"food_id":"439","food_category":"Cupboard","food":"Basil","serving_weight":"0","serving_desc":""},{"food_id":"446","food_category":"Cupboard","food":"Bay 
leaves","serving_weight":"0","serving_desc":""},{"food_id":"169","food_category":"Meat and alternatives","food":"Beef","serving_weight":"125","serving_desc":"Use the size of pack as a guide"},{"food_id":"275","food_category":"Processed food","food":"Beef Lasagne - ready meal","serving_weight":"360","serving_desc":"Standard ready meal"},{"food_id":"273","food_category":"Processed food","food":"Beef Roast - ready meal","serving_weight":"360","serving_desc":"Standard-sezed ready meal"},{"food_id":"42","food_category":"Drinks","food":"Beer bottle","serving_weight":"500","serving_desc":"1 bottle"},{"food_id":"43","food_category":"Drinks","food":"Beer can","serving_weight":"330","serving_desc":"1 can"},{"food_id":"229","food_category":"Vegetables","food":"Beetroot","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"230","food_category":"Vegetables","food":"Beetroot - pickled","serving_weight":"80","serving_desc":""},{"food_id":"517","food_category":"Fruit","food":"Berries","serving_weight":"80","serving_desc":""},{"food_id":"424","food_category":"Cupboard","food":"Bicarbonate of soda","serving_weight":"4","serving_desc":"1 teaspoon"},{"food_id":"12","food_category":"Biscuits and confectionery","food":"Biscuits","serving_weight":"0","serving_desc":""},{"food_id":"430","food_category":"Grains, pulses, and legumes","food":"Black beans","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"432","food_category":"Cupboard","food":"Black pepper","serving_weight":"0","serving_desc":""},{"food_id":"55","food_category":"Drinks","food":"Black tea","serving_weight":"400","serving_desc":"Normal-sized cup of tea."},{"food_id":"106","food_category":"Fruit","food":"Blackberries","serving_weight":"80","serving_desc":""},{"food_id":"107","food_category":"Fruit","food":"Blackberries - frozen","serving_weight":"80","serving_desc":""},{"food_id":"108","food_category":"Fruit","food":"Blueberries","serving_weight":"80","serving_desc":""},{"food_id":"109","food_category":"Fruit","food":"Blueberries - frozen","serving_weight":"80","serving_desc":""},{"food_id":"359","food_category":"Sauces and dressings","food":"Bolognese sauce","serving_weight":"175","serving_desc":"Half a jar"},{"food_id":"44","food_category":"Drinks","food":"Bottled water","serving_weight":"500","serving_desc":"1 bottle"},{"food_id":"327","food_category":"Drinks","food":"Brandy","serving_weight":"50","serving_desc":"two 25ml shots"},{"food_id":"3","food_category":"Bakery","food":"Bread","serving_weight":"80","serving_desc":"2 medium slices"},{"food_id":"365","food_category":"Bakery","food":"Bread - brown","serving_weight":"80","serving_desc":"2 medium slices"},{"food_id":"366","food_category":"Bakery","food":"Bread - rye","serving_weight":"80","serving_desc":"2 medium slices"},{"food_id":"364","food_category":"Bakery","food":"Bread - white","serving_weight":"80","serving_desc":"2 medium slices"},{"food_id":"426","food_category":"Bakery","food":"Bread crumbs","serving_weight":"110","serving_desc":"One cup of bread crumbs"},{"food_id":"4","food_category":"Bakery","food":"Bread roll","serving_weight":"60","serving_desc":"2 small, 1 medium roll"},{"food_id":"368","food_category":"Bakery","food":"Bread roll - brown","serving_weight":"60","serving_desc":"2 small, 1 medium roll"},{"food_id":"367","food_category":"Bakery","food":"Bread roll - white","serving_weight":"60","serving_desc":"2 small, 1 medium roll"},{"food_id":"147","food_category":"Cupboard","food":"Breakfast 
cereal","serving_weight":"40","serving_desc":"About 3 handfuls"},{"food_id":"360","food_category":"Cupboard","food":"Breakfast cereal - non-wholegrain","serving_weight":"40","serving_desc":"About 3 handfuls"},{"food_id":"148","food_category":"Cupboard","food":"Breakfast cereal - wholegrain","serving_weight":"40","serving_desc":"About 3 handfulls"},{"food_id":"231","food_category":"Vegetables","food":"Broccoli","serving_weight":"80","serving_desc":"Two large broccoli spears (or 8 small florets)"},{"food_id":"232","food_category":"Vegetables","food":"Brussels Sprouts","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"233","food_category":"Vegetables","food":"Brussels sprouts - frozen","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"471","food_category":"Grains, pulses, and legumes","food":"Buckwheat","serving_weight":"0","serving_desc":"wheat"},{"food_id":"149","food_category":"Grains, pulses, and legumes","food":"Bulgar","serving_weight":"120","serving_desc":"About 6 tablespoons"},{"food_id":"28","food_category":"Dairy and alternatives","food":"Butter","serving_weight":"14","serving_desc":"1 tablespoon"},{"food_id":"29","food_category":"Dairy and alternatives","food":"Buttermilk","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"234","food_category":"Vegetables","food":"Cabbage","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"235","food_category":"Vegetables","food":"Cabbage - pickled","serving_weight":"80","serving_desc":""},{"food_id":"236","food_category":"Vegetables","food":"Cabbage - pre-cut","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"501","food_category":"Cupboard","food":"Cacao nibs","serving_weight":"0","serving_desc":""},{"food_id":"480","food_category":"Cupboard","food":"Cacao powder","serving_weight":"0","serving_desc":""},{"food_id":"5","food_category":"Bakery","food":"Cake","serving_weight":"0","serving_desc":""},{"food_id":"400","food_category":"Bakery","food":"Cake - chocolate","serving_weight":"0","serving_desc":""},{"food_id":"401","food_category":"Bakery","food":"Cake - fruit","serving_weight":"0","serving_desc":""},{"food_id":"403","food_category":"Bakery","food":"Cake - sponge","serving_weight":"0","serving_desc":""},{"food_id":"290","food_category":"Drinks","food":"Cappuccino","serving_weight":"140","serving_desc":"Small Cappuccino"},{"food_id":"237","food_category":"Vegetables","food":"Carrot","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"238","food_category":"Vegetables","food":"Carrots - canned","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"239","food_category":"Vegetables","food":"Carrots - frozen","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"240","food_category":"Vegetables","food":"Carrots - pre-chopped","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"191","food_category":"Nuts and seeds","food":"Cashew nuts","serving_weight":"20","serving_desc":"About the amount that fits in your palm"},{"food_id":"19","food_category":"Cupboard","food":"Cassava Starch","serving_weight":"0","serving_desc":""},{"food_id":"241","food_category":"Vegetables","food":"Cauliflower","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"242","food_category":"Vegetables","food":"Cauliflower - frozen","serving_weight":"80","serving_desc":"Three heaped 
serving spoons"},{"food_id":"243","food_category":"Vegetables","food":"Cauliflower - pre-cut","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"465","food_category":"Cupboard","food":"Cayenne pepper","serving_weight":"3","serving_desc":"One teaspoon"},{"food_id":"244","food_category":"Vegetables","food":"Celery","serving_weight":"80","serving_desc":"Three sticks of celery"},{"food_id":"30","food_category":"Dairy and alternatives","food":"Cheese","serving_weight":"30","serving_desc":"About the size of two thumbs"},{"food_id":"371","food_category":"Dairy and alternatives","food":"Cheese - brie","serving_weight":"30","serving_desc":"About the size of two thumbs"},{"food_id":"377","food_category":"Dairy and alternatives","food":"Cheese - camambert","serving_weight":"30","serving_desc":"About the size of two thumbs"},{"food_id":"370","food_category":"Dairy and alternatives","food":"Cheese - cheddar","serving_weight":"30","serving_desc":"About the size of two thumbs"},{"food_id":"384","food_category":"Dairy and alternatives","food":"Cheese - cottage","serving_weight":"30","serving_desc":"About the size of two thumbs"},{"food_id":"383","food_category":"Dairy and alternatives","food":"Cheese - cream","serving_weight":"30","serving_desc":"About the size of two thumbs"},{"food_id":"375","food_category":"Dairy and alternatives","food":"Cheese - edam","serving_weight":"30","serving_desc":"About the size of two thumbs"},{"food_id":"379","food_category":"Dairy and alternatives","food":"Cheese - emmental","serving_weight":"30","serving_desc":"About the size of two thumbs"},{"food_id":"378","food_category":"Dairy and alternatives","food":"Cheese - feta","serving_weight":"30","serving_desc":"About the size of two thumbs"},{"food_id":"376","food_category":"Dairy and alternatives","food":"Cheese - goats","serving_weight":"30","serving_desc":"About the size of two thumbs"},{"food_id":"372","food_category":"Dairy and alternatives","food":"Cheese - gouda","serving_weight":"30","serving_desc":"About the size of two thumbs"},{"food_id":"374","food_category":"Dairy and alternatives","food":"Cheese - mascarpone","serving_weight":"30","serving_desc":"About the size of two thumbs"},{"food_id":"382","food_category":"Dairy and alternatives","food":"Cheese - mozzarella","serving_weight":"30","serving_desc":"About the size of two thumbs"},{"food_id":"380","food_category":"Dairy and alternatives","food":"Cheese - paneer","serving_weight":"30","serving_desc":"About the size of two thumbs"},{"food_id":"381","food_category":"Dairy and alternatives","food":"Cheese - parmesan","serving_weight":"30","serving_desc":"About the size of two thumbs"},{"food_id":"373","food_category":"Dairy and alternatives","food":"Cheese - prato","serving_weight":"30","serving_desc":"About the size of two thumbs"},{"food_id":"385","food_category":"Dairy and alternatives","food":"Cheese - quark","serving_weight":"30","serving_desc":"About the size of two thumbs"},{"food_id":"369","food_category":"Dairy and alternatives","food":"Cheese - stilton","serving_weight":"30","serving_desc":"About the size of two thumbs"},{"food_id":"6","food_category":"Bakery","food":"Cheesecake","serving_weight":"0","serving_desc":""},{"food_id":"467","food_category":"Fruit","food":"Cherries","serving_weight":"32","serving_desc":"One handful, eight cherries"},{"food_id":"468","food_category":"Fruit","food":"Cherries - frozen","serving_weight":"32","serving_desc":"One handful, eight cherries"},{"food_id":"193","food_category":"Nuts and 
seeds","food":"Chestnuts","serving_weight":"20","serving_desc":"About the amount that fits in your palm"},{"food_id":"466","food_category":"Nuts and seeds","food":"Chia seeds","serving_weight":"0","serving_desc":""},{"food_id":"170","food_category":"Meat and alternatives","food":"Chicken","serving_weight":"160","serving_desc":"About half the size of your hand"},{"food_id":"277","food_category":"Processed food","food":"Chicken Korma Curry - ready meal","serving_weight":"360","serving_desc":"Standard ready meal"},{"food_id":"278","food_category":"Processed food","food":"Chicken Noodles - ready meal","serving_weight":"360","serving_desc":"Standard ready meal"},{"food_id":"279","food_category":"Processed food","food":"Chicken Roast - ready meal","serving_weight":"360","serving_desc":"Standard ready meal"},{"food_id":"150","food_category":"Grains, pulses, and legumes","food":"Chickpeas","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"441","food_category":"Cupboard","food":"Chilli flakes","serving_weight":"0","serving_desc":""},{"food_id":"422","food_category":"Vegetables","food":"Chilli pepper","serving_weight":"15","serving_desc":"Small, fresh chilli pepper"},{"food_id":"423","food_category":"Vegetables","food":"Chilli pepper - jalepeno","serving_weight":"15","serving_desc":"Small, fresh chilli pepper"},{"food_id":"448","food_category":"Cupboard","food":"Chilli powder","serving_weight":"3","serving_desc":"One teaspoon"},{"food_id":"213","food_category":"Processed food","food":"Chips","serving_weight":"80","serving_desc":""},{"food_id":"494","food_category":"Cupboard","food":"Chives","serving_weight":"0","serving_desc":""},{"food_id":"13","food_category":"Biscuits and confectionery","food":"Chocolate","serving_weight":"30","serving_desc":"6 squares of chocolate"},{"food_id":"14","food_category":"Biscuits and confectionery","food":"Chocolate biscuit","serving_weight":"0","serving_desc":""},{"food_id":"415","food_category":"Biscuits and confectionery","food":"Chocolate chips","serving_weight":"0","serving_desc":""},{"food_id":"348","food_category":"Spreads, jams, and honeys","food":"Chocolate spread","serving_weight":"37","serving_desc":"2 tablespoons"},{"food_id":"545","food_category":"Sauces and dressings","food":"Chutney","serving_weight":"40","serving_desc":"2 tablespoons"},{"food_id":"488","food_category":"Drinks","food":"Cider bottle","serving_weight":"500","serving_desc":"1 bottle"},{"food_id":"489","food_category":"Drinks","food":"Cider can","serving_weight":"330","serving_desc":"1 can"},{"food_id":"433","food_category":"Cupboard","food":"Cinnamon","serving_weight":"3","serving_desc":"One teaspoon"},{"food_id":"59","food_category":"Fish and seafood","food":"Clams","serving_weight":"0","serving_desc":""},{"food_id":"492","food_category":"Cupboard","food":"Cloves","serving_weight":"3","serving_desc":"One teaspoon"},{"food_id":"60","food_category":"Fish and seafood","food":"Cockles","serving_weight":"0","serving_desc":""},{"food_id":"171","food_category":"Meat and alternatives","food":"Cocktail sausage","serving_weight":"45","serving_desc":"4 cocktail sausages"},{"food_id":"478","food_category":"Cupboard","food":"Cocoa drink powder","serving_weight":"0","serving_desc":""},{"food_id":"198","food_category":"Oils","food":"Coconut oil","serving_weight":"14","serving_desc":"1 tablespoon oil"},{"food_id":"473","food_category":"Cupboard","food":"Coconut sugar","serving_weight":"4","serving_desc":"1 teaspoon"},{"food_id":"61","food_category":"Fish and 
seafood","food":"Cod","serving_weight":"170","serving_desc":"The size of half to a whole hand"},{"food_id":"516","food_category":"Cupboard","food":"Coffee beans","serving_weight":"0","serving_desc":""},{"food_id":"62","food_category":"Fish and seafood","food":"Coley","serving_weight":"170","serving_desc":"The size of half to a whole hand"},{"food_id":"476","food_category":"Cupboard","food":"Coriander","serving_weight":"3","serving_desc":"One teaspoon"},{"food_id":"151","food_category":"Grains, pulses, and legumes","food":"Corn","serving_weight":"120","serving_desc":"About 6 tablespoons"},{"food_id":"214","food_category":"Processed food","food":"Corn crisps","serving_weight":"80","serving_desc":"One bag "},{"food_id":"199","food_category":"Oils","food":"Corn oil","serving_weight":"14","serving_desc":"1 tablespoon oil"},{"food_id":"434","food_category":"Cupboard","food":"Cornflour","serving_weight":"0","serving_desc":""},{"food_id":"274","food_category":"Processed food","food":"Cottage Pie - ready meal","serving_weight":"360","serving_desc":"Standard ready meal"},{"food_id":"200","food_category":"Oils","food":"Cottonseed oil","serving_weight":"14","serving_desc":"1 tablespoon oil"},{"food_id":"245","food_category":"Vegetables","food":"Courgette","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"152","food_category":"Grains, pulses, and legumes","food":"Couscous","serving_weight":"120","serving_desc":"About 6 tablespoons"},{"food_id":"63","food_category":"Fish and seafood","food":"Crab","serving_weight":"0","serving_desc":""},{"food_id":"15","food_category":"Biscuits and confectionery","food":"Crackers","serving_weight":"24","serving_desc":"3 crackers"},{"food_id":"110","food_category":"Fruit","food":"Cranberries","serving_weight":"80","serving_desc":""},{"food_id":"111","food_category":"Fruit","food":"Cranberries - frozen","serving_weight":"80","serving_desc":""},{"food_id":"64","food_category":"Fish and seafood","food":"Crayfish","serving_weight":"0","serving_desc":""},{"food_id":"418","food_category":"Dairy and alternatives","food":"Cream - clotted","serving_weight":"50","serving_desc":""},{"food_id":"416","food_category":"Dairy and alternatives","food":"Cream - double","serving_weight":"50","serving_desc":""},{"food_id":"35","food_category":"Dairy and alternatives","food":"Cream - plant-based","serving_weight":"50","serving_desc":""},{"food_id":"31","food_category":"Dairy and alternatives","food":"Cream - single","serving_weight":"50","serving_desc":""},{"food_id":"417","food_category":"Dairy and alternatives","food":"Cream - whipping","serving_weight":"50","serving_desc":""},{"food_id":"409","food_category":"Bakery","food":"Crepe","serving_weight":"0","serving_desc":""},{"food_id":"215","food_category":"Processed food","food":"Crisps","serving_weight":"25","serving_desc":"A small multipack bag of crisps"},{"food_id":"7","food_category":"Bakery","food":"Crumpet","serving_weight":"50","serving_desc":"1 crumpet"},{"food_id":"246","food_category":"Vegetables","food":"Cucumber","serving_weight":"80","serving_desc":"5cm piece of cucumber"},{"food_id":"247","food_category":"Vegetables","food":"Cucumber - pickled","serving_weight":"80","serving_desc":""},{"food_id":"464","food_category":"Cupboard","food":"Cumin","serving_weight":"3","serving_desc":"One teaspoon"},{"food_id":"490","food_category":"Fruit","food":"Currants","serving_weight":"80","serving_desc":"One heaped serving spoon"},{"food_id":"216","food_category":"Processed 
food","food":"Custard","serving_weight":"0","serving_desc":""},{"food_id":"463","food_category":"Fruit","food":"Date","serving_weight":"48","serving_desc":"Two dates"},{"food_id":"483","food_category":"Cupboard","food":"Dill","serving_weight":"0","serving_desc":""},{"food_id":"404","food_category":"Bakery","food":"Doughnut","serving_weight":"0","serving_desc":""},{"food_id":"248","food_category":"Vegetables","food":"Dried peas (shelled)","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"172","food_category":"Meat and alternatives","food":"Duck","serving_weight":"160","serving_desc":"About half the size of your hand"},{"food_id":"65","food_category":"Fish and seafood","food":"Eel","serving_weight":"0","serving_desc":""},{"food_id":"57","food_category":"Eggs","food":"Eggs","serving_weight":"120","serving_desc":"2 eggs"},{"food_id":"288","food_category":"Drinks","food":"Espresso","serving_weight":"30","serving_desc":"One shot of espresso"},{"food_id":"153","food_category":"Grains, pulses, and legumes","food":"Faba beans","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"173","food_category":"Meat and alternatives","food":"Falafel","serving_weight":"40","serving_desc":"2 falafels"},{"food_id":"456","food_category":"Nuts and seeds","food":"Fennel seeds","serving_weight":"0","serving_desc":""},{"food_id":"356","food_category":"Sauces and dressings","food":"Fish sauce","serving_weight":"18","serving_desc":"1 tablespoon"},{"food_id":"66","food_category":"Fish and seafood","food":"Fish sticks","serving_weight":"100","serving_desc":""},{"food_id":"280","food_category":"Processed food","food":"Fisherman Pie","serving_weight":"360","serving_desc":"Standard ready meal"},{"food_id":"503","food_category":"Cupboard","food":"Five spice","serving_weight":"3","serving_desc":"One teaspoon"},{"food_id":"398","food_category":"Biscuits and confectionery","food":"Flapjacks","serving_weight":"0","serving_desc":""},{"food_id":"292","food_category":"Drinks","food":"Flat White","serving_weight":"160","serving_desc":"Small Flat White"},{"food_id":"67","food_category":"Fish and seafood","food":"Flatfish","serving_weight":"170","serving_desc":"The size of half to a whole hand"},{"food_id":"68","food_category":"Fish and seafood","food":"Flounder","serving_weight":"170","serving_desc":"The size of half to a whole hand"},{"food_id":"167","food_category":"Cupboard","food":"Flour - plain","serving_weight":"0","serving_desc":""},{"food_id":"323","food_category":"Cupboard","food":"Flour - rye","serving_weight":"0","serving_desc":""},{"food_id":"322","food_category":"Cupboard","food":"Flour - self-raising","serving_weight":"0","serving_desc":""},{"food_id":"533","food_category":"Cupboard","food":"Food colouring","serving_weight":"5","serving_desc":"1 teaspoon"},{"food_id":"69","food_category":"Fish and seafood","food":"Frozen fish fingers","serving_weight":"90","serving_desc":"3 fish fingers"},{"food_id":"154","food_category":"Grains, pulses, and legumes","food":"Frozen pulses","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"543","food_category":"Drinks","food":"Fruit cordial","serving_weight":"50","serving_desc":"Enough cordial for a medium glass"},{"food_id":"529","food_category":"Drinks","food":"Fruit juice","serving_weight":"150","serving_desc":"One small glass"},{"food_id":"8","food_category":"Bakery","food":"Fruit pie","serving_weight":"0","serving_desc":""},{"food_id":"332","food_category":"Drinks","food":"Fruit 
tea","serving_weight":"400","serving_desc":"Normal-sized cup of tea."},{"food_id":"535","food_category":"Biscuits and confectionery","food":"Fudge","serving_weight":"0","serving_desc":""},{"food_id":"174","food_category":"Meat and alternatives","food":"Gammon","serving_weight":"150","serving_desc":"Use the size of pack as a guide"},{"food_id":"502","food_category":"Cupboard","food":"Garam Masala","serving_weight":"3","serving_desc":"One teaspoon"},{"food_id":"321","food_category":"Vegetables","food":"Garlic","serving_weight":"3","serving_desc":"1 clove"},{"food_id":"459","food_category":"Vegetables","food":"Ginger","serving_weight":"7","serving_desc":"One thumb or a tablespoon of chopped ginger"},{"food_id":"16","food_category":"Biscuits and confectionery","food":"Gluten-free biscuit","serving_weight":"0","serving_desc":""},{"food_id":"175","food_category":"Meat and alternatives","food":"Goat","serving_weight":"160","serving_desc":"About half the size of your hand"},{"food_id":"428","food_category":"Spreads, jams, and honeys","food":"Golden syrup","serving_weight":"22","serving_desc":"1 tablespoon"},{"food_id":"176","food_category":"Meat and alternatives","food":"Goose","serving_weight":"160","serving_desc":"About half the size of your hand"},{"food_id":"362","food_category":"Cupboard","food":"Granola","serving_weight":"40","serving_desc":"About 3 handfuls"},{"food_id":"49","food_category":"Drinks","food":"Grapefruit juice","serving_weight":"150","serving_desc":"One small glass"},{"food_id":"112","food_category":"Fruit","food":"Grapefruits","serving_weight":"80","serving_desc":"Half a grapefruit"},{"food_id":"113","food_category":"Fruit","food":"Grapes","serving_weight":"80","serving_desc":""},{"food_id":"350","food_category":"Sauces and dressings","food":"Gravy - beef","serving_weight":"50","serving_desc":""},{"food_id":"387","food_category":"Sauces and dressings","food":"Gravy - chicken","serving_weight":"50","serving_desc":""},{"food_id":"388","food_category":"Sauces and dressings","food":"Gravy - vegetable","serving_weight":"50","serving_desc":""},{"food_id":"249","food_category":"Vegetables","food":"Green beans","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"250","food_category":"Vegetables","food":"Green beans - canned","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"251","food_category":"Vegetables","food":"Green beans - frozen","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"330","food_category":"Drinks","food":"Green tea","serving_weight":"400","serving_desc":"Normal-sized cup of tea."},{"food_id":"201","food_category":"Oils","food":"Groundnut oil","serving_weight":"14","serving_desc":"1 tablespoon oil"},{"food_id":"177","food_category":"Meat and alternatives","food":"Guinea fowl","serving_weight":"125","serving_desc":"About half the size of your hand"},{"food_id":"70","food_category":"Fish and seafood","food":"Haddock","serving_weight":"170","serving_desc":"The size of half to a whole hand"},{"food_id":"71","food_category":"Fish and seafood","food":"Hake","serving_weight":"170","serving_desc":"The size of half to a whole hand"},{"food_id":"72","food_category":"Fish and seafood","food":"Halibut","serving_weight":"170","serving_desc":"The size of half to a whole hand"},{"food_id":"178","food_category":"Meat and alternatives","food":"Ham","serving_weight":"30","serving_desc":"2 slices"},{"food_id":"469","food_category":"Grains, pulses, and legumes","food":"Haricot 
beans","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"220","food_category":"Spreads, jams, and honeys","food":"Hazelnut paste","serving_weight":"40","serving_desc":"2 tablespoons"},{"food_id":"221","food_category":"Spreads, jams, and honeys","food":"Hazelnut spread","serving_weight":"40","serving_desc":"2 tablespoons"},{"food_id":"194","food_category":"Nuts and seeds","food":"Hazelnuts - chocolate covered","serving_weight":"20","serving_desc":"About the amount that fits in your palm"},{"food_id":"329","food_category":"Drinks","food":"Herbal tea","serving_weight":"400","serving_desc":"Normal-sized cup of tea."},{"food_id":"20","food_category":"Cupboard","food":"Herbs and spices","serving_weight":"0","serving_desc":""},{"food_id":"73","food_category":"Fish and seafood","food":"Herring","serving_weight":"170","serving_desc":"The size of half to a whole hand"},{"food_id":"74","food_category":"Fish and seafood","food":"Hoki","serving_weight":"170","serving_desc":"The size of half to a whole hand"},{"food_id":"222","food_category":"Spreads, jams, and honeys","food":"Honey","serving_weight":"40","serving_desc":"2 tablespoons"},{"food_id":"46","food_category":"Drinks","food":"Hot chocolate - made with milk","serving_weight":"0","serving_desc":""},{"food_id":"45","food_category":"Drinks","food":"Hot chocolate - made with water","serving_weight":"0","serving_desc":""},{"food_id":"461","food_category":"Sauces and dressings","food":"Hummus","serving_weight":"16","serving_desc":"One tablespoon"},{"food_id":"410","food_category":"Dairy and alternatives","food":"Ice cream","serving_weight":"70","serving_desc":"1 scoop of ice cream"},{"food_id":"487","food_category":"Cupboard","food":"Icing - to roll","serving_weight":"0","serving_desc":""},{"food_id":"286","food_category":"Drinks","food":"Instant Coffee - black","serving_weight":"400","serving_desc":"Normal-sized mug of coffee"},{"food_id":"287","food_category":"Drinks","food":"Instant Coffee - with milk","serving_weight":"400","serving_desc":"Normal-sized mug of coffee"},{"food_id":"223","food_category":"Spreads, jams, and honeys","food":"Jam","serving_weight":"40","serving_desc":"2 tablespoons"},{"food_id":"331","food_category":"Drinks","food":"Jasmin tea","serving_weight":"400","serving_desc":"Normal-sized cup of tea."},{"food_id":"474","food_category":"Vegetables","food":"Kale","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"155","food_category":"Grains, pulses, and legumes","food":"Kidney beans","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"75","food_category":"Fish and seafood","food":"Kipper","serving_weight":"170","serving_desc":"The size of half to a whole hand"},{"food_id":"333","food_category":"Fruit","food":"Kiwi","serving_weight":"76","serving_desc":"Average weight of a medium kiwi fruit"},{"food_id":"76","food_category":"Fish and seafood","food":"Krill","serving_weight":"0","serving_desc":""},{"food_id":"179","food_category":"Meat and alternatives","food":"Lamb","serving_weight":"140","serving_desc":"Use the size of pack as a guide"},{"food_id":"282","food_category":"Processed food","food":"Lamb Masala Curry - ready meal","serving_weight":"360","serving_desc":"Standard ready meal"},{"food_id":"283","food_category":"Processed food","food":"Lamb Roast - ready meal","serving_weight":"360","serving_desc":"Standard ready meal"},{"food_id":"505","food_category":"Cupboard","food":"Lard","serving_weight":"16","serving_desc":"1 
tablespoon"},{"food_id":"289","food_category":"Drinks","food":"Latte","serving_weight":"240","serving_desc":"Small Latte"},{"food_id":"342","food_category":"Vegetables","food":"Leek","serving_weight":"80","serving_desc":""},{"food_id":"50","food_category":"Drinks","food":"Lemon Juice","serving_weight":"0","serving_desc":""},{"food_id":"77","food_category":"Fish and seafood","food":"Lemon sole","serving_weight":"170","serving_desc":"The size of half to a whole hand"},{"food_id":"114","food_category":"Fruit","food":"Lemons","serving_weight":"80","serving_desc":"One lemon"},{"food_id":"156","food_category":"Grains, pulses, and legumes","food":"Lentils","serving_weight":"120","serving_desc":"About 6 tablespoons"},{"food_id":"252","food_category":"Vegetables","food":"Lettuce","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"253","food_category":"Vegetables","food":"Lettuce - pre-cut","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"440","food_category":"Fruit","food":"Lime","serving_weight":"45","serving_desc":"One lime"},{"food_id":"477","food_category":"Drinks","food":"Lime Juice","serving_weight":"0","serving_desc":""},{"food_id":"412","food_category":"Biscuits and confectionery","food":"Liquorice","serving_weight":"0","serving_desc":""},{"food_id":"78","food_category":"Fish and seafood","food":"Lobster","serving_weight":"0","serving_desc":""},{"food_id":"528","food_category":"Grains, pulses, and legumes","food":"Macaroni","serving_weight":"75","serving_desc":"2 handfuls. Use your finger and thumb to measure a bunch the size of a \u00a31 coin"},{"food_id":"79","food_category":"Fish and seafood","food":"Mackerel","serving_weight":"170","serving_desc":"The size of half to a whole hand"},{"food_id":"157","food_category":"Grains, pulses, and legumes","food":"Maize","serving_weight":"120","serving_desc":"About 6 tablespoons"},{"food_id":"115","food_category":"Fruit","food":"Mandarins","serving_weight":"80","serving_desc":"Two mandarins"},{"food_id":"116","food_category":"Fruit","food":"Mandarins - canned","serving_weight":"80","serving_desc":"Half a can "},{"food_id":"117","food_category":"Fruit","food":"Mangoes","serving_weight":"80","serving_desc":"Two slices of mango (5cm slices)."},{"food_id":"118","food_category":"Fruit","food":"Mangoes - canned","serving_weight":"80","serving_desc":"Half a can "},{"food_id":"119","food_category":"Fruit","food":"Mangoes - dried","serving_weight":"30","serving_desc":"One heaped serving spoon"},{"food_id":"120","food_category":"Fruit","food":"Mangoes - frozen","serving_weight":"80","serving_desc":"Two slices of mango (5cm slices)."},{"food_id":"429","food_category":"Spreads, jams, and honeys","food":"Maple syrup","serving_weight":"22","serving_desc":"1 tablespoon"},{"food_id":"349","food_category":"Spreads, jams, and honeys","food":"Marmalade","serving_weight":"40","serving_desc":"2 tablespoons"},{"food_id":"347","food_category":"Sauces and dressings","food":"Marmite","serving_weight":"8","serving_desc":"one tablespoon"},{"food_id":"532","food_category":"Biscuits and confectionery","food":"Marshmallows","serving_weight":"0","serving_desc":""},{"food_id":"536","food_category":"Cupboard","food":"Marzipan","serving_weight":"0","serving_desc":""},{"food_id":"344","food_category":"Sauces and dressings","food":"Mayonnaise","serving_weight":"14","serving_desc":"one tablespoon"},{"food_id":"180","food_category":"Meat and 
alternatives","food":"Meatless","serving_weight":"0","serving_desc":""},{"food_id":"121","food_category":"Fruit","food":"Melons","serving_weight":"80","serving_desc":"One 5cm slice of melon"},{"food_id":"33","food_category":"Dairy and alternatives","food":"Milk","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"26","food_category":"Dairy and alternatives","food":"Milk - almond","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"27","food_category":"Dairy and alternatives","food":"Milk - buffalo","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"34","food_category":"Dairy and alternatives","food":"Milk - concentrated","serving_weight":"0","serving_desc":""},{"food_id":"32","food_category":"Dairy and alternatives","food":"Milk - goats","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"453","food_category":"Dairy and alternatives","food":"Milk - plant-based","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"37","food_category":"Dairy and alternatives","food":"Milk - sheeps","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"39","food_category":"Dairy and alternatives","food":"Milk - soy","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"435","food_category":"Cupboard","food":"Mint","serving_weight":"0","serving_desc":""},{"food_id":"413","food_category":"Biscuits and confectionery","food":"Mints","serving_weight":"0","serving_desc":""},{"food_id":"452","food_category":"Sauces and dressings","food":"Mirin","serving_weight":"16","serving_desc":"1 tablespoon"},{"food_id":"451","food_category":"Cupboard","food":"Miso paste","serving_weight":"15","serving_desc":"1 tablespoon"},{"food_id":"521","food_category":"Nuts and seeds","food":"Mixed nuts","serving_weight":"20","serving_desc":"About the amount that fits in your palm"},{"food_id":"523","food_category":"Nuts and seeds","food":"Mixed seeds","serving_weight":"0","serving_desc":""},{"food_id":"293","food_category":"Drinks","food":"Mocha","serving_weight":"120","serving_desc":"Small Mocha"},{"food_id":"361","food_category":"Cupboard","food":"Muesli","serving_weight":"40","serving_desc":"About 3 handfuls"},{"food_id":"158","food_category":"Grains, pulses, and legumes","food":"Muesli","serving_weight":"50","serving_desc":"About 3 handfuls"},{"food_id":"405","food_category":"Bakery","food":"Muffin","serving_weight":"0","serving_desc":""},{"food_id":"343","food_category":"Vegetables","food":"Mushrooms","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"80","food_category":"Fish and seafood","food":"Mussels","serving_weight":"0","serving_desc":""},{"food_id":"460","food_category":"Sauces and dressings","food":"Mustard","serving_weight":"16","serving_desc":"1 tablespoon"},{"food_id":"202","food_category":"Oils","food":"Mustard oil","serving_weight":"14","serving_desc":"1 tablespoon oil"},{"food_id":"9","food_category":"Bakery","food":"Naan bread - plain","serving_weight":"70","serving_desc":"1\/2 naan"},{"food_id":"122","food_category":"Fruit","food":"Nectarines","serving_weight":"80","serving_desc":"One nectarine"},{"food_id":"123","food_category":"Fruit","food":"Nectarines - canned","serving_weight":"80","serving_desc":"Half a can 
"},{"food_id":"124","food_category":"Fruit","food":"Nectarines - frozen","serving_weight":"80","serving_desc":"One nectarine"},{"food_id":"393","food_category":"Grains, pulses, and legumes","food":"Noodles","serving_weight":"75","serving_desc":"Weight of uncooked noodles, usually ready-portioned"},{"food_id":"443","food_category":"Cupboard","food":"Nutmeg","serving_weight":"3","serving_desc":"One teaspoon"},{"food_id":"363","food_category":"Cupboard","food":"Oat flakes","serving_weight":"40","serving_desc":"About 3 handfuls"},{"food_id":"296","food_category":"Dairy and alternatives","food":"Oatly Oat Drink","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"297","food_category":"Dairy and alternatives","food":"Oatly Oat Drink - Barista Edition","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"298","food_category":"Dairy and alternatives","food":"Oatly Oat Drink - Calcium","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"299","food_category":"Dairy and alternatives","food":"Oatly Oat Drink - Calcium Organic","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"300","food_category":"Dairy and alternatives","food":"Oatly Oat Drink - Chocolate","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"301","food_category":"Dairy and alternatives","food":"Oatly Oat Drink - Chocolate Deluxe","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"302","food_category":"Dairy and alternatives","food":"Oatly Oat Drink - Deluxe","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"311","food_category":"Dairy and alternatives","food":"Oatly Oat Drink - Deluxe (Germany, Austria, Netherlands)","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"303","food_category":"Dairy and alternatives","food":"Oatly Oat Drink - Low-Fat","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"304","food_category":"Dairy and alternatives","food":"Oatly Oat Drink - Low-Fat Organic","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"305","food_category":"Dairy and alternatives","food":"Oatly Oat Drink - Orange Mango","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"306","food_category":"Dairy and alternatives","food":"Oatly Oat Drink - Organic","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"307","food_category":"Dairy and alternatives","food":"Oatly Oat Drink - Semi-skimmed","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"308","food_category":"Dairy and alternatives","food":"Oatly Oat Drink - Semi-skimmed Organic","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"309","food_category":"Dairy and alternatives","food":"Oatly Oat Drink - Vanilla","serving_weight":"200","serving_desc":"One small glass. Milk on cereal would be 125ml"},{"food_id":"310","food_category":"Dairy and alternatives","food":"Oatly Oat Drink - Vanilla Organic","serving_weight":"200","serving_desc":"One small glass. 
Milk on cereal would be 125ml"},{"food_id":"159","food_category":"Grains, pulses, and legumes","food":"Oats","serving_weight":"120","serving_desc":"About 6 tablespoons"},{"food_id":"81","food_category":"Fish and seafood","food":"Octopus","serving_weight":"0","serving_desc":""},{"food_id":"203","food_category":"Oils","food":"Olive oil","serving_weight":"14","serving_desc":"1 tablespoon oil"},{"food_id":"334","food_category":"Fruit","food":"Olives","serving_weight":"20","serving_desc":"Average weight of 5 olives"},{"food_id":"254","food_category":"Vegetables","food":"Onion","serving_weight":"80","serving_desc":"1 onion"},{"food_id":"255","food_category":"Vegetables","food":"Onion - frozen","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"256","food_category":"Vegetables","food":"Onion - pickled","serving_weight":"80","serving_desc":""},{"food_id":"51","food_category":"Drinks","food":"Orange juice","serving_weight":"150","serving_desc":"One small glass"},{"food_id":"125","food_category":"Fruit","food":"Oranges","serving_weight":"80","serving_desc":"One orange"},{"food_id":"438","food_category":"Cupboard","food":"Oregano","serving_weight":"0","serving_desc":""},{"food_id":"160","food_category":"Grains, pulses, and legumes","food":"Other pulses","serving_weight":"120","serving_desc":"About 6 tablespoons"},{"food_id":"54","food_category":"Drinks","food":"Other spirits","serving_weight":"50","serving_desc":"two 25ml shots"},{"food_id":"355","food_category":"Sauces and dressings","food":"Oyster sauce","serving_weight":"18","serving_desc":"1 tablespoon"},{"food_id":"82","food_category":"Fish and seafood","food":"Oysters","serving_weight":"0","serving_desc":""},{"food_id":"204","food_category":"Oils","food":"Palm oil","serving_weight":"14","serving_desc":"1 tablespoon oil"},{"food_id":"407","food_category":"Bakery","food":"Pancake","serving_weight":"0","serving_desc":""},{"food_id":"525","food_category":"Meat and alternatives","food":"Pancetta","serving_weight":"30","serving_desc":"2 slices"},{"food_id":"83","food_category":"Fish and seafood","food":"Pangasius","serving_weight":"0","serving_desc":""},{"food_id":"542","food_category":"Fruit","food":"Papaya","serving_weight":"80","serving_desc":""},{"food_id":"437","food_category":"Cupboard","food":"Paprika","serving_weight":"0","serving_desc":""},{"food_id":"436","food_category":"Cupboard","food":"Parsley","serving_weight":"0","serving_desc":""},{"food_id":"335","food_category":"Vegetables","food":"Parsnips","serving_weight":"80","serving_desc":"Recommended portion size"},{"food_id":"497","food_category":"Fruit","food":"Passata","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"161","food_category":"Grains, pulses, and legumes","food":"Pasta","serving_weight":"75","serving_desc":"2 handfuls. For spaghetti: use your finger and thumb to measure a bunch the size of a \u00a31 coin"},{"food_id":"392","food_category":"Grains, pulses, and legumes","food":"Pasta - wholemeal","serving_weight":"75","serving_desc":"2 handfuls. 
For spaghetti: use your finger and thumb to measure a bunch the size of a \u00a31 coin"},{"food_id":"406","food_category":"Bakery","food":"Pastry","serving_weight":"0","serving_desc":""},{"food_id":"126","food_category":"Fruit","food":"Peaches","serving_weight":"80","serving_desc":"One peach"},{"food_id":"127","food_category":"Fruit","food":"Peaches - canned","serving_weight":"80","serving_desc":"Half a can "},{"food_id":"128","food_category":"Fruit","food":"Peaches - frozen","serving_weight":"80","serving_desc":"One peach"},{"food_id":"195","food_category":"Nuts and seeds","food":"Peanut","serving_weight":"20","serving_desc":"About the amount that fits in your palm"},{"food_id":"224","food_category":"Spreads, jams, and honeys","food":"Peanut butter","serving_weight":"40","serving_desc":"2 tablespoons"},{"food_id":"205","food_category":"Oils","food":"Peanut oil","serving_weight":"14","serving_desc":"1 tablespoon oil"},{"food_id":"129","food_category":"Fruit","food":"Pears","serving_weight":"80","serving_desc":"One pear"},{"food_id":"130","food_category":"Fruit","food":"Pears - canned","serving_weight":"80","serving_desc":"Half a can "},{"food_id":"131","food_category":"Fruit","food":"Pears - dried","serving_weight":"30","serving_desc":"One heaped serving spoon"},{"food_id":"257","food_category":"Vegetables","food":"Peas","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"258","food_category":"Vegetables","food":"Peas - dried and canned","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"259","food_category":"Vegetables","food":"Peas - frozen","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"260","food_category":"Vegetables","food":"Peas - shelled and canned","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"338","food_category":"Vegetables","food":"Peas - split, canned","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"470","food_category":"Nuts and seeds","food":"Pecan","serving_weight":"20","serving_desc":"About the amount that fits in your palm"},{"food_id":"508","food_category":"Cupboard","food":"Peppermint extract","serving_weight":"5","serving_desc":"1 teaspoon"},{"food_id":"421","food_category":"Vegetables","food":"Peppers","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"420","food_category":"Vegetables","food":"Peppers - green","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"262","food_category":"Vegetables","food":"Peppers - pickled","serving_weight":"80","serving_desc":""},{"food_id":"261","food_category":"Vegetables","food":"Peppers - red","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"537","food_category":"Sauces and dressings","food":"Pesto","serving_weight":"14","serving_desc":"1 tablespoon"},{"food_id":"52","food_category":"Drinks","food":"Pineapple juice","serving_weight":"150","serving_desc":"One small glass"},{"food_id":"132","food_category":"Fruit","food":"Pineapples","serving_weight":"80","serving_desc":"One large slice of pineapple"},{"food_id":"133","food_category":"Fruit","food":"Pineapples - canned","serving_weight":"80","serving_desc":"One large slice of pineapple"},{"food_id":"134","food_category":"Fruit","food":"Pineapples - dried","serving_weight":"30","serving_desc":"One heaped serving spoon"},{"food_id":"196","food_category":"Nuts and seeds","food":"Pistachio","serving_weight":"20","serving_desc":"About the amount that 
fits in your palm"},{"food_id":"10","food_category":"Bakery","food":"Pitta bread","serving_weight":"60","serving_desc":"2 small, one medium pitta"},{"food_id":"217","food_category":"Processed food","food":"Pizza","serving_weight":"250","serving_desc":"1 small\/medium sized pizza"},{"food_id":"84","food_category":"Fish and seafood","food":"Plaice","serving_weight":"170","serving_desc":"The size of half to a whole hand"},{"food_id":"36","food_category":"Dairy and alternatives","food":"Plant-based spread","serving_weight":"14","serving_desc":"1 tablespoon"},{"food_id":"135","food_category":"Fruit","food":"Plums","serving_weight":"80","serving_desc":"Two plums"},{"food_id":"136","food_category":"Fruit","food":"Plums - canned","serving_weight":"80","serving_desc":"Half a can "},{"food_id":"137","food_category":"Fruit","food":"Plums - dried","serving_weight":"30","serving_desc":"One heaped serving spoon"},{"food_id":"500","food_category":"Grains, pulses, and legumes","food":"Polenta","serving_weight":"120","serving_desc":"About 6 tablespoons"},{"food_id":"85","food_category":"Fish and seafood","food":"Pollock","serving_weight":"170","serving_desc":"The size of half to a whole hand"},{"food_id":"181","food_category":"Meat and alternatives","food":"Pork","serving_weight":"150","serving_desc":"Use the size of pack as a guide"},{"food_id":"281","food_category":"Processed food","food":"Pork and Prawns Fried Rice - ready meal","serving_weight":"360","serving_desc":"Standard ready meal"},{"food_id":"285","food_category":"Processed food","food":"Pork Roast - ready meal","serving_weight":"360","serving_desc":"Standard ready meal"},{"food_id":"162","food_category":"Cupboard","food":"Porridge","serving_weight":"45","serving_desc":"About 1 and a half handfuls"},{"food_id":"211","food_category":"Potatoes","food":"Potatoes","serving_weight":"220","serving_desc":"1 baked potato, about the size of your fist"},{"food_id":"86","food_category":"Fish and seafood","food":"Prawn","serving_weight":"80","serving_desc":"About 4 tablespoons"},{"food_id":"475","food_category":"Nuts and seeds","food":"Pumpkin seeds","serving_weight":"0","serving_desc":""},{"food_id":"163","food_category":"Grains, pulses, and legumes","food":"Quinoa","serving_weight":"120","serving_desc":"About 6 tablespoons"},{"food_id":"182","food_category":"Meat and alternatives","food":"Quorn","serving_weight":"100","serving_desc":"Use the size of pack as a guide"},{"food_id":"316","food_category":"Meat and alternatives","food":"Quorn Classic Burger","serving_weight":"90","serving_desc":""},{"food_id":"317","food_category":"Meat and alternatives","food":"Quorn Crispy Nuggets","serving_weight":"80","serving_desc":""},{"food_id":"314","food_category":"Meat and alternatives","food":"Quorn Mince","serving_weight":"75","serving_desc":""},{"food_id":"315","food_category":"Meat and alternatives","food":"Quorn Mince (Australia, New Zealand, and Asia)","serving_weight":"75","serving_desc":""},{"food_id":"312","food_category":"Meat and alternatives","food":"Quorn Pieces","serving_weight":"75","serving_desc":""},{"food_id":"313","food_category":"Meat and alternatives","food":"Quorn Pieces (Australia, New Zealand, and Asia)","serving_weight":"75","serving_desc":""},{"food_id":"318","food_category":"Meat and alternatives","food":"Quorn Sausages","serving_weight":"84","serving_desc":""},{"food_id":"319","food_category":"Meat and alternatives","food":"Quorn Vegan Nuggets","serving_weight":"70","serving_desc":""},{"food_id":"320","food_category":"Meat and 
alternatives","food":"Quorn Vegan Pieces","serving_weight":"70","serving_desc":""},{"food_id":"183","food_category":"Meat and alternatives","food":"Rabbit","serving_weight":"160","serving_desc":"About half the size of your hand"},{"food_id":"138","food_category":"Fruit","food":"Raisins","serving_weight":"80","serving_desc":"One heaped serving spoon"},{"food_id":"206","food_category":"Oils","food":"Rapeseed oil","serving_weight":"14","serving_desc":"1 tablespoon oil"},{"food_id":"139","food_category":"Fruit","food":"Raspberries","serving_weight":"80","serving_desc":""},{"food_id":"140","food_category":"Fruit","food":"Raspberries - frozen","serving_weight":"80","serving_desc":""},{"food_id":"339","food_category":"Vegetables","food":"Red onion","serving_weight":"80","serving_desc":"1 onion"},{"food_id":"544","food_category":"Sauces and dressings","food":"Relish","serving_weight":"40","serving_desc":"2 tablespoons"},{"food_id":"522","food_category":"Vegetables","food":"Rhubarb","serving_weight":"80","serving_desc":""},{"food_id":"395","food_category":"Grains, pulses, and legumes","food":"Rice - basmati","serving_weight":"65","serving_desc":"About 2 handfuls"},{"food_id":"394","food_category":"Grains, pulses, and legumes","food":"Rice - brown","serving_weight":"65","serving_desc":"About 2 handfuls"},{"food_id":"396","food_category":"Grains, pulses, and legumes","food":"Rice - jasmine","serving_weight":"65","serving_desc":"About 2 handfuls"},{"food_id":"397","food_category":"Grains, pulses, and legumes","food":"Rice - long grain","serving_weight":"65","serving_desc":"About 2 handfuls"},{"food_id":"419","food_category":"Grains, pulses, and legumes","food":"Rice - sushi","serving_weight":"0","serving_desc":""},{"food_id":"164","food_category":"Grains, pulses, and legumes","food":"Rice - white","serving_weight":"65","serving_desc":"About 2 handfuls"},{"food_id":"524","food_category":"Vegetables","food":"Rocket","serving_weight":"0","serving_desc":""},{"food_id":"445","food_category":"Cupboard","food":"Rosemary","serving_weight":"0","serving_desc":""},{"food_id":"328","food_category":"Drinks","food":"Rum","serving_weight":"50","serving_desc":"two 25ml shots"},{"food_id":"507","food_category":"Cupboard","food":"Sage","serving_weight":"3","serving_desc":"One teaspoon"},{"food_id":"345","food_category":"Sauces and dressings","food":"Salad cream","serving_weight":"14","serving_desc":"one tablespoon"},{"food_id":"346","food_category":"Sauces and dressings","food":"Salad dressing","serving_weight":"14","serving_desc":"one tablespoon"},{"food_id":"514","food_category":"Vegetables","food":"Salad leaves","serving_weight":"0","serving_desc":""},{"food_id":"87","food_category":"Fish and seafood","food":"Salmon","serving_weight":"170","serving_desc":"The size of half to a whole hand"},{"food_id":"431","food_category":"Cupboard","food":"Salt","serving_weight":"0","serving_desc":""},{"food_id":"88","food_category":"Fish and seafood","food":"Sardines","serving_weight":"80","serving_desc":"About 4 tablespoons"},{"food_id":"89","food_category":"Fish and seafood","food":"Scallops","serving_weight":"120","serving_desc":"4 scallops"},{"food_id":"272","food_category":"Fish and seafood","food":"Scampi","serving_weight":"80","serving_desc":"About 4 tablespoons"},{"food_id":"402","food_category":"Bakery","food":"Scones","serving_weight":"0","serving_desc":""},{"food_id":"90","food_category":"Fish and seafood","food":"Sea bass","serving_weight":"170","serving_desc":"The size of half to a whole 
hand"},{"food_id":"91","food_category":"Fish and seafood","food":"Seabass","serving_weight":"170","serving_desc":"The size of half to a whole hand"},{"food_id":"538","food_category":"Fish and seafood","food":"Seaweed","serving_weight":"0","serving_desc":""},{"food_id":"21","food_category":"Nuts and seeds","food":"Sesame seeds","serving_weight":"0","serving_desc":""},{"food_id":"207","food_category":"Oils","food":"Sesameseed oil","serving_weight":"14","serving_desc":"1 tablespoon oil"},{"food_id":"341","food_category":"Vegetables","food":"Shallot","serving_weight":"40","serving_desc":"2 shallots"},{"food_id":"284","food_category":"Processed food","food":"Shepherds Pie - ready meal","serving_weight":"360","serving_desc":"Standard ready meal"},{"food_id":"295","food_category":"Drinks","food":"Sherry","serving_weight":"150","serving_desc":"One medium glass"},{"food_id":"92","food_category":"Fish and seafood","food":"Shrimp","serving_weight":"80","serving_desc":"About 4 tablespoons"},{"food_id":"93","food_category":"Fish and seafood","food":"Skate","serving_weight":"170","serving_desc":"The size of half to a whole hand"},{"food_id":"427","food_category":"Fish and seafood","food":"Smoked salmon","serving_weight":"120","serving_desc":"The size of half to a whole hand"},{"food_id":"414","food_category":"Biscuits and confectionery","food":"Snack bar - chocolate","serving_weight":"0","serving_desc":""},{"food_id":"399","food_category":"Biscuits and confectionery","food":"Snack bar - muesli","serving_weight":"0","serving_desc":""},{"food_id":"53","food_category":"Drinks","food":"Soft drink","serving_weight":"330","serving_desc":"1 can"},{"food_id":"38","food_category":"Dairy and alternatives","food":"Sour cream","serving_weight":"60","serving_desc":"About 2 tablespoons"},{"food_id":"351","food_category":"Sauces and dressings","food":"Soy sauce","serving_weight":"18","serving_desc":"1 tablespoon"},{"food_id":"208","food_category":"Oils","food":"Soybean oil","serving_weight":"14","serving_desc":"1 tablespoon oil"},{"food_id":"165","food_category":"Grains, pulses, and legumes","food":"Soybeans","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"276","food_category":"Processed food","food":"Spagheti Bolognese - ready meal","serving_weight":"360","serving_desc":"Standard ready meal"},{"food_id":"511","food_category":"Grains, pulses, and legumes","food":"Spaghetti","serving_weight":"75","serving_desc":"2 handfuls. 
Use your finger and thumb to measure a bunch the size of a \u00a31 coin"},{"food_id":"263","food_category":"Vegetables","food":"Spinach","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"264","food_category":"Vegetables","food":"Spinach - canned","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"265","food_category":"Vegetables","food":"Spinach - frozen","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"340","food_category":"Vegetables","food":"Spring onion","serving_weight":"40","serving_desc":"2 spring onions"},{"food_id":"266","food_category":"Vegetables","food":"Squash","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"94","food_category":"Fish and seafood","food":"Squid","serving_weight":"0","serving_desc":""},{"food_id":"484","food_category":"Meat and alternatives","food":"Steak","serving_weight":"225","serving_desc":"One 8oz steak"},{"food_id":"352","food_category":"Sauces and dressings","food":"Stir fry sauce","serving_weight":"18","serving_desc":"1 tablespoon"},{"food_id":"389","food_category":"Sauces and dressings","food":"Stock - beef","serving_weight":"240","serving_desc":"1 cup of stock"},{"food_id":"390","food_category":"Sauces and dressings","food":"Stock - chicken","serving_weight":"240","serving_desc":"1 cup of stock"},{"food_id":"442","food_category":"Sauces and dressings","food":"Stock - lamb","serving_weight":"240","serving_desc":"1 cup of stock"},{"food_id":"391","food_category":"Sauces and dressings","food":"Stock - vegetable","serving_weight":"240","serving_desc":"1 cup of stock"},{"food_id":"141","food_category":"Fruit","food":"Strawberries","serving_weight":"80","serving_desc":"7 strawberries"},{"food_id":"142","food_category":"Fruit","food":"Strawerries - frozen","serving_weight":"80","serving_desc":"7 strawberries"},{"food_id":"386","food_category":"Cupboard","food":"Sugar - brown","serving_weight":"4","serving_desc":"1 teaspoon"},{"food_id":"486","food_category":"Cupboard","food":"Sugar - icing","serving_weight":"8","serving_desc":"1 tablespoon"},{"food_id":"22","food_category":"Cupboard","food":"Sugar - white","serving_weight":"4","serving_desc":"1 teaspoon"},{"food_id":"209","food_category":"Oils","food":"Sunflower oil","serving_weight":"14","serving_desc":"1 tablespoon oil"},{"food_id":"498","food_category":"Nuts and seeds","food":"Sunflower seeds","serving_weight":"3","serving_desc":"1 teaspoon"},{"food_id":"337","food_category":"Vegetables","food":"Swedes","serving_weight":"80","serving_desc":"Recommended portion size"},{"food_id":"449","food_category":"Potatoes","food":"Sweet potato","serving_weight":"115","serving_desc":"1 medium sweet potato"},{"food_id":"267","food_category":"Vegetables","food":"Sweetcorn - canned","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"268","food_category":"Vegetables","food":"Sweetcorn - frozen","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"269","food_category":"Vegetables","food":"Sweetcorn - on the cob","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"270","food_category":"Vegetables","food":"Sweetcorn - packaged","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"17","food_category":"Biscuits and confectionery","food":"Sweets","serving_weight":"0","serving_desc":""},{"food_id":"18","food_category":"Biscuits and confectionery","food":"Sweets - 
milk-based","serving_weight":"0","serving_desc":""},{"food_id":"527","food_category":"Grains, pulses, and legumes","food":"Tagliatelle","serving_weight":"75","serving_desc":"2 handfuls. Use your finger and thumb to measure a bunch the size of a \u00a31 coin"},{"food_id":"462","food_category":"Sauces and dressings","food":"Tahini","serving_weight":"16","serving_desc":"One tablespoon"},{"food_id":"472","food_category":"Sauces and dressings","food":"Tamari","serving_weight":"18","serving_desc":"1 tablespoon"},{"food_id":"447","food_category":"Cupboard","food":"Tapioca starch","serving_weight":"0","serving_desc":""},{"food_id":"294","food_category":"Drinks","food":"Tea - with milk","serving_weight":"400","serving_desc":"Normal-sized cup of tea with a dash of milk"},{"food_id":"184","food_category":"Meat and alternatives","food":"Tempeh","serving_weight":"80","serving_desc":"Use the size of pack as a guide"},{"food_id":"324","food_category":"Drinks","food":"Tequila","serving_weight":"50","serving_desc":"two 25ml shots"},{"food_id":"455","food_category":"Cupboard","food":"Thyme","serving_weight":"0","serving_desc":""},{"food_id":"411","food_category":"Biscuits and confectionery","food":"Toffee","serving_weight":"0","serving_desc":""},{"food_id":"185","food_category":"Meat and alternatives","food":"Tofu","serving_weight":"80","serving_desc":"Use the size of pack as a guide"},{"food_id":"218","food_category":"Sauces and dressings","food":"Tomato ketchup","serving_weight":"40","serving_desc":"2 tablespoons"},{"food_id":"219","food_category":"Processed food","food":"Tomato paste","serving_weight":"80","serving_desc":""},{"food_id":"358","food_category":"Sauces and dressings","food":"Tomato sauce","serving_weight":"175","serving_desc":"Half a jar"},{"food_id":"143","food_category":"Fruit","food":"Tomatoes","serving_weight":"80","serving_desc":"One medium tomato or seven cherry tomatoes"},{"food_id":"144","food_category":"Fruit","food":"Tomatoes - bottled","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"145","food_category":"Fruit","food":"Tomatoes - canned","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"11","food_category":"Bakery","food":"Tortilla wrap","serving_weight":"65","serving_desc":"1 wrap"},{"food_id":"510","food_category":"Spreads, jams, and honeys","food":"Treacle","serving_weight":"22","serving_desc":"1 tablespoon"},{"food_id":"95","food_category":"Fish and seafood","food":"Trout","serving_weight":"170","serving_desc":"The size of half to a whole hand"},{"food_id":"96","food_category":"Fish and seafood","food":"Tuna - fresh","serving_weight":"100","serving_desc":""},{"food_id":"186","food_category":"Meat and alternatives","food":"Turkey","serving_weight":"160","serving_desc":"About half the size of your hand"},{"food_id":"493","food_category":"Cupboard","food":"Turmeric","serving_weight":"3","serving_desc":"One teaspoon"},{"food_id":"336","food_category":"Vegetables","food":"Turnips","serving_weight":"80","serving_desc":"Recommended portion size"},{"food_id":"271","food_category":"Vegetables","food":"Unshelled peas","serving_weight":"80","serving_desc":"Three heaped serving spoons"},{"food_id":"187","food_category":"Meat and alternatives","food":"Valess","serving_weight":"80","serving_desc":"Use the size of pack as a guide"},{"food_id":"491","food_category":"Cupboard","food":"Vanilla","serving_weight":"5","serving_desc":"1 teaspoon"},{"food_id":"481","food_category":"Cupboard","food":"Vanilla 
extract","serving_weight":"5","serving_desc":"1 teaspoon"},{"food_id":"188","food_category":"Meat and alternatives","food":"Veal","serving_weight":"125","serving_desc":"Use the size of pack as a guide"},{"food_id":"210","food_category":"Oils","food":"Vegetable oil","serving_weight":"14","serving_desc":"1 tablespoon oil"},{"food_id":"189","food_category":"Meat and alternatives","food":"Vegetable Patty","serving_weight":"75","serving_desc":"1 burger"},{"food_id":"541","food_category":"Meat and alternatives","food":"Venison","serving_weight":"160","serving_desc":"About half the size of your hand"},{"food_id":"357","food_category":"Sauces and dressings","food":"Vinegar","serving_weight":"18","serving_desc":"1 tablespoon"},{"food_id":"325","food_category":"Drinks","food":"Vodka","serving_weight":"50","serving_desc":"Two 25ml shots"},{"food_id":"408","food_category":"Bakery","food":"Waffle","serving_weight":"0","serving_desc":""},{"food_id":"197","food_category":"Nuts and seeds","food":"Walnut","serving_weight":"20","serving_desc":"About the amount that fits in your palm"},{"food_id":"454","food_category":"Drinks","food":"Water","serving_weight":"0","serving_desc":""},{"food_id":"526","food_category":"Vegetables","food":"Watercress","serving_weight":"0","serving_desc":""},{"food_id":"166","food_category":"Grains, pulses, and legumes","food":"Wheat","serving_weight":"0","serving_desc":""},{"food_id":"326","food_category":"Drinks","food":"Whiskey","serving_weight":"50","serving_desc":"two 25ml shots"},{"food_id":"97","food_category":"Fish and seafood","food":"Whitebait","serving_weight":"80","serving_desc":"About 4 tablespoons"},{"food_id":"98","food_category":"Fish and seafood","food":"Whiting","serving_weight":"170","serving_desc":"The size of half to a whole hand"},{"food_id":"56","food_category":"Drinks","food":"Wine","serving_weight":"150","serving_desc":"One medium glass"},{"food_id":"353","food_category":"Sauces and dressings","food":"Worcestershire sauce","serving_weight":"18","serving_desc":"1 tablespoon"},{"food_id":"450","food_category":"Cupboard","food":"Xanthan gum","serving_weight":"4","serving_desc":"1 teaspoon"},{"food_id":"458","food_category":"Cupboard","food":"Yeast","serving_weight":"12","serving_desc":"1 tablespoon"},{"food_id":"457","food_category":"Cupboard","food":"Yeast extract","serving_weight":"4","serving_desc":"1 teaspoon"},{"food_id":"40","food_category":"Dairy and alternatives","food":"Yogurt","serving_weight":"120","serving_desc":"About 4 tablespoons (or see pot for exact weight)"}]
food
food_list = []
for f in food:
food_list.append(f['food'])
food_list
len(food_list)
def create_call(food):
food = food.replace(" ", "%20")
call = f"https://myemissions.green/wp-admin/admin-ajax.php?action=getEmissionsFromDB&foodsSelected%5B%5D={food}"
r = requests.get(call)
return r.json()[0]
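# Hedged usage sketch (not part of the original run): fetch a handful of foods with a
# short pause between requests and skip any lookup that fails.
import time
sample_values = []
for name in food_list[:5]:
    try:
        sample_values.append(create_call(name))
    except Exception as err:        # e.g. network error or empty response
        print(f"skipping {name}: {err}")
    time.sleep(0.5)                 # be polite to the endpoint
len(sample_values)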
create_call("chocolate")
food_values = []
for food in food_list:
food_values.append(create_call(food))
food_values
import json
with open("food_values.json", "w+") as fp:
json.dump(food_values, fp)
"#calculator > div > div > script"
###Output
_____no_output_____ |
Final_Experiments/CustomNNClass.ipynb | ###Markdown
Install libraries & version check
Code pipeline from the PNAS 2020 paper by Jiawei Zhuang et al.
###Code
# %%capture
# !pip install -U numpy==1.18.5
# !pip install h5py==2.10.0
'Comment above cell and restart runtime'
'Upload 3 arrays for OOA analysis'
'Check numpy's version BEFORE and AFTER the runtime restart'
import numpy as np
import matplotlib.pyplot as plt
print(np.__version__)
###Output
1.18.5
###Markdown
Setup
###Code
%%capture
# !git clone https://github.com/aditya5252/Multiprocessor_Advection_.git
!pip install git+https://github.com/JiaweiZhuang/data-driven-pdes@fix-beam
%tensorflow_version 1.x
import os
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import choice
import pandas as pd
import tensorflow as tf
tf.enable_eager_execution()
%matplotlib inline
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams['font.size'] = 14
from google.colab import files # colab-specific utilities; comment out when running locally
tf.enable_eager_execution()
tf.__version__, tf.keras.__version__
import xarray
from datadrivenpdes.core import grids
from datadrivenpdes.core import integrate
from datadrivenpdes.core import models
from datadrivenpdes.core import tensor_ops
from datadrivenpdes.advection import equations as advection_equations
from datadrivenpdes.pipelines import model_utils
from datadrivenpdes.core import geometry
# tf.keras.backend.set_floatx('float32')
import copy
def CD2_data(linear,init,c,ntime,N_x,delt,delx):
data_ls=[]
u=init
if linear == True:
for step in range(ntime):
data_ls.append(u) # At t = 0 ,step = 0 At t = ntime-1 , step = ntime-1
unew=u.copy()
for i in range(1,N_x-1):
unew[i]=u[i] + delt*( -c*(u[i+1]-u[i-1])/(2*delx) ) ## B.D. model
unew[0]=u[0] + delt*( -c*(u[1]-u[N_x-2])/(2*delx) )
unew[N_x-1]=unew[0]
u=unew
elif linear == False:
pass
data_sol=np.stack(data_ls)
return data_sol
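# Hedged aside (not used below): the same periodic CD2 update can be written without
# the inner loop by using np.roll for the i+1 / i-1 neighbours. It assumes, as in
# CD2_data above, that the last grid point duplicates the first one.
def cd2_step_vectorized(u, c, delt, delx):
    interior = u[:-1]                                   # unique grid points
    new = interior + delt * (-c * (np.roll(interior, -1) - np.roll(interior, 1)) / (2 * delx))
    return np.append(new, new[0])                       # re-attach the periodic copy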
def upwind_data(linear,init,c,ntime,N_x,delt,delx):
data_ls=[]
u=init
if ((linear == True) and (c>0)): ## Apply B.D. with preiodic boundary conditions
for step in range(ntime):
data_ls.append(u) # At t = 0 ,step = 0 At t = ntime-1 , step = ntime-1
unew=u.copy()
for i in range(1,N_x-1):
unew[i]=u[i] + delt*( -c*(u[i]-u[i-1])/delx ) ## B.D. model
unew[0]=u[0] + delt*( -c*(u[0]-u[N_x-2])/delx )
unew[N_x-1]=unew[0]
u=unew
elif ((linear == True) and (c<=0)): ## Apply F.D. with preiodic boundary conditions
for step in range(ntime):
data_ls.append(u) # At t = 0 ,step = 0 At t = ntime-1 , step = ntime-1
unew=u.copy()
for i in range(1,N_x-1):
unew[i]=u[i] + delt*( -c*(u[i+1]-u[i])/delx ) ## F.D. model
unew[0]=u[0] + delt*( -c*(u[1]-u[0])/delx )
unew[N_x-1]=unew[0]
u=unew
else:
print(c)
for step in range(ntime):
data_ls.append(u) # At t = 0 ,step = 0 At t = ntime-1 , step = ntime-1
unew=u.copy()
for i in range(1,N_x-1):
if u[i]>0:
unew[i]=u[i] + delt*( -u[i]*(u[i]-u[i-1])/delx)
else:
unew[i]=u[i] + delt*( -u[i]*(u[i+1]-u[i])/delx)
if u[0]>0:
unew[0]=u[0] + delt*( -u[0]*(u[0]-u[N_x-2])/delx)
else:
unew[0]=u[0] + delt*( -u[0]*(u[1]-u[0])/delx )
unew[N_x-1]=unew[0]
u=unew
data_sol=np.stack(data_ls)
return data_sol
def ic(A,K,PHI,x_mesh):
u=np.zeros_like(x_mesh)
for A1,k1 in zip(A,K):
for phi1 in PHI:
u+= A1*np.sin(k1*x_mesh + phi1)
return u
def solution_data(A,K,PHI,x_mesh,ntime,delt):
# data_ls=[ u_ana[i]+= amp[k1]*exp(-kappa[k1]*kappa[k1]*nu*tEnd)*sin(kappa[k1]*(x[i]-cx*tEnd)+phi[k2]) for i in range(ntime)]
data_ls=[]
for step in range(ntime):
u=np.zeros_like(x_mesh)
for A1,k1 in zip(A,K):
for phi1 in PHI:
u+= A1*np.sin(k1*(x_mesh-step*delt) + phi1)
data_ls.append(u)
data_sol=np.stack(data_ls)
return data_sol
'Find dt for Advection-1d equation'
def _dx_dt(data,adv_coff):
dx=2*np.pi/(data.shape[1])
return dx,dx*0.08/adv_coff
'Plot time propagation of dataset'
def plot_time_prop(data,t0,t1,t2,tr='UnTrained'):
plt.plot(data[t0],label=f'Max_{t0}={data[t0].max()}')
plt.plot(data[t1],label=f'Max_{t1}={data[t1].max()}')
plt.plot(data[t2],label=f'Max_{t2}={data[t2].max()}')
plt.ylabel('Concentration')
plt.xlabel('N_x')
plt.title(tr+'Model Predictions')
plt.legend()
plt.show()
'Create initial_state dictionary from dataset'
def create_init_state_from_2d_data(data,adv_coff):
c_init=data[0][np.newaxis,:,np.newaxis]
initial_state_obj = {
'concentration': c_init.astype(np.float32), # tensorflow code expects float32
'x_velocity': adv_coff*np.ones(c_init.shape, np.float32) * 1.0,
'y_velocity': np.zeros(c_init.shape, np.float32)
}
for k, v in initial_state_obj.items():
print(k, v.shape) # (sample, x, y)
return initial_state_obj
def create_init_state_from_Burger_init(c_data):
c_init=c_data[np.newaxis,:,np.newaxis]
initial_state_obj = {
'concentration': c_init.astype(np.float32), # tensorflow code expects float32
'x_velocity': c_init.astype(np.float32),
'y_velocity': np.zeros(c_init.shape, np.float32)}
for k, v in initial_state_obj.items():
print(k, v.shape) # (sample, x, y)
return initial_state_obj
'Create xarray DatArray from integrated dictionary'
def wrap_as_xarray(integrated):
dr = xarray.DataArray(
integrated['concentration'].numpy().squeeze(-1),
dims = ('time', 'sample', 'x'),
coords = {'time': time_steps, 'x': x_coarse.squeeze()}
)
return dr
def plotOOA(m,c,ls,err_ls):
plt.plot(np.log(ls),-m*np.log(ls)+c,'r',label=f'{m}order accurate')
plt.plot(np.log(ls),np.log(err_ls),'b',label='Log-Error')
plt.xlabel('LogNx')
plt.ylabel('LogError')
plt.legend()
plt.title('Order of Accuracy Plot')
plt.show()
def delay_(max_delay,prob_dist):
allowed_delays=np.arange(0.,max_delay)
delay_chosen=choice(allowed_delays,p=prob_dist)
return delay_chosen
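# Hedged example: with max_delay = 3 the allowed delay levels are {0., 1., 2.} and one
# of them is drawn according to prob_dist.
print(delay_(max_delay=3, prob_dist=[0.7, 0.2, 0.1]))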
def modify_data(sub_data,DAsync=None):
one_arr=np.ones_like(sub_data)
boundary_arr=np.zeros_like(sub_data)
boundary_arr[:,0]=1.
boundary_arr[:,-1]=1.
if (DAsync==0):
delay_arr=np.zeros_like(sub_data)
elif (DAsync==1):
delay_arr=np.zeros_like(sub_data)
for i in range(delay_arr.shape[0]):
delay_arr[i,0]=delay_(nlevels,prob_set)
delay_arr[i,-1]=delay_(nlevels,prob_set)
del_arr = delay_arr + boundary_arr + one_arr
sub_data_modified=np.multiply(del_arr,sub_data)
return sub_data_modified
# This data-generation code is a bit involved, mostly because we use a multi-step loss function.
# To produce large training data in parallel, refer to the create_training_data.py script in the source code.
def reference_solution(initial_state_fine, fine_grid, coarse_grid,
coarse_time_steps=256):
'What does this function do'
'Runs high-accuracy model at high-resolution'
'smaller dx, => More Nx => More Nt'
'Subsample with subsampling_factor=Resamplingfactor '
'High accuracy data achieved on a coarse grid'
'So essentially obtain coarse-grained, HIGH-ACCURACY, GROUND TRUTH data'
'Return dict of items'
'For my simple use-case , Resamplingfactor = 1 '
'Hence, given sync_data dataset(128 x 32)'
'sync_data dataset itself is taken as the ground truth'
'Hence we do not need this function to obtain Ground truth data '
# use high-order traditional scheme as reference model
equation = advection_equations.VanLeerAdvection(cfl_safety_factor=0.08)
key_defs = equation.key_definitions
# reference model runs at high resolution
model = models.FiniteDifferenceModel(equation, fine_grid)
# need 8x more time steps for 8x higher resolution to satisfy CFL
coarse_ratio = fine_grid.size_x // coarse_grid.size_x
steps = np.arange(0, coarse_time_steps*coarse_ratio+1, coarse_ratio)
# solve advection at high resolution
integrated_fine = integrate.integrate_steps(model, initial_state_fine, steps)
# regrid to coarse resolution
integrated_coarse = tensor_ops.regrid(
integrated_fine, key_defs, fine_grid, coarse_grid)
return integrated_coarse
def ground_dict_from_data(data):
conc_ground=tf.convert_to_tensor(data[:,np.newaxis,:,np.newaxis], dtype=tf.float32, dtype_hint=None, name=None)
ground_soln_dict = {
'concentration': conc_ground, # tensorflow code expects float32
'x_velocity': tf.ones_like(conc_ground, dtype=None, name=None) * 1.0,
'y_velocity': tf.zeros_like(conc_ground, dtype=None, name=None)
}
for k, v in ground_soln_dict.items():
print(k, v.shape) # (sample, x, y)
return ground_soln_dict
def make_train_data(integrated_coarse, coarse_time_steps=256, example_time_steps=4):
# we need to re-format data so that single-step input maps to multi-step output
# remove the last several time steps, as training input
train_input = {k: v[:-example_time_steps] for k, v in integrated_coarse.items()}
# merge time and sample dimension as required by model
n_time, n_sample, n_x, n_y = train_input['concentration'].shape
for k in train_input:
train_input[k] = tf.reshape(train_input[k], [n_sample * n_time, n_x, n_y])
print('\n train_input shape:')
for k, v in train_input.items():
print(k, v.shape) # (merged_sample, x, y)
# pick the shifted time series, as training output
output_list = []
for shift in range(1, example_time_steps+1):
# output time series, starting from each single time step
output_slice = integrated_coarse['concentration'][shift:coarse_time_steps - example_time_steps + shift + 1]
# merge time and sample dimension as required by training
n_time, n_sample, n_x, n_y = output_slice.shape
output_slice = tf.reshape(output_slice, [n_sample * n_time, n_x, n_y])
output_list.append(output_slice)
train_output = tf.stack(output_list, axis=1) # concat along shift_time dimension, after sample dimension
print('\n train_output shape:', train_output.shape) # (merged_sample, shift_time, x, y)
# sanity check on shapes
assert train_output.shape[0] == train_input['concentration'].shape[0] # merged_sample
assert train_output.shape[2] == train_input['concentration'].shape[1] # x
assert train_output.shape[3] == train_input['concentration'].shape[2] # y
assert train_output.shape[1] == example_time_steps
return train_input, train_output
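# Hedged toy illustration of the shifted-window pairing performed above: each input
# time step is matched with its next `example_time_steps` future steps.
toy_series = np.arange(10)                    # stand-in for 10 coarse time steps
toy_shift = 4
toy_inputs = toy_series[:-toy_shift]          # 6 training inputs
toy_targets = np.stack(
    [toy_series[s:s + toy_inputs.shape[0]] for s in range(1, toy_shift + 1)], axis=1)
print(toy_inputs.shape, toy_targets.shape)    # (6,) and (6, 4)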
###Output
_____no_output_____
###Markdown
Results
###Code
'Redefine conv_2d_stack to be used as input to model_nn'
'For that, redefine conv2dperiodic'
# models.co
class myConvPeriodic(tf.keras.layers.Layer):
"""Conv2D layer with periodic boundary conditions."""
def __init__(self, filters, kernel_size, **kwargs):
# Let Conv2D handle argument normalization, e.g., kernel_size -> tuple
self._layer = tf.keras.layers.Conv2D(
filters, kernel_size, padding='valid', **kwargs)
self.filters = self._layer.filters
self.kernel_size = self._layer.kernel_size
if any(size % 2 == 0 for size in self.kernel_size):
raise ValueError('kernel size for conv2d is not odd: {}'
.format(self.kernel_size))
super().__init__()
def build(self, input_shape):
self._layer.build(input_shape)
super().build(input_shape)
def compute_output_shape(self, input_shape):
return input_shape[:-1] + (self.filters,)
def call(self, inputs):
padded = tensor_ops.pad_periodic_2d(inputs, self.kernel_size)
# tensor_ops._pad_periodic_by_axis(stack_ls, [1, 1],1)
result = self._layer(padded)
assert result.shape[1:3] == inputs.shape[1:3], (result, inputs)
return result
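# Hedged illustration of the periodic ("wrap-around") padding idea that
# tensor_ops.pad_periodic_2d is assumed to provide for tensors:
print(np.pad(np.array([1, 2, 3, 4, 5]), 2, mode='wrap'))   # -> [4 5 1 2 3 4 5 1 2]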
m1 = tf.keras.Sequential()
print(type(m1(initial_state)))
m1.add(tf.keras.layers.Lambda(stack_dict))
print(type(m1(initial_state)))
print(m1(initial_state).shape)
layer1=models.Conv2DPeriodic(filters=9, kernel_size=(5,1), activation='relu')
# layer1= tf.keras.layers.Conv2D(filters=9, kernel_size=(7,1), activation='relu')
m1.add(layer1)
print(m1(initial_state).shape)
def conv2d_stack(num_outputs, num_layers=5, filters=32, kernel_size=5,
activation='relu', **kwargs):
"""Create a sequence of Conv2DPeriodic layers."""
model = tf.keras.Sequential()
model.add(tf.keras.layers.Lambda(stack_dict))
for _ in range(num_layers - 1):
layer = models.Conv2DPeriodic(
filters, kernel_size, activation=activation, **kwargs)
model.add(layer)
model.add(models.Conv2DPeriodic(num_outputs, kernel_size, **kwargs))
return model
coreModel = conv2d_stack(num_outputs = 60, num_layers=5, filters=32, kernel_size=(5,1),
activation='relu')
print(coreModel(initial_state).shape)
###Output
_____no_output_____
###Markdown
Define & Initialize NN model
###Code
res=2**6
numPE=1
grid_length = 2*np.pi
fine_grid_resolution = res
# 1d domain, so only 1 point along y dimension
fine_grid = grids.Grid(
size_x=fine_grid_resolution, size_y=1,
step=grid_length/fine_grid_resolution
)
x_fine, _ = fine_grid.get_mesh()
print(x_fine.shape)
CFL,u0,tend=0.08,1.,15.
dx=grid_length/len(x_fine)
dt=dx*CFL/abs(u0)
N_t=int(tend//dt)
time_steps=np.arange(N_t)
initS=[[1],[1],[0]]
data_ana=solution_data(initS[0],initS[1],initS[2],x_fine[:,0],N_t,dt)
'Create initial state from data'
initial_state=create_init_state_from_2d_data(data_ana,u0)
# model1=models.conv2d_stack(num_outputs=10,num_layers=5,filters=32, kernel_size=5,activation='relu')
model_nn = models.PseudoLinearModel(advection_equations.FiniteDifferenceAdvection(0.08), fine_grid,
num_time_steps=10,stencil_size=3, kernel_size=(3,1), num_layers=5, filters=16,constrained_accuracy_order=1,
learned_keys = {'concentration_x', 'concentration_y'}, activation='relu',)
integrated_UT1 = integrate.integrate_steps(model_nn, initial_state, time_steps)
model_nn(initial_state).shape
print(model_nn.output_layers['concentration_x'].kernel_size)
print('Input to model_nn is dict initial_state \n with keys conc, x-vel y-vel')
print(initial_state['concentration'].shape)
print('The _apply_model method outputs \n delc/dex array \n & \n delc/dey array')
print(model_nn._apply_model(initial_state).keys())
print(list(model_nn._apply_model(initial_state).values())[0].shape)
print(list(model_nn._apply_model(initial_state).values())[1].shape)
print(integrated_UT1['concentration'].shape)
'First analyze method _apply_model of PseudoLinearMode Class'
'Analyze core_model_func i.e. conv_2d_stack'
from typing import (
Any, Dict, List, Optional, Mapping, Set, TypeVar, Tuple, Union,
)
T = TypeVar('T')
def sorted_values(x: Dict[Any, T]) -> List[T]:
"""Returns the sorted values of a dictionary."""
return [x[k] for k in sorted(x)]
def stack_dict(state: Dict[Any, tf.Tensor]) -> tf.Tensor:
"""Stack a dict of tensors along its last axis."""
return tf.stack(sorted_values(state), axis=-1)
def conv2d_stack(num_outputs, num_layers=5, filters=32, kernel_size=5,
activation='relu', **kwargs):
"""Create a sequence of Conv2DPeriodic layers."""
model = tf.keras.Sequential()
model.add(tf.keras.layers.Lambda(stack_dict))
for _ in range(num_layers - 1):
layer = models.Conv2DPeriodic(
filters, kernel_size, activation=activation, **kwargs)
model.add(layer)
model.add(models.Conv2DPeriodic(num_outputs, kernel_size, **kwargs))
return model
'Check function sorted_values'
dc={'ab':456,'xy':1,'rt':1234}
print(dc)
print(sorted_values(dc))
'Check function stack_dict'
stack_ls=stack_dict(initial_state)
print(stack_dict(initial_state).shape)
'''Stacks initial_state concentration, x-velocity, y-velocity in alphabetical order
along the last axis'''
stack_dict(initial_state)[...,2].shape
'Check function entire conv2d_stack'
for i in range(20):
coreModel = conv2d_stack(num_outputs = 3, num_layers=5, filters=32, kernel_size=3,
activation='relu')
print(coreModel(initial_state).shape)
def conv2d_stack(num_outputs, num_layers=5, filters=32, kernel_size=5,
activation='relu', **kwargs):
"""Create a sequence of Conv2DPeriodic layers."""
model = tf.keras.Sequential()
model.add(tf.keras.layers.Lambda(stack_dict))
for _ in range(num_layers - 1):
layer = models.Conv2DPeriodic(
filters, kernel_size, activation=activation, **kwargs)
model.add(layer)
model.add(models.Conv2DPeriodic(num_outputs, kernel_size, **kwargs))
return model
m1 = tf.keras.Sequential()
print(type(m1(initial_state)))
m1.add(tf.keras.layers.Lambda(stack_dict))
print(type(m1(initial_state)))
print(m1(initial_state).shape)
# layer1=models.Conv2DPeriodic(filters=9, kernel_size=(3.1), activation='relu')
layer1= tf.keras.layers.Conv2D(filters=9, kernel_size=(7,1), activation='relu')
m1.add(layer1)
print(m1(initial_state).shape)
'Analyze Conv2dPeriodic class '
in_sh=stack_ls.shape
print(in_sh)
print(in_sh[:-1]+(9,)) # For 9 filters used in convolution
stack_ls.shape
stack_ls[0,:,0,2]
# tensor_ops.pad_periodic_2d(stack_ls,))
padded_=tensor_ops._pad_periodic_by_axis(stack_ls, [1, 1],1)
print(padded_.shape)
padded_[0,:,0,2]
###Output
Input to model_nn is dict initial_state
with keys conc, x-vel y-vel
(1, 64, 1)
The _apply_model method outputs
delc/dex array
&
delc/dey array
dict_keys(['concentration_x', 'concentration_y'])
(1, 64, 1)
(1, 64, 1)
(1909, 1, 64, 1)
(1, 64, 1, 3)
(1, 64, 1, 3)
(1, 64, 1, 3)
(1, 64, 1, 3)
(1, 64, 1, 3)
(1, 64, 1, 3)
(1, 64, 1, 3)
(1, 64, 1, 3)
(1, 64, 1, 3)
(1, 64, 1, 3)
(1, 64, 1, 3)
(1, 64, 1, 3)
(1, 64, 1, 3)
(1, 64, 1, 3)
(1, 64, 1, 3)
(1, 64, 1, 3)
(1, 64, 1, 3)
(1, 64, 1, 3)
(1, 64, 1, 3)
(1, 64, 1, 3)
(1, 64, 1, 3)
<class 'dict'>
<class 'tensorflow.python.framework.ops.EagerTensor'>
(1, 64, 1, 3)
(1, 58, 1, 9)
(1, 64, 1, 3)
(1, 64, 1, 9)
(1, 66, 1, 3)
###Markdown
Post convolution
###Code
print(model_nn.learned_keys)
print(model_nn.fixed_keys)
l,f=(models.normalize_learned_and_fixed_keys(model_nn.learned_keys, model_nn.fixed_keys,advection_equations.FiniteDifferenceAdvection(0.08)))
print(l)
print(f)
coreModel = conv2d_stack(num_outputs = 12, num_layers=5, filters=32, kernel_size=3,activation='relu')
print(coreModel(initial_state).shape)
for stenc in [11,5,7,9,3]:
coreModel = conv2d_stack(num_outputs = 12, num_layers=5, filters=32, kernel_size=3,activation='relu')
out_layers = models.build_output_layers(equation=advection_equations.FiniteDifferenceAdvection(0.08), grid=fine_grid, learned_keys=model_nn.learned_keys, stencil_size=stenc,
initial_accuracy_order=1,constrained_accuracy_order=1, layer_cls=models.VaryingCoefficientsLayer,predict_permutations=True)
print(type(out_layers))
for key in out_layers:
print(key)
for key in out_layers:
print(out_layers[key].kernel_size)
size_splits = [out_layers[key].kernel_size for key in out_layers]
size_splits
size_splits
net=coreModel(initial_state)
net.shape
heads = tf.split(net, size_splits, axis=-1)
print(type(heads))
print(len(heads))
print(heads[0].shape)
print(heads[1].shape)
out_layers
out_layers.items()
for key,layer in out_layers.items():
print(key)
print(layer)
print('\n')
for head in heads:
print(head.shape)
for (key, layer), head in zip(out_layers.items(), heads):
print(key)
print(layer)
print(head.shape)
print('\n')
for (key, layer), head in zip(out_layers.items(), heads):
print(key)
print(layer.input_key)
print(head.shape)
print('\n')
result = {}
for (key, layer), head in zip(out_layers.items(), heads):
input_tensor = initial_state[layer.input_key]
'Input tensor of shape 1,64,1'
'polyAcc layer takes Input : '
'(1, 64, 1, 118)-delc/delx '
'(1, 64, 1 )-c '
'result[concx] = gives delc/delx'
'result[concy] = gives delc/dely'
result[key] = layer([head, input_tensor])
print(type(result))
print(len(result))
for key,tens in result.items():
print(key)
print(tens.shape)
print('\n')
###Output
{'concentration_x', 'concentration_y'}
{'y_velocity', 'x_velocity', 'concentration'}
{'concentration_x', 'concentration_y'}
{'y_velocity', 'x_velocity', 'concentration'}
###Markdown
Create standard schemes data
###Code
datu=upwind_data(linear=True,init=initial_state['concentration'].squeeze(),c=u0,ntime=N_t,N_x=len(x_fine),delt=dt,delx=dx)
datc=CD2_data(linear=True,init=initial_state['concentration'].squeeze(),c=u0,ntime=N_t,N_x=len(x_fine),delt=dt,delx=dx)
###Output
_____no_output_____
###Markdown
Compare the three schemes at t = t_end
###Code
def compare_t(nt):
plt.rcParams.update({'font.size': 18})
plt.figure(figsize=(8,8))
plt.plot(x_fine,datu[nt],'g',label='Upwind predictions')
plt.plot(x_fine,datc[nt],'r',label='CD2 predictions')
plt.plot(x_fine,integrated_T1['concentration'].numpy().squeeze()[nt],'m',label='Neural Network predictions',linestyle='dashed',linewidth=6, markersize=12)
plt.plot(x_fine,data_ana[nt],'b',label='True Solution')
plt.plot(x_fine,-np.ones_like(x_fine),'k')
plt.plot(x_fine,np.ones_like(x_fine),'k')
plt.xlabel('x')
plt.ylabel('Concentration')
plt.legend()
plt.grid()
# plt.title(f'Concentration plot at time step N_t = {nt}',y=1.08)
plt.title(f'Concentration plot at time step N_t = {nt}')
# plt.tight_layout()
plt.show()
compare_t(N_t-1)
# compare_t(30)
# compare_t(1000)
# compare_t(1500)
###Output
_____no_output_____
###Markdown
Compare Amplitude progression with time for 3 schemes
###Code
ampUp_avg = np.stack([np.mean(np.abs(datu[i])) for i in range(N_t)])
ampCD_avg = np.stack([np.mean(np.abs(datc[i])) for i in range(N_t)])
ampNN_avg = np.stack([np.mean(np.abs(integrated_T1['concentration'].numpy().squeeze()[i])) for i in range(N_t)])
ampGround_avg = np.stack([np.mean(np.abs(data_ana[i])) for i in range(N_t)])
plt.rcParams.update({'font.size': 18})
plt.figure(figsize=(8,8))
plt.plot(np.arange(N_t),ampUp_avg,'g',label='Upwind Scheme')
plt.plot(np.arange(N_t),ampCD_avg,'r',label='CD2 Scheme')
plt.plot(np.arange(N_t),ampNN_avg,'k',label='Neural Network')
markers_on = np.arange(0,1900,100).tolist()
plt.plot(np.arange(N_t),ampGround_avg,'D',markevery=markers_on,label='Ground Truth')
plt.ylabel('Mean Absolute Amplitude')
plt.xlabel('N_t = Time step')
plt.legend()
plt.title('Mean Amplitude with time')
plt.show()
plt.rcParams.update({'font.size': 18})
plt.figure(figsize=(8,8))
# plt.plot(np.arange(N_t),ampUp_avg,'g',label='Upwind Scheme')
# plt.plot(np.arange(N_t),ampCD_avg,'r',label='CD2 Scheme')
plt.plot(np.arange(N_t),ampNN_avg,'g',label='Neural Network')
plt.plot(np.arange(N_t),ampGround_avg,'.',label='Ground Truth')
plt.ylabel('Mean Absolute Amplitude')
plt.xlabel('N_t = Time step')
plt.legend()
plt.title('Mean Amplitude with time')
plt.show()
###Output
_____no_output_____
###Markdown
Compare Order of Accuracy
###Code
import numpy as np
import matplotlib.pyplot as plt
ne=np.loadtxt('nn')
ce=np.loadtxt('cd')
ue=np.loadtxt('up')
nl=2**(np.arange(5,9))
def OOA(x,y,m,c,mylabel='Neural Network Error'):
plt.rcParams.update({'font.size': 20})
plt.figure(figsize=(8,6))
plt.plot(np.log(x),np.log(y),'o-',label=mylabel,linestyle='-.',linewidth=3, markersize=12)
plt.plot(np.log(x),-m*np.log(x)+c,'o-',label=f'Line with slope = -{m}',linestyle='-',linewidth=3, markersize=12)
plt.ylabel('log(err_MAE)')
plt.xlabel('log(N_x)')
plt.grid()
plt.title(' log-log accuracy order')
plt.legend()
plt.show()
def OOA_loop(x,y,m_ls,c_ls,mylabel='Neural Network'):
plt.rcParams.update({'font.size': 20})
plt.figure(figsize=(8,6))
plt.plot(np.log(x),np.log(y),'o-',label=mylabel+' Error',linestyle='-.',linewidth=3, markersize=12)
for i in range(len(m_ls)):
plt.plot(np.log(x),-m_ls[i]*np.log(x)+c_ls[i],'o-',label=f'Line with slope = -{m_ls[i]}',linestyle='-',linewidth=3, markersize=12)
plt.ylabel('log(err_MAE)')
plt.xlabel('log(N_x)')
plt.grid()
plt.title(f'{mylabel} log-log accuracy order')
plt.legend()
plt.show()
OOA(nl,ne,2,0,'Neural Network Error')
OOA(nl,ue,1,0,'Upwind Scheme Error')
OOA(nl,ce,1,0,'CD2 Scheme Error')
OOA_loop(x=nl,y=ue,m_ls=[1,2,3],c_ls=[2,5,7],mylabel='Upwind Scheme')
OOA_loop(x=nl,y=ce,m_ls=[1,2,3],c_ls=[2,5,7],mylabel='CD2 Scheme')
OOA_loop(x=nl,y=ne,m_ls=[1,2,3],c_ls=[0.5,1.5,3],mylabel='Neural Network')
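# Hedged check: estimate each scheme's empirical order of accuracy as the negative
# slope of a least-squares fit of log(error) against log(N_x).
for name, err in [('Neural Network', ne), ('Upwind', ue), ('CD2', ce)]:
    slope = -np.polyfit(np.log(nl), np.log(err), 1)[0]
    print(f'{name}: observed order ~ {slope:.2f}')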
###Output
_____no_output_____
###Markdown
Other ideas
###Code
'N_x other than just powers of 2'
'''Mean solution amplitude with time
MAE with time
MAE with grid points increase'''
###Output
_____no_output_____ |
experiments/siamese_triplet_loss.ipynb | ###Markdown
Siamese Triplets
Siamese triplet loss training creates embedding spaces where similar items are pulled closer to one another and dissimilar items are pushed away from one another. Siamese networks were independently introduced by Bromley et al. (1993) and Baldi and Chauvin (1993) as a similarity-learning algorithm for signature verification and fingerprint verification, respectively. Instead of predicting a class label, these networks directly measure the similarity between samples of the same and differing classes. This is useful for scenarios where the number of classes is very large or unknown during training, or where there are only a few training samples per class (Chopra et al., 2005). For the sampling of triplets, we employ a technique called online semi-hard mining (Schroff et al., 2015). For a given minibatch, we first compute the embeddings for all the samples in the minibatch. To make up the triplets for the minibatch, all possible anchor-positive pairs $(\boldsymbol{x}_a, \boldsymbol{x}_p)$ are selected, and each is paired with a semi-hard negative that satisfies $D(\boldsymbol{x}_a, \boldsymbol{x}_p) < D(\boldsymbol{x}_a, \boldsymbol{x}_n) < D(\boldsymbol{x}_a, \boldsymbol{x}_p) + m$, where $D(\cdot)$ is the distance function and $m$ is the margin. We train the multi-head attention encoder architecture using the Siamese triplet loss.
###Code
import sys
import os
# sys.path.append(os.path.join(".."))  # path to source relative to current directory
import numpy as np
import gensim
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices("GPU")
tf.config.experimental.set_memory_growth(physical_devices[0], True)
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D
from tensorflow.keras.layers import Dense, Dropout, Embedding, LSTM, Bidirectional, TimeDistributed, Input, Flatten, AdditiveAttention
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
import preprocess_data
import losses
import pandas as pd
data = pd.read_csv('dataset_7B', delimiter = ';', engine = 'python')
data_text = data.loc[data['set'] == 'Train'][['helpdesk_question']]
number_of_classes = data.loc[data['set'] == 'Train']['helpdesk_reply'].value_counts().shape[0]
data = data[['helpdesk_question', 'helpdesk_reply', 'set', 'low_resource']]
responses = pd.DataFrame(data.loc[data['set'] == 'Train']['helpdesk_reply'].value_counts()).reset_index()
responses['reply'] = responses['index']
responses['index'] = responses.index
responses = dict(responses.set_index('reply')['index'])
len(responses)
data_text['index'] = data_text.index
documents = data_text
dictionary = preprocess_data.create_dictionary(data_text, 1, 0.25, 95000) #our entire vocabulary
df_train = data.loc[data['set'] == 'Train']
df_train = df_train.reset_index()[['helpdesk_question', 'helpdesk_reply']]
df_train_keep = df_train
df_valid = data.loc[data['set'] == 'Valid']
df_valid = df_valid.reset_index()[['helpdesk_question', 'helpdesk_reply']]
df_test = data.loc[data['set'] == 'Test']
df_test = df_test.reset_index()[['helpdesk_question', 'helpdesk_reply']]
df_LR = data.loc[(data['set'] == 'Test') & (data['low_resource'] == 'True') ]
df_LR = df_LR.reset_index()[['helpdesk_question', 'helpdesk_reply']]
df_train.shape
unique_words = dictionary
len(unique_words) + 1
max_length = 30
min_token_length = 0
word_to_id, id_to_word = preprocess_data.create_lookup_tables(unique_words)
###Output
_____no_output_____
###Markdown
Transforming the input sentence into a sequence of word IDs
###Code
train_x_word_ids = []
for question in df_train['helpdesk_question'].apply(preprocess_data.preprocess_question,
args = [unique_words, min_token_length]):
word_ids = preprocess_data.transform_sequence_to_word_ids(question, word_to_id)
train_x_word_ids.append(np.array(word_ids, dtype = float))
train_x_word_ids = np.stack(train_x_word_ids)
print(train_x_word_ids.shape)
val_x_word_ids = []
for question in data['helpdesk_question'].loc[data['set'] == 'Valid'].apply(preprocess_data.preprocess_question,
args = [unique_words, min_token_length]):
word_ids = preprocess_data.transform_sequence_to_word_ids(question, word_to_id)
val_x_word_ids.append(np.array(word_ids, dtype = float))
val_x_word_ids = np.stack(val_x_word_ids)
test_x_word_ids = []
for question in data['helpdesk_question'].loc[data['set'] == 'Test'].apply(preprocess_data.preprocess_question,
args = [unique_words, min_token_length]):
word_ids = preprocess_data.transform_sequence_to_word_ids(question, word_to_id)
test_x_word_ids.append(np.array(word_ids, dtype = float))
test_x_word_ids = np.stack(test_x_word_ids)
LR_x_word_ids = []
for question in data['helpdesk_question'].loc[(data['set'] == 'Test') &
(data['low_resource'] == 'True')].apply(preprocess_data.preprocess_question,
args = [unique_words, min_token_length]):
word_ids = preprocess_data.transform_sequence_to_word_ids(question, word_to_id)
LR_x_word_ids.append(np.array(word_ids, dtype = float))
LR_x_word_ids = np.stack(LR_x_word_ids)
def get_dummies(reply, all_responses):
""" Constructs a one-hot vector for replies
Args:
reply: query item
all_responses: dict containing all the template responses with their corresponding IDs
Return:
a one-hot vector where the corresponding ID of the reply is the one-hot index
"""
Y = np.zeros(len(all_responses), dtype = int)
Y[all_responses[reply]] += 1
return Y
def get_label_id(reply, all_responses):
""" Returns integer ID corresponding to response for easy comparison and classification
Args:
reply: query item
all_responses: dict containing all the template responses with their corresponding IDs
Return:
integer corresponding to each response
"""
return all_responses[reply]
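# Hedged toy example of the two helpers above, using a hypothetical 3-reply lookup.
toy_responses = {'yes': 0, 'no': 1, 'maybe': 2}
print(get_dummies('no', toy_responses))       # [0 1 0]
print(get_label_id('maybe', toy_responses))   # 2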
train_y = np.array(list(df_train['helpdesk_reply'].apply(get_dummies, args = [responses])))
valid_y = np.array(list(df_valid['helpdesk_reply'].apply(get_dummies, args = [responses])))
test_y = np.array(list(df_test['helpdesk_reply'].apply(get_dummies, args = [responses])))
LR_y = np.array(list(df_LR['helpdesk_reply'].apply(get_dummies, args = [responses])))
train_x_word_ids = train_x_word_ids.reshape(train_x_word_ids.shape[:-1])
val_x_word_ids = val_x_word_ids.reshape(val_x_word_ids.shape[:-1])
test_x_word_ids = test_x_word_ids.reshape(test_x_word_ids.shape[:-1])
LR_x_word_ids = LR_x_word_ids.reshape(LR_x_word_ids.shape[:-1])
###Output
_____no_output_____
###Markdown
Transform vectors where the input sentence yields a sequence of length 0
###Code
train_zero_vectors = np.where(train_x_word_ids.sum(axis = 1) == 0.0)[0]
for t in range(train_zero_vectors.shape[0]):
train_x_word_ids[train_zero_vectors[t]][0] += 1
val_zero_vectors = np.where(val_x_word_ids.sum(axis = 1) == 0.0)[0]
for t in range(val_zero_vectors.shape[0]):
val_x_word_ids[val_zero_vectors[t]][0] += 1
###Output
_____no_output_____
###Markdown
Building the encoder (from the Transformer)Original code obtained from https://www.tensorflow.org/tutorials/text/transformer with minor adaptions
###Code
def get_angles(pos, i, d_model):
""" Multiplying angle rates and positions gives a map of the position encoding angles as a
function of depth. The angle rates range from 1 [rads/step] to min_rate [rads/step] over the
vector depth.
Args:
pos: vector of positions
i: embedding vector
d_model: dimension of embedding vector
Returns:
Vector of angle radians
"""
angle_rate = 1/np.power(10000, ((2*i)/np.float32(d_model)))
return pos * angle_rate
def positional_encoding(position, d_model):
""" Calculate positional encodings to inject information about relative and absolute positions/
The positional encodings are obtained by taking the sine and cosine of the angle radians.
Args:
position: maximum position encoding
d_model: dimension of embedding vector
Returns:
A positional encoding vector
"""
angle_rads = get_angles(np.arange(position)[:, np.newaxis],
np.arange(d_model)[np.newaxis, :],
d_model)
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
pos_encoding = angle_rads[np.newaxis, ...]
return tf.cast(pos_encoding, dtype=tf.float32)
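# Hedged sanity check: one row per position and one column per embedding dimension,
# wrapped in a leading batch axis.
print(positional_encoding(position=50, d_model=128).shape)   # expected (1, 50, 128)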
def scaled_dot_product_attention(q, k, v, mask):
""" Calculate the attention weights. q, k, v must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type(padding or look ahead)
but it must be broadcastable for addition.
Args:
q: query shape == (..., seq_len_q, depth)
k: key shape == (..., seq_len_k, depth)
v: value shape == (..., seq_len_v, depth_v)
mask: Float tensor with shape broadcastable
to (..., seq_len_q, seq_len_k). Defaults to None.
Returns:
output, attention_weights
"""
matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)
# scale matmul_qk
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)
return output, attention_weights
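# Hedged shape check with random tensors: the output keeps the query length and the
# value depth, while the weights are (seq_len_q, seq_len_k).
_q = tf.random.uniform((1, 4, 8))    # (batch, seq_len_q, depth)
_k = tf.random.uniform((1, 6, 8))    # (batch, seq_len_k, depth)
_v = tf.random.uniform((1, 6, 16))   # (batch, seq_len_v, depth_v)
_out, _w = scaled_dot_product_attention(_q, _k, _v, mask=None)
print(_out.shape, _w.shape)          # expected (1, 4, 16) and (1, 4, 6)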
class MultiHeadAttention(tf.keras.layers.Layer):
""" Multi-head attention consists of four parts: linear layers that split into heads,
scaled dot-product attention, the concatenation of heads, and a final linear layer.
"""
def __init__(self, d_model, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.d_model = d_model
assert d_model % self.num_heads == 0
self.depth = d_model // self.num_heads
self.wq = tf.keras.layers.Dense(d_model)
self.wk = tf.keras.layers.Dense(d_model)
self.wv = tf.keras.layers.Dense(d_model)
self.dense = tf.keras.layers.Dense(d_model)
def split_heads(self, x, batch_size):
""" Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
Args:
x: feed forward layer
batch_size: number of items in a batch
Returns:
tuple containing (batch size, number of heads, sequence length, depth)
"""
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, v, k, q, mask):
""" Call function to split the heads of the linear layers.
Returns the scaled attention dense layer and attention weights
Args:
q: query shape == (..., seq_len_q, depth)
k: key shape == (..., seq_len_k, depth)
v: value shape == (..., seq_len_v, depth_v)
mask: float tensor with shape broadcastable
Returns:
output, attention_weights
"""
batch_size = tf.shape(q)[0]
q = self.wq(q) # (batch_size, seq_len, d_model)
k = self.wk(k) # (batch_size, seq_len, d_model)
v = self.wv(v) # (batch_size, seq_len, d_model)
q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth)
k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth)
v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth)
# scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
# attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
scaled_attention, attention_weights = scaled_dot_product_attention(
q, k, v, mask)
scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) # (batch_size,
#seq_len_q, num_heads, depth)
concat_attention = tf.reshape(scaled_attention,
(batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model)
output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model)
return output, attention_weights
def point_wise_feed_forward_network(d_model, dff):
""" Construct a two-layer feedforward NN with layer dimensions d_model and dff respectively
and ReLU activations between layers.
Args:
d_model: dimension of embedding layer
dff: dimension of the second layer
Returns:
A two-layer feedforward NN
"""
return tf.keras.Sequential([
tf.keras.layers.Dense(dff, activation='relu'), # (batch_size, seq_len, dff)
tf.keras.layers.Dense(d_model) # (batch_size, seq_len, d_model)
])
class EncoderLayer(tf.keras.layers.Layer):
""" Each encoder layer consists of Multi-head attention (with padding mask) and pointwise
feedforward networks.
"""
def __init__(self, d_model, num_heads, dff, rate=0.1):
super(EncoderLayer, self).__init__()
self.mha = MultiHeadAttention(d_model, num_heads)
self.ffn = point_wise_feed_forward_network(d_model, dff)
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(0.1)
self.dropout2 = tf.keras.layers.Dropout(0.1)
def call(self, x, training=False, mask=None):
""" Constructs the encoder layer.
Args:
x: sequential layer
training: flag indicating training or testing
mask: float tensor with shape broadcastable
"""
attn_output, _ = self.mha(x, x, x, mask) # (batch_size, input_seq_len, d_model)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(x + attn_output) # (batch_size, input_seq_len, d_model)
ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model)
ffn_output = self.dropout2(ffn_output, training=training)
out2 = self.layernorm2(out1 + ffn_output) # (batch_size, input_seq_len, d_model)
return out2
class Encoder(tf.keras.layers.Layer):
""" The Encoder consists of an input embedding, summed with positional encoding, and N encoder layers.
The summation is the input to the encoder layers. In this notebook the final encoder output is pooled to form the sentence representation.
"""
def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
maximum_position_encoding, rate=0.1):
super(Encoder, self).__init__()
self.d_model = d_model
self.num_layers = num_layers
self.embedding = Embedding(input_vocab_size, d_model,)
self.pos_encoding = positional_encoding(maximum_position_encoding, self.d_model)
self.enc_layers = [EncoderLayer(d_model, num_heads, dff, rate) for _ in range(num_layers)]
self.dropout = Dropout(rate)
def call(self, x, training, mask=None):
""" This function constructs the encoder.
Note we move the dropout to right before the summation (of embedding and positional encodings).
Args:
x: sequential layer
training: flag indicating training or testing
mask: float tensor with shape broadcastable
Returns:
An encoder model
"""
seq_len = tf.shape(x)[1]
x = self.embedding(x)
x = self.dropout(x, training = training)
x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
#x = self.dropout(x, training = training)
for i in range(self.num_layers):
x = self.enc_layers[i](x, training, mask)
return x
def multihead_attention_encoder(num_layers, max_features, input_length=30, model_dim=512, dff = 128,
num_heads=4):
""" Constructs a multihead attention encoder model
Args:
num_layers: number of encoder layers
max_features: size of vocabulary
input_length: length of input sequence
model_dim: dimension of embedding vector
dff: dimension of second layer in pointwise FFNN
num_heads: number of heads to split
Returns:
Model object
"""
inputs = Input(shape=(input_length, ))
x = Encoder(num_layers, model_dim, num_heads, dff, max_features, maximum_position_encoding = 10000,
rate=0.5)(inputs)
x = GlobalAveragePooling1D()(x)
outputs = Dense(300, activation=None)(x)
return Model(inputs=inputs, outputs=outputs)
###Output
_____no_output_____
###Markdown
Multi-head Attention Encoder with Average Pooling
We use average pooling to construct a single feature vector from the variable-length sequence of encodings produced by the MHA Encoder. This is then connected to a single dense layer with 300 dimensions. Our MHA has 8 heads, 2 layers, and dropout of 50% to regularize the model during training.
###Code
max_features = len(unique_words) + 1
num_layers = 2
model = multihead_attention_encoder(num_layers, max_features, input_length=30, model_dim=128,
num_heads=8)
model.summary()
###Output
Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 30)] 0
_________________________________________________________________
encoder (Encoder) (None, 30, 128) 7564928
_________________________________________________________________
global_average_pooling1d (Gl (None, 128) 0
_________________________________________________________________
dense_12 (Dense) (None, 300) 38700
=================================================================
Total params: 7,603,628
Trainable params: 7,603,628
Non-trainable params: 0
_________________________________________________________________
###Markdown
Siamese Triplet Loss Training
We train with the Siamese triplet loss using cosine as the distance function and a margin $m$ of 0.5. For online sampling we use a mini-batch size of 256; larger batch sizes consumed too much memory.
###Code
loss = losses.triplet_semihard_loss(margin=0.5, metric="cosine")
es = EarlyStopping(monitor='val_loss', verbose=1, restore_best_weights=False, patience=50)
model.compile(loss=loss, optimizer=tf.keras.optimizers.Adadelta(learning_rate= 0.05))
model.fit(train_x_word_ids, np.array(df_train['helpdesk_reply'].apply(get_label_id, args = [responses])),
batch_size=256,
epochs=1000,
callbacks=[es],
validation_data=(val_x_word_ids, np.array(df_valid['helpdesk_reply'].apply(get_label_id,
args = [responses]))))
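# Hedged, self-contained sketch of the semi-hard rule described earlier; the actual
# selection happens inside losses.triplet_semihard_loss, not in this snippet.
_rng = np.random.RandomState(0)
_emb = _rng.normal(size=(6, 3))
_emb /= np.linalg.norm(_emb, axis=1, keepdims=True)      # unit-norm toy embeddings
_labels = np.array([0, 0, 1, 1, 2, 2])
_margin = 0.5
_dist = 1.0 - _emb @ _emb.T                              # cosine distance matrix
_a, _p = 0, 1                                            # anchor-positive pair (same label)
_d_ap = _dist[_a, _p]
_semi_hard = [n for n in np.where(_labels != _labels[_a])[0]
              if _d_ap < _dist[_a, n] < _d_ap + _margin]
print('semi-hard negatives for anchor 0:', _semi_hard)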
def label_preprocess(entry):
""" Returns integer ID corresponding to response for easy comparison and classification
Args:
entry: query item
responses: dict containing all the template responses with their corresponding IDs
Return:
integer corresponding to each response
"""
if responses.get(entry) != None:
return responses[entry]
else:
return len(responses) #default unknown class
x_train = model.predict(train_x_word_ids)
y_train = df_train_keep['helpdesk_reply'].apply(label_preprocess)
x_valid = model.predict(val_x_word_ids)
y_valid = df_valid['helpdesk_reply'].apply(label_preprocess)
x_test = model.predict(test_x_word_ids)
y_test = df_test['helpdesk_reply'].apply(label_preprocess)
x_LR = model.predict(LR_x_word_ids)
y_LR = df_LR['helpdesk_reply'].apply(label_preprocess)
from sklearn.neighbors import KNeighborsClassifier
def train_knn_model(x_train, y_train, metric, k, weights):
""" Fit k-nearest neighbour model to the sentence embeddings
Args:
x_train: matrix of sentence embeddings
y_train: class labels associated with each sentence embedding
metric: distance metric to use
k: number of neighbours to consider
weights: to either use uniform voting (equal weighting) or weighted voting (the weight of
each vote is proportional to its distance to query)
Returns:
A trained KNN classifier
"""
print(k, 'Nearest Neighbours')
clf = KNeighborsClassifier(n_neighbors=k, weights= weights, metric = metric)
clf.fit(x_train, y_train)
return clf
###Output
_____no_output_____
###Markdown
Validation accuracy
###Code
clf_1NN = train_knn_model(x_train = x_train, y_train = y_train, metric = 'cosine',
k = 1, weights = 'distance')
score = clf_1NN.score(x_train, y_train)
print("Train accuracy", score)
score = clf_1NN.score(x_valid, y_valid)
print("Validation accuracy", score)
clf_5NN = train_knn_model(x_train = x_train, y_train = y_train, metric = 'cosine',
k = 5, weights = 'distance')
score = clf_5NN.score(x_valid, y_valid)
print("Validation accuracy", score)
clf_25NN = train_knn_model(x_train = x_train, y_train = y_train, metric = 'cosine',
k = 25, weights = 'distance')
score = clf_25NN.score(x_valid, y_valid)
print("Validation accuracy", score)
clf_50NN = train_knn_model(x_train = x_train, y_train = y_train, metric = 'cosine',
k = 50, weights = 'distance')
score = clf_50NN.score(x_valid, y_valid)
print("Validation accuracy", score)
###Output
50 Nearest Neighbours
Validation accuracy 0.5836332342356438
###Markdown
Test score
###Code
score = clf_1NN.score(x_test, y_test)
print("Test accuracy on 1-NN", score)
score = clf_5NN.score(x_test, y_test)
print("Test accuracy on 5-NN", score)
score = clf_25NN.score(x_test, y_test)
print("Test accuracy on 25-NN", score)
score = clf_50NN.score(x_test, y_test)
print("Test accuracy on 50-NN", score)
###Output
Test accuracy on 1-NN 0.5298607017652717
Test accuracy on 5-NN 0.5683616169763906
Test accuracy on 25-NN 0.586976080414482
Test accuracy on 50-NN 0.5855799956566252
###Markdown
LR test score
###Code
score = clf_1NN.score(x_LR, y_LR)
print("LR Test accuracy on 1-NN", score)
score = clf_5NN.score(x_LR, y_LR)
print("LR Test accuracy on 5-NN", score)
score = clf_25NN.score(x_LR, y_LR)
print("LR Test accuracy on 25-NN", score)
score = clf_50NN.score(x_LR, y_LR)
print("LR Test accuracy on 50-NN", score)
###Output
LR Test accuracy on 1-NN 0.42818509615384615
LR Test accuracy on 5-NN 0.4586838942307692
LR Test accuracy on 25-NN 0.47866586538461536
LR Test accuracy on 50-NN 0.48091947115384615
###Markdown
Assessing the quality of cross-lingual embeddings
We design a small experiment to assess the quality of the cross-lingual embeddings for English and Zulu. The translations were obtained using Google Translate and verified by a Zulu speaker. We compute the sentence embedding for each English-Zulu translation pair and calculate the cosine distance between the two embeddings.
###Code
def create_sentence_embeddings(question, model, unique_words, min_token_length, word_to_id):
"""Create sentence embeddings from the output of the pretrained model
Args:
question: raw text sentence
model: pretrained sentence embedding model
unique_words: vocabulary of unique words
min_token_length: shortest allowed length for token to be included
word_to_id: dict mapping words to their unique integer IDs
Returns:
A sentence embedding for the input question
"""
q = preprocess_data.preprocess_question(question, unique_words, min_token_length)
word_ids = preprocess_data.transform_sequence_to_word_ids(q, word_to_id)
word_ids = np.array(word_ids, dtype = float)
word_ids = word_ids.reshape((1, word_ids.shape[0]))
embedding = model.predict(word_ids)
return embedding
eng_A = "can you drink coca cola when you are pregnant"
zulu_A = "ungayiphuza yini i-coca cola uma ukhulelwe"
eng_B = "when can i stop breastfeeding"
zulu_B = "ngingakuyeka nini ukuncelisa ibele"
eng_C = "when can I start feeding my baby solid food"
zulu_C = "ngingaqala nini ukondla ingane yami ukudla okuqinile"
eng_D = "what are the signs of labour"
zulu_D = "yiziphi izimpawu zokubeletha"
eng_E = "when can I learn the gender of my baby"
zulu_E = "ngingabazi ubulili bengane yami"
embed_eng_A = create_sentence_embeddings(eng_A, model, unique_words, min_token_length, word_to_id)
embed_eng_B = create_sentence_embeddings(eng_B, model, unique_words, min_token_length, word_to_id)
embed_eng_C = create_sentence_embeddings(eng_C, model, unique_words, min_token_length, word_to_id)
embed_eng_D = create_sentence_embeddings(eng_D, model, unique_words, min_token_length, word_to_id)
embed_eng_E = create_sentence_embeddings(eng_E, model, unique_words, min_token_length, word_to_id)
embed_zulu_A = create_sentence_embeddings(zulu_A, model, unique_words, min_token_length, word_to_id)
embed_zulu_B = create_sentence_embeddings(zulu_B, model, unique_words, min_token_length, word_to_id)
embed_zulu_C = create_sentence_embeddings(zulu_C, model, unique_words, min_token_length, word_to_id)
embed_zulu_D = create_sentence_embeddings(zulu_D, model, unique_words, min_token_length, word_to_id)
embed_zulu_E = create_sentence_embeddings(zulu_E, model, unique_words, min_token_length, word_to_id)
from scipy.spatial.distance import cosine
print("Sentence A:", cosine(embed_eng_A, embed_zulu_A))
print("Sentence B:", cosine(embed_eng_B, embed_zulu_B))
print("Sentence C:", cosine(embed_eng_C, embed_zulu_C))
print("Sentence D:", cosine(embed_eng_D, embed_zulu_D))
print("Sentence E:", cosine(embed_eng_E, embed_zulu_E))
###Output
Sentence A: 0.22015321254730225
Sentence B: 0.3873268961906433
Sentence C: 0.34619617462158203
Sentence D: 0.3381393551826477
Sentence E: 0.6258534491062164
|
introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-lst-format.ipynb | ###Markdown
Image classification training with image format1. [Introduction](Introduction)2. [Prerequisites and Preprocessing](Prerequisites-and-Preprocessing) 1. [Permissions and environment variables](Permissions-and-environment-variables) 2. [Prepare the data](Prepare-the-data)3. [Fine-tuning The Image Classification Model](Fine-tuning-the-Image-classification-model) 1. [Training parameters](Training-parameters) 2. [Training](Training)4. [Deploy The Model](Deploy-the-model) 1. [Create model](Create-model) 2. [Batch transform](Batch-transform) 3. [Realtime inference](Realtime-inference) 1. [Create endpoint configuration](Create-endpoint-configuration) 2. [Create endpoint](Create-endpoint) 3. [Perform inference](Perform-inference) 4. [Clean up](Clean-up) IntroductionWelcome to our end-to-end example of the image classification algorithm training with image format. In this demo, we will use the Amazon SageMaker image classification algorithm in transfer learning mode to fine-tune a pre-trained model (trained on ImageNet data) to learn to classify a new dataset. In particular, the pre-trained model will be fine-tuned using the [Caltech-256 dataset](http://www.vision.caltech.edu/Image_Datasets/Caltech256/). To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on. Prerequisites and Preprocessing Permissions and environment variablesHere we set up the linkage and authentication to AWS services. There are three parts to this:* The roles used to give learning and hosting access to your data. This will automatically be obtained from the role used to start the notebook* The S3 bucket that you want to use for training and model data* The Amazon SageMaker image classification docker image which need not be changed
###Code
%%time
import boto3
import sagemaker
from sagemaker import get_execution_role
from sagemaker import image_uris
role = get_execution_role()
bucket = sagemaker.session.Session().default_bucket()
training_image = image_uris.retrieve(
region=boto3.Session().region_name, framework="image-classification"
)
###Output
_____no_output_____
###Markdown
Fine-tuning the Image classification model Prepare the dataThe Caltech-256 dataset consists of images from 257 categories (the last one being a clutter category) and has 30k images with a minimum of 80 images and a maximum of about 800 images per category. The image classification algorithm can take two types of input formats. The first is a [RecordIO format](https://mxnet.incubator.apache.org/tutorials/basic/record_io.html) (content type: application/x-recordio) and the other is a [lst format](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec) (content type: application/x-image). Files for both these formats are available at http://data.dmlc.ml/mxnet/data/caltech-256/. In this example, we will use the lst format for training and use the training/validation split [specified here](http://data.dmlc.ml/mxnet/data/caltech-256/).
###Code
import os
import urllib.request
def download(url):
filename = url.split("/")[-1]
if not os.path.exists(filename):
urllib.request.urlretrieve(url, filename)
# Caltech-256 image files
s3 = boto3.client("s3")
s3.download_file(
"sagemaker-sample-files",
"datasets/image/caltech-256/256_ObjectCategories.tar",
"256_ObjectCategories.tar",
)
!tar -xf 256_ObjectCategories.tar
# Tool for creating lst file
download("https://raw.githubusercontent.com/apache/incubator-mxnet/master/tools/im2rec.py")
%%bash
mkdir -p caltech_256_train_60
for i in 256_ObjectCategories/*; do
c=`basename $i`
mkdir -p caltech_256_train_60/$c
for j in `ls $i/*.jpg | shuf | head -n 60`; do
mv $j caltech_256_train_60/$c/
done
done
python im2rec.py --list --recursive caltech-256-60-train caltech_256_train_60/
python im2rec.py --list --recursive caltech-256-60-val 256_ObjectCategories/
###Output
_____no_output_____
###Markdown
A .lst file is a tab-separated file with three columns that contains a list of image files. The first column specifies the image index, the second column specifies the class label index for the image, and the third column specifies the relative path of the image file. The image index in the first column should be unique across all of the images. Here we make an image list file using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool from MXNet. You can also create the .lst file in your own way. An example of .lst file is shown as follows.
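To make the layout concrete, here is a small sketch of how one such row could be parsed in Python; the row itself is hypothetical and only illustrates the tab-separated `index<TAB>label<TAB>path` structure described above.

```python
# Hypothetical .lst row (illustration only): image index, class label index, relative image path
row = "42\t7\t008.bathtub/008_0007.jpg"
image_index, class_label, rel_path = row.rstrip("\n").split("\t")
print(int(image_index), int(class_label), rel_path)
```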
###Code
!head -n 3 ./caltech-256-60-train.lst > example.lst
f = open("example.lst", "r")
lst_content = f.read()
print(lst_content)
###Output
_____no_output_____
###Markdown
When you are bringing your own image files to train, please ensure that the .lst file follows the same format as described above. In order to train with the lst format interface, passing the lst file for both training and validation in the appropriate format is mandatory. Once we have the data available in the correct format for training, the next step is to upload the image and .lst file to S3 bucket.
###Code
# Four channels: train, validation, train_lst, and validation_lst
s3train = "s3://{}/image-classification/train/".format(bucket)
s3validation = "s3://{}/image-classification/validation/".format(bucket)
s3train_lst = "s3://{}/image-classification/train_lst/".format(bucket)
s3validation_lst = "s3://{}/image-classification/validation_lst/".format(bucket)
# upload the image files to train and validation channels
!aws s3 cp caltech_256_train_60 $s3train --recursive --quiet
!aws s3 cp 256_ObjectCategories $s3validation --recursive --quiet
# upload the lst files to train_lst and validation_lst channels
!aws s3 cp caltech-256-60-train.lst $s3train_lst --quiet
!aws s3 cp caltech-256-60-val.lst $s3validation_lst --quiet
###Output
_____no_output_____
###Markdown
Now we have all the data stored in the S3 bucket. The image and lst files will be converted to RecordIO files internally by the image classification algorithm. But if you want to do the conversion, the following cell shows how to do it using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool. Note that this is just an example of creating RecordIO files. We are **_not_** using them for training in this notebook. More details on creating RecordIO files can be found in this [tutorial](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec).
###Code
%%bash
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-val 256_ObjectCategories/
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-train caltech_256_train_60/
###Output
_____no_output_____
###Markdown
After you created the RecordIO files, you can upload them to the train and validation channels for training. To train with RecordIO format, you can follow "[Image-classification-fulltraining.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-fulltraining.ipynb)" and "[Image-classification-transfer-learning.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-transfer-learning.ipynb)". Again, we will **_not_** use the RecordIO file for the training. The following sections will only show you how to train a model with images and list files. Before training the model, we need to set up the training parameters. The next section will explain the parameters in detail. Fine-tuning the Image Classification Model Training parametersThere are two kinds of parameters that need to be set for training. The first one are the parameters for the training job. These include:* **Input specification**: These are the training and validation channels that specify the path where training data is present. These are specified in the "InputDataConfig" section. The main parameters that need to be set is the "ContentType" which can be set to "application/x-recordio" or "application/x-image" based on the input data format and the S3Uri which specifies the bucket and the folder where the data is present. * **Output specification**: This is specified in the "OutputDataConfig" section. We just need to specify the path where the output can be stored after training* **Resource config**: This section specifies the type of instance on which to run the training and the number of hosts used for training. If "InstanceCount" is more than 1, then training can be run in a distributed manner. Apart from the above set of parameters, there are hyperparameters that are specific to the algorithm. These are:* **num_layers**: The number of layers (depth) for the network. We use 18 in this sample but other values such as 50, 152 can be used.* **image_shape**: The input image dimensions,'num_channels, height, width', for the network. It should be no larger than the actual image size. The number of channels should be same as the actual image.* **num_training_samples**: This is the total number of training samples. It is set to 15240 for the Caltech dataset with the current split.* **num_classes**: This is the number of output classes for the new dataset. ImageNet was trained with 1000 output classes but the number of output classes can be changed for fine-tuning. For Caltech, we use 257 because it has 256 object categories + 1 clutter class.* **mini_batch_size**: The number of training samples used for each mini batch. In distributed training, the number of training samples used per batch will be N * mini_batch_size where N is the number of hosts on which training is run.* **epochs**: Number of training epochs.* **learning_rate**: Learning rate for training.* **top_k**: Report the top-k accuracy during training.* **resize**: Resize the image before using it for training. The images are resized so that the shortest side is of this parameter. If the parameter is not set, then the training data is used as such without resizing.* **checkpoint_frequency**: Period to store model parameters (in number of epochs).* **use_pretrained_model**: Set to 1 to use pretrained model for transfer learning.
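As a quick sanity check, `num_training_samples` should correspond to the number of rows in the training list file; the value used below (15240) comes from this split. A minimal sketch, assuming `caltech-256-60-train.lst` was created in the working directory by the im2rec step above:

```python
# Sketch: count rows in the training .lst file to cross-check num_training_samples
with open("caltech-256-60-train.lst") as f:
    n_rows = sum(1 for _ in f)
print(n_rows)
```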
###Code
# The algorithm supports multiple network depth (number of layers). They are 18, 34, 50, 101, 152 and 200
# For this training, we will use 18 layers
num_layers = 18
# we need to specify the input image shape for the training data
image_shape = "3,224,224"
# we also need to specify the number of training samples in the training set
num_training_samples = 15240
# specify the number of output classes
num_classes = 257
# batch size for training
mini_batch_size = 128
# number of epochs
epochs = 6
# learning rate
learning_rate = 0.01
# report top_5 accuracy
top_k = 5
# resize image before training
resize = 256
# period to store model parameters (in number of epochs), in this case, we will save parameters from epoch 2, 4, and 6
checkpoint_frequency = 2
# Since we are using transfer learning, we set use_pretrained_model to 1 so that weights can be
# initialized with pre-trained weights
use_pretrained_model = 1
###Output
_____no_output_____
###Markdown
TrainingRun the training using Amazon SageMaker CreateTrainingJob API
###Code
%%time
import time
import boto3
from time import gmtime, strftime
s3 = boto3.client("s3")
# create unique job name
job_name_prefix = "sagemaker-imageclassification-notebook"
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
job_name = job_name_prefix + timestamp
training_params = {
# specify the training docker image
"AlgorithmSpecification": {"TrainingImage": training_image, "TrainingInputMode": "File"},
"RoleArn": role,
"OutputDataConfig": {"S3OutputPath": "s3://{}/{}/output".format(bucket, job_name_prefix)},
"ResourceConfig": {"InstanceCount": 1, "InstanceType": "ml.p2.xlarge", "VolumeSizeInGB": 50},
"TrainingJobName": job_name,
"HyperParameters": {
"image_shape": image_shape,
"num_layers": str(num_layers),
"num_training_samples": str(num_training_samples),
"num_classes": str(num_classes),
"mini_batch_size": str(mini_batch_size),
"epochs": str(epochs),
"learning_rate": str(learning_rate),
"top_k": str(top_k),
"resize": str(resize),
"checkpoint_frequency": str(checkpoint_frequency),
"use_pretrained_model": str(use_pretrained_model),
},
"StoppingCondition": {"MaxRuntimeInSeconds": 360000},
# Training data should be inside a subdirectory called "train"
# Validation data should be inside a subdirectory called "validation"
# The algorithm currently only supports fullyreplicated model (where data is copied onto each machine)
"InputDataConfig": [
{
"ChannelName": "train",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3train,
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "application/x-image",
"CompressionType": "None",
},
{
"ChannelName": "validation",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3validation,
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "application/x-image",
"CompressionType": "None",
},
{
"ChannelName": "train_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3train_lst,
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "application/x-image",
"CompressionType": "None",
},
{
"ChannelName": "validation_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3validation_lst,
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "application/x-image",
"CompressionType": "None",
},
],
}
print("Training job name: {}".format(job_name))
print(
"\nInput Data Location: {}".format(
training_params["InputDataConfig"][0]["DataSource"]["S3DataSource"]
)
)
# create the Amazon SageMaker training job
sagemaker = boto3.client(service_name="sagemaker")
sagemaker.create_training_job(**training_params)
# confirm that the training job has started
status = sagemaker.describe_training_job(TrainingJobName=job_name)["TrainingJobStatus"]
print("Training job current status: {}".format(status))
try:
# wait for the job to finish and report the ending status
sagemaker.get_waiter("training_job_completed_or_stopped").wait(TrainingJobName=job_name)
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info["TrainingJobStatus"]
print("Training job ended with status: " + status)
except:
print("Training failed to start")
# if exception is raised, that means it has failed
message = sagemaker.describe_training_job(TrainingJobName=job_name)["FailureReason"]
print("Training failed with the following error: {}".format(message))
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info["TrainingJobStatus"]
print("Training job ended with status: " + status)
print(training_info)
###Output
_____no_output_____
###Markdown
If you see the message,> `Training job ended with status: Completed`then that means training successfully completed and the output model was stored in the output path specified by `training_params['OutputDataConfig']`.You can also view information about the training job and its status using the AWS SageMaker console. Just click on the "Jobs" tab. Deploy The ModelA trained model does nothing on its own. We now want to use the model to perform inference. For this example, that means predicting the class label given an input image.This section involves several steps,1. [Create model](CreateModel) - Create model for the training output1. [Batch Transform](BatchTransform) - Create a transform job to perform batch inference.1. [Host the model for realtime inference](HostTheModel) - Create an inference endpoint and perform realtime inference. Create modelWe now create a SageMaker Model from the training output. Using the model we can create an Endpoint Configuration.
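As a quick check on the output location mentioned above, you can print the S3 path of the trained model artifact. This is a small sketch reusing the `sagemaker` client and `job_name` from the training cell; the next cell retrieves the same field when creating the model.

```python
# Sketch: confirm where the trained model artifact was written
info = sagemaker.describe_training_job(TrainingJobName=job_name)
print(info["ModelArtifacts"]["S3ModelArtifacts"])
```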
###Code
%%time
import boto3
from time import gmtime, strftime
sage = boto3.Session().client(service_name="sagemaker")
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
model_name = "image-classification-model" + timestamp
print(model_name)
info = sage.describe_training_job(TrainingJobName=job_name)
model_data = info["ModelArtifacts"]["S3ModelArtifacts"]
print(model_data)
hosting_image = image_uris.retrieve(
region=boto3.Session().region_name, framework="image-classification"
)
primary_container = {
"Image": hosting_image,
"ModelDataUrl": model_data,
}
create_model_response = sage.create_model(
ModelName=model_name, ExecutionRoleArn=role, PrimaryContainer=primary_container
)
print(create_model_response["ModelArn"])
###Output
_____no_output_____
###Markdown
Batch transformWe now create a SageMaker Batch Transform job using the model created above to perform batch prediction.
###Code
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
batch_job_name = "image-classification-model" + timestamp
batch_input = s3validation + "001.ak47/"
request = {
"TransformJobName": batch_job_name,
"ModelName": model_name,
"MaxConcurrentTransforms": 16,
"MaxPayloadInMB": 6,
"BatchStrategy": "SingleRecord",
"TransformOutput": {"S3OutputPath": "s3://{}/{}/output".format(bucket, batch_job_name)},
"TransformInput": {
"DataSource": {"S3DataSource": {"S3DataType": "S3Prefix", "S3Uri": batch_input}},
"ContentType": "application/x-image",
"SplitType": "None",
"CompressionType": "None",
},
"TransformResources": {"InstanceType": "ml.p2.xlarge", "InstanceCount": 1},
}
print("Transform job name: {}".format(batch_job_name))
print("\nInput Data Location: {}".format(batch_input))
sagemaker = boto3.client("sagemaker")
sagemaker.create_transform_job(**request)
print("Created Transform job with name: ", batch_job_name)
while True:
response = sagemaker.describe_transform_job(TransformJobName=batch_job_name)
status = response["TransformJobStatus"]
if status == "Completed":
print("Transform job ended with status: " + status)
break
if status == "Failed":
message = response["FailureReason"]
print("Transform failed with the following error: {}".format(message))
raise Exception("Transform job failed")
time.sleep(30)
###Output
_____no_output_____
###Markdown
After the job completes, let's check the prediction results.
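As the helper below shows, each batch transform output object is a JSON document whose "prediction" field holds one probability per class. A tiny illustrative sketch (with made-up numbers) of how the predicted label index is obtained:

```python
import json
import numpy as np

# Illustration only: a fabricated prediction payload with three classes
payload = json.loads('{"prediction": [0.1, 0.7, 0.2]}')
best = int(np.argmax(payload["prediction"]))
print(best, payload["prediction"][best])
```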
###Code
from urllib.parse import urlparse
import json
import numpy as np
s3_client = boto3.client("s3")
object_categories = [
"ak47",
"american-flag",
"backpack",
"baseball-bat",
"baseball-glove",
"basketball-hoop",
"bat",
"bathtub",
"bear",
"beer-mug",
"billiards",
"binoculars",
"birdbath",
"blimp",
"bonsai-101",
"boom-box",
"bowling-ball",
"bowling-pin",
"boxing-glove",
"brain-101",
"breadmaker",
"buddha-101",
"bulldozer",
"butterfly",
"cactus",
"cake",
"calculator",
"camel",
"cannon",
"canoe",
"car-tire",
"cartman",
"cd",
"centipede",
"cereal-box",
"chandelier-101",
"chess-board",
"chimp",
"chopsticks",
"cockroach",
"coffee-mug",
"coffin",
"coin",
"comet",
"computer-keyboard",
"computer-monitor",
"computer-mouse",
"conch",
"cormorant",
"covered-wagon",
"cowboy-hat",
"crab-101",
"desk-globe",
"diamond-ring",
"dice",
"dog",
"dolphin-101",
"doorknob",
"drinking-straw",
"duck",
"dumb-bell",
"eiffel-tower",
"electric-guitar-101",
"elephant-101",
"elk",
"ewer-101",
"eyeglasses",
"fern",
"fighter-jet",
"fire-extinguisher",
"fire-hydrant",
"fire-truck",
"fireworks",
"flashlight",
"floppy-disk",
"football-helmet",
"french-horn",
"fried-egg",
"frisbee",
"frog",
"frying-pan",
"galaxy",
"gas-pump",
"giraffe",
"goat",
"golden-gate-bridge",
"goldfish",
"golf-ball",
"goose",
"gorilla",
"grand-piano-101",
"grapes",
"grasshopper",
"guitar-pick",
"hamburger",
"hammock",
"harmonica",
"harp",
"harpsichord",
"hawksbill-101",
"head-phones",
"helicopter-101",
"hibiscus",
"homer-simpson",
"horse",
"horseshoe-crab",
"hot-air-balloon",
"hot-dog",
"hot-tub",
"hourglass",
"house-fly",
"human-skeleton",
"hummingbird",
"ibis-101",
"ice-cream-cone",
"iguana",
"ipod",
"iris",
"jesus-christ",
"joy-stick",
"kangaroo-101",
"kayak",
"ketch-101",
"killer-whale",
"knife",
"ladder",
"laptop-101",
"lathe",
"leopards-101",
"license-plate",
"lightbulb",
"light-house",
"lightning",
"llama-101",
"mailbox",
"mandolin",
"mars",
"mattress",
"megaphone",
"menorah-101",
"microscope",
"microwave",
"minaret",
"minotaur",
"motorbikes-101",
"mountain-bike",
"mushroom",
"mussels",
"necktie",
"octopus",
"ostrich",
"owl",
"palm-pilot",
"palm-tree",
"paperclip",
"paper-shredder",
"pci-card",
"penguin",
"people",
"pez-dispenser",
"photocopier",
"picnic-table",
"playing-card",
"porcupine",
"pram",
"praying-mantis",
"pyramid",
"raccoon",
"radio-telescope",
"rainbow",
"refrigerator",
"revolver-101",
"rifle",
"rotary-phone",
"roulette-wheel",
"saddle",
"saturn",
"school-bus",
"scorpion-101",
"screwdriver",
"segway",
"self-propelled-lawn-mower",
"sextant",
"sheet-music",
"skateboard",
"skunk",
"skyscraper",
"smokestack",
"snail",
"snake",
"sneaker",
"snowmobile",
"soccer-ball",
"socks",
"soda-can",
"spaghetti",
"speed-boat",
"spider",
"spoon",
"stained-glass",
"starfish-101",
"steering-wheel",
"stirrups",
"sunflower-101",
"superman",
"sushi",
"swan",
"swiss-army-knife",
"sword",
"syringe",
"tambourine",
"teapot",
"teddy-bear",
"teepee",
"telephone-box",
"tennis-ball",
"tennis-court",
"tennis-racket",
"theodolite",
"toaster",
"tomato",
"tombstone",
"top-hat",
"touring-bike",
"tower-pisa",
"traffic-light",
"treadmill",
"triceratops",
"tricycle",
"trilobite-101",
"tripod",
"t-shirt",
"tuning-fork",
"tweezer",
"umbrella-101",
"unicorn",
"vcr",
"video-projector",
"washing-machine",
"watch-101",
"waterfall",
"watermelon",
"welding-mask",
"wheelbarrow",
"windmill",
"wine-bottle",
"xylophone",
"yarmulke",
"yo-yo",
"zebra",
"airplanes-101",
"car-side-101",
"faces-easy-101",
"greyhound",
"tennis-shoes",
"toad",
"clutter",
]
def list_objects(s3_client, bucket, prefix):
response = s3_client.list_objects(Bucket=bucket, Prefix=prefix)
objects = [content["Key"] for content in response["Contents"]]
return objects
def get_label(s3_client, bucket, prefix):
filename = prefix.split("/")[-1]
s3_client.download_file(bucket, prefix, filename)
with open(filename) as f:
data = json.load(f)
index = np.argmax(data["prediction"])
probability = data["prediction"][index]
print("Result: label - " + object_categories[index] + ", probability - " + str(probability))
return object_categories[index], probability
inputs = list_objects(s3_client, bucket, urlparse(batch_input).path.lstrip("/"))
print("Sample inputs: " + str(inputs[:2]))
outputs = list_objects(s3_client, bucket, batch_job_name + "/output")
print("Sample output: " + str(outputs[:2]))
# Check prediction result of the first 2 images
[get_label(s3_client, bucket, prefix) for prefix in outputs[0:2]]
###Output
_____no_output_____
###Markdown
Realtime inferenceWe now host the model with an endpoint and perform realtime inference.This section involves several steps,1. [Create endpoint configuration](CreateEndpointConfiguration) - Create a configuration defining an endpoint.1. [Create endpoint](CreateEndpoint) - Use the configuration to create an inference endpoint.1. [Perform inference](PerformInference) - Perform inference on some input data using the endpoint.1. [Clean up](CleanUp) - Delete the endpoint and model Create endpoint configurationAt launch, we will support configuring REST endpoints in hosting with multiple models, e.g. for A/B testing purposes. In order to support this, customers create an endpoint configuration that describes the distribution of traffic across the models, whether split, shadowed, or sampled in some way.In addition, the endpoint configuration describes the instance type required for model deployment, and at launch will describe the autoscaling configuration.
###Code
from time import gmtime, strftime
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
endpoint_config_name = job_name_prefix + "-epc-" + timestamp
endpoint_config_response = sage.create_endpoint_config(
EndpointConfigName=endpoint_config_name,
ProductionVariants=[
{
"InstanceType": "ml.p2.xlarge",
"InitialInstanceCount": 1,
"ModelName": model_name,
"VariantName": "AllTraffic",
}
],
)
print("Endpoint configuration name: {}".format(endpoint_config_name))
print("Endpoint configuration arn: {}".format(endpoint_config_response["EndpointConfigArn"]))
###Output
_____no_output_____
###Markdown
Create endpointNext, the customer creates the endpoint that serves up the model by specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 9-11 minutes to complete.
###Code
%%time
import time
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
endpoint_name = job_name_prefix + "-ep-" + timestamp
print("Endpoint name: {}".format(endpoint_name))
endpoint_params = {
"EndpointName": endpoint_name,
"EndpointConfigName": endpoint_config_name,
}
endpoint_response = sagemaker.create_endpoint(**endpoint_params)
print("EndpointArn = {}".format(endpoint_response["EndpointArn"]))
###Output
_____no_output_____
###Markdown
Finally, now the endpoint can be created. It may take some time to create the endpoint...
###Code
# get the status of the endpoint
response = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = response["EndpointStatus"]
print("EndpointStatus = {}".format(status))
try:
sagemaker.get_waiter("endpoint_in_service").wait(EndpointName=endpoint_name)
finally:
resp = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = resp["EndpointStatus"]
print("Arn: " + resp["EndpointArn"])
print("Create endpoint ended with status: " + status)
if status != "InService":
message = sagemaker.describe_endpoint(EndpointName=endpoint_name)["FailureReason"]
print("Training failed with the following error: {}".format(message))
raise Exception("Endpoint creation did not succeed")
###Output
_____no_output_____
###Markdown
If you see the message,> `Endpoint creation ended with EndpointStatus = InService`then congratulations! You now have a functioning inference endpoint. You can confirm the endpoint configuration and status by navigating to the "Endpoints" tab in the AWS SageMaker console.We will finally create a runtime object from which we can invoke the endpoint. Perform inferenceFinally, the customer can now validate the model for use. They can obtain the endpoint from the client library using the result from previous operations, and generate classifications from the trained model using that endpoint.
###Code
import boto3
runtime = boto3.Session().client(service_name="runtime.sagemaker")
###Output
_____no_output_____
###Markdown
Download test image
###Code
file_name = "/tmp/test.jpg"
s3.download_file(
"sagemaker-sample-files",
"datasets/image/caltech-256/256_ObjectCategories/008.bathtub/008_0007.jpg",
file_name,
)
# test image
from IPython.display import Image
Image(file_name)
import json
import numpy as np
with open(file_name, "rb") as f:
payload = f.read()
payload = bytearray(payload)
response = runtime.invoke_endpoint(
EndpointName=endpoint_name, ContentType="application/x-image", Body=payload
)
result = response["Body"].read()
# result will be in json format and convert it to ndarray
result = json.loads(result)
# the result will output the probabilities for all classes
# find the class with maximum probability and print the class index
index = np.argmax(result)
object_categories = [
"ak47",
"american-flag",
"backpack",
"baseball-bat",
"baseball-glove",
"basketball-hoop",
"bat",
"bathtub",
"bear",
"beer-mug",
"billiards",
"binoculars",
"birdbath",
"blimp",
"bonsai-101",
"boom-box",
"bowling-ball",
"bowling-pin",
"boxing-glove",
"brain-101",
"breadmaker",
"buddha-101",
"bulldozer",
"butterfly",
"cactus",
"cake",
"calculator",
"camel",
"cannon",
"canoe",
"car-tire",
"cartman",
"cd",
"centipede",
"cereal-box",
"chandelier-101",
"chess-board",
"chimp",
"chopsticks",
"cockroach",
"coffee-mug",
"coffin",
"coin",
"comet",
"computer-keyboard",
"computer-monitor",
"computer-mouse",
"conch",
"cormorant",
"covered-wagon",
"cowboy-hat",
"crab-101",
"desk-globe",
"diamond-ring",
"dice",
"dog",
"dolphin-101",
"doorknob",
"drinking-straw",
"duck",
"dumb-bell",
"eiffel-tower",
"electric-guitar-101",
"elephant-101",
"elk",
"ewer-101",
"eyeglasses",
"fern",
"fighter-jet",
"fire-extinguisher",
"fire-hydrant",
"fire-truck",
"fireworks",
"flashlight",
"floppy-disk",
"football-helmet",
"french-horn",
"fried-egg",
"frisbee",
"frog",
"frying-pan",
"galaxy",
"gas-pump",
"giraffe",
"goat",
"golden-gate-bridge",
"goldfish",
"golf-ball",
"goose",
"gorilla",
"grand-piano-101",
"grapes",
"grasshopper",
"guitar-pick",
"hamburger",
"hammock",
"harmonica",
"harp",
"harpsichord",
"hawksbill-101",
"head-phones",
"helicopter-101",
"hibiscus",
"homer-simpson",
"horse",
"horseshoe-crab",
"hot-air-balloon",
"hot-dog",
"hot-tub",
"hourglass",
"house-fly",
"human-skeleton",
"hummingbird",
"ibis-101",
"ice-cream-cone",
"iguana",
"ipod",
"iris",
"jesus-christ",
"joy-stick",
"kangaroo-101",
"kayak",
"ketch-101",
"killer-whale",
"knife",
"ladder",
"laptop-101",
"lathe",
"leopards-101",
"license-plate",
"lightbulb",
"light-house",
"lightning",
"llama-101",
"mailbox",
"mandolin",
"mars",
"mattress",
"megaphone",
"menorah-101",
"microscope",
"microwave",
"minaret",
"minotaur",
"motorbikes-101",
"mountain-bike",
"mushroom",
"mussels",
"necktie",
"octopus",
"ostrich",
"owl",
"palm-pilot",
"palm-tree",
"paperclip",
"paper-shredder",
"pci-card",
"penguin",
"people",
"pez-dispenser",
"photocopier",
"picnic-table",
"playing-card",
"porcupine",
"pram",
"praying-mantis",
"pyramid",
"raccoon",
"radio-telescope",
"rainbow",
"refrigerator",
"revolver-101",
"rifle",
"rotary-phone",
"roulette-wheel",
"saddle",
"saturn",
"school-bus",
"scorpion-101",
"screwdriver",
"segway",
"self-propelled-lawn-mower",
"sextant",
"sheet-music",
"skateboard",
"skunk",
"skyscraper",
"smokestack",
"snail",
"snake",
"sneaker",
"snowmobile",
"soccer-ball",
"socks",
"soda-can",
"spaghetti",
"speed-boat",
"spider",
"spoon",
"stained-glass",
"starfish-101",
"steering-wheel",
"stirrups",
"sunflower-101",
"superman",
"sushi",
"swan",
"swiss-army-knife",
"sword",
"syringe",
"tambourine",
"teapot",
"teddy-bear",
"teepee",
"telephone-box",
"tennis-ball",
"tennis-court",
"tennis-racket",
"theodolite",
"toaster",
"tomato",
"tombstone",
"top-hat",
"touring-bike",
"tower-pisa",
"traffic-light",
"treadmill",
"triceratops",
"tricycle",
"trilobite-101",
"tripod",
"t-shirt",
"tuning-fork",
"tweezer",
"umbrella-101",
"unicorn",
"vcr",
"video-projector",
"washing-machine",
"watch-101",
"waterfall",
"watermelon",
"welding-mask",
"wheelbarrow",
"windmill",
"wine-bottle",
"xylophone",
"yarmulke",
"yo-yo",
"zebra",
"airplanes-101",
"car-side-101",
"faces-easy-101",
"greyhound",
"tennis-shoes",
"toad",
"clutter",
]
print("Result: label - " + object_categories[index] + ", probability - " + str(result[index]))
###Output
_____no_output_____
###Markdown
Clean upWhen we're done with the endpoint, we can just delete it and the backing instances will be released. Run the cell below to delete the endpoint; if you also want to remove the model, see the sketch that follows.
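A minimal cleanup sketch for the model resource, assuming the `sage` client and `model_name` defined in the Create model section are still in scope (`delete_model` is part of the same boto3 SageMaker client):

```python
# Optional sketch: remove the SageMaker Model resource as well as the endpoint
sage.delete_model(ModelName=model_name)
```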
###Code
sage.delete_endpoint(EndpointName=endpoint_name)
###Output
_____no_output_____
###Markdown
Image classification training with image format demo1. [Introduction](Introduction)2. [Prerequisites and Preprocessing](Prerequisites-and-Preprocessing) 1. [Permissions and environment variables](Permissions-and-environment-variables) 2. [Prepare the data](Prepare-the-data)3. [Fine-tuning The Image Classification Model](Fine-tuning-the-Image-classification-model) 1. [Training parameters](Training-parameters) 2. [Training](Training)4. [Set Up Hosting For The Model](Set-up-hosting-for-the-model) 1. [Create model](Create-model) 2. [Create endpoint configuration](Create-endpoint-configuration) 3. [Create endpoint](Create-endpoint) 4. [Perform inference](Perform-inference) IntroductionWelcome to our end-to-end example of the image classification algorithm training with image format. In this demo, we will use the Amazon SageMaker image classification algorithm in transfer learning mode to fine-tune a pre-trained model (trained on ImageNet data) to learn to classify a new dataset. In particular, the pre-trained model will be fine-tuned using the [Caltech-256 dataset](http://www.vision.caltech.edu/Image_Datasets/Caltech256/). To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on. Prerequisites and Preprocessing Permissions and environment variablesHere we set up the linkage and authentication to AWS services. There are three parts to this:* The roles used to give learning and hosting access to your data. This will automatically be obtained from the role used to start the notebook* The S3 bucket that you want to use for training and model data* The Amazon SageMaker image classification docker image which need not be changed
###Code
%%time
import boto3
from sagemaker import get_execution_role
role = get_execution_role()
bucket='<<bucket-name>>' # customize to your bucket
containers = {'us-west-2': '433757028032.dkr.ecr.us-west-2.amazonaws.com/image-classification:latest',
'us-east-1': '811284229777.dkr.ecr.us-east-1.amazonaws.com/image-classification:latest',
'us-east-2': '825641698319.dkr.ecr.us-east-2.amazonaws.com/image-classification:latest',
'eu-west-1': '685385470294.dkr.ecr.eu-west-1.amazonaws.com/image-classification:latest',
'ap-northeast-1': '501404015308.dkr.ecr.ap-northeast-1.amazonaws.com/image-classification:latest',
'ap-northeast-2': '306986355934.dkr.ecr.ap-northeast-2.amazonaws.com/image-classification:latest'}
training_image = containers[boto3.Session().region_name]
###Output
_____no_output_____
###Markdown
Fine-tuning the Image classification model Prepare the dataThe Caltech-256 dataset consists of images from 257 categories (the last one being a clutter category) and has 30k images with a minimum of 80 images and a maximum of about 800 images per category. The image classification algorithm can take two types of input formats. The first is a [RecordIO format](https://mxnet.incubator.apache.org/tutorials/basic/record_io.html) (content type: application/x-recordio) and the other is a [lst format](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec) (content type: application/x-image). Files for both these formats are available at http://data.dmlc.ml/mxnet/data/caltech-256/. In this example, we will use the lst format for training and use the training/validation split [specified here](http://data.dmlc.ml/mxnet/data/caltech-256/).
###Code
import os
import urllib.request
def download(url):
filename = url.split("/")[-1]
if not os.path.exists(filename):
urllib.request.urlretrieve(url, filename)
# Caltech-256 image files
download('http://www.vision.caltech.edu/Image_Datasets/Caltech256/256_ObjectCategories.tar')
!tar -xf 256_ObjectCategories.tar
# Tool for creating lst file
download('https://raw.githubusercontent.com/apache/incubator-mxnet/master/tools/im2rec.py')
%%bash
mkdir -p caltech_256_train_60
for i in 256_ObjectCategories/*; do
c=`basename $i`
mkdir -p caltech_256_train_60/$c
for j in `ls $i/*.jpg | shuf | head -n 60`; do
mv $j caltech_256_train_60/$c/
done
done
python im2rec.py --list --recursive caltech-256-60-train caltech_256_train_60/
python im2rec.py --list --recursive caltech-256-60-val 256_ObjectCategories/
###Output
_____no_output_____
###Markdown
A .lst file is a tab-separated file with three columns that contains a list of image files. The first column specifies the image index, the second column specifies the class label index for the image, and the third column specifies the relative path of the image file. The image index in the first column should be unique across all of the images. Here we make an image list file using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool from MXNet. You can also create the .lst file in your own way. An example of .lst file is shown as follows.
###Code
!head -n 3 ./caltech-256-60-train.lst > example.lst
f = open('example.lst','r')
lst_content = f.read()
print(lst_content)
###Output
_____no_output_____
###Markdown
When you are bringing your own image files to train, please ensure that the .lst file follows the same format as described above. In order to train with the lst format interface, passing the lst file for both training and validation in the appropriate format is mandatory. Once we have the data available in the correct format for training, the next step is to upload the image and .lst file to S3 bucket.
###Code
# Four channels: train, validation, train_lst, and validation_lst
s3train = 's3://{}/train/'.format(bucket)
s3validation = 's3://{}/validation/'.format(bucket)
s3train_lst = 's3://{}/train_lst/'.format(bucket)
s3validation_lst = 's3://{}/validation_lst/'.format(bucket)
# upload the image files to train and validation channels
!aws s3 cp caltech_256_train_60 $s3train --recursive --quiet
!aws s3 cp 256_ObjectCategories $s3validation --recursive --quiet
# upload the lst files to train_lst and validation_lst channels
!aws s3 cp caltech-256-60-train.lst $s3train_lst --quiet
!aws s3 cp caltech-256-60-val.lst $s3validation_lst --quiet
###Output
_____no_output_____
###Markdown
Now we have all the data stored in the S3 bucket. The image and lst files will be converted to RecordIO files internally by the image classification algorithm. But if you want to do the conversion, the following cell shows how to do it using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool. Note that this is just an example of creating RecordIO files. We are **_not_** using them for training in this notebook. More details on creating RecordIO files can be found in this [tutorial](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec).
###Code
%%bash
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-val 256_ObjectCategories/
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-train caltech_256_train_60/
###Output
_____no_output_____
###Markdown
After you created the RecordIO files, you can upload them to the train and validation channels for training. To train with RecordIO format, you can follow "[Image-classification-fulltraining.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-fulltraining.ipynb)" and "[Image-classification-transfer-learning.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-transfer-learning.ipynb)". Again, we will **_not_** use the RecordIO file for the training. The following sections will only show you how to train a model with images and list files. Before training the model, we need to set up the training parameters. The next section will explain the parameters in detail. Fine-tuning the Image Classification Model Training parametersThere are two kinds of parameters that need to be set for training. The first one are the parameters for the training job. These include:* **Input specification**: These are the training and validation channels that specify the path where training data is present. These are specified in the "InputDataConfig" section. The main parameters that need to be set is the "ContentType" which can be set to "application/x-recordio" or "application/x-image" based on the input data format and the S3Uri which specifies the bucket and the folder where the data is present. * **Output specification**: This is specified in the "OutputDataConfig" section. We just need to specify the path where the output can be stored after training* **Resource config**: This section specifies the type of instance on which to run the training and the number of hosts used for training. If "InstanceCount" is more than 1, then training can be run in a distributed manner. Apart from the above set of parameters, there are hyperparameters that are specific to the algorithm. These are:* **num_layers**: The number of layers (depth) for the network. We use 18 in this sample but other values such as 50, 152 can be used.* **image_shape**: The input image dimensions,'num_channels, height, width', for the network. It should be no larger than the actual image size. The number of channels should be same as the actual image.* **num_training_samples**: This is the total number of training samples. It is set to 15240 for the Caltech dataset with the current split.* **num_classes**: This is the number of output classes for the new dataset. ImageNet was trained with 1000 output classes but the number of output classes can be changed for fine-tuning. For Caltech, we use 257 because it has 256 object categories + 1 clutter class.* **mini_batch_size**: The number of training samples used for each mini batch. In distributed training, the number of training samples used per batch will be N * mini_batch_size where N is the number of hosts on which training is run.* **epochs**: Number of training epochs.* **learning_rate**: Learning rate for training.* **top_k**: Report the top-k accuracy during training.* **resize**: Resize the image before using it for training. The images are resized so that the shortest side is of this parameter. If the parameter is not set, then the training data is used as such without resizing.* **checkpoint_frequency**: Period to store model parameters (in number of epochs).* **use_pretrained_model**: Set to 1 to use pretrained model for transfer learning.
###Code
# The algorithm supports multiple network depth (number of layers). They are 18, 34, 50, 101, 152 and 200
# For this training, we will use 18 layers
num_layers = 18
# we need to specify the input image shape for the training data
image_shape = "3,224,224"
# we also need to specify the number of training samples in the training set
num_training_samples = 15240
# specify the number of output classes
num_classes = 257
# batch size for training
mini_batch_size = 128
# number of epochs
epochs = 6
# learning rate
learning_rate = 0.01
# report top_5 accuracy
top_k = 5
# resize image before training
resize = 256
# period to store model parameters (in number of epochs), in this case, we will save parameters from epoch 2, 4, and 6
checkpoint_frequency = 2
# Since we are using transfer learning, we set use_pretrained_model to 1 so that weights can be
# initialized with pre-trained weights
use_pretrained_model = 1
###Output
_____no_output_____
###Markdown
TrainingRun the training using Amazon SageMaker CreateTrainingJob API
###Code
%%time
import time
import boto3
from time import gmtime, strftime
s3 = boto3.client('s3')
# create unique job name
job_name_prefix = 'sagemaker-imageclassification-notebook'
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
job_name = job_name_prefix + timestamp
training_params = \
{
# specify the training docker image
"AlgorithmSpecification": {
"TrainingImage": training_image,
"TrainingInputMode": "File"
},
"RoleArn": role,
"OutputDataConfig": {
"S3OutputPath": 's3://{}/{}/output'.format(bucket, job_name_prefix)
},
"ResourceConfig": {
"InstanceCount": 1,
"InstanceType": "ml.p2.xlarge",
"VolumeSizeInGB": 50
},
"TrainingJobName": job_name,
"HyperParameters": {
"image_shape": image_shape,
"num_layers": str(num_layers),
"num_training_samples": str(num_training_samples),
"num_classes": str(num_classes),
"mini_batch_size": str(mini_batch_size),
"epochs": str(epochs),
"learning_rate": str(learning_rate),
"top_k": str(top_k),
"resize": str(resize),
"checkpoint_frequency": str(checkpoint_frequency),
"use_pretrained_model": str(use_pretrained_model)
},
"StoppingCondition": {
"MaxRuntimeInSeconds": 360000
},
#Training data should be inside a subdirectory called "train"
#Validation data should be inside a subdirectory called "validation"
#The algorithm currently only supports fullyreplicated model (where data is copied onto each machine)
"InputDataConfig": [
{
"ChannelName": "train",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": 's3://{}/train/'.format(bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
},
{
"ChannelName": "validation",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": 's3://{}/validation/'.format(bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
},
{
"ChannelName": "train_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": 's3://{}/train_lst/'.format(bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
},
{
"ChannelName": "validation_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": 's3://{}/validation_lst/'.format(bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
}
]
}
print('Training job name: {}'.format(job_name))
print('\nInput Data Location: {}'.format(training_params['InputDataConfig'][0]['DataSource']['S3DataSource']))
# create the Amazon SageMaker training job
sagemaker = boto3.client(service_name='sagemaker')
sagemaker.create_training_job(**training_params)
# confirm that the training job has started
status = sagemaker.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']
print('Training job current status: {}'.format(status))
try:
# wait for the job to finish and report the ending status
sagemaker.get_waiter('training_job_completed_or_stopped').wait(TrainingJobName=job_name)
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info['TrainingJobStatus']
print("Training job ended with status: " + status)
except:
print('Training failed to start')
# if exception is raised, that means it has failed
message = sagemaker.describe_training_job(TrainingJobName=job_name)['FailureReason']
print('Training failed with the following error: {}'.format(message))
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info['TrainingJobStatus']
print("Training job ended with status: " + status)
print (training_info)
###Output
_____no_output_____
###Markdown
If you see the message,> `Training job ended with status: Completed`then that means training successfully completed and the output model was stored in the output path specified by `training_params['OutputDataConfig']`.You can also view information about the training job and its status using the AWS SageMaker console. Just click on the "Jobs" tab. Set Up Hosting For The ModelA trained model does nothing on its own. We now want to use the model to perform inference. For this example, that means predicting the class label given an input image.This section involves several steps,1. [Create model](CreateModel) - Create model for the training output1. [Create endpoint configuration](CreateEndpointConfiguration) - Create a configuration defining an endpoint.1. [Create endpoint](CreateEndpoint) - Use the configuration to create an inference endpoint.1. [Perform inference](PerformInference) - Perform inference on some input data using the endpoint. Create modelWe now create a SageMaker Model from the training output. Using the model we can create an Endpoint Configuration.
###Code
%%time
import boto3
from time import gmtime, strftime
sage = boto3.Session().client(service_name='sagemaker')
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
model_name="image-classification-model" + timestamp
print(model_name)
info = sage.describe_training_job(TrainingJobName=job_name)
model_data = info['ModelArtifacts']['S3ModelArtifacts']
print(model_data)
hosting_image = containers[boto3.Session().region_name]
primary_container = {
'Image': hosting_image,
'ModelDataUrl': model_data,
}
create_model_response = sage.create_model(
ModelName = model_name,
ExecutionRoleArn = role,
PrimaryContainer = primary_container)
print(create_model_response['ModelArn'])
###Output
_____no_output_____
###Markdown
Create endpoint configurationAt launch, we will support configuring REST endpoints in hosting with multiple models, e.g. for A/B testing purposes. In order to support this, customers create an endpoint configuration that describes the distribution of traffic across the models, whether split, shadowed, or sampled in some way.In addition, the endpoint configuration describes the instance type required for model deployment, and at launch will describe the autoscaling configuration.
###Code
from time import gmtime, strftime
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_config_name = job_name_prefix + '-epc-' + timestamp
endpoint_config_response = sage.create_endpoint_config(
EndpointConfigName = endpoint_config_name,
ProductionVariants=[{
'InstanceType':'ml.p2.xlarge',
'InitialInstanceCount':1,
'ModelName':model_name,
'VariantName':'AllTraffic'}])
print('Endpoint configuration name: {}'.format(endpoint_config_name))
print('Endpoint configuration arn: {}'.format(endpoint_config_response['EndpointConfigArn']))
###Output
_____no_output_____
###Markdown
Create endpointLastly, the customer creates the endpoint that serves up the model by specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 9-11 minutes to complete.
###Code
%%time
import time
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_name = job_name_prefix + '-ep-' + timestamp
print('Endpoint name: {}'.format(endpoint_name))
endpoint_params = {
'EndpointName': endpoint_name,
'EndpointConfigName': endpoint_config_name,
}
endpoint_response = sagemaker.create_endpoint(**endpoint_params)
print('EndpointArn = {}'.format(endpoint_response['EndpointArn']))
###Output
_____no_output_____
###Markdown
Finally, now the endpoint can be created. It may take some time to create the endpoint...
###Code
# get the status of the endpoint
response = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = response['EndpointStatus']
print('EndpointStatus = {}'.format(status))
try:
sagemaker.get_waiter('endpoint_in_service').wait(EndpointName=endpoint_name)
finally:
resp = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = resp['EndpointStatus']
print("Arn: " + resp['EndpointArn'])
print("Create endpoint ended with status: " + status)
if status != 'InService':
message = sagemaker.describe_endpoint(EndpointName=endpoint_name)['FailureReason']
print('Training failed with the following error: {}'.format(message))
raise Exception('Endpoint creation did not succeed')
###Output
_____no_output_____
###Markdown
If you see the message,> `Endpoint creation ended with EndpointStatus = InService`then congratulations! You now have a functioning inference endpoint. You can confirm the endpoint configuration and status by navigating to the "Endpoints" tab in the AWS SageMaker console.We will finally create a runtime object from which we can invoke the endpoint. Perform inferenceFinally, the customer can now validate the model for use. They can obtain the endpoint from the client library using the result from previous operations, and generate classifications from the trained model using that endpoint.
###Code
import boto3
runtime = boto3.Session().client(service_name='runtime.sagemaker')
###Output
_____no_output_____
###Markdown
Download test image
###Code
!wget -O /tmp/test.jpg http://www.vision.caltech.edu/Image_Datasets/Caltech256/images/008.bathtub/008_0007.jpg
file_name = '/tmp/test.jpg'
# test image
from IPython.display import Image
Image(file_name)
import json
import numpy as np
with open(file_name, 'rb') as f:
payload = f.read()
payload = bytearray(payload)
response = runtime.invoke_endpoint(EndpointName=endpoint_name,
ContentType='application/x-image',
Body=payload)
result = response['Body'].read()
# result will be in json format and convert it to ndarray
result = json.loads(result)
# the result will output the probabilities for all classes
# find the class with maximum probability and print the class index
index = np.argmax(result)
object_categories = ['ak47', 'american-flag', 'backpack', 'baseball-bat', 'baseball-glove', 'basketball-hoop', 'bat', 'bathtub', 'bear', 'beer-mug', 'billiards', 'binoculars', 'birdbath', 'blimp', 'bonsai-101', 'boom-box', 'bowling-ball', 'bowling-pin', 'boxing-glove', 'brain-101', 'breadmaker', 'buddha-101', 'bulldozer', 'butterfly', 'cactus', 'cake', 'calculator', 'camel', 'cannon', 'canoe', 'car-tire', 'cartman', 'cd', 'centipede', 'cereal-box', 'chandelier-101', 'chess-board', 'chimp', 'chopsticks', 'cockroach', 'coffee-mug', 'coffin', 'coin', 'comet', 'computer-keyboard', 'computer-monitor', 'computer-mouse', 'conch', 'cormorant', 'covered-wagon', 'cowboy-hat', 'crab-101', 'desk-globe', 'diamond-ring', 'dice', 'dog', 'dolphin-101', 'doorknob', 'drinking-straw', 'duck', 'dumb-bell', 'eiffel-tower', 'electric-guitar-101', 'elephant-101', 'elk', 'ewer-101', 'eyeglasses', 'fern', 'fighter-jet', 'fire-extinguisher', 'fire-hydrant', 'fire-truck', 'fireworks', 'flashlight', 'floppy-disk', 'football-helmet', 'french-horn', 'fried-egg', 'frisbee', 'frog', 'frying-pan', 'galaxy', 'gas-pump', 'giraffe', 'goat', 'golden-gate-bridge', 'goldfish', 'golf-ball', 'goose', 'gorilla', 'grand-piano-101', 'grapes', 'grasshopper', 'guitar-pick', 'hamburger', 'hammock', 'harmonica', 'harp', 'harpsichord', 'hawksbill-101', 'head-phones', 'helicopter-101', 'hibiscus', 'homer-simpson', 'horse', 'horseshoe-crab', 'hot-air-balloon', 'hot-dog', 'hot-tub', 'hourglass', 'house-fly', 'human-skeleton', 'hummingbird', 'ibis-101', 'ice-cream-cone', 'iguana', 'ipod', 'iris', 'jesus-christ', 'joy-stick', 'kangaroo-101', 'kayak', 'ketch-101', 'killer-whale', 'knife', 'ladder', 'laptop-101', 'lathe', 'leopards-101', 'license-plate', 'lightbulb', 'light-house', 'lightning', 'llama-101', 'mailbox', 'mandolin', 'mars', 'mattress', 'megaphone', 'menorah-101', 'microscope', 'microwave', 'minaret', 'minotaur', 'motorbikes-101', 'mountain-bike', 'mushroom', 'mussels', 'necktie', 'octopus', 'ostrich', 'owl', 'palm-pilot', 'palm-tree', 'paperclip', 'paper-shredder', 'pci-card', 'penguin', 'people', 'pez-dispenser', 'photocopier', 'picnic-table', 'playing-card', 'porcupine', 'pram', 'praying-mantis', 'pyramid', 'raccoon', 'radio-telescope', 'rainbow', 'refrigerator', 'revolver-101', 'rifle', 'rotary-phone', 'roulette-wheel', 'saddle', 'saturn', 'school-bus', 'scorpion-101', 'screwdriver', 'segway', 'self-propelled-lawn-mower', 'sextant', 'sheet-music', 'skateboard', 'skunk', 'skyscraper', 'smokestack', 'snail', 'snake', 'sneaker', 'snowmobile', 'soccer-ball', 'socks', 'soda-can', 'spaghetti', 'speed-boat', 'spider', 'spoon', 'stained-glass', 'starfish-101', 'steering-wheel', 'stirrups', 'sunflower-101', 'superman', 'sushi', 'swan', 'swiss-army-knife', 'sword', 'syringe', 'tambourine', 'teapot', 'teddy-bear', 'teepee', 'telephone-box', 'tennis-ball', 'tennis-court', 'tennis-racket', 'theodolite', 'toaster', 'tomato', 'tombstone', 'top-hat', 'touring-bike', 'tower-pisa', 'traffic-light', 'treadmill', 'triceratops', 'tricycle', 'trilobite-101', 'tripod', 't-shirt', 'tuning-fork', 'tweezer', 'umbrella-101', 'unicorn', 'vcr', 'video-projector', 'washing-machine', 'watch-101', 'waterfall', 'watermelon', 'welding-mask', 'wheelbarrow', 'windmill', 'wine-bottle', 'xylophone', 'yarmulke', 'yo-yo', 'zebra', 'airplanes-101', 'car-side-101', 'faces-easy-101', 'greyhound', 'tennis-shoes', 'toad', 'clutter']
print("Result: label - " + object_categories[index] + ", probability - " + str(result[index]))
###Output
_____no_output_____
###Markdown
Clean upWhen we're done with the endpoint, we can just delete it and the backing instances will be released. Run the following cell to delete the endpoint and model.
###Code
sage.delete_endpoint(EndpointName=endpoint_name)
###Output
_____no_output_____
###Markdown
Image classification training with image format1. [Introduction](Introduction)2. [Prerequisites and Preprocessing](Prerequisites-and-Preprocessing) 1. [Permissions and environment variables](Permissions-and-environment-variables) 2. [Prepare the data](Prepare-the-data)3. [Fine-tuning The Image Classification Model](Fine-tuning-the-Image-classification-model) 1. [Training parameters](Training-parameters) 2. [Training](Training)4. [Deploy The Model](Deploy-the-model) 1. [Create model](Create-model) 2. [Batch transform](Batch-transform) 3. [Realtime inference](Realtime-inference) 1. [Create endpoint configuration](Create-endpoint-configuration) 2. [Create endpoint](Create-endpoint) 3. [Perform inference](Perform-inference) 4. [Clean up](Clean-up) IntroductionWelcome to our end-to-end example of the image classification algorithm training with image format. In this demo, we will use the Amazon SageMaker image classification algorithm in transfer learning mode to fine-tune a pre-trained model (trained on ImageNet data) to learn to classify a new dataset. In particular, the pre-trained model will be fine-tuned using the [Caltech-256 dataset](http://www.vision.caltech.edu/Image_Datasets/Caltech256/). To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on. Prerequisites and Preprocessing Permissions and environment variablesHere we set up the linkage and authentication to AWS services. There are three parts to this:* The roles used to give learning and hosting access to your data. This will automatically be obtained from the role used to start the notebook* The S3 bucket that you want to use for training and model data* The Amazon SageMaker image classification docker image which need not be changed
###Code
%%time
import boto3
import sagemaker
from sagemaker import get_execution_role
from sagemaker import image_uris
role = get_execution_role()
bucket = sagemaker.session.Session().default_bucket()
training_image = image_uris.retrieve(
region=boto3.Session().region_name, framework="image-classification"
)
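# training_image now holds the ECR URI of the SageMaker built-in image classification container for this region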
###Output
_____no_output_____
###Markdown
Fine-tuning the Image classification model Prepare the dataThe Caltech-256 dataset consists of images from 257 categories (the last one being a clutter category) and has 30k images with a minimum of 80 images and a maximum of about 800 images per category. The image classification algorithm can take two types of input formats. The first is a [RecordIO format](https://mxnet.incubator.apache.org/tutorials/basic/record_io.html) (content type: application/x-recordio) and the other is a [lst format](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec) (content type: application/x-image). Files for both these formats are available at http://data.dmlc.ml/mxnet/data/caltech-256/. In this example, we will use the lst format for training and use the training/validation split [specified here](http://data.dmlc.ml/mxnet/data/caltech-256/).
###Code
import os
import urllib.request
def download(url):
filename = url.split("/")[-1]
if not os.path.exists(filename):
urllib.request.urlretrieve(url, filename)
# Caltech-256 image files
s3 = boto3.client("s3")
s3.download_file(
"sagemaker-sample-files",
"datasets/image/caltech-256/256_ObjectCategories.tar",
"256_ObjectCategories.tar",
)
!tar -xf 256_ObjectCategories.tar
# Tool for creating lst file
download("https://raw.githubusercontent.com/apache/incubator-mxnet/master/tools/im2rec.py")
%%bash
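# Keep 60 randomly selected images per category for training (moved into caltech_256_train_60);
# the images left in 256_ObjectCategories are used as the validation set.
# im2rec.py --list then writes the corresponding train/validation .lst files.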
mkdir -p caltech_256_train_60
for i in 256_ObjectCategories/*; do
c=`basename $i`
mkdir -p caltech_256_train_60/$c
for j in `ls $i/*.jpg | shuf | head -n 60`; do
mv $j caltech_256_train_60/$c/
done
done
python im2rec.py --list --recursive caltech-256-60-train caltech_256_train_60/
python im2rec.py --list --recursive caltech-256-60-val 256_ObjectCategories/
###Output
_____no_output_____
###Markdown
A .lst file is a tab-separated file with three columns that contains a list of image files. The first column specifies the image index, the second column specifies the class label index for the image, and the third column specifies the relative path of the image file. The image index in the first column should be unique across all of the images. Here we make an image list file using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool from MXNet. You can also create the .lst file in your own way. An example of .lst file is shown as follows.
###Code
!head -n 3 ./caltech-256-60-train.lst > example.lst
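# Each row of a .lst file is tab-separated: <image index>\t<class label index>\t<relative image path>
# A row might look like this (values here are purely illustrative): 42	7	008.bathtub/008_0007.jpg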
f = open("example.lst", "r")
lst_content = f.read()
print(lst_content)
###Output
_____no_output_____
###Markdown
When you are bringing your own image files to train, please ensure that the .lst file follows the same format as described above. In order to train with the lst format interface, passing the lst file for both training and validation in the appropriate format is mandatory. Once we have the data available in the correct format for training, the next step is to upload the image and .lst file to S3 bucket.
###Code
# Four channels: train, validation, train_lst, and validation_lst
s3train = "s3://{}/image-classification/train/".format(bucket)
s3validation = "s3://{}/image-classification/validation/".format(bucket)
s3train_lst = "s3://{}/image-classification/train_lst/".format(bucket)
s3validation_lst = "s3://{}/image-classification/validation_lst/".format(bucket)
# upload the image files to train and validation channels
!aws s3 cp caltech_256_train_60 $s3train --recursive --quiet
!aws s3 cp 256_ObjectCategories $s3validation --recursive --quiet
# upload the lst files to train_lst and validation_lst channels
!aws s3 cp caltech-256-60-train.lst $s3train_lst --quiet
!aws s3 cp caltech-256-60-val.lst $s3validation_lst --quiet
###Output
_____no_output_____
###Markdown
Now we have all the data stored in the S3 bucket. The image and lst files will be converted to RecordIO files internally by the image classification algorithm. But if you want to do the conversion yourself, the following cell shows how to do it using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool. Note that this is just an example of creating RecordIO files. We are **_not_** using them for training in this notebook. More details on creating RecordIO files can be found in this [tutorial](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec).
###Code
%%bash
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-val 256_ObjectCategories/
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-train caltech_256_train_60/
###Output
_____no_output_____
###Markdown
After you have created the RecordIO files, you can upload them to the train and validation channels for training. To train with RecordIO format, you can follow "[Image-classification-fulltraining.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-fulltraining.ipynb)" and "[Image-classification-transfer-learning.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-transfer-learning.ipynb)". Again, we will **_not_** use the RecordIO files for the training. The following sections will only show you how to train a model with images and list files. Before training the model, we need to set up the training parameters. The next section will explain the parameters in detail. Fine-tuning the Image Classification Model Training parametersThere are two kinds of parameters that need to be set for training. The first are the parameters for the training job. These include:* **Input specification**: These are the training and validation channels that specify the path where training data is present. These are specified in the "InputDataConfig" section. The main parameters that need to be set are the "ContentType", which can be set to "application/x-recordio" or "application/x-image" based on the input data format, and the S3Uri, which specifies the bucket and the folder where the data is present. * **Output specification**: This is specified in the "OutputDataConfig" section. We just need to specify the path where the output can be stored after training* **Resource config**: This section specifies the type of instance on which to run the training and the number of hosts used for training. If "InstanceCount" is more than 1, then training can be run in a distributed manner. Apart from the above set of parameters, there are hyperparameters that are specific to the algorithm. These are:* **num_layers**: The number of layers (depth) for the network. We use 18 in this sample but other values such as 50, 152 can be used.* **image_shape**: The input image dimensions, 'num_channels, height, width', for the network. It should be no larger than the actual image size. The number of channels should be the same as in the actual image.* **num_training_samples**: This is the total number of training samples. It is set to 15240 for the Caltech dataset with the current split.* **num_classes**: This is the number of output classes for the new dataset. ImageNet was trained with 1000 output classes but the number of output classes can be changed for fine-tuning. For Caltech, we use 257 because it has 256 object categories + 1 clutter class.* **mini_batch_size**: The number of training samples used for each mini batch. In distributed training, the number of training samples used per batch will be N * mini_batch_size where N is the number of hosts on which training is run.* **epochs**: Number of training epochs.* **learning_rate**: Learning rate for training.* **top_k**: Report the top-k accuracy during training.* **resize**: Resize the image before using it for training. The images are resized so that the shortest side equals this parameter. If the parameter is not set, then the training data is used as such without resizing.* **checkpoint_frequency**: Period to store model parameters (in number of epochs).* **use_pretrained_model**: Set to 1 to use a pretrained model for transfer learning.
###Code
# The algorithm supports multiple network depth (number of layers). They are 18, 34, 50, 101, 152 and 200
# For this training, we will use 18 layers
num_layers = 18
# we need to specify the input image shape for the training data
image_shape = "3,224,224"
# we also need to specify the number of training samples in the training set
num_training_samples = 15240
# specify the number of output classes
num_classes = 257
# batch size for training
mini_batch_size = 128
# number of epochs
epochs = 6
# learning rate
learning_rate = 0.01
# report top_5 accuracy
top_k = 5
# resize image before training
resize = 256
# period to store model parameters (in number of epochs), in this case, we will save parameters from epoch 2, 4, and 6
checkpoint_frequency = 2
# Since we are using transfer learning, we set use_pretrained_model to 1 so that weights can be
# initialized with pre-trained weights
use_pretrained_model = 1
###Output
_____no_output_____
###Markdown
TrainingRun the training using Amazon SageMaker CreateTrainingJob API
###Code
%%time
import time
import boto3
from time import gmtime, strftime
s3 = boto3.client("s3")
# create unique job name
job_name_prefix = "sagemaker-imageclassification-notebook"
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
job_name = job_name_prefix + timestamp
training_params = {
# specify the training docker image
"AlgorithmSpecification": {"TrainingImage": training_image, "TrainingInputMode": "File"},
"RoleArn": role,
"OutputDataConfig": {"S3OutputPath": "s3://{}/{}/output".format(bucket, job_name_prefix)},
"ResourceConfig": {"InstanceCount": 1, "InstanceType": "ml.p2.xlarge", "VolumeSizeInGB": 50},
"TrainingJobName": job_name,
"HyperParameters": {
"image_shape": image_shape,
"num_layers": str(num_layers),
"num_training_samples": str(num_training_samples),
"num_classes": str(num_classes),
"mini_batch_size": str(mini_batch_size),
"epochs": str(epochs),
"learning_rate": str(learning_rate),
"top_k": str(top_k),
"resize": str(resize),
"checkpoint_frequency": str(checkpoint_frequency),
"use_pretrained_model": str(use_pretrained_model),
},
"StoppingCondition": {"MaxRuntimeInSeconds": 360000},
# Training data should be inside a subdirectory called "train"
# Validation data should be inside a subdirectory called "validation"
# The algorithm currently only supports fullyreplicated model (where data is copied onto each machine)
"InputDataConfig": [
{
"ChannelName": "train",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3train,
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "application/x-image",
"CompressionType": "None",
},
{
"ChannelName": "validation",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3validation,
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "application/x-image",
"CompressionType": "None",
},
{
"ChannelName": "train_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3train_lst,
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "application/x-image",
"CompressionType": "None",
},
{
"ChannelName": "validation_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3validation_lst,
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "application/x-image",
"CompressionType": "None",
},
],
}
print("Training job name: {}".format(job_name))
print(
"\nInput Data Location: {}".format(
training_params["InputDataConfig"][0]["DataSource"]["S3DataSource"]
)
)
# create the Amazon SageMaker training job
sagemaker = boto3.client(service_name="sagemaker")
sagemaker.create_training_job(**training_params)
# confirm that the training job has started
status = sagemaker.describe_training_job(TrainingJobName=job_name)["TrainingJobStatus"]
print("Training job current status: {}".format(status))
try:
# wait for the job to finish and report the ending status
sagemaker.get_waiter("training_job_completed_or_stopped").wait(TrainingJobName=job_name)
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info["TrainingJobStatus"]
print("Training job ended with status: " + status)
except:
print("Training failed to start")
# if exception is raised, that means it has failed
message = sagemaker.describe_training_job(TrainingJobName=job_name)["FailureReason"]
print("Training failed with the following error: {}".format(message))
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info["TrainingJobStatus"]
print("Training job ended with status: " + status)
print(training_info)
###Output
_____no_output_____
###Markdown
If you see the message,> `Training job ended with status: Completed`then that means training successfully completed and the output model was stored in the output path specified by `training_params['OutputDataConfig']`.You can also view information about a training job, including its status, using the AWS SageMaker console. Just click on the "Jobs" tab. Deploy The ModelA trained model does nothing on its own. We now want to use the model to perform inference. For this example, that means predicting the class label given an input image.This section involves several steps,1. [Create model](CreateModel) - Create model for the training output1. [Batch Transform](BatchTransform) - Create a transform job to perform batch inference.1. [Host the model for realtime inference](HostTheModel) - Create an inference endpoint and perform realtime inference. Create modelWe now create a SageMaker Model from the training output. Using the model we can create an Endpoint Configuration.
###Code
%%time
import boto3
from time import gmtime, strftime
sage = boto3.Session().client(service_name="sagemaker")
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
model_name = "image-classification-model" + timestamp
print(model_name)
info = sage.describe_training_job(TrainingJobName=job_name)
model_data = info["ModelArtifacts"]["S3ModelArtifacts"]
print(model_data)
hosting_image = image_uris.retrieve(
region=boto3.Session().region_name, framework="image-classification"
)
primary_container = {
"Image": hosting_image,
"ModelDataUrl": model_data,
}
create_model_response = sage.create_model(
ModelName=model_name, ExecutionRoleArn=role, PrimaryContainer=primary_container
)
print(create_model_response["ModelArn"])
###Output
_____no_output_____
###Markdown
Batch transformWe now create a SageMaker Batch Transform job using the model created above to perform batch prediction.
###Code
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
batch_job_name = "image-classification-model" + timestamp
batch_input = s3validation + "001.ak47/"
request = {
"TransformJobName": batch_job_name,
"ModelName": model_name,
"MaxConcurrentTransforms": 16,
"MaxPayloadInMB": 6,
"BatchStrategy": "SingleRecord",
"TransformOutput": {"S3OutputPath": "s3://{}/{}/output".format(bucket, batch_job_name)},
"TransformInput": {
"DataSource": {"S3DataSource": {"S3DataType": "S3Prefix", "S3Uri": batch_input}},
"ContentType": "application/x-image",
"SplitType": "None",
"CompressionType": "None",
},
"TransformResources": {"InstanceType": "ml.p2.xlarge", "InstanceCount": 1},
}
print("Transform job name: {}".format(batch_job_name))
print("\nInput Data Location: {}".format(batch_input))
sagemaker = boto3.client("sagemaker")
sagemaker.create_transform_job(**request)
print("Created Transform job with name: ", batch_job_name)
while True:
response = sagemaker.describe_transform_job(TransformJobName=batch_job_name)
status = response["TransformJobStatus"]
if status == "Completed":
print("Transform job ended with status: " + status)
break
if status == "Failed":
message = response["FailureReason"]
print("Transform failed with the following error: {}".format(message))
raise Exception("Transform job failed")
time.sleep(30)
###Output
_____no_output_____
###Markdown
After the job completes, let's check the prediction results.
###Code
from urllib.parse import urlparse
import json
import numpy as np
s3_client = boto3.client("s3")
object_categories = [
"ak47",
"american-flag",
"backpack",
"baseball-bat",
"baseball-glove",
"basketball-hoop",
"bat",
"bathtub",
"bear",
"beer-mug",
"billiards",
"binoculars",
"birdbath",
"blimp",
"bonsai-101",
"boom-box",
"bowling-ball",
"bowling-pin",
"boxing-glove",
"brain-101",
"breadmaker",
"buddha-101",
"bulldozer",
"butterfly",
"cactus",
"cake",
"calculator",
"camel",
"cannon",
"canoe",
"car-tire",
"cartman",
"cd",
"centipede",
"cereal-box",
"chandelier-101",
"chess-board",
"chimp",
"chopsticks",
"cockroach",
"coffee-mug",
"coffin",
"coin",
"comet",
"computer-keyboard",
"computer-monitor",
"computer-mouse",
"conch",
"cormorant",
"covered-wagon",
"cowboy-hat",
"crab-101",
"desk-globe",
"diamond-ring",
"dice",
"dog",
"dolphin-101",
"doorknob",
"drinking-straw",
"duck",
"dumb-bell",
"eiffel-tower",
"electric-guitar-101",
"elephant-101",
"elk",
"ewer-101",
"eyeglasses",
"fern",
"fighter-jet",
"fire-extinguisher",
"fire-hydrant",
"fire-truck",
"fireworks",
"flashlight",
"floppy-disk",
"football-helmet",
"french-horn",
"fried-egg",
"frisbee",
"frog",
"frying-pan",
"galaxy",
"gas-pump",
"giraffe",
"goat",
"golden-gate-bridge",
"goldfish",
"golf-ball",
"goose",
"gorilla",
"grand-piano-101",
"grapes",
"grasshopper",
"guitar-pick",
"hamburger",
"hammock",
"harmonica",
"harp",
"harpsichord",
"hawksbill-101",
"head-phones",
"helicopter-101",
"hibiscus",
"homer-simpson",
"horse",
"horseshoe-crab",
"hot-air-balloon",
"hot-dog",
"hot-tub",
"hourglass",
"house-fly",
"human-skeleton",
"hummingbird",
"ibis-101",
"ice-cream-cone",
"iguana",
"ipod",
"iris",
"jesus-christ",
"joy-stick",
"kangaroo-101",
"kayak",
"ketch-101",
"killer-whale",
"knife",
"ladder",
"laptop-101",
"lathe",
"leopards-101",
"license-plate",
"lightbulb",
"light-house",
"lightning",
"llama-101",
"mailbox",
"mandolin",
"mars",
"mattress",
"megaphone",
"menorah-101",
"microscope",
"microwave",
"minaret",
"minotaur",
"motorbikes-101",
"mountain-bike",
"mushroom",
"mussels",
"necktie",
"octopus",
"ostrich",
"owl",
"palm-pilot",
"palm-tree",
"paperclip",
"paper-shredder",
"pci-card",
"penguin",
"people",
"pez-dispenser",
"photocopier",
"picnic-table",
"playing-card",
"porcupine",
"pram",
"praying-mantis",
"pyramid",
"raccoon",
"radio-telescope",
"rainbow",
"refrigerator",
"revolver-101",
"rifle",
"rotary-phone",
"roulette-wheel",
"saddle",
"saturn",
"school-bus",
"scorpion-101",
"screwdriver",
"segway",
"self-propelled-lawn-mower",
"sextant",
"sheet-music",
"skateboard",
"skunk",
"skyscraper",
"smokestack",
"snail",
"snake",
"sneaker",
"snowmobile",
"soccer-ball",
"socks",
"soda-can",
"spaghetti",
"speed-boat",
"spider",
"spoon",
"stained-glass",
"starfish-101",
"steering-wheel",
"stirrups",
"sunflower-101",
"superman",
"sushi",
"swan",
"swiss-army-knife",
"sword",
"syringe",
"tambourine",
"teapot",
"teddy-bear",
"teepee",
"telephone-box",
"tennis-ball",
"tennis-court",
"tennis-racket",
"theodolite",
"toaster",
"tomato",
"tombstone",
"top-hat",
"touring-bike",
"tower-pisa",
"traffic-light",
"treadmill",
"triceratops",
"tricycle",
"trilobite-101",
"tripod",
"t-shirt",
"tuning-fork",
"tweezer",
"umbrella-101",
"unicorn",
"vcr",
"video-projector",
"washing-machine",
"watch-101",
"waterfall",
"watermelon",
"welding-mask",
"wheelbarrow",
"windmill",
"wine-bottle",
"xylophone",
"yarmulke",
"yo-yo",
"zebra",
"airplanes-101",
"car-side-101",
"faces-easy-101",
"greyhound",
"tennis-shoes",
"toad",
"clutter",
]
def list_objects(s3_client, bucket, prefix):
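    # Return the S3 object keys found under the given prefix (list_objects returns at most 1000 keys per call)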
response = s3_client.list_objects(Bucket=bucket, Prefix=prefix)
objects = [content["Key"] for content in response["Contents"]]
return objects
def get_label(s3_client, bucket, prefix):
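    # Download one batch transform output file (JSON with a "prediction" probability list) and report its most likely class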
filename = prefix.split("/")[-1]
s3_client.download_file(bucket, prefix, filename)
with open(filename) as f:
data = json.load(f)
index = np.argmax(data["prediction"])
probability = data["prediction"][index]
print("Result: label - " + object_categories[index] + ", probability - " + str(probability))
return object_categories[index], probability
inputs = list_objects(s3_client, bucket, urlparse(batch_input).path.lstrip("/"))
print("Sample inputs: " + str(inputs[:2]))
outputs = list_objects(s3_client, bucket, batch_job_name + "/output")
print("Sample output: " + str(outputs[:2]))
# Check prediction result of the first 2 images
[get_label(s3_client, bucket, prefix) for prefix in outputs[0:2]]
###Output
_____no_output_____
###Markdown
Realtime inferenceWe now host the model with an endpoint and perform realtime inference.This section involves several steps,1. [Create endpoint configuration](CreateEndpointConfiguration) - Create a configuration defining an endpoint.1. [Create endpoint](CreateEndpoint) - Use the configuration to create an inference endpoint.1. [Perform inference](PerformInference) - Perform inference on some input data using the endpoint.1. [Clean up](CleanUp) - Delete the endpoint and model Create endpoint configurationAt launch, we will support configuring REST endpoints in hosting with multiple models, e.g. for A/B testing purposes. In order to support this, customers create an endpoint configuration, that describes the distribution of traffic across the models, whether split, shadowed, or sampled in some way.In addition, the endpoint configuration describes the instance type required for model deployment, and at launch will describe the autoscaling configuration.
###Code
from time import gmtime, strftime
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
endpoint_config_name = job_name_prefix + "-epc-" + timestamp
endpoint_config_response = sage.create_endpoint_config(
EndpointConfigName=endpoint_config_name,
ProductionVariants=[
{
"InstanceType": "ml.p2.xlarge",
"InitialInstanceCount": 1,
"ModelName": model_name,
"VariantName": "AllTraffic",
}
],
)
print("Endpoint configuration name: {}".format(endpoint_config_name))
print("Endpoint configuration arn: {}".format(endpoint_config_response["EndpointConfigArn"]))
###Output
_____no_output_____
###Markdown
Create endpointNext, the customer creates the endpoint that serves up the model, through specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 9-11 minutes to complete.
###Code
%%time
import time
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
endpoint_name = job_name_prefix + "-ep-" + timestamp
print("Endpoint name: {}".format(endpoint_name))
endpoint_params = {
"EndpointName": endpoint_name,
"EndpointConfigName": endpoint_config_name,
}
endpoint_response = sagemaker.create_endpoint(**endpoint_params)
print("EndpointArn = {}".format(endpoint_response["EndpointArn"]))
###Output
_____no_output_____
###Markdown
Finally, now the endpoint can be created. It may take a few minutes to create the endpoint...
###Code
# get the status of the endpoint
response = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = response["EndpointStatus"]
print("EndpointStatus = {}".format(status))
try:
sagemaker.get_waiter("endpoint_in_service").wait(EndpointName=endpoint_name)
finally:
resp = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = resp["EndpointStatus"]
print("Arn: " + resp["EndpointArn"])
print("Create endpoint ended with status: " + status)
if status != "InService":
message = sagemaker.describe_endpoint(EndpointName=endpoint_name)["FailureReason"]
print("Training failed with the following error: {}".format(message))
raise Exception("Endpoint creation did not succeed")
###Output
_____no_output_____
###Markdown
If you see the message,> `Create endpoint ended with status: InService`then congratulations! You now have a functioning inference endpoint. You can confirm the endpoint configuration and status by navigating to the "Endpoints" tab in the AWS SageMaker console.We will finally create a runtime object from which we can invoke the endpoint. Perform inferenceFinally, the customer can now validate the model for use. They can obtain the endpoint from the client library using the result from previous operations, and generate classifications from the trained model using that endpoint.
###Code
import boto3
runtime = boto3.Session().client(service_name="runtime.sagemaker")
###Output
_____no_output_____
###Markdown
Download test image
###Code
file_name = "/tmp/test.jpg"
s3.download_file(
"sagemaker-sample-files",
"datasets/image/caltech-256/256_ObjectCategories/008.bathtub/008_0007.jpg",
file_name,
)
# test image
from IPython.display import Image
Image(file_name)
import json
import numpy as np
with open(file_name, "rb") as f:
payload = f.read()
payload = bytearray(payload)
response = runtime.invoke_endpoint(
EndpointName=endpoint_name, ContentType="application/x-image", Body=payload
)
result = response["Body"].read()
# result will be in json format and convert it to ndarray
result = json.loads(result)
# the result will output the probabilities for all classes
# find the class with maximum probability and print the class index
index = np.argmax(result)
object_categories = [
"ak47",
"american-flag",
"backpack",
"baseball-bat",
"baseball-glove",
"basketball-hoop",
"bat",
"bathtub",
"bear",
"beer-mug",
"billiards",
"binoculars",
"birdbath",
"blimp",
"bonsai-101",
"boom-box",
"bowling-ball",
"bowling-pin",
"boxing-glove",
"brain-101",
"breadmaker",
"buddha-101",
"bulldozer",
"butterfly",
"cactus",
"cake",
"calculator",
"camel",
"cannon",
"canoe",
"car-tire",
"cartman",
"cd",
"centipede",
"cereal-box",
"chandelier-101",
"chess-board",
"chimp",
"chopsticks",
"cockroach",
"coffee-mug",
"coffin",
"coin",
"comet",
"computer-keyboard",
"computer-monitor",
"computer-mouse",
"conch",
"cormorant",
"covered-wagon",
"cowboy-hat",
"crab-101",
"desk-globe",
"diamond-ring",
"dice",
"dog",
"dolphin-101",
"doorknob",
"drinking-straw",
"duck",
"dumb-bell",
"eiffel-tower",
"electric-guitar-101",
"elephant-101",
"elk",
"ewer-101",
"eyeglasses",
"fern",
"fighter-jet",
"fire-extinguisher",
"fire-hydrant",
"fire-truck",
"fireworks",
"flashlight",
"floppy-disk",
"football-helmet",
"french-horn",
"fried-egg",
"frisbee",
"frog",
"frying-pan",
"galaxy",
"gas-pump",
"giraffe",
"goat",
"golden-gate-bridge",
"goldfish",
"golf-ball",
"goose",
"gorilla",
"grand-piano-101",
"grapes",
"grasshopper",
"guitar-pick",
"hamburger",
"hammock",
"harmonica",
"harp",
"harpsichord",
"hawksbill-101",
"head-phones",
"helicopter-101",
"hibiscus",
"homer-simpson",
"horse",
"horseshoe-crab",
"hot-air-balloon",
"hot-dog",
"hot-tub",
"hourglass",
"house-fly",
"human-skeleton",
"hummingbird",
"ibis-101",
"ice-cream-cone",
"iguana",
"ipod",
"iris",
"jesus-christ",
"joy-stick",
"kangaroo-101",
"kayak",
"ketch-101",
"killer-whale",
"knife",
"ladder",
"laptop-101",
"lathe",
"leopards-101",
"license-plate",
"lightbulb",
"light-house",
"lightning",
"llama-101",
"mailbox",
"mandolin",
"mars",
"mattress",
"megaphone",
"menorah-101",
"microscope",
"microwave",
"minaret",
"minotaur",
"motorbikes-101",
"mountain-bike",
"mushroom",
"mussels",
"necktie",
"octopus",
"ostrich",
"owl",
"palm-pilot",
"palm-tree",
"paperclip",
"paper-shredder",
"pci-card",
"penguin",
"people",
"pez-dispenser",
"photocopier",
"picnic-table",
"playing-card",
"porcupine",
"pram",
"praying-mantis",
"pyramid",
"raccoon",
"radio-telescope",
"rainbow",
"refrigerator",
"revolver-101",
"rifle",
"rotary-phone",
"roulette-wheel",
"saddle",
"saturn",
"school-bus",
"scorpion-101",
"screwdriver",
"segway",
"self-propelled-lawn-mower",
"sextant",
"sheet-music",
"skateboard",
"skunk",
"skyscraper",
"smokestack",
"snail",
"snake",
"sneaker",
"snowmobile",
"soccer-ball",
"socks",
"soda-can",
"spaghetti",
"speed-boat",
"spider",
"spoon",
"stained-glass",
"starfish-101",
"steering-wheel",
"stirrups",
"sunflower-101",
"superman",
"sushi",
"swan",
"swiss-army-knife",
"sword",
"syringe",
"tambourine",
"teapot",
"teddy-bear",
"teepee",
"telephone-box",
"tennis-ball",
"tennis-court",
"tennis-racket",
"theodolite",
"toaster",
"tomato",
"tombstone",
"top-hat",
"touring-bike",
"tower-pisa",
"traffic-light",
"treadmill",
"triceratops",
"tricycle",
"trilobite-101",
"tripod",
"t-shirt",
"tuning-fork",
"tweezer",
"umbrella-101",
"unicorn",
"vcr",
"video-projector",
"washing-machine",
"watch-101",
"waterfall",
"watermelon",
"welding-mask",
"wheelbarrow",
"windmill",
"wine-bottle",
"xylophone",
"yarmulke",
"yo-yo",
"zebra",
"airplanes-101",
"car-side-101",
"faces-easy-101",
"greyhound",
"tennis-shoes",
"toad",
"clutter",
]
print("Result: label - " + object_categories[index] + ", probability - " + str(result[index]))
###Output
_____no_output_____
###Markdown
Clean upWhen we're done with the endpoint, we can just delete it and the backing instances will be released. Run the following cell to delete the endpoint and model.
###Code
sage.delete_endpoint(EndpointName=endpoint_name)
###Output
_____no_output_____
###Markdown
Image classification training with image format demo1. [Introduction](Introduction)2. [Prerequisites and Preprocessing](Prerequisites-and-Preprocessing) 1. [Permissions and environment variables](Permissions-and-environment-variables) 2. [Prepare the data](Prepare-the-data)3. [Fine-tuning The Image Classification Model](Fine-tuning-the-Image-classification-model) 1. [Training parameters](Training-parameters) 2. [Training](Training)4. [Set Up Hosting For The Model](Set-up-hosting-for-the-model) 1. [Create model](Create-model) 2. [Create endpoint configuration](Create-endpoint-configuration) 3. [Create endpoint](Create-endpoint) 4. [Perform inference](Perform-inference) IntroductionWelcome to our end-to-end example of the image classification algorithm training with image format. In this demo, we will use the Amazon SageMaker image classification algorithm in transfer learning mode to fine-tune a pre-trained model (trained on ImageNet data) to learn to classify a new dataset. In particular, the pre-trained model will be fine-tuned using the [Caltech-256 dataset](http://www.vision.caltech.edu/Image_Datasets/Caltech256/). To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on. Prerequisites and Preprocessing Permissions and environment variablesHere we set up the linkage and authentication to AWS services. There are three parts to this:* The roles used to give learning and hosting access to your data. This will automatically be obtained from the role used to start the notebook* The S3 bucket that you want to use for training and model data* The Amazon SageMaker image classification docker image which need not be changed
###Code
%%time
import boto3
from sagemaker import get_execution_role
role = get_execution_role()
bucket='<<bucket-name>>' # customize to your bucket
containers = {'us-west-2': '433757028032.dkr.ecr.us-west-2.amazonaws.com/image-classification:latest',
'us-east-1': '811284229777.dkr.ecr.us-east-1.amazonaws.com/image-classification:latest',
'us-east-2': '825641698319.dkr.ecr.us-east-2.amazonaws.com/image-classification:latest',
'eu-west-1': '685385470294.dkr.ecr.eu-west-1.amazonaws.com/image-classification:latest',
'ap-northeast-1': '501404015308.dkr.ecr.ap-northeast-1.amazonaws.com/image-classification:latest'}
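# The dictionary above maps each supported region to the ECR URI of the built-in image classification container;
# newer SageMaker Python SDK versions expose the same lookup via sagemaker.image_uris.retrieve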
training_image = containers[boto3.Session().region_name]
###Output
_____no_output_____
###Markdown
Fine-tuning the Image classification model Prepare the dataThe Caltech-256 dataset consists of images from 257 categories (the last one being a clutter category) and has 30k images with a minimum of 80 images and a maximum of about 800 images per category. The image classification algorithm can take two types of input formats. The first is a [RecordIO format](https://mxnet.incubator.apache.org/tutorials/basic/record_io.html) (content type: application/x-recordio) and the other is a [lst format](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec) (content type: application/x-image). Files for both these formats are available at http://data.dmlc.ml/mxnet/data/caltech-256/. In this example, we will use the lst format for training and use the training/validation split [specified here](http://data.dmlc.ml/mxnet/data/caltech-256/).
###Code
import os
import urllib.request
def download(url):
filename = url.split("/")[-1]
if not os.path.exists(filename):
urllib.request.urlretrieve(url, filename)
# Caltech-256 image files
download('http://www.vision.caltech.edu/Image_Datasets/Caltech256/256_ObjectCategories.tar')
!tar -xf 256_ObjectCategories.tar
# Tool for creating lst file
download('https://raw.githubusercontent.com/apache/incubator-mxnet/master/tools/im2rec.py')
%%bash
mkdir -p caltech_256_train_60
for i in 256_ObjectCategories/*; do
c=`basename $i`
mkdir -p caltech_256_train_60/$c
for j in `ls $i/*.jpg | shuf | head -n 60`; do
mv $j caltech_256_train_60/$c/
done
done
python im2rec.py --list --recursive caltech-256-60-train caltech_256_train_60/
python im2rec.py --list --recursive caltech-256-60-val 256_ObjectCategories/
###Output
_____no_output_____
###Markdown
A .lst file is a tab-separated file with three columns that contains a list of image files. The first column specifies the image index, the second column specifies the class label index for the image, and the third column specifies the relative path of the image file. The image index in the first column should be unique across all of the images. Here we make an image list file using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool from MXNet. You can also create the .lst file in your own way. An example of .lst file is shown as follows.
###Code
!head -n 3 ./caltech-256-60-train.lst > example.lst
f = open('example.lst','r')
lst_content = f.read()
print(lst_content)
###Output
_____no_output_____
###Markdown
When you are bringing your own image files to train, please ensure that the .lst file follows the same format as described above. In order to train with the lst format interface, passing the lst file for both training and validation in the appropriate format is mandatory. Once we have the data available in the correct format for training, the next step is to upload the image and .lst file to S3 bucket.
###Code
# Four channels: train, validation, train_lst, and validation_lst
s3train = 's3://{}/train/'.format(bucket)
s3validation = 's3://{}/validation/'.format(bucket)
s3train_lst = 's3://{}/train_lst/'.format(bucket)
s3validation_lst = 's3://{}/validation_lst/'.format(bucket)
# upload the image files to train and validation channels
!aws s3 cp caltech_256_train_60 $s3train --recursive --quiet
!aws s3 cp 256_ObjectCategories $s3validation --recursive --quiet
# upload the lst files to train_lst and validation_lst channels
!aws s3 cp caltech-256-60-train.lst $s3train_lst --quiet
!aws s3 cp caltech-256-60-val.lst $s3validation_lst --quiet
###Output
_____no_output_____
###Markdown
Now we have all the data stored in the S3 bucket. The image and lst files will be converted to RecordIO files internally by the image classification algorithm. But if you want to do the conversion yourself, the following cell shows how to do it using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool. Note that this is just an example of creating RecordIO files. We are **_not_** using them for training in this notebook. More details on creating RecordIO files can be found in this [tutorial](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec).
###Code
%%bash
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-val 256_ObjectCategories/
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-train caltech_256_train_60/
###Output
_____no_output_____
###Markdown
After you have created the RecordIO files, you can upload them to the train and validation channels for training. To train with RecordIO format, you can follow "[Image-classification-fulltraining.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-fulltraining.ipynb)" and "[Image-classification-transfer-learning.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-transfer-learning.ipynb)". Again, we will **_not_** use the RecordIO files for the training. The following sections will only show you how to train a model with images and list files. Before training the model, we need to set up the training parameters. The next section will explain the parameters in detail. Fine-tuning the Image Classification Model Training parametersThere are two kinds of parameters that need to be set for training. The first are the parameters for the training job. These include:* **Input specification**: These are the training and validation channels that specify the path where training data is present. These are specified in the "InputDataConfig" section. The main parameters that need to be set are the "ContentType", which can be set to "application/x-recordio" or "application/x-image" based on the input data format, and the S3Uri, which specifies the bucket and the folder where the data is present. * **Output specification**: This is specified in the "OutputDataConfig" section. We just need to specify the path where the output can be stored after training* **Resource config**: This section specifies the type of instance on which to run the training and the number of hosts used for training. If "InstanceCount" is more than 1, then training can be run in a distributed manner. Apart from the above set of parameters, there are hyperparameters that are specific to the algorithm. These are:* **num_layers**: The number of layers (depth) for the network. We use 18 in this sample but other values such as 50, 152 can be used.* **image_shape**: The input image dimensions, 'num_channels, height, width', for the network. It should be no larger than the actual image size. The number of channels should be the same as in the actual image.* **num_training_samples**: This is the total number of training samples. It is set to 15240 for the Caltech dataset with the current split.* **num_classes**: This is the number of output classes for the new dataset. ImageNet was trained with 1000 output classes but the number of output classes can be changed for fine-tuning. For Caltech, we use 257 because it has 256 object categories + 1 clutter class.* **mini_batch_size**: The number of training samples used for each mini batch. In distributed training, the number of training samples used per batch will be N * mini_batch_size where N is the number of hosts on which training is run.* **epochs**: Number of training epochs.* **learning_rate**: Learning rate for training.* **top_k**: Report the top-k accuracy during training.* **resize**: Resize the image before using it for training. The images are resized so that the shortest side equals this parameter. If the parameter is not set, then the training data is used as such without resizing.* **checkpoint_frequency**: Period to store model parameters (in number of epochs).* **use_pretrained_model**: Set to 1 to use a pretrained model for transfer learning.
###Code
# The algorithm supports multiple network depth (number of layers). They are 18, 34, 50, 101, 152 and 200
# For this training, we will use 18 layers
num_layers = 18
# we need to specify the input image shape for the training data
image_shape = "3,224,224"
# we also need to specify the number of training samples in the training set
num_training_samples = 15240
# specify the number of output classes
num_classes = 257
# batch size for training
mini_batch_size = 128
# number of epochs
epochs = 6
# learning rate
learning_rate = 0.01
# report top_5 accuracy
top_k = 5
# resize image before training
resize = 256
# period to store model parameters (in number of epochs), in this case, we will save parameters from epoch 2, 4, and 6
checkpoint_frequency = 2
# Since we are using transfer learning, we set use_pretrained_model to 1 so that weights can be
# initialized with pre-trained weights
use_pretrained_model = 1
###Output
_____no_output_____
###Markdown
TrainingRun the training using the Amazon SageMaker CreateTrainingJob API
###Code
%%time
import time
import boto3
from time import gmtime, strftime
s3 = boto3.client('s3')
# create unique job name
job_name_prefix = 'sagemaker-imageclassification-notebook'
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
job_name = job_name_prefix + timestamp
training_params = \
{
# specify the training docker image
"AlgorithmSpecification": {
"TrainingImage": training_image,
"TrainingInputMode": "File"
},
"RoleArn": role,
"OutputDataConfig": {
"S3OutputPath": 's3://{}/{}/output'.format(bucket, job_name_prefix)
},
"ResourceConfig": {
"InstanceCount": 1,
"InstanceType": "ml.p2.xlarge",
"VolumeSizeInGB": 50
},
"TrainingJobName": job_name,
"HyperParameters": {
"image_shape": image_shape,
"num_layers": str(num_layers),
"num_training_samples": str(num_training_samples),
"num_classes": str(num_classes),
"mini_batch_size": str(mini_batch_size),
"epochs": str(epochs),
"learning_rate": str(learning_rate),
"top_k": str(top_k),
"resize": str(resize),
"checkpoint_frequency": str(checkpoint_frequency),
"use_pretrained_model": str(use_pretrained_model)
},
"StoppingCondition": {
"MaxRuntimeInSeconds": 360000
},
#Training data should be inside a subdirectory called "train"
#Validation data should be inside a subdirectory called "validation"
#The algorithm currently only supports fullyreplicated model (where data is copied onto each machine)
"InputDataConfig": [
{
"ChannelName": "train",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": 's3://{}/train/'.format(bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
},
{
"ChannelName": "validation",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": 's3://{}/validation/'.format(bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
},
{
"ChannelName": "train_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": 's3://{}/train_lst/'.format(bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
},
{
"ChannelName": "validation_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": 's3://{}/validation_lst/'.format(bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
}
]
}
print('Training job name: {}'.format(job_name))
print('\nInput Data Location: {}'.format(training_params['InputDataConfig'][0]['DataSource']['S3DataSource']))
# create the Amazon SageMaker training job
sagemaker = boto3.client(service_name='sagemaker')
sagemaker.create_training_job(**training_params)
# confirm that the training job has started
status = sagemaker.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']
print('Training job current status: {}'.format(status))
try:
# wait for the job to finish and report the ending status
sagemaker.get_waiter('training_job_completed_or_stopped').wait(TrainingJobName=job_name)
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info['TrainingJobStatus']
print("Training job ended with status: " + status)
except:
print('Training failed to start')
# if exception is raised, that means it has failed
message = sagemaker.describe_training_job(TrainingJobName=job_name)['FailureReason']
print('Training failed with the following error: {}'.format(message))
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info['TrainingJobStatus']
print("Training job ended with status: " + status)
print (training_info)
###Output
_____no_output_____
###Markdown
If you see the message,> `Training job ended with status: Completed`then that means training successfully completed and the output model was stored in the output path specified by `training_params['OutputDataConfig']`.You can also view information about a training job, including its status, using the AWS SageMaker console. Just click on the "Jobs" tab. Set Up Hosting For The ModelA trained model does nothing on its own. We now want to use the model to perform inference. For this example, that means predicting the class label given an input image.This section involves several steps,1. [Create model](CreateModel) - Create model for the training output1. [Create endpoint configuration](CreateEndpointConfiguration) - Create a configuration defining an endpoint.1. [Create endpoint](CreateEndpoint) - Use the configuration to create an inference endpoint.1. [Perform inference](PerformInference) - Perform inference on some input data using the endpoint. Create modelWe now create a SageMaker Model from the training output. Using the model we can create an Endpoint Configuration.
###Code
%%time
import boto3
from time import gmtime, strftime
sage = boto3.Session().client(service_name='sagemaker')
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
model_name="image-classification-model" + timestamp
print(model_name)
info = sage.describe_training_job(TrainingJobName=job_name)
model_data = info['ModelArtifacts']['S3ModelArtifacts']
print(model_data)
hosting_image = containers[boto3.Session().region_name]
primary_container = {
'Image': hosting_image,
'ModelDataUrl': model_data,
}
create_model_response = sage.create_model(
ModelName = model_name,
ExecutionRoleArn = role,
PrimaryContainer = primary_container)
print(create_model_response['ModelArn'])
###Output
_____no_output_____
###Markdown
Create endpoint configurationAt launch, we will support configuring REST endpoints in hosting with multiple models, e.g. for A/B testing purposes. In order to support this, customers create an endpoint configuration, that describes the distribution of traffic across the models, whether split, shadowed, or sampled in some way.In addition, the endpoint configuration describes the instance type required for model deployment, and at launch will describe the autoscaling configuration.
###Code
from time import gmtime, strftime
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_config_name = job_name_prefix + '-epc-' + timestamp
endpoint_config_response = sage.create_endpoint_config(
EndpointConfigName = endpoint_config_name,
ProductionVariants=[{
'InstanceType':'ml.p2.xlarge',
'InitialInstanceCount':1,
'ModelName':model_name,
'VariantName':'AllTraffic'}])
print('Endpoint configuration name: {}'.format(endpoint_config_name))
print('Endpoint configuration arn: {}'.format(endpoint_config_response['EndpointConfigArn']))
###Output
_____no_output_____
###Markdown
Create endpointLastly, the customer creates the endpoint that serves up the model, through specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 9-11 minutes to complete.
###Code
%%time
import time
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_name = job_name_prefix + '-ep-' + timestamp
print('Endpoint name: {}'.format(endpoint_name))
endpoint_params = {
'EndpointName': endpoint_name,
'EndpointConfigName': endpoint_config_name,
}
endpoint_response = sagemaker.create_endpoint(**endpoint_params)
print('EndpointArn = {}'.format(endpoint_response['EndpointArn']))
###Output
_____no_output_____
###Markdown
Finally, now the endpoint can be created. It may take some time to create the endpoint...
###Code
# get the status of the endpoint
response = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = response['EndpointStatus']
print('EndpointStatus = {}'.format(status))
try:
sagemaker.get_waiter('endpoint_in_service').wait(EndpointName=endpoint_name)
finally:
resp = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = resp['EndpointStatus']
print("Arn: " + resp['EndpointArn'])
print("Create endpoint ended with status: " + status)
if status != 'InService':
message = sagemaker.describe_endpoint(EndpointName=endpoint_name)['FailureReason']
print('Training failed with the following error: {}'.format(message))
raise Exception('Endpoint creation did not succeed')
###Output
_____no_output_____
###Markdown
If you see the message,> `Create endpoint ended with status: InService`then congratulations! You now have a functioning inference endpoint. You can confirm the endpoint configuration and status by navigating to the "Endpoints" tab in the AWS SageMaker console.We will finally create a runtime object from which we can invoke the endpoint. Perform inferenceFinally, the customer can now validate the model for use. They can obtain the endpoint from the client library using the result from previous operations, and generate classifications from the trained model using that endpoint.
###Code
import boto3
runtime = boto3.Session().client(service_name='runtime.sagemaker')
###Output
_____no_output_____
###Markdown
Download test image
###Code
!wget -O /tmp/test.jpg http://www.vision.caltech.edu/Image_Datasets/Caltech256/images/008.bathtub/008_0007.jpg
file_name = '/tmp/test.jpg'
# test image
from IPython.display import Image
Image(file_name)
import json
import numpy as np
with open(file_name, 'rb') as f:
payload = f.read()
payload = bytearray(payload)
response = runtime.invoke_endpoint(EndpointName=endpoint_name,
ContentType='application/x-image',
Body=payload)
result = response['Body'].read()
# result will be in json format and convert it to ndarray
result = json.loads(result)
# the result will output the probabilities for all classes
# find the class with maximum probability and print the class index
index = np.argmax(result)
object_categories = ['ak47', 'american-flag', 'backpack', 'baseball-bat', 'baseball-glove', 'basketball-hoop', 'bat', 'bathtub', 'bear', 'beer-mug', 'billiards', 'binoculars', 'birdbath', 'blimp', 'bonsai-101', 'boom-box', 'bowling-ball', 'bowling-pin', 'boxing-glove', 'brain-101', 'breadmaker', 'buddha-101', 'bulldozer', 'butterfly', 'cactus', 'cake', 'calculator', 'camel', 'cannon', 'canoe', 'car-tire', 'cartman', 'cd', 'centipede', 'cereal-box', 'chandelier-101', 'chess-board', 'chimp', 'chopsticks', 'cockroach', 'coffee-mug', 'coffin', 'coin', 'comet', 'computer-keyboard', 'computer-monitor', 'computer-mouse', 'conch', 'cormorant', 'covered-wagon', 'cowboy-hat', 'crab-101', 'desk-globe', 'diamond-ring', 'dice', 'dog', 'dolphin-101', 'doorknob', 'drinking-straw', 'duck', 'dumb-bell', 'eiffel-tower', 'electric-guitar-101', 'elephant-101', 'elk', 'ewer-101', 'eyeglasses', 'fern', 'fighter-jet', 'fire-extinguisher', 'fire-hydrant', 'fire-truck', 'fireworks', 'flashlight', 'floppy-disk', 'football-helmet', 'french-horn', 'fried-egg', 'frisbee', 'frog', 'frying-pan', 'galaxy', 'gas-pump', 'giraffe', 'goat', 'golden-gate-bridge', 'goldfish', 'golf-ball', 'goose', 'gorilla', 'grand-piano-101', 'grapes', 'grasshopper', 'guitar-pick', 'hamburger', 'hammock', 'harmonica', 'harp', 'harpsichord', 'hawksbill-101', 'head-phones', 'helicopter-101', 'hibiscus', 'homer-simpson', 'horse', 'horseshoe-crab', 'hot-air-balloon', 'hot-dog', 'hot-tub', 'hourglass', 'house-fly', 'human-skeleton', 'hummingbird', 'ibis-101', 'ice-cream-cone', 'iguana', 'ipod', 'iris', 'jesus-christ', 'joy-stick', 'kangaroo-101', 'kayak', 'ketch-101', 'killer-whale', 'knife', 'ladder', 'laptop-101', 'lathe', 'leopards-101', 'license-plate', 'lightbulb', 'light-house', 'lightning', 'llama-101', 'mailbox', 'mandolin', 'mars', 'mattress', 'megaphone', 'menorah-101', 'microscope', 'microwave', 'minaret', 'minotaur', 'motorbikes-101', 'mountain-bike', 'mushroom', 'mussels', 'necktie', 'octopus', 'ostrich', 'owl', 'palm-pilot', 'palm-tree', 'paperclip', 'paper-shredder', 'pci-card', 'penguin', 'people', 'pez-dispenser', 'photocopier', 'picnic-table', 'playing-card', 'porcupine', 'pram', 'praying-mantis', 'pyramid', 'raccoon', 'radio-telescope', 'rainbow', 'refrigerator', 'revolver-101', 'rifle', 'rotary-phone', 'roulette-wheel', 'saddle', 'saturn', 'school-bus', 'scorpion-101', 'screwdriver', 'segway', 'self-propelled-lawn-mower', 'sextant', 'sheet-music', 'skateboard', 'skunk', 'skyscraper', 'smokestack', 'snail', 'snake', 'sneaker', 'snowmobile', 'soccer-ball', 'socks', 'soda-can', 'spaghetti', 'speed-boat', 'spider', 'spoon', 'stained-glass', 'starfish-101', 'steering-wheel', 'stirrups', 'sunflower-101', 'superman', 'sushi', 'swan', 'swiss-army-knife', 'sword', 'syringe', 'tambourine', 'teapot', 'teddy-bear', 'teepee', 'telephone-box', 'tennis-ball', 'tennis-court', 'tennis-racket', 'theodolite', 'toaster', 'tomato', 'tombstone', 'top-hat', 'touring-bike', 'tower-pisa', 'traffic-light', 'treadmill', 'triceratops', 'tricycle', 'trilobite-101', 'tripod', 't-shirt', 'tuning-fork', 'tweezer', 'umbrella-101', 'unicorn', 'vcr', 'video-projector', 'washing-machine', 'watch-101', 'waterfall', 'watermelon', 'welding-mask', 'wheelbarrow', 'windmill', 'wine-bottle', 'xylophone', 'yarmulke', 'yo-yo', 'zebra', 'airplanes-101', 'car-side-101', 'faces-easy-101', 'greyhound', 'tennis-shoes', 'toad', 'clutter']
print("Result: label - " + object_categories[index] + ", probability - " + str(result[index]))
###Output
_____no_output_____
###Markdown
Clean upWhen we're done with the endpoint, we can just delete it and the backing instances will be released. Run the following cell to delete the endpoint and model.
###Code
sage.delete_endpoint(EndpointName=endpoint_name)
###Output
_____no_output_____
###Markdown
Image classification training with image format1. [Introduction](Introduction)2. [Prerequisites and Preprocessing](Prerequisites-and-Preprocessing) 1. [Permissions and environment variables](Permissions-and-environment-variables) 2. [Prepare the data](Prepare-the-data)3. [Fine-tuning The Image Classification Model](Fine-tuning-the-Image-classification-model) 1. [Training parameters](Training-parameters) 2. [Training](Training)4. [Deploy The Model](Deploy-the-model) 1. [Create model](Create-model) 2. [Batch transform](Batch-transform) 3. [Realtime inference](Realtime-inference) 1. [Create endpoint configuration](Create-endpoint-configuration) 2. [Create endpoint](Create-endpoint) 3. [Perform inference](Perform-inference) 4. [Clean up](Clean-up) IntroductionWelcome to our end-to-end example of the image classification algorithm training with image format. In this demo, we will use the Amazon SageMaker image classification algorithm in transfer learning mode to fine-tune a pre-trained model (trained on ImageNet data) to learn to classify a new dataset. In particular, the pre-trained model will be fine-tuned using the [Caltech-256 dataset](http://www.vision.caltech.edu/Image_Datasets/Caltech256/). To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on. Prerequisites and Preprocessing Permissions and environment variablesHere we set up the linkage and authentication to AWS services. There are three parts to this:* The roles used to give learning and hosting access to your data. This will automatically be obtained from the role used to start the notebook* The S3 bucket that you want to use for training and model data* The Amazon SageMaker image classification docker image which need not be changed
###Code
%%time
import boto3
from sagemaker import get_execution_role
from sagemaker.amazon.amazon_estimator import get_image_uri
role = get_execution_role()
bucket = "<<bucket-name>>" # customize to your bucket
training_image = get_image_uri(boto3.Session().region_name, "image-classification")
###Output
_____no_output_____
###Markdown
Fine-tuning the Image classification model Prepare the dataThe caltech 256 dataset consists of images from 257 categories (the last one being a clutter category) and has 30k images with a minimum of 80 images and a maximum of about 800 images per category. The image classification algorithm can take two types of input formats. The first is a [RecordIO format](https://mxnet.incubator.apache.org/tutorials/basic/record_io.html) (content type: application/x-recordio) and the other is a [lst format](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec) (content type: application/x-image). Files for both these formats are available at http://data.dmlc.ml/mxnet/data/caltech-256/. In this example, we will use the lst format for training and use the training/validation split [specified here](http://data.dmlc.ml/mxnet/data/caltech-256/).
###Code
import os
import urllib.request
def download(url):
filename = url.split("/")[-1]
if not os.path.exists(filename):
urllib.request.urlretrieve(url, filename)
# Caltech-256 image files
download("http://www.vision.caltech.edu/Image_Datasets/Caltech256/256_ObjectCategories.tar")
!tar -xf 256_ObjectCategories.tar
# Tool for creating lst file
download("https://raw.githubusercontent.com/apache/incubator-mxnet/master/tools/im2rec.py")
%%bash
mkdir -p caltech_256_train_60
for i in 256_ObjectCategories/*; do
c=`basename $i`
mkdir -p caltech_256_train_60/$c
for j in `ls $i/*.jpg | shuf | head -n 60`; do
mv $j caltech_256_train_60/$c/
done
done
python im2rec.py --list --recursive caltech-256-60-train caltech_256_train_60/
python im2rec.py --list --recursive caltech-256-60-val 256_ObjectCategories/
###Output
_____no_output_____
###Markdown
A .lst file is a tab-separated file with three columns that contains a list of image files. The first column specifies the image index, the second column specifies the class label index for the image, and the third column specifies the relative path of the image file. The image index in the first column should be unique across all of the images. Here we make an image list file using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool from MXNet. You can also create the .lst file in your own way. An example of .lst file is shown as follows.
###Code
!head -n 3 ./caltech-256-60-train.lst > example.lst
f = open("example.lst", "r")
lst_content = f.read()
print(lst_content)
###Output
_____no_output_____
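###Markdown
The cell above already prints the raw rows; purely as an illustrative sketch, the three tab-separated fields can also be pulled apart in Python (the variable names below are arbitrary and not part of the original workflow).
###Code
# Illustrative only: split each row of example.lst into its three tab-separated fields
# (<image index>, <class label index>, <relative image path>).
with open("example.lst") as example_file:
    for row in example_file:
        fields = row.strip().split("\t")
        if len(fields) == 3:
            image_index, class_label, relative_path = fields
            print("index={}, label={}, path={}".format(image_index, class_label, relative_path))
###Output
_____no_output_____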
###Markdown
When you are bringing your own image files to train, please ensure that the .lst file follows the same format as described above. In order to train with the lst format interface, passing the lst file for both training and validation in the appropriate format is mandatory. Once we have the data available in the correct format for training, the next step is to upload the image and .lst file to S3 bucket.
###Code
# Four channels: train, validation, train_lst, and validation_lst
s3train = "s3://{}/image-classification/train/".format(bucket)
s3validation = "s3://{}/image-classification/validation/".format(bucket)
s3train_lst = "s3://{}/image-classification/train_lst/".format(bucket)
s3validation_lst = "s3://{}/image-classification/validation_lst/".format(bucket)
# upload the image files to train and validation channels
!aws s3 cp caltech_256_train_60 $s3train --recursive --quiet
!aws s3 cp 256_ObjectCategories $s3validation --recursive --quiet
# upload the lst files to train_lst and validation_lst channels
!aws s3 cp caltech-256-60-train.lst $s3train_lst --quiet
!aws s3 cp caltech-256-60-val.lst $s3validation_lst --quiet
###Output
_____no_output_____
###Markdown
Now we have all the data stored in the S3 bucket. The image and lst files will be converted to RecordIO files internally by the image classification algorithm. But if you want to do the conversion yourself, the following cell shows how to do it using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool. Note that this is just an example of creating RecordIO files. We are **_not_** using them for training in this notebook. More details on creating RecordIO files can be found in this [tutorial](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec).
###Code
%%bash
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-val 256_ObjectCategories/
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-train caltech_256_train_60/
###Output
_____no_output_____
###Markdown
After you have created the RecordIO files, you can upload them to the train and validation channels for training. To train with RecordIO format, you can follow "[Image-classification-fulltraining.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-fulltraining.ipynb)" and "[Image-classification-transfer-learning.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-transfer-learning.ipynb)". Again, we will **_not_** use the RecordIO files for the training. The following sections will only show you how to train a model with images and list files. Before training the model, we need to set up the training parameters. The next section will explain the parameters in detail. Fine-tuning the Image Classification Model Training parametersThere are two kinds of parameters that need to be set for training. The first are the parameters for the training job. These include:* **Input specification**: These are the training and validation channels that specify the path where training data is present. These are specified in the "InputDataConfig" section. The main parameters that need to be set are the "ContentType", which can be set to "application/x-recordio" or "application/x-image" based on the input data format, and the "S3Uri", which specifies the bucket and the folder where the data is present. * **Output specification**: This is specified in the "OutputDataConfig" section. We just need to specify the path where the output can be stored after training. * **Resource config**: This section specifies the type of instance on which to run the training and the number of hosts used for training. If "InstanceCount" is more than 1, then training can be run in a distributed manner. Apart from the above set of parameters, there are hyperparameters that are specific to the algorithm. These are:* **num_layers**: The number of layers (depth) for the network. We use 18 in this sample, but other values such as 50 or 152 can be used.* **image_shape**: The input image dimensions, 'num_channels, height, width', for the network. It should be no larger than the actual image size. The number of channels should be the same as in the actual image.* **num_training_samples**: This is the total number of training samples. It is set to 15240 for the caltech dataset with the current split.* **num_classes**: This is the number of output classes for the new dataset. Imagenet was trained with 1000 output classes, but the number of output classes can be changed for fine-tuning. For caltech, we use 257 because it has 256 object categories + 1 clutter class.* **mini_batch_size**: The number of training samples used for each mini batch. In distributed training, the number of training samples used per batch will be N * mini_batch_size, where N is the number of hosts on which training is run.* **epochs**: Number of training epochs.* **learning_rate**: Learning rate for training.* **top_k**: Report the top-k accuracy during training.* **resize**: Resize the image before using it for training. The images are resized so that the shortest side equals this parameter. If the parameter is not set, then the training data is used as such without resizing.* **checkpoint_frequency**: Period to store model parameters (in number of epochs).* **use_pretrained_model**: Set to 1 to use a pretrained model for transfer learning.
###Code
# The algorithm supports multiple network depths (number of layers). They are 18, 34, 50, 101, 152 and 200
# For this training, we will use 18 layers
num_layers = 18
# we need to specify the input image shape for the training data
image_shape = "3,224,224"
# we also need to specify the number of training samples in the training set
num_training_samples = 15240
# specify the number of output classes
num_classes = 257
# batch size for training
mini_batch_size = 128
# number of epochs
epochs = 6
# learning rate
learning_rate = 0.01
# report top_5 accuracy
top_k = 5
# resize image before training
resize = 256
# period to store model parameters (in number of epochs), in this case, we will save parameters from epoch 2, 4, and 6
checkpoint_frequency = 2
# Since we are using transfer learning, we set use_pretrained_model to 1 so that weights can be
# initialized with pre-trained weights
use_pretrained_model = 1
###Output
_____no_output_____
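###Markdown
Optionally, the hard-coded num_training_samples value can be cross-checked against the training .lst file generated earlier, since each line of that file corresponds to one training image; this is only a sanity-check sketch and does not affect the training job.
###Code
# Sanity check (optional): each line of the training .lst file is one training image,
# so the line count should match num_training_samples (15240 for this split).
with open("caltech-256-60-train.lst") as train_lst:
    lst_line_count = sum(1 for _ in train_lst)
print("Lines in caltech-256-60-train.lst:", lst_line_count)
print("num_training_samples set above:", num_training_samples)
###Output
_____no_output_____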
###Markdown
TrainingRun the training using Amazon sagemaker CreateTrainingJob API
###Code
%%time
import time
import boto3
from time import gmtime, strftime
s3 = boto3.client("s3")
# create unique job name
job_name_prefix = "sagemaker-imageclassification-notebook"
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
job_name = job_name_prefix + timestamp
training_params = {
# specify the training docker image
"AlgorithmSpecification": {"TrainingImage": training_image, "TrainingInputMode": "File"},
"RoleArn": role,
"OutputDataConfig": {"S3OutputPath": "s3://{}/{}/output".format(bucket, job_name_prefix)},
"ResourceConfig": {"InstanceCount": 1, "InstanceType": "ml.p2.xlarge", "VolumeSizeInGB": 50},
"TrainingJobName": job_name,
"HyperParameters": {
"image_shape": image_shape,
"num_layers": str(num_layers),
"num_training_samples": str(num_training_samples),
"num_classes": str(num_classes),
"mini_batch_size": str(mini_batch_size),
"epochs": str(epochs),
"learning_rate": str(learning_rate),
"top_k": str(top_k),
"resize": str(resize),
"checkpoint_frequency": str(checkpoint_frequency),
"use_pretrained_model": str(use_pretrained_model),
},
"StoppingCondition": {"MaxRuntimeInSeconds": 360000},
# Training data should be inside a subdirectory called "train"
# Validation data should be inside a subdirectory called "validation"
# The algorithm currently only supports fullyreplicated model (where data is copied onto each machine)
"InputDataConfig": [
{
"ChannelName": "train",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3train,
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "application/x-image",
"CompressionType": "None",
},
{
"ChannelName": "validation",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3validation,
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "application/x-image",
"CompressionType": "None",
},
{
"ChannelName": "train_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3train_lst,
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "application/x-image",
"CompressionType": "None",
},
{
"ChannelName": "validation_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3validation_lst,
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "application/x-image",
"CompressionType": "None",
},
],
}
print("Training job name: {}".format(job_name))
print(
"\nInput Data Location: {}".format(
training_params["InputDataConfig"][0]["DataSource"]["S3DataSource"]
)
)
# create the Amazon SageMaker training job
sagemaker = boto3.client(service_name="sagemaker")
sagemaker.create_training_job(**training_params)
# confirm that the training job has started
status = sagemaker.describe_training_job(TrainingJobName=job_name)["TrainingJobStatus"]
print("Training job current status: {}".format(status))
try:
# wait for the job to finish and report the ending status
sagemaker.get_waiter("training_job_completed_or_stopped").wait(TrainingJobName=job_name)
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info["TrainingJobStatus"]
print("Training job ended with status: " + status)
except:
print("Training failed to start")
# if exception is raised, that means it has failed
message = sagemaker.describe_training_job(TrainingJobName=job_name)["FailureReason"]
print("Training failed with the following error: {}".format(message))
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info["TrainingJobStatus"]
print("Training job ended with status: " + status)
print(training_info)
###Output
_____no_output_____
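###Markdown
Once the job is complete, describe_training_job also returns any final metrics reported by the algorithm (for example training and validation accuracy); the exact metric names depend on the algorithm version, so treat the snippet below as an optional sketch.
###Code
# Optional: print the final metrics (if any) reported for the completed training job.
# FinalMetricDataList is part of the DescribeTrainingJob response and may be empty.
for metric in training_info.get("FinalMetricDataList", []):
    print("{}: {}".format(metric["MetricName"], metric["Value"]))
###Output
_____no_output_____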
###Markdown
If you see the message,> `Training job ended with status: Completed`then that means training successfully completed and the output model was stored in the output path specified by `training_params['OutputDataConfig']`.You can also view information about and the status of a training job using the AWS SageMaker console. Just click on the "Jobs" tab. Deploy The ModelA trained model does nothing on its own. We now want to use the model to perform inference. For this example, that means predicting the class label given an input image.This section involves several steps,1. [Create model](CreateModel) - Create model for the training output1. [Batch Transform](BatchTransform) - Create a transform job to perform batch inference.1. [Host the model for realtime inference](HostTheModel) - Create an inference endpoint and perform realtime inference. Create modelWe now create a SageMaker Model from the training output. Using the model we can create an Endpoint Configuration.
###Code
%%time
import boto3
from time import gmtime, strftime
sage = boto3.Session().client(service_name="sagemaker")
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
model_name = "image-classification-model" + timestamp
print(model_name)
info = sage.describe_training_job(TrainingJobName=job_name)
model_data = info["ModelArtifacts"]["S3ModelArtifacts"]
print(model_data)
hosting_image = get_image_uri(boto3.Session().region_name, "image-classification")
primary_container = {
"Image": hosting_image,
"ModelDataUrl": model_data,
}
create_model_response = sage.create_model(
ModelName=model_name, ExecutionRoleArn=role, PrimaryContainer=primary_container
)
print(create_model_response["ModelArn"])
###Output
_____no_output_____
###Markdown
Batch transformWe now create a SageMaker Batch Transform job using the model created above to perform batch prediction.
###Code
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
batch_job_name = "image-classification-model" + timestamp
batch_input = s3validation + "001.ak47/"
request = {
"TransformJobName": batch_job_name,
"ModelName": model_name,
"MaxConcurrentTransforms": 16,
"MaxPayloadInMB": 6,
"BatchStrategy": "SingleRecord",
"TransformOutput": {"S3OutputPath": "s3://{}/{}/output".format(bucket, batch_job_name)},
"TransformInput": {
"DataSource": {"S3DataSource": {"S3DataType": "S3Prefix", "S3Uri": batch_input}},
"ContentType": "application/x-image",
"SplitType": "None",
"CompressionType": "None",
},
"TransformResources": {"InstanceType": "ml.p2.xlarge", "InstanceCount": 1},
}
print("Transform job name: {}".format(batch_job_name))
print("\nInput Data Location: {}".format(batch_input))
sagemaker = boto3.client("sagemaker")
sagemaker.create_transform_job(**request)
print("Created Transform job with name: ", batch_job_name)
while True:
response = sagemaker.describe_transform_job(TransformJobName=batch_job_name)
status = response["TransformJobStatus"]
if status == "Completed":
print("Transform job ended with status: " + status)
break
if status == "Failed":
message = response["FailureReason"]
print("Transform failed with the following error: {}".format(message))
raise Exception("Transform job failed")
time.sleep(30)
###Output
_____no_output_____
###Markdown
After the job completes, let's check the prediction results.
###Code
from urllib.parse import urlparse
import json
import numpy as np
s3_client = boto3.client("s3")
object_categories = [
"ak47",
"american-flag",
"backpack",
"baseball-bat",
"baseball-glove",
"basketball-hoop",
"bat",
"bathtub",
"bear",
"beer-mug",
"billiards",
"binoculars",
"birdbath",
"blimp",
"bonsai-101",
"boom-box",
"bowling-ball",
"bowling-pin",
"boxing-glove",
"brain-101",
"breadmaker",
"buddha-101",
"bulldozer",
"butterfly",
"cactus",
"cake",
"calculator",
"camel",
"cannon",
"canoe",
"car-tire",
"cartman",
"cd",
"centipede",
"cereal-box",
"chandelier-101",
"chess-board",
"chimp",
"chopsticks",
"cockroach",
"coffee-mug",
"coffin",
"coin",
"comet",
"computer-keyboard",
"computer-monitor",
"computer-mouse",
"conch",
"cormorant",
"covered-wagon",
"cowboy-hat",
"crab-101",
"desk-globe",
"diamond-ring",
"dice",
"dog",
"dolphin-101",
"doorknob",
"drinking-straw",
"duck",
"dumb-bell",
"eiffel-tower",
"electric-guitar-101",
"elephant-101",
"elk",
"ewer-101",
"eyeglasses",
"fern",
"fighter-jet",
"fire-extinguisher",
"fire-hydrant",
"fire-truck",
"fireworks",
"flashlight",
"floppy-disk",
"football-helmet",
"french-horn",
"fried-egg",
"frisbee",
"frog",
"frying-pan",
"galaxy",
"gas-pump",
"giraffe",
"goat",
"golden-gate-bridge",
"goldfish",
"golf-ball",
"goose",
"gorilla",
"grand-piano-101",
"grapes",
"grasshopper",
"guitar-pick",
"hamburger",
"hammock",
"harmonica",
"harp",
"harpsichord",
"hawksbill-101",
"head-phones",
"helicopter-101",
"hibiscus",
"homer-simpson",
"horse",
"horseshoe-crab",
"hot-air-balloon",
"hot-dog",
"hot-tub",
"hourglass",
"house-fly",
"human-skeleton",
"hummingbird",
"ibis-101",
"ice-cream-cone",
"iguana",
"ipod",
"iris",
"jesus-christ",
"joy-stick",
"kangaroo-101",
"kayak",
"ketch-101",
"killer-whale",
"knife",
"ladder",
"laptop-101",
"lathe",
"leopards-101",
"license-plate",
"lightbulb",
"light-house",
"lightning",
"llama-101",
"mailbox",
"mandolin",
"mars",
"mattress",
"megaphone",
"menorah-101",
"microscope",
"microwave",
"minaret",
"minotaur",
"motorbikes-101",
"mountain-bike",
"mushroom",
"mussels",
"necktie",
"octopus",
"ostrich",
"owl",
"palm-pilot",
"palm-tree",
"paperclip",
"paper-shredder",
"pci-card",
"penguin",
"people",
"pez-dispenser",
"photocopier",
"picnic-table",
"playing-card",
"porcupine",
"pram",
"praying-mantis",
"pyramid",
"raccoon",
"radio-telescope",
"rainbow",
"refrigerator",
"revolver-101",
"rifle",
"rotary-phone",
"roulette-wheel",
"saddle",
"saturn",
"school-bus",
"scorpion-101",
"screwdriver",
"segway",
"self-propelled-lawn-mower",
"sextant",
"sheet-music",
"skateboard",
"skunk",
"skyscraper",
"smokestack",
"snail",
"snake",
"sneaker",
"snowmobile",
"soccer-ball",
"socks",
"soda-can",
"spaghetti",
"speed-boat",
"spider",
"spoon",
"stained-glass",
"starfish-101",
"steering-wheel",
"stirrups",
"sunflower-101",
"superman",
"sushi",
"swan",
"swiss-army-knife",
"sword",
"syringe",
"tambourine",
"teapot",
"teddy-bear",
"teepee",
"telephone-box",
"tennis-ball",
"tennis-court",
"tennis-racket",
"theodolite",
"toaster",
"tomato",
"tombstone",
"top-hat",
"touring-bike",
"tower-pisa",
"traffic-light",
"treadmill",
"triceratops",
"tricycle",
"trilobite-101",
"tripod",
"t-shirt",
"tuning-fork",
"tweezer",
"umbrella-101",
"unicorn",
"vcr",
"video-projector",
"washing-machine",
"watch-101",
"waterfall",
"watermelon",
"welding-mask",
"wheelbarrow",
"windmill",
"wine-bottle",
"xylophone",
"yarmulke",
"yo-yo",
"zebra",
"airplanes-101",
"car-side-101",
"faces-easy-101",
"greyhound",
"tennis-shoes",
"toad",
"clutter",
]
def list_objects(s3_client, bucket, prefix):
response = s3_client.list_objects(Bucket=bucket, Prefix=prefix)
objects = [content["Key"] for content in response["Contents"]]
return objects
def get_label(s3_client, bucket, prefix):
filename = prefix.split("/")[-1]
s3_client.download_file(bucket, prefix, filename)
with open(filename) as f:
data = json.load(f)
index = np.argmax(data["prediction"])
probability = data["prediction"][index]
print("Result: label - " + object_categories[index] + ", probability - " + str(probability))
return object_categories[index], probability
inputs = list_objects(s3_client, bucket, urlparse(batch_input).path.lstrip("/"))
print("Sample inputs: " + str(inputs[:2]))
outputs = list_objects(s3_client, bucket, batch_job_name + "/output")
print("Sample output: " + str(outputs[:2]))
# Check prediction result of the first 2 images
[get_label(s3_client, bucket, prefix) for prefix in outputs[0:2]]
###Output
_____no_output_____
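###Markdown
Because the batch input above only contained images from the 001.ak47 folder, an optional sanity check is to see how often the predicted label is ak47 across all of the batch outputs; this sketch simply reuses the get_label helper defined above and downloads every output file, so it may take a moment.
###Code
# Optional: predicted labels for every image in the batch, and the fraction predicted as "ak47".
# This reuses the get_label helper above and downloads each output file.
predictions = [get_label(s3_client, bucket, prefix)[0] for prefix in outputs]
if predictions:
    ak47_fraction = sum(1 for label in predictions if label == "ak47") / len(predictions)
    print("Fraction of batch outputs predicted as ak47: {:.2f}".format(ak47_fraction))
###Output
_____no_output_____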
###Markdown
Realtime inferenceWe now host the model with an endpoint and perform realtime inference.This section involves several steps,1. [Create endpoint configuration](CreateEndpointConfiguration) - Create a configuration defining an endpoint.1. [Create endpoint](CreateEndpoint) - Use the configuration to create an inference endpoint.1. [Perform inference](PerformInference) - Perform inference on some input data using the endpoint.1. [Clean up](CleanUp) - Delete the endpoint and model Create endpoint configurationAt launch, we will support configuring REST endpoints in hosting with multiple models, e.g. for A/B testing purposes. In order to support this, customers create an endpoint configuration, that describes the distribution of traffic across the models, whether split, shadowed, or sampled in some way.In addition, the endpoint configuration describes the instance type required for model deployment, and at launch will describe the autoscaling configuration.
###Code
from time import gmtime, strftime
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
endpoint_config_name = job_name_prefix + "-epc-" + timestamp
endpoint_config_response = sage.create_endpoint_config(
EndpointConfigName=endpoint_config_name,
ProductionVariants=[
{
"InstanceType": "ml.p2.xlarge",
"InitialInstanceCount": 1,
"ModelName": model_name,
"VariantName": "AllTraffic",
}
],
)
print("Endpoint configuration name: {}".format(endpoint_config_name))
print("Endpoint configuration arn: {}".format(endpoint_config_response["EndpointConfigArn"]))
###Output
_____no_output_____
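###Markdown
For reference, the traffic-splitting behaviour mentioned above is expressed through InitialVariantWeight on each production variant; the sketch below shows a hypothetical two-variant layout where "my-second-model" is a placeholder for a model you would have created separately, and it only builds the list without creating another endpoint configuration.
###Code
# Hypothetical A/B layout: 90% of traffic to the model trained above, 10% to a second model.
# "my-second-model" is a placeholder name; an A/B endpoint would pass a list like this as
# ProductionVariants to create_endpoint_config. Nothing is created here.
ab_production_variants = [
    {
        "VariantName": "VariantA",
        "ModelName": model_name,
        "InstanceType": "ml.p2.xlarge",
        "InitialInstanceCount": 1,
        "InitialVariantWeight": 0.9,
    },
    {
        "VariantName": "VariantB",
        "ModelName": "my-second-model",  # placeholder for a second model
        "InstanceType": "ml.p2.xlarge",
        "InitialInstanceCount": 1,
        "InitialVariantWeight": 0.1,
    },
]
print(ab_production_variants)
###Output
_____no_output_____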
###Markdown
Create endpointNext, the customer creates the endpoint that serves up the model, through specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 9-11 minutes to complete.
###Code
%%time
import time
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
endpoint_name = job_name_prefix + "-ep-" + timestamp
print("Endpoint name: {}".format(endpoint_name))
endpoint_params = {
"EndpointName": endpoint_name,
"EndpointConfigName": endpoint_config_name,
}
endpoint_response = sagemaker.create_endpoint(**endpoint_params)
print("EndpointArn = {}".format(endpoint_response["EndpointArn"]))
###Output
_____no_output_____
###Markdown
Finally, the endpoint can now be created. It may take some time to create the endpoint...
###Code
# get the status of the endpoint
response = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = response["EndpointStatus"]
print("EndpointStatus = {}".format(status))
try:
sagemaker.get_waiter("endpoint_in_service").wait(EndpointName=endpoint_name)
finally:
resp = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = resp["EndpointStatus"]
print("Arn: " + resp["EndpointArn"])
print("Create endpoint ended with status: " + status)
if status != "InService":
message = sagemaker.describe_endpoint(EndpointName=endpoint_name)["FailureReason"]
print("Training failed with the following error: {}".format(message))
raise Exception("Endpoint creation did not succeed")
###Output
_____no_output_____
###Markdown
If you see the message,> `Endpoint creation ended with EndpointStatus = InService`then congratulations! You now have a functioning inference endpoint. You can confirm the endpoint configuration and status by navigating to the "Endpoints" tab in the AWS SageMaker console.We will finally create a runtime object from which we can invoke the endpoint. Perform inferenceFinally, the customer can now validate the model for use. They can obtain the endpoint from the client library using the result from previous operations, and generate classifications from the trained model using that endpoint.
###Code
import boto3
runtime = boto3.Session().client(service_name="runtime.sagemaker")
###Output
_____no_output_____
###Markdown
Download test image
###Code
!wget -O /tmp/test.jpg http://www.vision.caltech.edu/Image_Datasets/Caltech256/images/008.bathtub/008_0007.jpg
file_name = "/tmp/test.jpg"
# test image
from IPython.display import Image
Image(file_name)
import json
import numpy as np
with open(file_name, "rb") as f:
payload = f.read()
payload = bytearray(payload)
response = runtime.invoke_endpoint(
EndpointName=endpoint_name, ContentType="application/x-image", Body=payload
)
result = response["Body"].read()
# result will be in json format and convert it to ndarray
result = json.loads(result)
# the result will output the probabilities for all classes
# find the class with maximum probability and print the class index
index = np.argmax(result)
object_categories = [
"ak47",
"american-flag",
"backpack",
"baseball-bat",
"baseball-glove",
"basketball-hoop",
"bat",
"bathtub",
"bear",
"beer-mug",
"billiards",
"binoculars",
"birdbath",
"blimp",
"bonsai-101",
"boom-box",
"bowling-ball",
"bowling-pin",
"boxing-glove",
"brain-101",
"breadmaker",
"buddha-101",
"bulldozer",
"butterfly",
"cactus",
"cake",
"calculator",
"camel",
"cannon",
"canoe",
"car-tire",
"cartman",
"cd",
"centipede",
"cereal-box",
"chandelier-101",
"chess-board",
"chimp",
"chopsticks",
"cockroach",
"coffee-mug",
"coffin",
"coin",
"comet",
"computer-keyboard",
"computer-monitor",
"computer-mouse",
"conch",
"cormorant",
"covered-wagon",
"cowboy-hat",
"crab-101",
"desk-globe",
"diamond-ring",
"dice",
"dog",
"dolphin-101",
"doorknob",
"drinking-straw",
"duck",
"dumb-bell",
"eiffel-tower",
"electric-guitar-101",
"elephant-101",
"elk",
"ewer-101",
"eyeglasses",
"fern",
"fighter-jet",
"fire-extinguisher",
"fire-hydrant",
"fire-truck",
"fireworks",
"flashlight",
"floppy-disk",
"football-helmet",
"french-horn",
"fried-egg",
"frisbee",
"frog",
"frying-pan",
"galaxy",
"gas-pump",
"giraffe",
"goat",
"golden-gate-bridge",
"goldfish",
"golf-ball",
"goose",
"gorilla",
"grand-piano-101",
"grapes",
"grasshopper",
"guitar-pick",
"hamburger",
"hammock",
"harmonica",
"harp",
"harpsichord",
"hawksbill-101",
"head-phones",
"helicopter-101",
"hibiscus",
"homer-simpson",
"horse",
"horseshoe-crab",
"hot-air-balloon",
"hot-dog",
"hot-tub",
"hourglass",
"house-fly",
"human-skeleton",
"hummingbird",
"ibis-101",
"ice-cream-cone",
"iguana",
"ipod",
"iris",
"jesus-christ",
"joy-stick",
"kangaroo-101",
"kayak",
"ketch-101",
"killer-whale",
"knife",
"ladder",
"laptop-101",
"lathe",
"leopards-101",
"license-plate",
"lightbulb",
"light-house",
"lightning",
"llama-101",
"mailbox",
"mandolin",
"mars",
"mattress",
"megaphone",
"menorah-101",
"microscope",
"microwave",
"minaret",
"minotaur",
"motorbikes-101",
"mountain-bike",
"mushroom",
"mussels",
"necktie",
"octopus",
"ostrich",
"owl",
"palm-pilot",
"palm-tree",
"paperclip",
"paper-shredder",
"pci-card",
"penguin",
"people",
"pez-dispenser",
"photocopier",
"picnic-table",
"playing-card",
"porcupine",
"pram",
"praying-mantis",
"pyramid",
"raccoon",
"radio-telescope",
"rainbow",
"refrigerator",
"revolver-101",
"rifle",
"rotary-phone",
"roulette-wheel",
"saddle",
"saturn",
"school-bus",
"scorpion-101",
"screwdriver",
"segway",
"self-propelled-lawn-mower",
"sextant",
"sheet-music",
"skateboard",
"skunk",
"skyscraper",
"smokestack",
"snail",
"snake",
"sneaker",
"snowmobile",
"soccer-ball",
"socks",
"soda-can",
"spaghetti",
"speed-boat",
"spider",
"spoon",
"stained-glass",
"starfish-101",
"steering-wheel",
"stirrups",
"sunflower-101",
"superman",
"sushi",
"swan",
"swiss-army-knife",
"sword",
"syringe",
"tambourine",
"teapot",
"teddy-bear",
"teepee",
"telephone-box",
"tennis-ball",
"tennis-court",
"tennis-racket",
"theodolite",
"toaster",
"tomato",
"tombstone",
"top-hat",
"touring-bike",
"tower-pisa",
"traffic-light",
"treadmill",
"triceratops",
"tricycle",
"trilobite-101",
"tripod",
"t-shirt",
"tuning-fork",
"tweezer",
"umbrella-101",
"unicorn",
"vcr",
"video-projector",
"washing-machine",
"watch-101",
"waterfall",
"watermelon",
"welding-mask",
"wheelbarrow",
"windmill",
"wine-bottle",
"xylophone",
"yarmulke",
"yo-yo",
"zebra",
"airplanes-101",
"car-side-101",
"faces-easy-101",
"greyhound",
"tennis-shoes",
"toad",
"clutter",
]
print("Result: label - " + object_categories[index] + ", probability - " + str(result[index]))
###Output
_____no_output_____
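###Markdown
Beyond the single most likely class, the full probability vector can also be inspected for the top-5 predictions (mirroring the top_k metric reported during training); a small optional sketch:
###Code
# Optional: show the five most probable classes for the test image, highest first.
top5 = np.argsort(result)[::-1][:5]
for i in top5:
    print("{}: {:.4f}".format(object_categories[i], result[i]))
###Output
_____no_output_____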
###Markdown
Clean upWhen we're done with the endpoint, we can just delete it and the backing instances will be released. Run the following cell to delete the endpoint and the model.
###Code
sage.delete_endpoint(EndpointName=endpoint_name)
# also remove the SageMaker model created above
sage.delete_model(ModelName=model_name)
###Output
_____no_output_____
###Markdown
Image classification training with image format demo1. [Introduction](Introduction)2. [Prerequisites and Preprocessing](Prerequisites-and-Preprocessing) 1. [Permissions and environment variables](Permissions-and-environment-variables) 2. [Prepare the data](Prepare-the-data)3. [Fine-tuning The Image Classification Model](Fine-tuning-the-Image-classification-model) 1. [Training parameters](Training-parameters) 2. [Training](Training)4. [Set Up Hosting For The Model](Set-up-hosting-for-the-model) 1. [Create model](Create-model) 2. [Create endpoint configuration](Create-endpoint-configuration) 3. [Create endpoint](Create-endpoint) 4. [Perform inference](Perform-inference) IntroductionWelcome to our end-to-end example of the image classification algorithm training with image format. In this demo, we will use the Amazon sagemaker image classification algorithm in transfer learning mode to fine-tune a pre-trained model (trained on imagenet data) to learn to classify a new dataset. In particular, the pre-trained model will be fine-tuned using the [caltech-256 dataset](http://www.vision.caltech.edu/Image_Datasets/Caltech256/). To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on. Prerequisites and Preprocessing Permissions and environment variablesHere we set up the linkage and authentication to AWS services. There are three parts to this:* The roles used to give learning and hosting access to your data. This will automatically be obtained from the role used to start the notebook.* The S3 bucket that you want to use for training and model data.* The Amazon sagemaker image classification docker image, which need not be changed.
###Code
%%time
import boto3
from sagemaker import get_execution_role
role = get_execution_role()
bucket='<<bucket-name>>' # customize to your bucket
containers = {'us-west-2': '433757028032.dkr.ecr.us-west-2.amazonaws.com/image-classification:latest',
'us-east-1': '811284229777.dkr.ecr.us-east-1.amazonaws.com/image-classification:latest',
'us-east-2': '825641698319.dkr.ecr.us-east-2.amazonaws.com/image-classification:latest',
'eu-west-1': '685385470294.dkr.ecr.eu-west-1.amazonaws.com/image-classification:latest'}
training_image = containers[boto3.Session().region_name]
###Output
_____no_output_____
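###Markdown
If your installed version of the SageMaker Python SDK provides the get_image_uri helper, the same training image can be resolved without the hard-coded per-region account map above; this is only an optional alternative sketch.
###Code
# Optional alternative (assumes your sagemaker SDK version exposes get_image_uri):
# resolve the image-classification container without the hard-coded account map above.
from sagemaker.amazon.amazon_estimator import get_image_uri
training_image_alt = get_image_uri(boto3.Session().region_name, 'image-classification')
print(training_image_alt)
###Output
_____no_output_____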
###Markdown
Fine-tuning the Image classification model Prepare the dataThe caltech 256 dataset consists of images from 257 categories (the last one being a clutter category) and has 30k images with a minimum of 80 images and a maximum of about 800 images per category. The image classification algorithm can take two types of input formats. The first is a [RecordIO format](https://mxnet.incubator.apache.org/tutorials/basic/record_io.html) (content type: application/x-recordio) and the other is a [lst format](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec) (content type: application/x-image). Files for both these formats are available at http://data.dmlc.ml/mxnet/data/caltech-256/. In this example, we will use the lst format for training and use the training/validation split [specified here](http://data.dmlc.ml/mxnet/data/caltech-256/).
###Code
import os
import urllib.request
def download(url):
filename = url.split("/")[-1]
if not os.path.exists(filename):
urllib.request.urlretrieve(url, filename)
# Caltech-256 image files
download('http://www.vision.caltech.edu/Image_Datasets/Caltech256/256_ObjectCategories.tar')
!tar -xf 256_ObjectCategories.tar
# Tool for creating lst file
download('https://raw.githubusercontent.com/apache/incubator-mxnet/master/tools/im2rec.py')
%%bash
mkdir -p caltech_256_train_60
for i in 256_ObjectCategories/*; do
c=`basename $i`
mkdir -p caltech_256_train_60/$c
for j in `ls $i/*.jpg | shuf | head -n 60`; do
mv $j caltech_256_train_60/$c/
done
done
python im2rec.py --list --recursive caltech-256-60-train caltech_256_train_60/
python im2rec.py --list --recursive caltech-256-60-val 256_ObjectCategories/
###Output
_____no_output_____
###Markdown
A .lst file is a tab-separated file with three columns that contains a list of image files. The first column specifies the image index, the second column specifies the class label index for the image, and the third column specifies the relative path of the image file. The image index in the first column should be unique across all of the images. Here we make an image list file using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool from MXNet. You can also create the .lst file in your own way. An example of .lst file is shown as follows.
###Code
!head -n 3 ./caltech-256-60-train.lst > example.lst
f = open('example.lst','r')
lst_content = f.read()
print(lst_content)
###Output
_____no_output_____
###Markdown
When you are bringing your own image files to train, please ensure that the .lst file follows the same format as described above. In order to train with the lst format interface, passing the lst file for both training and validation in the appropriate format is mandatory. Once we have the data available in the correct format for training, the next step is to upload the image and .lst file to S3 bucket.
###Code
# Four channels: train, validation, train_lst, and validation_lst
s3train = 's3://{}/train/'.format(bucket)
s3validation = 's3://{}/validation/'.format(bucket)
s3train_lst = 's3://{}/train_lst/'.format(bucket)
s3validation_lst = 's3://{}/validation_lst/'.format(bucket)
# upload the image files to train and validation channels
!aws s3 cp caltech_256_train_60 $s3train --recursive --quiet
!aws s3 cp 256_ObjectCategories $s3validation --recursive --quiet
# upload the lst files to train_lst and validation_lst channels
!aws s3 cp caltech-256-60-train.lst $s3train_lst --quiet
!aws s3 cp caltech-256-60-val.lst $s3validation_lst --quiet
###Output
_____no_output_____
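###Markdown
Before launching training, it can be worth confirming that the uploads landed where expected; the optional sketch below lists a few keys under the train_lst prefix with boto3.
###Code
# Optional: list a few of the uploaded keys to confirm the channels are populated.
import boto3
s3_check = boto3.client('s3')
listing = s3_check.list_objects_v2(Bucket=bucket, Prefix='train_lst/', MaxKeys=5)
for obj in listing.get('Contents', []):
    print(obj['Key'])
###Output
_____no_output_____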
###Markdown
Now we have all the data stored in the S3 bucket. The image and lst files will be converted to RecordIO files internally by the image classification algorithm. But if you want to do the conversion yourself, the following cell shows how to do it using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool. Note that this is just an example of creating RecordIO files. We are **_not_** using them for training in this notebook. More details on creating RecordIO files can be found in this [tutorial](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec).
###Code
%%bash
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-val 256_ObjectCategories/
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-train caltech_256_train_60/
###Output
_____no_output_____
###Markdown
After you have created the RecordIO files, you can upload them to the train and validation channels for training. To train with RecordIO format, you can follow "[Image-classification-fulltraining.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-fulltraining.ipynb)" and "[Image-classification-transfer-learning.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-transfer-learning.ipynb)". Again, we will **_not_** use the RecordIO files for the training. The following sections will only show you how to train a model with images and list files. Before training the model, we need to set up the training parameters. The next section will explain the parameters in detail. Fine-tuning the Image Classification Model Training parametersThere are two kinds of parameters that need to be set for training. The first are the parameters for the training job. These include:* **Input specification**: These are the training and validation channels that specify the path where training data is present. These are specified in the "InputDataConfig" section. The main parameters that need to be set are the "ContentType", which can be set to "application/x-recordio" or "application/x-image" based on the input data format, and the "S3Uri", which specifies the bucket and the folder where the data is present. * **Output specification**: This is specified in the "OutputDataConfig" section. We just need to specify the path where the output can be stored after training. * **Resource config**: This section specifies the type of instance on which to run the training and the number of hosts used for training. If "InstanceCount" is more than 1, then training can be run in a distributed manner. Apart from the above set of parameters, there are hyperparameters that are specific to the algorithm. These are:* **num_layers**: The number of layers (depth) for the network. We use 18 in this sample, but other values such as 50 or 152 can be used.* **image_shape**: The input image dimensions, 'num_channels, height, width', for the network. It should be no larger than the actual image size. The number of channels should be the same as in the actual image.* **num_training_samples**: This is the total number of training samples. It is set to 15240 for the caltech dataset with the current split.* **num_classes**: This is the number of output classes for the new dataset. Imagenet was trained with 1000 output classes, but the number of output classes can be changed for fine-tuning. For caltech, we use 257 because it has 256 object categories + 1 clutter class.* **mini_batch_size**: The number of training samples used for each mini batch. In distributed training, the number of training samples used per batch will be N * mini_batch_size, where N is the number of hosts on which training is run.* **epochs**: Number of training epochs.* **learning_rate**: Learning rate for training.* **top_k**: Report the top-k accuracy during training.* **resize**: Resize the image before using it for training. The images are resized so that the shortest side equals this parameter. If the parameter is not set, then the training data is used as such without resizing.* **checkpoint_frequency**: Period to store model parameters (in number of epochs).* **use_pretrained_model**: Set to 1 to use a pretrained model for transfer learning.
###Code
# The algorithm supports multiple network depths (number of layers). They are 18, 34, 50, 101, 152 and 200
# For this training, we will use 18 layers
num_layers = 18
# we need to specify the input image shape for the training data
image_shape = "3,224,224"
# we also need to specify the number of training samples in the training set
num_training_samples = 15240
# specify the number of output classes
num_classes = 257
# batch size for training
mini_batch_size = 128
# number of epochs
epochs = 6
# learning rate
learning_rate = 0.01
# report top_5 accuracy
top_k = 5
# resize image before training
resize = 256
# period to store model parameters (in number of epochs), in this case, we will save parameters from epoch 2, 4, and 6
checkpoint_frequency = 2
# Since we are using transfer learning, we set use_pretrained_model to 1 so that weights can be
# initialized with pre-trained weights
use_pretrained_model = 1
###Output
_____no_output_____
###Markdown
TrainingRun the training using Amazon sagemaker CreateTrainingJob API
###Code
%%time
import time
import boto3
from time import gmtime, strftime
s3 = boto3.client('s3')
# create unique job name
job_name_prefix = 'sagemaker-imageclassification-notebook'
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
job_name = job_name_prefix + timestamp
training_params = \
{
# specify the training docker image
"AlgorithmSpecification": {
"TrainingImage": training_image,
"TrainingInputMode": "File"
},
"RoleArn": role,
"OutputDataConfig": {
"S3OutputPath": 's3://{}/{}/output'.format(bucket, job_name_prefix)
},
"ResourceConfig": {
"InstanceCount": 1,
"InstanceType": "ml.p2.xlarge",
"VolumeSizeInGB": 50
},
"TrainingJobName": job_name,
"HyperParameters": {
"image_shape": image_shape,
"num_layers": str(num_layers),
"num_training_samples": str(num_training_samples),
"num_classes": str(num_classes),
"mini_batch_size": str(mini_batch_size),
"epochs": str(epochs),
"learning_rate": str(learning_rate),
"top_k": str(top_k),
"resize": str(resize),
"checkpoint_frequency": str(checkpoint_frequency),
"use_pretrained_model": str(use_pretrained_model)
},
"StoppingCondition": {
"MaxRuntimeInSeconds": 360000
},
#Training data should be inside a subdirectory called "train"
#Validation data should be inside a subdirectory called "validation"
#The algorithm currently only supports fullyreplicated model (where data is copied onto each machine)
"InputDataConfig": [
{
"ChannelName": "train",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": 's3://{}/train/'.format(bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
},
{
"ChannelName": "validation",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": 's3://{}/validation/'.format(bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
},
{
"ChannelName": "train_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": 's3://{}/train_lst/'.format(bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
},
{
"ChannelName": "validation_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": 's3://{}/validation_lst/'.format(bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
}
]
}
print('Training job name: {}'.format(job_name))
print('\nInput Data Location: {}'.format(training_params['InputDataConfig'][0]['DataSource']['S3DataSource']))
# create the Amazon SageMaker training job
sagemaker = boto3.client(service_name='sagemaker')
sagemaker.create_training_job(**training_params)
# confirm that the training job has started
status = sagemaker.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']
print('Training job current status: {}'.format(status))
try:
# wait for the job to finish and report the ending status
sagemaker.get_waiter('training_job_completed_or_stopped').wait(TrainingJobName=job_name)
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info['TrainingJobStatus']
print("Training job ended with status: " + status)
except:
print('Training failed')
# if exception is raised, that means it has failed
message = sagemaker.describe_training_job(TrainingJobName=job_name)['FailureReason']
print('Training failed with the following error: {}'.format(message))
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info['TrainingJobStatus']
print("Training job ended with status: " + status)
print (training_info)
###Output
_____no_output_____
###Markdown
If you see the message,> `Training job ended with status: Completed`then that means training successfully completed and the output model was stored in the output path specified by `training_params['OutputDataConfig']`.You can also view information about and the status of a training job using the AWS SageMaker console. Just click on the "Jobs" tab. Set Up Hosting For The ModelA trained model does nothing on its own. We now want to use the model to perform inference. For this example, that means predicting the class label given an input image.This section involves several steps,1. [Create model](CreateModel) - Create model for the training output1. [Create endpoint configuration](CreateEndpointConfiguration) - Create a configuration defining an endpoint.1. [Create endpoint](CreateEndpoint) - Use the configuration to create an inference endpoint.1. [Perform inference](PerformInference) - Perform inference on some input data using the endpoint. Create modelWe now create a SageMaker Model from the training output. Using the model we can create an Endpoint Configuration.
###Code
%%time
import boto3
from time import gmtime, strftime
sage = boto3.Session().client(service_name='sagemaker')
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
model_name="image-classification-model" + timestamp
print(model_name)
info = sage.describe_training_job(TrainingJobName=job_name)
model_data = info['ModelArtifacts']['S3ModelArtifacts']
print(model_data)
hosting_image = containers[boto3.Session().region_name]
primary_container = {
'Image': hosting_image,
'ModelDataUrl': model_data,
}
create_model_response = sage.create_model(
ModelName = model_name,
ExecutionRoleArn = role,
PrimaryContainer = primary_container)
print(create_model_response['ModelArn'])
###Output
_____no_output_____
###Markdown
Create endpoint configurationAt launch, we will support configuring REST endpoints in hosting with multiple models, e.g. for A/B testing purposes. In order to support this, customers create an endpoint configuration, that describes the distribution of traffic across the models, whether split, shadowed, or sampled in some way.In addition, the endpoint configuration describes the instance type required for model deployment, and at launch will describe the autoscaling configuration.
###Code
from time import gmtime, strftime
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_config_name = job_name_prefix + '-epc-' + timestamp
endpoint_config_response = sage.create_endpoint_config(
EndpointConfigName = endpoint_config_name,
ProductionVariants=[{
'InstanceType':'ml.p2.xlarge',
'InitialInstanceCount':1,
'ModelName':model_name,
'VariantName':'AllTraffic'}])
print('Endpoint configuration name: {}'.format(endpoint_config_name))
print('Endpoint configuration arn: {}'.format(endpoint_config_response['EndpointConfigArn']))
###Output
_____no_output_____
###Markdown
Create endpointLastly, the customer creates the endpoint that serves up the model, through specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 9-11 minutes to complete.
###Code
%%time
import time
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_name = job_name_prefix + '-ep-' + timestamp
print('Endpoint name: {}'.format(endpoint_name))
endpoint_params = {
'EndpointName': endpoint_name,
'EndpointConfigName': endpoint_config_name,
}
endpoint_response = sagemaker.create_endpoint(**endpoint_params)
print('EndpointArn = {}'.format(endpoint_response['EndpointArn']))
###Output
_____no_output_____
###Markdown
Finally, the endpoint can now be created. It may take some time to create the endpoint...
###Code
# get the status of the endpoint
response = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = response['EndpointStatus']
print('EndpointStatus = {}'.format(status))
try:
sagemaker.get_waiter('endpoint_in_service').wait(EndpointName=endpoint_name)
finally:
resp = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = resp['EndpointStatus']
print("Arn: " + resp['EndpointArn'])
print("Create endpoint ended with status: " + status)
if status != 'InService':
message = sagemaker.describe_endpoint(EndpointName=endpoint_name)['FailureReason']
print('Endpoint creation failed with the following error: {}'.format(message))
raise Exception('Endpoint creation did not succeed')
###Output
_____no_output_____
###Markdown
If you see the message,> `Endpoint creation ended with EndpointStatus = InService`then congratulations! You now have a functioning inference endpoint. You can confirm the endpoint configuration and status by navigating to the "Endpoints" tab in the AWS SageMaker console.We will finally create a runtime object from which we can invoke the endpoint. Perform inferenceFinally, the customer can now validate the model for use. They can obtain the endpoint from the client library using the result from previous operations, and generate classifications from the trained model using that endpoint.
###Code
import boto3
runtime = boto3.Session().client(service_name='runtime.sagemaker')
###Output
_____no_output_____
###Markdown
Download test image
###Code
!wget -O /tmp/test.jpg http://www.vision.caltech.edu/Image_Datasets/Caltech256/images/008.bathtub/008_0007.jpg
file_name = '/tmp/test.jpg'
# test image
from IPython.display import Image
Image(file_name)
import json
import numpy as np
with open(file_name, 'rb') as f:
payload = f.read()
payload = bytearray(payload)
response = runtime.invoke_endpoint(EndpointName=endpoint_name,
ContentType='application/x-image',
Body=payload)
result = response['Body'].read()
# result will be in json format and convert it to ndarray
result = json.loads(result)
# the result will output the probabilities for all classes
# find the class with maximum probability and print the class index
index = np.argmax(result)
object_categories = ['ak47', 'american-flag', 'backpack', 'baseball-bat', 'baseball-glove', 'basketball-hoop', 'bat', 'bathtub', 'bear', 'beer-mug', 'billiards', 'binoculars', 'birdbath', 'blimp', 'bonsai-101', 'boom-box', 'bowling-ball', 'bowling-pin', 'boxing-glove', 'brain-101', 'breadmaker', 'buddha-101', 'bulldozer', 'butterfly', 'cactus', 'cake', 'calculator', 'camel', 'cannon', 'canoe', 'car-tire', 'cartman', 'cd', 'centipede', 'cereal-box', 'chandelier-101', 'chess-board', 'chimp', 'chopsticks', 'cockroach', 'coffee-mug', 'coffin', 'coin', 'comet', 'computer-keyboard', 'computer-monitor', 'computer-mouse', 'conch', 'cormorant', 'covered-wagon', 'cowboy-hat', 'crab-101', 'desk-globe', 'diamond-ring', 'dice', 'dog', 'dolphin-101', 'doorknob', 'drinking-straw', 'duck', 'dumb-bell', 'eiffel-tower', 'electric-guitar-101', 'elephant-101', 'elk', 'ewer-101', 'eyeglasses', 'fern', 'fighter-jet', 'fire-extinguisher', 'fire-hydrant', 'fire-truck', 'fireworks', 'flashlight', 'floppy-disk', 'football-helmet', 'french-horn', 'fried-egg', 'frisbee', 'frog', 'frying-pan', 'galaxy', 'gas-pump', 'giraffe', 'goat', 'golden-gate-bridge', 'goldfish', 'golf-ball', 'goose', 'gorilla', 'grand-piano-101', 'grapes', 'grasshopper', 'guitar-pick', 'hamburger', 'hammock', 'harmonica', 'harp', 'harpsichord', 'hawksbill-101', 'head-phones', 'helicopter-101', 'hibiscus', 'homer-simpson', 'horse', 'horseshoe-crab', 'hot-air-balloon', 'hot-dog', 'hot-tub', 'hourglass', 'house-fly', 'human-skeleton', 'hummingbird', 'ibis-101', 'ice-cream-cone', 'iguana', 'ipod', 'iris', 'jesus-christ', 'joy-stick', 'kangaroo-101', 'kayak', 'ketch-101', 'killer-whale', 'knife', 'ladder', 'laptop-101', 'lathe', 'leopards-101', 'license-plate', 'lightbulb', 'light-house', 'lightning', 'llama-101', 'mailbox', 'mandolin', 'mars', 'mattress', 'megaphone', 'menorah-101', 'microscope', 'microwave', 'minaret', 'minotaur', 'motorbikes-101', 'mountain-bike', 'mushroom', 'mussels', 'necktie', 'octopus', 'ostrich', 'owl', 'palm-pilot', 'palm-tree', 'paperclip', 'paper-shredder', 'pci-card', 'penguin', 'people', 'pez-dispenser', 'photocopier', 'picnic-table', 'playing-card', 'porcupine', 'pram', 'praying-mantis', 'pyramid', 'raccoon', 'radio-telescope', 'rainbow', 'refrigerator', 'revolver-101', 'rifle', 'rotary-phone', 'roulette-wheel', 'saddle', 'saturn', 'school-bus', 'scorpion-101', 'screwdriver', 'segway', 'self-propelled-lawn-mower', 'sextant', 'sheet-music', 'skateboard', 'skunk', 'skyscraper', 'smokestack', 'snail', 'snake', 'sneaker', 'snowmobile', 'soccer-ball', 'socks', 'soda-can', 'spaghetti', 'speed-boat', 'spider', 'spoon', 'stained-glass', 'starfish-101', 'steering-wheel', 'stirrups', 'sunflower-101', 'superman', 'sushi', 'swan', 'swiss-army-knife', 'sword', 'syringe', 'tambourine', 'teapot', 'teddy-bear', 'teepee', 'telephone-box', 'tennis-ball', 'tennis-court', 'tennis-racket', 'theodolite', 'toaster', 'tomato', 'tombstone', 'top-hat', 'touring-bike', 'tower-pisa', 'traffic-light', 'treadmill', 'triceratops', 'tricycle', 'trilobite-101', 'tripod', 't-shirt', 'tuning-fork', 'tweezer', 'umbrella-101', 'unicorn', 'vcr', 'video-projector', 'washing-machine', 'watch-101', 'waterfall', 'watermelon', 'welding-mask', 'wheelbarrow', 'windmill', 'wine-bottle', 'xylophone', 'yarmulke', 'yo-yo', 'zebra', 'airplanes-101', 'car-side-101', 'faces-easy-101', 'greyhound', 'tennis-shoes', 'toad', 'clutter']
print("Result: label - " + object_categories[index] + ", probability - " + str(result[index]))
###Output
_____no_output_____
###Markdown
Clean upWhen we're done with the endpoint, we can just delete it and the backing instances will be released. Run the following cell to delete the endpoint and the model.
###Code
sage.delete_endpoint(EndpointName=endpoint_name)
# also remove the SageMaker model created above
sage.delete_model(ModelName=model_name)
###Output
_____no_output_____
###Markdown
Image classification training with image format1. [Introduction](Introduction)2. [Prerequisites and Preprocessing](Prerequisites-and-Preprocessing) 1. [Permissions and environment variables](Permissions-and-environment-variables) 2. [Prepare the data](Prepare-the-data)3. [Fine-tuning The Image Classification Model](Fine-tuning-the-Image-classification-model) 1. [Training parameters](Training-parameters) 2. [Training](Training)4. [Deploy The Model](Deploy-the-model) 1. [Create model](Create-model) 2. [Batch transform](Batch-transform) 3. [Realtime inference](Realtime-inference) 1. [Create endpoint configuration](Create-endpoint-configuration) 2. [Create endpoint](Create-endpoint) 3. [Perform inference](Perform-inference) 4. [Clean up](Clean-up) IntroductionWelcome to our end-to-end example of the image classification algorithm training with image format. In this demo, we will use the Amazon sagemaker image classification algorithm in transfer learning mode to fine-tune a pre-trained model (trained on imagenet data) to learn to classify a new dataset. In particular, the pre-trained model will be fine-tuned using the [caltech-256 dataset](http://www.vision.caltech.edu/Image_Datasets/Caltech256/). To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on. Prerequisites and Preprocessing Permissions and environment variablesHere we set up the linkage and authentication to AWS services. There are three parts to this:* The roles used to give learning and hosting access to your data. This will automatically be obtained from the role used to start the notebook.* The S3 bucket that you want to use for training and model data.* The Amazon sagemaker image classification docker image, which need not be changed.
###Code
%%time
import boto3
from sagemaker import get_execution_role
from sagemaker.amazon.amazon_estimator import get_image_uri
role = get_execution_role()
bucket='<<bucket-name>>' # customize to your bucket
training_image = get_image_uri(boto3.Session().region_name, 'image-classification')
###Output
_____no_output_____
###Markdown
Fine-tuning the Image classification model Prepare the dataThe caltech 256 dataset consists of images from 257 categories (the last one being a clutter category) and has 30k images with a minimum of 80 images and a maximum of about 800 images per category. The image classification algorithm can take two types of input formats. The first is a [RecordIO format](https://mxnet.incubator.apache.org/tutorials/basic/record_io.html) (content type: application/x-recordio) and the other is a [lst format](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec) (content type: application/x-image). Files for both these formats are available at http://data.dmlc.ml/mxnet/data/caltech-256/. In this example, we will use the lst format for training and use the training/validation split [specified here](http://data.dmlc.ml/mxnet/data/caltech-256/).
###Code
import os
import urllib.request
def download(url):
filename = url.split("/")[-1]
if not os.path.exists(filename):
urllib.request.urlretrieve(url, filename)
# Caltech-256 image files
download('http://www.vision.caltech.edu/Image_Datasets/Caltech256/256_ObjectCategories.tar')
!tar -xf 256_ObjectCategories.tar
# Tool for creating lst file
download('https://raw.githubusercontent.com/apache/incubator-mxnet/master/tools/im2rec.py')
%%bash
mkdir -p caltech_256_train_60
for i in 256_ObjectCategories/*; do
c=`basename $i`
mkdir -p caltech_256_train_60/$c
for j in `ls $i/*.jpg | shuf | head -n 60`; do
mv $j caltech_256_train_60/$c/
done
done
python im2rec.py --list --recursive caltech-256-60-train caltech_256_train_60/
python im2rec.py --list --recursive caltech-256-60-val 256_ObjectCategories/
###Output
_____no_output_____
###Markdown
A .lst file is a tab-separated file with three columns that contains a list of image files. The first column specifies the image index, the second column specifies the class label index for the image, and the third column specifies the relative path of the image file. The image index in the first column should be unique across all of the images. Here we make an image list file using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool from MXNet. You can also create the .lst file in your own way. An example of a .lst file is shown below.
###Code
!head -n 3 ./caltech-256-60-train.lst > example.lst
f = open('example.lst','r')
lst_content = f.read()
print(lst_content)
###Output
_____no_output_____
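###Markdown
For reference, a minimal sketch (not part of the original notebook) of parsing the tab-separated .lst lines in plain Python; `parse_lst_line` is a hypothetical helper, not something the algorithm requires:
###Code
# Each .lst line is: image index <tab> class label index <tab> relative image path.
def parse_lst_line(line):
    index, label, path = line.rstrip('\n').split('\t')
    return int(index), int(float(label)), path

with open('example.lst') as f:
    for line in f:
        print(parse_lst_line(line))
###Output
_____no_output_____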
###Markdown
When you bring your own image files to train on, please ensure that the .lst file follows the same format as described above. To train with the lst format interface, you must pass the .lst files for both training and validation in the appropriate format. Once the data is available in the correct format for training, the next step is to upload the images and .lst files to the S3 bucket.
###Code
# Four channels: train, validation, train_lst, and validation_lst
s3train = 's3://{}/image-classification/train/'.format(bucket)
s3validation = 's3://{}/image-classification/validation/'.format(bucket)
s3train_lst = 's3://{}/image-classification/train_lst/'.format(bucket)
s3validation_lst = 's3://{}/image-classification/validation_lst/'.format(bucket)
# upload the image files to train and validation channels
!aws s3 cp caltech_256_train_60 $s3train --recursive --quiet
!aws s3 cp 256_ObjectCategories $s3validation --recursive --quiet
# upload the lst files to train_lst and validation_lst channels
!aws s3 cp caltech-256-60-train.lst $s3train_lst --quiet
!aws s3 cp caltech-256-60-val.lst $s3validation_lst --quiet
###Output
_____no_output_____
###Markdown
Now we have all the data stored in the S3 bucket. The image and .lst files will be converted to RecordIO files internally by the image classification algorithm. But if you want to do the conversion yourself, the following cell shows how to do it using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool. Note that this is just an example of creating RecordIO files. We are **_not_** using them for training in this notebook. More details on creating RecordIO files can be found in this [tutorial](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec).
###Code
%%bash
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-val 256_ObjectCategories/
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-train caltech_256_train_60/
###Output
_____no_output_____
###Markdown
After you have created the RecordIO files, you can upload them to the train and validation channels for training. To train with the RecordIO format, you can follow "[Image-classification-fulltraining.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-fulltraining.ipynb)" and "[Image-classification-transfer-learning.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-transfer-learning.ipynb)". Again, we will **_not_** use the RecordIO files for training. The following sections only show how to train a model with images and list files. Before training the model, we need to set up the training parameters. The next section explains the parameters in detail. Fine-tuning the Image Classification Model Training parametersThere are two kinds of parameters that need to be set for training. The first kind are the parameters for the training job. These include:* **Input specification**: These are the training and validation channels that specify the path where the training data is present. They are specified in the "InputDataConfig" section. The main parameters that need to be set are the "ContentType", which can be set to "application/x-recordio" or "application/x-image" based on the input data format, and the "S3Uri", which specifies the bucket and the folder where the data is present. * **Output specification**: This is specified in the "OutputDataConfig" section. We just need to specify the path where the output can be stored after training.* **Resource config**: This section specifies the type of instance on which to run the training and the number of hosts used for training. If "InstanceCount" is more than 1, then training can be run in a distributed manner. Apart from the above set of parameters, there are hyperparameters that are specific to the algorithm. These are:* **num_layers**: The number of layers (depth) for the network. We use 18 in this sample, but other values such as 50 or 152 can be used.* **image_shape**: The input image dimensions, 'num_channels, height, width', for the network. It should be no larger than the actual image size. The number of channels should be the same as in the actual images.* **num_training_samples**: This is the total number of training samples. It is set to 15240 for the caltech dataset with the current split.* **num_classes**: This is the number of output classes for the new dataset. ImageNet was trained with 1000 output classes, but the number of output classes can be changed for fine-tuning. For caltech, we use 257 because it has 256 object categories + 1 clutter class.* **mini_batch_size**: The number of training samples used for each mini batch. In distributed training, the number of training samples used per batch will be N * mini_batch_size, where N is the number of hosts on which training is run.* **epochs**: Number of training epochs.* **learning_rate**: Learning rate for training.* **top_k**: Report the top-k accuracy during training.* **resize**: Resize the image before using it for training. The images are resized so that the shortest side has the length given by this parameter. If the parameter is not set, then the training data is used as is, without resizing.* **checkpoint_frequency**: Period for storing model parameters (in number of epochs).* **use_pretrained_model**: Set to 1 to use a pretrained model for transfer learning.
###Code
# The algorithm supports multiple network depth (number of layers). They are 18, 34, 50, 101, 152 and 200
# For this training, we will use 18 layers
num_layers = 18
# we need to specify the input image shape for the training data
image_shape = "3,224,224"
# we also need to specify the number of training samples in the training set
num_training_samples = 15240
# specify the number of output classes
num_classes = 257
# batch size for training
mini_batch_size = 128
# number of epochs
epochs = 6
# learning rate
learning_rate = 0.01
# report top_5 accuracy
top_k = 5
# resize image before training
resize = 256
# period to store model parameters (in number of epochs), in this case, we will save parameters from epoch 2, 4, and 6
checkpoint_frequency = 2
# Since we are using transfer learning, we set use_pretrained_model to 1 so that weights can be
# initialized with pre-trained weights
use_pretrained_model = 1
###Output
_____no_output_____
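###Markdown
As a quick sanity check (an optional sketch, not part of the original notebook), `num_training_samples` can be verified by counting the lines in the training .lst file generated earlier:
###Code
# Assumes caltech-256-60-train.lst is still present in the working directory.
with open('caltech-256-60-train.lst') as f:
    line_count = sum(1 for _ in f)
print(line_count)  # should match num_training_samples (15240 for this split)
###Output
_____no_output_____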
###Markdown
TrainingRun the training using the Amazon SageMaker CreateTrainingJob API.
###Code
%%time
import time
import boto3
from time import gmtime, strftime
s3 = boto3.client('s3')
# create unique job name
job_name_prefix = 'sagemaker-imageclassification-notebook'
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
job_name = job_name_prefix + timestamp
training_params = \
{
# specify the training docker image
"AlgorithmSpecification": {
"TrainingImage": training_image,
"TrainingInputMode": "File"
},
"RoleArn": role,
"OutputDataConfig": {
"S3OutputPath": 's3://{}/{}/output'.format(bucket, job_name_prefix)
},
"ResourceConfig": {
"InstanceCount": 1,
"InstanceType": "ml.p2.xlarge",
"VolumeSizeInGB": 50
},
"TrainingJobName": job_name,
"HyperParameters": {
"image_shape": image_shape,
"num_layers": str(num_layers),
"num_training_samples": str(num_training_samples),
"num_classes": str(num_classes),
"mini_batch_size": str(mini_batch_size),
"epochs": str(epochs),
"learning_rate": str(learning_rate),
"top_k": str(top_k),
"resize": str(resize),
"checkpoint_frequency": str(checkpoint_frequency),
"use_pretrained_model": str(use_pretrained_model)
},
"StoppingCondition": {
"MaxRuntimeInSeconds": 360000
},
#Training data should be inside a subdirectory called "train"
#Validation data should be inside a subdirectory called "validation"
#The algorithm currently only supports fullyreplicated model (where data is copied onto each machine)
"InputDataConfig": [
{
"ChannelName": "train",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3train,
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
},
{
"ChannelName": "validation",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3validation,
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
},
{
"ChannelName": "train_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3train_lst,
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
},
{
"ChannelName": "validation_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3validation_lst,
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
}
]
}
print('Training job name: {}'.format(job_name))
print('\nInput Data Location: {}'.format(training_params['InputDataConfig'][0]['DataSource']['S3DataSource']))
# create the Amazon SageMaker training job
sagemaker = boto3.client(service_name='sagemaker')
sagemaker.create_training_job(**training_params)
# confirm that the training job has started
status = sagemaker.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']
print('Training job current status: {}'.format(status))
try:
# wait for the job to finish and report the ending status
sagemaker.get_waiter('training_job_completed_or_stopped').wait(TrainingJobName=job_name)
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info['TrainingJobStatus']
print("Training job ended with status: " + status)
except:
print('Training failed to start')
# if exception is raised, that means it has failed
message = sagemaker.describe_training_job(TrainingJobName=job_name)['FailureReason']
print('Training failed with the following error: {}'.format(message))
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info['TrainingJobStatus']
print("Training job ended with status: " + status)
print (training_info)
###Output
_____no_output_____
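###Markdown
Once the job has completed, the final metrics it reported (if any) can be read back from the same `DescribeTrainingJob` response; this is an optional sketch, not part of the original notebook:
###Code
# FinalMetricDataList is only populated after the job finishes and publishes metrics.
info = sagemaker.describe_training_job(TrainingJobName=job_name)
for metric in info.get('FinalMetricDataList', []):
    print(metric['MetricName'], metric['Value'])
###Output
_____no_output_____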
###Markdown
If you see the message,> `Training job ended with status: Completed`then that means training completed successfully and the output model was stored in the output path specified by `training_params['OutputDataConfig']`.You can also view information about and the status of a training job using the AWS SageMaker console. Just click on the "Jobs" tab. Deploy The ModelA trained model does nothing on its own. We now want to use the model to perform inference. For this example, that means predicting the class label given an input image.This section involves several steps:1. [Create model](CreateModel) - Create a model from the training output1. [Batch Transform](BatchTransform) - Create a transform job to perform batch inference.1. [Host the model for realtime inference](HostTheModel) - Create an inference endpoint and perform realtime inference. Create modelWe now create a SageMaker Model from the training output. Using the model, we can create an Endpoint Configuration.
###Code
%%time
import boto3
from time import gmtime, strftime
sage = boto3.Session().client(service_name='sagemaker')
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
model_name="image-classification-model" + timestamp
print(model_name)
info = sage.describe_training_job(TrainingJobName=job_name)
model_data = info['ModelArtifacts']['S3ModelArtifacts']
print(model_data)
hosting_image = get_image_uri(boto3.Session().region_name, 'image-classification')
primary_container = {
'Image': hosting_image,
'ModelDataUrl': model_data,
}
create_model_response = sage.create_model(
ModelName = model_name,
ExecutionRoleArn = role,
PrimaryContainer = primary_container)
print(create_model_response['ModelArn'])
###Output
_____no_output_____
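###Markdown
To confirm the model was registered, it can be read back with `DescribeModel` (an optional sketch, not part of the original notebook):
###Code
# Echo the registered model name and the artifact it points at.
model_info = sage.describe_model(ModelName=model_name)
print(model_info['ModelName'])
print(model_info['PrimaryContainer']['ModelDataUrl'])
###Output
_____no_output_____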
###Markdown
Batch transformWe now create a SageMaker Batch Transform job using the model created above to perform batch prediction.
###Code
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
batch_job_name="image-classification-model" + timestamp
batch_input = s3validation + "001.ak47/"
request = \
{
"TransformJobName": batch_job_name,
"ModelName": model_name,
"MaxConcurrentTransforms": 16,
"MaxPayloadInMB": 6,
"BatchStrategy": "SingleRecord",
"TransformOutput": {
"S3OutputPath": 's3://{}/{}/output'.format(bucket, batch_job_name)
},
"TransformInput": {
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": batch_input
}
},
"ContentType": "application/x-image",
"SplitType": "None",
"CompressionType": "None"
},
"TransformResources": {
"InstanceType": "ml.p2.xlarge",
"InstanceCount": 1
}
}
print('Transform job name: {}'.format(batch_job_name))
print('\nInput Data Location: {}'.format(batch_input))
sagemaker = boto3.client('sagemaker')
sagemaker.create_transform_job(**request)
print("Created Transform job with name: ", batch_job_name)
while(True):
response = sagemaker.describe_transform_job(TransformJobName=batch_job_name)
status = response['TransformJobStatus']
if status == 'Completed':
print("Transform job ended with status: " + status)
break
if status == 'Failed':
message = response['FailureReason']
print('Transform failed with the following error: {}'.format(message))
raise Exception('Transform job failed')
time.sleep(30)
###Output
_____no_output_____
###Markdown
After the job completes, let's check the prediction results.
###Code
from urllib.parse import urlparse
import json
import numpy as np
s3_client = boto3.client('s3')
object_categories = ['ak47', 'american-flag', 'backpack', 'baseball-bat', 'baseball-glove', 'basketball-hoop', 'bat', 'bathtub', 'bear', 'beer-mug', 'billiards', 'binoculars', 'birdbath', 'blimp', 'bonsai-101', 'boom-box', 'bowling-ball', 'bowling-pin', 'boxing-glove', 'brain-101', 'breadmaker', 'buddha-101', 'bulldozer', 'butterfly', 'cactus', 'cake', 'calculator', 'camel', 'cannon', 'canoe', 'car-tire', 'cartman', 'cd', 'centipede', 'cereal-box', 'chandelier-101', 'chess-board', 'chimp', 'chopsticks', 'cockroach', 'coffee-mug', 'coffin', 'coin', 'comet', 'computer-keyboard', 'computer-monitor', 'computer-mouse', 'conch', 'cormorant', 'covered-wagon', 'cowboy-hat', 'crab-101', 'desk-globe', 'diamond-ring', 'dice', 'dog', 'dolphin-101', 'doorknob', 'drinking-straw', 'duck', 'dumb-bell', 'eiffel-tower', 'electric-guitar-101', 'elephant-101', 'elk', 'ewer-101', 'eyeglasses', 'fern', 'fighter-jet', 'fire-extinguisher', 'fire-hydrant', 'fire-truck', 'fireworks', 'flashlight', 'floppy-disk', 'football-helmet', 'french-horn', 'fried-egg', 'frisbee', 'frog', 'frying-pan', 'galaxy', 'gas-pump', 'giraffe', 'goat', 'golden-gate-bridge', 'goldfish', 'golf-ball', 'goose', 'gorilla', 'grand-piano-101', 'grapes', 'grasshopper', 'guitar-pick', 'hamburger', 'hammock', 'harmonica', 'harp', 'harpsichord', 'hawksbill-101', 'head-phones', 'helicopter-101', 'hibiscus', 'homer-simpson', 'horse', 'horseshoe-crab', 'hot-air-balloon', 'hot-dog', 'hot-tub', 'hourglass', 'house-fly', 'human-skeleton', 'hummingbird', 'ibis-101', 'ice-cream-cone', 'iguana', 'ipod', 'iris', 'jesus-christ', 'joy-stick', 'kangaroo-101', 'kayak', 'ketch-101', 'killer-whale', 'knife', 'ladder', 'laptop-101', 'lathe', 'leopards-101', 'license-plate', 'lightbulb', 'light-house', 'lightning', 'llama-101', 'mailbox', 'mandolin', 'mars', 'mattress', 'megaphone', 'menorah-101', 'microscope', 'microwave', 'minaret', 'minotaur', 'motorbikes-101', 'mountain-bike', 'mushroom', 'mussels', 'necktie', 'octopus', 'ostrich', 'owl', 'palm-pilot', 'palm-tree', 'paperclip', 'paper-shredder', 'pci-card', 'penguin', 'people', 'pez-dispenser', 'photocopier', 'picnic-table', 'playing-card', 'porcupine', 'pram', 'praying-mantis', 'pyramid', 'raccoon', 'radio-telescope', 'rainbow', 'refrigerator', 'revolver-101', 'rifle', 'rotary-phone', 'roulette-wheel', 'saddle', 'saturn', 'school-bus', 'scorpion-101', 'screwdriver', 'segway', 'self-propelled-lawn-mower', 'sextant', 'sheet-music', 'skateboard', 'skunk', 'skyscraper', 'smokestack', 'snail', 'snake', 'sneaker', 'snowmobile', 'soccer-ball', 'socks', 'soda-can', 'spaghetti', 'speed-boat', 'spider', 'spoon', 'stained-glass', 'starfish-101', 'steering-wheel', 'stirrups', 'sunflower-101', 'superman', 'sushi', 'swan', 'swiss-army-knife', 'sword', 'syringe', 'tambourine', 'teapot', 'teddy-bear', 'teepee', 'telephone-box', 'tennis-ball', 'tennis-court', 'tennis-racket', 'theodolite', 'toaster', 'tomato', 'tombstone', 'top-hat', 'touring-bike', 'tower-pisa', 'traffic-light', 'treadmill', 'triceratops', 'tricycle', 'trilobite-101', 'tripod', 't-shirt', 'tuning-fork', 'tweezer', 'umbrella-101', 'unicorn', 'vcr', 'video-projector', 'washing-machine', 'watch-101', 'waterfall', 'watermelon', 'welding-mask', 'wheelbarrow', 'windmill', 'wine-bottle', 'xylophone', 'yarmulke', 'yo-yo', 'zebra', 'airplanes-101', 'car-side-101', 'faces-easy-101', 'greyhound', 'tennis-shoes', 'toad', 'clutter']
def list_objects(s3_client, bucket, prefix):
response = s3_client.list_objects(Bucket=bucket, Prefix=prefix)
objects = [content['Key'] for content in response['Contents']]
return objects
def get_label(s3_client, bucket, prefix):
filename = prefix.split('/')[-1]
s3_client.download_file(bucket, prefix, filename)
with open(filename) as f:
data = json.load(f)
index = np.argmax(data['prediction'])
probability = data['prediction'][index]
print("Result: label - " + object_categories[index] + ", probability - " + str(probability))
return object_categories[index], probability
inputs = list_objects(s3_client, bucket, urlparse(batch_input).path.lstrip('/'))
print("Sample inputs: " + str(inputs[:2]))
outputs = list_objects(s3_client, bucket, batch_job_name + "/output")
print("Sample output: " + str(outputs[:2]))
# Check prediction result of the first 2 images
[get_label(s3_client, bucket, prefix) for prefix in outputs[0:2]]
###Output
_____no_output_____
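###Markdown
Since the batch input above points at the `001.ak47/` folder, a rough per-class accuracy can be estimated by labelling every output file and counting how many predictions come back as `ak47`. This is an optional sketch, not part of the original notebook, and it downloads each output file locally:
###Code
# get_label prints one line per file and returns (label, probability).
predictions = [get_label(s3_client, bucket, prefix)[0] for prefix in outputs]
accuracy = sum(1 for label in predictions if label == 'ak47') / len(predictions)
print("ak47 batch accuracy: {:.2%}".format(accuracy))
###Output
_____no_output_____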
###Markdown
Realtime inferenceWe now host the model with an endpoint and perform realtime inference.This section involves several steps:1. [Create endpoint configuration](CreateEndpointConfiguration) - Create a configuration defining an endpoint.1. [Create endpoint](CreateEndpoint) - Use the configuration to create an inference endpoint.1. [Perform inference](PerformInference) - Perform inference on some input data using the endpoint.1. [Clean up](CleanUp) - Delete the endpoint and model. Create endpoint configurationAt launch, we will support configuring REST endpoints in hosting with multiple models, e.g. for A/B testing purposes. In order to support this, customers create an endpoint configuration that describes the distribution of traffic across the models, whether split, shadowed, or sampled in some way.In addition, the endpoint configuration describes the instance type required for model deployment, and at launch will describe the autoscaling configuration.
###Code
from time import gmtime, strftime
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_config_name = job_name_prefix + '-epc-' + timestamp
endpoint_config_response = sage.create_endpoint_config(
EndpointConfigName = endpoint_config_name,
ProductionVariants=[{
'InstanceType':'ml.p2.xlarge',
'InitialInstanceCount':1,
'ModelName':model_name,
'VariantName':'AllTraffic'}])
print('Endpoint configuration name: {}'.format(endpoint_config_name))
print('Endpoint configuration arn: {}'.format(endpoint_config_response['EndpointConfigArn']))
###Output
_____no_output_____
###Markdown
Create endpointNext, the customer creates the endpoint that serves up the model, by specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 9-11 minutes to complete.
###Code
%%time
import time
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_name = job_name_prefix + '-ep-' + timestamp
print('Endpoint name: {}'.format(endpoint_name))
endpoint_params = {
'EndpointName': endpoint_name,
'EndpointConfigName': endpoint_config_name,
}
endpoint_response = sagemaker.create_endpoint(**endpoint_params)
print('EndpointArn = {}'.format(endpoint_response['EndpointArn']))
###Output
_____no_output_____
###Markdown
Now the endpoint can be created. It may take some time to create the endpoint...
###Code
# get the status of the endpoint
response = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = response['EndpointStatus']
print('EndpointStatus = {}'.format(status))
try:
sagemaker.get_waiter('endpoint_in_service').wait(EndpointName=endpoint_name)
finally:
resp = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = resp['EndpointStatus']
print("Arn: " + resp['EndpointArn'])
print("Create endpoint ended with status: " + status)
if status != 'InService':
message = sagemaker.describe_endpoint(EndpointName=endpoint_name)['FailureReason']
print('Training failed with the following error: {}'.format(message))
raise Exception('Endpoint creation did not succeed')
###Output
_____no_output_____
###Markdown
If you see the message,> `Create endpoint ended with status: InService`then congratulations! You now have a functioning inference endpoint. You can confirm the endpoint configuration and status by navigating to the "Endpoints" tab in the AWS SageMaker console.We will finally create a runtime object from which we can invoke the endpoint. Perform inferenceFinally, the customer can now validate the model for use. They can obtain the endpoint from the client library using the result from previous operations, and generate classifications from the trained model using that endpoint.
###Code
import boto3
runtime = boto3.Session().client(service_name='runtime.sagemaker')
###Output
_____no_output_____
###Markdown
Download test image
###Code
!wget -O /tmp/test.jpg http://www.vision.caltech.edu/Image_Datasets/Caltech256/images/008.bathtub/008_0007.jpg
file_name = '/tmp/test.jpg'
# test image
from IPython.display import Image
Image(file_name)
import json
import numpy as np
with open(file_name, 'rb') as f:
payload = f.read()
payload = bytearray(payload)
response = runtime.invoke_endpoint(EndpointName=endpoint_name,
ContentType='application/x-image',
Body=payload)
result = response['Body'].read()
# result will be in json format and convert it to ndarray
result = json.loads(result)
# the result will output the probabilities for all classes
# find the class with maximum probability and print the class index
index = np.argmax(result)
object_categories = ['ak47', 'american-flag', 'backpack', 'baseball-bat', 'baseball-glove', 'basketball-hoop', 'bat', 'bathtub', 'bear', 'beer-mug', 'billiards', 'binoculars', 'birdbath', 'blimp', 'bonsai-101', 'boom-box', 'bowling-ball', 'bowling-pin', 'boxing-glove', 'brain-101', 'breadmaker', 'buddha-101', 'bulldozer', 'butterfly', 'cactus', 'cake', 'calculator', 'camel', 'cannon', 'canoe', 'car-tire', 'cartman', 'cd', 'centipede', 'cereal-box', 'chandelier-101', 'chess-board', 'chimp', 'chopsticks', 'cockroach', 'coffee-mug', 'coffin', 'coin', 'comet', 'computer-keyboard', 'computer-monitor', 'computer-mouse', 'conch', 'cormorant', 'covered-wagon', 'cowboy-hat', 'crab-101', 'desk-globe', 'diamond-ring', 'dice', 'dog', 'dolphin-101', 'doorknob', 'drinking-straw', 'duck', 'dumb-bell', 'eiffel-tower', 'electric-guitar-101', 'elephant-101', 'elk', 'ewer-101', 'eyeglasses', 'fern', 'fighter-jet', 'fire-extinguisher', 'fire-hydrant', 'fire-truck', 'fireworks', 'flashlight', 'floppy-disk', 'football-helmet', 'french-horn', 'fried-egg', 'frisbee', 'frog', 'frying-pan', 'galaxy', 'gas-pump', 'giraffe', 'goat', 'golden-gate-bridge', 'goldfish', 'golf-ball', 'goose', 'gorilla', 'grand-piano-101', 'grapes', 'grasshopper', 'guitar-pick', 'hamburger', 'hammock', 'harmonica', 'harp', 'harpsichord', 'hawksbill-101', 'head-phones', 'helicopter-101', 'hibiscus', 'homer-simpson', 'horse', 'horseshoe-crab', 'hot-air-balloon', 'hot-dog', 'hot-tub', 'hourglass', 'house-fly', 'human-skeleton', 'hummingbird', 'ibis-101', 'ice-cream-cone', 'iguana', 'ipod', 'iris', 'jesus-christ', 'joy-stick', 'kangaroo-101', 'kayak', 'ketch-101', 'killer-whale', 'knife', 'ladder', 'laptop-101', 'lathe', 'leopards-101', 'license-plate', 'lightbulb', 'light-house', 'lightning', 'llama-101', 'mailbox', 'mandolin', 'mars', 'mattress', 'megaphone', 'menorah-101', 'microscope', 'microwave', 'minaret', 'minotaur', 'motorbikes-101', 'mountain-bike', 'mushroom', 'mussels', 'necktie', 'octopus', 'ostrich', 'owl', 'palm-pilot', 'palm-tree', 'paperclip', 'paper-shredder', 'pci-card', 'penguin', 'people', 'pez-dispenser', 'photocopier', 'picnic-table', 'playing-card', 'porcupine', 'pram', 'praying-mantis', 'pyramid', 'raccoon', 'radio-telescope', 'rainbow', 'refrigerator', 'revolver-101', 'rifle', 'rotary-phone', 'roulette-wheel', 'saddle', 'saturn', 'school-bus', 'scorpion-101', 'screwdriver', 'segway', 'self-propelled-lawn-mower', 'sextant', 'sheet-music', 'skateboard', 'skunk', 'skyscraper', 'smokestack', 'snail', 'snake', 'sneaker', 'snowmobile', 'soccer-ball', 'socks', 'soda-can', 'spaghetti', 'speed-boat', 'spider', 'spoon', 'stained-glass', 'starfish-101', 'steering-wheel', 'stirrups', 'sunflower-101', 'superman', 'sushi', 'swan', 'swiss-army-knife', 'sword', 'syringe', 'tambourine', 'teapot', 'teddy-bear', 'teepee', 'telephone-box', 'tennis-ball', 'tennis-court', 'tennis-racket', 'theodolite', 'toaster', 'tomato', 'tombstone', 'top-hat', 'touring-bike', 'tower-pisa', 'traffic-light', 'treadmill', 'triceratops', 'tricycle', 'trilobite-101', 'tripod', 't-shirt', 'tuning-fork', 'tweezer', 'umbrella-101', 'unicorn', 'vcr', 'video-projector', 'washing-machine', 'watch-101', 'waterfall', 'watermelon', 'welding-mask', 'wheelbarrow', 'windmill', 'wine-bottle', 'xylophone', 'yarmulke', 'yo-yo', 'zebra', 'airplanes-101', 'car-side-101', 'faces-easy-101', 'greyhound', 'tennis-shoes', 'toad', 'clutter']
print("Result: label - " + object_categories[index] + ", probability - " + str(result[index]))
###Output
_____no_output_____
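###Markdown
Before cleaning up, the same `result` list can also be used to show the five most probable classes for the test image (an optional sketch, not part of the original notebook):
###Code
# np.argsort sorts ascending, so reverse and take the first five indices.
top5 = np.argsort(result)[::-1][:5]
for i in top5:
    print("label - {}, probability - {:.4f}".format(object_categories[i], result[i]))
###Output
_____no_output_____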
###Markdown
Clean upWhen we're done with the endpoint, we can delete it and the backing instances will be released. Run the following cell to delete the endpoint (the model and endpoint configuration can be deleted in the same way with `delete_model` and `delete_endpoint_config`).
###Code
sage.delete_endpoint(EndpointName=endpoint_name)
###Output
_____no_output_____
###Markdown
Image classification training with image format demo1. [Introduction](Introduction)2. [Prerequisites and Preprocessing](Prerequisites-and-Preprocessing) 1. [Permissions and environment variables](Permissions-and-environment-variables) 2. [Prepare the data](Prepare-the-data)3. [Fine-tuning The Image Classification Model](Fine-tuning-the-Image-classification-model) 1. [Training parameters](Training-parameters) 2. [Training](Training)4. [Set Up Hosting For The Model](Set-up-hosting-for-the-model) 1. [Create model](Create-model) 2. [Create endpoint configuration](Create-endpoint-configuration) 3. [Create endpoint](Create-endpoint) 4. [Perform inference](Perform-inference) IntroductionWelcome to our end-to-end example of training the image classification algorithm with the image format. In this demo, we will use the Amazon SageMaker image classification algorithm in transfer learning mode to fine-tune a pre-trained model (trained on ImageNet data) to classify a new dataset. In particular, the pre-trained model will be fine-tuned on the [caltech-256 dataset](http://www.vision.caltech.edu/Image_Datasets/Caltech256/). To get started, we need to set up the environment with a few prerequisite steps covering permissions, configurations, and so on. Prerequisites and Preprocessing Permissions and environment variablesHere we set up the linkage and authentication to AWS services. There are three parts to this:* The role used to give learning and hosting access to your data. This will be obtained automatically from the role used to start the notebook.* The S3 bucket that you want to use for training and model data.* The Amazon SageMaker image classification Docker image, which need not be changed.
###Code
%%time
import boto3
from sagemaker import get_execution_role
from sagemaker.amazon.amazon_estimator import get_image_uri
role = get_execution_role()
bucket='<<bucket-name>>' # customize to your bucket
training_image = get_image_uri(boto3.Session().region_name, 'image-classification')
###Output
_____no_output_____
###Markdown
Fine-tuning the Image classification model Prepare the dataThe caltech 256 dataset consists of images from 257 categories (the last one being a clutter category) and has 30k images, with a minimum of 80 images and a maximum of about 800 images per category. The image classification algorithm accepts two input formats. The first is a [RecordIO format](https://mxnet.incubator.apache.org/tutorials/basic/record_io.html) (content type: application/x-recordio) and the other is a [lst format](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec) (content type: application/x-image). Files for both these formats are available at http://data.dmlc.ml/mxnet/data/caltech-256/. In this example, we will use the lst format for training and use the training/validation split [specified here](http://data.dmlc.ml/mxnet/data/caltech-256/).
###Code
import os
import urllib.request
def download(url):
filename = url.split("/")[-1]
if not os.path.exists(filename):
urllib.request.urlretrieve(url, filename)
# Caltech-256 image files
download('http://www.vision.caltech.edu/Image_Datasets/Caltech256/256_ObjectCategories.tar')
!tar -xf 256_ObjectCategories.tar
# Tool for creating lst file
download('https://raw.githubusercontent.com/apache/incubator-mxnet/master/tools/im2rec.py')
%%bash
mkdir -p caltech_256_train_60
for i in 256_ObjectCategories/*; do
c=`basename $i`
mkdir -p caltech_256_train_60/$c
for j in `ls $i/*.jpg | shuf | head -n 60`; do
mv $j caltech_256_train_60/$c/
done
done
python im2rec.py --list --recursive caltech-256-60-train caltech_256_train_60/
python im2rec.py --list --recursive caltech-256-60-val 256_ObjectCategories/
###Output
_____no_output_____
###Markdown
A .lst file is a tab-separated file with three columns that contains a list of image files. The first column specifies the image index, the second column specifies the class label index for the image, and the third column specifies the relative path of the image file. The image index in the first column should be unique across all of the images. Here we make an image list file using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool from MXNet. You can also create the .lst file in your own way. An example of a .lst file is shown below.
###Code
!head -n 3 ./caltech-256-60-train.lst > example.lst
f = open('example.lst','r')
lst_content = f.read()
print(lst_content)
###Output
_____no_output_____
###Markdown
When you bring your own image files to train on, please ensure that the .lst file follows the same format as described above. To train with the lst format interface, you must pass the .lst files for both training and validation in the appropriate format. Once the data is available in the correct format for training, the next step is to upload the images and .lst files to the S3 bucket.
###Code
# Four channels: train, validation, train_lst, and validation_lst
s3train = 's3://{}/train/'.format(bucket)
s3validation = 's3://{}/validation/'.format(bucket)
s3train_lst = 's3://{}/train_lst/'.format(bucket)
s3validation_lst = 's3://{}/validation_lst/'.format(bucket)
# upload the image files to train and validation channels
!aws s3 cp caltech_256_train_60 $s3train --recursive --quiet
!aws s3 cp 256_ObjectCategories $s3validation --recursive --quiet
# upload the lst files to train_lst and validation_lst channels
!aws s3 cp caltech-256-60-train.lst $s3train_lst --quiet
!aws s3 cp caltech-256-60-val.lst $s3validation_lst --quiet
###Output
_____no_output_____
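###Markdown
A quick way to confirm the uploads (an optional sketch, not part of the original notebook) is to list a few keys under the train prefix with boto3:
###Code
# List up to five objects under the train/ prefix of the bucket configured above.
s3_check = boto3.client('s3')
response = s3_check.list_objects_v2(Bucket=bucket, Prefix='train/', MaxKeys=5)
for obj in response.get('Contents', []):
    print(obj['Key'])
###Output
_____no_output_____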
###Markdown
Now we have all the data stored in the S3 bucket. The image and .lst files will be converted to RecordIO files internally by the image classification algorithm. But if you want to do the conversion yourself, the following cell shows how to do it using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool. Note that this is just an example of creating RecordIO files. We are **_not_** using them for training in this notebook. More details on creating RecordIO files can be found in this [tutorial](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec).
###Code
%%bash
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-val 256_ObjectCategories/
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-train caltech_256_train_60/
###Output
_____no_output_____
###Markdown
After you have created the RecordIO files, you can upload them to the train and validation channels for training. To train with the RecordIO format, you can follow "[Image-classification-fulltraining.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-fulltraining.ipynb)" and "[Image-classification-transfer-learning.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-transfer-learning.ipynb)". Again, we will **_not_** use the RecordIO files for training. The following sections only show how to train a model with images and list files. Before training the model, we need to set up the training parameters. The next section explains the parameters in detail. Fine-tuning the Image Classification Model Training parametersThere are two kinds of parameters that need to be set for training. The first kind are the parameters for the training job. These include:* **Input specification**: These are the training and validation channels that specify the path where the training data is present. They are specified in the "InputDataConfig" section. The main parameters that need to be set are the "ContentType", which can be set to "application/x-recordio" or "application/x-image" based on the input data format, and the "S3Uri", which specifies the bucket and the folder where the data is present. * **Output specification**: This is specified in the "OutputDataConfig" section. We just need to specify the path where the output can be stored after training.* **Resource config**: This section specifies the type of instance on which to run the training and the number of hosts used for training. If "InstanceCount" is more than 1, then training can be run in a distributed manner. Apart from the above set of parameters, there are hyperparameters that are specific to the algorithm. These are:* **num_layers**: The number of layers (depth) for the network. We use 18 in this sample, but other values such as 50 or 152 can be used.* **image_shape**: The input image dimensions, 'num_channels, height, width', for the network. It should be no larger than the actual image size. The number of channels should be the same as in the actual images.* **num_training_samples**: This is the total number of training samples. It is set to 15240 for the caltech dataset with the current split.* **num_classes**: This is the number of output classes for the new dataset. ImageNet was trained with 1000 output classes, but the number of output classes can be changed for fine-tuning. For caltech, we use 257 because it has 256 object categories + 1 clutter class.* **mini_batch_size**: The number of training samples used for each mini batch. In distributed training, the number of training samples used per batch will be N * mini_batch_size, where N is the number of hosts on which training is run.* **epochs**: Number of training epochs.* **learning_rate**: Learning rate for training.* **top_k**: Report the top-k accuracy during training.* **resize**: Resize the image before using it for training. The images are resized so that the shortest side has the length given by this parameter. If the parameter is not set, then the training data is used as is, without resizing.* **checkpoint_frequency**: Period for storing model parameters (in number of epochs).* **use_pretrained_model**: Set to 1 to use a pretrained model for transfer learning.
###Code
# The algorithm supports multiple network depth (number of layers). They are 18, 34, 50, 101, 152 and 200
# For this training, we will use 18 layers
num_layers = 18
# we need to specify the input image shape for the training data
image_shape = "3,224,224"
# we also need to specify the number of training samples in the training set
num_training_samples = 15240
# specify the number of output classes
num_classes = 257
# batch size for training
mini_batch_size = 128
# number of epochs
epochs = 6
# learning rate
learning_rate = 0.01
# report top_5 accuracy
top_k = 5
# resize image before training
resize = 256
# period to store model parameters (in number of epochs), in this case, we will save parameters from epoch 2, 4, and 6
checkpoint_frequency = 2
# Since we are using transfer learning, we set use_pretrained_model to 1 so that weights can be
# initialized with pre-trained weights
use_pretrained_model = 1
###Output
_____no_output_____
###Markdown
TrainingRun the training using the Amazon SageMaker CreateTrainingJob API.
###Code
%%time
import time
import boto3
from time import gmtime, strftime
s3 = boto3.client('s3')
# create unique job name
job_name_prefix = 'sagemaker-imageclassification-notebook'
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
job_name = job_name_prefix + timestamp
training_params = \
{
# specify the training docker image
"AlgorithmSpecification": {
"TrainingImage": training_image,
"TrainingInputMode": "File"
},
"RoleArn": role,
"OutputDataConfig": {
"S3OutputPath": 's3://{}/{}/output'.format(bucket, job_name_prefix)
},
"ResourceConfig": {
"InstanceCount": 1,
"InstanceType": "ml.p2.xlarge",
"VolumeSizeInGB": 50
},
"TrainingJobName": job_name,
"HyperParameters": {
"image_shape": image_shape,
"num_layers": str(num_layers),
"num_training_samples": str(num_training_samples),
"num_classes": str(num_classes),
"mini_batch_size": str(mini_batch_size),
"epochs": str(epochs),
"learning_rate": str(learning_rate),
"top_k": str(top_k),
"resize": str(resize),
"checkpoint_frequency": str(checkpoint_frequency),
"use_pretrained_model": str(use_pretrained_model)
},
"StoppingCondition": {
"MaxRuntimeInSeconds": 360000
},
#Training data should be inside a subdirectory called "train"
#Validation data should be inside a subdirectory called "validation"
#The algorithm currently only supports fullyreplicated model (where data is copied onto each machine)
"InputDataConfig": [
{
"ChannelName": "train",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": 's3://{}/train/'.format(bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
},
{
"ChannelName": "validation",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": 's3://{}/validation/'.format(bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
},
{
"ChannelName": "train_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": 's3://{}/train_lst/'.format(bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
},
{
"ChannelName": "validation_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": 's3://{}/validation_lst/'.format(bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
}
]
}
print('Training job name: {}'.format(job_name))
print('\nInput Data Location: {}'.format(training_params['InputDataConfig'][0]['DataSource']['S3DataSource']))
# create the Amazon SageMaker training job
sagemaker = boto3.client(service_name='sagemaker')
sagemaker.create_training_job(**training_params)
# confirm that the training job has started
status = sagemaker.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']
print('Training job current status: {}'.format(status))
try:
# wait for the job to finish and report the ending status
sagemaker.get_waiter('training_job_completed_or_stopped').wait(TrainingJobName=job_name)
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info['TrainingJobStatus']
print("Training job ended with status: " + status)
except:
print('Training failed to start')
# if exception is raised, that means it has failed
message = sagemaker.describe_training_job(TrainingJobName=job_name)['FailureReason']
print('Training failed with the following error: {}'.format(message))
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info['TrainingJobStatus']
print("Training job ended with status: " + status)
print (training_info)
###Output
_____no_output_____
###Markdown
If you see the message,> `Training job ended with status: Completed`then that means training completed successfully and the output model was stored in the output path specified by `training_params['OutputDataConfig']`.You can also view information about and the status of a training job using the AWS SageMaker console. Just click on the "Jobs" tab. Set Up Hosting For The ModelA trained model does nothing on its own. We now want to use the model to perform inference. For this example, that means predicting the class label given an input image.This section involves several steps:1. [Create model](CreateModel) - Create a model from the training output1. [Create endpoint configuration](CreateEndpointConfiguration) - Create a configuration defining an endpoint.1. [Create endpoint](CreateEndpoint) - Use the configuration to create an inference endpoint.1. [Perform inference](PerformInference) - Perform inference on some input data using the endpoint. Create modelWe now create a SageMaker Model from the training output. Using the model, we can create an Endpoint Configuration.
###Code
%%time
import boto3
from time import gmtime, strftime
sage = boto3.Session().client(service_name='sagemaker')
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
model_name="image-classification-model" + timestamp
print(model_name)
info = sage.describe_training_job(TrainingJobName=job_name)
model_data = info['ModelArtifacts']['S3ModelArtifacts']
print(model_data)
hosting_image = get_image_uri(boto3.Session().region_name, 'image-classification')
primary_container = {
'Image': hosting_image,
'ModelDataUrl': model_data,
}
create_model_response = sage.create_model(
ModelName = model_name,
ExecutionRoleArn = role,
PrimaryContainer = primary_container)
print(create_model_response['ModelArn'])
###Output
_____no_output_____
###Markdown
Create endpoint configurationAt launch, we will support configuring REST endpoints in hosting with multiple models, e.g. for A/B testing purposes. In order to support this, customers create an endpoint configuration that describes the distribution of traffic across the models, whether split, shadowed, or sampled in some way.In addition, the endpoint configuration describes the instance type required for model deployment, and at launch will describe the autoscaling configuration.
###Code
from time import gmtime, strftime
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_config_name = job_name_prefix + '-epc-' + timestamp
endpoint_config_response = sage.create_endpoint_config(
EndpointConfigName = endpoint_config_name,
ProductionVariants=[{
'InstanceType':'ml.p2.xlarge',
'InitialInstanceCount':1,
'ModelName':model_name,
'VariantName':'AllTraffic'}])
print('Endpoint configuration name: {}'.format(endpoint_config_name))
print('Endpoint configuration arn: {}'.format(endpoint_config_response['EndpointConfigArn']))
###Output
_____no_output_____
###Markdown
Create endpointLastly, the customer creates the endpoint that serves up the model, by specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 9-11 minutes to complete.
###Code
%%time
import time
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_name = job_name_prefix + '-ep-' + timestamp
print('Endpoint name: {}'.format(endpoint_name))
endpoint_params = {
'EndpointName': endpoint_name,
'EndpointConfigName': endpoint_config_name,
}
endpoint_response = sagemaker.create_endpoint(**endpoint_params)
print('EndpointArn = {}'.format(endpoint_response['EndpointArn']))
###Output
_____no_output_____
###Markdown
Now the endpoint can be created. It may take some time to create the endpoint...
###Code
# get the status of the endpoint
response = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = response['EndpointStatus']
print('EndpointStatus = {}'.format(status))
try:
sagemaker.get_waiter('endpoint_in_service').wait(EndpointName=endpoint_name)
finally:
resp = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = resp['EndpointStatus']
print("Arn: " + resp['EndpointArn'])
print("Create endpoint ended with status: " + status)
if status != 'InService':
message = sagemaker.describe_endpoint(EndpointName=endpoint_name)['FailureReason']
print('Training failed with the following error: {}'.format(message))
raise Exception('Endpoint creation did not succeed')
###Output
_____no_output_____
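###Markdown
If you are curious how long the endpoint took to come up, the timestamps in the `DescribeEndpoint` response can be compared (an optional sketch, not part of the original notebook):
###Code
# CreationTime and LastModifiedTime are datetime objects, so the difference is a timedelta.
resp = sagemaker.describe_endpoint(EndpointName=endpoint_name)
print("Endpoint creation took roughly {}".format(resp['LastModifiedTime'] - resp['CreationTime']))
###Output
_____no_output_____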
###Markdown
If you see the message,> `Create endpoint ended with status: InService`then congratulations! You now have a functioning inference endpoint. You can confirm the endpoint configuration and status by navigating to the "Endpoints" tab in the AWS SageMaker console.We will finally create a runtime object from which we can invoke the endpoint. Perform inferenceFinally, the customer can now validate the model for use. They can obtain the endpoint from the client library using the result from previous operations, and generate classifications from the trained model using that endpoint.
###Code
import boto3
runtime = boto3.Session().client(service_name='runtime.sagemaker')
###Output
_____no_output_____
###Markdown
Download test image
###Code
!wget -O /tmp/test.jpg http://www.vision.caltech.edu/Image_Datasets/Caltech256/images/008.bathtub/008_0007.jpg
file_name = '/tmp/test.jpg'
# test image
from IPython.display import Image
Image(file_name)
import json
import numpy as np
with open(file_name, 'rb') as f:
payload = f.read()
payload = bytearray(payload)
response = runtime.invoke_endpoint(EndpointName=endpoint_name,
ContentType='application/x-image',
Body=payload)
result = response['Body'].read()
# result will be in json format and convert it to ndarray
result = json.loads(result)
# the result will output the probabilities for all classes
# find the class with maximum probability and print the class index
index = np.argmax(result)
object_categories = ['ak47', 'american-flag', 'backpack', 'baseball-bat', 'baseball-glove', 'basketball-hoop', 'bat', 'bathtub', 'bear', 'beer-mug', 'billiards', 'binoculars', 'birdbath', 'blimp', 'bonsai-101', 'boom-box', 'bowling-ball', 'bowling-pin', 'boxing-glove', 'brain-101', 'breadmaker', 'buddha-101', 'bulldozer', 'butterfly', 'cactus', 'cake', 'calculator', 'camel', 'cannon', 'canoe', 'car-tire', 'cartman', 'cd', 'centipede', 'cereal-box', 'chandelier-101', 'chess-board', 'chimp', 'chopsticks', 'cockroach', 'coffee-mug', 'coffin', 'coin', 'comet', 'computer-keyboard', 'computer-monitor', 'computer-mouse', 'conch', 'cormorant', 'covered-wagon', 'cowboy-hat', 'crab-101', 'desk-globe', 'diamond-ring', 'dice', 'dog', 'dolphin-101', 'doorknob', 'drinking-straw', 'duck', 'dumb-bell', 'eiffel-tower', 'electric-guitar-101', 'elephant-101', 'elk', 'ewer-101', 'eyeglasses', 'fern', 'fighter-jet', 'fire-extinguisher', 'fire-hydrant', 'fire-truck', 'fireworks', 'flashlight', 'floppy-disk', 'football-helmet', 'french-horn', 'fried-egg', 'frisbee', 'frog', 'frying-pan', 'galaxy', 'gas-pump', 'giraffe', 'goat', 'golden-gate-bridge', 'goldfish', 'golf-ball', 'goose', 'gorilla', 'grand-piano-101', 'grapes', 'grasshopper', 'guitar-pick', 'hamburger', 'hammock', 'harmonica', 'harp', 'harpsichord', 'hawksbill-101', 'head-phones', 'helicopter-101', 'hibiscus', 'homer-simpson', 'horse', 'horseshoe-crab', 'hot-air-balloon', 'hot-dog', 'hot-tub', 'hourglass', 'house-fly', 'human-skeleton', 'hummingbird', 'ibis-101', 'ice-cream-cone', 'iguana', 'ipod', 'iris', 'jesus-christ', 'joy-stick', 'kangaroo-101', 'kayak', 'ketch-101', 'killer-whale', 'knife', 'ladder', 'laptop-101', 'lathe', 'leopards-101', 'license-plate', 'lightbulb', 'light-house', 'lightning', 'llama-101', 'mailbox', 'mandolin', 'mars', 'mattress', 'megaphone', 'menorah-101', 'microscope', 'microwave', 'minaret', 'minotaur', 'motorbikes-101', 'mountain-bike', 'mushroom', 'mussels', 'necktie', 'octopus', 'ostrich', 'owl', 'palm-pilot', 'palm-tree', 'paperclip', 'paper-shredder', 'pci-card', 'penguin', 'people', 'pez-dispenser', 'photocopier', 'picnic-table', 'playing-card', 'porcupine', 'pram', 'praying-mantis', 'pyramid', 'raccoon', 'radio-telescope', 'rainbow', 'refrigerator', 'revolver-101', 'rifle', 'rotary-phone', 'roulette-wheel', 'saddle', 'saturn', 'school-bus', 'scorpion-101', 'screwdriver', 'segway', 'self-propelled-lawn-mower', 'sextant', 'sheet-music', 'skateboard', 'skunk', 'skyscraper', 'smokestack', 'snail', 'snake', 'sneaker', 'snowmobile', 'soccer-ball', 'socks', 'soda-can', 'spaghetti', 'speed-boat', 'spider', 'spoon', 'stained-glass', 'starfish-101', 'steering-wheel', 'stirrups', 'sunflower-101', 'superman', 'sushi', 'swan', 'swiss-army-knife', 'sword', 'syringe', 'tambourine', 'teapot', 'teddy-bear', 'teepee', 'telephone-box', 'tennis-ball', 'tennis-court', 'tennis-racket', 'theodolite', 'toaster', 'tomato', 'tombstone', 'top-hat', 'touring-bike', 'tower-pisa', 'traffic-light', 'treadmill', 'triceratops', 'tricycle', 'trilobite-101', 'tripod', 't-shirt', 'tuning-fork', 'tweezer', 'umbrella-101', 'unicorn', 'vcr', 'video-projector', 'washing-machine', 'watch-101', 'waterfall', 'watermelon', 'welding-mask', 'wheelbarrow', 'windmill', 'wine-bottle', 'xylophone', 'yarmulke', 'yo-yo', 'zebra', 'airplanes-101', 'car-side-101', 'faces-easy-101', 'greyhound', 'tennis-shoes', 'toad', 'clutter']
print("Result: label - " + object_categories[index] + ", probability - " + str(result[index]))
###Output
_____no_output_____
###Markdown
Clean upWhen we're done with the endpoint, we can delete it and the backing instances will be released. Run the following cell to delete the endpoint (the model and endpoint configuration can be deleted in the same way with `delete_model` and `delete_endpoint_config`).
###Code
sage.delete_endpoint(EndpointName=endpoint_name)
###Output
_____no_output_____
###Markdown
Image classification training with image format demo1. [Introduction](Introduction)2. [Prerequisites and Preprocessing](Prerequisites-and-Preprocessing) 1. [Permissions and environment variables](Permissions-and-environment-variables) 2. [Prepare the data](Prepare-the-data)3. [Fine-tuning The Image Classification Model](Fine-tuning-the-Image-classification-model) 1. [Training parameters](Training-parameters) 2. [Training](Training)4. [Set Up Hosting For The Model](Set-up-hosting-for-the-model) 1. [Create model](Create-model) 2. [Create endpoint configuration](Create-endpoint-configuration) 3. [Create endpoint](Create-endpoint) 4. [Perform inference](Perform-inference) IntroductionWelcome to our end-to-end example of training the image classification algorithm with the image format. In this demo, we will use the Amazon SageMaker image classification algorithm in transfer learning mode to fine-tune a pre-trained model (trained on ImageNet data) to classify a new dataset. In particular, the pre-trained model will be fine-tuned on the [caltech-256 dataset](http://www.vision.caltech.edu/Image_Datasets/Caltech256/). To get started, we need to set up the environment with a few prerequisite steps covering permissions, configurations, and so on. Prerequisites and Preprocessing Permissions and environment variablesHere we set up the linkage and authentication to AWS services. There are three parts to this:* The role used to give learning and hosting access to your data. This will be obtained automatically from the role used to start the notebook.* The S3 bucket that you want to use for training and model data.* The Amazon SageMaker image classification Docker image, which need not be changed.
###Code
%%time
import boto3
from sagemaker import get_execution_role
role = get_execution_role()
bucket='<<bucket-name>>' # customize to your bucket
containers = {'us-west-2': '433757028032.dkr.ecr.us-west-2.amazonaws.com/image-classification:latest',
'us-east-1': '811284229777.dkr.ecr.us-east-1.amazonaws.com/image-classification:latest',
'us-east-2': '825641698319.dkr.ecr.us-east-2.amazonaws.com/image-classification:latest',
'eu-west-1': '685385470294.dkr.ecr.eu-west-1.amazonaws.com/image-classification:latest'}
training_image = containers[boto3.Session().region_name]
###Output
_____no_output_____
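###Markdown
The dictionary above only covers four regions, so a slightly defensive variant of the lookup (a sketch, not part of the original notebook) fails with a clear message when the notebook runs elsewhere:
###Code
# Fall back to an explicit error instead of a bare KeyError for unlisted regions.
region = boto3.Session().region_name
if region not in containers:
    raise ValueError("No image-classification container registered for region: " + region)
training_image = containers[region]
print(training_image)
###Output
_____no_output_____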
###Markdown
Fine-tuning the Image classification model Prepare the dataThe caltech 256 dataset consists of images from 257 categories (the last one being a clutter category) and has 30k images, with a minimum of 80 images and a maximum of about 800 images per category. The image classification algorithm accepts two input formats. The first is a [RecordIO format](https://mxnet.incubator.apache.org/tutorials/basic/record_io.html) (content type: application/x-recordio) and the other is a [lst format](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec) (content type: application/x-image). Files for both these formats are available at http://data.dmlc.ml/mxnet/data/caltech-256/. In this example, we will use the lst format for training and use the training/validation split [specified here](http://data.dmlc.ml/mxnet/data/caltech-256/).
###Code
import os
import urllib.request
def download(url):
filename = url.split("/")[-1]
if not os.path.exists(filename):
urllib.request.urlretrieve(url, filename)
# Caltech-256 image files
download('http://www.vision.caltech.edu/Image_Datasets/Caltech256/256_ObjectCategories.tar')
!tar -xf 256_ObjectCategories.tar
# Tool for creating lst file
download('https://raw.githubusercontent.com/apache/incubator-mxnet/master/tools/im2rec.py')
%%bash
mkdir -p caltech_256_train_60
for i in 256_ObjectCategories/*; do
c=`basename $i`
mkdir -p caltech_256_train_60/$c
for j in `ls $i/*.jpg | shuf | head -n 60`; do
mv $j caltech_256_train_60/$c/
done
done
python im2rec.py --list --recursive caltech-256-60-train caltech_256_train_60/
python im2rec.py --list --recursive caltech-256-60-val 256_ObjectCategories/
###Output
_____no_output_____
###Markdown
A .lst file is a tab-separated file with three columns that contains a list of image files. The first column specifies the image index, the second column specifies the class label index for the image, and the third column specifies the relative path of the image file. The image index in the first column should be unique across all of the images. Here we make an image list file using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool from MXNet. You can also create the .lst file in your own way. An example of a .lst file is shown below.
###Code
!head -n 3 ./caltech-256-60-train.lst > example.lst
f = open('example.lst','r')
lst_content = f.read()
print(lst_content)
###Output
_____no_output_____
###Markdown
When you are bringing your own image files to train, please ensure that the .lst file follows the same format as described above. In order to train with the lst format interface, passing the lst file for both training and validation in the appropriate format is mandatory. Once we have the data available in the correct format for training, the next step is to upload the image and .lst file to S3 bucket.
###Code
# Four channels: train, validation, train_lst, and validation_lst
s3train = 's3://{}/train/'.format(bucket)
s3validation = 's3://{}/validation/'.format(bucket)
s3train_lst = 's3://{}/train_lst/'.format(bucket)
s3validation_lst = 's3://{}/validation_lst/'.format(bucket)
# upload the image files to train and validation channels
!aws s3 cp caltech_256_train_60 $s3train --recursive --quiet
!aws s3 cp 256_ObjectCategories $s3validation --recursive --quiet
# upload the lst files to train_lst and validation_lst channels
!aws s3 cp caltech-256-60-train.lst $s3train_lst --quiet
!aws s3 cp caltech-256-60-val.lst $s3validation_lst --quiet
###Output
_____no_output_____
###Markdown
Now we have all the data stored in the S3 bucket. The image and lst files will be converted to RecordIO files internally by the image classification algorithm. But if you want to do the conversion yourself, the following cell shows how to do it using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool. Note that this is just an example of creating RecordIO files. We are **_not_** using them for training in this notebook. More details on creating RecordIO files can be found in this [tutorial](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec).
###Code
%%bash
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-val 256_ObjectCategories/
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-train caltech_256_train_60/
###Output
_____no_output_____
###Markdown
After you have created the RecordIO files, you can upload them to the train and validation channels for training. To train with RecordIO format, you can follow "[Image-classification-fulltraining.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-fulltraining.ipynb)" and "[Image-classification-transfer-learning.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-transfer-learning.ipynb)". Again, we will **_not_** use the RecordIO files for training. The following sections will only show you how to train a model with images and list files. Before training the model, we need to set up the training parameters. The next section will explain the parameters in detail. Fine-tuning the Image Classification Model Training parametersThere are two kinds of parameters that need to be set for training. The first are the parameters for the training job. These include:* **Input specification**: These are the training and validation channels that specify the path where training data is present. These are specified in the "InputDataConfig" section. The main parameters that need to be set are the "ContentType", which can be set to "application/x-recordio" or "application/x-image" based on the input data format, and the "S3Uri", which specifies the bucket and the folder where the data is present. * **Output specification**: This is specified in the "OutputDataConfig" section. We just need to specify the path where the output can be stored after training* **Resource config**: This section specifies the type of instance on which to run the training and the number of hosts used for training. If "InstanceCount" is more than 1, then training can be run in a distributed manner. Apart from the above set of parameters, there are hyperparameters that are specific to the algorithm. These are:* **num_layers**: The number of layers (depth) for the network. We use 18 in this sample, but other values such as 50 or 152 can be used.* **image_shape**: The input image dimensions,'num_channels, height, width', for the network. It should be no larger than the actual image size. The number of channels should be the same as the actual image.* **num_training_samples**: This is the total number of training samples. It is set to 15240 for the Caltech dataset with the current split.* **num_classes**: This is the number of output classes for the new dataset. ImageNet was trained with 1000 output classes but the number of output classes can be changed for fine-tuning. For Caltech, we use 257 because it has 256 object categories + 1 clutter class.* **mini_batch_size**: The number of training samples used for each mini batch. In distributed training, the number of training samples used per batch will be N * mini_batch_size where N is the number of hosts on which training is run.* **epochs**: Number of training epochs.* **learning_rate**: Learning rate for training.* **top_k**: Report the top-k accuracy during training.* **resize**: Resize the image before using it for training. The images are resized so that the shortest side has this length. If the parameter is not set, then the training data is used as such without resizing.* **checkpoint_frequency**: Period to store model parameters (in number of epochs); a small illustrative sanity check of this setting follows the next cell.* **use_pretrained_model**: Set to 1 to use a pretrained model for transfer learning.
###Code
# The algorithm supports multiple network depth (number of layers). They are 18, 34, 50, 101, 152 and 200
# For this training, we will use 18 layers
num_layers = 18
# we need to specify the input image shape for the training data
image_shape = "3,224,224"
# we also need to specify the number of training samples in the training set
num_training_samples = 15240
# specify the number of output classes
num_classes = 257
# batch size for training
mini_batch_size = 128
# number of epochs
epochs = 6
# learning rate
learning_rate = 0.01
# report top_5 accuracy
top_k = 5
# resize image before training
resize = 256
# period to store model parameters (in number of epochs), in this case, we will save parameters from epoch 2, 4, and 6
checkpoint_frequency = 2
# Since we are using transfer learning, we set use_pretrained_model to 1 so that weights can be
# initialized with pre-trained weights
use_pretrained_model = 1
###Output
_____no_output_____
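###Markdown
As a quick, purely illustrative sanity check on the settings above (not required for training), the next cell estimates how many weight updates a single host performs per epoch and at which epochs checkpoints will be stored.
###Code
# illustrative sketch only; it reuses the hyperparameter variables defined above
# on a single host each epoch runs roughly num_training_samples / mini_batch_size updates;
# with distributed training on N hosts the effective batch size would be N * mini_batch_size
batches_per_epoch = num_training_samples // mini_batch_size
checkpoint_epochs = list(range(checkpoint_frequency, epochs + 1, checkpoint_frequency))
print('approximate batches per epoch: {}'.format(batches_per_epoch))
print('checkpoints stored after epochs: {}'.format(checkpoint_epochs))
###Output
_____no_output_____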
###Markdown
TrainingRun the training using the Amazon SageMaker CreateTrainingJob API
###Code
%%time
import time
import boto3
from time import gmtime, strftime
s3 = boto3.client('s3')
# create unique job name
job_name_prefix = 'sagemaker-imageclassification-notebook'
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
job_name = job_name_prefix + timestamp
training_params = \
{
# specify the training docker image
"AlgorithmSpecification": {
"TrainingImage": training_image,
"TrainingInputMode": "File"
},
"RoleArn": role,
"OutputDataConfig": {
"S3OutputPath": 's3://{}/{}/output'.format(bucket, job_name_prefix)
},
"ResourceConfig": {
"InstanceCount": 1,
"InstanceType": "ml.p2.xlarge",
"VolumeSizeInGB": 50
},
"TrainingJobName": job_name,
"HyperParameters": {
"image_shape": image_shape,
"num_layers": str(num_layers),
"num_training_samples": str(num_training_samples),
"num_classes": str(num_classes),
"mini_batch_size": str(mini_batch_size),
"epochs": str(epochs),
"learning_rate": str(learning_rate),
"top_k": str(top_k),
"resize": str(resize),
"checkpoint_frequency": str(checkpoint_frequency),
"use_pretrained_model": str(use_pretrained_model)
},
"StoppingCondition": {
"MaxRuntimeInSeconds": 360000
},
#Training data should be inside a subdirectory called "train"
#Validation data should be inside a subdirectory called "validation"
#The algorithm currently only supports fullyreplicated model (where data is copied onto each machine)
"InputDataConfig": [
{
"ChannelName": "train",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": 's3://{}/train/'.format(bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
},
{
"ChannelName": "validation",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": 's3://{}/validation/'.format(bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
},
{
"ChannelName": "train_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": 's3://{}/train_lst/'.format(bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
},
{
"ChannelName": "validation_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": 's3://{}/validation_lst/'.format(bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
}
]
}
print('Training job name: {}'.format(job_name))
print('\nInput Data Location: {}'.format(training_params['InputDataConfig'][0]['DataSource']['S3DataSource']))
# create the Amazon SageMaker training job
sagemaker = boto3.client(service_name='sagemaker')
sagemaker.create_training_job(**training_params)
# confirm that the training job has started
status = sagemaker.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']
print('Training job current status: {}'.format(status))
try:
# wait for the job to finish and report the ending status
sagemaker.get_waiter('training_job_completed_or_stopped').wait(TrainingJobName=job_name)
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info['TrainingJobStatus']
print("Training job ended with status: " + status)
except:
print('Training failed to start')
# if exception is raised, that means it has failed
message = sagemaker.describe_training_job(TrainingJobName=job_name)['FailureReason']
print('Training failed with the following error: {}'.format(message))
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info['TrainingJobStatus']
print("Training job ended with status: " + status)
print (training_info)
###Output
_____no_output_____
###Markdown
If you see the message,> `Training job ended with status: Completed`then that means training successfully completed and the output model was stored in the output path specified by `training_params['OutputDataConfig']`.You can also view information about the training job and its status using the AWS SageMaker console. Just click on the "Jobs" tab. Set Up Hosting For The ModelA trained model does nothing on its own. We now want to use the model to perform inference. For this example, that means predicting the class label given an input image.This section involves several steps,1. [Create model](CreateModel) - Create a model for the training output1. [Create endpoint configuration](CreateEndpointConfiguration) - Create a configuration defining an endpoint.1. [Create endpoint](CreateEndpoint) - Use the configuration to create an inference endpoint.1. [Perform inference](Perform Inference) - Perform inference on some input data using the endpoint. Create modelWe now create a SageMaker Model from the training output. Using the model we can create an Endpoint Configuration.
###Code
%%time
import boto3
from time import gmtime, strftime
sage = boto3.Session().client(service_name='sagemaker')
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
model_name="image-classification-model" + timestamp
print(model_name)
info = sage.describe_training_job(TrainingJobName=job_name)
model_data = info['ModelArtifacts']['S3ModelArtifacts']
print(model_data)
hosting_image = containers[boto3.Session().region_name]
primary_container = {
'Image': hosting_image,
'ModelDataUrl': model_data,
}
create_model_response = sage.create_model(
ModelName = model_name,
ExecutionRoleArn = role,
PrimaryContainer = primary_container)
print(create_model_response['ModelArn'])
###Output
_____no_output_____
###Markdown
Create endpoint configurationAt launch, we will support configuring REST endpoints in hosting with multiple models, e.g. for A/B testing purposes. In order to support this, customers create an endpoint configuration, that describes the distribution of traffic across the models, whether split, shadowed, or sampled in some way.In addition, the endpoint configuration describes the instance type required for model deployment, and at launch will describe the autoscaling configuration.
###Code
from time import gmtime, strftime
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_config_name = job_name_prefix + '-epc-' + timestamp
endpoint_config_response = sage.create_endpoint_config(
EndpointConfigName = endpoint_config_name,
ProductionVariants=[{
'InstanceType':'ml.p2.xlarge',
'InitialInstanceCount':1,
'ModelName':model_name,
'VariantName':'AllTraffic'}])
print('Endpoint configuration name: {}'.format(endpoint_config_name))
print('Endpoint configuration arn: {}'.format(endpoint_config_response['EndpointConfigArn']))
###Output
_____no_output_____
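###Markdown
The description above mentions splitting traffic across multiple models, e.g. for A/B testing. The next cell is an illustration only: it sketches what a two-variant "ProductionVariants" list with a 90/10 traffic split could look like. The second model name is hypothetical, and the configuration is neither created nor used in this notebook.
###Code
# illustrative sketch only -- nothing here is deployed
# 'second_model_name' is a hypothetical second model; only 'model_name' exists in this notebook
second_model_name = 'image-classification-model-b'
ab_variants = [
    {'VariantName': 'VariantA',
     'ModelName': model_name,
     'InstanceType': 'ml.p2.xlarge',
     'InitialInstanceCount': 1,
     'InitialVariantWeight': 0.9},
    {'VariantName': 'VariantB',
     'ModelName': second_model_name,
     'InstanceType': 'ml.p2.xlarge',
     'InitialInstanceCount': 1,
     'InitialVariantWeight': 0.1}]
print(ab_variants)
###Output
_____no_output_____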
###Markdown
Create endpointLastly, the customer creates the endpoint that serves up the model, through specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 9-11 minutes to complete.
###Code
%%time
import time
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_name = job_name_prefix + '-ep-' + timestamp
print('Endpoint name: {}'.format(endpoint_name))
endpoint_params = {
'EndpointName': endpoint_name,
'EndpointConfigName': endpoint_config_name,
}
endpoint_response = sagemaker.create_endpoint(**endpoint_params)
print('EndpointArn = {}'.format(endpoint_response['EndpointArn']))
###Output
_____no_output_____
###Markdown
Finally, the endpoint can now be created. It may take some time to create the endpoint...
###Code
# get the status of the endpoint
response = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = response['EndpointStatus']
print('EndpointStatus = {}'.format(status))
try:
sagemaker.get_waiter('endpoint_in_service').wait(EndpointName=endpoint_name)
finally:
resp = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = resp['EndpointStatus']
print("Arn: " + resp['EndpointArn'])
print("Create endpoint ended with status: " + status)
if status != 'InService':
message = sagemaker.describe_endpoint(EndpointName=endpoint_name)['FailureReason']
print('Training failed with the following error: {}'.format(message))
raise Exception('Endpoint creation did not succeed')
###Output
_____no_output_____
###Markdown
If you see the message,> `Endpoint creation ended with EndpointStatus = InService`then congratulations! You now have a functioning inference endpoint. You can confirm the endpoint configuration and status by navigating to the "Endpoints" tab in the AWS SageMaker console.We will finally create a runtime object from which we can invoke the endpoint. Perform inferenceFinally, the customer can now validate the model for use. They can obtain the endpoint from the client library using the result from previous operations, and generate classifications from the trained model using that endpoint.
###Code
import boto3
runtime = boto3.Session().client(service_name='runtime.sagemaker')
###Output
_____no_output_____
###Markdown
Download test image
###Code
!wget -O /tmp/test.jpg http://www.vision.caltech.edu/Image_Datasets/Caltech256/images/008.bathtub/008_0007.jpg
file_name = '/tmp/test.jpg'
# test image
from IPython.display import Image
Image(file_name)
import json
import numpy as np
with open(file_name, 'rb') as f:
payload = f.read()
payload = bytearray(payload)
response = runtime.invoke_endpoint(EndpointName=endpoint_name,
ContentType='application/x-image',
Body=payload)
result = response['Body'].read()
# the result is a JSON-encoded list of class probabilities
result = json.loads(result)
# the result will output the probabilities for all classes
# find the class with maximum probability and print the class index
index = np.argmax(result)
object_categories = ['ak47', 'american-flag', 'backpack', 'baseball-bat', 'baseball-glove', 'basketball-hoop', 'bat', 'bathtub', 'bear', 'beer-mug', 'billiards', 'binoculars', 'birdbath', 'blimp', 'bonsai-101', 'boom-box', 'bowling-ball', 'bowling-pin', 'boxing-glove', 'brain-101', 'breadmaker', 'buddha-101', 'bulldozer', 'butterfly', 'cactus', 'cake', 'calculator', 'camel', 'cannon', 'canoe', 'car-tire', 'cartman', 'cd', 'centipede', 'cereal-box', 'chandelier-101', 'chess-board', 'chimp', 'chopsticks', 'cockroach', 'coffee-mug', 'coffin', 'coin', 'comet', 'computer-keyboard', 'computer-monitor', 'computer-mouse', 'conch', 'cormorant', 'covered-wagon', 'cowboy-hat', 'crab-101', 'desk-globe', 'diamond-ring', 'dice', 'dog', 'dolphin-101', 'doorknob', 'drinking-straw', 'duck', 'dumb-bell', 'eiffel-tower', 'electric-guitar-101', 'elephant-101', 'elk', 'ewer-101', 'eyeglasses', 'fern', 'fighter-jet', 'fire-extinguisher', 'fire-hydrant', 'fire-truck', 'fireworks', 'flashlight', 'floppy-disk', 'football-helmet', 'french-horn', 'fried-egg', 'frisbee', 'frog', 'frying-pan', 'galaxy', 'gas-pump', 'giraffe', 'goat', 'golden-gate-bridge', 'goldfish', 'golf-ball', 'goose', 'gorilla', 'grand-piano-101', 'grapes', 'grasshopper', 'guitar-pick', 'hamburger', 'hammock', 'harmonica', 'harp', 'harpsichord', 'hawksbill-101', 'head-phones', 'helicopter-101', 'hibiscus', 'homer-simpson', 'horse', 'horseshoe-crab', 'hot-air-balloon', 'hot-dog', 'hot-tub', 'hourglass', 'house-fly', 'human-skeleton', 'hummingbird', 'ibis-101', 'ice-cream-cone', 'iguana', 'ipod', 'iris', 'jesus-christ', 'joy-stick', 'kangaroo-101', 'kayak', 'ketch-101', 'killer-whale', 'knife', 'ladder', 'laptop-101', 'lathe', 'leopards-101', 'license-plate', 'lightbulb', 'light-house', 'lightning', 'llama-101', 'mailbox', 'mandolin', 'mars', 'mattress', 'megaphone', 'menorah-101', 'microscope', 'microwave', 'minaret', 'minotaur', 'motorbikes-101', 'mountain-bike', 'mushroom', 'mussels', 'necktie', 'octopus', 'ostrich', 'owl', 'palm-pilot', 'palm-tree', 'paperclip', 'paper-shredder', 'pci-card', 'penguin', 'people', 'pez-dispenser', 'photocopier', 'picnic-table', 'playing-card', 'porcupine', 'pram', 'praying-mantis', 'pyramid', 'raccoon', 'radio-telescope', 'rainbow', 'refrigerator', 'revolver-101', 'rifle', 'rotary-phone', 'roulette-wheel', 'saddle', 'saturn', 'school-bus', 'scorpion-101', 'screwdriver', 'segway', 'self-propelled-lawn-mower', 'sextant', 'sheet-music', 'skateboard', 'skunk', 'skyscraper', 'smokestack', 'snail', 'snake', 'sneaker', 'snowmobile', 'soccer-ball', 'socks', 'soda-can', 'spaghetti', 'speed-boat', 'spider', 'spoon', 'stained-glass', 'starfish-101', 'steering-wheel', 'stirrups', 'sunflower-101', 'superman', 'sushi', 'swan', 'swiss-army-knife', 'sword', 'syringe', 'tambourine', 'teapot', 'teddy-bear', 'teepee', 'telephone-box', 'tennis-ball', 'tennis-court', 'tennis-racket', 'theodolite', 'toaster', 'tomato', 'tombstone', 'top-hat', 'touring-bike', 'tower-pisa', 'traffic-light', 'treadmill', 'triceratops', 'tricycle', 'trilobite-101', 'tripod', 't-shirt', 'tuning-fork', 'tweezer', 'umbrella-101', 'unicorn', 'vcr', 'video-projector', 'washing-machine', 'watch-101', 'waterfall', 'watermelon', 'welding-mask', 'wheelbarrow', 'windmill', 'wine-bottle', 'xylophone', 'yarmulke', 'yo-yo', 'zebra', 'airplanes-101', 'car-side-101', 'faces-easy-101', 'greyhound', 'tennis-shoes', 'toad', 'clutter']
print("Result: label - " + object_categories[index] + ", probability - " + str(result[index]))
###Output
_____no_output_____
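###Markdown
As a small follow-up (it assumes `result` and `object_categories` from the cell above), we can also look at the five most probable classes instead of only the argmax.
###Code
# show the top-5 predictions for the test image
# 'result' is the list of class probabilities returned by the endpoint
top5 = np.argsort(result)[::-1][:5]
for i in top5:
    print('{}: {}'.format(object_categories[i], result[i]))
###Output
_____no_output_____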
###Markdown
Clean upWhen we're done with the endpoint, we can just delete it and the backing instances will be released. Run the following cell to delete the endpoint and model
###Code
sage.delete_endpoint(EndpointName=endpoint_name)
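# also delete the model created above; the endpoint configuration can be removed in the
# same way with sage.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
sage.delete_model(ModelName=model_name)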
###Output
_____no_output_____
###Markdown
Image classification training with image format1. [Introduction](Introduction)2. [Prerequisites and Preprocessing](Prerequisites-and-Preprocessing) 1. [Permissions and environment variables](Permissions-and-environment-variables) 2. [Prepare the data](Prepare-the-data)3. [Fine-tuning The Image Classification Model](Fine-tuning-the-Image-classification-model) 1. [Training parameters](Training-parameters) 2. [Training](Training)4. [Deploy The Model](Deploy-the-model) 1. [Create model](Create-model) 2. [Batch transform](Batch-transform) 3. [Realtime inference](Realtime-inference) 1. [Create endpoint configuration](Create-endpoint-configuration) 2. [Create endpoint](Create-endpoint) 3. [Perform inference](Perform-inference) 4. [Clean up](Clean-up) IntroductionWelcome to our end-to-end example of the image classification algorithm training with image format. In this demo, we will use the Amazon SageMaker image classification algorithm in transfer learning mode to fine-tune a pre-trained model (trained on ImageNet data) to learn to classify a new dataset. In particular, the pre-trained model will be fine-tuned using the [Caltech-256 dataset](https://paperswithcode.com/dataset/caltech-256). To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on. Prerequisites and Preprocessing Permissions and environment variablesHere we set up the linkage and authentication to AWS services. There are three parts to this:* The roles used to give learning and hosting access to your data. This will automatically be obtained from the role used to start the notebook* The S3 bucket that you want to use for training and model data* The Amazon SageMaker image classification docker image which need not be changed
###Code
%%time
import boto3
import sagemaker
from sagemaker import get_execution_role
from sagemaker import image_uris
role = get_execution_role()
bucket = sagemaker.session.Session().default_bucket()
training_image = image_uris.retrieve(
region=boto3.Session().region_name, framework="image-classification"
)
###Output
_____no_output_____
###Markdown
Fine-tuning the Image classification model Prepare the dataThe Caltech-256 dataset consists of images from 257 categories (the last one being a clutter category) and has 30k images with a minimum of 80 images and a maximum of about 800 images per category. The image classification algorithm can take two types of input formats. The first is a [RecordIO format](https://mxnet.incubator.apache.org/tutorials/basic/record_io.html) (content type: application/x-recordio) and the other is a [lst format](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec) (content type: application/x-image). Files for both these formats are available at http://data.dmlc.ml/mxnet/data/caltech-256/. In this example, we will use the lst format for training and use the training/validation split [specified here](http://data.dmlc.ml/mxnet/data/caltech-256/).
###Code
import os
import urllib.request
def download(url):
filename = url.split("/")[-1]
if not os.path.exists(filename):
urllib.request.urlretrieve(url, filename)
# Caltech-256 image files
s3 = boto3.client("s3")
s3.download_file(
"sagemaker-sample-files",
"datasets/image/caltech-256/256_ObjectCategories.tar",
"256_ObjectCategories.tar",
)
!tar -xf 256_ObjectCategories.tar
# Tool for creating lst file
download("https://raw.githubusercontent.com/apache/incubator-mxnet/master/tools/im2rec.py")
%%bash
mkdir -p caltech_256_train_60
for i in 256_ObjectCategories/*; do
c=`basename $i`
mkdir -p caltech_256_train_60/$c
for j in `ls $i/*.jpg | shuf | head -n 60`; do
mv $j caltech_256_train_60/$c/
done
done
python im2rec.py --list --recursive caltech-256-60-train caltech_256_train_60/
python im2rec.py --list --recursive caltech-256-60-val 256_ObjectCategories/
###Output
_____no_output_____
###Markdown
A .lst file is a tab-separated file with three columns that contains a list of image files. The first column specifies the image index, the second column specifies the class label index for the image, and the third column specifies the relative path of the image file. The image index in the first column should be unique across all of the images. Here we make an image list file using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool from MXNet. You can also create the .lst file in your own way. An example of a .lst file is shown as follows.
###Code
!head -n 3 ./caltech-256-60-train.lst > example.lst
f = open("example.lst", "r")
lst_content = f.read()
print(lst_content)
###Output
_____no_output_____
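###Markdown
As a minimal illustration of the three-column layout described above, the following cell parses the example .lst lines back into (image index, class label index, relative path) tuples. im2rec typically writes the label column as a floating point number, so it is converted via `float` first.
###Code
# parse the tab-separated example.lst created above into its three columns
with open("example.lst") as f:
    for line in f:
        index, label, path = line.strip().split("\t")
        print(int(index), int(float(label)), path)
###Output
_____no_output_____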
###Markdown
When you are bringing your own image files to train, please ensure that the .lst file follows the same format as described above. In order to train with the lst format interface, passing the lst file for both training and validation in the appropriate format is mandatory. Once we have the data available in the correct format for training, the next step is to upload the image and .lst file to S3 bucket.
###Code
# Four channels: train, validation, train_lst, and validation_lst
s3train = "s3://{}/image-classification/train/".format(bucket)
s3validation = "s3://{}/image-classification/validation/".format(bucket)
s3train_lst = "s3://{}/image-classification/train_lst/".format(bucket)
s3validation_lst = "s3://{}/image-classification/validation_lst/".format(bucket)
# upload the image files to train and validation channels
!aws s3 cp caltech_256_train_60 $s3train --recursive --quiet
!aws s3 cp 256_ObjectCategories $s3validation --recursive --quiet
# upload the lst files to train_lst and validation_lst channels
!aws s3 cp caltech-256-60-train.lst $s3train_lst --quiet
!aws s3 cp caltech-256-60-val.lst $s3validation_lst --quiet
###Output
_____no_output_____
###Markdown
Now we have all the data stored in the S3 bucket. The image and lst files will be converted to RecordIO files internally by the image classification algorithm. But if you want to do the conversion yourself, the following cell shows how to do it using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool. Note that this is just an example of creating RecordIO files. We are **_not_** using them for training in this notebook. More details on creating RecordIO files can be found in this [tutorial](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec).
###Code
%%bash
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-val 256_ObjectCategories/
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-train caltech_256_train_60/
###Output
_____no_output_____
###Markdown
After you have created the RecordIO files, you can upload them to the train and validation channels for training. To train with RecordIO format, you can follow "[Image-classification-fulltraining.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-fulltraining.ipynb)" and "[Image-classification-transfer-learning.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-transfer-learning.ipynb)". Again, we will **_not_** use the RecordIO files for training. The following sections will only show you how to train a model with images and list files. Before training the model, we need to set up the training parameters. The next section will explain the parameters in detail. Fine-tuning the Image Classification Model Training parametersThere are two kinds of parameters that need to be set for training. The first are the parameters for the training job. These include:* **Input specification**: These are the training and validation channels that specify the path where training data is present. These are specified in the "InputDataConfig" section. The main parameters that need to be set are the "ContentType", which can be set to "application/x-recordio" or "application/x-image" based on the input data format, and the "S3Uri", which specifies the bucket and the folder where the data is present. * **Output specification**: This is specified in the "OutputDataConfig" section. We just need to specify the path where the output can be stored after training* **Resource config**: This section specifies the type of instance on which to run the training and the number of hosts used for training. If "InstanceCount" is more than 1, then training can be run in a distributed manner. Apart from the above set of parameters, there are hyperparameters that are specific to the algorithm. These are:* **num_layers**: The number of layers (depth) for the network. We use 18 in this sample, but other values such as 50 or 152 can be used.* **image_shape**: The input image dimensions,'num_channels, height, width', for the network. It should be no larger than the actual image size. The number of channels should be the same as the actual image.* **num_training_samples**: This is the total number of training samples. It is set to 15240 for the Caltech dataset with the current split.* **num_classes**: This is the number of output classes for the new dataset. ImageNet was trained with 1000 output classes but the number of output classes can be changed for fine-tuning. For Caltech, we use 257 because it has 256 object categories + 1 clutter class.* **mini_batch_size**: The number of training samples used for each mini batch. In distributed training, the number of training samples used per batch will be N * mini_batch_size where N is the number of hosts on which training is run.* **epochs**: Number of training epochs.* **learning_rate**: Learning rate for training.* **top_k**: Report the top-k accuracy during training.* **resize**: Resize the image before using it for training. The images are resized so that the shortest side has this length. If the parameter is not set, then the training data is used as such without resizing (a small illustrative sketch of this follows the next cell).* **checkpoint_frequency**: Period to store model parameters (in number of epochs).* **use_pretrained_model**: Set to 1 to use a pretrained model for transfer learning.
###Code
# The algorithm supports multiple network depth (number of layers). They are 18, 34, 50, 101, 152 and 200
# For this training, we will use 18 layers
num_layers = 18
# we need to specify the input image shape for the training data
image_shape = "3,224,224"
# we also need to specify the number of training samples in the training set
num_training_samples = 15240
# specify the number of output classes
num_classes = 257
# batch size for training
mini_batch_size = 128
# number of epochs
epochs = 6
# learning rate
learning_rate = 0.01
# report top_5 accuracy
top_k = 5
# resize image before training
resize = 256
# period to store model parameters (in number of epochs), in this case, we will save parameters from epoch 2, 4, and 6
checkpoint_frequency = 2
# Since we are using transfer learning, we set use_pretrained_model to 1 so that weights can be
# initialized with pre-trained weights
use_pretrained_model = 1
###Output
_____no_output_____
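###Markdown
To make the `resize` hyperparameter above concrete, here is a small illustrative helper showing how an image would be scaled so that its shortest side becomes 256 pixels. The example dimensions are made up, and preserving the aspect ratio is an assumption of this sketch.
###Code
# illustrative only: output size when the shortest side is scaled to `shortest`
def resized_dims(width, height, shortest=256):
    scale = shortest / min(width, height)
    return round(width * scale), round(height * scale)
print(resized_dims(640, 480))  # landscape example -> (341, 256)
print(resized_dims(300, 500))  # portrait example -> (256, 427)
###Output
_____no_output_____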
###Markdown
TrainingRun the training using the Amazon SageMaker CreateTrainingJob API
###Code
%%time
import time
import boto3
from time import gmtime, strftime
s3 = boto3.client("s3")
# create unique job name
job_name_prefix = "sagemaker-imageclassification-notebook"
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
job_name = job_name_prefix + timestamp
training_params = {
# specify the training docker image
"AlgorithmSpecification": {"TrainingImage": training_image, "TrainingInputMode": "File"},
"RoleArn": role,
"OutputDataConfig": {"S3OutputPath": "s3://{}/{}/output".format(bucket, job_name_prefix)},
"ResourceConfig": {"InstanceCount": 1, "InstanceType": "ml.p2.xlarge", "VolumeSizeInGB": 50},
"TrainingJobName": job_name,
"HyperParameters": {
"image_shape": image_shape,
"num_layers": str(num_layers),
"num_training_samples": str(num_training_samples),
"num_classes": str(num_classes),
"mini_batch_size": str(mini_batch_size),
"epochs": str(epochs),
"learning_rate": str(learning_rate),
"top_k": str(top_k),
"resize": str(resize),
"checkpoint_frequency": str(checkpoint_frequency),
"use_pretrained_model": str(use_pretrained_model),
},
"StoppingCondition": {"MaxRuntimeInSeconds": 360000},
# Training data should be inside a subdirectory called "train"
# Validation data should be inside a subdirectory called "validation"
# The algorithm currently only supports fullyreplicated model (where data is copied onto each machine)
"InputDataConfig": [
{
"ChannelName": "train",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3train,
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "application/x-image",
"CompressionType": "None",
},
{
"ChannelName": "validation",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3validation,
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "application/x-image",
"CompressionType": "None",
},
{
"ChannelName": "train_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3train_lst,
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "application/x-image",
"CompressionType": "None",
},
{
"ChannelName": "validation_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3validation_lst,
"S3DataDistributionType": "FullyReplicated",
}
},
"ContentType": "application/x-image",
"CompressionType": "None",
},
],
}
print("Training job name: {}".format(job_name))
print(
"\nInput Data Location: {}".format(
training_params["InputDataConfig"][0]["DataSource"]["S3DataSource"]
)
)
# create the Amazon SageMaker training job
sagemaker = boto3.client(service_name="sagemaker")
sagemaker.create_training_job(**training_params)
# confirm that the training job has started
status = sagemaker.describe_training_job(TrainingJobName=job_name)["TrainingJobStatus"]
print("Training job current status: {}".format(status))
try:
# wait for the job to finish and report the ending status
sagemaker.get_waiter("training_job_completed_or_stopped").wait(TrainingJobName=job_name)
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info["TrainingJobStatus"]
print("Training job ended with status: " + status)
except:
print("Training failed to start")
# if exception is raised, that means it has failed
message = sagemaker.describe_training_job(TrainingJobName=job_name)["FailureReason"]
print("Training failed with the following error: {}".format(message))
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info["TrainingJobStatus"]
print("Training job ended with status: " + status)
print(training_info)
###Output
_____no_output_____
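###Markdown
Once the job above has finished, the metrics reported by the algorithm (such as train and validation accuracy) can also be read back programmatically. The following optional cell sketches this using the `FinalMetricDataList` field returned by `DescribeTrainingJob`.
###Code
# optional: print the final metrics reported for the completed training job
metrics = sagemaker.describe_training_job(TrainingJobName=job_name).get("FinalMetricDataList", [])
for metric in metrics:
    print("{}: {}".format(metric["MetricName"], metric["Value"]))
###Output
_____no_output_____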
###Markdown
If you see the message,> `Training job ended with status: Completed`then that means training successfully completed and the output model was stored in the output path specified by `training_params['OutputDataConfig']`.You can also view information about the training job and its status using the AWS SageMaker console. Just click on the "Jobs" tab. Deploy The ModelA trained model does nothing on its own. We now want to use the model to perform inference. For this example, that means predicting the class label given an input image.This section involves several steps,1. [Create model](CreateModel) - Create a model for the training output1. [Batch Transform](BatchTransform) - Create a transform job to perform batch inference.1. [Host the model for realtime inference](HostTheModel) - Create an inference endpoint and perform realtime inference. Create modelWe now create a SageMaker Model from the training output. Using the model we can create an Endpoint Configuration.
###Code
%%time
import boto3
from time import gmtime, strftime
sage = boto3.Session().client(service_name="sagemaker")
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
model_name = "image-classification-model" + timestamp
print(model_name)
info = sage.describe_training_job(TrainingJobName=job_name)
model_data = info["ModelArtifacts"]["S3ModelArtifacts"]
print(model_data)
hosting_image = image_uris.retrieve(
region=boto3.Session().region_name, framework="image-classification"
)
primary_container = {
"Image": hosting_image,
"ModelDataUrl": model_data,
}
create_model_response = sage.create_model(
ModelName=model_name, ExecutionRoleArn=role, PrimaryContainer=primary_container
)
print(create_model_response["ModelArn"])
###Output
_____no_output_____
###Markdown
Batch transformWe now create a SageMaker Batch Transform job using the model created above to perform batch prediction.
###Code
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
batch_job_name = "image-classification-model" + timestamp
batch_input = s3validation + "001.ak47/"
request = {
"TransformJobName": batch_job_name,
"ModelName": model_name,
"MaxConcurrentTransforms": 16,
"MaxPayloadInMB": 6,
"BatchStrategy": "SingleRecord",
"TransformOutput": {"S3OutputPath": "s3://{}/{}/output".format(bucket, batch_job_name)},
"TransformInput": {
"DataSource": {"S3DataSource": {"S3DataType": "S3Prefix", "S3Uri": batch_input}},
"ContentType": "application/x-image",
"SplitType": "None",
"CompressionType": "None",
},
"TransformResources": {"InstanceType": "ml.p2.xlarge", "InstanceCount": 1},
}
print("Transform job name: {}".format(batch_job_name))
print("\nInput Data Location: {}".format(batch_input))
sagemaker = boto3.client("sagemaker")
sagemaker.create_transform_job(**request)
print("Created Transform job with name: ", batch_job_name)
while True:
response = sagemaker.describe_transform_job(TransformJobName=batch_job_name)
status = response["TransformJobStatus"]
if status == "Completed":
print("Transform job ended with status: " + status)
break
if status == "Failed":
message = response["FailureReason"]
print("Transform failed with the following error: {}".format(message))
raise Exception("Transform job failed")
time.sleep(30)
###Output
_____no_output_____
###Markdown
After the job completes, let's check the prediction results.
###Code
from urllib.parse import urlparse
import json
import numpy as np
s3_client = boto3.client("s3")
object_categories = [
"ak47",
"american-flag",
"backpack",
"baseball-bat",
"baseball-glove",
"basketball-hoop",
"bat",
"bathtub",
"bear",
"beer-mug",
"billiards",
"binoculars",
"birdbath",
"blimp",
"bonsai-101",
"boom-box",
"bowling-ball",
"bowling-pin",
"boxing-glove",
"brain-101",
"breadmaker",
"buddha-101",
"bulldozer",
"butterfly",
"cactus",
"cake",
"calculator",
"camel",
"cannon",
"canoe",
"car-tire",
"cartman",
"cd",
"centipede",
"cereal-box",
"chandelier-101",
"chess-board",
"chimp",
"chopsticks",
"cockroach",
"coffee-mug",
"coffin",
"coin",
"comet",
"computer-keyboard",
"computer-monitor",
"computer-mouse",
"conch",
"cormorant",
"covered-wagon",
"cowboy-hat",
"crab-101",
"desk-globe",
"diamond-ring",
"dice",
"dog",
"dolphin-101",
"doorknob",
"drinking-straw",
"duck",
"dumb-bell",
"eiffel-tower",
"electric-guitar-101",
"elephant-101",
"elk",
"ewer-101",
"eyeglasses",
"fern",
"fighter-jet",
"fire-extinguisher",
"fire-hydrant",
"fire-truck",
"fireworks",
"flashlight",
"floppy-disk",
"football-helmet",
"french-horn",
"fried-egg",
"frisbee",
"frog",
"frying-pan",
"galaxy",
"gas-pump",
"giraffe",
"goat",
"golden-gate-bridge",
"goldfish",
"golf-ball",
"goose",
"gorilla",
"grand-piano-101",
"grapes",
"grasshopper",
"guitar-pick",
"hamburger",
"hammock",
"harmonica",
"harp",
"harpsichord",
"hawksbill-101",
"head-phones",
"helicopter-101",
"hibiscus",
"homer-simpson",
"horse",
"horseshoe-crab",
"hot-air-balloon",
"hot-dog",
"hot-tub",
"hourglass",
"house-fly",
"human-skeleton",
"hummingbird",
"ibis-101",
"ice-cream-cone",
"iguana",
"ipod",
"iris",
"jesus-christ",
"joy-stick",
"kangaroo-101",
"kayak",
"ketch-101",
"killer-whale",
"knife",
"ladder",
"laptop-101",
"lathe",
"leopards-101",
"license-plate",
"lightbulb",
"light-house",
"lightning",
"llama-101",
"mailbox",
"mandolin",
"mars",
"mattress",
"megaphone",
"menorah-101",
"microscope",
"microwave",
"minaret",
"minotaur",
"motorbikes-101",
"mountain-bike",
"mushroom",
"mussels",
"necktie",
"octopus",
"ostrich",
"owl",
"palm-pilot",
"palm-tree",
"paperclip",
"paper-shredder",
"pci-card",
"penguin",
"people",
"pez-dispenser",
"photocopier",
"picnic-table",
"playing-card",
"porcupine",
"pram",
"praying-mantis",
"pyramid",
"raccoon",
"radio-telescope",
"rainbow",
"refrigerator",
"revolver-101",
"rifle",
"rotary-phone",
"roulette-wheel",
"saddle",
"saturn",
"school-bus",
"scorpion-101",
"screwdriver",
"segway",
"self-propelled-lawn-mower",
"sextant",
"sheet-music",
"skateboard",
"skunk",
"skyscraper",
"smokestack",
"snail",
"snake",
"sneaker",
"snowmobile",
"soccer-ball",
"socks",
"soda-can",
"spaghetti",
"speed-boat",
"spider",
"spoon",
"stained-glass",
"starfish-101",
"steering-wheel",
"stirrups",
"sunflower-101",
"superman",
"sushi",
"swan",
"swiss-army-knife",
"sword",
"syringe",
"tambourine",
"teapot",
"teddy-bear",
"teepee",
"telephone-box",
"tennis-ball",
"tennis-court",
"tennis-racket",
"theodolite",
"toaster",
"tomato",
"tombstone",
"top-hat",
"touring-bike",
"tower-pisa",
"traffic-light",
"treadmill",
"triceratops",
"tricycle",
"trilobite-101",
"tripod",
"t-shirt",
"tuning-fork",
"tweezer",
"umbrella-101",
"unicorn",
"vcr",
"video-projector",
"washing-machine",
"watch-101",
"waterfall",
"watermelon",
"welding-mask",
"wheelbarrow",
"windmill",
"wine-bottle",
"xylophone",
"yarmulke",
"yo-yo",
"zebra",
"airplanes-101",
"car-side-101",
"faces-easy-101",
"greyhound",
"tennis-shoes",
"toad",
"clutter",
]
def list_objects(s3_client, bucket, prefix):
response = s3_client.list_objects(Bucket=bucket, Prefix=prefix)
objects = [content["Key"] for content in response["Contents"]]
return objects
def get_label(s3_client, bucket, prefix):
filename = prefix.split("/")[-1]
s3_client.download_file(bucket, prefix, filename)
with open(filename) as f:
data = json.load(f)
index = np.argmax(data["prediction"])
probability = data["prediction"][index]
print("Result: label - " + object_categories[index] + ", probability - " + str(probability))
return object_categories[index], probability
inputs = list_objects(s3_client, bucket, urlparse(batch_input).path.lstrip("/"))
print("Sample inputs: " + str(inputs[:2]))
outputs = list_objects(s3_client, bucket, batch_job_name + "/output")
print("Sample output: " + str(outputs[:2]))
# Check prediction result of the first 2 images
[get_label(s3_client, bucket, prefix) for prefix in outputs[0:2]]
###Output
_____no_output_____
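###Markdown
As a small follow-up sketch (reusing `get_label`, `outputs`, and `s3_client` from the cell above): since `batch_input` points at the `001.ak47` folder, every prediction should ideally be `ak47`, so we can count how often that holds across all transform outputs.
###Code
# count how many of the batch transform outputs were classified as "ak47"
predicted_labels = [get_label(s3_client, bucket, prefix)[0] for prefix in outputs]
correct = sum(1 for label in predicted_labels if label == "ak47")
print("{} of {} outputs predicted as 'ak47'".format(correct, len(predicted_labels)))
###Output
_____no_output_____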
###Markdown
Realtime inferenceWe now host the model with an endpoint and perform realtime inference.This section involves several steps,1. [Create endpoint configuration](CreateEndpointConfiguration) - Create a configuration defining an endpoint.1. [Create endpoint](CreateEndpoint) - Use the configuration to create an inference endpoint.1. [Perform inference](PerformInference) - Perform inference on some input data using the endpoint.1. [Clean up](CleanUp) - Delete the endpoint and model Create endpoint configurationAt launch, we will support configuring REST endpoints in hosting with multiple models, e.g. for A/B testing purposes. In order to support this, customers create an endpoint configuration, that describes the distribution of traffic across the models, whether split, shadowed, or sampled in some way.In addition, the endpoint configuration describes the instance type required for model deployment, and at launch will describe the autoscaling configuration.
###Code
from time import gmtime, strftime
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
endpoint_config_name = job_name_prefix + "-epc-" + timestamp
endpoint_config_response = sage.create_endpoint_config(
EndpointConfigName=endpoint_config_name,
ProductionVariants=[
{
"InstanceType": "ml.p2.xlarge",
"InitialInstanceCount": 1,
"ModelName": model_name,
"VariantName": "AllTraffic",
}
],
)
print("Endpoint configuration name: {}".format(endpoint_config_name))
print("Endpoint configuration arn: {}".format(endpoint_config_response["EndpointConfigArn"]))
###Output
_____no_output_____
###Markdown
Create endpointNext, the customer creates the endpoint that serves up the model, through specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 9-11 minutes to complete.
###Code
%%time
import time
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
endpoint_name = job_name_prefix + "-ep-" + timestamp
print("Endpoint name: {}".format(endpoint_name))
endpoint_params = {
"EndpointName": endpoint_name,
"EndpointConfigName": endpoint_config_name,
}
endpoint_response = sagemaker.create_endpoint(**endpoint_params)
print("EndpointArn = {}".format(endpoint_response["EndpointArn"]))
###Output
_____no_output_____
###Markdown
Finally, the endpoint can now be created. It may take a few minutes to create the endpoint...
###Code
# get the status of the endpoint
response = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = response["EndpointStatus"]
print("EndpointStatus = {}".format(status))
try:
sagemaker.get_waiter("endpoint_in_service").wait(EndpointName=endpoint_name)
finally:
resp = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = resp["EndpointStatus"]
print("Arn: " + resp["EndpointArn"])
print("Create endpoint ended with status: " + status)
if status != "InService":
message = sagemaker.describe_endpoint(EndpointName=endpoint_name)["FailureReason"]
print("Training failed with the following error: {}".format(message))
raise Exception("Endpoint creation did not succeed")
###Output
_____no_output_____
###Markdown
If you see the message,> `Endpoint creation ended with EndpointStatus = InService`then congratulations! You now have a functioning inference endpoint. You can confirm the endpoint configuration and status by navigating to the "Endpoints" tab in the AWS SageMaker console.We will finally create a runtime object from which we can invoke the endpoint. Perform inferenceFinally, the customer can now validate the model for use. They can obtain the endpoint from the client library using the result from previous operations, and generate classifications from the trained model using that endpoint.
###Code
import boto3
runtime = boto3.Session().client(service_name="runtime.sagemaker")
###Output
_____no_output_____
###Markdown
Download test image
###Code
file_name = "/tmp/test.jpg"
s3.download_file(
"sagemaker-sample-files",
"datasets/image/caltech-256/256_ObjectCategories/008.bathtub/008_0007.jpg",
file_name,
)
# test image
from IPython.display import Image
Image(file_name)
import json
import numpy as np
with open(file_name, "rb") as f:
payload = f.read()
payload = bytearray(payload)
response = runtime.invoke_endpoint(
EndpointName=endpoint_name, ContentType="application/x-image", Body=payload
)
result = response["Body"].read()
# the result is a JSON-encoded list of class probabilities
result = json.loads(result)
# the result will output the probabilities for all classes
# find the class with maximum probability and print the class index
index = np.argmax(result)
object_categories = [
"ak47",
"american-flag",
"backpack",
"baseball-bat",
"baseball-glove",
"basketball-hoop",
"bat",
"bathtub",
"bear",
"beer-mug",
"billiards",
"binoculars",
"birdbath",
"blimp",
"bonsai-101",
"boom-box",
"bowling-ball",
"bowling-pin",
"boxing-glove",
"brain-101",
"breadmaker",
"buddha-101",
"bulldozer",
"butterfly",
"cactus",
"cake",
"calculator",
"camel",
"cannon",
"canoe",
"car-tire",
"cartman",
"cd",
"centipede",
"cereal-box",
"chandelier-101",
"chess-board",
"chimp",
"chopsticks",
"cockroach",
"coffee-mug",
"coffin",
"coin",
"comet",
"computer-keyboard",
"computer-monitor",
"computer-mouse",
"conch",
"cormorant",
"covered-wagon",
"cowboy-hat",
"crab-101",
"desk-globe",
"diamond-ring",
"dice",
"dog",
"dolphin-101",
"doorknob",
"drinking-straw",
"duck",
"dumb-bell",
"eiffel-tower",
"electric-guitar-101",
"elephant-101",
"elk",
"ewer-101",
"eyeglasses",
"fern",
"fighter-jet",
"fire-extinguisher",
"fire-hydrant",
"fire-truck",
"fireworks",
"flashlight",
"floppy-disk",
"football-helmet",
"french-horn",
"fried-egg",
"frisbee",
"frog",
"frying-pan",
"galaxy",
"gas-pump",
"giraffe",
"goat",
"golden-gate-bridge",
"goldfish",
"golf-ball",
"goose",
"gorilla",
"grand-piano-101",
"grapes",
"grasshopper",
"guitar-pick",
"hamburger",
"hammock",
"harmonica",
"harp",
"harpsichord",
"hawksbill-101",
"head-phones",
"helicopter-101",
"hibiscus",
"homer-simpson",
"horse",
"horseshoe-crab",
"hot-air-balloon",
"hot-dog",
"hot-tub",
"hourglass",
"house-fly",
"human-skeleton",
"hummingbird",
"ibis-101",
"ice-cream-cone",
"iguana",
"ipod",
"iris",
"jesus-christ",
"joy-stick",
"kangaroo-101",
"kayak",
"ketch-101",
"killer-whale",
"knife",
"ladder",
"laptop-101",
"lathe",
"leopards-101",
"license-plate",
"lightbulb",
"light-house",
"lightning",
"llama-101",
"mailbox",
"mandolin",
"mars",
"mattress",
"megaphone",
"menorah-101",
"microscope",
"microwave",
"minaret",
"minotaur",
"motorbikes-101",
"mountain-bike",
"mushroom",
"mussels",
"necktie",
"octopus",
"ostrich",
"owl",
"palm-pilot",
"palm-tree",
"paperclip",
"paper-shredder",
"pci-card",
"penguin",
"people",
"pez-dispenser",
"photocopier",
"picnic-table",
"playing-card",
"porcupine",
"pram",
"praying-mantis",
"pyramid",
"raccoon",
"radio-telescope",
"rainbow",
"refrigerator",
"revolver-101",
"rifle",
"rotary-phone",
"roulette-wheel",
"saddle",
"saturn",
"school-bus",
"scorpion-101",
"screwdriver",
"segway",
"self-propelled-lawn-mower",
"sextant",
"sheet-music",
"skateboard",
"skunk",
"skyscraper",
"smokestack",
"snail",
"snake",
"sneaker",
"snowmobile",
"soccer-ball",
"socks",
"soda-can",
"spaghetti",
"speed-boat",
"spider",
"spoon",
"stained-glass",
"starfish-101",
"steering-wheel",
"stirrups",
"sunflower-101",
"superman",
"sushi",
"swan",
"swiss-army-knife",
"sword",
"syringe",
"tambourine",
"teapot",
"teddy-bear",
"teepee",
"telephone-box",
"tennis-ball",
"tennis-court",
"tennis-racket",
"theodolite",
"toaster",
"tomato",
"tombstone",
"top-hat",
"touring-bike",
"tower-pisa",
"traffic-light",
"treadmill",
"triceratops",
"tricycle",
"trilobite-101",
"tripod",
"t-shirt",
"tuning-fork",
"tweezer",
"umbrella-101",
"unicorn",
"vcr",
"video-projector",
"washing-machine",
"watch-101",
"waterfall",
"watermelon",
"welding-mask",
"wheelbarrow",
"windmill",
"wine-bottle",
"xylophone",
"yarmulke",
"yo-yo",
"zebra",
"airplanes-101",
"car-side-101",
"faces-easy-101",
"greyhound",
"tennis-shoes",
"toad",
"clutter",
]
print("Result: label - " + object_categories[index] + ", probability - " + str(result[index]))
###Output
_____no_output_____
###Markdown
Clean upWhen we're done with the endpoint, we can just delete it and the backing instances will be released. Run the following cell to delete the endpoint and model
###Code
sage.delete_endpoint(EndpointName=endpoint_name)
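# also delete the model created above; the endpoint configuration can be removed in the
# same way with sage.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
sage.delete_model(ModelName=model_name)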
###Output
_____no_output_____
###Markdown
Image classification training with image format1. [Introduction](Introduction)2. [Prerequisites and Preprocessing](Prerequisites-and-Preprocessing) 1. [Permissions and environment variables](Permissions-and-environment-variables) 2. [Prepare the data](Prepare-the-data)3. [Fine-tuning The Image Classification Model](Fine-tuning-the-Image-classification-model) 1. [Training parameters](Training-parameters) 2. [Training](Training)4. [Deploy The Model](Deploy-the-model) 1. [Create model](Create-model) 2. [Batch transform](Batch-transform) 3. [Realtime inference](Realtime-inference) 1. [Create endpoint configuration](Create-endpoint-configuration) 2. [Create endpoint](Create-endpoint) 3. [Perform inference](Perform-inference) 4. [Clean up](Clean-up) IntroductionWelcome to our end-to-end example of the image classification algorithm training with image format. In this demo, we will use the Amazon SageMaker image classification algorithm in transfer learning mode to fine-tune a pre-trained model (trained on ImageNet data) to learn to classify a new dataset. In particular, the pre-trained model will be fine-tuned using the [Caltech-256 dataset](http://www.vision.caltech.edu/Image_Datasets/Caltech256/). To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on. Prerequisites and Preprocessing Permissions and environment variablesHere we set up the linkage and authentication to AWS services. There are three parts to this:* The roles used to give learning and hosting access to your data. This will automatically be obtained from the role used to start the notebook* The S3 bucket that you want to use for training and model data* The Amazon SageMaker image classification docker image, which need not be changed
###Code
%%time
import boto3
from sagemaker import get_execution_role
from sagemaker.amazon.amazon_estimator import get_image_uri
role = get_execution_role()
bucket='<<bucket-name>>' # customize to your bucket
training_image = get_image_uri(boto3.Session().region_name, 'image-classification')
###Output
_____no_output_____
###Markdown
Fine-tuning the Image classification model Prepare the dataThe Caltech-256 dataset consists of images from 257 categories (the last one being a clutter category) and has 30k images with a minimum of 80 images and a maximum of about 800 images per category. The image classification algorithm can take two types of input formats. The first is a [RecordIO format](https://mxnet.incubator.apache.org/tutorials/basic/record_io.html) (content type: application/x-recordio) and the other is a [lst format](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec) (content type: application/x-image). Files for both these formats are available at http://data.dmlc.ml/mxnet/data/caltech-256/. In this example, we will use the lst format for training and use the training/validation split [specified here](http://data.dmlc.ml/mxnet/data/caltech-256/).
###Code
import os
import urllib.request
def download(url):
filename = url.split("/")[-1]
if not os.path.exists(filename):
urllib.request.urlretrieve(url, filename)
# Caltech-256 image files
download('http://www.vision.caltech.edu/Image_Datasets/Caltech256/256_ObjectCategories.tar')
!tar -xf 256_ObjectCategories.tar
# Tool for creating lst file
download('https://raw.githubusercontent.com/apache/incubator-mxnet/master/tools/im2rec.py')
%%bash
mkdir -p caltech_256_train_60
for i in 256_ObjectCategories/*; do
c=`basename $i`
mkdir -p caltech_256_train_60/$c
for j in `ls $i/*.jpg | shuf | head -n 60`; do
mv $j caltech_256_train_60/$c/
done
done
python im2rec.py --list --recursive caltech-256-60-train caltech_256_train_60/
python im2rec.py --list --recursive caltech-256-60-val 256_ObjectCategories/
###Output
_____no_output_____
###Markdown
A .lst file is a tab-separated file with three columns that contains a list of image files. The first column specifies the image index, the second column specifies the class label index for the image, and the third column specifies the relative path of the image file. The image index in the first column should be unique across all of the images. Here we make an image list file using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool from MXNet. You can also create the .lst file in your own way. An example of a .lst file is shown as follows.
###Code
!head -n 3 ./caltech-256-60-train.lst > example.lst
f = open('example.lst','r')
lst_content = f.read()
print(lst_content)
###Output
_____no_output_____
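###Markdown
As an optional sanity check on the 60-images-per-category split created above, the following cell counts how many images each class label appears with in the training .lst file.
###Code
# count images per class label in the training list file
from collections import Counter
with open('caltech-256-60-train.lst') as f:
    labels = [int(float(line.split('\t')[1])) for line in f if line.strip()]
counts = Counter(labels)
print('classes: {}, total images: {}'.format(len(counts), sum(counts.values())))
print('min/max images per class: {} / {}'.format(min(counts.values()), max(counts.values())))
###Output
_____no_output_____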
###Markdown
When you are bringing your own image files to train, please ensure that the .lst file follows the same format as described above. In order to train with the lst format interface, passing the lst file for both training and validation in the appropriate format is mandatory. Once we have the data available in the correct format for training, the next step is to upload the image and .lst file to S3 bucket.
###Code
# Four channels: train, validation, train_lst, and validation_lst
s3train = 's3://{}/image-classification/train/'.format(bucket)
s3validation = 's3://{}/image-classification/validation/'.format(bucket)
s3train_lst = 's3://{}/image-classification/train_lst/'.format(bucket)
s3validation_lst = 's3://{}/image-classification/validation_lst/'.format(bucket)
# upload the image files to train and validation channels
!aws s3 cp caltech_256_train_60 $s3train --recursive --quiet
!aws s3 cp 256_ObjectCategories $s3validation --recursive --quiet
# upload the lst files to train_lst and validation_lst channels
!aws s3 cp caltech-256-60-train.lst $s3train_lst --quiet
!aws s3 cp caltech-256-60-val.lst $s3validation_lst --quiet
###Output
_____no_output_____
###Markdown
Now we have all the data stored in the S3 bucket. The image and lst files will be converted to RecordIO files internally by the image classification algorithm. But if you want to do the conversion yourself, the following cell shows how to do it using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool. Note that this is just an example of creating RecordIO files. We are **_not_** using them for training in this notebook. More details on creating RecordIO files can be found in this [tutorial](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec).
###Code
%%bash
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-val 256_ObjectCategories/
python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-train caltech_256_train_60/
###Output
_____no_output_____
###Markdown
After you have created the RecordIO files, you can upload them to the train and validation channels for training. To train with the RecordIO format, you can follow "[Image-classification-fulltraining.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-fulltraining.ipynb)" and "[Image-classification-transfer-learning.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-transfer-learning.ipynb)". Again, we will **_not_** use the RecordIO files for the training. The following sections will only show you how to train a model with images and list files. Before training the model, we need to set up the training parameters. The next section will explain the parameters in detail. Fine-tuning the Image Classification Model Training parameters There are two kinds of parameters that need to be set for training. The first kind is the parameters for the training job. These include:* **Input specification**: These are the training and validation channels that specify the path where the training data is present. They are specified in the "InputDataConfig" section. The main parameters that need to be set are the "ContentType", which can be set to "application/x-recordio" or "application/x-image" based on the input data format, and the "S3Uri", which specifies the bucket and the folder where the data is present. * **Output specification**: This is specified in the "OutputDataConfig" section. We just need to specify the path where the output can be stored after training. * **Resource config**: This section specifies the type of instance on which to run the training and the number of hosts used for training. If "InstanceCount" is more than 1, then training can be run in a distributed manner. Apart from the above set of parameters, there are hyperparameters that are specific to the algorithm. These are:* **num_layers**: The number of layers (depth) for the network. We use 18 in this example, but other values such as 50 and 152 can be used.* **image_shape**: The input image dimensions, 'num_channels, height, width', for the network. It should be no larger than the actual image size. The number of channels should be the same as in the actual image.* **num_training_samples**: This is the total number of training samples. It is set to 15240 for the Caltech dataset with the current split.* **num_classes**: This is the number of output classes for the new dataset. ImageNet was trained with 1000 output classes, but the number of output classes can be changed for fine-tuning. For Caltech, we use 257 because it has 256 object categories + 1 clutter class.* **mini_batch_size**: The number of training samples used for each mini batch. In distributed training, the number of training samples used per batch will be N * mini_batch_size, where N is the number of hosts on which training is run.* **epochs**: Number of training epochs.* **learning_rate**: Learning rate for training.* **top_k**: Report the top-k accuracy during training.* **resize**: Resize the image before using it for training. The images are resized so that the shortest side equals this parameter. If the parameter is not set, then the training data is used as such without resizing.* **checkpoint_frequency**: Period to store model parameters (in number of epochs).* **use_pretrained_model**: Set to 1 to use a pretrained model for transfer learning.
###Code
# The algorithm supports multiple network depth (number of layers). They are 18, 34, 50, 101, 152 and 200
# For this training, we will use 18 layers
num_layers = 18
# we need to specify the input image shape for the training data
image_shape = "3,224,224"
# we also need to specify the number of training samples in the training set
num_training_samples = 15240
# specify the number of output classes
num_classes = 257
# batch size for training
mini_batch_size = 128
# number of epochs
epochs = 6
# learning rate
learning_rate = 0.01
# report top_5 accuracy
top_k = 5
# resize image before training
resize = 256
# period to store model parameters (in number of epochs), in this case, we will save parameters from epoch 2, 4, and 6
checkpoint_frequency = 2
# Since we are using transfer learning, we set use_pretrained_model to 1 so that weights can be
# initialized with pre-trained weights
use_pretrained_model = 1
###Output
_____no_output_____
###Markdown
Training Run the training using the Amazon SageMaker CreateTrainingJob API
###Code
%%time
import time
import boto3
from time import gmtime, strftime
s3 = boto3.client('s3')
# create unique job name
job_name_prefix = 'sagemaker-imageclassification-notebook'
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
job_name = job_name_prefix + timestamp
training_params = \
{
# specify the training docker image
"AlgorithmSpecification": {
"TrainingImage": training_image,
"TrainingInputMode": "File"
},
"RoleArn": role,
"OutputDataConfig": {
"S3OutputPath": 's3://{}/{}/output'.format(bucket, job_name_prefix)
},
"ResourceConfig": {
"InstanceCount": 1,
"InstanceType": "ml.p2.xlarge",
"VolumeSizeInGB": 50
},
"TrainingJobName": job_name,
"HyperParameters": {
"image_shape": image_shape,
"num_layers": str(num_layers),
"num_training_samples": str(num_training_samples),
"num_classes": str(num_classes),
"mini_batch_size": str(mini_batch_size),
"epochs": str(epochs),
"learning_rate": str(learning_rate),
"top_k": str(top_k),
"resize": str(resize),
"checkpoint_frequency": str(checkpoint_frequency),
"use_pretrained_model": str(use_pretrained_model)
},
"StoppingCondition": {
"MaxRuntimeInSeconds": 360000
},
#Training data should be inside a subdirectory called "train"
#Validation data should be inside a subdirectory called "validation"
#The algorithm currently only supports fullyreplicated model (where data is copied onto each machine)
"InputDataConfig": [
{
"ChannelName": "train",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3train,
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
},
{
"ChannelName": "validation",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3validation,
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
},
{
"ChannelName": "train_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3train_lst,
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
},
{
"ChannelName": "validation_lst",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": s3validation_lst,
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "application/x-image",
"CompressionType": "None"
}
]
}
print('Training job name: {}'.format(job_name))
print('\nInput Data Location: {}'.format(training_params['InputDataConfig'][0]['DataSource']['S3DataSource']))
# create the Amazon SageMaker training job
sagemaker = boto3.client(service_name='sagemaker')
sagemaker.create_training_job(**training_params)
# confirm that the training job has started
status = sagemaker.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']
print('Training job current status: {}'.format(status))
try:
# wait for the job to finish and report the ending status
sagemaker.get_waiter('training_job_completed_or_stopped').wait(TrainingJobName=job_name)
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info['TrainingJobStatus']
print("Training job ended with status: " + status)
except:
print('Training failed to start')
# if exception is raised, that means it has failed
message = sagemaker.describe_training_job(TrainingJobName=job_name)['FailureReason']
print('Training failed with the following error: {}'.format(message))
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info['TrainingJobStatus']
print("Training job ended with status: " + status)
print (training_info)
###Output
_____no_output_____
###Markdown
If you see the message,> `Training job ended with status: Completed`then that means training successfully completed and the output model was stored in the output path specified by `training_params['OutputDataConfig']`. You can also view information about and the status of a training job using the AWS SageMaker console. Just click on the "Jobs" tab. Deploy The Model A trained model does nothing on its own. We now want to use the model to perform inference. For this example, that means predicting the class label given an input image. This section involves several steps,1. [Create model](CreateModel) - Create a model from the training output 1. [Batch Transform](BatchTransform) - Create a transform job to perform batch inference. 1. [Host the model for realtime inference](HostTheModel) - Create an inference endpoint and perform realtime inference. Create model We now create a SageMaker Model from the training output. Using the model we can create an Endpoint Configuration.
###Code
%%time
import boto3
from time import gmtime, strftime
sage = boto3.Session().client(service_name='sagemaker')
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
model_name="image-classification-model" + timestamp
print(model_name)
info = sage.describe_training_job(TrainingJobName=job_name)
model_data = info['ModelArtifacts']['S3ModelArtifacts']
print(model_data)
hosting_image = get_image_uri(boto3.Session().region_name, 'image-classification')
primary_container = {
'Image': hosting_image,
'ModelDataUrl': model_data,
}
create_model_response = sage.create_model(
ModelName = model_name,
ExecutionRoleArn = role,
PrimaryContainer = primary_container)
print(create_model_response['ModelArn'])
###Output
_____no_output_____
###Markdown
Batch transformWe now create a SageMaker Batch Transform job using the model created above to perform batch prediction.
###Code
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
batch_job_name="image-classification-model" + timestamp
batch_input = s3validation + "001.ak47/"
request = \
{
"TransformJobName": batch_job_name,
"ModelName": model_name,
"MaxConcurrentTransforms": 16,
"MaxPayloadInMB": 6,
"BatchStrategy": "SingleRecord",
"TransformOutput": {
"S3OutputPath": 's3://{}/{}/output'.format(bucket, batch_job_name)
},
"TransformInput": {
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": batch_input
}
},
"ContentType": "application/x-image",
"SplitType": "None",
"CompressionType": "None"
},
"TransformResources": {
"InstanceType": "ml.p2.xlarge",
"InstanceCount": 1
}
}
print('Transform job name: {}'.format(batch_job_name))
print('\nInput Data Location: {}'.format(batch_input))
sagemaker = boto3.client('sagemaker')
sagemaker.create_transform_job(**request)
print("Created Transform job with name: ", batch_job_name)
while(True):
response = sagemaker.describe_transform_job(TransformJobName=batch_job_name)
status = response['TransformJobStatus']
if status == 'Completed':
print("Transform job ended with status: " + status)
break
if status == 'Failed':
message = response['FailureReason']
print('Transform failed with the following error: {}'.format(message))
raise Exception('Transform job failed')
time.sleep(30)
###Output
_____no_output_____
###Markdown
After the job completes, let's check the prediction results.
###Code
from urllib.parse import urlparse
import json
import numpy as np
s3_client = boto3.client('s3')
object_categories = ['ak47', 'american-flag', 'backpack', 'baseball-bat', 'baseball-glove', 'basketball-hoop', 'bat', 'bathtub', 'bear', 'beer-mug', 'billiards', 'binoculars', 'birdbath', 'blimp', 'bonsai-101', 'boom-box', 'bowling-ball', 'bowling-pin', 'boxing-glove', 'brain-101', 'breadmaker', 'buddha-101', 'bulldozer', 'butterfly', 'cactus', 'cake', 'calculator', 'camel', 'cannon', 'canoe', 'car-tire', 'cartman', 'cd', 'centipede', 'cereal-box', 'chandelier-101', 'chess-board', 'chimp', 'chopsticks', 'cockroach', 'coffee-mug', 'coffin', 'coin', 'comet', 'computer-keyboard', 'computer-monitor', 'computer-mouse', 'conch', 'cormorant', 'covered-wagon', 'cowboy-hat', 'crab-101', 'desk-globe', 'diamond-ring', 'dice', 'dog', 'dolphin-101', 'doorknob', 'drinking-straw', 'duck', 'dumb-bell', 'eiffel-tower', 'electric-guitar-101', 'elephant-101', 'elk', 'ewer-101', 'eyeglasses', 'fern', 'fighter-jet', 'fire-extinguisher', 'fire-hydrant', 'fire-truck', 'fireworks', 'flashlight', 'floppy-disk', 'football-helmet', 'french-horn', 'fried-egg', 'frisbee', 'frog', 'frying-pan', 'galaxy', 'gas-pump', 'giraffe', 'goat', 'golden-gate-bridge', 'goldfish', 'golf-ball', 'goose', 'gorilla', 'grand-piano-101', 'grapes', 'grasshopper', 'guitar-pick', 'hamburger', 'hammock', 'harmonica', 'harp', 'harpsichord', 'hawksbill-101', 'head-phones', 'helicopter-101', 'hibiscus', 'homer-simpson', 'horse', 'horseshoe-crab', 'hot-air-balloon', 'hot-dog', 'hot-tub', 'hourglass', 'house-fly', 'human-skeleton', 'hummingbird', 'ibis-101', 'ice-cream-cone', 'iguana', 'ipod', 'iris', 'jesus-christ', 'joy-stick', 'kangaroo-101', 'kayak', 'ketch-101', 'killer-whale', 'knife', 'ladder', 'laptop-101', 'lathe', 'leopards-101', 'license-plate', 'lightbulb', 'light-house', 'lightning', 'llama-101', 'mailbox', 'mandolin', 'mars', 'mattress', 'megaphone', 'menorah-101', 'microscope', 'microwave', 'minaret', 'minotaur', 'motorbikes-101', 'mountain-bike', 'mushroom', 'mussels', 'necktie', 'octopus', 'ostrich', 'owl', 'palm-pilot', 'palm-tree', 'paperclip', 'paper-shredder', 'pci-card', 'penguin', 'people', 'pez-dispenser', 'photocopier', 'picnic-table', 'playing-card', 'porcupine', 'pram', 'praying-mantis', 'pyramid', 'raccoon', 'radio-telescope', 'rainbow', 'refrigerator', 'revolver-101', 'rifle', 'rotary-phone', 'roulette-wheel', 'saddle', 'saturn', 'school-bus', 'scorpion-101', 'screwdriver', 'segway', 'self-propelled-lawn-mower', 'sextant', 'sheet-music', 'skateboard', 'skunk', 'skyscraper', 'smokestack', 'snail', 'snake', 'sneaker', 'snowmobile', 'soccer-ball', 'socks', 'soda-can', 'spaghetti', 'speed-boat', 'spider', 'spoon', 'stained-glass', 'starfish-101', 'steering-wheel', 'stirrups', 'sunflower-101', 'superman', 'sushi', 'swan', 'swiss-army-knife', 'sword', 'syringe', 'tambourine', 'teapot', 'teddy-bear', 'teepee', 'telephone-box', 'tennis-ball', 'tennis-court', 'tennis-racket', 'theodolite', 'toaster', 'tomato', 'tombstone', 'top-hat', 'touring-bike', 'tower-pisa', 'traffic-light', 'treadmill', 'triceratops', 'tricycle', 'trilobite-101', 'tripod', 't-shirt', 'tuning-fork', 'tweezer', 'umbrella-101', 'unicorn', 'vcr', 'video-projector', 'washing-machine', 'watch-101', 'waterfall', 'watermelon', 'welding-mask', 'wheelbarrow', 'windmill', 'wine-bottle', 'xylophone', 'yarmulke', 'yo-yo', 'zebra', 'airplanes-101', 'car-side-101', 'faces-easy-101', 'greyhound', 'tennis-shoes', 'toad', 'clutter']
def list_objects(s3_client, bucket, prefix):
response = s3_client.list_objects(Bucket=bucket, Prefix=prefix)
objects = [content['Key'] for content in response['Contents']]
return objects
def get_label(s3_client, bucket, prefix):
filename = prefix.split('/')[-1]
s3_client.download_file(bucket, prefix, filename)
with open(filename) as f:
data = json.load(f)
index = np.argmax(data['prediction'])
probability = data['prediction'][index]
print("Result: label - " + object_categories[index] + ", probability - " + str(probability))
return object_categories[index], probability
inputs = list_objects(s3_client, bucket, urlparse(batch_input).path.lstrip('/'))
print("Sample inputs: " + str(inputs[:2]))
outputs = list_objects(s3_client, bucket, batch_job_name + "/output")
print("Sample output: " + str(outputs[:2]))
# Check prediction result of the first 2 images
[get_label(s3_client, bucket, prefix) for prefix in outputs[0:2]]
###Output
_____no_output_____
###Markdown
Realtime inference We now host the model with an endpoint and perform realtime inference. This section involves several steps,1. [Create endpoint configuration](CreateEndpointConfiguration) - Create a configuration defining an endpoint. 1. [Create endpoint](CreateEndpoint) - Use the configuration to create an inference endpoint. 1. [Perform inference](PerformInference) - Perform inference on some input data using the endpoint. 1. [Clean up](CleanUp) - Delete the endpoint and model. Create endpoint configuration At launch, we will support configuring REST endpoints in hosting with multiple models, e.g. for A/B testing purposes. In order to support this, customers create an endpoint configuration that describes the distribution of traffic across the models, whether split, shadowed, or sampled in some way. In addition, the endpoint configuration describes the instance type required for model deployment, and at launch will describe the autoscaling configuration.
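As an illustration of the multi-model case, an endpoint configuration with two production variants splitting traffic 70/30 could be sketched as follows; `model_name_a` and `model_name_b` are hypothetical model names and are not created in this notebook:

```python
# Hypothetical A/B endpoint configuration splitting traffic between two models
ab_config_response = sage.create_endpoint_config(
    EndpointConfigName='image-classification-ab-epc',   # hypothetical name
    ProductionVariants=[
        {'VariantName': 'VariantA', 'ModelName': model_name_a,   # hypothetical model
         'InstanceType': 'ml.p2.xlarge', 'InitialInstanceCount': 1,
         'InitialVariantWeight': 0.7},
        {'VariantName': 'VariantB', 'ModelName': model_name_b,   # hypothetical model
         'InstanceType': 'ml.p2.xlarge', 'InitialInstanceCount': 1,
         'InitialVariantWeight': 0.3},
    ])
```

The cell below creates the single-variant configuration actually used in this notebook.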
###Code
from time import gmtime, strftime
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_config_name = job_name_prefix + '-epc-' + timestamp
endpoint_config_response = sage.create_endpoint_config(
EndpointConfigName = endpoint_config_name,
ProductionVariants=[{
'InstanceType':'ml.p2.xlarge',
'InitialInstanceCount':1,
'ModelName':model_name,
'VariantName':'AllTraffic'}])
print('Endpoint configuration name: {}'.format(endpoint_config_name))
print('Endpoint configuration arn: {}'.format(endpoint_config_response['EndpointConfigArn']))
###Output
_____no_output_____
###Markdown
Create endpointNext, the customer creates the endpoint that serves up the model, through specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 9-11 minutes to complete.
###Code
%%time
import time
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_name = job_name_prefix + '-ep-' + timestamp
print('Endpoint name: {}'.format(endpoint_name))
endpoint_params = {
'EndpointName': endpoint_name,
'EndpointConfigName': endpoint_config_name,
}
endpoint_response = sagemaker.create_endpoint(**endpoint_params)
print('EndpointArn = {}'.format(endpoint_response['EndpointArn']))
###Output
_____no_output_____
###Markdown
Finally, the endpoint can be created. It may take some time to create the endpoint...
###Code
# get the status of the endpoint
response = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = response['EndpointStatus']
print('EndpointStatus = {}'.format(status))
try:
sagemaker.get_waiter('endpoint_in_service').wait(EndpointName=endpoint_name)
finally:
resp = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = resp['EndpointStatus']
print("Arn: " + resp['EndpointArn'])
print("Create endpoint ended with status: " + status)
if status != 'InService':
message = sagemaker.describe_endpoint(EndpointName=endpoint_name)['FailureReason']
print('Training failed with the following error: {}'.format(message))
raise Exception('Endpoint creation did not succeed')
###Output
_____no_output_____
###Markdown
If you see the message,> `Endpoint creation ended with EndpointStatus = InService`then congratulations! You now have a functioning inference endpoint. You can confirm the endpoint configuration and status by navigating to the "Endpoints" tab in the AWS SageMaker console.We will finally create a runtime object from which we can invoke the endpoint. Perform inferenceFinally, the customer can now validate the model for use. They can obtain the endpoint from the client library using the result from previous operations, and generate classifications from the trained model using that endpoint.
###Code
import boto3
runtime = boto3.Session().client(service_name='runtime.sagemaker')
###Output
_____no_output_____
###Markdown
Download test image
###Code
!wget -O /tmp/test.jpg http://www.vision.caltech.edu/Image_Datasets/Caltech256/images/008.bathtub/008_0007.jpg
file_name = '/tmp/test.jpg'
# test image
from IPython.display import Image
Image(file_name)
import json
import numpy as np
with open(file_name, 'rb') as f:
payload = f.read()
payload = bytearray(payload)
response = runtime.invoke_endpoint(EndpointName=endpoint_name,
ContentType='application/x-image',
Body=payload)
result = response['Body'].read()
# result will be in json format and convert it to ndarray
result = json.loads(result)
# the result will output the probabilities for all classes
# find the class with maximum probability and print the class index
index = np.argmax(result)
object_categories = ['ak47', 'american-flag', 'backpack', 'baseball-bat', 'baseball-glove', 'basketball-hoop', 'bat', 'bathtub', 'bear', 'beer-mug', 'billiards', 'binoculars', 'birdbath', 'blimp', 'bonsai-101', 'boom-box', 'bowling-ball', 'bowling-pin', 'boxing-glove', 'brain-101', 'breadmaker', 'buddha-101', 'bulldozer', 'butterfly', 'cactus', 'cake', 'calculator', 'camel', 'cannon', 'canoe', 'car-tire', 'cartman', 'cd', 'centipede', 'cereal-box', 'chandelier-101', 'chess-board', 'chimp', 'chopsticks', 'cockroach', 'coffee-mug', 'coffin', 'coin', 'comet', 'computer-keyboard', 'computer-monitor', 'computer-mouse', 'conch', 'cormorant', 'covered-wagon', 'cowboy-hat', 'crab-101', 'desk-globe', 'diamond-ring', 'dice', 'dog', 'dolphin-101', 'doorknob', 'drinking-straw', 'duck', 'dumb-bell', 'eiffel-tower', 'electric-guitar-101', 'elephant-101', 'elk', 'ewer-101', 'eyeglasses', 'fern', 'fighter-jet', 'fire-extinguisher', 'fire-hydrant', 'fire-truck', 'fireworks', 'flashlight', 'floppy-disk', 'football-helmet', 'french-horn', 'fried-egg', 'frisbee', 'frog', 'frying-pan', 'galaxy', 'gas-pump', 'giraffe', 'goat', 'golden-gate-bridge', 'goldfish', 'golf-ball', 'goose', 'gorilla', 'grand-piano-101', 'grapes', 'grasshopper', 'guitar-pick', 'hamburger', 'hammock', 'harmonica', 'harp', 'harpsichord', 'hawksbill-101', 'head-phones', 'helicopter-101', 'hibiscus', 'homer-simpson', 'horse', 'horseshoe-crab', 'hot-air-balloon', 'hot-dog', 'hot-tub', 'hourglass', 'house-fly', 'human-skeleton', 'hummingbird', 'ibis-101', 'ice-cream-cone', 'iguana', 'ipod', 'iris', 'jesus-christ', 'joy-stick', 'kangaroo-101', 'kayak', 'ketch-101', 'killer-whale', 'knife', 'ladder', 'laptop-101', 'lathe', 'leopards-101', 'license-plate', 'lightbulb', 'light-house', 'lightning', 'llama-101', 'mailbox', 'mandolin', 'mars', 'mattress', 'megaphone', 'menorah-101', 'microscope', 'microwave', 'minaret', 'minotaur', 'motorbikes-101', 'mountain-bike', 'mushroom', 'mussels', 'necktie', 'octopus', 'ostrich', 'owl', 'palm-pilot', 'palm-tree', 'paperclip', 'paper-shredder', 'pci-card', 'penguin', 'people', 'pez-dispenser', 'photocopier', 'picnic-table', 'playing-card', 'porcupine', 'pram', 'praying-mantis', 'pyramid', 'raccoon', 'radio-telescope', 'rainbow', 'refrigerator', 'revolver-101', 'rifle', 'rotary-phone', 'roulette-wheel', 'saddle', 'saturn', 'school-bus', 'scorpion-101', 'screwdriver', 'segway', 'self-propelled-lawn-mower', 'sextant', 'sheet-music', 'skateboard', 'skunk', 'skyscraper', 'smokestack', 'snail', 'snake', 'sneaker', 'snowmobile', 'soccer-ball', 'socks', 'soda-can', 'spaghetti', 'speed-boat', 'spider', 'spoon', 'stained-glass', 'starfish-101', 'steering-wheel', 'stirrups', 'sunflower-101', 'superman', 'sushi', 'swan', 'swiss-army-knife', 'sword', 'syringe', 'tambourine', 'teapot', 'teddy-bear', 'teepee', 'telephone-box', 'tennis-ball', 'tennis-court', 'tennis-racket', 'theodolite', 'toaster', 'tomato', 'tombstone', 'top-hat', 'touring-bike', 'tower-pisa', 'traffic-light', 'treadmill', 'triceratops', 'tricycle', 'trilobite-101', 'tripod', 't-shirt', 'tuning-fork', 'tweezer', 'umbrella-101', 'unicorn', 'vcr', 'video-projector', 'washing-machine', 'watch-101', 'waterfall', 'watermelon', 'welding-mask', 'wheelbarrow', 'windmill', 'wine-bottle', 'xylophone', 'yarmulke', 'yo-yo', 'zebra', 'airplanes-101', 'car-side-101', 'faces-easy-101', 'greyhound', 'tennis-shoes', 'toad', 'clutter']
print("Result: label - " + object_categories[index] + ", probability - " + str(result[index]))
###Output
_____no_output_____
###Markdown
Clean up When we're done with the endpoint, we can delete it and the backing instances will be released. The sketch below shows how the endpoint configuration and the model can be removed as well; the code cell after it deletes the endpoint itself.
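A minimal sketch of removing the endpoint configuration and the model with the same boto3 client (the names are the ones created earlier in this notebook):

```python
# Remove the endpoint configuration and the model once the endpoint has been deleted
sage.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
sage.delete_model(ModelName=model_name)
```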
###Code
sage.delete_endpoint(EndpointName=endpoint_name)
###Output
_____no_output_____ |
jb/20210308/womens-day-2021.ipynb | ###Markdown
%%% metadatalayout: postcurrent: postcover: navigation: Truetitle: UBI as a means of reducing gendered povertydate: 2020-03-08tags: [blog, gender, poverty]class: post-templatesubclass: 'post'author: [katarina, max]%%% This year's [International Women's Day](https://www.internationalwomensday.com/) asks its celebrants to challenge the world in order to change it ([ChooseToChallenge](https://twitter.com/search?q=%23ChooseToChallenge)).The following simulation seeks to challenge the fact that women in the United States are more likely to be impoverished than men.In our analysis, we evaluate a Universal Basic Income (UBI), funded by a flat income tax, as a possible solution for both alleviating poverty and narrowing the gendered poverty gap.[^modeling][^modeling]: Data was gathered from the US Census Bureau’s March Supplement, which covers economic circumstances in 2019. We use the Supplemental Poverty Measure, which incorporates taxes and transfers (including in-kind benefits like SNAP) and adjusts for local housing costs. The flat income tax is applied on positive adjusted gross income.Poverty rates for children are roughly equal between genders, while adult women experience poverty at rates 17 percent greater than adult men: 12.4 percent vs. 10.6 percent.As a general trend, the difference in poverty rates between women and men increases with age.For example, 20.8 percent of women aged 85 or older are in poverty, compared to only 14.5 percent of men of the same age.Among people aged 85 or older, women experience deep poverty at twice the rate of men.
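As a rough sketch of the funding mechanism described in the footnote, the budget-neutral flat tax rate is simply the annual cost of the UBI divided by aggregate positive AGI. The totals below are placeholders for illustration only; the analysis that follows computes the actual values from the CPS microdata:

```python
# Illustrative budget-neutral flat tax rate for a $300/month UBI (placeholder totals)
monthly_ubi = 300
population = 328e6           # placeholder US resident population
total_positive_agi = 12e12   # placeholder aggregate positive AGI, in dollars
annual_cost = monthly_ubi * 12 * population
flat_tax_rate = annual_cost / total_positive_agi
print(f"Required flat tax rate: {flat_tax_rate:.1%}")   # ~9.8% with these placeholders
```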
###Code
import microdf as mdf
import numpy as np
import pandas as pd
import plotly.express as px
import ubicenter
df = pd.read_csv(
"https://github.com/MaxGhenis/datarepo/raw/master/pppub20.csv.gz",
usecols=[
"MARSUPWT",
"SPM_RESOURCES",
"SPM_POVTHRESHOLD",
"SPM_WEIGHT",
"SPM_NUMPER",
"A_SEX",
"A_AGE",
"SPM_ID",
"AGI",
],
)
df.columns = df.columns.str.lower()
df["weight"] = df.marsupwt / 100
df["spm_weight"] = df.spm_weight / 100
df["female"] = df.a_sex == 2
df["poverty"] = df.spm_resources < df.spm_povthreshold
df["deep_poverty"] = df.spm_resources < (df.spm_povthreshold / 2)
df["sex"] = np.where(df.female, "Female", "Male")
spm = df.groupby(
["spm_id", "spm_resources", "spm_weight", "spm_povthreshold", "spm_numper"]
)[["agi"]].sum()
spm["agi_pos"] = np.maximum(spm.agi, 0)
spm.reset_index(inplace=True)
total_population = df.weight.sum()
total_agi_pos = mdf.weighted_sum(spm, "agi_pos", "spm_weight")
# Bin into ages aligning with 18 year old threshold.
DARK_PURPLE = "#46296E" # Official Intl Womens Day Color.
LIGHT_PURPLE = "#907EA8" # Lightened version.
DARK_GREY = "#9E9E9E" # Gray 500 from Material Design.
LIGHT_GREY = "#BDBDBD" # Gray 400.
COLOR_MAP = {
"Female": DARK_PURPLE,
"Male": LIGHT_GREY,
"Female poverty": DARK_PURPLE,
"Female deep poverty": LIGHT_PURPLE,
"Male poverty": DARK_GREY,
"Male deep poverty": LIGHT_GREY,
"Poverty": DARK_PURPLE,
"Deep poverty": LIGHT_PURPLE,
}
df["age_group"] = pd.cut(df.a_age + 1, np.arange(0, 91, 5), labels=np.arange(0, 86, 5))
pov_age = mdf.weighted_mean(
df, ["poverty", "deep_poverty"], "marsupwt", groupby=["age_group", "sex"]
)
pov_age = pov_age.round(3)
pov_age.reset_index(inplace=True)
pov_age = pov_age.melt(["age_group", "sex"], ["poverty", "deep_poverty"])
pov_age["label"] = (
pov_age.sex
+ " "
+ np.where(pov_age.variable == "poverty", "poverty", "deep poverty")
)
fig = px.line(
pov_age, x="age_group", y="value", color="label", color_discrete_map=COLOR_MAP
)
fig.update_layout(
title="Poverty by gender and age",
xaxis_title="Age (in 5-year bins)",
yaxis_title="SPM poverty rate (2019)",
legend_title="",
yaxis_tickformat="%",
yaxis_range=[0, pov_age.value.max() * 1.1] #fig.update_xaxes(range=[1.5, 4.5])
)
fig.update_traces(mode="markers+lines", hovertemplate=None)
fig = ubicenter.format_fig(fig, show = False)
fig.show()
pov_age_diff = pov_age[pov_age.variable == "poverty"].pivot_table(
values="value", index="age_group", columns="sex"
)
pov_age_diff["female_minus_male"] = pov_age_diff.Female - pov_age_diff.Male
pov_age_diff.female_minus_male.plot()
###Output
_____no_output_____
###Markdown
With a significant difference in rates of poverty for adult women as compared to men, how might a UBI help close that gap?Our past modeling has shown that a UBI would lessen poverty across demographic groups, and also shrink poverty disparities by [race](https://blog.ubicenter.org/20210118/racial-poverty-disparities-mlk-day-2021.html), [Indigenous heritage](https://blog.ubicenter.org/20201012/indigenous.html), and [disability status](https://blog.ubicenter.org/20200731/ada30.html).Our research here shows that it would do the same by gender.After receiving a UBI, the poverty rates for women and men converge as the monthly amount increases (we've focused on adults here).Consistent with our past simulations, a \$300 monthly UBI would halve the rate of female poverty and female deep poverty.
###Code
def pov(female, monthly_ubi):
# Calculate poverty rates for a gender subset given a monthly UBI amount.
cost = monthly_ubi * total_population * 12
tax_rate = cost / total_agi_pos # Divide by positive AGI.
spm["new_resources"] = (
spm.spm_resources - tax_rate * spm.agi_pos + (12 * monthly_ubi * spm.spm_numper)
)
person = df[(df.female == female) & (df.a_age > 17)].merge(
spm[["spm_id", "new_resources"]], on="spm_id"
)
return pd.Series(
dict(
pov=mdf.poverty_rate(
person, income="new_resources", threshold="spm_povthreshold", w="weight"
),
deep_pov=mdf.deep_poverty_rate(
person, income="new_resources", threshold="spm_povthreshold", w="weight"
),
)
)
def pov_row(row):
return pov(row.female, row.monthly_ubi)
gender_ubi = mdf.cartesian_product(
{"female": [True, False], "monthly_ubi": np.arange(0, 1001, 100)}
)
gender_ubi = pd.concat([gender_ubi, gender_ubi.apply(pov_row, axis=1)], axis=1)
gender_ubi_long = gender_ubi.melt(
id_vars=["female", "monthly_ubi"], value_vars=["pov", "deep_pov"]
)
gender_ubi_long["label"] = (
pd.Series(np.where(gender_ubi_long.female, "Female", "Male"))
+ " "
+ pd.Series(np.where(gender_ubi_long.variable == "pov", "poverty", "deep poverty"))
)
gender_ubi_long.value = gender_ubi_long.value.round(3)
fig = px.line(
gender_ubi_long,
x="monthly_ubi",
y="value",
color="label",
color_discrete_map=COLOR_MAP,
)
fig.update_layout(
title="Poverty by gender and UBI amount",
xaxis_title="Monthly universal basic income amount (funded by flat income tax)",
xaxis_tickprefix="$",
yaxis_title="SPM poverty rate (2019)",
legend_title="",
yaxis_tickformat="%",
)
fig.update_traces(mode="markers+lines", hovertemplate=None)
fig = ubicenter.format_fig(fig, show = False)
fig.show()
###Output
_____no_output_____
###Markdown
UBI significantly narrows the ratio of poverty and deep poverty rates for adult women compared to adult men.With a monthly UBI of \$500, the rate of women compared to men in poverty is more than halved and the rate of women compared to men in deep poverty is nearly equal.
###Code
gender_ratio = gender_ubi.pivot_table(
values="pov", index=["monthly_ubi"], columns="female"
).reset_index()
gender_ratio.rename({True: "female", False: "male"}, axis=1, inplace=True)
gender_ratio["ratio"] = gender_ratio.female / gender_ratio.male
gender_ratio["poverty_type"] = "Poverty"
deep_gender_ratio = gender_ubi.pivot_table(
values="deep_pov", index=["monthly_ubi"], columns="female"
).reset_index()
deep_gender_ratio.rename({True: "female", False: "male"}, axis=1, inplace=True)
deep_gender_ratio["ratio"] = deep_gender_ratio.female / deep_gender_ratio.male
deep_gender_ratio["poverty_type"] = "Deep poverty"
gender_ratios = pd.concat([gender_ratio, deep_gender_ratio])
gender_ratios.ratio = gender_ratios.ratio.round(2)
fig = px.line(
gender_ratios,
x="monthly_ubi",
y="ratio",
color="poverty_type",
color_discrete_map=COLOR_MAP,
)
fig.update_yaxes(range=[0.94, 1.18])
fig.add_shape(
type="line",
x0=0,
y0=1,
x1=1000,
y1=1,
line=dict(color=LIGHT_GREY, dash="dash"),
xref="x",
yref="y",
)
fig.update_layout(
title="Poverty disparities by gender with respect to UBI",
xaxis_title="Monthly universal basic income (funded by flat income tax)",
xaxis_tickprefix="$",
yaxis_title="Ratio of women to men SPM poverty rate (2019)",
legend_title="",
)
fig.update_traces(mode="markers+lines", hovertemplate=None)
fig = ubicenter.format_fig(fig, show = False)
fig.show()
###Output
_____no_output_____ |
docs/examples/hsv_example.ipynb | ###Markdown
HSV Operator Example This example presents the HSV manipulation operator within DALI and how to easily set up a pipeline with it. HSV Color Space HSV color space is an alternative to RGB (and many other color spaces) for representing images. It is somewhat more intuitive than RGB, since the model better fits human perception of colors. For more information on this, see [Wikipedia](https://en.wikipedia.org/wiki/HSL_and_HSV). Step-by-step guide 1. Let's start by importing a handful of utils and DALI itself.
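As a quick illustration of the representation itself (a small sketch using Python's standard-library `colorsys`, independent of DALI), converting an RGB triple shows its hue, saturation and value components:

```python
import colorsys

# A reddish pixel: hue near 0 degrees, high saturation, high value
r, g, b = 0.9, 0.1, 0.1
h, s, v = colorsys.rgb_to_hsv(r, g, b)
print(f"hue = {h * 360:.0f} deg, saturation = {s:.2f}, value = {v:.2f}")
```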
###Code
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import matplotlib.pyplot as plt
batch_size = 1
image_filename = "images"
###Output
_____no_output_____
###Markdown
The `batch_size` used is `1`, to keep things simple. You can always extend it on your own. 2. Next, let's implement the pipelines. We've presented 2 versions of the pipeline. The CPU one does all the processing (i.e. reading the file, decoding it and the HSV manipulation) on the CPU, while the other pipeline conducts decoding and HSV manipulation on the GPU.
###Code
class HsvCpuPipeline(Pipeline):
def __init__(self, batch_size, num_threads, device_id):
super(HsvCpuPipeline, self).__init__(batch_size, num_threads, device_id, seed=42)
self.input = ops.FileReader(device="cpu", file_root=image_filename)
self.decode = ops.ImageDecoder(device="cpu", output_type=types.RGB)
self.hsv = ops.Hsv(device="cpu", hue=120, saturation=1, value=.4, output_type=types.RGB)
def define_graph(self):
read, _ = self.input()
image = self.decode(read)
converted = self.hsv(image)
return image, converted
class HsvGpuPipeline(Pipeline):
def __init__(self, batch_size, num_threads, device_id):
super(HsvGpuPipeline, self).__init__(batch_size, num_threads, device_id, seed=42)
self.input = ops.FileReader(device="cpu", file_root=image_filename)
self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
self.hsv = ops.Hsv(device="gpu", hue=+120, saturation=2, value=1, output_type=types.RGB)
def define_graph(self):
read, _ = self.input()
image = self.decode(read)
converted = self.hsv(image.gpu())
return image, converted
###Output
_____no_output_____
###Markdown
The function below is used to display the result of the HSV manipulation in DALI. Since the pipelines we set up return 2 outputs, the original image and the modified image, the function acquires both of them from the output and displays them. An additional flag (`cpu`) is specified to determine whether the pipeline output comes from the CPU or the GPU. In the latter case, we have to tell the output to return a CPU-accessible copy of the data.
###Code
def display(output, cpu = True):
i = 0 # Tweak that to have various images from batch
img1 = output[0].at(i) if cpu else output[0].as_cpu().at(i)
img2 = output[1].at(i) if cpu else output[1].as_cpu().at(i)
fig, ax = plt.subplots(1,2)
ax[0].imshow(img1);
ax[1].imshow(img2);
###Output
_____no_output_____
###Markdown
3. Now let's just build the pipelines, run them and display the results. First the GPU one:
###Code
pipegpu = HsvGpuPipeline(batch_size=batch_size, num_threads=1, device_id=0)
pipegpu.build()
gpu_output = pipegpu.run()
display(gpu_output, cpu=False)
###Output
_____no_output_____
###Markdown
4. And the CPU:
###Code
pipecpu = HsvCpuPipeline(batch_size=batch_size, num_threads=1, device_id=0)
pipecpu.build()
cpu_output = pipecpu.run()
display(cpu_output)
###Output
_____no_output_____
###Markdown
HSV Operator Example This example demonstrates the use of the HSV operator, which manipulates the hue, saturation and value (brightness) aspects of the image. Introduction HSV Color space HSV represents colors by separating hue, saturation and brightness. In this color space, the hue is represented as an angle on the color circle. Saturation goes from 0 (greyscale) to 100% (fully saturated colors) and value goes from 0 (black) to 1 (full brightness). For details see [Wikipedia](https://en.wikipedia.org/wiki/HSL_and_HSV). A note on implementation For performance reasons, DALI doesn't use the exact definition of HSV and approximates the operations in HSV space by linear (matrix) operations on RGB colors. This greatly improves performance at the cost of a modest loss of fidelity. Step-by-step guide 1. Let's start by importing a handful of utils and DALI itself.
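To give an idea of what such a linear approximation can look like, the sketch below builds a 3x3 matrix that rotates RGB vectors around the grey (1,1,1) axis, which is a common way to approximate a hue shift. This is only an illustration of the general technique, not DALI's actual implementation:

```python
import numpy as np

def approx_hue_rotation_matrix(hue_deg):
    """Approximate a hue shift as a rotation of RGB space around the grey (1,1,1) axis."""
    a = np.deg2rad(hue_deg)
    cos_a = np.cos(a)
    k = (1.0 - cos_a) / 3.0              # contribution of the grey axis
    s = np.sqrt(1.0 / 3.0) * np.sin(a)   # skew term from the rotation
    return np.array([
        [cos_a + k, k - s, k + s],
        [k + s, cos_a + k, k - s],
        [k - s, k + s, cos_a + k],
    ])

# Rotating pure red by 120 degrees maps it (approximately) onto pure green
m = approx_hue_rotation_matrix(120)
print(m @ np.array([1.0, 0.0, 0.0]))   # ~[0, 1, 0]
```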
###Code
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import matplotlib.pyplot as plt
batch_size = 10
image_filename = "images"
###Output
_____no_output_____
###Markdown
Batch size is greater than one to facilitate switching between images at the end of the notebook. 2. Next, let's implement the pipelines. We've presented 2 versions of the pipeline. The CPU one does all the processing (i.e. reading the file, decoding it and the HSV manipulation) on the CPU, while the other pipeline conducts decoding and HSV manipulation on the GPU.
###Code
class HsvCpuPipeline(Pipeline):
def __init__(self, batch_size, num_threads, device_id):
super(HsvCpuPipeline, self).__init__(batch_size, num_threads, device_id, seed=42)
self.input = ops.FileReader(device="cpu", file_root=image_filename)
self.decode = ops.ImageDecoder(device="cpu", output_type=types.RGB)
self.hsv = ops.Hsv(device="cpu", hue=120, saturation=1, value=.4, dtype=types.UINT8)
def define_graph(self):
read, _ = self.input()
image = self.decode(read)
converted = self.hsv(image)
return image, converted
class HsvGpuPipeline(Pipeline):
def __init__(self, batch_size, num_threads, device_id):
super(HsvGpuPipeline, self).__init__(batch_size, num_threads, device_id, seed=42)
self.input = ops.FileReader(device="cpu", file_root=image_filename)
self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
self.hsv = ops.Hsv(device="gpu", hue=+120, saturation=2, value=1, dtype=types.UINT8)
def define_graph(self):
read, _ = self.input()
image = self.decode(read)
converted = self.hsv(image.gpu())
return image, converted
###Output
_____no_output_____
###Markdown
The function below is used to display the result of the HSV manipulation in DALI. Since the pipelines we set up return 2 outputs, the original image and the modified image, the function acquires both of them from the output and displays them. An additional flag (`cpu`) is specified to determine whether the pipeline output comes from the CPU or the GPU. In the latter case, we have to tell the output to return a CPU-accessible copy of the data.
###Code
def display(output, cpu = True):
i = 0 # Tweak that to have various images from batch
img1 = output[0].at(i) if cpu else output[0].as_cpu().at(i)
img2 = output[1].at(i) if cpu else output[1].as_cpu().at(i)
fig, ax = plt.subplots(1,2)
ax[0].imshow(img1);
ax[1].imshow(img2);
###Output
_____no_output_____
###Markdown
3. Now let's just build the pipelines, run them and display the results. First the GPU one:
###Code
pipegpu = HsvGpuPipeline(batch_size=batch_size, num_threads=1, device_id=0)
pipegpu.build()
gpu_output = pipegpu.run()
display(gpu_output, cpu=False)
###Output
_____no_output_____
###Markdown
4. And the CPU:
###Code
pipecpu = HsvCpuPipeline(batch_size=batch_size, num_threads=1, device_id=0)
pipecpu.build()
cpu_output = pipecpu.run()
display(cpu_output)
###Output
_____no_output_____ |
nbs/00_data.image.ipynb | ###Markdown
Image> Basic functionalities to work with geospatial image data (geotiff). Work in process
###Code
#hide
from nbdev.showdoc import *
# export
import rasterio as rio
import numpy as np
import matplotlib.pyplot as plt
from typing import List
import pandas as pd
import skimage
from skimage.feature import greycomatrix, greycoprops
from itertools import product
# hide
from pathlib import Path
import os
import random
import re
from fastcore.test import *
# export
def open_geotiff(fn, bands:List[int]=None) -> np.ndarray:
"""Open geotiff image from path, cast it to float and scale it to 0-1 range, optionally with only `bands` input bands."
Returns numpy array of shape (C,W,H)
"""
with rio.open(str(fn)) as f:
data = f.read()
data = data.astype(np.float32)
data /= 255.
if bands is not None: data = data[bands]
return data
data_path = Path('data/')
example = '914153.tif'
tiffile = open_geotiff(data_path/example, bands=[0])
test_eq(type(tiffile), np.ndarray)
test_eq(tiffile.shape[0], 1)
assert tiffile.min() >= 0
assert tiffile.max() <= 1
tiffile = open_geotiff(data_path/example, bands=[0,1])
test_eq(type(tiffile), np.ndarray)
test_eq(tiffile.shape[0], 2)
assert tiffile.min() >= 0
assert tiffile.max() <= 1
tiffile = open_geotiff(data_path/example)
test_eq(type(tiffile), np.ndarray)
test_eq(tiffile.shape[0], 3)
assert tiffile.min() >= 0
assert tiffile.max() <= 1
fig, axs = plt.subplots(1,3, figsize=(14,3), dpi=200, sharey=True, sharex=True)
for i in range(3):
axs[i].hist(tiffile[i].ravel(), bins=20)
axs[i].grid()
###Output
_____no_output_____
###Markdown
Spectral indices
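Written out, the indices implemented below are (with $L$ the soil-adjustment coefficient and band names standing for the corresponding reflectance values, matching the docstrings):

$$\mathrm{NDVI}=\frac{\mathrm{NIR}-\mathrm{Red}}{\mathrm{NIR}+\mathrm{Red}},\qquad \mathrm{SAVI}=\frac{\mathrm{NIR}-\mathrm{Red}}{\mathrm{NIR}+\mathrm{Red}+L}\,(1+L),\qquad \mathrm{GCI}=\frac{\mathrm{NIR}}{\mathrm{Green}}-1,\qquad \mathrm{AVI}=\mathrm{NIR}\,(1-\mathrm{Red})\,(\mathrm{NIR}-\mathrm{Red})$$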
###Code
# export
def calc_normalized_spectral_index(im:np.ndarray, band_1:int, band_2:int) -> np.ndarray:
"Calculate normalized spectral index (band_1 - band_2)/(band_1 + band_2). Can be used with NDVI and such simple indices"
return (im[band_1] - im[band_2]) / (im[band_1] + im[band_2])
def calc_avi(im:np.ndarray, nir:int, red:int) -> np.ndarray:
"Calculate AVI (nir *(1-red) * (nir-red))"
    return im[nir] * (1 - im[red]) * (im[nir] - im[red])  # use band values, not band indices
def calc_savi(im:np.ndarray, nir:int, red:int, l:float=0.5) -> np.ndarray:
"Calculate Soil Adjusted Vegetation Index ((nir-red)/(nir+red+l)) * (1+l). Default uses Landsat coefficient L"
return ((im[nir] - im[red]) / (im[nir] + im[red] + l)) * (1 + l)
def calc_gci(im:np.ndarray, nir:int, green:int) -> np.ndarray:
"Calculate Green Clorophyll Index nir/green - 1"
return im[nir] / im[green] - 1
###Output
_____no_output_____
###Markdown
It's possible to add indices with `np.vstack`
###Code
tiffile_ndvi = np.vstack((tiffile, calc_normalized_spectral_index(tiffile, 1, 0)[None]))
tiffile_ndvi.shape
###Output
_____no_output_____
###Markdown
Show what the different bands and indices look like. The example image is a NIR-Red-Green false color image, so for instance NDVI is calculated with band indices 0 and 1.
###Code
fig, axs = plt.subplots(figsize=(8,8), dpi=300)
axs.set_xticks([])
axs.set_yticks([])
plt.imshow(tiffile.swapaxes(0,2).swapaxes(0,1))
fig, axs = plt.subplots(3,3, figsize=(12,12), gridspec_kw={})
for a in axs.flatten():
a.set_xticks([])
a.set_yticks([])
#NIR
im = axs[0,0].imshow(tiffile[0], vmin=0, vmax=1, cmap='RdYlGn_r')
axs[0,0].set_title('NIR')
#RED
axs[0,1].imshow(tiffile[1], vmin=0, vmax=1, cmap='RdYlGn_r')
axs[0,1].set_title('RED')
#GREEN
axs[0,2].imshow(tiffile[2], vmin=0, vmax=1, cmap='RdYlGn_r')
axs[0,2].set_title('GREEN')
#False color
axs[1,0].imshow(tiffile.swapaxes(0,2).swapaxes(0,1))
axs[1,0].set_title('False color')
fig.colorbar(im, ax=axs[0,:].ravel().tolist())
#NDVI
idx = axs[1,1].imshow(calc_normalized_spectral_index(tiffile, 0, 1), vmin=-1, vmax=1, cmap='RdYlGn_r')
axs[1,1].set_title('NDVI')
#GNDVI
axs[1,2].imshow(calc_normalized_spectral_index(tiffile, 0, 2), vmin=-1, vmax=1, cmap='RdYlGn_r')
axs[1,2].set_title('GNDVI')
#AVI
axs[2,0].imshow(calc_avi(tiffile, 0, 1), #vmin=-1, vmax=1,
cmap='RdYlGn_r')
axs[2,0].set_title('AVI')
#SAVI
axs[2,1].imshow(calc_savi(tiffile, 0, 1), vmin=-1, vmax=1, cmap='RdYlGn_r')
axs[2,1].set_title('SAVI')
#GCI
axs[2,2].imshow(calc_gci(tiffile, 0, 2), vmin=-1, vmax=1, cmap='RdYlGn_r')
axs[2,2].set_title('GCI')
fig.colorbar(idx, ax=axs[1:,:].ravel().tolist())
plt.show()
###Output
_____no_output_____
###Markdown
Circular masking Function to mask out all pixels outside a circular field plot. The example resolution is ~30 cm, so a 31-pixel radius corresponds to roughly a 9 m radius.
###Code
# export
def mask_plot_from_image(data:np.ndarray, radius:float=31) -> np.ndarray:
"Select only data from within field plot of radius (radius-1) pixels"
center = (int(data.shape[1]/2), int(data.shape[2]/2))
Y, X = np.ogrid[:data.shape[1], :data.shape[2]]
dist_from_center = np.sqrt((X-center[0])**2 + (Y-center[1])**2)
mask = dist_from_center <= radius
data[:,~mask] = np.nan
return data
tiffile = open_geotiff(data_path/example, bands=[0])
tiffile = mask_plot_from_image(tiffile)
test_eq(type(tiffile), np.ndarray)
test_eq(tiffile.shape[0], 1)
assert np.nanmin(tiffile) >= 0
assert np.nanmax(tiffile) <= 1
tiffile = open_geotiff(data_path/example, bands=[0,1])
tiffile = mask_plot_from_image(tiffile)
test_eq(type(tiffile), np.ndarray)
test_eq(tiffile.shape[0], 2)
assert np.nanmin(tiffile) >= 0
assert np.nanmax(tiffile) <= 1
tiffile = open_geotiff(data_path/example)
tiffile = mask_plot_from_image(tiffile)
test_eq(type(tiffile), np.ndarray)
test_eq(tiffile.shape[0], 3)
assert np.nanmin(tiffile) >= 0
assert np.nanmax(tiffile) <= 1
###Output
_____no_output_____
###Markdown
Test that circular mask works
###Code
fig, axs = plt.subplots(3,3, figsize=(12,12), gridspec_kw={})
for a in axs.flatten():
a.set_xticks([])
a.set_yticks([])
#NIR
im = axs[0,0].imshow(tiffile[0], vmin=0, vmax=1, cmap='RdYlGn_r')
axs[0,0].set_title('NIR')
#RED
axs[0,1].imshow(tiffile[1], vmin=0, vmax=1, cmap='RdYlGn_r')
axs[0,1].set_title('RED')
#GREEN
axs[0,2].imshow(tiffile[2], vmin=0, vmax=1, cmap='RdYlGn_r')
axs[0,2].set_title('GREEN')
#False color
axs[1,0].imshow(tiffile.swapaxes(0,2).swapaxes(0,1))
axs[1,0].set_title('False color')
fig.colorbar(im, ax=axs[0,:].ravel().tolist())
#NDVI
idx = axs[1,1].imshow(calc_normalized_spectral_index(tiffile, 0, 1), vmin=-1, vmax=1, cmap='RdYlGn_r')
axs[1,1].set_title('NDVI')
#GNDVI
axs[1,2].imshow(calc_normalized_spectral_index(tiffile, 0, 2), vmin=-1, vmax=1, cmap='RdYlGn_r')
axs[1,2].set_title('GNDVI')
#AVI
axs[2,0].imshow(calc_avi(tiffile, 0, 1), vmin=-1, vmax=1, cmap='RdYlGn_r')
axs[2,0].set_title('AVI')
#SAVI
axs[2,1].imshow(calc_savi(tiffile, 0, 1), vmin=-1, vmax=1, cmap='RdYlGn_r')
axs[2,1].set_title('SAVI')
#GCI
axs[2,2].imshow(calc_gci(tiffile, 0, 2), vmin=-1, vmax=1, cmap='RdYlGn_r')
axs[2,2].set_title('GCI')
fig.colorbar(idx, ax=axs[1:,:].ravel().tolist())
plt.show()
###Output
_____no_output_____
###Markdown
Process image data into tabular models
###Code
#export
def image_metrics(fn, mask_plot:bool=True, radius:int=31) -> dict:
"Calculate metrics from NIR-red-green -images"
image = open_geotiff(fn)
if mask_plot == True: image = mask_plot_from_image(image, radius=radius)
# Max, mean, std and coefficient of variation
features = {}
features['nir_pix_max'] = np.nanmax(image[0])
features['nir_pix_min'] = np.nanmin(image[0])
features['nir_pix_mean'] = np.nanmean(image[0])
features['nir_pix_std'] = np.nanstd(image[0])
features['nir_pix_var'] = np.nanvar(image[0])
features['red_pix_max'] = np.nanmax(image[1])
features['red_pix_min'] = np.nanmin(image[1])
features['red_pix_mean'] = np.nanmean(image[1])
features['red_pix_std'] = np.nanstd(image[1])
features['red_pix_var'] = np.nanvar(image[1])
features['green_pix_max'] = np.nanmax(image[2])
features['green_pix_min'] = np.nanmin(image[2])
features['green_pix_mean'] = np.nanmean(image[2])
features['green_pix_std'] = np.nanstd(image[2])
features['green_pix_var'] = np.nanvar(image[2])
# spectral indices
# NDVI
ndvi = calc_normalized_spectral_index(image, 0, 1)
features['ndvi_pix_max'] = np.nanmax(ndvi)
features['ndvi_pix_min'] = np.nanmin(ndvi)
features['ndvi_pix_mean'] = np.nanmean(ndvi)
features['ndvi_pix_std'] = np.nanstd(ndvi)
features['ndvi_pix_var'] = np.nanvar(ndvi)
return features
# TODO more tests
example_metrics = image_metrics(data_path/example)
test_eq(type(example_metrics), dict)
example_metrics
#export
def glcm_xplusy(glcm, k, distance, angle):
"sum each element where the indices of the glcm sum to k"
s = 0
for c in range(0, glcm.shape[0]):
targ = k - c
if targ >= 0 and targ < glcm.shape[0]: s += glcm[targ, c, distance, angle]
        if targ < 0: return s  # once c exceeds k, targ stays negative, so we can stop early
return s
def glcm_xminusy(glcm, k, distance, angle):
"sum each element where the difference of the indices is k"
s = 0
for c in range(0, glcm.shape[0]):
targ = k + c
if targ < glcm.shape[0]: s += glcm[targ, c, distance, angle]
    if k == 0: return s  # diagonal (k = 0) entries are counted once, not doubled
    return s*2
def textural_features(fn,
band_names:List=['nir', 'red', 'green', 'ndvi'],
distances:List[int]=[8],
angles:List[float]=[0, np.pi/4, np.pi/2, 3*np.pi/4],
n_grey:int=20) -> dict:
"""Get textural features from images. Works close to R package radiomics `GLCMFeatures` functions.
However skimage makes glcm a bit differently"""
tex_features = {}
im = open_geotiff(fn)
# add NDVI
im = np.vstack((im, calc_normalized_spectral_index(im, 1, 0)[None]))
for b in range(im.shape[0]):
pref = band_names[b]
# bin image to at maximum n_grey levels
bins = np.linspace(im[b].min(),im[b].max(), min(n_grey, len(np.unique(im[b]))) + 1)
binned_im = np.digitize(im[b], bins) - 1
n_levels = binned_im.max() + 1
# get glcm. Note that skimage makes glcm differently than radiomics
glcm = greycomatrix(binned_im,
distances=distances,
angles=angles,
levels=n_levels,
normed=True, symmetric=True)
# greycoprops gives some features easily
# Others not so much
means = np.zeros((len(distances), len(angles)))
variances = np.zeros((len(distances), len(angles)))
autocorrelations = np.zeros((len(distances), len(angles)))
cluster_prominences = np.zeros((len(distances), len(angles)))
cluster_shades = np.zeros((len(distances), len(angles)))
cluster_tendencies = np.zeros((len(distances), len(angles)))
diff_entropies = np.zeros((len(distances), len(angles)))
energies = np.zeros((len(distances), len(angles)))
entropies = np.zeros((len(distances), len(angles)))
hgn1s = np.zeros((len(distances), len(angles)))
idmns = np.zeros((len(distances), len(angles)))
idns = np.zeros((len(distances), len(angles)))
inverse_variances = np.zeros((len(distances), len(angles)))
sum_averages = np.zeros((len(distances), len(angles)))
sum_entropies = np.zeros((len(distances), len(angles)))
sum_variances = np.zeros((len(distances), len(angles)))
        # TODO: check that multiple distances work
for d,a in product(range(len(distances)), range(len(angles))):
# means
means[d,a] = np.sum(np.sum(glcm[:,:,d,a], axis=1) * np.arange(1,n_levels+1))
scale_matrix = np.empty((n_levels, n_levels))
for i, j in product(range(n_levels), range(n_levels)):
# variance
variances[d,a] += (((i) - means[d,a])**2) * glcm[i,j,d,a]
                # cluster metrics
                cluster_prominences[d,a] += ((i + j - 2*means[d,a])**4) * glcm[i,j,d,a]
                cluster_shades[d,a] += ((i+j - 2*means[d,a])**3)*glcm[i,j,d,a]
cluster_tendencies[d,a] += ((i + j - 2 * means[d,a])**2) * glcm[i,j,d,a]
# scale matrix for autocorrelations
scale_matrix[i,j] = (i+1) * (j+1)
# homogeneity 1
hgn1s[d,a] += glcm[i,j,d,a] / (1 + (np.abs(i-j)))
# IDM normalized
idmns[d,a] += glcm[i,j,d,a] / (1 + ((np.abs(i-j)**2)/(n_levels)**2))
# ID normalized
idns[d,a] += glcm[i,j,d,a] / (1 + (np.abs(i-j)/n_levels))
# Inverse variance
if i != j: inverse_variances[d,a] += glcm[i,j,d,a] / np.abs(i-j)**2
# autocorrelations
            autocorrelations[d,a] = np.sum(glcm[:,:,d,a]*scale_matrix)
# diff_entropy
for i in range(n_levels-1):
pxy = glcm_xminusy(glcm, k=i, distance=d, angle=a)
if pxy > 0: diff_entropies[d,a] += pxy * np.log2(pxy)
diff_entropies[d,a] *= -1
# energy
energies[d,a] = np.sum(np.square(glcm[...,d,a]))
# entropy
entropies[d,a] = skimage.measure.shannon_entropy(glcm[...,d,a])
for i in range(2*(n_levels)-1):
# sum averages
pxy = glcm_xplusy(glcm, k=i, distance=d, angle=a)
sum_averages[d,a] += (i+2) * pxy
# sum entropies
if pxy > 0: sum_entropies[d,a] += pxy * np.log2(pxy)
sum_entropies[d,a] *= -1
for i in range(2*(n_levels) - 1):
# sum variances
pxy = glcm_xplusy(glcm, k=i-1, distance=d, angle=a)
sum_variances[d,a] += ((i+2 - sum_entropies[d,a])**2) * pxy
# Average all the angles
tex_features[f'{pref}_mean'] = np.mean(means)
tex_features[f'{pref}_var'] = np.mean(variances)
tex_features[f'{pref}_ac'] = np.mean(autocorrelations)
tex_features[f'{pref}_cProminence'] = np.mean(cluster_prominences)
tex_features[f'{pref}_cShade'] = np.mean(cluster_shades)
tex_features[f'{pref}_cTendency'] = np.mean(cluster_tendencies)
tex_features[f'{pref}_contrast'] = np.mean(greycoprops(glcm, 'contrast'))
tex_features[f'{pref}_corr'] = np.mean(greycoprops(glcm, 'correlation'))
tex_features[f'{pref}_diffentropy'] = np.mean(diff_entropies)
tex_features[f'{pref}_dissimilarity'] = np.mean(greycoprops(glcm, 'dissimilarity'))
tex_features[f'{pref}_energy'] = np.mean(energies)
tex_features[f'{pref}_ent'] = np.mean(entropies)
tex_features[f'{pref}_homogeneity1'] = np.mean(hgn1s)
tex_features[f'{pref}_homogeneity2'] = np.mean(greycoprops(glcm, 'homogeneity'))
tex_features[f'{pref}_idmn'] = np.mean(idmns)
tex_features[f'{pref}_idn'] = np.mean(idns)
tex_features[f'{pref}_iv'] = np.mean(inverse_variances)
tex_features[f'{pref}_maxProb'] = np.mean(glcm.max(axis=(0,1)))
tex_features[f'{pref}_sumaverage'] = np.mean(sum_averages)
tex_features[f'{pref}_sumentropy'] = np.mean(sum_entropies)
tex_features[f'{pref}_sumvariance'] = np.mean(sum_variances)
# Information measures of correlation TODO
#tex_features[f'{pref}_icm1'] = None
#tex_features[f'{pref}_icm2'] = None
return tex_features
textural_features(data_path/example, n_grey=20)
# export
def process_image_features(fn:str, mask_plot:bool=True, radius:int=31):
"Process rasters to tabular format. Todo Textural features parasm"
image_features = image_metrics(fn, mask_plot=mask_plot, radius=radius)
texture_features = textural_features(fn)
features = {**image_features, **texture_features}
return features
len(process_image_features(data_path/example))
# hide
from nbdev.export import notebook2script
notebook2script()
!nbdev_build_docs
###Output
Converted 00_data.image.ipynb.
Converted 01_data.las.ipynb.
Converted 02_tabular.preprocessing.ipynb.
Converted 03_model.inception3dv3.ipynb.
Converted 04_interpretation.ipynb.
Converted 05_metrics.ipynb.
Converted 06_model.ensemble.ipynb.
Converted 07_model.alexnet.ipynb.
Converted index.ipynb.
converting: /mnt/d/Users/E1005164/enveco/nbs/01_data.las.ipynb
converting: /mnt/d/Users/E1005164/enveco/nbs/00_data.image.ipynb
converting /mnt/d/Users/E1005164/enveco/nbs/index.ipynb to README.md
|
Examples/03Jiboa.ipynb | ###Markdown
Jiboa Case Study This code is prepared to run the distributed model for the Jiboa River in El Salvador, where the catchment consists of an upstream lake and a volcanic area. - You have to set the root directory to the examples folder to enable the code to read the input files. Download Data For the data of this case study you have to download this folder [Jiboa Data](https://drive.google.com/drive/folders/1yy6xWwx8Ucc-O72FgVxsuvJBdk3u0sVa?usp=sharing) from Google Drive and set it as the working directory instead of the Path defined in the next cell. Import modules
###Code
import os
Path = "F:/02Case studies/El Salvador"
os.chdir(Path)
#%library
import gdal
import datetime as dt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# HAPI modules
from Hapi.run import RunHAPIwithLake
import Hapi.hbv as HBV
import Hapi.performancecriteria as Pf
import Hapi.raster as Raster
# the beginning of the simulation of the calibration data
start = dt.datetime(2012,6,14,19,00,00)
end = dt.datetime(2014,11,17,00,00,00)
calib_end = dt.datetime(2013,12,23,00,00,00)
# paths to the meteorological data
PrecPath = prec_path = "inputs/Hapi/meteodata/4000/calib/prec_clipped"
Evap_Path = evap_path = "inputs/Hapi/meteodata/4000/calib/evap_clipped"
TempPath = temp_path = "inputs/Hapi/meteodata/4000/calib/temp_clipped"
#DemPath = path+"GIS/4000/dem4000.tif"
FlowAccPath = "inputs/Hapi/GIS/4000_matched/acc4000.tif"
FlowDPath = "inputs/Hapi/GIS/4000_matched/fd4000.tif"
ParPath = "inputs/Hapi/meteodata/4000/parameters/"
#ParPath = "inputs/Hapi/meteodata/4000/"+"parameters.txt"
Paths=[PrecPath, Evap_Path, TempPath, FlowAccPath, FlowDPath, ]
#p2=[24, 1530]
#init_st=[0,5,5,5,0]
init_st = np.loadtxt("inputs/Hapi/meteodata/Initia-jiboa.txt", usecols=0).tolist()
snow = 0
# lake meteorological data
ind = pd.date_range(start, end, freq = "H" )
lakedata = pd.read_csv("inputs/Hapi/meteodata/lakedata.csv", index_col = 0)
lakedata.index = ind
lakeCalib = lakedata.loc[start:calib_end]
lakeValid = lakedata.loc[calib_end:end]
# convert the dataframe into array
lakeCalibArray = lakeCalib.values
# take only the plake, et, t and tm columns and exclude the last column
lakeCalibArray = lakeCalibArray[:,0:-1]
# where the lake discharges its flow (give the indices of the cell)
lakecell = [2,1] # 4km
#lakecell = [4,2] # 2km
#lakecell = [10,4] # 1km
#lakecell = [19,10] # 500m
LakeParameters = np.loadtxt("inputs/Hapi/meteodata/4000/Lakeparameters.txt").tolist()
StageDischargeCurve = np.loadtxt("inputs/Hapi/meteodata/curve.txt")
p2 = [1, 227.31, 133.98, 70.64]
Lake_init_st = np.loadtxt("inputs/Hapi/meteodata/Initia-lake.txt", usecols=0).tolist()
###Output
_____no_output_____
###Markdown
Run the model
###Code
Sim =pd.DataFrame(index = lakeCalib.index)
st, Sim['Q'], q_uz_routed, q_lz_trans = RunHAPIwithLake(HBV, Paths, ParPath, p2, init_st,
snow, lakeCalibArray, StageDischargeCurve,
LakeParameters, lakecell,Lake_init_st)
###Output
meteorological data are read successfully
GIS data are read successfully
Parameters are read successfully
###Markdown
Evaluate model performance
###Code
WS = {}
WS['type'] = 1
WS['N'] = 3
ModelMetrics=dict()
ModelMetrics['CalibErrorHf']=Pf.RMSEHF(lakeCalib['Q'],Sim['Q'],WS['type'],WS['N'],0.75)
ModelMetrics['CalibErrorLf']=Pf.RMSELF(lakeCalib['Q'],Sim['Q'],WS['type'],WS['N'],0.75)
ModelMetrics['CalibNSEHf']=Pf.NSE(lakeCalib['Q'],Sim['Q'])
ModelMetrics['CalibNSELf']=Pf.NSE(np.log(lakeCalib['Q']),np.log(Sim['Q']))
ModelMetrics['CalibRMSE']=Pf.RMSE(lakeCalib['Q'],Sim['Q'])
ModelMetrics['CalibKGE']=Pf.KGE(lakeCalib['Q'],Sim['Q'])
ModelMetrics['CalibWB']=Pf.WB(lakeCalib['Q'],Sim['Q'])
print("RMSE(HF) = " + str(round(ModelMetrics['CalibErrorHf'],2)))
print("RMSE(LF) = " + str(round(ModelMetrics['CalibErrorLf'],2)))
print("RMSE = " + str(round(ModelMetrics['CalibRMSE'],2)))
print("NSE(HF) = " + str(round(ModelMetrics['CalibNSEHf'],2)))
print("NSE(LF) = " + str(round(ModelMetrics['CalibNSELf'],2)))
print("KGE = " + str(round(ModelMetrics['CalibKGE'],2)))
print("WB = " + str(round(ModelMetrics['CalibWB'],2)))
plt.figure(50,figsize=(15,8))
Sim.Q.plot(color=[(0,0.3,0.7)],linewidth=2.5,label="Simulated data", zorder = 10)
ax1=lakeCalib['Q'].plot(color='#DC143C',linewidth=2.8,label='Observed data')
ax1.annotate("Model performance" ,xy=('2012-12-01 00:00:00',20),fontsize=15)
ax1.annotate("RMSE = " + str(round(ModelMetrics['CalibRMSE'],3)),xy=('2012-12-01 00:00:00',20-1.5),fontsize=15)
ax1.annotate("NSE = " + str(round(ModelMetrics['CalibNSEHf'],2)),xy=('2012-12-01 00:00:00',20-3),fontsize=15)
plt.legend()
###Output
_____no_output_____
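###Markdown
For reference, here is a minimal sketch (not part of the Hapi package) of how RMSE and NSE are commonly defined; the implementations in Hapi.performancecriteria used above may differ in details such as high/low-flow weighting:
###Code
# Minimal reference implementations (assumptions for illustration, not the Hapi code itself)
def rmse_sketch(obs, sim):
    obs, sim = np.asarray(obs), np.asarray(sim)
    return np.sqrt(np.mean((obs - sim) ** 2))
def nse_sketch(obs, sim):
    obs, sim = np.asarray(obs), np.asarray(sim)
    return 1 - np.sum((obs - sim) ** 2) / np.sum((obs - np.mean(obs)) ** 2)
###Output
_____no_output_____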
###Markdown
Store the result into rasters
###Code
# create list of names
src=gdal.Open(FlowAccPath)
index=pd.date_range(start,calib_end,freq="1H")
resultspath="results/upper_zone_discharge/4000/"
names=[resultspath+str(i)[:-6] for i in index]
names=[i.replace("-","_") for i in names]
names=[i.replace(" ","_") for i in names]
names=[i+".tif" for i in names]
Raster.RastersLike(src,q_uz_routed[:,:,:-1],names)
###Output
_____no_output_____ |
jupyter_notebooks/machine_learning/ebook_mastering_ml_in_6_steps/Chapter_6_Codes/Code/Reinforcement_Learning.ipynb | ###Markdown
Reinforcement Learning Reinforcement learning is a goal-oriented learning method based on interaction with an environment. The objective is to get an agent to act in an environment in order to maximize its rewards. Here the agent is an intelligent program and the environment is the external condition. Reinforcement learning is like teaching your dog a trick: you cannot tell it what to do, but you can reward or punish it if it does the right or wrong thing, respectively. It has to figure out what it did that made it get the reward or punishment, which is known as the credit assignment problem. We can use a similar method to train computers to do many tasks, such as playing chess or other games, scheduling jobs, and controlling robot limbs.
###Code
from IPython.display import Image
Image(filename='../Chapter 6 Figures/Reinforcement_Learning.png', width=600)
###Output
_____no_output_____
###Markdown
According to the paper published by Deepmind Technologies in 2013, the Q-learning rule for updating the state-action values is given by: Q[s,a]new = Q[s,a]prev + α * (r + γ*max_a' Q[s',a'] – Q[s,a]prev), where* α is the learning rate, * r is the reward for the latest action, * γ is the discount factor, and* max_a' Q[s',a'] is the estimate of the new value from the best action. If the optimal value Q[s',a'] of the sequence s' at the next time step was known for all possible actions a', then the optimal strategy is to select the action a' maximizing the expected value of r + γ*max_a' Q[s',a'] – Q[s,a]prev. Example Let's consider an example where an agent is trying to get out of a maze. It can move one random square or area in any direction, and gets a reward if it exits. The most common way to formalize a reinforcement problem is to represent it as a Markov decision process. Assume the agent is in state b (a maze area) and the target is to reach state f. Since within one step the agent can reach f from b, let's put a reward of 100 (otherwise 0) on links between nodes that allow the agent to reach the target state.
###Code
from IPython.display import Image
Image(filename='../Chapter 6 Figures/Maze_Markov.png', width=800)
import numpy as np
from random import randint
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
# defines the reward/link connection graph
R = np.matrix([[-1, -1, -1, -1, 0, -1],
[-1, -1, -1, 0, -1, 100],
[-1, -1, -1, 0, -1, -1],
[-1, 0, 0, -1, 0, -1],
[ 0, -1, -1, 0, -1, 100],
[-1, 0, -1, -1, 0, 100]])
Q = np.zeros_like(R)
###Output
_____no_output_____
###Markdown
The -1's in the table mean there isn't a link between those nodes. For example, State 'a' cannot go directly to State 'b'.
###Code
# learning parameter
gamma = 0.8
# Initialize random_state
initial_state = randint(0,4)
# This function returns all available actions in the state given as an argument
def available_actions(state):
current_state_row = R[state,]
av_act = np.where(current_state_row >= 0)[1]
return av_act
# This function chooses at random which action to be performed within the range
# of all the available actions.
def sample_next_action(available_actions_range):
    next_action = int(np.random.choice(available_actions_range, 1))
return next_action
# This function updates the Q matrix according to the path selected and the Q
# learning algorithm
def update(current_state, action, gamma):
max_index = np.where(Q[action,] == np.max(Q[action,]))[1]
if max_index.shape[0] > 1:
max_index = int(np.random.choice(max_index, size = 1))
else:
max_index = int(max_index)
max_value = Q[action, max_index]
# Q learning formula
Q[current_state, action] = R[current_state, action] + gamma * max_value
# Get available actions in the current state
available_act = available_actions(initial_state)
# Sample next action to be performed
action = sample_next_action(available_act)
###Output
_____no_output_____
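###Markdown
Note that the update() above applies the rule with an implicit learning rate of 1, so the new estimate fully replaces the old one. A minimal sketch of the general update with an explicit learning rate alpha, written as a pure function so it does not touch the Q matrix used in this notebook:
###Code
# General Q-learning update (a sketch; the notebook's update() corresponds to alpha = 1)
def q_update(q_sa, reward, q_next_max, gamma, alpha=0.5):
    return q_sa + alpha * (reward + gamma * q_next_max - q_sa)
###Output
_____no_output_____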
###Markdown
Training
###Code
# Train over 100 iterations, re-iterate the process above).
for i in range(100):
current_state = np.random.randint(0, int(Q.shape[0]))
available_act = available_actions(current_state)
action = sample_next_action(available_act)
update(current_state,action,gamma)
# Normalize the "trained" Q matrix
print "Trained Q matrix: \n", Q/np.max(Q)*100
###Output
Trained Q matrix:
[[ 0 0 0 0 0 0]
[ 0 0 0 0 0 100]
[ 0 0 0 0 0 0]
[ 0 0 0 0 0 0]
[ 0 0 0 0 0 0]
[ 0 0 0 0 0 100]]
###Markdown
Testing
###Code
current_state = 2
steps = [current_state]
while current_state != 5:
next_step_index = np.where(Q[current_state,] == np.max(Q[current_state,]))[1]
if next_step_index.shape[0] > 1:
next_step_index = int(np.random.choice(next_step_index, size = 1))
else:
next_step_index = int(next_step_index)
steps.append(next_step_index)
current_state = next_step_index
# Print selected sequence of steps
print "Best sequence path: ", steps
###Output
Best sequence path: [2, 3, 1, 5]
|
Exploratory Data Analysis with Chocolate.ipynb | ###Markdown
Exploratory Data Analysis with Chocolate Analysing the dataset of Chocolate Bar Ratings to unearth key insights hidden within the data Context Chocolate is one of the most popular candies in the world. Each year, residents of the United States collectively eat more than 2.8 billion pounds. However, not all chocolate bars are created equal! This dataset contains expert ratings of over 1,700 individual chocolate bars, along with information on their regional origin, percentage of cocoa, the variety of chocolate bean used and where the beans were grown. Rating System 5= Elite (Transcending beyond the ordinary limits) 4= Premium (Superior flavor development, character and style) 3= Satisfactory (3.0) to praiseworthy (3.75) (well made with special qualities) 2= Disappointing (Passable but contains at least one significant flaw) 1= Unpleasant (mostly unpalatable) Acknowledgements The dataset used here has been acquired from Rachael Tatman's Chocolate Bar Ratings dataset on Kaggle. The original ratings were compiled by Brady Brelinski, Founding Member of the Manhattan Chocolate Society. For up-to-date information, as well as additional content (including interviews with craft chocolate makers), please see his website: Flavors of Cacao Loading Data
###Code
# Import necessary libraries
import pandas as pd #data-wrangling library
import matplotlib.pyplot as plt #data-visualization library
import seaborn as sns #data-visualization library
# load the dataset from local storage
df=pd.read_csv("Dataset/flavors_of_cacao.csv")
# Understanding the basic ground information of my data
def all_about_my_data(df):
print("Here is some Basic Ground Info about your Data:\n")
# Shape of the dataframe
print("Number of Instances:",df.shape[0])
print("Number of Features:",df.shape[1])
# Summary Stats
print("\nSummary Stats:")
print(df.describe())
# Missing Value Inspection
print("\nMissing Values:")
print(df.isna().sum())
all_about_my_data(df)
###Output
Here is some Basic Ground Info about your Data:
Number of Instances: 1795
Number of Features: 9
Summary Stats:
REF Review\nDate Rating
count 1795.000000 1795.000000 1795.000000
mean 1035.904735 2012.325348 3.185933
std 552.886365 2.927210 0.478062
min 5.000000 2006.000000 1.000000
25% 576.000000 2010.000000 2.875000
50% 1069.000000 2013.000000 3.250000
75% 1502.000000 2015.000000 3.500000
max 1952.000000 2017.000000 5.000000
Missing Values:
Company \n(Maker-if known) 0
Specific Bean Origin\nor Bar Name 0
REF 0
Review\nDate 0
Cocoa\nPercent 0
Company\nLocation 0
Rating 0
Bean\nType 1
Broad Bean\nOrigin 1
dtype: int64
###Markdown
The Tantrums of the Feature NamesImagine an unsuspecting analyst runs the **df.head()** command for this dataset and then tries to view the first 5 entries of the **Review Date** feature based on the **head()** command's output. What does he get?
###Code
# Using a try-except error handling function
try :
df["Review Date"].head()
except :
print("An error occurred!")
###Output
An error occurred!
###Markdown
**This is what he gets!!**This error in reality is a "Feature not found error" i.e the command fails to identify **Review Date** as a feature (The large error statement has been replaced with the simpler output statement for ease of understanding)What went wrong?
###Code
# Inspect feature names
df.dtypes
###Output
_____no_output_____
###Markdown
The above cell's output makes a revelation about our data, and it is not a very pleasant one! The feature names are a bit messy, as the names have the "\n" or "newline" character amidst them (as described by the **df.dtypes** command), and this will lead to hard-to-identify errors; even when identified, they will need excruciating methods of rectification (nobody prefers going to each feature name and renaming it explicitly!).
###Code
# Cleaning our feature names
cols = list(df.columns)
# Function to replace newline characters and spaces in the feature names
def rec_features(feature_names):
rec_feat = []
for f in feature_names:
rec_feat.append(((f.casefold()).replace("\n","_")).replace(" ","_"))
return rec_feat
print("Feature Names before Cleaning:")
print(cols)
print("\nFeature Names after Cleaning:")
print(rec_features(cols))
###Output
Feature Names before Cleaning:
['Company\xa0\n(Maker-if known)', 'Specific Bean Origin\nor Bar Name', 'REF', 'Review\nDate', 'Cocoa\nPercent', 'Company\nLocation', 'Rating', 'Bean\nType', 'Broad Bean\nOrigin']
Feature Names after Cleaning:
['company\xa0_(maker-if_known)', 'specific_bean_origin_or_bar_name', 'ref', 'review_date', 'cocoa_percent', 'company_location', 'rating', 'bean_type', 'broad_bean_origin']
###Markdown
Now, our features look much safer than they were before. However, the **"company\x..." feature still looks very convoluted**. Let's edit that with some manual removal.Finally, we shall re-assign the new feature names to our dataframe.
###Code
# Manual Removal
new_feature_names = rec_features(cols)
new_feature_names[0] = "company"
# Re-assigning feature names
df=df.rename(columns=dict(zip(df.columns,new_feature_names)))
df.dtypes
###Output
_____no_output_____
###Markdown
**The feature names look a lot more friendly now**! Are we Missing Something? Identifying missing values within our dataset and solving the problem
###Code
# Checking out if we have missing values
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1795 entries, 0 to 1794
Data columns (total 9 columns):
company 1795 non-null object
specific_bean_origin_or_bar_name 1795 non-null object
ref 1795 non-null int64
review_date 1795 non-null int64
cocoa_percent 1795 non-null object
company_location 1795 non-null object
rating 1795 non-null float64
bean_type 1794 non-null object
broad_bean_origin 1794 non-null object
dtypes: float64(1), int64(2), object(6)
memory usage: 126.3+ KB
###Markdown
As per the above output, there are just two missing values in our dataset.
###Code
df[['bean_type', 'broad_bean_origin']].head()
###Output
_____no_output_____
###Markdown
*BUT WAIT!*The **bean_type** feature clearly has more empty values according to the above cell's output even though the **df.info()** command only depicts 1 missing value! So, why this conundrum?Let's check it out with a bit of **"Intuitively Written Test Code"**.
###Code
# What are these missing values in "bean_type" encoded as?
print(df['bean_type'].value_counts().head())
print("Missing Spaces encoded as:")
list(df['bean_type'][0:10])
###Output
887
Trinitario 419
Criollo 153
Forastero 87
Forastero (Nacional) 52
Name: bean_type, dtype: int64
Missing Spaces encoded as:
###Markdown
Oops...so we have **887 instances** in which "bean_type" is encoded as **space** or **\xa0**.
###Code
# Replace the weird spaces with None (Symbolizes no data)
def repl_space(x):
    # Compare by value (not identity) and leave all other entries unchanged
    if x == "\xa0":
        return "None"
    return x
# apply()
df['bean_type'] = df['bean_type'].apply(repl_space)
df.head()
###Output
_____no_output_____
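###Markdown
For reference, the same replacement could also be done directly with pandas, without a helper function (a sketch of an equivalent alternative to the apply() above):
###Code
# Equivalent one-liner alternative (shown for reference only)
df['bean_type'] = df['bean_type'].replace('\xa0', 'None')
###Output
_____no_output_____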
###Markdown
Thus, we have filled those weird ambiguous missing values with a much better alternative.**NOTE :** Imputing the missing values with **None** does not offer great advantage from the viewpoint of analysis. However, it helps us maintain a *much cleaner dataset* which I feel is as important as keeping the visualizations clean. Convert Cocoa_percent to numerical values The % notation in 'cocoa_percent' is going to be a perpetual pain later on as it masks a numerical feature to be of an object dtype. So, let's make that conversion next.
###Code
# Making that much needed conversion
df['cocoa_percent']=df['cocoa_percent'].str.replace('%','').astype(float)/100
df.head()
###Output
_____no_output_____
###Markdown
The Effect of Time - How did the Quality of Chocolate change over the years?
###Code
# Cocoa Percentage patterns over the years
d5 = df.groupby('review_date').aggregate({'cocoa_percent':'mean'})
d5 = d5.reset_index()
# Plotting
sns.set()
plt.figure(figsize=(15, 4))
ax = sns.lineplot(x='review_date', y='cocoa_percent', data=d5)
ax.set(xticks=d5.review_date.values)
plt.xlabel("\nDate of Review")
plt.ylabel("Average Cocoa Percentage")
plt.title("Cocoa Percentage patterns over the years \n")
plt.show()
###Output
_____no_output_____
###Markdown
Percentage of Cocoa over the years (Taking the average amounts per year)* The highest percentage of cocoa in a chocolate bar came in 2008 and was about 73%.* The lowest percentage of cocoa followed in the very next year, 2009 and hit 69%.* There was a steep rise in the amount of cocoa in chocolate from 2009 to 2013 where it rose to about 72.2% from 69%.* From 2014, a steady decline in cocoa percentage in chocolate bars have been noticed and in 2017, it stands at just above 71.5%.
###Code
# Rating patterns over the years
d6 = df.groupby('review_date').aggregate({'rating':'mean'})
d6 = d6.reset_index()
# Plotting
sns.set()
plt.figure(figsize=(15, 4))
ax = sns.lineplot(x='review_date', y='rating', data=d6)
ax.set(xticks=d6.review_date.values)
plt.xlabel("\nDate of Review")
plt.ylabel("Average Rating")
plt.title("Average Rating over the years \n")
plt.show()
###Output
_____no_output_____
###Markdown
Rating over the years (Taking the average amounts per year)* The lowest ever average rating was around 3 and it came in 2008.* Since then to 2011, there was a steady increase in average ratings and in 2011 it was at 3.26.* From 2011 to 2017, there have been several fluctuations in the ratings, and in 2017 the rating lies at its apex at around 3.31. The Year 2008 - Year of Coincidence or something more than that?* The highest average cocoa percent was in 2008* The lowest average ratings came in 2008The next year 2009 saw two major changes from the previous year :* There was a drastic reduce in cocoa content on an average* The average rating across the world had an increase from 3.00 to 3.08 in 2008Is this an indication of how chocolate producers tried reducing their cocoa content to make better chocolate? **OR**Was this just coincidence?**Let's leave that to your speculation!** The Chocolate Companies - The Best, The Patterns
###Code
# Top 5 companies in terms of chocolate bars in this dataset
d = df['company'].value_counts().sort_values(ascending=False).head(5)
d = pd.DataFrame(d)
d = d.reset_index() # dataframe with top 5 companies
# Plotting
sns.set()
plt.figure(figsize=(10,4))
sns.barplot(x='index', y='company', data=d)
plt.xlabel("\nChocolate Company")
plt.ylabel("Number of Bars")
plt.title("Top 5 Companies in terms of Chocolate Bars\n")
plt.show()
###Output
_____no_output_____
###Markdown
* Soma has the highest number of chocolate bars in this dataset with 47.
###Code
# Distribution of Chocolate Bars
sns.set()
plt.figure(figsize=(8,6))
sns.countplot(df['company'].value_counts().sort_values(ascending=False))
plt.xlabel("\nCount of chocolate bars")
plt.ylabel("Number of Companies")
plt.title("Distribution of Chocolate Bars")
plt.show()
###Output
_____no_output_____
###Markdown
* **120+ companies** have just one entry in this dataset.
###Code
# Top 5 companies in terms of average ratings
d2 = df.groupby('company').aggregate({'rating':'mean'})
d2 = d2.sort_values('rating', ascending=False).head(5)
d2 = d2.reset_index()
# Plotting
sns.set()
plt.figure(figsize=(20, 6))
sns.barplot(x='company', y='rating', data=d2)
plt.xlabel("\nChocolate Company")
plt.ylabel("Average Rating")
plt.title("Top 5 Companies in terms of Average Ratings \n")
plt.show()
###Output
_____no_output_____
###Markdown
* Tobago Estate (Pralus) has a rating of 4.0 (the highest), however it has only one chocolate bar entry in this dataset.* These top 5 companies have very high ratings, however they have very low chocolate bars in the dataset.* Amedei has 13. Rest all have under 5.
###Code
# Top 5 companies in terms of average Cocoa Percentage
d2 = df.groupby('company').aggregate({'cocoa_percent':'mean'})
d2 = d2.sort_values('cocoa_percent', ascending=False).head(5)
d2 = d2.reset_index()
# Plotting
sns.set()
plt.figure(figsize=(15, 4))
sns.barplot(x='company', y='cocoa_percent', data=d2)
plt.xlabel("\nChocolate Company")
plt.ylabel("Average Cocoa Percentage")
plt.title("Top 5 Companies in terms of Average Cocoa Percentage \n")
plt.show()
###Output
_____no_output_____
###Markdown
* All these companies produce chocolate with very high cocoa percentage (more than 80%)
###Code
# Average rating over the years (Top 5)
top5_dict = {}
for element in list(d['index']):
temp = df[df['company']==element]
top5_dict[element]=temp
top5_list = list(top5_dict.keys())
### Rating patterns over the years
d7 = df.groupby(['review_date', 'company']).aggregate({'rating':'mean'})
d7 = d7.reset_index()
d7 = d7[d7['company'].isin(top5_list)]
# Plotting
sns.set()
plt.figure(figsize=(15, 4))
ax = sns.lineplot(x='review_date', y='rating', hue="company", data=d7, palette="husl")
ax.set(xticks=d6.review_date.values)
plt.xlabel("\nDate of Review")
plt.ylabel("Average Rating")
plt.title("Average Rating over the years (Top 5 Producer Companies)\n")
plt.show()
###Output
_____no_output_____
###Markdown
Time and the Chocolate Companies* Pralus and Bonnat were the earliest companies among these top 5 to be reviewed in 2006, while A. Morin was the latest at 2012* Both Bonnat and Pralus started around with the same average rating in 2006 of around 3.40, but in the very next year of 2007, whle Pralus hit it's highest ever rating of 4.00, Bonnat slumped to it's lowest of 2.50. As of 2016, Bonnat stands 0.25 rating points clear of Pralus on the yearly average* The worst rating among these top 5 came in 2009 when Pralus scored only a 2.00 on average. This was a result of Pralus's steady decline from 4.00 in 2007 to 2.00 in 2009 (The company could really use this insight to understand what went wrong during that period of 2 years)* Coincidentally, the highest rating was just a year back, 2008 when Bonnat hit 4.00 (a feat Pralus had achieved in 2007)* From 2011 to 2015, Pralus has shown consistency in the average ratings* A. Morin was reviewed only for the years 2012, 2013, 2014, 2015 and 2016. As of 2016, it's got the highest average rating at 3.75* Fresco has not been reviewed after 2014, and its last review gave it around 3.30 on average rating* Soma, the largest producer of chocolate bars, showcases constant fluctuations* Soma was first reviewed in 2009 where it got around 3.42. In it's latest review in 2016, it has a 3.61* Soma's lowest rating came in 2009 (3.42) and this is still higher than the lowest ratings other companies have got over all years Following the Largest Chocolate Bar Producer (In terms of quantity) - Soma
###Code
# Preparing Soma for analysis
soma = df[df['company']=='Soma']
### Where does Soma get it's beans from ?
d3 = soma['broad_bean_origin'].value_counts().sort_values(ascending=False).head(5)
d3 = pd.DataFrame(d3)
d3 = d3.reset_index()
# Plotting
sns.set()
plt.figure(figsize=(10, 6))
sns.barplot(x='index', y='broad_bean_origin', data=d3)
plt.xlabel("\nBroad Bean Origin")
plt.ylabel("Number of Chocolate Bars")
plt.title("Where does Soma get it's beans from? \n")
plt.show()
###Output
_____no_output_____
###Markdown
* Venezuela is the largest provider of Soma's beans.
###Code
### How are ratings of Chocolate bars by Soma ?
sns.kdeplot(soma['rating'], legend=False, color="brown", shade=True)
plt.xlabel("\nRating of the Chocolate Bar")
plt.ylabel("Proportion of Chocolate Bars")
plt.title("Ratings of Chocolate produced by Soma\n")
plt.show()
###Output
C:\Users\Ramshankar Yadhunath\Anaconda3\lib\site-packages\scipy\stats\stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
###Markdown
* Soma has a major proportion of its bars rated from satisfactory levels to really high. So, they do produce some **good** chocolate.
###Code
### Soma's performance over the years
d4 = soma.groupby('review_date').aggregate({'rating':'mean'})
d4 = d4.reset_index()
# Plotting
plt.figure(figsize=(10, 6))
sns.lineplot(x='review_date', y='rating', data=d4)
plt.xlabel("\nDate of Review")
plt.ylabel("Average Rating")
plt.title("Soma's Average Rating over the years\n")
plt.show()
###Output
_____no_output_____
###Markdown
Re-analyzing Soma Ratings through Time* The worst average rating Soma ever got came in the year 2009 at 3.42, when it was first reviewed* The highest average rating achieved came in 2010 at 3.75 (a significant rise from it's nadir the previous year)* Between 2012 and 2014, Soma's average rating saw a slump which revived after* 3.75 was achieved in 2015 again; it slumped to 3.61 in 2016
###Code
# Soma's performance over the years
d4 = soma.groupby('review_date').aggregate({'cocoa_percent':'mean'})
d4 = d4.reset_index()
# Plotting
plt.figure(figsize=(10, 6))
sns.lineplot(x='review_date', y='cocoa_percent', data=d4)
plt.xlabel("\nDate of Review")
plt.ylabel("Percentage of Cocoa")
plt.title("Soma's Percentage of Cocoa over the years\n")
plt.show()
###Output
_____no_output_____
###Markdown
Cocoa percent in Soma chocolates over Time* First review in 2009 showed 70% cocoa* The lowest percentage of cocoa in a Soma bar was in 2011 at 69%* In 2015, Soma had the highest ever cocoa percent in their chocolate bar at 72.5%* Latest review in 2016 discloses 69.6% cocoa in Soma's chocolate bars Categorizing Chocolate based on Ratings How many Chocolate bars are above or below 'Satisfactory levels' ?
###Code
# Chocolate Bar levels
unsatisfactory = df[df['rating'] < 3.0]
satisfactory = df[(df['rating'] >= 3.0) & (df.rating < 4)]
pre_elite = df[df['rating'] >= 4.0]
label_names=['Unsatisfactory','Above Satisfactory (Excludes Premium and Elite)','Premium and Elite']
sizes = [unsatisfactory.shape[0],satisfactory.shape[0],pre_elite.shape[0]]
# Now let's make the donut plot
explode = (0.05,0.05,0.05)
my_circle=plt.Circle((0,0),0.7,color='white')
plt.figure(figsize=(7,7))
plt.pie(sizes,labels=label_names,explode=explode,autopct='%1.1f%%',pctdistance=0.85,startangle=90,shadow=True)
fig=plt.gcf()
fig.gca().add_artist(my_circle)
plt.axis('equal')
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
* This donut plot affirms that premium and elite chocolate is very rare, at only 5.6%.* 75% of the chocolate bars in the study belong to 'Above Satisfactory'('premium and elite' are also a part of this category).* And, 25% of the chocolate bars that have been rated have ratings under 3.0. Rating Distributions
###Code
# The counts of each rating
r=list(df['rating'].value_counts())
rating=df['rating'].value_counts().index.tolist()
rat=dict(zip(rating,r))
for key,val in rat.items():
print ('Rating:',key,'Reviews:',val)
plt.figure(figsize=(10,5))
sns.countplot(x='rating',data=df)
plt.xlabel('Rating of chocolate bar',size=12,color='blue')
plt.ylabel('Number of Chocolate bars',size=12,color='blue')
plt.show()
###Output
Rating: 3.5 Reviews: 392
Rating: 3.0 Reviews: 341
Rating: 3.25 Reviews: 303
Rating: 2.75 Reviews: 259
Rating: 3.75 Reviews: 210
Rating: 2.5 Reviews: 127
Rating: 4.0 Reviews: 98
Rating: 2.0 Reviews: 32
Rating: 2.25 Reviews: 14
Rating: 1.5 Reviews: 10
Rating: 1.0 Reviews: 4
Rating: 1.75 Reviews: 3
Rating: 5.0 Reviews: 2
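###Markdown
To see exactly which bars received the top rating, we can filter the dataframe directly (a quick check supporting the bullet points below):
###Code
# Bars with the maximum rating of 5.0
df[df['rating'] == 5.0][['company', 'rating']]
###Output
_____no_output_____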
###Markdown
* Most bars have been rated at 3.5.* Only 2 bars are rated at 5.0 (elite). Both belong to **Amedei**. Number of Chocolate bars per percentage of Cocoa
###Code
# Cocoa percent and choco bars
plt.figure(figsize=(10,5))
df['cocoa_percent'].value_counts().head(10).sort_index().plot.bar(color=['#d9d9d9','#b3b3b3','#808080','#000000','#404040','#d9d9d9','#b3b3b3','#404040','#b3b3b3'])
plt.xlabel('Percentage of Cocoa',size=12,color='black')
plt.ylabel('Number of Chocolate bars',size=12,color='black')
plt.show()
###Output
_____no_output_____
###Markdown
* The above plot has the top 10 cocoa percentages in terms of number of chocolate bars.* The vast majority of bars have 70% cocoa, followed by 75% and 72%. What is the relation between 'Cocoa Percent' and 'Rating'? Is there any correlation between Cocoa Percent and Rating of the bar? If it is, is that a positive correlation or a negative one? Can we predict rating of a bar given it's cocoa percentage?
###Code
# Cocoa Percent and Rating
sns.lmplot(x='cocoa_percent',y='rating',fit_reg=False,scatter_kws={"color":"darkred","alpha":0.3,"s":100},data=df)
plt.xlabel('Percentage of Cocoa',size=12,color='darkred')
plt.ylabel('Expert Rating of the Bar',size=12,color='darkred')
plt.show()
###Output
_____no_output_____
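###Markdown
The numerical correlation mentioned below can be computed directly (a quick check; the exact value depends on the cleaned data):
###Code
# Pearson correlation between cocoa percentage and rating
df['cocoa_percent'].corr(df['rating'])
###Output
_____no_output_____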
###Markdown
Cocoa Percent versus Rating - Reading the Scatterplot above* No evident correlation. A numerical correlation gives a weak negative correlation coefficient of -0.16* The density of the graph is highest between 65% and 80% of cocoa* Chocolate bars with low cocoa percentage(less than 50%) and high cocoa percentage(above 90%) are less in number, but the most important fact is that most of these chocolate bars have a rating of less than 3,i.e they have been deemed 'Unsatisfactory'* **Seems like people do not prefer very low or very high cocoa percentages in their chocolate!** From the scatter plot above, we can infer that it would not be a good idea to guess a chocolate's rating based on its Cocoa Percentage. Where are the Best Cocoa Beans grown?
###Code
#to get the indices
countries=df['broad_bean_origin'].value_counts().index.tolist()[:5]
# countries has the top 5 countries in terms of reviews
satisfactory={} # empty dictionary
for j in countries:
c=0
b=df[df['broad_bean_origin']==j]
    br=b[b['rating']>=3] # rating of 3 or more
for i in br['rating']:
c+=1
satisfactory[j]=c
# Code to visualize the countries that give best cocoa beans
li=satisfactory.keys()
plt.figure(figsize=(10,5))
plt.bar(range(len(satisfactory)), satisfactory.values(), align='center',color=['#a22a2a','#511515','#e59a9a','#d04949','#a22a2a'])
plt.xticks(range(len(satisfactory)), list(li))
plt.xlabel('\nCountry')
plt.ylabel('Number of chocolate bars')
plt.title("Top 5 Broad origins of the Chocolate Beans with a Rating above 3.0\n")
plt.show()
print(satisfactory)
###Output
_____no_output_____
###Markdown
Venezuela has the largest number of chocolate bars rated above 3.0
###Code
#to get the indices
countries=df['broad_bean_origin'].value_counts().index.tolist()[:5]
# countries has the top 5 countries in terms of reviews
best_choc={} # empty dictionary
for j in countries:
c=0
b=df[df['broad_bean_origin']==j]
br=b[b['rating']>=4] # rating more than 4
for i in br['rating']:
c+=1
best_choc[j]=c
# Code to visualize the countries that give best cocoa beans
li=best_choc.keys()
plt.figure(figsize=(10,5))
plt.bar(range(len(best_choc)), best_choc.values(), align='center',color=['#a22a2a','#511515','#a22a2a','#d04949','#e59a9a'])
plt.xticks(range(len(best_choc)), list(li))
plt.xlabel('Country')
plt.ylabel('Number of chocolate bars')
plt.title("Top 5 Broad origins of the Chocolate Beans with a Rating above 4.0\n")
plt.show()
print(best_choc)
###Output
_____no_output_____
###Markdown
* So, here we see that the best cocoa beans are also grown in Venezuela.* There are 21 bars from Venezuela that have a rating of 4 and above. Analysis of the Producing Countries!!
###Code
# Countries
print ('Top Chocolate Producing Countries in the World\n')
country=list(df['company_location'].value_counts().head(10).index)
choco_bars=list(df['company_location'].value_counts().head(10))
prod_ctry=dict(zip(country,choco_bars))
print(df['company_location'].value_counts().head())
plt.figure(figsize=(10,5))
plt.hlines(y=country,xmin=0,xmax=choco_bars,color='skyblue')
plt.plot(choco_bars,country,"o")
plt.xlabel('Country')
plt.ylabel('Number of chocolate bars')
plt.title("Top Chocolate Producing Countries in the World")
plt.show()
###Output
Top Chocolate Producing Countries in the World
U.S.A. 764
France 156
Canada 125
U.K. 96
Italy 63
Name: company_location, dtype: int64
###Markdown
U.S.A. has far more chocolate companies than any other country according to this data. Would it seem like a decent guess if we said that the U.S.A. also consumes the most chocolate, as in *'More the demand, more the production!'*? **Let's leave that to speculation!**
###Code
#reusing code written before
countries=country
best_choc={} # empty dictionary
for j in countries:
c=0
b=df[df['company_location']==j]
br=b[b['rating']>=4] # rating more than 4
for i in br['rating']:
c+=1
best_choc[j]=c
# Code to visualize the countries that produce the best choclates
li=best_choc.keys()
# The lollipop plot
plt.hlines(y=li,xmin=0,xmax=best_choc.values(),color='darkgreen')
plt.plot(best_choc.values(),li,"o")
plt.xlabel('Country')
plt.ylabel('Number of chocolate bars')
plt.title("Top Chocolate Producing Countries in the World (Ratings above 4.0)")
plt.show()
print(best_choc)
###Output
_____no_output_____ |
wine/wine_model.ipynb | ###Markdown
Loading the datasets into the notebook.
###Code
# Imports required by the cells below
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df_train = pd.read_csv('train_wine.csv')
df_test = pd.read_csv('test_wine.csv')
df_ss = pd.read_csv('sample_submission.csv')
df_test = pd.merge(df_test, df_ss, on='id')
df = df_train.copy()
df_train.head()
df_test.head()
df_train.dtypes
f, axes = plt.subplots(1, 2, figsize=(20, 7))
sns.countplot(x = 'class',data = df_train, ax=axes[0])
sns.countplot(x = 'class',data = df_ss, ax=axes[1])
# df_train = df_train[(df_train['class'] != 'poor') & (df_train['class'] != 'excellent')]
# f, axes = plt.subplots(1, 2, figsize=(20, 7))
# sns.countplot(x = 'class',data = df_train, ax=axes[0])
# sns.countplot(x = 'class',data = df_ss, ax=axes[1])
df['class'].unique()
###Output
_____no_output_____
###Markdown
From the above count plots we can see that we have an **imbalanced dataset**. In a later part of this notebook we will oversample our dataset to have the same number of samples for each class. Encoding features whose type is an object. In order to train the model, it is necessary to convert strings and other object types into integers.
###Code
from sklearn import preprocessing
df = df_train.copy()
labelencoders = {}
for o in df_train.select_dtypes('object').columns:
labelencoders[o] = preprocessing.LabelEncoder()
labelencoders[o].fit(df[o].unique())
print('------------ '+o+' ------------')
print(df[o].unique())
print(labelencoders[o].transform(df[o].unique()))
df[o] = labelencoders[o].transform(df_train[o].values)
df_test[o] = labelencoders[o].transform(df_test[o].values)
df.head()
###Output
_____no_output_____
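###Markdown
Because the fitted encoders are kept in the labelencoders dictionary, integer predictions can later be mapped back to the original class names. A small sketch (the integer values here are hypothetical placeholders for model predictions):
###Code
# Hypothetical encoded predictions mapped back to class labels
labelencoders['class'].inverse_transform([0, 2, 1])
###Output
_____no_output_____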
###Markdown
Correlation matrix - Selecting the right features for training. Displaying the correlation matrix for each feature and checking which ones have the biggest influence on the wine class.
###Code
plt.figure(figsize=(10,10))
c= df.corr()
sns.heatmap(c, cmap="RdBu_r")
cor_matrix_sorted = c['class'].sort_values()
print(cor_matrix_sorted)
###Output
alcohol -0.379473
color -0.101465
free.sulfur.dioxide -0.078976
citric.acid -0.072700
sulfur.taste -0.036649
pH -0.033551
sulphates -0.028867
acidity.variance -0.027221
id -0.016626
chlor.class -0.013276
condition 0.008383
vineyard 0.018006
residual.sugar 0.021089
acid.sulfur 0.023454
total.sulfur.dioxide 0.024315
acid.taste 0.034170
fixed.acidity 0.075804
chlorides 0.176626
density 0.254921
volatile.acidity 0.262451
class 1.000000
Name: class, dtype: float64
###Markdown
Choosing the features most correlated with the target (quality of the wine) is important in order to achieve a well-generalized model.
###Code
# labels of interes
loi = cor_matrix_sorted[:4].index.tolist() + cor_matrix_sorted[-4:-1].index.tolist() + ['class']
print(loi)
df[loi].head()
###Output
_____no_output_____
###Markdown
Data visualization and preprocessing In this training dataset there are more white wines than red wines for each class.
###Code
print((df_train.groupby('class')['color'].value_counts()))
sns.countplot(x="class", hue="color", data=df_train)
###Output
class color
excellent white 119
red 14
good white 2054
red 570
medium white 985
red 448
poor white 123
red 42
Name: color, dtype: int64
###Markdown
Citric acid histogram We can clearly see that there are some outliers above a value of 0.8.
###Code
f, axes = plt.subplots(1, 2, figsize=(15, 7))
sns.distplot(df['citric.acid'], bins=50, kde=True, ax=axes[0])
sns.boxplot('class', 'citric.acid', data=df, ax=axes[1])
###Output
_____no_output_____
###Markdown
Alcohol distribution
###Code
f, axes = plt.subplots(1, 2, figsize=(15, 7))
sns.distplot(df['alcohol'], bins=50, kde=True, ax=axes[0])
sns.boxplot('class', 'alcohol', data=df, ax=axes[1])
# Rework this data ?
###Output
_____no_output_____
###Markdown
As we can see in the plot above, alcohol is much higher for the excellent-quality wines than for the others. If the range exceeded 15 we could simply say that this could be an effect of fortified wines in the dataset, which have 16-24% alcohol and are aged exclusively in wood casks, so that might be the reason for their quality level. Free sulfur dioxide distribution
###Code
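# NOTE: inter_quartile_range() is called below but not defined in the cells shown here.
# A minimal sketch of the IQR-based outlier filter it is assumed to implement:
def inter_quartile_range(data, column):
    q1 = data[column].quantile(0.25)
    q3 = data[column].quantile(0.75)
    iqr = q3 - q1
    lower, upper = q1 - 1.5 * iqr, q3 + 1.5 * iqr
    return data[(data[column] >= lower) & (data[column] <= upper)]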
f, axes = plt.subplots(1, 2, figsize=(15, 7))
sns.distplot(df_train['free.sulfur.dioxide'], bins=50, kde=True, ax=axes[0])
sns.boxplot('class', 'free.sulfur.dioxide', data=df_train, ax=axes[1])
df = inter_quartile_range(df,'free.sulfur.dioxide')
f, axes = plt.subplots(1, 2, figsize=(15, 7))
sns.distplot(df['free.sulfur.dioxide'], bins=50, kde=True, ax=axes[0])
sns.boxplot('class', 'free.sulfur.dioxide', data=df, ax=axes[1])
###Output
_____no_output_____
###Markdown
Fixed acidity distribution
###Code
f, axes = plt.subplots(1, 2, figsize=(15, 7))
sns.distplot(df_train['fixed.acidity'], bins=50, kde=True, ax=axes[0])
sns.boxplot('class', 'fixed.acidity', data=df_train, ax=axes[1])
###Output
_____no_output_____
###Markdown
Density distribution
###Code
f, axes = plt.subplots(1, 2, figsize=(15, 7))
sns.distplot(df_train['density'], bins=50, kde=True, ax=axes[0])
sns.boxplot('class', 'density', data=df_train, ax=axes[1])
df = inter_quartile_range(df,'density')
f, axes = plt.subplots(1, 2, figsize=(15, 7))
sns.distplot(df['density'], bins=50, kde=True, ax=axes[0])
sns.boxplot('class', 'density', data=df, ax=axes[1])
###Output
_____no_output_____
###Markdown
Volatile acidity distribution
###Code
f, axes = plt.subplots(1, 2, figsize=(15, 7))
sns.distplot(df['volatile.acidity'], bins=50, kde=True, ax=axes[0])
sns.boxplot('class', 'volatile.acidity', data=df, ax=axes[1])
df = inter_quartile_range(df,'volatile.acidity')
f, axes = plt.subplots(1, 2, figsize=(15, 7))
sns.distplot(df['volatile.acidity'], bins=50, kde=True, ax=axes[0])
sns.boxplot('class', 'volatile.acidity', data=df, ax=axes[1])
f, axes = plt.subplots(1, 2, figsize=(15, 7))
sns.distplot(df['citric.acid'], bins=50, kde=True, ax=axes[0])
sns.boxplot('class', 'citric.acid', data=df, ax=axes[1])
df = inter_quartile_range(df,'citric.acid')
f, axes = plt.subplots(1, 2, figsize=(15, 7))
sns.distplot(df['citric.acid'], bins=50, kde=True, ax=axes[0])
sns.boxplot('class', 'citric.acid', data=df, ax=axes[1])
loi
###Output
_____no_output_____
###Markdown
Basically, looking at the visualizations above we can see that we are dealing with a dataset that has already been preprocessed. In a few of them we can see some small outliers, but we will try to train the model without deleting them. Oversampling imbalanced dataset
###Code
df = df[loi]
df.head()
df_t = df_test[loi]
df_t.head()
from sklearn.preprocessing import StandardScaler
# Separating data into the x and y datasets
X_train = df.drop(columns=['class'])
y_train = df['class']
X_test = df_t.drop(columns=['class'])
y_test = df_t['class']
# Standaraziation
normalizer = StandardScaler()
X_train = normalizer.fit_transform(X_train)
X_test = normalizer.transform(X_test)
from imblearn.over_sampling import RandomOverSampler
ros = RandomOverSampler(random_state=0)
X_train, y_train = ros.fit_resample(X_train, y_train)
f, axes = plt.subplots(1, 2, figsize=(20, 7))
sns.countplot(x = 'class',data = df_train, ax=axes[0])
sns.countplot(y_train, ax=axes[1])
from sklearn.svm import SVC
svm = SVC()
svm.fit(X_train, y_train)
print('Accuracy of SVM classifier on training set: {:.2f}'
.format(svm.score(X_train, y_train)))
print('Accuracy of SVM classifier on test set: {:.2f}'
.format(svm.score(X_test, y_test)))
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(X_train, y_train)
print('Accuracy of GNB classifier on training set: {:.2f}'
.format(gnb.score(X_train, y_train)))
print('Accuracy of GNB classifier on test set: {:.2f}'
.format(gnb.score(X_test, y_test)))
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
print('Accuracy of Logistic regression classifier on training set: {:.2f}'
.format(logreg.score(X_train, y_train)))
print('Accuracy of Logistic regression classifier on test set: {:.2f}'
.format(logreg.score(X_test, y_test)))
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier().fit(X_train, y_train)
print('Accuracy of Decision Tree classifier on training set: {:.2f}'
.format(clf.score(X_train, y_train)))
print('Accuracy of Decision Tree classifier on test set: {:.2f}'
.format(clf.score(X_test, y_test)))
###Output
Accuracy of Decision Tree classifier on training set: 1.00
Accuracy of Decision Tree classifier on test set: 0.65
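###Markdown
Accuracy alone can be misleading on imbalanced test data; a small sketch of a per-class breakdown for the decision tree (any of the fitted classifiers above could be substituted):
###Code
from sklearn.metrics import classification_report
# Per-class precision, recall and F1 for the decision tree predictions
print(classification_report(y_test, clf.predict(X_test)))
###Output
_____no_output_____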
###Markdown
Neural Network implementation
###Code
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import EarlyStopping
from keras import optimizers
from keras.utils import to_categorical
#one-hot encode target column
train_y_2 = to_categorical(y_train)
test_y_2 = to_categorical(y_test, num_classes = 4)
#vcheck that target column has been converted
print(train_y_2[0:4])
print('---------------')
print(test_y_2[0:6])
adam = optimizers.Adam(learning_rate=0.00005, beta_1=0.9, beta_2=0.999, amsgrad=False)
#set early stopping monitor so the model stops training when it won't improve anymore
early_stopping_monitor = EarlyStopping(patience=3)
# create model
model = Sequential()
#get number of columns in training data
n_cols = X_train.shape[1]
#add model layers
model.add(Dense(60, activation='relu', input_dim = n_cols))
model.add(Dense(60, activation='relu'))
model.add(Dense(60, activation='relu'))
model.add(Dense(60, activation='relu'))
model.add(Dense(60, activation='relu'))
model.add(Dense(4, activation='softmax'))
#compile model using accuracy to measure model performance
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
#train model , callbacks=[early_stopping_monitor]
history = model.fit(X_train, train_y_2, epochs=100, batch_size=32, validation_data=(X_test, test_y_2))
# Plot training & validation accuracy values
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
###Output
_____no_output_____
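###Markdown
A quick sketch of evaluating the trained network on the held-out set with Keras (values will vary from run to run):
###Code
# Loss and accuracy on the held-out set
loss, acc = model.evaluate(X_test, test_y_2, verbose=0)
print(loss, acc)
###Output
_____no_output_____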
###Markdown
For 2 classes "good" and "medium"Below we can see model accuracy and loss from the model with deleted classes "excellent" and "poor"
###Code
# Plot training & validation accuracy values
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
###Output
_____no_output_____ |
3_Logistic_Proceptron/4b_perceptron.ipynb | ###Markdown
Perceptron The aim of this worksheet is to review the steps involved in the *perceptron* training algorithm, and to assess how this method can behave in practical scenarios. As a first step required for both parts, set up the IPython notebook environment to include numpy, scipy, matplotlib etc.
###Code
%pylab inline
import numpy as np
import matplotlib.pyplot as plt
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
In this tutorial, we are going to use synthetic data. The advantage of using synthetic data is that we have control over the shape of the data, which is useful in studying properties of machine learning methods. We are going to generate data using a function defined below. This function produces an S-shaped dataset which is mostly separable, but not necessarily linearly separable. We can control the degree of separability. The resulting dataset is going to be two-dimensional (so that we can plot it) with a binary label. That is, the dataset is an $N\times2$ array of instances coupled with an $N\times1$ array of labels. The classes are encoded as $-1$ and $1$. Since the dataset is a tuple of two arrays, we are going to use a special data structure called a *named tuple* from the Python module *collections*.
###Code
import collections
def generate_s_shaped_data(gap=3):
x = np.random.randn(80, 2)
x[10:20] += np.array([3, 4])
x[20:30] += np.array([0, 8])
x[30:40] += np.array([3, 12])
x[40:50] += np.array([gap, 0])
x[50:60] += np.array([3 + gap, 4])
x[60:70] += np.array([gap, 8])
x[70:80] += np.array([3 + gap, 12])
t = np.hstack([-np.ones(40), np.ones(40)])
d = collections.namedtuple('Dataset', ['x', 't'])
d.x = x
d.t = t
return d
###Output
_____no_output_____
###Markdown
A perceptron is a linear classifier. Therefore, we will aim to generate linearly separable data.We start with generating training data.
###Code
d = generate_s_shaped_data(8)
print(d.x)
print(d.t)
x = d.x
y = d.t
plt.plot(x[y==-1,0], x[y==-1,1], "o")
plt.plot(x[y==1,0], x[y==1,1], "o")
###Output
[[ -9.70576856e-01 -1.21783402e+00]
[ -5.22086732e-01 -1.32132156e+00]
[ 3.48133223e-01 -1.14587810e+00]
[ 5.31941369e-04 -7.88825086e-01]
[ 5.52237333e-01 6.67452388e-01]
[ 8.23342847e-01 1.69446203e+00]
[ -1.78365722e-01 1.16728948e+00]
[ -1.40854597e+00 -6.10354643e-02]
[ 3.33896408e-02 -3.78739983e-01]
[ 9.31117005e-02 -3.24659230e-01]
[ 4.22549826e+00 4.60741779e+00]
[ 1.95581028e+00 4.67840027e+00]
[ 2.02719192e+00 4.00057089e+00]
[ 1.69068383e+00 4.04530397e+00]
[ 3.22424177e+00 3.10260329e+00]
[ 1.35929307e+00 3.94525841e+00]
[ 3.84475051e+00 4.72571747e+00]
[ 2.17720950e+00 3.14047071e+00]
[ 4.53712199e+00 4.46563426e+00]
[ 2.44911453e+00 2.73031954e+00]
[ -2.19663525e+00 8.40183246e+00]
[ 1.49454547e+00 8.33310721e+00]
[ -1.85957914e+00 7.90151226e+00]
[ -3.58638622e-01 7.27651480e+00]
[ -9.73159853e-01 8.08281332e+00]
[ 2.01214848e-01 6.52471862e+00]
[ 5.90897746e-01 5.90756584e+00]
[ 6.36751922e-01 8.01493445e+00]
[ -5.76948748e-01 5.74466926e+00]
[ 1.31466220e+00 8.60335729e+00]
[ 2.89089370e+00 1.04432227e+01]
[ 2.92490013e+00 1.12826108e+01]
[ 2.64939081e+00 1.01774238e+01]
[ 3.75327806e+00 1.30875842e+01]
[ 3.61260536e+00 1.10839840e+01]
[ 5.04711307e+00 1.10932748e+01]
[ 3.77352399e+00 1.31446308e+01]
[ 2.94418195e+00 1.33860582e+01]
[ 5.27584001e+00 1.02215987e+01]
[ 1.35389984e+00 1.35996816e+01]
[ 7.77855307e+00 1.31214064e-02]
[ 9.16542403e+00 1.48556882e-01]
[ 9.08669616e+00 5.04002350e-01]
[ 6.38793085e+00 8.28561270e-01]
[ 6.94930523e+00 2.95707097e-01]
[ 8.08985219e+00 -3.24926363e-01]
[ 8.08360216e+00 1.07506992e+00]
[ 7.23254516e+00 -6.24850160e-01]
[ 8.38632343e+00 1.64392389e-01]
[ 8.89038334e+00 1.50422031e+00]
[ 9.57677900e+00 4.49812158e+00]
[ 1.24403066e+01 4.36697767e+00]
[ 1.08602405e+01 3.58802636e+00]
[ 1.24699310e+01 3.52573140e+00]
[ 1.02090184e+01 2.98198773e+00]
[ 1.15592463e+01 4.50220669e+00]
[ 1.02214149e+01 3.99002258e+00]
[ 1.01140609e+01 4.50876063e+00]
[ 1.08957724e+01 4.80070047e+00]
[ 1.13562199e+01 3.30568330e+00]
[ 8.92436404e+00 8.64724502e+00]
[ 7.73836714e+00 8.24160477e+00]
[ 9.65606649e+00 9.57389013e+00]
[ 6.64373824e+00 7.29261074e+00]
[ 6.39272289e+00 8.51107421e+00]
[ 8.93055338e+00 8.48674493e+00]
[ 6.58089400e+00 7.57376621e+00]
[ 9.07862235e+00 9.39234843e+00]
[ 8.04599555e+00 7.32946885e+00]
[ 7.99454876e+00 8.37269346e+00]
[ 1.02720489e+01 1.30028999e+01]
[ 1.11405788e+01 1.23527975e+01]
[ 1.10104654e+01 1.29688852e+01]
[ 1.21021986e+01 1.15601582e+01]
[ 9.84677346e+00 1.22362306e+01]
[ 1.03515340e+01 1.20297636e+01]
[ 1.18352573e+01 1.24594133e+01]
[ 1.09930460e+01 1.15107302e+01]
[ 1.35225507e+01 1.27878903e+01]
[ 1.09269093e+01 1.12696807e+01]]
[-1. -1. -1. -1. -1. -1. -1. -1. -1. -1. -1. -1. -1. -1. -1. -1. -1. -1.
-1. -1. -1. -1. -1. -1. -1. -1. -1. -1. -1. -1. -1. -1. -1. -1. -1. -1.
-1. -1. -1. -1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.
1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.
1. 1. 1. 1. 1. 1. 1. 1.]
###Markdown
Perceptron algorithm Next we will train a binary classifier on this data. For this we’ll use the perceptron algorithm, which you should recall takes a model of the form$$\begin{align*} s(\mathbf{x}) &= w_0 + \mathbf{w}' \mathbf{x} \\ predict(\mathbf{x}) &= \left\{ \begin{array}{cc} 1, & \mbox{if $s(\mathbf{x}) \geq 0$} \\-1, & \mbox{otherwise}\end{array} \right.\end{align*}$$ For simplicity, we will use the standard trick to incorporate the bias term $w_0$ into the weights $\mathbf{w}$ by using a basis function $\phi(x_1, x_2) = [1~x_1~x_2]'$ which adds an extra constant dimension. The model becomes$$ s(\mathbf{x}) = \mathbf{w}' \phi(\mathbf{x}) $$To do this, simply concatenate a column of 1s to the data matrix.
###Code
Phi = np.column_stack([np.ones(x.shape[0]), x])
print (Phi.shape, Phi.min(),Phi.max())
###Output
(80, 3) -2.19663525046 13.59968158
###Markdown
Note that Phi now has $3$ columns. In this array, each training instance is a row and each column is a feature. From now on we will use Phi instead of x. Each row represents $\phi(\mathbf{x})$ for a training instance. Prediction function Next, write the prediction function (aka discriminant). This takes as input a data point (a row from Phi, i.e., a vector of 3 numbers) and the model parameters ($\mathbf{w}$) and outputs predicted label $1$ or $-1$. Recall that if $s(\mathbf{x})=0$, the predicted class is $1$.
###Code
def perc_pred(phi, w):
# s = np.dot(phi, w) # over to you
s = np.sign(np.sign(np.dot(phi, w))+0.5)
# s = [1 if x>=0 else -1 for x in s]
return s
###Output
_____no_output_____
###Markdown
Don't forget to test your prediction function with some examples! Note that it's more useful if it can support phi inputs both as vectors (returning a scalar, either +1/-1) and as matrices (returning a vector of +1/-1 values). The latter allows for you to supply a full dataset in one call.
###Code
print(perc_pred([1, 0, 1], [1, 2, 3]))
print(perc_pred(Phi, [1,2,3]))
###Output
[-1. -1. -1. -1. 1. 1. 1. -1. -1. 1. 1. 1. 1. 1. 1. 1. 1. 1.
1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.
1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.
1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.
1. 1. 1. 1. 1. 1. 1. 1.]
###Markdown
Training algorithm Now for training algorithm which fits the weights, $\mathbf{w}$, to the training data. Recall that this is an online training algorithm, and we are going to iterate through the training examples one by one. Moreover, we are going to do several cycles, called *epochs*, such that we iterate through the entire training set within one epoch. Write a function called *train* which takes the basis data matrix *Phi*, the labels *t* and a number of epochs. This should implement the following pseudo-code: > initialise weights to zero > repeat epoch times> > for each x and t pair in the training set> > > if model prediction and y differ, make weight update> return weights The weight update in the inner loop is $\mathbf{w} \leftarrow \mathbf{w} + y \phi(\mathbf{x})$.What is the purpose of this update?
###Code
def train(data, target, epochs, w , eta= 1.):
for e in range(epochs):
for i in range(data.shape[0]):
yhat = perc_pred(data[i,:], w) # over to you?????
if yhat != target[i]:
w = w + np.dot(np.dot(eta,target[i]),data[i]) # over to you
return w
###Output
_____no_output_____
###Markdown
Run your training algorithm for 5 epochs to learn the weights
###Code
w = np.zeros(Phi.shape[1])
w = train(Phi, y, 5, w)
w
###Output
_____no_output_____
###Markdown
Evaluation We are going to use the proportion of correctly classified cases (accuracy) as the quality measure.
###Code
Accuracy = np.sum(perc_pred(Phi,w)==y) / float(y.shape[0]) # over to you
print(Accuracy)
###Output
0.7125
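###Markdown
A quick sketch of the experiment suggested below: re-train from a zero initialisation for several epoch budgets and report training accuracy (results vary with the random data):
###Code
for n_epochs in (10, 100, 1000):
    w_tmp = train(Phi, y, n_epochs, np.zeros(Phi.shape[1]))
    acc = np.sum(perc_pred(Phi, w_tmp) == y) / float(y.shape[0])
    print(n_epochs, acc)
###Output
_____no_output_____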
###Markdown
Rerun your training with a larger number of epochs (10, 100, 1000), and evaluate how the accuracy changes. Heldout evaluation Evaluating on the training data is not a good idea in general, other than for debugging your algorithms. (Can you explain why?) We are going to generate another synthetic dataset, thus essentially creating a fresh *heldout set*. What is the accuracy on this heldout data, and how does this compare to training accuracy?
###Code
d_held = generate_s_shaped_data(8)
x_heldout = d_held.x
y_heldout = d_held.t
plt.plot(x[y==-1,0], x[y==-1,1], "o")
plt.plot(x[y==1,0], x[y==1,1], "o")
# plot the heldout data points
plt.plot(x_heldout[y_heldout==-1,0], x_heldout[y_heldout==-1,1], "x")
plt.plot(x_heldout[y_heldout==1,0], x_heldout[y_heldout==1,1], "x")
Phi_heldout = np.column_stack([np.ones(x_heldout.shape[0]), x_heldout])
###Output
_____no_output_____
###Markdown
Now, let's calculate the accuracy measure for the held-out set
###Code
Accuracy = np.sum(perc_pred(Phi_heldout,w)==y_heldout) / float(y_heldout.shape[0]) # over to you
print(Accuracy)
###Output
0.725
###Markdown
Inspect the weights learnt in training. Do these match your intuitions? Plot the decision boundary represented by the weights, $\mathbf{w}' \phi(\mathbf{x}) = 0$. Solving for $x_2$ as a function of $x_1$ yields $x_2 = -\frac{w_0}{w_2} - \frac{w_1}{w_2} x_1$. Note that you can *linspace* and *plot* for displaying the line.
###Code
x1 = np.linspace(0, 6, 100)
print(w)
x2 = - (w[0] / w[2]) - ((w[1] / w[2]) * x1)
# plot the training data points
plt.plot(x[y==-1,0], x[y==-1,1], "o")
plt.plot(x[y==1,0], x[y==1,1], "o")
# plot the heldout data points
plt.plot(x_heldout[y_heldout==-1,0], x_heldout[y_heldout==-1,1], "x")
plt.plot(x_heldout[y_heldout==1,0], x_heldout[y_heldout==1,1], "x")
# plot the decision boundary
plt.plot(x1, x2)
xlabel('x1')
ylabel('x2')
###Output
[-11. 18.08020698 -0.89515782]
###Markdown
How well does the decision boundary separate the points in the two classes? Where do you think the decision boundary should go? And how does the boundary change as you train for longer (more epochs)? Plot train and heldout errors as a function of the number of epochs. Note that careful tuning of the learning rate is needed to get sensible behaviour. Using $\eta = \frac{1}{1+e}$ where $e$ is the epoch number often works well.
###Code
w_hat = np.zeros(Phi.shape[1])
T = 60
train_error = np.zeros(T)
heldout_error = np.zeros(T)
for e in range(T):
# here we use a learning rate, which decays with each epoch
lr = 1./(1+e)
    w_hat = train(Phi, y, 1, w_hat, lr) # over to you
    train_error[e] = np.sum(perc_pred(Phi, w_hat) != y) / float(y.shape[0]) # over to you
    heldout_error[e] = np.sum(perc_pred(Phi_heldout, w_hat) != y_heldout) / float(y_heldout.shape[0]) # over to you
plot(train_error, label = 'Train Error')
plot(heldout_error, label = 'Held-out Error')
plt.legend()
xlabel('Epochs')
ylabel('Error')
###Output
_____no_output_____
###Markdown
Does the heldout error track the training error closely? Is the model (i.e., weights at a given epoch) on the training set the same as the best model on the heldout set?Now, let's plot the decision boundary using w_hat
###Code
x1 = np.linspace(2, 10, 100)
print(w_hat)
x2 = - (w_hat[0] / w_hat[2]) - ((w_hat[1] / w_hat[2]) * x1)
# plot the training data points
plt.plot(x[y==-1,0], x[y==-1,1], "o")
plt.plot(x[y==1,0], x[y==1,1], "o")
# plot the heldout data points
plt.plot(x_heldout[y_heldout==-1,0], x_heldout[y_heldout==-1,1], "x")
plt.plot(x_heldout[y_heldout==1,0], x_heldout[y_heldout==1,1], "x")
# plot the decision boundary
plt.plot(x1, x2)
xlabel('x1')
ylabel('x2')
###Output
_____no_output_____ |
run_fem_frame.ipynb | ###Markdown
**Just run the cell below and enter the correct inputs in the required simple format** You will get the complete analysis along with the external and reaction force diagram as shown in the output cell. *In case you are stuck, please follow the example cell.* **Note:** Make sure you have the *fem_frame_complete_analysis.ipynb* file in the same folder as the *run_fem_frame.ipynb* file.
###Code
%run fem_frame_complete_analysis.ipynb
###Output
Enter total number of nodes: 3
Enter (x,y) coordinates of node-1 (Example: 2,-3.7): 0,0
Enter support type at node-1 ---> write 'P/p' for Planar and 'I/i' for Inclined: p
Enter constraint type at node-1 ---> write 'f' for Fixed, 'fr' for Free, 'h' for hinged and 'r' for Roller support: f
Enter (x,y) coordinates of node-2 (Example: 2,-3.7): 6,0
Enter support type at node-2 ---> write 'P/p' for Planar and 'I/i' for Inclined: p
Enter constraint type at node-2 ---> write 'f' for Fixed, 'fr' for Free, 'h' for hinged and 'r' for Roller support: h
Enter external force Mz at node-2 (Example: 0.75): -4
Enter (x,y) coordinates of node-3 (Example: 2,-3.7): 3,4
Enter support type at node-3 ---> write 'P/p' for Planar and 'I/i' for Inclined: p
Enter constraint type at node-3 ---> write 'f' for Fixed, 'fr' for Free, 'h' for hinged and 'r' for Roller support: fr
Enter external force (Fx,Fy,Mz) at node-3 (Example: 1.2,-2,4.76 or 0,2.75,0): 3,-4,-6
Enter total number of elements: 2
Enter (From_node, To_node) for element-1 (Example: 2,3): 1,3
Enter Young's modulus of element-1 (Example: 2.27e10 if E = 2.27*10^10 or 2.7e10*(2**(0.5)) if E = 2.7*sqrt(2)*10^10): 1.5e2
Enter area of element-1 (Example: 2.27e-3 if A = 2.27*10^-3 or 2.7*(2**(0.5)) if A = 2.7*sqrt(2)): 1e-2
Enter Izz of element-1 (Example: 2.27e-3 if I = 2.27*10^-3 or 2.7*(2**(0.5)) if I = 2.7*sqrt(2)): 2.7e-2*sqrt(2)
Enter (From_node, To_node) for element-2 (Example: 2,3): 2,3
Enter Young's modulus of element-2 (Example: 2.27e10 if E = 2.27*10^10 or 2.7e10*(2**(0.5)) if E = 2.7*sqrt(2)*10^10): 1
Enter area of element-2 (Example: 2.27e-3 if A = 2.27*10^-3 or 2.7*(2**(0.5)) if A = 2.7*sqrt(2)): 2e-4
Enter Izz of element-2 (Example: 2.27e-3 if I = 2.27*10^-3 or 2.7*(2**(0.5)) if I = 2.7*sqrt(2)): 3e-1
LOCAL STIFFNESS MATRIX k1:
[[ 0.4599 -0.1199 -1.0997 -0.4599 0.1199 -1.0997]
[-0.1199 0.3899 0.8248 0.1199 -0.3899 0.8248]
[-1.0997 0.8248 4.5821 1.0997 -0.8248 2.291 ]
[-0.4599 0.1199 1.0997 0.4599 -0.1199 1.0997]
[ 0.1199 -0.3899 -0.8248 -0.1199 0.3899 -0.8248]
[-1.0997 0.8248 2.291 1.0997 -0.8248 4.5821]]
LOCAL STIFFNESS MATRIX k2:
[[ 0.0184 0.0138 -0.0576 -0.0184 -0.0138 -0.0576]
[ 0.0138 0.0104 -0.0432 -0.0138 -0.0104 -0.0432]
[-0.0576 -0.0432 0.24 0.0576 0.0432 0.12 ]
[-0.0184 -0.0138 0.0576 0.0184 0.0138 0.0576]
[-0.0138 -0.0104 0.0432 0.0138 0.0104 0.0432]
[-0.0576 -0.0432 0.12 0.0576 0.0432 0.24 ]]
GLOBAL STIFFNESS MATRIX K:
[[ 0.4599 -0.1199 -1.0997 0. 0. 0. -0.4599 0.1199 -1.0997]
[-0.1199 0.3899 0.8248 0. 0. 0. 0.1199 -0.3899 0.8248]
[-1.0997 0.8248 4.5821 0. 0. 0. 1.0997 -0.8248 2.291 ]
[ 0. 0. 0. 0.0184 0.0138 -0.0576 -0.0184 -0.0138 -0.0576]
[ 0. 0. 0. 0.0138 0.0104 -0.0432 -0.0138 -0.0104 -0.0432]
[ 0. 0. 0. -0.0576 -0.0432 0.24 0.0576 0.0432 0.12 ]
[-0.4599 0.1199 1.0997 -0.0184 -0.0138 0.0576 0.4783 -0.1061 1.1573]
[ 0.1199 -0.3899 -0.8248 -0.0138 -0.0104 0.0432 -0.1061 0.4003 -0.7816]
[-1.0997 0.8248 2.291 -0.0576 -0.0432 0.12 1.1573 -0.7816 4.8221]]
TRANSFORMED GLOBAL STIFFNESS MATRIX K:
[[ 0.4599 -0.1199 -1.0997 0. 0. 0. -0.4599 0.1199 -1.0997]
[-0.1199 0.3899 0.8248 0. 0. 0. 0.1199 -0.3899 0.8248]
[-1.0997 0.8248 4.5821 0. 0. 0. 1.0997 -0.8248 2.291 ]
[ 0. 0. 0. 0.0184 0.0138 -0.0576 -0.0184 -0.0138 -0.0576]
[ 0. 0. 0. 0.0138 0.0104 -0.0432 -0.0138 -0.0104 -0.0432]
[ 0. 0. 0. -0.0576 -0.0432 0.24 0.0576 0.0432 0.12 ]
[-0.4599 0.1199 1.0997 -0.0184 -0.0138 0.0576 0.4783 -0.1061 1.1573]
[ 0.1199 -0.3899 -0.8248 -0.0138 -0.0104 0.0432 -0.1061 0.4003 -0.7816]
[-1.0997 0.8248 2.291 -0.0576 -0.0432 0.12 1.1573 -0.7816 4.8221]]
DISPLACEMENT MATRIX Q:
[[ 0. ]
[ 0. ]
[ 0. ]
[ 0. ]
[ 0. ]
[-13.7474]
[ 34.3609]
[-25.2507]
[-13.2415]]
REACTION FORCE R:
[[-4.2693]
[ 3.046 ]
[28.2759]
[ 1.2693]
[ 0.954 ]
[ 0. ]
[ 0. ]
[ 0. ]
[ 0. ]]
External Forces
###Markdown
Example
###Code
'''
Enter total number of nodes: 3
Enter (x,y) coordinates of node-1 (Example: 2,-3.7): 0,0
Enter support type at node-1 ---> write 'P/p' for Planar and 'I/i' for Inclined: p
Enter constraint type at node-1 ---> write 'f' for Fixed, 'fr' for Free, 'h' for hinged and 'r' for Roller support: f
Enter (x,y) coordinates of node-2 (Example: 2,-3.7): 6,0
Enter support type at node-2 ---> write 'P/p' for Planar and 'I/i' for Inclined: p
Enter constraint type at node-2 ---> write 'f' for Fixed, 'fr' for Free, 'h' for hinged and 'r' for Roller support: h
Enter external force Mz at node-2 (Example: 0.75): -3
Enter (x,y) coordinates of node-3 (Example: 2,-3.7): 3,4
Enter support type at node-3 ---> write 'P/p' for Planar and 'I/i' for Inclined: p
Enter constraint type at node-3 ---> write 'f' for Fixed, 'fr' for Free, 'h' for hinged and 'r' for Roller support: fr
Enter external force (Fx,Fy,Mz) at node-3 (Example: 1.2,-2,4.76 or 0,2.75,0): 3,-6,4
Enter total number of elements: 2
Enter (From_node, To_node) for element-1 (Example: 2,3): 1,3
Enter Young's modulus of element-1 (Example: 2.27e10 if E = 2.27*10^10 or 2.7e10*(2**(0.5)) if E = 2.7*sqrt(2)*10^10): 1
Enter area of element-1 (Example: 2.27e-3 if A = 2.27*10^-3 or 2.7*(2**(0.5)) if A = 2.7*sqrt(2)): 1
Enter Izz of element-1 (Example: 2.27e-3 if I = 2.27*10^-3 or 2.7*(2**(0.5)) if I = 2.7*sqrt(2)): 1
Enter (From_node, To_node) for element-2 (Example: 2,3): 2,3
Enter Young's modulus of element-2 (Example: 2.27e10 if E = 2.27*10^10 or 2.7e10*(2**(0.5)) if E = 2.7*sqrt(2)*10^10): 1
Enter area of element-2 (Example: 2.27e-3 if A = 2.27*10^-3 or 2.7*(2**(0.5)) if A = 2.7*sqrt(2)): 1
Enter Izz of element-2 (Example: 2.27e-3 if I = 2.27*10^-3 or 2.7*(2**(0.5)) if I = 2.7*sqrt(2)): 2.7*(2**(0.5))
'''
print("Done")
###Output
Done
|
misc/deep_learning_notes/pytorch_notes/Torch Vanilla RNN Learning to speak Shakespeare.ipynb | ###Markdown
Overview

A simple vanilla RNN implementation of a Shakespeare generator with PyTorch. The generator is trained character by character, using a single hidden-layer unit. The result is mediocre; some parameter tweaking might make it perform better.

Surprises

Todo:
- [ ] play with hyper parameters

Done:
- [x] Add temperature to generator
- [x] get training to work
- [x] use optim and Adam
- [x] add self-feeding generator
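For reference, the recurrence implemented by the `VanillaRNN` module below is a single affine map of the concatenated input and hidden state, followed by a linear readout (note that this particular implementation applies no nonlinearity to the hidden state):

\begin{align}
h_t &= W_{xh}\,[x_t;\, h_{t-1}] + b_{xh} \\
o_t &= W_{ho}\, h_t + b_{ho}
\end{align}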
###Code
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from utils import forward_tracer, backward_tracer, Char2Vec, num_flat_features
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
from tqdm import tqdm
from IPython.display import clear_output
source = "";
with open('./data/shakespeare.txt', 'r') as f:
for line in f:
source += line + "\n"
source +=" " * 606
print([source[:60]])
len(source)
class VanillaRNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(VanillaRNN, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.xhh = torch.nn.Linear(input_size + hidden_size, hidden_size)
self.ho = torch.nn.Linear(hidden_size, output_size)
self.softmax = F.softmax
def forward(self, x, hidden):
xs = x.chunk(x.size()[0])
outputs = []
for row_vec in xs:
hidden = self.xhh(torch.cat((row_vec, hidden), 1))
outputs.append(self.ho(hidden))
output = torch.cat(outputs, 0)
return output, hidden
def init_hidden(self, random=False):
if random:
return Variable(torch.randn(1, self.hidden_size))
else:
return Variable(torch.zeros(1, self.hidden_size))
"""
rnn = VanillaRNN(100, 120, 90)
hidden = rnn.init_hidden()
rnn(Variable(torch.randn(1, 100)), hidden, 10)"""
class Shakespeare():
def __init__(self, model):
self.model = model
self.char2vec = Char2Vec()
self.loss = 0
self.losses = []
def init_hidden_(self, random=False):
self.hidden = model.init_hidden(random)
return self
def save(self, fn="Vanilla_RNN_Shakespeare.tar"):
torch.save({
"hidden": self.hidden,
"state_dict": model.state_dict(),
"losses": self.losses
}, fn)
def load(self, fn):
checkpoint = torch.load(fn)
self.hidden = checkpoint['hidden']
model.load_state_dict(checkpoint['state_dict'])
self.losses = checkpoint['losses']
def setup_training(self, learning_rate):
self.optimizer = optim.Adam(model.parameters(), lr=learning_rate)
self.loss_fn = nn.CrossEntropyLoss()
self.init_hidden_()
def reset_loss(self):
self.loss = 0
def forward(self, input_text, target_text):
self.hidden.detach_()
self.optimizer.zero_grad()
self.next_(input_text)
target_vec = Variable(self.char2vec.char_code(target_text))
self.loss += self.loss_fn(self.output, target_vec)
def descent(self):
self.loss.backward()
self.optimizer.step()
self.losses.append(self.loss.cpu().data.numpy())
self.reset_loss()
def embed(self, input_data):
self.embeded = Variable(self.char2vec.one_hot(input_data))
return self.embeded
def next_(self, input_text):
self.output, self.hidden = self.model(
self.embed(input_text)#.view(1, -1, self.model.input_size)
, self.hidden)
return self
def softmax_(self, temperature=0.5):
self.softmax = self.model.softmax(self.output/temperature)
return self
def text(self, start=None, end=None):
        indices = torch.multinomial(self.softmax[start:end], num_samples=1).view(-1)
        return self.char2vec.vec2str(indices)
input_size = 100 # len(char2vec.chars)
hidden_size = input_size
model = VanillaRNN(input_size, hidden_size, input_size)
william = Shakespeare(model)
# william.load('./data/Vanilla_RNN_Shakespeare.tar')
learning_rate = 0.2e-4
william.setup_training(learning_rate)
model.zero_grad()
william.reset_loss()
seq_length = 50
batches = int(len(source)/seq_length)
for epoch_num in range(70):
for step in tqdm(range(batches)):
source_ = source[step*seq_length:(step+1)*seq_length]
william.forward(source_, source_[1:] + " ")
#william.descent()
if step%50 == 49:
william.descent()
if step%1000 == 999:
clear_output(wait=True)
print('Epoch {:d}'.format(epoch_num))
william.softmax_()
plt.figure(figsize=(12, 9))
plt.subplot(131)
plt.title("Input")
plt.imshow(william.embeded[:130].data.byte().numpy(), cmap="Greys_r", interpolation="none")
plt.subplot(132)
plt.title("Output")
plt.imshow(william.output[:130].data.byte().numpy(), cmap="Greys_r", interpolation="none")
plt.subplot(133)
plt.title("Softmax Output")
plt.imshow(william.softmax[:130].cpu().data.numpy(), cmap="Greys_r", interpolation="none")
plt.show()
plt.figure(figsize=(10, 3))
plt.title('Training loss')
plt.plot(william.losses, label="loss", linewidth=3, alpha=0.4)
plt.show()
print(william.text(0,150))
###Output
Epoch 69
###Markdown
Now use the network to generate text!
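Recall that `softmax_` divides the logits by the temperature $T$ before applying the softmax, so sampling uses $p_i = \exp(z_i/T) \,/\, \sum_j \exp(z_j/T)$: a low temperature makes the generator greedier (more repetitive), while a high temperature makes it more random.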
###Code
william.save('./data/Vanilla_RNN_Shakespeare.tar')
from ipywidgets import widgets
from IPython.display import display
def predict_next(input_text, gen_length=None, temperature=0.05):
if gen_length is None:
gen_length = len(input_text)
clear_output(wait=True)
william = Shakespeare(model).init_hidden_(random=True)
william.next_(input_text)
william.softmax_()
string_output = william.text()
for i in range(1, gen_length - len(input_text)):
last_char = string_output[-1]
william.next_(last_char)
william.softmax_(temperature)
string_output += william.text()
print(string_output)
plt.figure(figsize=(12, 9))
plt.subplot(131)
plt.title("Input")
plt.imshow(william.embeded[:130].data.byte().numpy(), cmap="Greys_r", interpolation="none")
plt.subplot(132)
plt.title("Output")
plt.imshow(william.output[:130].data.byte().numpy(), cmap="Greys_r", interpolation="none")
plt.subplot(133)
plt.title("Softmax Output")
plt.imshow(william.softmax[:130].cpu().data.numpy(), cmap="Greys_r", interpolation="none")
plt.show()
predict_next("Ge Yang:\n", 200, 1)
text_input = widgets.Text()
display(text_input)
def handle_submit(sender):
predict_next(text_input.value, 1000, temperature=0.25)
text_input.on_submit(handle_submit)
###Output
dce,hwovhmeegjoure..
Four.
Buringelave..
And averenge..
And there..
And, areare.
yourd.
Youthesther.;
Whare.
Wheres.
Whathere.
Wheres.
Whathererefoure.
Wherererering.
Whatheres.
Whatherestinge.
Thererour; beather.
Wherengherengor.
Whave thangere.
Sous are.
Whed merererere.
Shathengind.
There, fore, boure fore.
Singereathenganger.
Andereererereangeres pore.
Whe therereroure.
And bole, areangeres.
Thime mathere.
Southeresthere berengerengere;
here there.
Whe thereingere mere therengerengere.
Wherenghereres.
And oulle seres.
And foure the the beathe lase there houres ine there the the be thes and beathe coure ine the thethe patheng outhind and fous our be thengeather oure angese the there the that the the mend he seres and there be pore the 's be ther fore foure tous and his the the the there the theange the fither the thes and poure ther ino he the the mere the ther the the poure the the this a
|
notebooks/RandomForest2.ipynb | ###Markdown
###Code
from sklearn.ensemble import RandomForestRegressor
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from google.colab import drive
drive.mount('/content/drive')
path = '/content/drive/Shareddrives/A-ICE/Data/AIS+ICE/data.csv'
df = pd.read_csv(path)
df = df[df.DWT != 0]
df['PWRDWTRATIO'] = df['POWER']/df['DWT']
df.dropna(axis=0,inplace=True)
def create_random_column(length, mean, std):
return list(np.random.normal(loc=mean,scale=std,size=length))
random_0_1 = create_random_column(len(df), 0, 1)
random_0_100 = create_random_column(len(df), 0, 100)
random_100_1 = create_random_column(len(df), 100, 1)
random_neg100_1 = create_random_column(len(df), -100, 1)
random_100_100 = create_random_column(len(df), 100, 100)
random_neg100_100 = create_random_column(len(df), -100, 100)
df['random1'] = random_0_1
df['random2'] = random_0_100
df['random3'] = random_100_1
df['random4'] = random_neg100_1
df['random5'] = random_100_100
df['random6'] = random_neg100_100
def mean_error(y_test, pred):
return np.mean(np.abs(pred-y_test))
def error_percent(y_test, pred):
percent = np.divide((y_test-pred), y_test).astype(float)
return percent
IMOS = df.IMO.unique()
errors = []
importance = []
for imo in IMOS:
  # fit on the records of a single ship (IMO) and evaluate on all other ships
  train_set = df.loc[df.IMO == imo]
  test_set = df.loc[df.IMO != imo]
x_train = train_set[['POWER', 'DRAFT', 'LENGTH', 'BREADTH', 'SPEED',
'GRT', 'NRT', 'DWT', 'PWRDWTRATIO', 'KEMI_WIND_DIR', 'KEMI_WIND_SPEED',
'KEMI_TEMPERATURE_WEEK', 'KEMI_TEMPERATURE_MONTH', 'KALAJOKI_WIND_DIR',
'KALAJOKI_WIND_SPEED', 'KALAJOKI_TEMPERATURE_WEEK',
'KALAJOKI_TEMPERATURE_MONTH', 'ICEACT', 'ICETCK', 'ICEMAX', 'ICEMIN',
'TYYPPI','random1','random2','random3','random4','random5','random6']]
x_test = test_set[['POWER', 'DRAFT', 'LENGTH', 'BREADTH', 'SPEED',
'GRT', 'NRT', 'DWT','PWRDWTRATIO', 'KEMI_WIND_DIR', 'KEMI_WIND_SPEED',
'KEMI_TEMPERATURE_WEEK', 'KEMI_TEMPERATURE_MONTH', 'KALAJOKI_WIND_DIR',
'KALAJOKI_WIND_SPEED', 'KALAJOKI_TEMPERATURE_WEEK',
'KALAJOKI_TEMPERATURE_MONTH', 'ICEACT', 'ICETCK', 'ICEMAX', 'ICEMIN',
'TYYPPI','random1','random2','random3','random4','random5','random6']]
y_train = train_set['KNOTS_OVER_GROUND']
y_test = test_set['KNOTS_OVER_GROUND']
y_train = np.ravel(y_train)
model = RandomForestRegressor(n_estimators=1000)
model.fit(x_train, y_train)
prediction = model.predict(x_test)
#prediction = np.reshape(prediction, (len(prediction),1))
errors.append([mean_error(y_test,prediction)])
importance.append(model.feature_importances_)
plt.pcolormesh(importance)
plt.colorbar()
###Output
_____no_output_____ |
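###Markdown
The six random columns act as an importance baseline: any real feature whose learned importance falls at or below theirs is probably carrying little signal. One possible way to summarize the per-ship importances against the column names (a sketch; it reuses `importance` and `x_train` from the loop above):
```python
import numpy as np

mean_imp = np.mean(importance, axis=0)      # average importance across the held-out runs
order = np.argsort(mean_imp)[::-1]          # most important features first
for name, value in zip(x_train.columns[order], mean_imp[order]):
    print(f'{name:28s} {value:.3f}')
```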
tutorials/W1D3_MultiLayerPerceptrons/student/W1D3_Tutorial2.ipynb | ###Markdown
Tutorial 2: Deep MLPs

**Week 1, Day 3: Multi Layer Perceptrons**

**By Neuromatch Academy**

__Content creators:__ Arash Ash, Surya Ganguli

__Content reviewers:__ Saeed Salehi, Felix Bartsch, Yu-Fang Yang, Melvin Selim Atay, Kelson Shilling-Scrivo

__Content editors:__ Gagana B, Kelson Shilling-Scrivo, Spiros Chavlis

__Production editors:__ Anoop Kulkarni, Kelson Shilling-Scrivo, Spiros Chavlis

**Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**

---

Tutorial Objectives

In this tutorial, we will dive deeper into MLPs and see more of their mathematical and practical aspects. Today we are going to see why MLPs:
* can be deep or wide
* are dependent on transfer functions
* are sensitive to initialization
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in all tutorials today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/ed65b/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
---

Setup

This is a GPU-free notebook!
###Code
# @title Install dependencies
!pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
from evaltools.airtable import AirtableForm
atform = AirtableForm('appn7VdPRseSoMXEG','W1D3_T2','https://portal.neuromatchacademy.org/api/redirect/to/49e16345-65a5-4616-ba63-568ca06cab78')
# Imports
import pathlib
import torch
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.optim as optim
from torchvision.utils import make_grid
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader, TensorDataset
from tqdm.auto import tqdm
from IPython.display import display
# @title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
my_layout = widgets.Layout()
# @title Helper functions (MLP Tutorial 1 Codes)
# @markdown `Net(nn.Module)`
class Net(nn.Module):
def __init__(self, actv, input_feature_num, hidden_unit_nums, output_feature_num):
super(Net, self).__init__()
    self.input_feature_num = input_feature_num # save the input size for reshaping later
self.mlp = nn.Sequential() # Initialize layers of MLP
in_num = input_feature_num # initialize the temporary input feature to each layer
for i in range(len(hidden_unit_nums)): # Loop over layers and create each one
out_num = hidden_unit_nums[i] # assign the current layer hidden unit from list
layer = nn.Linear(in_num, out_num) # use nn.Linear to define the layer
in_num = out_num # assign next layer input using current layer output
self.mlp.add_module(f"Linear_{i}", layer) # append layer to the model with a name
actv_layer = eval(f"nn.{actv}") # Assign activation function (eval allows us to instantiate object from string)
self.mlp.add_module(f"Activation_{i}", actv_layer) # append activation to the model with a name
out_layer = nn.Linear(in_num, output_feature_num) # Create final layer
self.mlp.add_module('Output_Linear', out_layer) # append the final layer
def forward(self, x):
# reshape inputs to (batch_size, input_feature_num)
# just in case the input vector is not 2D, like an image!
x = x.view(-1, self.input_feature_num)
logits = self.mlp(x) # forward pass of MLP
return logits
# @markdown `train_test_classification(net, criterion, optimizer, train_loader, test_loader, num_epochs=1, verbose=True, training_plot=False)`
def train_test_classification(net, criterion, optimizer, train_loader,
test_loader, num_epochs=1, verbose=True,
training_plot=False, device='cpu'):
net.to(device)
net.train()
training_losses = []
for epoch in tqdm(range(num_epochs)): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(train_loader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs = inputs.to(device).float()
labels = labels.to(device).long()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
if verbose:
training_losses += [loss.item()]
net.eval()
def test(data_loader):
correct = 0
total = 0
for data in data_loader:
inputs, labels = data
inputs = inputs.to(device).float()
labels = labels.to(device).long()
outputs = net(inputs)
_, predicted = torch.max(outputs, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
acc = 100 * correct / total
return total, acc
train_total, train_acc = test(train_loader)
test_total, test_acc = test(test_loader)
if verbose:
print(f'\nAccuracy on the {train_total} training samples: {train_acc:0.2f}')
print(f'Accuracy on the {test_total} testing samples: {test_acc:0.2f}\n')
if training_plot:
plt.plot(training_losses)
plt.xlabel('Batch')
plt.ylabel('Training loss')
plt.show()
return train_acc, test_acc
# @markdown `shuffle_and_split_data(X, y, seed)`
def shuffle_and_split_data(X, y, seed):
# set seed for reproducibility
torch.manual_seed(seed)
# Number of samples
N = X.shape[0]
# Shuffle data
shuffled_indices = torch.randperm(N) # get indices to shuffle data, could use torch.randperm
X = X[shuffled_indices]
y = y[shuffled_indices]
# Split data into train/test
test_size = int(0.2 * N) # assign test datset size using 20% of samples
X_test = X[:test_size]
y_test = y[:test_size]
X_train = X[test_size:]
y_train = y[test_size:]
return X_test, y_test, X_train, y_train
# @title Plotting functions
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.axis(False)
plt.show()
def sample_grid(M=500, x_max=2.0):
ii, jj = torch.meshgrid(torch.linspace(-x_max, x_max,M),
torch.linspace(-x_max, x_max, M))
X_all = torch.cat([ii.unsqueeze(-1),
jj.unsqueeze(-1)],
dim=-1).view(-1, 2)
return X_all
def plot_decision_map(X_all, y_pred, X_test, y_test,
M=500, x_max=2.0, eps=1e-3):
decision_map = torch.argmax(y_pred, dim=1)
for i in range(len(X_test)):
indeces = (X_all[:, 0] - X_test[i, 0])**2 + (X_all[:, 1] - X_test[i, 1])**2 < eps # [TO-DO]
decision_map[indeces] = (K + y_test[i]).long()
decision_map = decision_map.view(M, M).cpu()
plt.imshow(decision_map, extent=[-x_max, x_max, -x_max, x_max], cmap='jet')
plt.plot()
# @title Set random seed
# @markdown Executing `set_seed(seed=seed)` you are setting the seed
# for DL its critical to set the random seed so that students can have a
# baseline to compare their results to expected results.
# Read more here: https://pytorch.org/docs/stable/notes/randomness.html
# Call `set_seed` function in the exercises to ensure reproducibility.
import random
import torch
def set_seed(seed=None, seed_torch=True):
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
# In case that `DataLoader` is used
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
# @title Set device (GPU or CPU). Execute `set_device()`
# especially if torch modules used.
# inform the user if the notebook uses GPU or CPU.
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("GPU is not enabled in this notebook. \n"
"If you want to enable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `GPU` from the dropdown menu")
else:
print("GPU is enabled in this notebook. \n"
"If you want to disable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `None` from the dropdown menu")
return device
SEED = 2021
set_seed(seed=SEED)
DEVICE = set_device()
# @title Download of the Animal Faces dataset
# @markdown Animal faces consists of 16,130 32x32 images belonging to 3 classes
import requests, os
from zipfile import ZipFile
print("Start downloading and unzipping `AnimalFaces` dataset...")
name = 'AnimalFaces32x32'
fname = f"{name}.zip"
url = f"https://osf.io/kgfvj/download"
r = requests.get(url, allow_redirects=True)
with open(fname, 'wb') as fh:
fh.write(r.content)
with ZipFile(fname, 'r') as zfile:
zfile.extractall(f"./{name}")
if os.path.exists(fname):
os.remove(fname)
else:
print(f"The file {fname} does not exist")
os.chdir(name)
print("Download completed.")
# @title Data Loader
# @markdown Execute this cell!
K = 4
sigma = 0.4
N = 1000
t = torch.linspace(0, 1, N)
X = torch.zeros(K*N, 2)
y = torch.zeros(K*N)
for k in range(K):
X[k*N:(k+1)*N, 0] = t*(torch.sin(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
X[k*N:(k+1)*N, 1] = t*(torch.cos(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
y[k*N:(k+1)*N] = k
X_test, y_test, X_train, y_train = shuffle_and_split_data(X, y, seed=SEED)
# DataLoader with random seed
batch_size = 128
g_seed = torch.Generator()
g_seed.manual_seed(SEED)
test_data = TensorDataset(X_test, y_test)
test_loader = DataLoader(test_data, batch_size=batch_size,
shuffle=False, num_workers=0,
worker_init_fn=seed_worker,
generator=g_seed,
)
train_data = TensorDataset(X_train, y_train)
train_loader = DataLoader(train_data,
batch_size=batch_size,
drop_last=True,
shuffle=True,
worker_init_fn=seed_worker,
generator=g_seed,
)
###Output
_____no_output_____
###Markdown
---

Section 1: Wider vs deeper networks

*Time estimate: ~45 mins*
###Code
# @title Video 1: Deep Expressivity
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV19f4y157vG", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"g8JuGrNk9ag", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 1: Deep Expressivity')
display(out)
###Output
_____no_output_____
###Markdown
Coding Exercise 1: Wide vs. Deep while keeping the number of parameters the same

Let's find the optimal number of hidden layers under a fixed number of parameters constraint! But first, we need a model parameter counter. You could iterate over the model layers by calling `.parameters()` and then use `.numel()` to count the layer parameters. Also, you can use the [`requires_grad`](https://pytorch.org/docs/stable/notes/autograd.html) attribute to make sure it's a trainable parameter. E.g.,

```python
x = torch.ones(10, 5, requires_grad=True)
```

After defining the counter function, we will step by step increase the depth and then iterate over the possible numbers of hidden units (assuming the same number for all hidden layers); then, using our parameter counter, we choose the number of hidden units that results in a total close to `max_par_count` parameters.
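For instance, here is a quick standalone illustration of that counting pattern on a single linear layer (not the full exercise solution): an `nn.Linear(10, 3)` has $10 \times 3$ weights plus $3$ biases, i.e. $33$ trainable parameters.

```python
import torch.nn as nn

layer = nn.Linear(10, 3)
n_params = sum(p.numel() for p in layer.parameters() if p.requires_grad)
print(n_params)  # 33
```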
###Code
def run_depth_optimizer(max_par_count, max_hidden_layer, device):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the depth optimizer function")
###################################################################
def count_parameters(model):
par_count = 0
for p in model.parameters():
if p.requires_grad:
par_count += ...
return par_count
# number of hidden layers to try
hidden_layers = ...
# test test score list
test_scores = []
for hidden_layer in hidden_layers:
# Initialize the hidden units in each hidden layer to be 1
    hidden_units = np.ones(hidden_layer, dtype=int)
# Define the the with hidden units equal to 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(device)
par_count = count_parameters(wide_net)
# increment hidden_units and repeat until the par_count reaches the desired count
while par_count < max_par_count:
hidden_units += 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(device)
par_count = ...
# Train it
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(wide_net.parameters(), lr=1e-3)
_, test_acc = train_test_classification(wide_net, criterion, optimizer,
train_loader, test_loader,
num_epochs=100, device=device)
test_scores += [test_acc]
return hidden_layers, test_scores
# add event to airtable
atform.add_event('Coding Exercise 1: Wide vs. Deep ')
set_seed(seed=SEED)
max_par_count = 100
max_hidden_layer = 5
## Uncomment below to test your function
# hidden_layers, test_scores = run_depth_optimizer(max_par_count, max_hidden_layer, DEVICE)
# plt.xlabel('# of hidden layers')
# plt.ylabel('Test accuracy')
# plt.plot(hidden_layers, test_scores)
# plt.show()
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_7d616b5c.py)

*Example output:*

Think! 1: Why the tradeoff?

Here we see that there is a particular number of hidden layers that is optimal. Why do you think increasing the number of hidden layers beyond a certain point hurts in this scenario?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type answer here and Push submit',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
atform.add_answer('q1' , text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_4c626e50.py)

Section 1.1: Where Wide Fails

Let's use the same Spiral dataset generated before with two features, then add more polynomial features (which makes the first layer wider), and finally train a single Linear layer. We could use the same MLP network with no hidden layers (though it would not be called an MLP anymore!).

Note that we will add polynomial terms up to $P=50$, which means that for every $x_1^n x_2^m$ term, $n+m\leq P$. Now it's a fun math exercise to prove why the total number of polynomial features up to $P$ becomes,

\begin{equation}
\text{\# of terms} = \frac{(P+1)(P+2)}{2}
\end{equation}

Also, we don't need the polynomial term with degree zero (which is the constant term) since `nn.Linear` layers have bias terms. Therefore we will have one fewer polynomial feature.
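For example, with $P=50$ this gives $\frac{51 \cdot 52}{2} = 1326$ terms, i.e. $1325$ polynomial features once the constant term is dropped, which is exactly the `num_features` value printed by the code below.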
###Code
def run_poly_clasification(poly_degree, device='cpu', seed=0):
def make_poly_features(poly_degree, X):
# Define the number of polynomial features except the bias term
num_features = (poly_degree + 1)*(poly_degree + 2) // 2 - 1
poly_X = torch.zeros((X.shape[0], num_features))
count = 0
for i in range(poly_degree+1):
for j in range(poly_degree+1):
# no need to add zero degree since model has biases
if j + i > 0:
if j + i <= poly_degree:
# Define the polynomial term
poly_X[:, count] = X[:, 0]**i * X [:, 1]**j
count += 1
return poly_X, num_features
poly_X_test, num_features = make_poly_features(poly_degree, X_test)
poly_X_train, _ = make_poly_features(poly_degree, X_train)
batch_size = 128
g_seed = torch.Generator()
g_seed.manual_seed(seed)
poly_test_data = TensorDataset(poly_X_test, y_test)
poly_test_loader = DataLoader(poly_test_data,
batch_size=batch_size,
shuffle=False,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
poly_train_data = TensorDataset(poly_X_train, y_train)
poly_train_loader = DataLoader(poly_train_data,
batch_size=batch_size,
shuffle=True,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
# define a linear model using MLP class
poly_net = Net('ReLU()', num_features, [], K).to(device)
# Train it!
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(poly_net.parameters(), lr=1e-3)
_, _ = train_test_classification(poly_net, criterion, optimizer,
poly_train_loader, poly_test_loader,
num_epochs=100, device=DEVICE)
# Test it
X_all = sample_grid().to(device)
poly_X_all, _ = make_poly_features(poly_degree, X_all)
y_pred = poly_net(poly_X_all.to(device))
# Plot it
plot_decision_map(X_all.cpu(), y_pred.cpu(), X_test.cpu(), y_test.cpu())
plt.show()
return num_features
set_seed(seed=SEED)
max_poly_degree = 50
num_features = run_poly_clasification(max_poly_degree, DEVICE, SEED)
print(f'Number of features: {num_features}')
###Output
_____no_output_____
###Markdown
Think! 1.1: Does it generalize well?

Do you think this model is performing well outside its training distribution? Why?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type answer here and Push submit',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
atform.add_answer('q2', text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_13c53198.py)

---

Section 2: Deeper MLPs

*Time estimate: ~55 mins*
###Code
# @title Video 2: Case study
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1FL411n7SH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"3g_OJ6dYE8E", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 2: Case study')
display(out)
###Output
_____no_output_____
###Markdown
Coding Exercise 2: Dataloader on a real-world dataset

Let's build our first real-world dataset loader with data preprocessing and augmentation! We will use the Torchvision transforms to do it.

We'd like to have a simple data augmentation with the following steps:
* random rotation by up to 10 degrees (`.RandomRotation`)
* random horizontal flipping (`.RandomHorizontalFlip`)

and we'd like a preprocessing that:
* makes PyTorch tensors in the range [0, 1] (`.ToTensor`)
* normalizes the input to the range [-1, 1] (`.Normalize`)

**Hint:** For more info on transforms, see the [official documentation](https://pytorch.org/vision/stable/transforms.html).
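One possible shape for the two transform lists in the function below (a sketch; treat the normalization statistics as an assumption, although any per-channel mean and std of 0.5 maps [0, 1] onto [-1, 1]). It relies on the `transforms` import from the setup cells.

```python
# Sketch only: augmentation runs first, then the shared preprocessing.
augmentation_transforms = [transforms.RandomRotation(10),
                           transforms.RandomHorizontalFlip()]
preprocessing_transforms = [transforms.ToTensor(),
                            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
```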
###Code
def get_data_loaders(batch_size, seed):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the get data loaders function")
###################################################################
# define the transform done only during training
augmentation_transforms = ...
# define the transform done in training and testing (after augmentation)
preprocessing_transforms = ...
# compose them together
train_transform = transforms.Compose(augmentation_transforms + preprocessing_transforms)
test_transform = transforms.Compose(preprocessing_transforms)
# using pathlib to be compatible with all OS's
data_path = pathlib.Path('.')/'afhq'
# define the dataset objects (they can load one by one)
img_train_dataset = ImageFolder(data_path/'train', transform=train_transform)
img_test_dataset = ImageFolder(data_path/'val', transform=test_transform)
g_seed = torch.Generator()
g_seed.manual_seed(seed)
# define the dataloader objects (they can load batch by batch)
img_train_loader = DataLoader(img_train_dataset,
batch_size=batch_size,
shuffle=True,
worker_init_fn=seed_worker,
generator=g_seed)
# num_workers can be set to higher if running on Colab Pro TPUs to speed up,
# with more than one worker, it will do multithreading to queue batches
img_test_loader = DataLoader(img_test_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
return img_train_loader, img_test_loader
# add event to airtable
atform.add_event('Coding Exercise 2: Dataloader on a real-world dataset')
batch_size = 64
set_seed(seed=SEED)
## Uncomment below to test your function
# img_train_loader, img_test_loader = get_data_loaders(batch_size, SEED)
## get some random training images
# dataiter = iter(img_train_loader)
# images, labels = dataiter.next()
## show images
# imshow(make_grid(images, nrow=8))
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_9605a4e9.py)

*Example output:*
###Code
# Train it
set_seed(seed=SEED)
net = Net('ReLU()', 3*32*32, [64, 64, 64], 3).to(DEVICE)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=3e-4)
_, _ = train_test_classification(net, criterion, optimizer,
img_train_loader, img_test_loader,
num_epochs=30, device=DEVICE)
# visualize the feature map
fc1_weights = net.mlp[0].weight.view(64, 3, 32, 32).detach().cpu()
fc1_weights /= torch.max(torch.abs(fc1_weights))
imshow(make_grid(fc1_weights, nrow=8))
###Output
_____no_output_____
###Markdown
Think! 2: Why are the first-layer features high level?

Even though the network is three layers deep, we see distinct animal faces in the first-layer feature map. Do you think this MLP has a hierarchical feature representation? Why?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type answer here and Push submit',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
  atform.add_answer('q3', text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_eb2e554f.py)

---

Section 3: Ethical aspects

*Time estimate: ~20 mins*
###Code
# @title Video 3: Ethics: Hype in AI
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1CP4y1s712", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ou35QzsKsdc", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 3: Ethics: Hype in AI')
display(out)
###Output
_____no_output_____
###Markdown
---

Summary

In the second tutorial of this day, we have dived deeper into MLPs and seen more of their mathematical and practical aspects. More specifically, we have learned about different architectures, i.e., deep vs. wide, and how they depend on the transfer function used. We have also learned about the importance of initialization, and we mathematically analyzed two methods for smart initialization.
###Code
# @title Video 4: Outro
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Kb4y1r76G", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"2sEPw4sSfSw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 4: Outro')
display(out)
# @title Airtable Submission Link
from IPython import display as IPydisplay
IPydisplay.HTML(
f"""
<div>
<a href= "{atform.url()}" target="_blank">
<img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/SurveyButton.png?raw=1"
alt="button link end of day Survey" style="width:410px"></a>
</div>""" )
###Output
_____no_output_____
###Markdown
---

Bonus: The need for good initialization

In this section, we derive principles for initializing deep networks. We will see that if the weights are too large, then the forward propagation of signals will be chaotic, and the backpropagation of error gradients will explode. On the other hand, if the weights are too small, the forward propagation of signals will be ordered, and the backpropagation of error gradients will vanish. The key idea behind initialization is to choose the weights to be just right, i.e., at the edge between order and chaos. In this section, we derive this edge and show how to compute the correct initial variance of the weights. Many of the typical initialization schemes in existing deep learning frameworks implicitly employ this principle of initialization at the edge of chaos. So this section can be safely skipped on a first pass and **is a bonus section**.
###Code
# @title Video 5: Need for Good Initialization
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Qq4y1H7Px", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"W0V2kwHSuUI", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 5: Need for Good Initialization')
display(out)
###Output
_____no_output_____
###Markdown
Xavier initialization

Let us look at the scale distribution of an output (e.g., a hidden variable) $o_i$ for some fully-connected layer without nonlinearities, with $n_{in}$ inputs ($x_j$) and their associated weights $w_{ij}$. An output is then given by

\begin{equation}
o_{i} = \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j
\end{equation}

The weights $w_{ij}$ are all drawn independently from the same distribution. Furthermore, let us assume that this distribution has zero mean and variance $\sigma^2$. Note that this does not mean that the distribution has to be Gaussian, just that the mean and variance need to exist. For now, let us assume that the inputs to the layer $x_j$ also have zero mean and variance $\gamma^2$ and that they are independent of $w_{ij}$ and independent of each other. In this case, we can compute the mean and variance of $o_i$ as follows:

\begin{split}\begin{aligned} E[o_i] &= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij} x_j] \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij}] E[x_j] = 0, \\ \\ \\ \mathrm{Var}[o_i] &= E[o_i^2] - (E[o_i])^2 \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij} x^2_j] - 0 \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij}] E[x^2_j] \\ \\ &= n_\mathrm{in} \sigma^2 \gamma^2\end{aligned}\end{split}

One way to keep the variance fixed is to set $n_{in}\sigma^2=1$. Now consider backpropagation. There we face a similar problem, albeit with gradients being propagated from the layers closer to the output. Using the same reasoning as for forward propagation, we see that the gradients' variance can blow up unless $n_{out}\sigma^2=1$, where $n_{out}$ is the number of outputs of this layer. This leaves us in a dilemma: we cannot possibly satisfy both conditions simultaneously. Instead, we simply try to satisfy

\begin{aligned}
\frac{1}{2} (n_\mathrm{in} + n_\mathrm{out}) \sigma^2 = 1 \text{ or equivalently }\sigma = \sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}
\end{aligned}

This is the reasoning underlying the now-standard and practically beneficial Xavier initialization, named after the first author of its creators [Glorot & Bengio, 2010]. Typically, the Xavier initialization samples weights from a Gaussian distribution with zero mean and variance $\sigma^2=\frac{2}{(n_{in}+n_{out})}$,

\begin{equation}
w_{ij} \sim \mathcal{N} \left(\mu=0, \sigma=\sqrt{\frac{2}{(n_{in}+n_{out})}} \right)
\end{equation}

We can also adapt Xavier's intuition to choose the variance when sampling weights from a uniform distribution. Note that the uniform distribution $U(-a,a)$ has variance $\frac{a^2}{3}$. Plugging this into our condition on $\sigma^2$ yields the suggestion to initialize according to

\begin{equation}
w_{ij} \sim \mathcal{U} \left(-\sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}, \sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}\right)
\end{equation}

This explanation is mainly taken from [here](https://d2l.ai/chapter_multilayer-perceptrons/numerical-stability-and-init.html). If you want to see more about initializations and their differences, see [here](https://www.deeplearning.ai/ai-notes/initialization/).
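To see the rule in action, here is a quick numerical sanity check (a sketch; PyTorch's built-in `xavier_normal_` implements exactly this variance, with an optional extra gain factor):

```python
import torch.nn as nn

layer = nn.Linear(256, 128)
nn.init.xavier_normal_(layer.weight)
empirical_std = layer.weight.std().item()
theoretical_std = (2 / (256 + 128)) ** 0.5
print(empirical_std, theoretical_std)  # both close to 0.072
```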
Initialization with transfer function

Let's derive the optimal gain for LeakyReLU following similar steps. LeakyReLU is described mathematically by

\begin{equation}
f(x)=\left\{ \begin{array}{ll} \alpha \cdot x & \text { for } x<0 \\ x & \text { for } x \geq 0 \end{array}\right.
\end{equation}

where $\alpha$ controls the angle of the negative slope.

Considering a single layer with this activation function gives

\begin{align}
o_{i} &= \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j\\
z_{i} &= f\left( o_{i} \right)
\end{align}

where $z_i$ denotes the activation of node $i$.

The expectation of the output is still zero, i.e., $\mathbb{E}[f(o_i)]=0$, but the variance changes. Assuming that the probability $P(x < 0) = 0.5$, we have that

\begin{align}
\mathrm{Var}[f(o_i)] &= \mathbb{E}[f(o_i)^2] - \left( \mathbb{E}[f(o_i)] \right)^{2} \\ \\
&= \frac{\mathrm{Var}[o_i] + \alpha^2 \mathrm{Var}[o_i]}{2} \\ \\
&= \frac{1+\alpha^2}{2}n_\mathrm{in} \sigma^2 \gamma^2
\end{align}

where $\gamma^2$ is the variance of the distribution of the inputs $x_j$ and $\sigma^2$ is the variance of the distribution of the weights $w_{ij}$, as before.

Therefore, following the rest of the derivation as before,

\begin{equation}
\sigma = gain\sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}, \, \text{where} \,\, gain = \sqrt{\frac{2}{1+\alpha^2}}
\end{equation}

As we can see from the derived formula for $\sigma$, the transfer function we choose is related to the variance of the distribution of the weights. As the negative slope $\alpha$ of the LeakyReLU becomes larger, the $gain$ becomes smaller and thus the distribution of the weights becomes narrower. On the other hand, as $\alpha$ becomes smaller and smaller, the distribution of the weights becomes wider. Recall that we initialize our weights, for example, by sampling from a normal distribution with zero mean and variance $\sigma^2$.

Best gain for Xavier Initialization with Leaky ReLU

You're probably running out of time, so let me explain what's happening here. We derived a theoretical gain for initialization. But the question is whether it holds in practice. Here we have a setup to confirm our finding: we will try a range of gains, find the empirical optimum, and see whether it matches our theoretical value!

If you have time left, you can change the distribution to sample the initial weights from a uniform distribution: comment out line 11 and uncomment line 12 in the cell below.
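For the negative slope used in the sweep below ($\alpha = 0.1$), this theoretical gain works out to $\sqrt{2/(1+0.01)} \approx 1.41$, which is the value marked as the theoretical gain on the resulting plot.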
###Code
N = 10 # number of trials
gains = np.linspace(1/N, 3.0, N)
test_accs = []
train_accs = []
mode = 'uniform'
for gain in gains:
print(f'\ngain: {gain}')
def init_weights(m, mode='normal'):
if type(m) == nn.Linear:
torch.nn.init.xavier_normal_(m.weight, gain)
# torch.nn.init.xavier_uniform_(m.weight, gain)
negative_slope = 0.1
actv = f'LeakyReLU({negative_slope})'
set_seed(seed=SEED)
net = Net(actv, 3*32*32, [128, 64, 32], 3).to(DEVICE)
net.apply(init_weights)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=1e-2)
train_acc, test_acc = train_test_classification(net, criterion, optimizer,
img_train_loader,
img_test_loader,
num_epochs=1,
verbose=True,
device=DEVICE)
test_accs += [test_acc]
train_accs += [train_acc]
best_gain = gains[np.argmax(train_accs)]
plt.plot(gains, test_accs, label='Test accuracy')
plt.plot(gains, train_accs, label='Train accuracy')
plt.scatter(best_gain, max(train_accs),
label=f'best gain={best_gain:.1f}',
c='k', marker ='x')
# calculate and plot the theoretical gain
theoretical_gain = np.sqrt(2.0 / (1 + negative_slope ** 2))
plt.scatter(theoretical_gain, max(train_accs),
label=f'theoretical gain={theoretical_gain:.2f}',
c='g', marker ='x')
plt.legend()
plt.plot()
###Output
_____no_output_____
###Markdown
Neuromatch Academy: Week 1, Day 3, Tutorial 2

Multi-layer Perceptrons

__Content creators:__ Arash Ash

__Content reviewers:__ Saeed Salehi, Felix Bartsch, Yu-Fang Yang.

__Content editors:__ Gagana B, Spiros Chavlis.

__Production editors:__ Anoop Kulkarni, Spiros Chavlis.

---

Tutorial Objectives

In the second tutorial of Week 3, we will dive deeper into MLPs and see more of their mathematical and practical aspects. Today we are going to see why MLPs:
* can evolve linearly in weights
* the case of deep vs. wide
* are dependent on transfer functions
* are sensitive to initialization
###Code
#@markdown Tutorial slides
# you should link the slides for all tutorial videos here (we will store pdfs on osf)
from IPython.display import HTML
HTML('<iframe src="https://docs.google.com/presentation/d/e/2PACX-1vSPvHqDTmMq4GyQy6lieNEFxq4qz1SmqC2RNoeei3_niECH53zneh8jJVYOnBIdk0Uaz7y2b9DK8V1t/embed?start=false&loop=false&delayms=3000" frameborder="0" width="960" height="569" allowfullscreen="true" mozallowfullscreen="true" webkitallowfullscreen="true"></iframe>')
###Output
_____no_output_____
###Markdown
--- Setup
###Code
# @title Imports
import random
import pathlib
import random, time
import torch
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader, TensorDataset
from torchvision.utils import make_grid
from IPython.display import HTML, display
dev = "cpu"
# @title Seeding for reproducibility
seed = 522
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)
def seed_worker(worker_id):
worker_seed = seed % (worker_id+1)
np.random.seed(worker_seed)
random.seed(worker_seed)
# @title Dataset download
%%capture
!rm -r AnimalFaces32x32/
!git clone https://github.com/arashash/AnimalFaces32x32
!rm -r afhq/
!unzip ./AnimalFaces32x32/afhq_32x32.zip
# @title Figure Settings
import ipywidgets as widgets
%matplotlib inline
fig_w, fig_h = (8, 6)
plt.rcParams.update({'figure.figsize': (fig_w, fig_h)})
%config InlineBackend.figure_format = 'retina'
my_layout = widgets.Layout()
# @title Helper functions
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.axis(False)
plt.show()
def color_grad(grad, M=500, x_max=1):
"""Plot gradient
"""
grad = grad.detach().cpu()
grad_colors = grad[:, 0]
grad_colors = (grad_colors / grad_colors.max() * 1e3).int() % 10
grad_colors = grad_colors.view(M, M).cpu().numpy()
return grad_colors
def progress(epoch, loss, epochs=100):
return HTML("""
<label for="file">Training loss: {loss}</label>
<progress
value='{epoch}'
max='{epochs}',
style='width: 100%'
>
{epoch}
</progress>
""".format(loss=loss, epoch=epoch, epochs=epochs))
# @title Part 1 Code
class Net(nn.Module):
def __init__(self, actv, num_inputs, hidden_units, num_outputs):
super(Net, self).__init__()
exec('self.actv = nn.%s'%actv)
self.layers = nn.ModuleList()
for i in range(len(hidden_units)):
next_num_inputs = hidden_units[i]
self.layers += [nn.Linear(num_inputs, next_num_inputs)]
num_inputs = next_num_inputs
self.out = nn.Linear(num_inputs, num_outputs)
def forward(self, x):
# flattening
x = x.view(x.shape[0], -1)
for layer in self.layers:
x = self.actv(layer(x))
x = self.out(x)
return x
K = 4
sigma = 0.4
N = 1000
t = torch.linspace(0, 1, N)
X = torch.zeros(K*N, 2)
y = torch.zeros(K*N)
for k in range(K):
X[k*N:(k+1)*N, 0] = t*(torch.sin(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
X[k*N:(k+1)*N, 1] = t*(torch.cos(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
y[k*N:(k+1)*N] = k
# Shuffling
shuffled_indeces = torch.randperm(K*N)
X = X[shuffled_indeces]
y = y[shuffled_indeces]
# Test Train splitting
test_size = int(0.2*N)
X_test = X[:test_size]
y_test = y[:test_size]
X_train = X[test_size:]
y_train = y[test_size:]
batch_size = 128
test_data = TensorDataset(X_test, y_test)
test_loader = DataLoader(test_data, batch_size=batch_size,
shuffle=False, num_workers=0)
train_data = TensorDataset(X_train, y_train)
train_loader = DataLoader(train_data, batch_size=batch_size, drop_last=True,
shuffle=True, num_workers=0, worker_init_fn=seed_worker)
def train_test_classification(net, criterion, optimizer,
train_loader, test_loader,
num_epochs=1, verbose=True,
training_plot=False):
if verbose:
progress_bar = display(progress(0, 0, num_epochs), display_id=True)
net.train()
training_losses = []
for epoch in range(num_epochs): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(train_loader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs = inputs.to(dev).float()
labels = labels.to(dev).long()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
if verbose:
training_losses += [loss.item()]
running_loss += loss.item()
if i % 10 == 9: # update every 10 mini-batches
progress_bar.update(progress(epoch+1, running_loss / 10, num_epochs))
running_loss = 0.0
net.eval()
def test(data_loader):
correct = 0
total = 0
for data in data_loader:
inputs, labels = data
inputs = inputs.to(dev).float()
labels = labels.to(dev).long()
outputs = net(inputs)
_, predicted = torch.max(outputs, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
acc = 100 * correct / total
return total, acc
train_total, train_acc = test(train_loader)
test_total, test_acc = test(test_loader)
if verbose:
print('Accuracy on the %d training samples: %0.2f %%' % (train_total, train_acc))
print('Accuracy on the %d testing samples: %0.2f %%' % (test_total, test_acc))
if training_plot:
plt.plot(training_losses)
plt.xlabel('Batch')
plt.ylabel('Training loss')
plt.show()
return train_acc, test_acc
def sample_grid(M=500, x_max = 2.0):
ii, jj = torch.meshgrid(torch.linspace(-x_max, x_max,M),
torch.linspace(-x_max, x_max, M))
X_all = torch.cat([ii.unsqueeze(-1),
jj.unsqueeze(-1)],
dim=-1).view(-1, 2)
return X_all
def plot_decision_map(X_all, y_pred, X_test, y_test, M=500, x_max = 2.0, eps = 1e-3):
decision_map = torch.argmax(y_pred, dim=1)
for i in range(len(X_test)):
indeces = (X_all[:, 0] - X_test[i, 0])**2 + (X_all[:, 1] - X_test[i, 1])**2 < eps # [TO-DO]
decision_map[indeces] = (K + y_test[i]).long()
decision_map = decision_map.view(M, M).cpu()
plt.imshow(decision_map, extent=[-x_max, x_max, -x_max, x_max], cmap='jet')
plt.plot()
###Output
_____no_output_____
###Markdown
--- Section 1: Wider vs deeper networks
###Code
#@title Video 1: Cross Entropy
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="5AxFjPZGuTc", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
#@title Video 2: Expressivity vs Learnability
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="H18gs1z2PTw", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
###Output
_____no_output_____
###Markdown
Exercise 1: Wide vs. Deep while keeping the number of parameters the sameLet's find the optimal number of hidden layers under a fixed parameter-count constraint!But first, we need a model parameter counter. You could iterate over the model layers by calling `.parameters()` and then use `.numel()` to count each layer's parameters. Also, you can use the `requires_grad` attribute to make sure you only count trainable parameters.After defining the counter function, we will increase the depth step by step and, for each depth, iterate over the possible numbers of hidden units (assumed to be the same for all hidden layers); then, using our parameter counter, we choose the number of hidden units that brings the total close to `max_par_count` parameters.
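As a quick, standalone illustration of these calls (a toy layer with arbitrary sizes, not the exercise solution), `.numel()` counts the entries of each parameter tensor and `requires_grad` marks it as trainable:
```python
import torch.nn as nn

layer = nn.Linear(10, 5)  # toy layer: 10 inputs, 5 outputs
n_trainable = sum(p.numel() for p in layer.parameters() if p.requires_grad)
print(n_trainable)  # 10 * 5 weights + 5 biases = 55
```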
###Code
def run_depth_optimizer(max_par_count, max_hidden_layer):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the depth optimizer function")
###################################################################
def count_parameters(model):
par_count = 0
for p in model.parameters():
if p.requires_grad:
par_count += ...
return par_count
# number of hidden layers to try
hidden_layers = ...
# test score list
test_scores = []
for hidden_layer in hidden_layers:
# Initialize the hidden units in each hidden layer to be 1
hidden_units = np.ones(hidden_layer, dtype=int)  # use builtin int (np.int is deprecated)
# Define the net with hidden units equal to 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(dev)
par_count = count_parameters(wide_net)
# increment hidden_units and repeat until the par_count reaches the desired count
while par_count < max_par_count:
hidden_units += 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(dev)
par_count = ...
# Train it
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(wide_net.parameters(), lr=1e-3)
_, test_acc = train_test_classification(wide_net, criterion, optimizer, train_loader,
test_loader, num_epochs=100)
test_scores += [test_acc]
return hidden_layers, test_scores
### Uncomment below to test your function
#max_par_count = 100
#max_hidden_layer = 5
#hidden_layers, test_scores = run_depth_optimizer(max_par_count, max_hidden_layer)
#with plt.xkcd():
# plt.xlabel('# of hidden layers')
# plt.ylabel('Test accuracy')
# plt.plot(hidden_layers, test_scores)
# plt.show()
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MLP/solutions/W1D3_Tutorial2_Solution_31aa6709.py)*Example output:*
###Code
#@markdown Here we see that there is a particular number of hidden layers that is optimal. Why do you think increasing the number of hidden layers beyond a certain point hurts in this scenario?
w3_why_three = '' #@param {type:"string"}
###Output
_____no_output_____
###Markdown
Section 1.1: Where Wide FailsLet's use the same Spiral dataset generated before with two features. And then add more polynomial features (which makes the first layer wider). And finally, train a single Linear layer. We could use the same MLP network with no hidden layers (though it would not be called an MLP anymore!).Note that we will add polynomial terms up to $P=50$, which means that for every $x_1^n x_2^m$ term, $n+m\leq P$. Now it's a fun math exercise to prove why the total number of polynomial features up to degree $P$ becomes,$$\#\text{ of terms} = \frac{(P+1)(P+2)}{2}$$Also, we don't need the polynomial term with degree zero (which is the constant term) since `nn.Linear` layers have bias terms. Therefore we will have one fewer polynomial feature.
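As a quick arithmetic check of this count for the degree used below (a minimal sketch, no training involved):
```python
P = 50
num_terms = (P + 1) * (P + 2) // 2  # all x1^n x2^m monomials with n + m <= P, including the constant
num_features = num_terms - 1        # drop the degree-zero (constant) term
print(num_terms, num_features)      # 1326 1325
```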
###Code
def run_poly_clasification(poly_degree):
def make_poly_features(poly_degree, X):
# Define the number of polynomial features except the bias term
num_features = (poly_degree+1)*(poly_degree+2)//2-1
poly_X = torch.zeros((X.shape[0], num_features))
count = 0
for i in range(poly_degree+1):
for j in range(poly_degree+1):
# no need to add zero degree since model has biases
if j+i > 0:
if j+i <= poly_degree:
# Define the polynomial term
poly_X[:, count] = X[:, 0]**i * X[:, 1]**j
count += 1
return poly_X, num_features
poly_X_test, num_features = make_poly_features(poly_degree, X_test)
poly_X_train, _ = make_poly_features(poly_degree, X_train)
batch_size = 128
poly_test_data = TensorDataset(poly_X_test, y_test)
poly_test_loader = DataLoader(poly_test_data, batch_size=batch_size,
shuffle=False, num_workers=1)
poly_train_data = TensorDataset(poly_X_train, y_train)
poly_train_loader = DataLoader(poly_train_data, batch_size=batch_size,
shuffle=True, num_workers=1)
# define a linear model using MLP class
poly_net = Net('ReLU()', num_features, [], K).to(dev)
# Train it!
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(poly_net.parameters(), lr=1e-3)
_, _ = train_test_classification(poly_net, criterion, optimizer,
poly_train_loader, poly_test_loader,
num_epochs=100)
# Test it
X_all = sample_grid().to(dev)
poly_X_all, _ = make_poly_features(poly_degree, X_all)
y_pred = poly_net(poly_X_all.to(dev))
# Plot it
with plt.xkcd():
plot_decision_map(X_all, y_pred, X_test, y_test)
plt.show()
return num_features
### Uncomment below to test your function
max_poly_degree = 50
num_features = run_poly_clasification(max_poly_degree)
print('Number of features: %d'%num_features)
#@markdown Do you think this model is performing well outside its training distribution? Why?
w3_poly_OoD = '' #@param {type:"string"}
###Output
_____no_output_____
###Markdown
--- Section 2: Linear Learning in Wide Multi-Layer Perceptrons (Advanced)Feel free to skip if you just want applications!
###Code
#@title Video 3: Neural Tangent Kernels
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="5Lj6kp4k7Sk", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
###Output
_____no_output_____
###Markdown
Exercise 2: Motivation for Neural Tangent Kernels (NTKs)Lazy training of overcomplete MLPs results in linear changes in the weights. Let's try to see it here.We will train a wide MLP step by step with small learning rates (when the learning rate is low, it's called lazy or linear training!) and keep track of some of the weights to see how they change.Note that you can index the network layers like a list (since we defined it with ModuleList). You can access the weights and biases of an `nn.Linear` layer through its `.weight` and `.bias` attributes.
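Here is a minimal, standalone sketch of that indexing on a toy `nn.ModuleList` (not the tutorial's `Net`; the layer sizes are arbitrary):
```python
import torch.nn as nn

layers = nn.ModuleList([nn.Linear(4, 3), nn.Linear(3, 2)])  # toy stack of layers
first = layers[0]                # index the ModuleList like a list
print(first.weight.shape)        # torch.Size([3, 4])
print(first.bias.shape)          # torch.Size([3])
print(first.weight[:, 0])        # first column of the first layer's weight matrix
```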
###Code
def run_lazy_training(num_time_steps, num_select_weights, step_epoch):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the lazy training function")
###################################################################
# Define a wide MLP
net = ...
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=1e-2)
# let's save only couple of parameters at each time step
weights = torch.zeros(num_time_steps, num_select_weights)
for i in range(num_time_steps):
_, _ = train_test_classification(net, criterion, optimizer, train_loader,
test_loader, num_epochs=step_epoch, verbose=False)
# let's practice some tensor navigation!
# access the first layer weights
# and index the first column
# and slice up to num_select_weights parameters
weights[i] = ...
return weights
### Uncomment below to test your function
#num_select_weights = 10
#num_time_steps = 5
#step_epoch = 50
#weights = run_lazy_training(num_time_steps, num_select_weights, step_epoch)
#with plt.xkcd():
# for k in range(num_select_weights):
# weight = weights[:, k].detach()
# epochs = range(1, 1+num_time_steps*step_epoch, step_epoch)
# plt.plot(epochs, weight, label='weight #%d'%k)
# plt.xlabel('epochs')
# plt.legend()
# plt.show()
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MLP/solutions/W1D3_Tutorial2_Solution_6da7a021.py)*Example output:* Section 2.1: Neural Tangent Kernels (NTKs)
###Code
#@title Video 2.1: Neural Tangent Kernels
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="rjbpZCSuMgQ", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
#@markdown How could we speed up lazy training?
w3_fast_NTK = '' #@param {type:"string"}
###Output
_____no_output_____
###Markdown
--- Section 3: Deeper MLPs
###Code
#@title Video 3: Data Augmentation
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="RtcJ8gn2kj0", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
###Output
_____no_output_____
###Markdown
Exercise 3: Dataloader on a real-world datasetLet's build our first real-world dataset loader with Data Preprocessing and Augmentation! And we will use the Torchvision transforms to do it.We'd like to have a simple data augmentation with the following steps:* Random rotation of up to 10 degrees (RandomRotation)* Random horizontal flipping (RandomHorizontalFlip)and we'd like a preprocessing that:* converts images to PyTorch tensors in the range [0, 1] (ToTensor)* normalizes the input to the range [-1, 1] (Normalize)
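For reference, a generic sketch of composing such transforms (the normalization constants are illustrative and assume 3-channel images; this is not necessarily the exercise's exact answer):
```python
import torchvision.transforms as transforms

augmentation = [transforms.RandomRotation(10),      # rotate by an angle in [-10, 10] degrees
                transforms.RandomHorizontalFlip()]  # flip left-right with probability 0.5
preprocessing = [transforms.ToTensor(),             # PIL image -> float tensor in [0, 1]
                 transforms.Normalize((0.5, 0.5, 0.5),
                                      (0.5, 0.5, 0.5))]  # (x - 0.5) / 0.5 -> roughly [-1, 1]
train_transform = transforms.Compose(augmentation + preprocessing)
```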
###Code
def get_data_loaders(batch_size):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the get data loaders function")
###################################################################
# define the transform done only during training
augmentation_transforms = ...
# define the transform done in training and testing (after augmentation)
preprocessing_transforms = ...
# compose them together
train_transform = transforms.Compose(augmentation_transforms + preprocessing_transforms)
test_transform = transforms.Compose(preprocessing_transforms)
# using pathlib to be compatible with all OS's
data_path = pathlib.Path('.')/'afhq'
# define the dataset objects (they can load one by one)
img_train_dataset = ImageFolder(data_path/'train', transform=train_transform)
img_test_dataset = ImageFolder(data_path/'val', transform=test_transform)
# define the dataloader objects (they can load batch by batch)
img_train_loader = DataLoader(img_train_dataset, batch_size=batch_size,
shuffle=True, num_workers=1, worker_init_fn=seed_worker)
# num_workers can be set to higher if running on Colab Pro TPUs to speed up,
# with more than one worker, it will do multithreading to queue batches
img_test_loader = DataLoader(img_test_dataset, batch_size=batch_size,
shuffle=False, num_workers=1)
return img_train_loader, img_test_loader
### Uncomment below to test your function
#batch_size = 64
#img_train_loader, img_test_loader = get_data_loaders(batch_size)
## get some random training images
#dataiter = iter(img_train_loader)
#images, labels = dataiter.next()
## show images
#imshow(make_grid(images, nrow=8))
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MLP/solutions/W1D3_Tutorial2_Solution_7d54bdfd.py)*Example output:*
###Code
#@title Video 3.1: Classifying Animal Faces
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="JHJQZCD0mhA", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
# Train it
net = Net('ReLU()', 3*32*32, [64, 64, 64], 3).to(dev)
criterion = nn.MultiMarginLoss(margin=1.0)
optimizer = optim.Adam(net.parameters(), lr=3e-4)
_, _ = train_test_classification(net, criterion, optimizer,
img_train_loader, img_test_loader,
num_epochs=30)
#@title Video 3.2: Map Receptive Fields
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="YcnImVQtaqc", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
# visualize the feature map
fc1_weights = net.layers[0].weight.view(64, 3, 32, 32).detach().cpu()
fc1_weights /= torch.max(torch.abs(fc1_weights))
imshow(make_grid(fc1_weights, nrow=8))
#@markdown Even though it's three layers deep, we see distinct animal faces in the first-layer feature map. Do you think this MLP has a hierarchical feature representation? Why?
w3_isHierarchical = '' #@param {type:"string"}
###Output
_____no_output_____
###Markdown
--- Section 4: The need for good initialization (optional)
###Code
#@title Video 4: Initialization
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="C7NdUgg40YY", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
###Output
_____no_output_____
###Markdown
Xavier initializationLet us look at the scale distribution of an output (e.g., a hidden variable) $o_i$ for some fully-connected layer without nonlinearities. With $n_{in}$ inputs ($x_j$) and their associated weights $w_{ij}$ for this layer. Then an output is given by,$$o_{i} = \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j$$The weights $w_{ij}$ are all drawn independently from the same distribution. Furthermore, let us assume that this distribution has zero mean and variance $\sigma^2$ . Note that this does not mean that the distribution has to be Gaussian, just that the mean and variance need to exist. For now, let us assume that the inputs to the layer $x_j$ also have zero mean and variance $\gamma^2$ and that they are independent of $w_{ij}$ and independent of each other. In this case, we can compute the mean and variance of $o_i$ as follows:\begin{split}\begin{aligned} E[o_i] & = \sum_{j=1}^{n_\mathrm{in}} E[w_{ij} x_j] = \sum_{j=1}^{n_\mathrm{in}} E[w_{ij}] E[x_j] = 0, \\ \mathrm{Var}[o_i] & = E[o_i^2] - (E[o_i])^2 = \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij} x^2_j] - 0 = \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij}] E[x^2_j] = n_\mathrm{in} \sigma^2 \gamma^2\end{aligned}\end{split}One way to keep the variance fixed is to set $n_{in}\sigma^2=1$ . Now consider backpropagation. There we face a similar problem, albeit with gradients being propagated from the layers closer to the output. Using the same reasoning as for forward propagation, we see that the gradients’ variance can blow up unless $n_{out}\sigma^2=1$ , where $n_{out}$ is the number of outputs of this layer. This leaves us in a dilemma: we cannot possibly satisfy both conditions simultaneously. Instead, we simply try to satisfy:\begin{aligned}\frac{1}{2} (n_\mathrm{in} + n_\mathrm{out}) \sigma^2 = 1 \text{ or equivalently }\sigma = \sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}\end{aligned}This is the reasoning underlying the now-standard and practically beneficial Xavier initialization, named after the first author of its creators [Glorot & Bengio, 2010]. Typically, the Xavier initialization samples weights from a Gaussian distribution with zero mean and variance $\sigma^2=\frac{2}{(n_{in}+n_{out})}$. We can also adapt Xavier’s intuition to choose the variance when sampling weights from a uniform distribution. Note that the uniform distribution $U(−a,a)$ has variance $\frac{a^2}{3}$. Plugging this into our condition on $\sigma^2$ yields the suggestion to initialize according to$$U\left(-\sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}, \sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}\right)$$This explanation is mainly taken from [here](https://d2l.ai/chapter_multilayer-perceptrons/numerical-stability-and-init.html). 
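A small numerical sketch of this variance argument (the layer sizes below are arbitrary; with $n_{in}=n_{out}$ the Xavier choice preserves the variance exactly):
```python
import torch

torch.manual_seed(0)
n_in, n_out = 256, 256                 # arbitrary; equal sizes make the check exact
sigma = (2.0 / (n_in + n_out)) ** 0.5  # Xavier standard deviation
W = torch.randn(n_out, n_in) * sigma   # zero-mean weights with variance sigma^2
x = torch.randn(10_000, n_in)          # zero-mean inputs with unit variance (gamma = 1)
o = x @ W.T                            # o_i = sum_j w_ij x_j
print(x.var().item(), o.var().item())  # both close to 1.0
```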
Initialization with transfer functionLet's derive the optimal gain for LeakyReLU by following similar steps,$$f(x)=\left\{\begin{array}{ll}a x & \text { for } x<0 \\x & \text { for } x \geq 0\end{array}\right.$$Considering a single layer with this activation, the expectation of the output is still zero, but the variance changes; assuming the probability $P(x < 0) = 0.5$,\begin{split}\begin{aligned} \mathrm{Var}[f(o_i)] = E[f(o_i)^2] & = \frac{\mathrm{Var}[o_i] + a^2 \mathrm{Var}[o_i]}{2} = \frac{1+a^2}{2}n_\mathrm{in} \sigma^2 \gamma^2\end{aligned}\end{split}Therefore, following the rest of the derivation as before,$$\sigma = gain\sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}, \quad gain = \sqrt{\frac{2}{1+a^2}}$$ Exercise 4: Best gain for Xavier Initialization with Leaky ReLUYou're probably running out of time, so let me explain what's happening here. We derived a theoretical gain for initialization. But does it hold in practice? Here we have a setup to confirm our finding. We will try a range of gains, find the empirical optimum, and see whether it matches our theoretical value!
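Before running the sweep, the derived formula can be cross-checked against PyTorch's own gain helper (a quick sketch using the same negative slope as the cell below):
```python
import numpy as np
from torch.nn import init

negative_slope = 0.1
gain_derived = np.sqrt(2.0 / (1 + negative_slope ** 2))
gain_pytorch = init.calculate_gain('leaky_relu', negative_slope)
print(gain_derived, gain_pytorch)  # both ~ 1.407
```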
###Code
N = 10 # number of trials
gains = np.linspace(1/N, 3.0, N)
test_accs = []
train_accs = []
for gain in gains:
def init_weights(m):
if type(m) == nn.Linear:
torch.nn.init.xavier_normal_(m.weight, gain)
# torch.nn.init.xavier_uniform_(m.weight, gain)
negative_slope = 0.1
actv = 'LeakyReLU(%f)'%negative_slope
net = Net(actv, 3*32*32, [128, 64, 32], 3).to(dev)
net.apply(init_weights)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=1e-2)
train_acc, test_acc = train_test_classification(net, criterion, optimizer,
img_train_loader, img_test_loader,
num_epochs=1, verbose=True)
test_accs += [test_acc]
train_accs += [train_acc]
best_gain = gains[np.argmax(train_accs)]
plt.plot(gains, test_accs, label='Test acc')
plt.plot(gains, train_accs, label='Train acc')
plt.scatter(best_gain, max(train_accs), label='best gain = %.1f'%best_gain, c='r')
theoretical_gain = np.sqrt(2.0 / (1 + negative_slope ** 2))
plt.scatter(theoretical_gain, max(train_accs), label='theoretical gain = %.2f'%theoretical_gain, c='g')
plt.legend()
plt.plot()
###Output
_____no_output_____
###Markdown
--- Conclusion
###Code
#@title Video 5: Wrapping Up Day 3
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="Jzc9Ua0isYI", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
###Output
_____no_output_____
###Markdown
Submit your responsesPlease run the following cell and then press "Submit" so we can record your responses.
###Code
import time
import numpy as np
from IPython.display import IFrame
#@markdown #Run Cell to Show Airtable Form
#@markdown ##**Confirm your answers and then click "Submit"**
def prefill_form(src, fields: dict):
'''
src: the original src url to embed the form
fields: a dictionary of field:value pairs,
e.g. {"pennkey": my_pennkey, "location": my_location}
'''
prefills = "&".join(["prefill_%s=%s"%(key, fields[key]) for key in fields])
src = src + prefills
src = "+".join(src.split(" "))
return src
#autofill time if it is not present
try: t1;
except NameError: t1 = time.time()
try: t2;
except NameError: t2 = time.time()
try: t3;
except NameError: t3 = time.time()
try: t4;
except NameError: t4 = time.time()
try: t5;
except NameError: t5 = time.time()
try: t6;
except NameError: t6 = time.time()
try: t7;
except NameError: t7 = time.time()
#autofill fields if they are not present
#a missing pennkey and pod will result in an Airtable warning
#which is easily fixed user-side.
try: my_pennkey;
except NameError: my_pennkey = ""
try: my_pod;
except NameError: my_pod = "Select"
try: w3_with_non_linear_loss;
except NameError: w3_with_non_linear_loss = ""
try: w3_poly_OoD;
except NameError: w3_poly_OoD = ""
try: w3_why_three;
except NameError: w3_why_three = ""
try: w3_fast_NTK;
except NameError: w3_fast_NTK = ""
try: w3_isHierarchical;
except NameError: w3_isHierarchical = ""
try: w3_whichActiv;
except NameError: w3_whichActiv = ""
times = np.array([t2,t3,t4,t5,t6,t7])-t1
fields = {"pennkey": my_pennkey,
"pod": my_pod,
"w3_with_non_linear_loss":w3_with_non_linear_loss,
"w3_poly_OoD": w3_poly_OoD,
"w3_why_three":w3_why_three,
"w3_fast_NTK": w3_fast_NTK,
"w3_isHierarchical":w3_isHierarchical,
"w3_whichActiv":w3_whichActiv,
"cumulative_times": times}
src = "https://airtable.com/embed/shrElsLHM2gDYmFnl?"
#now instead of the original source url, we do: src = prefill_form(src, fields)
display(IFrame(src = prefill_form(src, fields), width = 800, height = 400))
###Output
_____no_output_____
###Markdown
FeedbackHow could this session have been better? How happy are you in your group? How do you feel right now?Feel free to use the embedded form below or use this link:https://airtable.com/shrNSJ5ECXhNhsYss
###Code
display(IFrame(src="https://airtable.com/embed/shrNSJ5ECXhNhsYss?backgroundColor=red", width = 800, height = 400))
###Output
_____no_output_____
###Markdown
Tutorial 2: Deep MLPs**Week 1, Day 3: Multi Layer Perceptrons****By Neuromatch Academy**__Content creators:__ Arash Ash, Surya Ganguli__Content reviewers:__ Saeed Salehi, Felix Bartsch, Yu-Fang Yang, Melvin Selim Atay, Kelson Shilling-Scrivo__Content editors:__ Gagana B, Kelson Shilling-Scrivo, Spiros Chavlis__Production editors:__ Anoop Kulkarni, Kelson Shilling-Scrivo, Spiros Chavlis **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial ObjectivesIn this tutorial, we will dive deeper into MLPs and see more of their mathematical and practical aspects. Today we are going to see why MLPs:* can be deep or wide* dependent on transfer functions* sensitive to initialization
###Code
# @title Tutorial slides
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/ed65b/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
These are the slides for all videos in this tutorial. If you want to locally download the slides, click [here](https://osf.io/ed65b/download). --- SetupThis is a GPU free notebook!
###Code
# @title Install dependencies
!pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
from evaltools.airtable import AirtableForm
atform = AirtableForm('appn7VdPRseSoMXEG','W1D3_T2','https://portal.neuromatchacademy.org/api/redirect/to/49e16345-65a5-4616-ba63-568ca06cab78')
# Imports
import pathlib
import torch
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.optim as optim
from torchvision.utils import make_grid
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader, TensorDataset
from tqdm.auto import tqdm
from IPython.display import display
# @title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
my_layout = widgets.Layout()
# @title Helper functions (MLP Tutorial 1 Codes)
# @markdown `Net(nn.Module)`
class Net(nn.Module):
def __init__(self, actv, input_feature_num, hidden_unit_nums, output_feature_num):
super(Net, self).__init__()
self.input_feature_num = input_feature_num # save the input size for reshaping later
self.mlp = nn.Sequential() # Initialize layers of MLP
in_num = input_feature_num # initialize the temporary input feature to each layer
for i in range(len(hidden_unit_nums)): # Loop over layers and create each one
out_num = hidden_unit_nums[i] # assign the current layer hidden unit from list
layer = nn.Linear(in_num, out_num) # use nn.Linear to define the layer
in_num = out_num # assign next layer input using current layer output
self.mlp.add_module(f"Linear_{i}", layer) # append layer to the model with a name
actv_layer = eval(f"nn.{actv}") # Assign activation function (eval allows us to instantiate object from string)
self.mlp.add_module(f"Activation_{i}", actv_layer) # append activation to the model with a name
out_layer = nn.Linear(in_num, output_feature_num) # Create final layer
self.mlp.add_module('Output_Linear', out_layer) # append the final layer
def forward(self, x):
# reshape inputs to (batch_size, input_feature_num)
# just in case the input vector is not 2D, like an image!
x = x.view(-1, self.input_feature_num)
logits = self.mlp(x) # forward pass of MLP
return logits
# @markdown `train_test_classification(net, criterion, optimizer, train_loader, test_loader, num_epochs=1, verbose=True, training_plot=False)`
def train_test_classification(net, criterion, optimizer, train_loader,
test_loader, num_epochs=1, verbose=True,
training_plot=False, device='cpu'):
net.to(device)
net.train()
training_losses = []
for epoch in tqdm(range(num_epochs)): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(train_loader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs = inputs.to(device).float()
labels = labels.to(device).long()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
if verbose:
training_losses += [loss.item()]
net.eval()
def test(data_loader):
correct = 0
total = 0
for data in data_loader:
inputs, labels = data
inputs = inputs.to(device).float()
labels = labels.to(device).long()
outputs = net(inputs)
_, predicted = torch.max(outputs, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
acc = 100 * correct / total
return total, acc
train_total, train_acc = test(train_loader)
test_total, test_acc = test(test_loader)
if verbose:
print(f'\nAccuracy on the {train_total} training samples: {train_acc:0.2f}')
print(f'Accuracy on the {test_total} testing samples: {test_acc:0.2f}\n')
if training_plot:
plt.plot(training_losses)
plt.xlabel('Batch')
plt.ylabel('Training loss')
plt.show()
return train_acc, test_acc
# @markdown `shuffle_and_split_data(X, y, seed)`
def shuffle_and_split_data(X, y, seed):
# set seed for reproducibility
torch.manual_seed(seed)
# Number of samples
N = X.shape[0]
# Shuffle data
shuffled_indices = torch.randperm(N) # get indices to shuffle data, could use torch.randperm
X = X[shuffled_indices]
y = y[shuffled_indices]
# Split data into train/test
test_size = int(0.2 * N) # assign test dataset size using 20% of samples
X_test = X[:test_size]
y_test = y[:test_size]
X_train = X[test_size:]
y_train = y[test_size:]
return X_test, y_test, X_train, y_train
# @title Plotting functions
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.axis(False)
plt.show()
def sample_grid(M=500, x_max=2.0):
ii, jj = torch.meshgrid(torch.linspace(-x_max, x_max,M),
torch.linspace(-x_max, x_max, M))
X_all = torch.cat([ii.unsqueeze(-1),
jj.unsqueeze(-1)],
dim=-1).view(-1, 2)
return X_all
def plot_decision_map(X_all, y_pred, X_test, y_test,
M=500, x_max=2.0, eps=1e-3):
decision_map = torch.argmax(y_pred, dim=1)
for i in range(len(X_test)):
indeces = (X_all[:, 0] - X_test[i, 0])**2 + (X_all[:, 1] - X_test[i, 1])**2 < eps # [TO-DO]
decision_map[indeces] = (K + y_test[i]).long()
decision_map = decision_map.view(M, M).cpu()
plt.imshow(decision_map, extent=[-x_max, x_max, -x_max, x_max], cmap='jet')
plt.plot()
# @title Set random seed
# @markdown Executing `set_seed(seed=seed)` you are setting the seed
# for DL it's critical to set the random seed so that students can have a
# baseline to compare their results to expected results.
# Read more here: https://pytorch.org/docs/stable/notes/randomness.html
# Call `set_seed` function in the exercises to ensure reproducibility.
import random
import torch
def set_seed(seed=None, seed_torch=True):
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
# In case that `DataLoader` is used
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
# @title Set device (GPU or CPU). Execute `set_device()`
# especially if torch modules used.
# inform the user if the notebook uses GPU or CPU.
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("GPU is not enabled in this notebook. \n"
"If you want to enable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `GPU` from the dropdown menu")
else:
print("GPU is enabled in this notebook. \n"
"If you want to disable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `None` from the dropdown menu")
return device
SEED = 2021
set_seed(seed=SEED)
DEVICE = set_device()
# @title Download of the Animal Faces dataset
# @markdown Animal faces consists of 16,130 32x32 images belonging to 3 classes
import requests, os
from zipfile import ZipFile
print("Start downloading and unzipping `AnimalFaces` dataset...")
name = 'AnimalFaces32x32'
fname = f"{name}.zip"
url = f"https://osf.io/kgfvj/download"
r = requests.get(url, allow_redirects=True)
with open(fname, 'wb') as fh:
fh.write(r.content)
with ZipFile(fname, 'r') as zfile:
zfile.extractall(f"./{name}")
if os.path.exists(fname):
os.remove(fname)
else:
print(f"The file {fname} does not exist")
os.chdir(name)
print("Download completed.")
# @title Data Loader
# @markdown Execute this cell!
K = 4
sigma = 0.4
N = 1000
t = torch.linspace(0, 1, N)
X = torch.zeros(K*N, 2)
y = torch.zeros(K*N)
for k in range(K):
X[k*N:(k+1)*N, 0] = t*(torch.sin(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
X[k*N:(k+1)*N, 1] = t*(torch.cos(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
y[k*N:(k+1)*N] = k
X_test, y_test, X_train, y_train = shuffle_and_split_data(X, y, seed=SEED)
# DataLoader with random seed
batch_size = 128
g_seed = torch.Generator()
g_seed.manual_seed(SEED)
test_data = TensorDataset(X_test, y_test)
test_loader = DataLoader(test_data, batch_size=batch_size,
shuffle=False, num_workers=0,
worker_init_fn=seed_worker,
generator=g_seed,
)
train_data = TensorDataset(X_train, y_train)
train_loader = DataLoader(train_data,
batch_size=batch_size,
drop_last=True,
shuffle=True,
worker_init_fn=seed_worker,
generator=g_seed,
)
###Output
_____no_output_____
###Markdown
--- Section 1: Wider vs deeper networks*Time estimate: ~45 mins*
###Code
# @title Video 1: Deep Expressivity
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV19f4y157vG", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"g8JuGrNk9ag", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 1: Deep Expressivity')
display(out)
###Output
_____no_output_____
###Markdown
Coding Exercise 1: Wide vs. Deep while keeping the number of parameters the sameLet's find the optimal number of hidden layers under a fixed parameter-count constraint!But first, we need a model parameter counter. You could iterate over the model layers by calling `.parameters()` and then use `.numel()` to count each layer's parameters. Also, you can use the [`requires_grad`](https://pytorch.org/docs/stable/notes/autograd.html) attribute to make sure you only count trainable parameters. E.g.,
```python
x = torch.ones(10, 5, requires_grad=True)
```
After defining the counter function, we will increase the depth step by step and, for each depth, iterate over the possible numbers of hidden units (assumed to be the same for all hidden layers); then, using our parameter counter, we choose the number of hidden units that brings the total close to `max_par_count` parameters.
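Another small, standalone illustration (again a toy layer with arbitrary sizes, not the exercise solution): freezing a parameter with `requires_grad_(False)` removes it from the trainable count.
```python
import torch.nn as nn

def count_trainable(m):
    return sum(p.numel() for p in m.parameters() if p.requires_grad)

layer = nn.Linear(8, 4)           # toy layer: 8 * 4 weights + 4 biases = 36 parameters
print(count_trainable(layer))     # 36
layer.bias.requires_grad_(False)  # freeze the bias
print(count_trainable(layer))     # 32 -- frozen parameters are no longer counted
```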
###Code
def run_depth_optimizer(max_par_count, max_hidden_layer, device):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the depth optimizer function")
###################################################################
def count_parameters(model):
par_count = 0
for p in model.parameters():
if p.requires_grad:
par_count += ...
return par_count
# number of hidden layers to try
hidden_layers = ...
# test score list
test_scores = []
for hidden_layer in hidden_layers:
# Initialize the hidden units in each hidden layer to be 1
hidden_units = np.ones(hidden_layer, dtype=int)  # use builtin int (np.int is deprecated)
# Define the net with hidden units equal to 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(device)
par_count = count_parameters(wide_net)
# increment hidden_units and repeat until the par_count reaches the desired count
while par_count < max_par_count:
hidden_units += 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(device)
par_count = ...
# Train it
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(wide_net.parameters(), lr=1e-3)
_, test_acc = train_test_classification(wide_net, criterion, optimizer,
train_loader, test_loader,
num_epochs=100, device=device)
test_scores += [test_acc]
return hidden_layers, test_scores
# add event to airtable
atform.add_event('Coding Exercise 1: Wide vs. Deep ')
set_seed(seed=SEED)
max_par_count = 100
max_hidden_layer = 5
## Uncomment below to test your function
# hidden_layers, test_scores = run_depth_optimizer(max_par_count, max_hidden_layer, DEVICE)
# plt.xlabel('# of hidden layers')
# plt.ylabel('Test accuracy')
# plt.plot(hidden_layers, test_scores)
# plt.show()
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_fd18a44e.py)*Example output:* Think! 1: Why the tradeoff?Here we see that there is a particular number of hidden layers that is optimal. Why do you think increasing the number of hidden layers beyond a certain point hurts in this scenario?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type answer here and Push submit',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
atform.add_answer('q1' , text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_4c626e50.py) Section 1.1: Where Wide FailsLet's use the same Spiral dataset generated before with two features. And then add more polynomial features (which makes the first layer wider). And finally, train a single Linear layer. We could use the same MLP network with no hidden layers (though it would not be called an MLP anymore!).Note that we will add polynomial terms up to $P=50$, which means that for every $x_1^n x_2^m$ term, $n+m\leq P$. Now it's a fun math exercise to prove why the total number of polynomial features up to degree $P$ becomes,\begin{equation}\#\text{ of terms} = \frac{(P+1)(P+2)}{2}\end{equation}Also, we don't need the polynomial term with degree zero (which is the constant term) since `nn.Linear` layers have bias terms. Therefore we will have one fewer polynomial feature.
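A tiny standalone check of this formula for a small degree (independent of the training code below):
```python
P = 3
terms = [(i, j) for i in range(P + 1) for j in range(P + 1) if 0 < i + j <= P]
print(len(terms), (P + 1) * (P + 2) // 2 - 1)  # 9 9
```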
###Code
def run_poly_clasification(poly_degree, device='cpu', seed=0):
def make_poly_features(poly_degree, X):
# Define the number of polynomial features except the bias term
num_features = (poly_degree + 1)*(poly_degree + 2) // 2 - 1
poly_X = torch.zeros((X.shape[0], num_features))
count = 0
for i in range(poly_degree+1):
for j in range(poly_degree+1):
# no need to add zero degree since model has biases
if j + i > 0:
if j + i <= poly_degree:
# Define the polynomial term
poly_X[:, count] = X[:, 0]**i * X[:, 1]**j
count += 1
return poly_X, num_features
poly_X_test, num_features = make_poly_features(poly_degree, X_test)
poly_X_train, _ = make_poly_features(poly_degree, X_train)
batch_size = 128
g_seed = torch.Generator()
g_seed.manual_seed(seed)
poly_test_data = TensorDataset(poly_X_test, y_test)
poly_test_loader = DataLoader(poly_test_data,
batch_size=batch_size,
shuffle=False,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
poly_train_data = TensorDataset(poly_X_train, y_train)
poly_train_loader = DataLoader(poly_train_data,
batch_size=batch_size,
shuffle=True,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
# define a linear model using MLP class
poly_net = Net('ReLU()', num_features, [], K).to(device)
# Train it!
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(poly_net.parameters(), lr=1e-3)
_, _ = train_test_classification(poly_net, criterion, optimizer,
poly_train_loader, poly_test_loader,
num_epochs=100, device=DEVICE)
# Test it
X_all = sample_grid().to(device)
poly_X_all, _ = make_poly_features(poly_degree, X_all)
y_pred = poly_net(poly_X_all.to(device))
# Plot it
plot_decision_map(X_all.cpu(), y_pred.cpu(), X_test.cpu(), y_test.cpu())
plt.show()
return num_features
set_seed(seed=SEED)
max_poly_degree = 50
num_features = run_poly_clasification(max_poly_degree, DEVICE, SEED)
print(f'Number of features: {num_features}')
###Output
_____no_output_____
###Markdown
Think! 1.1: Does it generalize well?Do you think this model is performing well outside its training distribution? Why?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type your answer here and click on `Submit!`',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
atform.add_answer('q2', text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_13c53198.py) --- Section 2: Deeper MLPs*Time estimate: ~55 mins*
###Code
# @title Video 2: Case study
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1FL411n7SH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"3g_OJ6dYE8E", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 2: Case study')
display(out)
###Output
_____no_output_____
###Markdown
Coding Exercise 2: Dataloader on a real-world datasetLet's build our first real-world dataset loader with Data Preprocessing and Augmentation! And we will use the Torchvision transforms to do it.We'd like to have a simple data augmentation with the following steps:* Random rotation of up to $10$ degrees (`.RandomRotation`)* Random horizontal flipping (`.RandomHorizontalFlip`)and we'd like a preprocessing that:* converts images to PyTorch tensors in the range $[0, 1]$ (`.ToTensor`)* normalizes the input to the range $[-1, 1]$ (`.Normalize`)**Hint:** For more info on transforms, see the [official documentation](https://pytorch.org/vision/stable/transforms.html).
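As a small standalone sketch of what the preprocessing does to an image, here it is applied to a random dummy RGB array rather than the AFHQ data (the normalization constants are illustrative):
```python
import numpy as np
from PIL import Image
import torchvision.transforms as transforms

dummy = Image.fromarray(np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8))  # fake RGB image
preprocess = transforms.Compose([transforms.ToTensor(),                   # -> float tensor in [0, 1]
                                 transforms.Normalize((0.5, 0.5, 0.5),
                                                      (0.5, 0.5, 0.5))])  # -> roughly [-1, 1]
x = preprocess(dummy)
print(x.shape, x.min().item(), x.max().item())  # torch.Size([3, 32, 32]) and values within [-1, 1]
```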
###Code
def get_data_loaders(batch_size, seed):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the get data loaders function")
###################################################################
# define the transform done only during training
augmentation_transforms = ...
# define the transform done in training and testing (after augmentation)
preprocessing_transforms = ...
# compose them together
train_transform = transforms.Compose(augmentation_transforms + preprocessing_transforms)
test_transform = transforms.Compose(preprocessing_transforms)
# using pathlib to be compatible with all OS's
data_path = pathlib.Path('.')/'afhq'
# define the dataset objects (they can load one by one)
img_train_dataset = ImageFolder(data_path/'train', transform=train_transform)
img_test_dataset = ImageFolder(data_path/'val', transform=test_transform)
g_seed = torch.Generator()
g_seed.manual_seed(seed)
# define the dataloader objects (they can load batch by batch)
img_train_loader = DataLoader(img_train_dataset,
batch_size=batch_size,
shuffle=True,
worker_init_fn=seed_worker,
generator=g_seed)
# num_workers can be set to higher if running on Colab Pro TPUs to speed up,
# with more than one worker, it will do multithreading to queue batches
img_test_loader = DataLoader(img_test_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
return img_train_loader, img_test_loader
# add event to airtable
atform.add_event('Coding Exercise 2: Dataloader on a real-world dataset')
batch_size = 64
set_seed(seed=SEED)
## Uncomment below to test your function
# img_train_loader, img_test_loader = get_data_loaders(batch_size, SEED)
## get some random training images
# dataiter = iter(img_train_loader)
# images, labels = dataiter.next()
## show images
# imshow(make_grid(images, nrow=8))
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_5c80bef1.py)*Example output:*
###Code
# Train it
set_seed(seed=SEED)
net = Net('ReLU()', 3*32*32, [64, 64, 64], 3).to(DEVICE)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=3e-4)
_, _ = train_test_classification(net, criterion, optimizer,
img_train_loader, img_test_loader,
num_epochs=30, device=DEVICE)
# visualize the feature map
fc1_weights = net.mlp[0].weight.view(64, 3, 32, 32).detach().cpu()
fc1_weights /= torch.max(torch.abs(fc1_weights))
imshow(make_grid(fc1_weights, nrow=8))
###Output
_____no_output_____
###Markdown
Think! 2: Why are the first-layer features high level?Even though it's three layers deep, we see distinct animal faces in the first-layer feature map. Do you think this MLP has a hierarchical feature representation? Why?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type your answer here and click on `Submit!`',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
atform.add_answer('q3', text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_eb2e554f.py) --- Section 3: Ethical aspects*Time estimate: ~20 mins*
###Code
# @title Video 3: Ethics: Hype in AI
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1CP4y1s712", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ou35QzsKsdc", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 3: Ethics: Hype in AI')
display(out)
###Output
_____no_output_____
###Markdown
--- Summary In the second tutorial of this day, we have dived deeper into MLPs and seen more of their mathematical and practical aspects. More specifically, we have learned about different architectures, i.e., deep, wide, and how they are dependent on the transfer function used. Also, we have learned about the importance of initialization, and we mathematically analyzed two methods for smart initialization.
###Code
# @title Video 4: Outro
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Kb4y1r76G", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"2sEPw4sSfSw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 4: Outro')
display(out)
# @title Airtable Submission Link
from IPython import display as IPydisplay
IPydisplay.HTML(
f"""
<div>
<a href= "{atform.url()}" target="_blank">
<img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/SurveyButton.png?raw=1"
alt="button link end of day Survey" style="width:410px"></a>
</div>""" )
###Output
_____no_output_____
###Markdown
--- Bonus: The need for good initializationIn this section, we derive principles for initializing deep networks. We will see that if the weights are too large, then the forward propagation of signals will be chaotic, and the backpropagation of error gradients will explode. On the other hand, if the weights are too small, the forward propagation of signals will be ordered, and the backpropagation of error gradients will vanish. The key idea behind initialization is to choose the weights to be just right, i.e., at the edge between order and chaos. In this section, we derive this edge and show how to compute the correct initial variance of the weights. Many of the typical initialization schemes in existing deep learning frameworks implicitly employ this principle of initialization at the edge of chaos. So this section can be safely skipped on a first pass and **is a bonus section**.
###Code
# @title Video 5: Need for Good Initialization
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Qq4y1H7Px", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"W0V2kwHSuUI", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 5: Need for Good Initialization')
display(out)
###Output
_____no_output_____
###Markdown
Xavier initializationLet us look at the scale distribution of an output (e.g., a hidden variable) $o_i$ for some fully-connected layer without nonlinearities. With $n_{in}$ inputs ($x_j$) and their associated weights $w_{ij}$ for this layer. Then an output is given by,\begin{equation} o_{i} = \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j\end{equation}The weights $w_{ij}$ are all drawn independently from the same distribution. Furthermore, let us assume that this distribution has zero mean and variance $\sigma^2$. Note that this does not mean that the distribution has to be Gaussian, just that the mean and variance need to exist. For now, let us assume that the inputs to the layer $x_j$ also have zero mean and variance $\gamma^2$ and that they are independent of $w_{ij}$ and independent of each other. In this case, we can compute the mean and variance of $o_i$ as follows:\begin{align} E[o_i] &= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij} x_j] \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij}] E[x_j] = 0, \\ \\ \\ \mathrm{Var}[o_i] &= E[o_i^2] - (E[o_i])^2 \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij} x^2_j] - 0 \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij}] E[x^2_j] \\ \\ &= n_\mathrm{in} \sigma^2 \gamma^2\end{align}One way to keep the variance fixed is to set $n_{in}\sigma^2=1$ . Now consider backpropagation. There we face a similar problem, albeit with gradients being propagated from the layers closer to the output. Using the same reasoning as for forward propagation, we see that the gradients’ variance can blow up unless $n_{out}\sigma^2=1$ , where $n_{out}$ is the number of outputs of this layer. This leaves us in a dilemma: we cannot possibly satisfy both conditions simultaneously. Instead, we simply try to satisfy:\begin{align}\frac{1}{2} (n_\mathrm{in} + n_\mathrm{out}) \sigma^2 = 1 \text{ or equivalently }\sigma = \sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}\end{align}This is the reasoning underlying the now-standard and practically beneficial Xavier initialization, named after the first author of its creators [Glorot and Bengio, 2010](https://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf). Typically, the Xavier initialization samples weights from a Gaussian distribution with zero mean and variance $\sigma^2=\frac{2}{(n_{in}+n_{out})}$,\begin{equation} w_{ij} \sim \mathcal{N} \left (\mu=0, \sigma=\sqrt{\frac{2}{(n_{in}+n_{out})}} \right)\end{equation}We can also adapt Xavier’s intuition to choose the variance when sampling weights from a uniform distribution. Note that the uniform distribution $\mathcal{U}(−a,a)$ has variance $\frac{a^2}{3}$. Plugging this into our condition on $\sigma^2$ yields the suggestion to initialize according to\begin{equation}w_{ij} \sim \mathcal{U} \left(-\sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}, \sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}\right)\end{equation}This explanation is mainly taken from [here](https://d2l.ai/chapter_multilayer-perceptrons/numerical-stability-and-init.html). If you want to see more about initializations and their differences see [here](https://www.deeplearning.ai/ai-notes/initialization/). 
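A small numerical sketch of the uniform variant (the layer sizes below are arbitrary): samples from $\mathcal{U}(-a, a)$ with $a=\sqrt{6/(n_\mathrm{in}+n_\mathrm{out})}$ indeed have variance $2/(n_\mathrm{in}+n_\mathrm{out})$.
```python
import torch

torch.manual_seed(0)
n_in, n_out = 128, 64                        # arbitrary layer sizes
a = (6.0 / (n_in + n_out)) ** 0.5            # Xavier uniform bound
w = torch.empty(100_000).uniform_(-a, a)     # samples from U(-a, a)
print(w.var().item(), 2.0 / (n_in + n_out))  # empirical variance ~ a^2 / 3 = 2 / (n_in + n_out)
```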
Initialization with transfer functionLet's derive the optimal gain for LeakyReLU following similar steps.LeakyReLU is described mathematically:\begin{equation}f(x)=\left\{ \begin{array}{ll} \alpha \cdot x & \text { for } x<0 \\ x & \text { for } x \geq 0 \end{array}\right.\end{equation}where $\alpha$ controls the angle of the negative slope.Considering a single layer with this activation function gives,\begin{align}o_{i} &= \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j\\z_{i} &= f\left( o_{i} \right)\end{align}where $z_i$ denotes the activation of node $i$.The expectation of the output is still zero, i.e., $\mathbb{E}[f(o_i)]=0$, but the variance changes, and assuming that the probability $P(x < 0) = 0.5$, we have that:\begin{align}\mathrm{Var}[f(o_i)] &= \mathbb{E}[f(o_i)^2] - \left( \mathbb{E}[f(o_i)] \right)^{2} \\ \\&= \frac{\mathrm{Var}[o_i] + \alpha^2 \mathrm{Var}[o_i]}{2} \\ \\&= \frac{1+\alpha^2}{2}n_\mathrm{in} \sigma^2 \gamma^2\end{align}where $\gamma^2$ is the variance of the distribution of the inputs $x_j$ and $\sigma^2$ is the variance of the distribution of the weights $w_{ij}$, as before.Therefore, following the rest of the derivation as before,\begin{equation}\sigma = gain\sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}, \, \text{where} \,\, gain = \sqrt{\frac{2}{1+\alpha^2}}\end{equation}As we can see from the derived formula of $\sigma$, the transfer function we choose is related to the variance of the distribution of the weights. As the negative slope of the LeakyReLU $\alpha$ becomes larger, the $gain$ becomes smaller and thus, the distribution of the weights is narrower. On the other hand, as $\alpha$ becomes smaller and smaller, the distribution of the weights is wider. Recall that we initialize our weights, for example, by sampling from a normal distribution with zero mean and variance $\sigma^2$. Best gain for Xavier Initialization with Leaky ReLUYou're probably running out of time, so let me explain what's happening here. We derived a theoretical gain for initialization. But does it hold in practice? Here we have a setup to confirm our finding. We will try a range of gains, find the empirical optimum, and see whether it matches our theoretical value!If you have time left, you can change the distribution to sample the initial weights from a uniform distribution. Comment out line 11 and uncomment line 12.
###Code
N = 10 # number of trials
gains = np.linspace(1/N, 3.0, N)
test_accs = []
train_accs = []
mode = 'uniform'
for gain in gains:
print(f'\ngain: {gain}')
def init_weights(m, mode='normal'):
if type(m) == nn.Linear:
torch.nn.init.xavier_normal_(m.weight, gain)
# torch.nn.init.xavier_uniform_(m.weight, gain)
negative_slope = 0.1
actv = f'LeakyReLU({negative_slope})'
set_seed(seed=SEED)
net = Net(actv, 3*32*32, [128, 64, 32], 3).to(DEVICE)
net.apply(init_weights)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=1e-2)
train_acc, test_acc = train_test_classification(net, criterion, optimizer,
img_train_loader,
img_test_loader,
num_epochs=1,
verbose=True,
device=DEVICE)
test_accs += [test_acc]
train_accs += [train_acc]
best_gain = gains[np.argmax(train_accs)]
plt.plot(gains, test_accs, label='Test accuracy')
plt.plot(gains, train_accs, label='Train accuracy')
plt.scatter(best_gain, max(train_accs),
label=f'best gain={best_gain:.1f}',
c='k', marker ='x')
# calculate and plot the theoretical gain
theoretical_gain = np.sqrt(2.0 / (1 + negative_slope ** 2))
plt.scatter(theoretical_gain, max(train_accs),
label=f'theoretical gain={theoretical_gain:.2f}',
c='g', marker ='x')
plt.legend()
plt.plot()
###Output
_____no_output_____
###Markdown
Tutorial 2: Deep MLPs**Week 1, Day 3: Multi Layer Perceptrons****By Neuromatch Academy**__Content creators:__ Arash Ash, Surya Ganguli__Content reviewers:__ Saeed Salehi, Felix Bartsch, Yu-Fang Yang, Melvin Selim Atay, Kelson Shilling-Scrivo__Content editors:__ Gagana B, Kelson Shilling-Scrivo, Spiros Chavlis__Production editors:__ Anoop Kulkarni, Kelson Shilling-Scrivo, Spiros Chavlis **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial ObjectivesIn this tutorial, we will dive deeper into MLPs and see more of their mathematical and practical aspects. Today we are going to see why MLPs:* can be deep or wide* dependent on transfer functions* sensitive to initialization
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in all tutorials today
# @markdown If you want to locally download the slides, click [here](https://osf.io/ed65b/download)
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/ed65b/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
--- SetupThis is a GPU free notebook!
###Code
# @title Install dependencies
!pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
from evaltools.airtable import AirtableForm
atform = AirtableForm('appn7VdPRseSoMXEG','W1D3_T2','https://portal.neuromatchacademy.org/api/redirect/to/49e16345-65a5-4616-ba63-568ca06cab78')
# Imports
import pathlib
import torch
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.optim as optim
from torchvision.utils import make_grid
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader, TensorDataset
from tqdm.auto import tqdm
from IPython.display import display
# @title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
my_layout = widgets.Layout()
# @title Helper functions (MLP Tutorial 1 Codes)
# @markdown `Net(nn.Module)`
class Net(nn.Module):
def __init__(self, actv, input_feature_num, hidden_unit_nums, output_feature_num):
super(Net, self).__init__()
self.input_feature_num = input_feature_num # save the input size for reshapinng later
self.mlp = nn.Sequential() # Initialize layers of MLP
in_num = input_feature_num # initialize the temporary input feature to each layer
for i in range(len(hidden_unit_nums)): # Loop over layers and create each one
out_num = hidden_unit_nums[i] # assign the current layer hidden unit from list
layer = nn.Linear(in_num, out_num) # use nn.Linear to define the layer
in_num = out_num # assign next layer input using current layer output
self.mlp.add_module(f"Linear_{i}", layer) # append layer to the model with a name
actv_layer = eval(f"nn.{actv}") # Assign activation function (eval allows us to instantiate object from string)
self.mlp.add_module(f"Activation_{i}", actv_layer) # append activation to the model with a name
out_layer = nn.Linear(in_num, output_feature_num) # Create final layer
self.mlp.add_module('Output_Linear', out_layer) # append the final layer
def forward(self, x):
# reshape inputs to (batch_size, input_feature_num)
# just in case the input vector is not 2D, like an image!
x = x.view(-1, self.input_feature_num)
logits = self.mlp(x) # forward pass of MLP
return logits
# @markdown `train_test_classification(net, criterion, optimizer, train_loader, test_loader, num_epochs=1, verbose=True, training_plot=False)`
def train_test_classification(net, criterion, optimizer, train_loader,
test_loader, num_epochs=1, verbose=True,
training_plot=False, device='cpu'):
net.to(device)
net.train()
training_losses = []
for epoch in tqdm(range(num_epochs)): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(train_loader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs = inputs.to(device).float()
labels = labels.to(device).long()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
if verbose:
training_losses += [loss.item()]
net.eval()
def test(data_loader):
correct = 0
total = 0
for data in data_loader:
inputs, labels = data
inputs = inputs.to(device).float()
labels = labels.to(device).long()
outputs = net(inputs)
_, predicted = torch.max(outputs, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
acc = 100 * correct / total
return total, acc
train_total, train_acc = test(train_loader)
test_total, test_acc = test(test_loader)
if verbose:
print(f'\nAccuracy on the {train_total} training samples: {train_acc:0.2f}')
print(f'Accuracy on the {test_total} testing samples: {test_acc:0.2f}\n')
if training_plot:
plt.plot(training_losses)
plt.xlabel('Batch')
plt.ylabel('Training loss')
plt.show()
return train_acc, test_acc
# @markdown `shuffle_and_split_data(X, y, seed)`
def shuffle_and_split_data(X, y, seed):
# set seed for reproducibility
torch.manual_seed(seed)
# Number of samples
N = X.shape[0]
# Shuffle data
shuffled_indices = torch.randperm(N) # get indices to shuffle data, could use torch.randperm
X = X[shuffled_indices]
y = y[shuffled_indices]
# Split data into train/test
test_size = int(0.2 * N) # assign test datset size using 20% of samples
X_test = X[:test_size]
y_test = y[:test_size]
X_train = X[test_size:]
y_train = y[test_size:]
return X_test, y_test, X_train, y_train
# @title Plotting functions
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.axis(False)
plt.show()
def sample_grid(M=500, x_max=2.0):
ii, jj = torch.meshgrid(torch.linspace(-x_max, x_max,M),
torch.linspace(-x_max, x_max, M))
X_all = torch.cat([ii.unsqueeze(-1),
jj.unsqueeze(-1)],
dim=-1).view(-1, 2)
return X_all
def plot_decision_map(X_all, y_pred, X_test, y_test,
M=500, x_max=2.0, eps=1e-3):
decision_map = torch.argmax(y_pred, dim=1)
for i in range(len(X_test)):
indeces = (X_all[:, 0] - X_test[i, 0])**2 + (X_all[:, 1] - X_test[i, 1])**2 < eps # [TO-DO]
decision_map[indeces] = (K + y_test[i]).long()
decision_map = decision_map.view(M, M).cpu()
plt.imshow(decision_map, extent=[-x_max, x_max, -x_max, x_max], cmap='jet')
plt.plot()
# @title Set random seed
# @markdown Executing `set_seed(seed=seed)` you are setting the seed
# For DL it's critical to set the random seed so that students have a
# baseline to compare their results to the expected ones.
# Read more here: https://pytorch.org/docs/stable/notes/randomness.html
# Call `set_seed` function in the exercises to ensure reproducibility.
import random
import torch
def set_seed(seed=None, seed_torch=True):
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
# In case that `DataLoader` is used
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
# @title Set device (GPU or CPU). Execute `set_device()`
# especially if torch modules used.
# inform the user if the notebook uses GPU or CPU.
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("GPU is not enabled in this notebook. \n"
"If you want to enable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `GPU` from the dropdown menu")
else:
print("GPU is enabled in this notebook. \n"
"If you want to disable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `None` from the dropdown menu")
return device
SEED = 2021
set_seed(seed=SEED)
DEVICE = set_device()
# @title Download of the Animal Faces dataset
# @markdown Animal faces consists of 16,130 32x32 images belonging to 3 classes
import requests, os
from zipfile import ZipFile
print("Start downloading and unzipping `AnimalFaces` dataset...")
name = 'AnimalFaces32x32'
fname = f"{name}.zip"
url = f"https://osf.io/kgfvj/download"
r = requests.get(url, allow_redirects=True)
with open(fname, 'wb') as fh:
fh.write(r.content)
with ZipFile(fname, 'r') as zfile:
zfile.extractall(f"./{name}")
if os.path.exists(fname):
os.remove(fname)
else:
print(f"The file {fname} does not exist")
os.chdir(name)
print("Download completed.")
# @title Data Loader
# @markdown Execute this cell!
K = 4
sigma = 0.4
N = 1000
t = torch.linspace(0, 1, N)
X = torch.zeros(K*N, 2)
y = torch.zeros(K*N)
for k in range(K):
X[k*N:(k+1)*N, 0] = t*(torch.sin(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
X[k*N:(k+1)*N, 1] = t*(torch.cos(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
y[k*N:(k+1)*N] = k
X_test, y_test, X_train, y_train = shuffle_and_split_data(X, y, seed=SEED)
# DataLoader with random seed
batch_size = 128
g_seed = torch.Generator()
g_seed.manual_seed(SEED)
test_data = TensorDataset(X_test, y_test)
test_loader = DataLoader(test_data, batch_size=batch_size,
shuffle=False, num_workers=0,
worker_init_fn=seed_worker,
generator=g_seed,
)
train_data = TensorDataset(X_train, y_train)
train_loader = DataLoader(train_data,
batch_size=batch_size,
drop_last=True,
shuffle=True,
worker_init_fn=seed_worker,
generator=g_seed,
)
###Output
_____no_output_____
###Markdown
--- Section 1: Wider vs deeper networks*Time estimate: ~45 mins*
###Code
# @title Video 1: Deep Expressivity
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV19f4y157vG", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"g8JuGrNk9ag", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 1: Deep Expressivity')
display(out)
###Output
_____no_output_____
###Markdown
Coding Exercise 1: Wide vs. Deep while keeping the number of parameters the sameLet's find the optimal number of hidden layers under a fixed parameter-count constraint!But first, we need a model parameter counter. You could iterate over the model layers by calling `.parameters()` and then use `.numel()` to count the layer parameters. Also, you can use the [`requires_grad`](https://pytorch.org/docs/stable/notes/autograd.html) attribute to make sure each one is a trainable parameter. E.g.,```pythonx = torch.ones(10, 5, requires_grad=True)```After defining the counter function, we will increase the depth step by step and, for each depth, iterate over the possible number of hidden units (assumed to be the same in all hidden layers); then, using our parameter counter, we choose the number of hidden units that brings the total close to `max_par_count` parameters.
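As a minimal sketch of the counting idea on a stand-in model (not the exercise solution; `toy` and its layer sizes are arbitrary choices for illustration), the trainable-parameter count is just the sum of `numel()` over the parameters that have `requires_grad`:

```python
# Sketch: count trainable parameters of a small stand-in model
import torch.nn as nn

toy = nn.Sequential(nn.Linear(2, 8), nn.ReLU(), nn.Linear(8, 4))
n_params = sum(p.numel() for p in toy.parameters() if p.requires_grad)
print(n_params)  # (2*8 + 8) + (8*4 + 4) = 60
```

The exercise cell below applies the same idea to `Net` while searching over depth and width.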
###Code
def run_depth_optimizer(max_par_count, max_hidden_layer, device):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the depth optimizer function")
###################################################################
def count_parameters(model):
par_count = 0
for p in model.parameters():
if p.requires_grad:
par_count += ...
return par_count
# number of hidden layers to try
hidden_layers = ...
# test test score list
test_scores = []
for hidden_layer in hidden_layers:
# Initialize the hidden units in each hidden layer to be 1
hidden_units = np.ones(hidden_layer, dtype=np.int)
# Define the the with hidden units equal to 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(device)
par_count = count_parameters(wide_net)
# increment hidden_units and repeat until the par_count reaches the desired count
while par_count < max_par_count:
hidden_units += 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(device)
par_count = ...
# Train it
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(wide_net.parameters(), lr=1e-3)
_, test_acc = train_test_classification(wide_net, criterion, optimizer,
train_loader, test_loader,
num_epochs=100, device=device)
test_scores += [test_acc]
return hidden_layers, test_scores
# add event to airtable
atform.add_event('Coding Exercise 1: Wide vs. Deep ')
set_seed(seed=SEED)
max_par_count = 100
max_hidden_layer = 5
## Uncomment below to test your function
# hidden_layers, test_scores = run_depth_optimizer(max_par_count, max_hidden_layer, DEVICE)
# plt.xlabel('# of hidden layers')
# plt.ylabel('Test accuracy')
# plt.plot(hidden_layers, test_scores)
# plt.show()
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_7d616b5c.py)*Example output:* Think! 1: Why the tradeoff?Here we see that there is a particular number of hidden layers that is optimal. Why do you think adding hidden layers beyond a certain point hurts in this scenario?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type answer here and Push submit',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
atform.add_answer('q1' , text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_4c626e50.py) Section 1.1: Where Wide FailsLet's use the same Spiral dataset generated before with two features, then add more polynomial features (which makes the first layer wider), and finally train a single Linear layer. We can use the same MLP network with no hidden layers (though it would not be called an MLP anymore!).Note that we will add polynomial terms up to $P=50$, which means that every $x_1^n x_2^m$ term satisfies $n+m\leq P$. It is a fun math exercise to prove that the total number of polynomial features up to degree $P$ is\begin{equation}\text{\# of terms} = \frac{(P+1)(P+2)}{2}\end{equation}Also, we don't need the polynomial term of degree zero (the constant term) since `nn.Linear` layers have bias terms, so we will have one fewer polynomial feature.
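If you want to convince yourself of that count without the algebra, a brute-force enumeration for a small degree gives the same number (a quick sketch; `P = 5` is an arbitrary choice):

```python
# Sketch: enumerate monomials x1^n * x2^m with 0 < n + m <= P and compare to the formula
P = 5
count = sum(1 for n in range(P + 1) for m in range(P + 1) if 0 < n + m <= P)
print(count, (P + 1) * (P + 2) // 2 - 1)  # both 20 (constant term excluded)
```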
###Code
def run_poly_clasification(poly_degree, device='cpu', seed=0):
def make_poly_features(poly_degree, X):
# Define the number of polynomial features except the bias term
num_features = (poly_degree + 1)*(poly_degree + 2) // 2 - 1
poly_X = torch.zeros((X.shape[0], num_features))
count = 0
for i in range(poly_degree+1):
for j in range(poly_degree+1):
# no need to add zero degree since model has biases
if j + i > 0:
if j + i <= poly_degree:
# Define the polynomial term
poly_X[:, count] = X[:, 0]**i * X [:, 1]**j
count += 1
return poly_X, num_features
poly_X_test, num_features = make_poly_features(poly_degree, X_test)
poly_X_train, _ = make_poly_features(poly_degree, X_train)
batch_size = 128
g_seed = torch.Generator()
g_seed.manual_seed(seed)
poly_test_data = TensorDataset(poly_X_test, y_test)
poly_test_loader = DataLoader(poly_test_data,
batch_size=batch_size,
shuffle=False,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
poly_train_data = TensorDataset(poly_X_train, y_train)
poly_train_loader = DataLoader(poly_train_data,
batch_size=batch_size,
shuffle=True,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
# define a linear model using MLP class
poly_net = Net('ReLU()', num_features, [], K).to(device)
# Train it!
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(poly_net.parameters(), lr=1e-3)
_, _ = train_test_classification(poly_net, criterion, optimizer,
poly_train_loader, poly_test_loader,
num_epochs=100, device=device)
# Test it
X_all = sample_grid().to(device)
poly_X_all, _ = make_poly_features(poly_degree, X_all)
y_pred = poly_net(poly_X_all.to(device))
# Plot it
plot_decision_map(X_all.cpu(), y_pred.cpu(), X_test.cpu(), y_test.cpu())
plt.show()
return num_features
set_seed(seed=SEED)
max_poly_degree = 50
num_features = run_poly_clasification(max_poly_degree, DEVICE, SEED)
print(f'Number of features: {num_features}')
###Output
_____no_output_____
###Markdown
Think! 1.1: Does it generalize well?Do you think this model is performing well outside its training distribution? Why?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type your answer here and click on `Submit!`',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
atform.add_answer('q2', text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_13c53198.py) --- Section 2: Deeper MLPs*Time estimate: ~55 mins*
###Code
# @title Video 2: Case study
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1FL411n7SH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"3g_OJ6dYE8E", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 2: Case study')
display(out)
###Output
_____no_output_____
###Markdown
Coding Exercise 2: Dataloader on a real-world datasetLet's build our first real-world dataset loader with data preprocessing and augmentation! We will use the Torchvision transforms to do it.We'd like a simple data augmentation with the following steps:* Random rotation by up to 10 degrees (`.RandomRotation`)* Random horizontal flipping (`.RandomHorizontalFlip`)and we'd like a preprocessing that:* converts images to PyTorch tensors in the range [0, 1] (`.ToTensor`)* normalizes the input to the range [-1, 1] (`.Normalize`)**Hint:** For more info on transforms, see the [official documentation](https://pytorch.org/vision/stable/transforms.html).
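For orientation, a composed pipeline of the kind described above could look roughly like the sketch below. The variable name `example_train_transform` and the normalization values are illustrative assumptions, not necessarily what the exercise solution uses; the exercise cell that follows asks you to fill in the actual lists yourself.

```python
# Sketch: augmentation + preprocessing pipeline with torchvision transforms
import torchvision.transforms as transforms

example_train_transform = transforms.Compose([
    transforms.RandomRotation(10),        # augment: rotate by up to 10 degrees
    transforms.RandomHorizontalFlip(),    # augment: random left-right flip
    transforms.ToTensor(),                # preprocess: PIL image -> tensor in [0, 1]
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),  # preprocess: map roughly to [-1, 1]
])
```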
###Code
def get_data_loaders(batch_size, seed):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the get data loaders function")
###################################################################
# define the transform done only during training
augmentation_transforms = ...
# define the transform done in training and testing (after augmentation)
preprocessing_transforms = ...
# compose them together
train_transform = transforms.Compose(augmentation_transforms + preprocessing_transforms)
test_transform = transforms.Compose(preprocessing_transforms)
# using pathlib to be compatible with all OS's
data_path = pathlib.Path('.')/'afhq'
# define the dataset objects (they can load one by one)
img_train_dataset = ImageFolder(data_path/'train', transform=train_transform)
img_test_dataset = ImageFolder(data_path/'val', transform=test_transform)
g_seed = torch.Generator()
g_seed.manual_seed(seed)
# define the dataloader objects (they can load batch by batch)
img_train_loader = DataLoader(img_train_dataset,
batch_size=batch_size,
shuffle=True,
worker_init_fn=seed_worker,
generator=g_seed)
# num_workers can be set higher (e.g., on Colab Pro) to speed up loading;
# with more than one worker, batches are prefetched in parallel worker processes
img_test_loader = DataLoader(img_test_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
return img_train_loader, img_test_loader
# add event to airtable
atform.add_event('Coding Exercise 2: Dataloader on a real-world dataset')
batch_size = 64
set_seed(seed=SEED)
## Uncomment below to test your function
# img_train_loader, img_test_loader = get_data_loaders(batch_size, SEED)
## get some random training images
# dataiter = iter(img_train_loader)
# images, labels = dataiter.next()
## show images
# imshow(make_grid(images, nrow=8))
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_9605a4e9.py)*Example output:*
###Code
# Train it
set_seed(seed=SEED)
net = Net('ReLU()', 3*32*32, [64, 64, 64], 3).to(DEVICE)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=3e-4)
_, _ = train_test_classification(net, criterion, optimizer,
img_train_loader, img_test_loader,
num_epochs=30, device=DEVICE)
# visualize the feature map
fc1_weights = net.mlp[0].weight.view(64, 3, 32, 32).detach().cpu()
fc1_weights /= torch.max(torch.abs(fc1_weights))
imshow(make_grid(fc1_weights, nrow=8))
###Output
_____no_output_____
###Markdown
Think! 2: Why are the first-layer features high level?Even though the network is three layers deep, we see distinct animal faces in the first-layer feature map. Do you think this MLP has a hierarchical feature representation? Why?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type your answer here and click on `Submit!`',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
atform.add_answer('q3', text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_eb2e554f.py) --- Section 3: Ethical aspects*Time estimate: ~20 mins*
###Code
# @title Video 3: Ethics: Hype in AI
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1CP4y1s712", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ou35QzsKsdc", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 3: Ethics: Hype in AI')
display(out)
###Output
_____no_output_____
###Markdown
--- Summary In the second tutorial of this day, we have dived deeper into MLPs and seen more of their mathematical and practical aspects. More specifically, we have learned about different architectures, i.e., deep, wide, and how they are dependent on the transfer function used. Also, we have learned about the importance of initialization, and we mathematically analyzed two methods for smart initialization.
###Code
# @title Video 4: Outro
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Kb4y1r76G", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"2sEPw4sSfSw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 4: Outro')
display(out)
# @title Airtable Submission Link
from IPython import display as IPydisplay
IPydisplay.HTML(
f"""
<div>
<a href= "{atform.url()}" target="_blank">
<img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/SurveyButton.png?raw=1"
alt="button link end of day Survey" style="width:410px"></a>
</div>""" )
###Output
_____no_output_____
###Markdown
--- Bonus: The need for good initializationIn this section, we derive principles for initializing deep networks. We will see that if the weights are too large, then the forward propagation of signals will be chaotic, and the backpropagation of error gradients will explode. On the other hand, if the weights are too small, the forward propagation of signals will be ordered, and the backpropagation of error gradients will vanish. The key idea behind initialization is to choose the weights to be just right, i.e., at the edge between order and chaos. In this section, we derive this edge and show how to compute the correct initial variance of the weights. Many of the typical initialization schemes in existing deep learning frameworks implicitly employ this principle of initialization at the edge of chaos. So this section can be safely skipped on a first pass and **is a bonus section**.
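To see the order/chaos picture concretely, here is a small forward-propagation sketch (the width, depth, activation, and scales are arbitrary illustrative choices): with weights scaled below the balanced value the signal shrinks toward zero after many layers, while with weights scaled above it the activations saturate.

```python
# Sketch: propagate a random input through many tanh layers at different weight scales
import torch

torch.manual_seed(0)
width, depth = 256, 50
for scale in (0.5, 1.0, 2.0):  # weight std = scale / sqrt(width)
    h = torch.randn(width)
    for _ in range(depth):
        W = torch.randn(width, width) * scale / width ** 0.5
        h = torch.tanh(W @ h)
    print(f"scale {scale}: ||h|| after {depth} layers = {h.norm():.3e}")
```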
###Code
# @title Video 5: Need for Good Initialization
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Qq4y1H7Px", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"W0V2kwHSuUI", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 5: Need for Good Initialization')
display(out)
###Output
_____no_output_____
###Markdown
Xavier initializationLet us look at the scale distribution of an output (e.g., a hidden variable) $o_i$ for some fully-connected layer without nonlinearities. With $n_{in}$ inputs ($x_j$) and their associated weights $w_{ij}$ for this layer. Then an output is given by,\begin{equation}o_{i} = \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j\end{equation}The weights $w_{ij}$ are all drawn independently from the same distribution. Furthermore, let us assume that this distribution has zero mean and variance $\sigma^2$ . Note that this does not mean that the distribution has to be Gaussian, just that the mean and variance need to exist. For now, let us assume that the inputs to the layer $x_j$ also have zero mean and variance $\gamma^2$ and that they are independent of $w_{ij}$ and independent of each other. In this case, we can compute the mean and variance of $o_i$ as follows:\begin{split}\begin{aligned} E[o_i] &= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij} x_j] \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij}] E[x_j] = 0, \\ \\ \\ \mathrm{Var}[o_i] &= E[o_i^2] - (E[o_i])^2 \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij} x^2_j] - 0 \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij}] E[x^2_j] \\ \\ &= n_\mathrm{in} \sigma^2 \gamma^2\end{aligned}\end{split}One way to keep the variance fixed is to set $n_{in}\sigma^2=1$ . Now consider backpropagation. There we face a similar problem, albeit with gradients being propagated from the layers closer to the output. Using the same reasoning as for forward propagation, we see that the gradients’ variance can blow up unless $n_{out}\sigma^2=1$ , where $n_{out}$ is the number of outputs of this layer. This leaves us in a dilemma: we cannot possibly satisfy both conditions simultaneously. Instead, we simply try to satisfy:\begin{aligned}\frac{1}{2} (n_\mathrm{in} + n_\mathrm{out}) \sigma^2 = 1 \text{ or equivalently }\sigma = \sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}\end{aligned}This is the reasoning underlying the now-standard and practically beneficial Xavier initialization, named after the first author of its creators [Glorot & Bengio, 2010]. Typically, the Xavier initialization samples weights from a Gaussian distribution with zero mean and variance $\sigma^2=\frac{2}{(n_{in}+n_{out})}$,\begin{equation}w_{ij} \sim \mathcal{N} \left (\mu=0, \sigma=\sqrt{\frac{2}{(n_{in}+n_{out})}} \right)\end{equation}We can also adapt Xavier’s intuition to choose the variance when sampling weights from a uniform distribution. Note that the uniform distribution $U(−a,a)$ has variance $\frac{a^2}{3}$. Plugging this into our condition on $\sigma^2$ yields the suggestion to initialize according to\begin{equation}w_{ij} \sim \mathcal{U} \left(-\sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}, \sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}\right)\end{equation}This explanation is mainly taken from [here](https://d2l.ai/chapter_multilayer-perceptrons/numerical-stability-and-init.html). If you want to see more about initializations and their differences see [here](https://www.deeplearning.ai/ai-notes/initialization/). 
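A quick numerical check of the variance calculation above (a sketch; the values of $n_\mathrm{in}$, $\sigma$, $\gamma$, and the number of Monte Carlo draws are arbitrary choices):

```python
# Sketch: empirically verify E[o_i] ~ 0 and Var[o_i] ~ n_in * sigma^2 * gamma^2
import torch

torch.manual_seed(0)
n_in, sigma, gamma, n_samples = 512, 0.05, 1.5, 10_000
w = torch.randn(n_samples, n_in) * sigma   # fresh weights for every draw
x = torch.randn(n_samples, n_in) * gamma   # fresh inputs for every draw
o = (w * x).sum(dim=1)                     # o_i = sum_j w_ij * x_j
print(o.mean().item())                                 # close to 0
print(o.var().item(), n_in * sigma ** 2 * gamma ** 2)  # both close to 2.88
```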
Initialization with transfer functionLet's derive the optimal gain for LeakyReLU following similar steps.LeakyReLU is described mathematically:\begin{equation}f(x)=\left\{ \begin{array}{ll} \alpha \cdot x & \text { for } x<0 \\ x & \text { for } x \geq 0 \end{array}\right.\end{equation}where $\alpha$ controls the angle of the negative slope.Considering a single layer with this activation function gives,\begin{align}o_{i} &= \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j\\z_{i} &= f\left( o_{i} \right)\end{align}where $z_i$ denotes the activation of node $i$.The expectation of the output is still zero, i.e., $\mathbb{E}[f(o_i)] = 0$, but the variance changes, and assuming that $P(o_i < 0) = 0.5$, we have that:\begin{align}\mathrm{Var}[f(o_i)] &= \mathbb{E}[f(o_i)^2] - \left( \mathbb{E}[f(o_i)] \right)^{2} \\ \\&= \frac{\mathrm{Var}[o_i] + \alpha^2 \mathrm{Var}[o_i]}{2} \\ \\&= \frac{1+\alpha^2}{2}n_\mathrm{in} \sigma^2 \gamma^2\end{align}where $\gamma^2$ is the variance of the distribution of the inputs $x_j$ and $\sigma^2$ is the variance of the distribution of the weights $w_{ij}$, as before.Therefore, following the rest of the derivation as before,\begin{equation}\sigma = gain\sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}, \, \text{where} \,\, gain = \sqrt{\frac{2}{1+\alpha^2}}\end{equation}As we can see from the derived formula for $\sigma$, the transfer function we choose is related to the variance of the distribution of the weights. As the negative slope $\alpha$ of the LeakyReLU becomes larger, the $gain$ becomes smaller and thus the distribution of the weights becomes narrower. On the other hand, as $\alpha$ becomes smaller and smaller, the distribution of the weights becomes wider. Recall that we initialize our weights, for example, by sampling from a normal distribution with zero mean and variance $\sigma^2$. Best gain for Xavier Initialization with Leaky ReLUYou're probably running out of time, so let me explain what's happening here. We derived a theoretical gain for initialization, but does it hold in practice? Here we have a setup to confirm our finding: we will try a range of gains, find the empirical optimum, and see whether it matches our theoretical value!If you have time left, you can change the distribution to sample the initial weights from a uniform distribution. Comment out line 11 and uncomment line 12.
###Code
N = 10 # number of trials
gains = np.linspace(1/N, 3.0, N)
test_accs = []
train_accs = []
mode = 'uniform'
for gain in gains:
print(f'\ngain: {gain}')
def init_weights(m, mode='normal'):
if type(m) == nn.Linear:
torch.nn.init.xavier_normal_(m.weight, gain)
# torch.nn.init.xavier_uniform_(m.weight, gain)
negative_slope = 0.1
actv = f'LeakyReLU({negative_slope})'
set_seed(seed=SEED)
net = Net(actv, 3*32*32, [128, 64, 32], 3).to(DEVICE)
net.apply(init_weights)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=1e-2)
train_acc, test_acc = train_test_classification(net, criterion, optimizer,
img_train_loader,
img_test_loader,
num_epochs=1,
verbose=True,
device=DEVICE)
test_accs += [test_acc]
train_accs += [train_acc]
best_gain = gains[np.argmax(train_accs)]
plt.plot(gains, test_accs, label='Test accuracy')
plt.plot(gains, train_accs, label='Train accuracy')
plt.scatter(best_gain, max(train_accs),
label=f'best gain={best_gain:.1f}',
c='k', marker ='x')
# calculate and plot the theoretical gain
theoretical_gain = np.sqrt(2.0 / (1 + negative_slope ** 2))
plt.scatter(theoretical_gain, max(train_accs),
label=f'theoretical gain={theoretical_gain:.2f}',
c='g', marker ='x')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Tutorial 2: Deep MLPs**Week 1, Day 3: Multi Layer Perceptrons****By Neuromatch Academy**__Content creators:__ Arash Ash, Surya Ganguli__Content reviewers:__ Saeed Salehi, Felix Bartsch, Yu-Fang Yang, Melvin Selim Atay, Kelson Shilling-Scrivo__Content editors:__ Gagana B, Kelson Shilling-Scrivo, Spiros Chavlis__Production editors:__ Anoop Kulkarni, Kelson Shilling-Scrivo, Spiros Chavlis **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial ObjectivesIn this tutorial, we will dive deeper into MLPs and see more of their mathematical and practical aspects. Today we are going to see why MLPs:* can be deep or wide* dependant on transfer functions* sensitive to initialization
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in all tutorials today
# @markdown If you want to locally download the slides, click [here](https://osf.io/ed65b/download)
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/ed65b/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
--- SetupThis is a GPU free notebook!
###Code
# @title Install dependencies
!pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
from evaltools.airtable import AirtableForm
atform = AirtableForm('appn7VdPRseSoMXEG','W1D3_T2','https://portal.neuromatchacademy.org/api/redirect/to/49e16345-65a5-4616-ba63-568ca06cab78')
# Imports
import pathlib
import torch
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.optim as optim
from torchvision.utils import make_grid
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader, TensorDataset
from tqdm.auto import tqdm
from IPython.display import display
# @title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
my_layout = widgets.Layout()
# @title Helper functions (MLP Tutorial 1 Codes)
# @markdown `Net(nn.Module)`
class Net(nn.Module):
def __init__(self, actv, input_feature_num, hidden_unit_nums, output_feature_num):
super(Net, self).__init__()
self.input_feature_num = input_feature_num # save the input size for reshapinng later
self.mlp = nn.Sequential() # Initialize layers of MLP
in_num = input_feature_num # initialize the temporary input feature to each layer
for i in range(len(hidden_unit_nums)): # Loop over layers and create each one
out_num = hidden_unit_nums[i] # assign the current layer hidden unit from list
layer = nn.Linear(in_num, out_num) # use nn.Linear to define the layer
in_num = out_num # assign next layer input using current layer output
self.mlp.add_module(f"Linear_{i}", layer) # append layer to the model with a name
actv_layer = eval(f"nn.{actv}") # Assign activation function (eval allows us to instantiate object from string)
self.mlp.add_module(f"Activation_{i}", actv_layer) # append activation to the model with a name
out_layer = nn.Linear(in_num, output_feature_num) # Create final layer
self.mlp.add_module('Output_Linear', out_layer) # append the final layer
def forward(self, x):
# reshape inputs to (batch_size, input_feature_num)
# just in case the input vector is not 2D, like an image!
x = x.view(-1, self.input_feature_num)
logits = self.mlp(x) # forward pass of MLP
return logits
# @markdown `train_test_classification(net, criterion, optimizer, train_loader, test_loader, num_epochs=1, verbose=True, training_plot=False)`
def train_test_classification(net, criterion, optimizer, train_loader,
test_loader, num_epochs=1, verbose=True,
training_plot=False, device='cpu'):
net.to(device)
net.train()
training_losses = []
for epoch in tqdm(range(num_epochs)): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(train_loader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs = inputs.to(device).float()
labels = labels.to(device).long()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
if verbose:
training_losses += [loss.item()]
net.eval()
def test(data_loader):
correct = 0
total = 0
for data in data_loader:
inputs, labels = data
inputs = inputs.to(device).float()
labels = labels.to(device).long()
outputs = net(inputs)
_, predicted = torch.max(outputs, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
acc = 100 * correct / total
return total, acc
train_total, train_acc = test(train_loader)
test_total, test_acc = test(test_loader)
if verbose:
print(f'\nAccuracy on the {train_total} training samples: {train_acc:0.2f}')
print(f'Accuracy on the {test_total} testing samples: {test_acc:0.2f}\n')
if training_plot:
plt.plot(training_losses)
plt.xlabel('Batch')
plt.ylabel('Training loss')
plt.show()
return train_acc, test_acc
# @markdown `shuffle_and_split_data(X, y, seed)`
def shuffle_and_split_data(X, y, seed):
# set seed for reproducibility
torch.manual_seed(seed)
# Number of samples
N = X.shape[0]
# Shuffle data
shuffled_indices = torch.randperm(N) # get indices to shuffle data, could use torch.randperm
X = X[shuffled_indices]
y = y[shuffled_indices]
# Split data into train/test
test_size = int(0.2 * N) # assign test datset size using 20% of samples
X_test = X[:test_size]
y_test = y[:test_size]
X_train = X[test_size:]
y_train = y[test_size:]
return X_test, y_test, X_train, y_train
# @title Plotting functions
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.axis(False)
plt.show()
def sample_grid(M=500, x_max=2.0):
ii, jj = torch.meshgrid(torch.linspace(-x_max, x_max,M),
torch.linspace(-x_max, x_max, M))
X_all = torch.cat([ii.unsqueeze(-1),
jj.unsqueeze(-1)],
dim=-1).view(-1, 2)
return X_all
def plot_decision_map(X_all, y_pred, X_test, y_test,
M=500, x_max=2.0, eps=1e-3):
decision_map = torch.argmax(y_pred, dim=1)
for i in range(len(X_test)):
indeces = (X_all[:, 0] - X_test[i, 0])**2 + (X_all[:, 1] - X_test[i, 1])**2 < eps # [TO-DO]
decision_map[indeces] = (K + y_test[i]).long()
decision_map = decision_map.view(M, M).cpu()
plt.imshow(decision_map, extent=[-x_max, x_max, -x_max, x_max], cmap='jet')
plt.plot()
# @title Set random seed
# @markdown Executing `set_seed(seed=seed)` you are setting the seed
# for DL its critical to set the random seed so that students can have a
# baseline to compare their results to expected results.
# Read more here: https://pytorch.org/docs/stable/notes/randomness.html
# Call `set_seed` function in the exercises to ensure reproducibility.
import random
import torch
def set_seed(seed=None, seed_torch=True):
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
# In case that `DataLoader` is used
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
# @title Set device (GPU or CPU). Execute `set_device()`
# especially if torch modules used.
# inform the user if the notebook uses GPU or CPU.
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("GPU is not enabled in this notebook. \n"
"If you want to enable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `GPU` from the dropdown menu")
else:
print("GPU is enabled in this notebook. \n"
"If you want to disable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `None` from the dropdown menu")
return device
SEED = 2021
set_seed(seed=SEED)
DEVICE = set_device()
# @title Download of the Animal Faces dataset
# @markdown Animal faces consists of 16,130 32x32 images belonging to 3 classes
import requests, os
from zipfile import ZipFile
print("Start downloading and unzipping `AnimalFaces` dataset...")
name = 'AnimalFaces32x32'
fname = f"{name}.zip"
url = f"https://osf.io/kgfvj/download"
r = requests.get(url, allow_redirects=True)
with open(fname, 'wb') as fh:
fh.write(r.content)
with ZipFile(fname, 'r') as zfile:
zfile.extractall(f"./{name}")
if os.path.exists(fname):
os.remove(fname)
else:
print(f"The file {fname} does not exist")
os.chdir(name)
print("Download completed.")
# @title Data Loader
# @markdown Execute this cell!
K = 4
sigma = 0.4
N = 1000
t = torch.linspace(0, 1, N)
X = torch.zeros(K*N, 2)
y = torch.zeros(K*N)
for k in range(K):
X[k*N:(k+1)*N, 0] = t*(torch.sin(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
X[k*N:(k+1)*N, 1] = t*(torch.cos(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
y[k*N:(k+1)*N] = k
X_test, y_test, X_train, y_train = shuffle_and_split_data(X, y, seed=SEED)
# DataLoader with random seed
batch_size = 128
g_seed = torch.Generator()
g_seed.manual_seed(SEED)
test_data = TensorDataset(X_test, y_test)
test_loader = DataLoader(test_data, batch_size=batch_size,
shuffle=False, num_workers=0,
worker_init_fn=seed_worker,
generator=g_seed,
)
train_data = TensorDataset(X_train, y_train)
train_loader = DataLoader(train_data,
batch_size=batch_size,
drop_last=True,
shuffle=True,
worker_init_fn=seed_worker,
generator=g_seed,
)
###Output
_____no_output_____
###Markdown
--- Section 1: Wider vs deeper networks*Time estimate: ~45 mins*
###Code
# @title Video 1: Deep Expressivity
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV19f4y157vG", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"g8JuGrNk9ag", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 1: Deep Expressivity')
display(out)
###Output
_____no_output_____
###Markdown
Coding Exercise 1: Wide vs. Deep while keeping number of parameters sameLet's find the optimal number of hidden layers under a fixed number of parameters constraint!But first, we need a model parameter counter. You could iterate over the model layers by calling `.parameters()` and then use `.numel()` to count the layer parameters. Also, you can use [`requires_grad`](https://pytorch.org/docs/stable/notes/autograd.html) attribute to make sure it's a trainable parameter. E.g.,```pythonx = torch.ones(10, 5, requires_grad=True)```After defining the counter function, we will step by step increase the depth and then iterate over the possible number of hidden units (assuming same for all hidden layers); then using our parameter counter choose the number of hidden units that results in overall close to `max_par_count` parameters.
###Code
def run_depth_optimizer(max_par_count, max_hidden_layer, device):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the depth optimizer function")
###################################################################
def count_parameters(model):
par_count = 0
for p in model.parameters():
if p.requires_grad:
par_count += ...
return par_count
# number of hidden layers to try
hidden_layers = ...
# test test score list
test_scores = []
for hidden_layer in hidden_layers:
# Initialize the hidden units in each hidden layer to be 1
hidden_units = np.ones(hidden_layer, dtype=np.int)
# Define the the with hidden units equal to 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(device)
par_count = count_parameters(wide_net)
# increment hidden_units and repeat until the par_count reaches the desired count
while par_count < max_par_count:
hidden_units += 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(device)
par_count = ...
# Train it
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(wide_net.parameters(), lr=1e-3)
_, test_acc = train_test_classification(wide_net, criterion, optimizer,
train_loader, test_loader,
num_epochs=100, device=device)
test_scores += [test_acc]
return hidden_layers, test_scores
# add event to airtable
atform.add_event('Coding Exercise 1: Wide vs. Deep ')
set_seed(seed=SEED)
max_par_count = 100
max_hidden_layer = 5
## Uncomment below to test your function
# hidden_layers, test_scores = run_depth_optimizer(max_par_count, max_hidden_layer, DEVICE)
# plt.xlabel('# of hidden layers')
# plt.ylabel('Test accuracy')
# plt.plot(hidden_layers, test_scores)
# plt.show()
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_7d616b5c.py)*Example output:* Think! 1: Why the tradeoff?Here we see that there is a particular number of hidden layers that is optimum. Why do you think increasing hidden layers after a certain point hurt in this scenario?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type answer here and Push submit',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
atform.add_answer('q1' , text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_4c626e50.py) Section 1.1: Where Wide FailsLet's use the same Spiral dataset generated before with two features. And then add more polynomial features (which makes the first layer wider). And finally, train a single Linear layer. We could use the same MLP network with no hidden layers (though it would not be called an MLP anymore!).Note that we will add polynomial terms upto $P=50$ which means that for every $x_1^n x_2^m$ term, $n+m\leq P$. Now it's fun math excercise to prove why the total number of polynomial features upto $P$ becomes,\begin{equation}\text{ of terms} = \frac{(P+1)(P+2)}{2}\end{equation}Also, we don't need the polynomial term with degree zero (which is the constatnt term) since `nn.Linear` layers have bias terms. Therefore we will have one fewer polynomial feature.
###Code
def run_poly_clasification(poly_degree, device='cpu', seed=0):
def make_poly_features(poly_degree, X):
# Define the number of polynomial features except the bias term
num_features = (poly_degree + 1)*(poly_degree + 2) // 2 - 1
poly_X = torch.zeros((X.shape[0], num_features))
count = 0
for i in range(poly_degree+1):
for j in range(poly_degree+1):
# no need to add zero degree since model has biases
if j + i > 0:
if j + i <= poly_degree:
# Define the polynomial term
poly_X[:, count] = X[:, 0]**i * X [:, 1]**j
count += 1
return poly_X, num_features
poly_X_test, num_features = make_poly_features(poly_degree, X_test)
poly_X_train, _ = make_poly_features(poly_degree, X_train)
batch_size = 128
g_seed = torch.Generator()
g_seed.manual_seed(seed)
poly_test_data = TensorDataset(poly_X_test, y_test)
poly_test_loader = DataLoader(poly_test_data,
batch_size=batch_size,
shuffle=False,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
poly_train_data = TensorDataset(poly_X_train, y_train)
poly_train_loader = DataLoader(poly_train_data,
batch_size=batch_size,
shuffle=True,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
# define a linear model using MLP class
poly_net = Net('ReLU()', num_features, [], K).to(device)
# Train it!
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(poly_net.parameters(), lr=1e-3)
_, _ = train_test_classification(poly_net, criterion, optimizer,
poly_train_loader, poly_test_loader,
num_epochs=100, device=DEVICE)
# Test it
X_all = sample_grid().to(device)
poly_X_all, _ = make_poly_features(poly_degree, X_all)
y_pred = poly_net(poly_X_all.to(device))
# Plot it
plot_decision_map(X_all.cpu(), y_pred.cpu(), X_test.cpu(), y_test.cpu())
plt.show()
return num_features
set_seed(seed=SEED)
max_poly_degree = 50
num_features = run_poly_clasification(max_poly_degree, DEVICE, SEED)
print(f'Number of features: {num_features}')
###Output
_____no_output_____
###Markdown
Think! 1.1: Does it generalize well?Do you think this model is performing well outside its training distribution? Why?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type your answer here and click on `Submit!`',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
atform.add_answer('q2', text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_13c53198.py) --- Section 2: Deeper MLPs*Time estimate: ~55 mins*
###Code
# @title Video 2: Case study
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1FL411n7SH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"3g_OJ6dYE8E", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 2: Case study')
display(out)
###Output
_____no_output_____
###Markdown
Coding Exercise 2: Dataloader on a real-world datasetLet's build our first real-world dataset loader with Data Preprocessing and Augmentation! And we will use the Torchvision transforms to do it.We'd like to have a simple data augmentation with the following steps:* Random rotation with 10 degrees (`.RandomRotation`)* Random horizontal flipping (`.RandomHorizontalFlip`)and we'd like a preprocessing that:* makes Pytorch tensors in the range [0, 1] (`.ToTensor`)* normalizes the input in the range [-1, 1] (.`Normalize`)**Hint:** For more info on transform, see the [official documentation](https://pytorch.org/vision/stable/transforms.html).
###Code
def get_data_loaders(batch_size, seed):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the get data loaders function")
###################################################################
# define the transform done only during training
augmentation_transforms = ...
# define the transform done in training and testing (after augmentation)
preprocessing_transforms = ...
# compose them together
train_transform = transforms.Compose(augmentation_transforms + preprocessing_transforms)
test_transform = transforms.Compose(preprocessing_transforms)
# using pathlib to be compatible with all OS's
data_path = pathlib.Path('.')/'afhq'
# define the dataset objects (they can load one by one)
img_train_dataset = ImageFolder(data_path/'train', transform=train_transform)
img_test_dataset = ImageFolder(data_path/'val', transform=test_transform)
g_seed = torch.Generator()
g_seed.manual_seed(seed)
# define the dataloader objects (they can load batch by batch)
img_train_loader = DataLoader(img_train_dataset,
batch_size=batch_size,
shuffle=True,
worker_init_fn=seed_worker,
generator=g_seed)
# num_workers can be set to higher if running on Colab Pro TPUs to speed up,
# with more than one worker, it will do multithreading to queue batches
img_test_loader = DataLoader(img_test_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
return img_train_loader, img_test_loader
# add event to airtable
atform.add_event('Coding Exercise 2: Dataloader on a real-world dataset')
batch_size = 64
set_seed(seed=SEED)
## Uncomment below to test your function
# img_train_loader, img_test_loader = get_data_loaders(batch_size, SEED)
## get some random training images
# dataiter = iter(img_train_loader)
# images, labels = dataiter.next()
## show images
# imshow(make_grid(images, nrow=8))
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_9605a4e9.py)*Example output:*
###Code
# Train it
set_seed(seed=SEED)
net = Net('ReLU()', 3*32*32, [64, 64, 64], 3).to(DEVICE)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=3e-4)
_, _ = train_test_classification(net, criterion, optimizer,
img_train_loader, img_test_loader,
num_epochs=30, device=DEVICE)
# visualize the feature map
fc1_weights = net.mlp[0].weight.view(64, 3, 32, 32).detach().cpu()
fc1_weights /= torch.max(torch.abs(fc1_weights))
imshow(make_grid(fc1_weights, nrow=8))
###Output
_____no_output_____
###Markdown
Think! 2: why first layer features are high level?Even though it's three layers deep, we see distinct animal faces in the first layer feature map. Do you think this MLP has a hierarchical feature representation? why?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type your answer here and click on `Submit!`',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
atform.add_answer('q3', text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_eb2e554f.py) --- Section 3: Ethical aspects*Time estimate: ~20 mins*
###Code
# @title Video 3: Ethics: Hype in AI
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1CP4y1s712", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ou35QzsKsdc", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 3: Ethics: Hype in AI')
display(out)
###Output
_____no_output_____
###Markdown
--- Summary In the second tutorial of this day, we have dived deeper into MLPs and seen more of their mathematical and practical aspects. More specifically, we have learned about different architectures, i.e., deep, wide, and how they are dependent on the transfer function used. Also, we have learned about the importance of initialization, and we mathematically analyzed two methods for smart initialization.
###Code
# @title Video 4: Outro
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Kb4y1r76G", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"2sEPw4sSfSw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 4: Outro')
display(out)
# @title Airtable Submission Link
from IPython import display as IPydisplay
IPydisplay.HTML(
f"""
<div>
<a href= "{atform.url()}" target="_blank">
<img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/SurveyButton.png?raw=1"
alt="button link end of day Survey" style="width:410px"></a>
</div>""" )
###Output
_____no_output_____
###Markdown
--- Bonus: The need for good initializationIn this section, we derive principles for initializing deep networks. We will see that if the weights are too large, then the forward propagation of signals will be chaotic, and the backpropagation of error gradients will explode. On the other hand, if the weights are too small, the forward propagation of signals will be ordered, and the backpropagation of error gradients will vanish. The key idea behind initialization is to choose the weights to be just right, i.e., at the edge between order and chaos. In this section, we derive this edge and show how to compute the correct initial variance of the weights. Many of the typical initialization schemes in existing deep learning frameworks implicitly employ this principle of initialization at the edge of chaos. So this section can be safely skipped on a first pass and **is a bonus section**.
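To make the order vs. chaos picture concrete before the derivation, here is a minimal sketch (not one of the tutorial exercises; the depth, width, and weight scales below are arbitrary illustration values). It pushes a random batch through a deep stack of `tanh` layers and reports how the activation scale behaves when the weights are too small, roughly critical (about $1/\sqrt{n}$), or too large.
```python
import torch

def final_activation_std(weight_std, depth=50, width=256, batch=128, seed=0):
  """Propagate a random batch through `depth` tanh layers with i.i.d. Gaussian
  weights of standard deviation `weight_std`; return the final activation std."""
  torch.manual_seed(seed)
  x = torch.randn(batch, width)
  for _ in range(depth):
    W = torch.randn(width, width) * weight_std
    x = torch.tanh(x @ W)
  return x.std().item()

critical = (1.0 / 256) ** 0.5  # ~1/sqrt(n): the variance-preserving scale
for std in (0.01, critical, 0.3):
  print(f"weight std {std:.4f} -> final activation std {final_activation_std(std):.4f}")
# too small: activations shrink towards zero (vanishing signal);
# near-critical: the activation scale stays roughly constant across depth;
# too large: tanh saturates and the activations pile up near +-1 (chaotic regime).
```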
###Code
# @title Video 5: Need for Good Initialization
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Qq4y1H7Px", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"W0V2kwHSuUI", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 5: Need for Good Initialization')
display(out)
###Output
_____no_output_____
###Markdown
Xavier initializationLet us look at the scale distribution of an output (e.g., a hidden variable) $o_i$ for some fully-connected layer without nonlinearities. With $n_{in}$ inputs ($x_j$) and their associated weights $w_{ij}$ for this layer. Then an output is given by,\begin{equation}o_{i} = \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j\end{equation}The weights $w_{ij}$ are all drawn independently from the same distribution. Furthermore, let us assume that this distribution has zero mean and variance $\sigma^2$ . Note that this does not mean that the distribution has to be Gaussian, just that the mean and variance need to exist. For now, let us assume that the inputs to the layer $x_j$ also have zero mean and variance $\gamma^2$ and that they are independent of $w_{ij}$ and independent of each other. In this case, we can compute the mean and variance of $o_i$ as follows:\begin{split}\begin{aligned} E[o_i] &= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij} x_j] \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij}] E[x_j] = 0, \\ \\ \\ \mathrm{Var}[o_i] &= E[o_i^2] - (E[o_i])^2 \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij} x^2_j] - 0 \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij}] E[x^2_j] \\ \\ &= n_\mathrm{in} \sigma^2 \gamma^2\end{aligned}\end{split}One way to keep the variance fixed is to set $n_{in}\sigma^2=1$ . Now consider backpropagation. There we face a similar problem, albeit with gradients being propagated from the layers closer to the output. Using the same reasoning as for forward propagation, we see that the gradients’ variance can blow up unless $n_{out}\sigma^2=1$ , where $n_{out}$ is the number of outputs of this layer. This leaves us in a dilemma: we cannot possibly satisfy both conditions simultaneously. Instead, we simply try to satisfy:\begin{aligned}\frac{1}{2} (n_\mathrm{in} + n_\mathrm{out}) \sigma^2 = 1 \text{ or equivalently }\sigma = \sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}\end{aligned}This is the reasoning underlying the now-standard and practically beneficial Xavier initialization, named after the first author of its creators [Glorot & Bengio, 2010]. Typically, the Xavier initialization samples weights from a Gaussian distribution with zero mean and variance $\sigma^2=\frac{2}{(n_{in}+n_{out})}$,\begin{equation}w_{ij} \sim \mathcal{N} \left (\mu=0, \sigma=\sqrt{\frac{2}{(n_{in}+n_{out})}} \right)\end{equation}We can also adapt Xavier’s intuition to choose the variance when sampling weights from a uniform distribution. Note that the uniform distribution $U(−a,a)$ has variance $\frac{a^2}{3}$. Plugging this into our condition on $\sigma^2$ yields the suggestion to initialize according to\begin{equation}w_{ij} \sim \mathcal{U} \left(-\sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}, \sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}\right)\end{equation}This explanation is mainly taken from [here](https://d2l.ai/chapter_multilayer-perceptrons/numerical-stability-and-init.html). If you want to see more about initializations and their differences see [here](https://www.deeplearning.ai/ai-notes/initialization/). 
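As a quick empirical sanity check of $\mathrm{Var}[o_i] = n_\mathrm{in}\,\sigma^2\gamma^2$ (an illustration only; the layer sizes below are arbitrary), we can initialize a single linear layer with `nn.init.xavier_normal_` and confirm that unit-variance inputs give roughly unit-variance outputs when $n_\mathrm{in} = n_\mathrm{out}$:
```python
import torch
import torch.nn as nn

n_in, n_out = 512, 512                 # example sizes; with n_in = n_out, Var[o] should be ~1
layer = nn.Linear(n_in, n_out, bias=False)
nn.init.xavier_normal_(layer.weight)   # samples with sigma^2 = 2 / (n_in + n_out)

x = torch.randn(10_000, n_in)          # zero-mean, unit-variance inputs (gamma = 1)
with torch.no_grad():
  o = layer(x)

print(f"input variance : {x.var().item():.3f}")
print(f"output variance: {o.var().item():.3f}")  # ~ n_in * sigma^2 * gamma^2 = 1
```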
Initialization with transfer functionLet's derive the optimal gain for LeakyReLU following similar steps.LeakyReLU is described mathematically:\begin{equation}f(x)=\left\{ \begin{array}{ll} \alpha \cdot x & \text { for } x<0 \\ x & \text { for } x \geq 0 \end{array}\right.\end{equation}where $\alpha$ controls the angle of the negative slope.Considering a single layer with this activation function gives,\begin{align}o_{i} &= \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j\\z_{i} &= f\left( o_{i} \right)\end{align}where $z_i$ denotes the activation of node $i$.The expectation of the output is still zero, i.e., $\mathbb{E}[f(o_i)]=0$, but the variance changes, and assuming that the probability $P(o_i < 0) = 0.5$, we have that:\begin{align}\mathrm{Var}[f(o_i)] &= \mathbb{E}[f(o_i)^2] - \left( \mathbb{E}[f(o_i)] \right)^{2} \\ \\&= \frac{\mathrm{Var}[o_i] + \alpha^2 \mathrm{Var}[o_i]}{2} \\ \\&= \frac{1+\alpha^2}{2}n_\mathrm{in} \sigma^2 \gamma^2\end{align}where $\gamma^2$ is the variance of the distribution of the inputs $x_j$ and $\sigma^2$ is the variance of the distribution of the weights $w_{ij}$, as before.Therefore, following the rest of the derivation as before,\begin{equation}\sigma = gain\sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}, \, \text{where} \,\, gain = \sqrt{\frac{2}{1+\alpha^2}}\end{equation}As we can see from the derived formula for $\sigma$, the transfer function we choose is related to the variance of the distribution of the weights. As the negative slope $\alpha$ of the LeakyReLU becomes larger, the $gain$ becomes smaller and thus the distribution of the weights is narrower. On the other hand, as $\alpha$ becomes smaller and smaller, the distribution of the weights is wider. Recall that we initialize our weights, for example, by sampling from a normal distribution with zero mean and variance $\sigma^2$. Best gain for Xavier Initialization with Leaky ReLUYou're probably running out of time, so let me explain what's happening here. We derived a theoretical gain for initialization. But does it hold in practice? Here we have a setup to confirm our finding. We will try a range of gains, find the empirical optimum, and see whether it matches our theoretical value!If you have time left, you can change the distribution to sample the initial weights from a uniform distribution. Comment out line 11 and uncomment line 12.
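As a small cross-check (illustration only, not part of the exercise), the gain we just derived matches what PyTorch reports via `torch.nn.init.calculate_gain` for a LeakyReLU with the same negative slope used in the cell below:
```python
import numpy as np
import torch.nn.init as init

alpha = 0.1  # negative slope, matching the cell below
derived_gain = np.sqrt(2.0 / (1.0 + alpha ** 2))
torch_gain = init.calculate_gain('leaky_relu', alpha)
print(f"derived gain: {derived_gain:.4f} | calculate_gain('leaky_relu', {alpha}): {torch_gain:.4f}")
```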
###Code
N = 10 # number of trials
gains = np.linspace(1/N, 3.0, N)
test_accs = []
train_accs = []
mode = 'uniform'
for gain in gains:
print(f'\ngain: {gain}')
def init_weights(m, mode='normal'):
if type(m) == nn.Linear:
torch.nn.init.xavier_normal_(m.weight, gain)
# torch.nn.init.xavier_uniform_(m.weight, gain)
negative_slope = 0.1
actv = f'LeakyReLU({negative_slope})'
set_seed(seed=SEED)
net = Net(actv, 3*32*32, [128, 64, 32], 3).to(DEVICE)
net.apply(init_weights)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=1e-2)
train_acc, test_acc = train_test_classification(net, criterion, optimizer,
img_train_loader,
img_test_loader,
num_epochs=1,
verbose=True,
device=DEVICE)
test_accs += [test_acc]
train_accs += [train_acc]
best_gain = gains[np.argmax(train_accs)]
plt.plot(gains, test_accs, label='Test accuracy')
plt.plot(gains, train_accs, label='Train accuracy')
plt.scatter(best_gain, max(train_accs),
label=f'best gain={best_gain:.1f}',
c='k', marker ='x')
# calculate and plot the theoretical gain
theoretical_gain = np.sqrt(2.0 / (1 + negative_slope ** 2))
plt.scatter(theoretical_gain, max(train_accs),
label=f'theoretical gain={theoretical_gain:.2f}',
c='g', marker ='x')
plt.legend()
plt.plot()
###Output
_____no_output_____
###Markdown
Tutorial 2: Deep MLPs**Week 1, Day 3: Multi Layer Perceptrons****By Neuromatch Academy**__Content creators:__ Arash Ash, Surya Ganguli__Content reviewers:__ Saeed Salehi, Felix Bartsch, Yu-Fang Yang, Melvin Selim Atay__Content editors:__ Gagana B, Spiros Chavlis, Kelson Shilling-Scrivo__Production editors:__ Anoop Kulkarni, Spiros Chavlis, Kelson Shilling-Scrivo **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial ObjectivesIn this tutorial, we will dive deeper into MLPs and see more of their mathematical and practical aspects. Today we are going to see why MLPs:* can be deep or wide* dependant on transfer functions* sensitive to initialization
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in all tutorials today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/ed65b/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
--- SetupThis is a GPU free notebook!
###Code
# @title Install dependencies
!pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
from evaltools.airtable import AirtableForm
# Imports
import pathlib
import torch
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader, TensorDataset
from torchvision.utils import make_grid
from tqdm.auto import tqdm
from IPython.display import display
# @title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
my_layout = widgets.Layout()
# @title Helper functions (MLP Tutorial 1 Codes)
# @markdown `Net(nn.Module)`
atform = AirtableForm('appn7VdPRseSoMXEG','W1D3_T2',
'https://portal.neuromatchacademy.org/api/redirect/to/49e16345-65a5-4616-ba63-568ca06cab78')
class Net(nn.Module):
def __init__(self, actv, input_feature_num, hidden_unit_nums, output_feature_num):
super(Net, self).__init__()
    self.input_feature_num = input_feature_num # save the input size for reshaping later
self.mlp = nn.Sequential() # Initialize layers of MLP
in_num = input_feature_num # initialize the temporary input feature to each layer
for i in range(len(hidden_unit_nums)): # Loop over layers and create each one
out_num = hidden_unit_nums[i] # assign the current layer hidden unit from list
layer = nn.Linear(in_num, out_num) # use nn.Linear to define the layer
in_num = out_num # assign next layer input using current layer output
self.mlp.add_module(f"Linear_{i}", layer) # append layer to the model with a name
actv_layer = eval(f"nn.{actv}") # Assign activation function (eval allows us to instantiate object from string)
self.mlp.add_module(f"Activation_{i}", actv_layer) # append activation to the model with a name
out_layer = nn.Linear(in_num, output_feature_num) # Create final layer
self.mlp.add_module('Output_Linear', out_layer) # append the final layer
def forward(self, x):
# reshape inputs to (batch_size, input_feature_num)
# just in case the input vector is not 2D, like an image!
x = x.view(-1, self.input_feature_num)
logits = self.mlp(x) # forward pass of MLP
return logits
# @markdown `train_test_classification(net, criterion, optimizer, train_loader, test_loader, num_epochs=1, verbose=True, training_plot=False)`
def train_test_classification(net, criterion, optimizer, train_loader,
test_loader, num_epochs=1, verbose=True,
training_plot=False, device='cpu'):
net.to(device)
net.train()
training_losses = []
for epoch in tqdm(range(num_epochs)): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(train_loader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs = inputs.to(device).float()
labels = labels.to(device).long()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
if verbose:
training_losses += [loss.item()]
net.eval()
def test(data_loader):
correct = 0
total = 0
for data in data_loader:
inputs, labels = data
inputs = inputs.to(device).float()
labels = labels.to(device).long()
outputs = net(inputs)
_, predicted = torch.max(outputs, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
acc = 100 * correct / total
return total, acc
train_total, train_acc = test(train_loader)
test_total, test_acc = test(test_loader)
if verbose:
print(f'\nAccuracy on the {train_total} training samples: {train_acc:0.2f}')
print(f'Accuracy on the {test_total} testing samples: {test_acc:0.2f}\n')
if training_plot:
plt.plot(training_losses)
plt.xlabel('Batch')
plt.ylabel('Training loss')
plt.show()
return train_acc, test_acc
# @markdown `shuffle_and_split_data(X, y, seed)`
def shuffle_and_split_data(X, y, seed):
# set seed for reproducibility
torch.manual_seed(seed)
# Number of samples
N = X.shape[0]
# Shuffle data
shuffled_indices = torch.randperm(N) # get indices to shuffle data, could use torch.randperm
X = X[shuffled_indices]
y = y[shuffled_indices]
# Split data into train/test
  test_size = int(0.2 * N) # assign test dataset size using 20% of samples
X_test = X[:test_size]
y_test = y[:test_size]
X_train = X[test_size:]
y_train = y[test_size:]
return X_test, y_test, X_train, y_train
# @title Plotting functions
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.axis(False)
plt.show()
def sample_grid(M=500, x_max=2.0):
ii, jj = torch.meshgrid(torch.linspace(-x_max, x_max,M),
torch.linspace(-x_max, x_max, M))
X_all = torch.cat([ii.unsqueeze(-1),
jj.unsqueeze(-1)],
dim=-1).view(-1, 2)
return X_all
def plot_decision_map(X_all, y_pred, X_test, y_test,
M=500, x_max=2.0, eps=1e-3):
decision_map = torch.argmax(y_pred, dim=1)
for i in range(len(X_test)):
    indices = (X_all[:, 0] - X_test[i, 0])**2 + (X_all[:, 1] - X_test[i, 1])**2 < eps # [TO-DO]
    decision_map[indices] = (K + y_test[i]).long()
decision_map = decision_map.view(M, M).cpu()
plt.imshow(decision_map, extent=[-x_max, x_max, -x_max, x_max], cmap='jet')
plt.plot()
# @title Set random seed
# @markdown Executing `set_seed(seed=seed)` you are setting the seed
# for DL it's critical to set the random seed so that students can have a
# baseline to compare their results to expected results.
# Read more here: https://pytorch.org/docs/stable/notes/randomness.html
# Call `set_seed` function in the exercises to ensure reproducibility.
import random
import torch
def set_seed(seed=None, seed_torch=True):
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
# In case that `DataLoader` is used
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
# @title Set device (GPU or CPU). Execute `set_device()`
# especially if torch modules used.
# inform the user if the notebook uses GPU or CPU.
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("GPU is not enabled in this notebook. \n"
"If you want to enable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `GPU` from the dropdown menu")
else:
print("GPU is enabled in this notebook. \n"
"If you want to disable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `None` from the dropdown menu")
return device
SEED = 2021
set_seed(seed=SEED)
DEVICE = set_device()
# @title Download of the Animal Faces dataset
# @markdown The Animal Faces dataset consists of 16,130 32x32 images belonging to 3 classes
import requests, os
from zipfile import ZipFile
print("Start downloading and unzipping `AnimalFaces` dataset...")
name = 'AnimalFaces32x32'
fname = f"{name}.zip"
url = f"https://osf.io/kgfvj/download"
r = requests.get(url, allow_redirects=True)
with open(fname, 'wb') as fh:
fh.write(r.content)
with ZipFile(fname, 'r') as zfile:
zfile.extractall(f"./{name}")
if os.path.exists(fname):
os.remove(fname)
else:
print(f"The file {fname} does not exist")
os.chdir(name)
print("Download completed.")
# @title Data Loader
# @markdown Execute this cell!
K = 4
sigma = 0.4
N = 1000
t = torch.linspace(0, 1, N)
X = torch.zeros(K*N, 2)
y = torch.zeros(K*N)
for k in range(K):
X[k*N:(k+1)*N, 0] = t*(torch.sin(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
X[k*N:(k+1)*N, 1] = t*(torch.cos(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
y[k*N:(k+1)*N] = k
X_test, y_test, X_train, y_train = shuffle_and_split_data(X, y, seed=SEED)
# DataLoader with random seed
batch_size = 128
g_seed = torch.Generator()
g_seed.manual_seed(SEED)
test_data = TensorDataset(X_test, y_test)
test_loader = DataLoader(test_data, batch_size=batch_size,
shuffle=False, num_workers=0,
worker_init_fn=seed_worker,
generator=g_seed,
)
train_data = TensorDataset(X_train, y_train)
train_loader = DataLoader(train_data,
batch_size=batch_size,
drop_last=True,
shuffle=True,
worker_init_fn=seed_worker,
generator=g_seed,
)
###Output
_____no_output_____
###Markdown
--- Section 1: Wider vs deeper networks
###Code
# @title Video 1: Deep Expressivity
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV19f4y157vG", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"g8JuGrNk9ag", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 1: Deep Expressivity')
display(out)
###Output
_____no_output_____
###Markdown
Coding Exercise 1: Wide vs. Deep while keeping number of parameters sameLet's find the optimal number of hidden layers under a fixed number of parameters constraint!But first, we need a model parameter counter. You could iterate over the model layers by calling `.parameters()` and then use `.numel()` to count the layer parameters. Also, you can use [`requires_grad`](https://pytorch.org/docs/stable/notes/autograd.html) attribute to make sure it's a trainable parameter. E.g.,```pythonx = torch.ones(10, 5, requires_grad=True)```After defining the counter function, we will step by step increase the depth and then iterate over the possible number of hidden units (assuming same for all hidden layers); then using our parameter counter choose the number of hidden units that results in overall close to `max_par_count` parameters.
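If these two building blocks are unfamiliar, here is a tiny standalone illustration (not the exercise solution) of what `.numel()` and `requires_grad` give you on a toy layer; the exercise below asks you to assemble the same idea into a counter for the whole model:
```python
import torch.nn as nn

toy_layer = nn.Linear(in_features=2, out_features=3)  # 2*3 weights + 3 biases = 9 parameters
for p in toy_layer.parameters():
  print(tuple(p.shape), p.numel(), p.requires_grad)

n_trainable = sum(p.numel() for p in toy_layer.parameters() if p.requires_grad)
print(f"trainable parameters: {n_trainable}")  # 9
```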
###Code
def run_depth_optimizer(max_par_count, max_hidden_layer, device):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the depth optimizer function")
###################################################################
def count_parameters(model):
par_count = 0
for p in model.parameters():
if p.requires_grad:
par_count += ...
return par_count
# number of hidden layers to try
hidden_layers = ...
  # test score list
test_scores = []
for hidden_layer in hidden_layers:
# Initialize the hidden units in each hidden layer to be 1
    hidden_units = np.ones(hidden_layer, dtype=int)
    # Define the network with hidden units equal to 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(device)
par_count = count_parameters(wide_net)
# increment hidden_units and repeat until the par_count reaches the desired count
while par_count < max_par_count:
hidden_units += 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(device)
par_count = ...
# Train it
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(wide_net.parameters(), lr=1e-3)
_, test_acc = train_test_classification(wide_net, criterion, optimizer,
train_loader, test_loader,
num_epochs=100, device=device)
test_scores += [test_acc]
return hidden_layers, test_scores
# add event to airtable
atform.add_event('Coding Exercise 1: Wide vs. Deep ')
set_seed(seed=SEED)
max_par_count = 100
max_hidden_layer = 5
## Uncomment below to test your function
# hidden_layers, test_scores = run_depth_optimizer(max_par_count, max_hidden_layer, DEVICE)
# plt.xlabel('# of hidden layers')
# plt.ylabel('Test accuracy')
# plt.plot(hidden_layers, test_scores)
# plt.show()
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_7d616b5c.py)*Example output:* Think! 1: Why the tradeoff?Here we see that there is a particular number of hidden layers that is optimal. Why do you think increasing the number of hidden layers beyond a certain point hurts performance in this scenario?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type answer here and Push submit',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
atform.add_answer('q1' , text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_4c626e50.py) Section 1.1: Where Wide FailsLet's use the same Spiral dataset generated before with two features, then add more polynomial features (which makes the first layer wider), and finally train a single Linear layer. We could use the same MLP network with no hidden layers (though it would not be called an MLP anymore!).Note that we will add polynomial terms up to $P=50$, which means that for every $x_1^n x_2^m$ term, $n+m\leq P$. It is a fun math exercise to prove that the total number of polynomial features up to degree $P$ is\begin{equation}\text{\# of terms} = \frac{(P+1)(P+2)}{2}\end{equation}Also, we don't need the polynomial term of degree zero (the constant term) since `nn.Linear` layers have bias terms. Therefore we will have one fewer polynomial feature.
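If you prefer to check the count without doing the combinatorics, a brute-force enumeration (illustration only) agrees with the formula once the constant term is dropped:
```python
P = 50
# count monomials x1^n * x2^m with 1 <= n + m <= P (constant term excluded)
enumerated = sum(1 for n in range(P + 1) for m in range(P + 1) if 0 < n + m <= P)
formula = (P + 1) * (P + 2) // 2 - 1
print(enumerated, formula)  # both are 1325
```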
###Code
def run_poly_clasification(poly_degree, device='cpu', seed=0):
def make_poly_features(poly_degree, X):
# Define the number of polynomial features except the bias term
num_features = (poly_degree + 1)*(poly_degree + 2) // 2 - 1
poly_X = torch.zeros((X.shape[0], num_features))
count = 0
for i in range(poly_degree+1):
for j in range(poly_degree+1):
# no need to add zero degree since model has biases
if j + i > 0:
if j + i <= poly_degree:
# Define the polynomial term
poly_X[:, count] = X[:, 0]**i * X [:, 1]**j
count += 1
return poly_X, num_features
poly_X_test, num_features = make_poly_features(poly_degree, X_test)
poly_X_train, _ = make_poly_features(poly_degree, X_train)
batch_size = 128
g_seed = torch.Generator()
g_seed.manual_seed(seed)
poly_test_data = TensorDataset(poly_X_test, y_test)
poly_test_loader = DataLoader(poly_test_data,
batch_size=batch_size,
shuffle=False,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
poly_train_data = TensorDataset(poly_X_train, y_train)
poly_train_loader = DataLoader(poly_train_data,
batch_size=batch_size,
shuffle=True,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
# define a linear model using MLP class
poly_net = Net('ReLU()', num_features, [], K).to(device)
# Train it!
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(poly_net.parameters(), lr=1e-3)
_, _ = train_test_classification(poly_net, criterion, optimizer,
poly_train_loader, poly_test_loader,
num_epochs=100, device=DEVICE)
# Test it
X_all = sample_grid().to(device)
poly_X_all, _ = make_poly_features(poly_degree, X_all)
y_pred = poly_net(poly_X_all.to(device))
# Plot it
plot_decision_map(X_all.cpu(), y_pred.cpu(), X_test.cpu(), y_test.cpu())
plt.show()
return num_features
set_seed(seed=SEED)
max_poly_degree = 50
num_features = run_poly_clasification(max_poly_degree, DEVICE, SEED)
print(f'Number of features: {num_features}')
###Output
_____no_output_____
###Markdown
Think! 1.1: Does it generalize well?Do you think this model is performing well outside its training distribution? Why?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type answer here and Push submit',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
atform.add_answer('q2', text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_13c53198.py) --- Section 2: Deeper MLPs
###Code
# @title Video 2: Case study
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1FL411n7SH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"3g_OJ6dYE8E", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 2: Case study')
display(out)
###Output
_____no_output_____
###Markdown
Coding Exercise 2: Dataloader on a real-world datasetLet's build our first real-world dataset loader with data preprocessing and augmentation! We will use the Torchvision transforms to do it.We'd like a simple data augmentation with the following steps:* Random rotation by up to 10 degrees (`.RandomRotation`)* Random horizontal flipping (`.RandomHorizontalFlip`)and we'd like a preprocessing that:* makes PyTorch tensors in the range [0, 1] (`.ToTensor`)* normalizes the input to the range [-1, 1] (`.Normalize`)**Hint:** For more info on transforms, see the [official documentation](https://pytorch.org/vision/stable/transforms.html).
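As a reminder of the general pattern (a generic sketch only; the rotation angle and the per-channel statistics are example values, and $(x - 0.5)/0.5$ is simply one way to map $[0, 1]$ to $[-1, 1]$), composing torchvision transforms looks like this:
```python
import torchvision.transforms as transforms

# A generic sketch of composing augmentation + preprocessing transforms.
example_transform = transforms.Compose([
    transforms.RandomRotation(10),          # augmentation: small random rotations
    transforms.RandomHorizontalFlip(),      # augmentation: random left-right mirroring
    transforms.ToTensor(),                  # PIL image -> float tensor in [0, 1]
    transforms.Normalize((0.5, 0.5, 0.5),   # (x - mean) / std maps [0, 1] to [-1, 1]
                         (0.5, 0.5, 0.5)),
])
```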
###Code
def get_data_loaders(batch_size, seed):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the get data loaders function")
###################################################################
# define the transform done only during training
augmentation_transforms = ...
# define the transform done in training and testing (after augmentation)
preprocessing_transforms = ...
# compose them together
train_transform = transforms.Compose(augmentation_transforms + preprocessing_transforms)
test_transform = transforms.Compose(preprocessing_transforms)
# using pathlib to be compatible with all OS's
data_path = pathlib.Path('.')/'afhq'
# define the dataset objects (they can load one by one)
img_train_dataset = ImageFolder(data_path/'train', transform=train_transform)
img_test_dataset = ImageFolder(data_path/'val', transform=test_transform)
g_seed = torch.Generator()
g_seed.manual_seed(seed)
# define the dataloader objects (they can load batch by batch)
img_train_loader = DataLoader(img_train_dataset,
batch_size=batch_size,
shuffle=True,
worker_init_fn=seed_worker,
generator=g_seed)
# num_workers can be set to higher if running on Colab Pro TPUs to speed up,
# with more than one worker, it will do multithreading to queue batches
img_test_loader = DataLoader(img_test_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
return img_train_loader, img_test_loader
# add event to airtable
atform.add_event('Coding Exercise 2: Dataloader on a real-world dataset')
batch_size = 64
set_seed(seed=SEED)
## Uncomment below to test your function
# img_train_loader, img_test_loader = get_data_loaders(batch_size, SEED)
## get some random training images
# dataiter = iter(img_train_loader)
# images, labels = next(dataiter)
## show images
# imshow(make_grid(images, nrow=8))
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_9605a4e9.py)*Example output:*
###Code
# Train it
set_seed(seed=SEED)
net = Net('ReLU()', 3*32*32, [64, 64, 64], 3).to(DEVICE)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=3e-4)
_, _ = train_test_classification(net, criterion, optimizer,
img_train_loader, img_test_loader,
num_epochs=30, device=DEVICE)
# visualize the feature map
fc1_weights = net.mlp[0].weight.view(64, 3, 32, 32).detach().cpu()
fc1_weights /= torch.max(torch.abs(fc1_weights))
imshow(make_grid(fc1_weights, nrow=8))
###Output
_____no_output_____
###Markdown
Think! 2: Why are the first-layer features high level?Even though the network is three layers deep, we see distinct animal faces in the first-layer feature map. Do you think this MLP has a hierarchical feature representation? Why?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type answer here and Push submit',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
  atform.add_answer('q3', text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_eb2e554f.py) --- Section 3: The need for good initialization
###Code
# @title Video 3: Need for Good Initialization
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Qq4y1H7Px", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"W0V2kwHSuUI", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 3: Need for Good Initialization')
display(out)
###Output
_____no_output_____
###Markdown
Section 3.1: Xavier initializationLet us look at the scale distribution of an output (e.g., a hidden variable) $o_i$ for some fully-connected layer without nonlinearities. With $n_{in}$ inputs ($x_j$) and their associated weights $w_{ij}$ for this layer. Then an output is given by,\begin{equation}o_{i} = \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j\end{equation}The weights $w_{ij}$ are all drawn independently from the same distribution. Furthermore, let us assume that this distribution has zero mean and variance $\sigma^2$ . Note that this does not mean that the distribution has to be Gaussian, just that the mean and variance need to exist. For now, let us assume that the inputs to the layer $x_j$ also have zero mean and variance $\gamma^2$ and that they are independent of $w_{ij}$ and independent of each other. In this case, we can compute the mean and variance of $o_i$ as follows:\begin{split}\begin{aligned} E[o_i] &= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij} x_j] \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij}] E[x_j] = 0, \\ \\ \\ \mathrm{Var}[o_i] &= E[o_i^2] - (E[o_i])^2 \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij} x^2_j] - 0 \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij}] E[x^2_j] \\ \\ &= n_\mathrm{in} \sigma^2 \gamma^2\end{aligned}\end{split}One way to keep the variance fixed is to set $n_{in}\sigma^2=1$ . Now consider backpropagation. There we face a similar problem, albeit with gradients being propagated from the layers closer to the output. Using the same reasoning as for forward propagation, we see that the gradients’ variance can blow up unless $n_{out}\sigma^2=1$ , where $n_{out}$ is the number of outputs of this layer. This leaves us in a dilemma: we cannot possibly satisfy both conditions simultaneously. Instead, we simply try to satisfy:\begin{aligned}\frac{1}{2} (n_\mathrm{in} + n_\mathrm{out}) \sigma^2 = 1 \text{ or equivalently }\sigma = \sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}\end{aligned}This is the reasoning underlying the now-standard and practically beneficial Xavier initialization, named after the first author of its creators [Glorot & Bengio, 2010]. Typically, the Xavier initialization samples weights from a Gaussian distribution with zero mean and variance $\sigma^2=\frac{2}{(n_{in}+n_{out})}$,\begin{equation}w_{ij} \sim \mathcal{N} \left (\mu=0, \sigma=\sqrt{\frac{2}{(n_{in}+n_{out})}} \right)\end{equation}We can also adapt Xavier’s intuition to choose the variance when sampling weights from a uniform distribution. Note that the uniform distribution $U(−a,a)$ has variance $\frac{a^2}{3}$. Plugging this into our condition on $\sigma^2$ yields the suggestion to initialize according to\begin{equation}w_{ij} \sim \mathcal{U} \left(-\sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}, \sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}\right)\end{equation}This explanation is mainly taken from [here](https://d2l.ai/chapter_multilayer-perceptrons/numerical-stability-and-init.html). If you want to see more about initializations and their differences see [here](https://www.deeplearning.ai/ai-notes/initialization/). 
Section 3.2: Initialization with transfer functionLet's derive the optimal gain for LeakyReLU following similar steps.LeakyReLU is described mathematically:\begin{equation}f(x)=\left\{ \begin{array}{ll} \alpha \cdot x & \text { for } x<0 \\ x & \text { for } x \geq 0 \end{array}\right.\end{equation}where $\alpha$ controls the angle of the negative slope.Considering a single layer with this activation function gives,\begin{align}o_{i} &= \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j\\z_{i} &= f\left( o_{i} \right)\end{align}where $z_i$ denotes the activation of node $i$.The expectation of the output is still zero, i.e., $\mathbb{E}[f(o_i)]=0$, but the variance changes, and assuming that the probability $P(o_i < 0) = 0.5$, we have that:\begin{align}\mathrm{Var}[f(o_i)] &= \mathbb{E}[f(o_i)^2] - \left( \mathbb{E}[f(o_i)] \right)^{2} \\ \\&= \frac{\mathrm{Var}[o_i] + \alpha^2 \mathrm{Var}[o_i]}{2} \\ \\&= \frac{1+\alpha^2}{2}n_\mathrm{in} \sigma^2 \gamma^2\end{align}where $\gamma^2$ is the variance of the distribution of the inputs $x_j$ and $\sigma^2$ is the variance of the distribution of the weights $w_{ij}$, as before.Therefore, following the rest of the derivation as before,\begin{equation}\sigma = gain\sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}, \, \text{where} \,\, gain = \sqrt{\frac{2}{1+\alpha^2}}\end{equation}As we can see from the derived formula for $\sigma$, the transfer function we choose is related to the variance of the distribution of the weights. As the negative slope $\alpha$ of the LeakyReLU becomes larger, the $gain$ becomes smaller and thus the distribution of the weights is narrower. On the other hand, as $\alpha$ becomes smaller and smaller, the distribution of the weights is wider. Recall that we initialize our weights, for example, by sampling from a normal distribution with zero mean and variance $\sigma^2$. Best gain for Xavier Initialization with Leaky ReLUYou're probably running out of time, so let me explain what's happening here. We derived a theoretical gain for initialization. But does it hold in practice? Here we have a setup to confirm our finding. We will try a range of gains, find the empirical optimum, and see whether it matches our theoretical value!If you have time left, you can change the distribution to sample the initial weights from a uniform distribution. Comment out line 11 and uncomment line 12.
###Code
N = 10 # number of trials
gains = np.linspace(1/N, 3.0, N)
test_accs = []
train_accs = []
mode = 'uniform'
for gain in gains:
print(f'\ngain: {gain}')
def init_weights(m, mode='normal'):
if type(m) == nn.Linear:
torch.nn.init.xavier_normal_(m.weight, gain)
# torch.nn.init.xavier_uniform_(m.weight, gain)
negative_slope = 0.1
actv = f'LeakyReLU({negative_slope})'
set_seed(seed=SEED)
net = Net(actv, 3*32*32, [128, 64, 32], 3).to(DEVICE)
net.apply(init_weights)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=1e-2)
train_acc, test_acc = train_test_classification(net, criterion, optimizer,
img_train_loader,
img_test_loader,
num_epochs=1,
verbose=True,
device=DEVICE)
test_accs += [test_acc]
train_accs += [train_acc]
best_gain = gains[np.argmax(train_accs)]
plt.plot(gains, test_accs, label='Test accuracy')
plt.plot(gains, train_accs, label='Train accuracy')
plt.scatter(best_gain, max(train_accs),
label=f'best gain={best_gain:.1f}',
c='k', marker ='x')
# calculate and plot the theoretical gain
theoretical_gain = np.sqrt(2.0 / (1 + negative_slope ** 2))
plt.scatter(theoretical_gain, max(train_accs),
label=f'theoretical gain={theoretical_gain:.2f}',
c='g', marker ='x')
plt.legend()
plt.plot()
###Output
_____no_output_____
###Markdown
--- Section 4: Ethical aspects
###Code
# @title Video 4: Ethics: Hype in AI
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1CP4y1s712", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ou35QzsKsdc", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 4: Ethics: Hype in AI')
display(out)
###Output
_____no_output_____
###Markdown
--- Summary In the second tutorial of this day, we have dived deeper into MLPs and seen more of their mathematical and practical aspects. More specifically, we have learned about different architectures, i.e., deep, wide, and how they are dependent on the transfer function used. Also, we have learned about the importance of initialization, and we mathematically analyzed two methods for smart initialization.
###Code
# @title Video 5: Outro
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Kb4y1r76G", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"2sEPw4sSfSw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 5: Outro')
display(out)
# @title Airtable Submission Link
from IPython import display as IPydisplay
IPydisplay.HTML(
f"""
<div>
<a href= "{atform.url()}" target="_blank">
<img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/SurveyButton.png?raw=1"
alt="button link end of day Survey" style="width:410px"></a>
</div>""" )
###Output
_____no_output_____
###Markdown
Tutorial 2: Deep MLPs**Week 1, Day 3: Multi Layer Perceptrons****By Neuromatch Academy**__Content creators:__ Arash Ash, Surya Ganguli__Content reviewers:__ Saeed Salehi, Felix Bartsch, Yu-Fang Yang, Melvin Selim Atay__Content editors:__ Gagana B, Spiros Chavlis__Production editors:__ Anoop Kulkarni, Spiros Chavlis **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial ObjectivesIn this tutorial, we will dive deeper into MLPs and see more of their mathematical and practical aspects. Today we are going to see why MLPs:* can be deep or wide* dependant on transfer functions* sensitive to initialization
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in all tutorials today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/ed65b/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
--- SetupThis is a GPU free notebook!
###Code
# Imports
import pathlib
import torch
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader, TensorDataset
from torchvision.utils import make_grid
from tqdm.auto import tqdm
from IPython.display import display
# @title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
my_layout = widgets.Layout()
# @title Helper functions (MLP Tutorial 1 Codes)
# @markdown `Net(nn.Module)`
class Net(nn.Module):
def __init__(self, actv, input_feature_num, hidden_unit_nums, output_feature_num):
super(Net, self).__init__()
    self.input_feature_num = input_feature_num # save the input size for reshaping later
self.mlp = nn.Sequential() # Initialize layers of MLP
in_num = input_feature_num # initialize the temporary input feature to each layer
for i in range(len(hidden_unit_nums)): # Loop over layers and create each one
out_num = hidden_unit_nums[i] # assign the current layer hidden unit from list
layer = nn.Linear(in_num, out_num) # use nn.Linear to define the layer
in_num = out_num # assign next layer input using current layer output
self.mlp.add_module(f"Linear_{i}", layer) # append layer to the model with a name
actv_layer = eval(f"nn.{actv}") # Assign activation function (eval allows us to instantiate object from string)
self.mlp.add_module(f"Activation_{i}", actv_layer) # append activation to the model with a name
out_layer = nn.Linear(in_num, output_feature_num) # Create final layer
self.mlp.add_module('Output_Linear', out_layer) # append the final layer
def forward(self, x):
# reshape inputs to (batch_size, input_feature_num)
# just in case the input vector is not 2D, like an image!
x = x.view(-1, self.input_feature_num)
logits = self.mlp(x) # forward pass of MLP
return logits
# @markdown `train_test_classification(net, criterion, optimizer, train_loader, test_loader, num_epochs=1, verbose=True, training_plot=False)`
def train_test_classification(net, criterion, optimizer, train_loader,
test_loader, num_epochs=1, verbose=True,
training_plot=False, device='cpu'):
net.to(device)
net.train()
training_losses = []
for epoch in tqdm(range(num_epochs)): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(train_loader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs = inputs.to(device).float()
labels = labels.to(device).long()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
if verbose:
training_losses += [loss.item()]
net.eval()
def test(data_loader):
correct = 0
total = 0
for data in data_loader:
inputs, labels = data
inputs = inputs.to(device).float()
labels = labels.to(device).long()
outputs = net(inputs)
_, predicted = torch.max(outputs, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
acc = 100 * correct / total
return total, acc
train_total, train_acc = test(train_loader)
test_total, test_acc = test(test_loader)
if verbose:
print(f'\nAccuracy on the {train_total} training samples: {train_acc:0.2f}')
print(f'Accuracy on the {test_total} testing samples: {test_acc:0.2f}\n')
if training_plot:
plt.plot(training_losses)
plt.xlabel('Batch')
plt.ylabel('Training loss')
plt.show()
return train_acc, test_acc
# @markdown `shuffle_and_split_data(X, y, seed)`
def shuffle_and_split_data(X, y, seed):
# set seed for reproducibility
torch.manual_seed(seed)
# Number of samples
N = X.shape[0]
# Shuffle data
shuffled_indices = torch.randperm(N) # get indices to shuffle data, could use torch.randperm
X = X[shuffled_indices]
y = y[shuffled_indices]
# Split data into train/test
  test_size = int(0.2 * N) # assign test dataset size using 20% of samples
X_test = X[:test_size]
y_test = y[:test_size]
X_train = X[test_size:]
y_train = y[test_size:]
return X_test, y_test, X_train, y_train
# @title Plotting functions
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.axis(False)
plt.show()
def sample_grid(M=500, x_max=2.0):
ii, jj = torch.meshgrid(torch.linspace(-x_max, x_max,M),
torch.linspace(-x_max, x_max, M))
X_all = torch.cat([ii.unsqueeze(-1),
jj.unsqueeze(-1)],
dim=-1).view(-1, 2)
return X_all
def plot_decision_map(X_all, y_pred, X_test, y_test,
M=500, x_max=2.0, eps=1e-3):
decision_map = torch.argmax(y_pred, dim=1)
for i in range(len(X_test)):
    indices = (X_all[:, 0] - X_test[i, 0])**2 + (X_all[:, 1] - X_test[i, 1])**2 < eps # [TO-DO]
    decision_map[indices] = (K + y_test[i]).long()
decision_map = decision_map.view(M, M).cpu()
plt.imshow(decision_map, extent=[-x_max, x_max, -x_max, x_max], cmap='jet')
plt.plot()
# @title Set random seed
# @markdown Executing `set_seed(seed=seed)` you are setting the seed
# for DL it's critical to set the random seed so that students can have a
# baseline to compare their results to expected results.
# Read more here: https://pytorch.org/docs/stable/notes/randomness.html
# Call `set_seed` function in the exercises to ensure reproducibility.
import random
import torch
def set_seed(seed=None, seed_torch=True):
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
# In case that `DataLoader` is used
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
# @title Set device (GPU or CPU). Execute `set_device()`
# especially if torch modules used.
# inform the user if the notebook uses GPU or CPU.
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("GPU is not enabled in this notebook. \n"
"If you want to enable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `GPU` from the dropdown menu")
else:
print("GPU is enabled in this notebook. \n"
"If you want to disable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `None` from the dropdown menu")
return device
SEED = 2021
set_seed(seed=SEED)
DEVICE = set_device()
# @title Download of the Animal Faces dataset
# @markdown The Animal Faces dataset consists of 16,130 32x32 images belonging to 3 classes
import requests, os
from zipfile import ZipFile
print("Start downloading and unzipping `AnimalFaces` dataset...")
name = 'AnimalFaces32x32'
fname = f"{name}.zip"
url = f"https://osf.io/kgfvj/download"
r = requests.get(url, allow_redirects=True)
with open(fname, 'wb') as fh:
fh.write(r.content)
with ZipFile(fname, 'r') as zfile:
zfile.extractall(f"./{name}")
if os.path.exists(fname):
os.remove(fname)
else:
print(f"The file {fname} does not exist")
os.chdir(name)
print("Download completed.")
# @title Data Loader
# @markdown Execute this cell!
K = 4
sigma = 0.4
N = 1000
t = torch.linspace(0, 1, N)
X = torch.zeros(K*N, 2)
y = torch.zeros(K*N)
for k in range(K):
X[k*N:(k+1)*N, 0] = t*(torch.sin(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
X[k*N:(k+1)*N, 1] = t*(torch.cos(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
y[k*N:(k+1)*N] = k
X_test, y_test, X_train, y_train = shuffle_and_split_data(X, y, seed=SEED)
# DataLoader with random seed
batch_size = 128
g_seed = torch.Generator()
g_seed.manual_seed(SEED)
test_data = TensorDataset(X_test, y_test)
test_loader = DataLoader(test_data, batch_size=batch_size,
shuffle=False, num_workers=0,
worker_init_fn=seed_worker,
generator=g_seed,
)
train_data = TensorDataset(X_train, y_train)
train_loader = DataLoader(train_data,
batch_size=batch_size,
drop_last=True,
shuffle=True,
worker_init_fn=seed_worker,
generator=g_seed,
)
###Output
_____no_output_____
###Markdown
--- Section 1: Wider vs deeper networks
###Code
# @title Video 1: Deep Expressivity
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV19f4y157vG", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"g8JuGrNk9ag", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Coding Exercise 1: Wide vs. Deep while keeping number of parameters sameLet's find the optimal number of hidden layers under a fixed number of parameters constraint!But first, we need a model parameter counter. You could iterate over the model layers by calling `.parameters()` and then use `.numel()` to count the layer parameters. Also, you can use [`requires_grad`](https://pytorch.org/docs/stable/notes/autograd.html) attribute to make sure it's a trainable parameter. E.g.,```pythonx = torch.ones(10, 5, requires_grad=True)```After defining the counter function, we will step by step increase the depth and then iterate over the possible number of hidden units (assuming same for all hidden layers); then using our parameter counter choose the number of hidden units that results in overall close to `max_par_count` parameters.
###Code
def run_depth_optimizer(max_par_count, max_hidden_layer, device):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the depth optimizer function")
###################################################################
def count_parameters(model):
par_count = 0
for p in model.parameters():
if p.requires_grad:
par_count += ...
return par_count
# number of hidden layers to try
hidden_layers = ...
  # test score list
test_scores = []
for hidden_layer in hidden_layers:
# Initialize the hidden units in each hidden layer to be 1
    hidden_units = np.ones(hidden_layer, dtype=int)
    # Define the network with hidden units equal to 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(device)
par_count = count_parameters(wide_net)
# increment hidden_units and repeat until the par_count reaches the desired count
while par_count < max_par_count:
hidden_units += 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(device)
par_count = ...
# Train it
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(wide_net.parameters(), lr=1e-3)
_, test_acc = train_test_classification(wide_net, criterion, optimizer,
train_loader, test_loader,
num_epochs=100, device=device)
test_scores += [test_acc]
return hidden_layers, test_scores
set_seed(seed=SEED)
max_par_count = 100
max_hidden_layer = 5
## Uncomment below to test your function
# hidden_layers, test_scores = run_depth_optimizer(max_par_count, max_hidden_layer, DEVICE)
# plt.xlabel('# of hidden layers')
# plt.ylabel('Test accuracy')
# plt.plot(hidden_layers, test_scores)
# plt.show()
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_04a1c48b.py)*Example output:* Think! 1: Why the tradeoff?Here we see that there is a particular number of hidden layers that is optimal. Why do you think increasing the number of hidden layers beyond a certain point hurts performance in this scenario? [*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_4c626e50.py) Section 1.1: Where Wide FailsLet's use the same Spiral dataset generated before with two features, then add more polynomial features (which makes the first layer wider), and finally train a single Linear layer. We could use the same MLP network with no hidden layers (though it would not be called an MLP anymore!).Note that we will add polynomial terms up to $P=50$, which means that for every $x_1^n x_2^m$ term, $n+m\leq P$. It is a fun math exercise to prove that the total number of polynomial features up to degree $P$ is\begin{equation}\text{\# of terms} = \frac{(P+1)(P+2)}{2}\end{equation}Also, we don't need the polynomial term of degree zero (the constant term) since `nn.Linear` layers have bias terms. Therefore we will have one fewer polynomial feature.
###Code
def run_poly_clasification(poly_degree, device='cpu', seed=0):
def make_poly_features(poly_degree, X):
# Define the number of polynomial features except the bias term
num_features = (poly_degree + 1)*(poly_degree + 2) // 2 - 1
poly_X = torch.zeros((X.shape[0], num_features))
count = 0
for i in range(poly_degree+1):
for j in range(poly_degree+1):
# no need to add zero degree since model has biases
if j + i > 0:
if j + i <= poly_degree:
# Define the polynomial term
poly_X[:, count] = X[:, 0]**i * X [:, 1]**j
count += 1
return poly_X, num_features
poly_X_test, num_features = make_poly_features(poly_degree, X_test)
poly_X_train, _ = make_poly_features(poly_degree, X_train)
batch_size = 128
g_seed = torch.Generator()
g_seed.manual_seed(seed)
poly_test_data = TensorDataset(poly_X_test, y_test)
poly_test_loader = DataLoader(poly_test_data,
batch_size=batch_size,
shuffle=False,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
poly_train_data = TensorDataset(poly_X_train, y_train)
poly_train_loader = DataLoader(poly_train_data,
batch_size=batch_size,
shuffle=True,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
# define a linear model using MLP class
poly_net = Net('ReLU()', num_features, [], K).to(device)
# Train it!
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(poly_net.parameters(), lr=1e-3)
_, _ = train_test_classification(poly_net, criterion, optimizer,
poly_train_loader, poly_test_loader,
                                   num_epochs=100, device=device)
# Test it
X_all = sample_grid().to(device)
poly_X_all, _ = make_poly_features(poly_degree, X_all)
y_pred = poly_net(poly_X_all.to(device))
# Plot it
plot_decision_map(X_all.cpu(), y_pred.cpu(), X_test.cpu(), y_test.cpu())
plt.show()
return num_features
set_seed(seed=SEED)
max_poly_degree = 50
num_features = run_poly_clasification(max_poly_degree, DEVICE, SEED)
print(f'Number of features: {num_features}')
###Output
_____no_output_____
###Markdown
Think! 1.1: Does it generalize well?Do you think this model is performing well outside its training distribution? Why? [*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_13c53198.py) --- Section 2: Deeper MLPs
###Code
# @title Video 2: Case study
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1FL411n7SH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"3g_OJ6dYE8E", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Coding Exercise 2: Dataloader on a real-world datasetLet's build our first real-world dataset loader with Data Preprocessing and Augmentation! We will use the torchvision transforms to do it.We'd like to have a simple data augmentation with the following steps:* Random rotation by up to 10 degrees (`.RandomRotation`)* Random horizontal flipping (`.RandomHorizontalFlip`)and we'd like a preprocessing that:* makes PyTorch tensors in the range [0, 1] (`.ToTensor`)* normalizes the input to the range [-1, 1] (`.Normalize`)**Hint:** For more info on transforms, see the [official documentation](https://pytorch.org/vision/stable/transforms.html).
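For orientation, one plausible way such transform lists could be composed is sketched below (illustrative only, not necessarily the exercise solution; the per-channel constants of 0.5 are an assumption for mapping 3-channel images from [0, 1] to [-1, 1]):
```python
import torchvision.transforms as transforms

# augmentation applied only at training time
augmentation = [
    transforms.RandomRotation(10),      # random angle in [-10, 10] degrees
    transforms.RandomHorizontalFlip(),  # flip with probability 0.5
]
# preprocessing applied at both training and test time
preprocessing = [
    transforms.ToTensor(),                                   # PIL image -> tensor in [0, 1]
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),  # [0, 1] -> [-1, 1]
]
train_tf = transforms.Compose(augmentation + preprocessing)
test_tf = transforms.Compose(preprocessing)
```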
###Code
def get_data_loaders(batch_size, seed):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the get data loaders function")
###################################################################
# define the transform done only during training
augmentation_transforms = ...
# define the transform done in training and testing (after augmentation)
preprocessing_transforms = ...
# compose them together
train_transform = transforms.Compose(augmentation_transforms + preprocessing_transforms)
test_transform = transforms.Compose(preprocessing_transforms)
# using pathlib to be compatible with all OS's
data_path = pathlib.Path('.')/'afhq'
# define the dataset objects (they can load one by one)
img_train_dataset = ImageFolder(data_path/'train', transform=train_transform)
img_test_dataset = ImageFolder(data_path/'val', transform=test_transform)
g_seed = torch.Generator()
g_seed.manual_seed(seed)
# define the dataloader objects (they can load batch by batch)
img_train_loader = DataLoader(img_train_dataset,
batch_size=batch_size,
shuffle=True,
worker_init_fn=seed_worker,
generator=g_seed)
  # num_workers can be set higher (e.g., on Colab Pro) to speed up loading;
  # with more than one worker, batches are prepared in parallel worker processes
img_test_loader = DataLoader(img_test_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
return img_train_loader, img_test_loader
batch_size = 64
set_seed(seed=SEED)
## Uncomment below to test your function
# img_train_loader, img_test_loader = get_data_loaders(batch_size, SEED)
## get some random training images
# dataiter = iter(img_train_loader)
# images, labels = next(dataiter)
## show images
# imshow(make_grid(images, nrow=8))
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_a50bebe8.py)*Example output:*
###Code
# Train it
set_seed(seed=SEED)
net = Net('ReLU()', 3*32*32, [64, 64, 64], 3).to(DEVICE)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=3e-4)
_, _ = train_test_classification(net, criterion, optimizer,
img_train_loader, img_test_loader,
num_epochs=30, device=DEVICE)
# visualize the feature map
fc1_weights = net.mlp[0].weight.view(64, 3, 32, 32).detach().cpu()
fc1_weights /= torch.max(torch.abs(fc1_weights))
imshow(make_grid(fc1_weights, nrow=8))
###Output
_____no_output_____
###Markdown
Think! 2: Why are the first-layer features high level?Even though the network is three layers deep, we see distinct animal faces in the first-layer feature map. Do you think this MLP has a hierarchical feature representation? Why? [*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_eb2e554f.py) --- Section 3: The need for good initialization
###Code
# @title Video 3: Need for Good Initialization
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Qq4y1H7Px", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"W0V2kwHSuUI", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Section 3.1: Xavier initializationLet us look at the scale distribution of an output (e.g., a hidden variable) $o_i$ for some fully-connected layer without nonlinearities. With $n_{in}$ inputs ($x_j$) and their associated weights $w_{ij}$ for this layer. Then an output is given by,\begin{equation}o_{i} = \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j\end{equation}The weights $w_{ij}$ are all drawn independently from the same distribution. Furthermore, let us assume that this distribution has zero mean and variance $\sigma^2$ . Note that this does not mean that the distribution has to be Gaussian, just that the mean and variance need to exist. For now, let us assume that the inputs to the layer $x_j$ also have zero mean and variance $\gamma^2$ and that they are independent of $w_{ij}$ and independent of each other. In this case, we can compute the mean and variance of $o_i$ as follows:\begin{split}\begin{aligned} E[o_i] &= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij} x_j] \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij}] E[x_j] = 0, \\ \\ \\ \mathrm{Var}[o_i] &= E[o_i^2] - (E[o_i])^2 \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij} x^2_j] - 0 \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij}] E[x^2_j] \\ \\ &= n_\mathrm{in} \sigma^2 \gamma^2\end{aligned}\end{split}One way to keep the variance fixed is to set $n_{in}\sigma^2=1$ . Now consider backpropagation. There we face a similar problem, albeit with gradients being propagated from the layers closer to the output. Using the same reasoning as for forward propagation, we see that the gradients’ variance can blow up unless $n_{out}\sigma^2=1$ , where $n_{out}$ is the number of outputs of this layer. This leaves us in a dilemma: we cannot possibly satisfy both conditions simultaneously. Instead, we simply try to satisfy:\begin{aligned}\frac{1}{2} (n_\mathrm{in} + n_\mathrm{out}) \sigma^2 = 1 \text{ or equivalently }\sigma = \sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}\end{aligned}This is the reasoning underlying the now-standard and practically beneficial Xavier initialization, named after the first author of its creators [Glorot & Bengio, 2010]. Typically, the Xavier initialization samples weights from a Gaussian distribution with zero mean and variance $\sigma^2=\frac{2}{(n_{in}+n_{out})}$,\begin{equation}w_{ij} \sim \mathcal{N} \left (\mu=0, \sigma=\sqrt{\frac{2}{(n_{in}+n_{out})}} \right)\end{equation}We can also adapt Xavier’s intuition to choose the variance when sampling weights from a uniform distribution. Note that the uniform distribution $U(−a,a)$ has variance $\frac{a^2}{3}$. Plugging this into our condition on $\sigma^2$ yields the suggestion to initialize according to\begin{equation}w_{ij} \sim \mathcal{U} \left(-\sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}, \sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}\right)\end{equation}This explanation is mainly taken from [here](https://d2l.ai/chapter_multilayer-perceptrons/numerical-stability-and-init.html). If you want to see more about initializations and their differences see [here](https://www.deeplearning.ai/ai-notes/initialization/). 
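A quick numerical check of this variance argument (illustrative only; the layer sizes are arbitrary and the inputs are taken to have unit variance, i.e., $\gamma = 1$):
```python
import torch

n_in, n_out, batch = 256, 128, 10_000
sigma = (2.0 / (n_in + n_out)) ** 0.5   # Xavier standard deviation
W = torch.randn(n_out, n_in) * sigma    # zero-mean Gaussian weights
x = torch.randn(batch, n_in)            # zero-mean inputs with unit variance
o = x @ W.T                             # one linear layer, no nonlinearity
print(o.var().item(), n_in * sigma**2)  # empirical vs. predicted n_in * sigma^2 * gamma^2
```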
Section 3.2: Initialization with transfer functionLet's derive the optimal gain for LeakyReLU following similar steps.LeakyReLU is described mathematically:\begin{equation}f(x)=\left\{ \begin{array}{ll} \alpha \cdot x & \text { for } x<0 \\ x & \text { for } x \geq 0 \end{array}\right.\end{equation}where $\alpha$ controls the angle of the negative slope.Considering a single layer with this activation function gives,\begin{align}o_{i} &= \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j\\z_{i} &= f\left( o_{i} \right)\end{align}where $z_i$ denotes the activation of node $i$.The expectation of the output is still zero, i.e., $\mathbb{E}[f(o_i)]=0$, but the variance changes, and assuming that the probability $P(x < 0) = 0.5$, we have that:\begin{align}\mathrm{Var}[f(o_i)] &= \mathbb{E}[f(o_i)^2] - \left( \mathbb{E}[f(o_i)] \right)^{2} \\ \\&= \frac{\mathrm{Var}[o_i] + \alpha^2 \mathrm{Var}[o_i]}{2} \\ \\&= \frac{1+\alpha^2}{2}n_\mathrm{in} \sigma^2 \gamma^2\end{align}where $\gamma^2$ is the variance of the distribution of the inputs $x_j$ and $\sigma^2$ is the variance of the distribution of weights $w_{ij}$, as before.Therefore, following the rest of the derivation as before,\begin{equation}\sigma = gain\sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}, \, \text{where} \,\, gain = \sqrt{\frac{2}{1+\alpha^2}}\end{equation}As we can see from the derived formula of $\sigma$, the transfer function we choose is related to the variance of the distribution of the weights. As the negative slope of the LeakyReLU $\alpha$ becomes larger, the $gain$ becomes smaller and thus the distribution of the weights is narrower. On the other hand, as $\alpha$ becomes smaller and smaller, the distribution of the weights is wider. Recall that we initialize our weights, for example, by sampling from a normal distribution with zero mean and variance $\sigma^2$. Best gain for Xavier Initialization with Leaky ReLUYou're probably running out of time, so let me explain what's happening here. We derived a theoretical gain for initialization. But does it hold in practice? Here we have a setup to confirm our finding. We will try a range of gains and see the empirical optimum and whether it matches our theoretical value!If you have time left, you can change the distribution to sample the initial weights from a uniform distribution. Comment out line 11 and uncomment line 12.
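The derived gain can be cross-checked against PyTorch's built-in helper (a small sanity check, assuming `negative_slope = 0.1` as used in the cell below):
```python
import numpy as np
import torch.nn.init as init

negative_slope = 0.1
gain_formula = np.sqrt(2.0 / (1.0 + negative_slope ** 2))
gain_torch = init.calculate_gain('leaky_relu', negative_slope)
print(gain_formula, gain_torch)  # both ~1.407
```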
###Code
N = 10 # number of trials
gains = np.linspace(1/N, 3.0, N)
test_accs = []
train_accs = []
mode = 'uniform'
for gain in gains:
print(f'\ngain: {gain}')
def init_weights(m, mode='normal'):
if type(m) == nn.Linear:
torch.nn.init.xavier_normal_(m.weight, gain)
# torch.nn.init.xavier_uniform_(m.weight, gain)
negative_slope = 0.1
actv = f'LeakyReLU({negative_slope})'
set_seed(seed=SEED)
net = Net(actv, 3*32*32, [128, 64, 32], 3).to(DEVICE)
net.apply(init_weights)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=1e-2)
train_acc, test_acc = train_test_classification(net, criterion, optimizer,
img_train_loader,
img_test_loader,
num_epochs=1,
verbose=True,
device=DEVICE)
test_accs += [test_acc]
train_accs += [train_acc]
best_gain = gains[np.argmax(train_accs)]
plt.plot(gains, test_accs, label='Test accuracy')
plt.plot(gains, train_accs, label='Train accuracy')
plt.scatter(best_gain, max(train_accs),
label=f'best gain={best_gain:.1f}',
c='k', marker ='x')
# calculate and plot the theoretical gain
theoretical_gain = np.sqrt(2.0 / (1 + negative_slope ** 2))
plt.scatter(theoretical_gain, max(train_accs),
label=f'theoretical gain={theoretical_gain:.2f}',
c='g', marker ='x')
plt.legend()
plt.plot()
###Output
_____no_output_____
###Markdown
--- Section 4: Ethical aspects
###Code
# @title Video 4: Ethics: Hype in AI
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1CP4y1s712", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ou35QzsKsdc", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
--- Summary In the second tutorial of this day, we have dived deeper into MLPs and seen more of their mathematical and practical aspects. More specifically, we have learned about different architectures, i.e., deep, wide, and how they are dependent on the transfer function used. Also, we have learned about the importance of initialization, and we mathematically analyzed two methods for smart initialization.
###Code
# @title Video 5: Outro
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Kb4y1r76G", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"2sEPw4sSfSw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Tutorial 2: Deep MLPs**Week 1, Day 3: Multi Layer Perceptrons****By Neuromatch Academy**__Content creators:__ Arash Ash, Surya Ganguli__Content reviewers:__ Saeed Salehi, Felix Bartsch, Yu-Fang Yang, Melvin Selim Atay, Kelson Shilling-Scrivo__Content editors:__ Gagana B, Kelson Shilling-Scrivo, Spiros Chavlis__Production editors:__ Anoop Kulkarni, Kelson Shilling-Scrivo, Spiros Chavlis **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial ObjectivesIn this tutorial, we will dive deeper into MLPs and see more of their mathematical and practical aspects. Today we are going to see why MLPs:* can be deep or wide* are dependent on transfer functions* are sensitive to initialization
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in all tutorials today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/ed65b/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
--- SetupThis is a GPU free notebook!
###Code
# @title Install dependencies
!pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
from evaltools.airtable import AirtableForm
# Imports
import pathlib
import torch
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader, TensorDataset
from torchvision.utils import make_grid
from tqdm.auto import tqdm
from IPython.display import display
# @title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
my_layout = widgets.Layout()
# @title Helper functions (MLP Tutorial 1 Codes)
# @markdown `Net(nn.Module)`
atform = AirtableForm('appn7VdPRseSoMXEG','W1D3_T2',
'https://portal.neuromatchacademy.org/api/redirect/to/49e16345-65a5-4616-ba63-568ca06cab78')
class Net(nn.Module):
def __init__(self, actv, input_feature_num, hidden_unit_nums, output_feature_num):
super(Net, self).__init__()
    self.input_feature_num = input_feature_num # save the input size for reshaping later
self.mlp = nn.Sequential() # Initialize layers of MLP
in_num = input_feature_num # initialize the temporary input feature to each layer
for i in range(len(hidden_unit_nums)): # Loop over layers and create each one
out_num = hidden_unit_nums[i] # assign the current layer hidden unit from list
layer = nn.Linear(in_num, out_num) # use nn.Linear to define the layer
in_num = out_num # assign next layer input using current layer output
self.mlp.add_module(f"Linear_{i}", layer) # append layer to the model with a name
actv_layer = eval(f"nn.{actv}") # Assign activation function (eval allows us to instantiate object from string)
self.mlp.add_module(f"Activation_{i}", actv_layer) # append activation to the model with a name
out_layer = nn.Linear(in_num, output_feature_num) # Create final layer
self.mlp.add_module('Output_Linear', out_layer) # append the final layer
def forward(self, x):
# reshape inputs to (batch_size, input_feature_num)
# just in case the input vector is not 2D, like an image!
x = x.view(-1, self.input_feature_num)
logits = self.mlp(x) # forward pass of MLP
return logits
# @markdown `train_test_classification(net, criterion, optimizer, train_loader, test_loader, num_epochs=1, verbose=True, training_plot=False)`
def train_test_classification(net, criterion, optimizer, train_loader,
test_loader, num_epochs=1, verbose=True,
training_plot=False, device='cpu'):
net.to(device)
net.train()
training_losses = []
for epoch in tqdm(range(num_epochs)): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(train_loader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs = inputs.to(device).float()
labels = labels.to(device).long()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
if verbose:
training_losses += [loss.item()]
net.eval()
def test(data_loader):
correct = 0
total = 0
for data in data_loader:
inputs, labels = data
inputs = inputs.to(device).float()
labels = labels.to(device).long()
outputs = net(inputs)
_, predicted = torch.max(outputs, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
acc = 100 * correct / total
return total, acc
train_total, train_acc = test(train_loader)
test_total, test_acc = test(test_loader)
if verbose:
print(f'\nAccuracy on the {train_total} training samples: {train_acc:0.2f}')
print(f'Accuracy on the {test_total} testing samples: {test_acc:0.2f}\n')
if training_plot:
plt.plot(training_losses)
plt.xlabel('Batch')
plt.ylabel('Training loss')
plt.show()
return train_acc, test_acc
# @markdown `shuffle_and_split_data(X, y, seed)`
def shuffle_and_split_data(X, y, seed):
# set seed for reproducibility
torch.manual_seed(seed)
# Number of samples
N = X.shape[0]
# Shuffle data
shuffled_indices = torch.randperm(N) # get indices to shuffle data, could use torch.randperm
X = X[shuffled_indices]
y = y[shuffled_indices]
# Split data into train/test
  test_size = int(0.2 * N)  # assign test dataset size using 20% of samples
X_test = X[:test_size]
y_test = y[:test_size]
X_train = X[test_size:]
y_train = y[test_size:]
return X_test, y_test, X_train, y_train
# @title Plotting functions
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.axis(False)
plt.show()
def sample_grid(M=500, x_max=2.0):
ii, jj = torch.meshgrid(torch.linspace(-x_max, x_max,M),
torch.linspace(-x_max, x_max, M))
X_all = torch.cat([ii.unsqueeze(-1),
jj.unsqueeze(-1)],
dim=-1).view(-1, 2)
return X_all
def plot_decision_map(X_all, y_pred, X_test, y_test,
M=500, x_max=2.0, eps=1e-3):
decision_map = torch.argmax(y_pred, dim=1)
for i in range(len(X_test)):
indeces = (X_all[:, 0] - X_test[i, 0])**2 + (X_all[:, 1] - X_test[i, 1])**2 < eps # [TO-DO]
decision_map[indeces] = (K + y_test[i]).long()
decision_map = decision_map.view(M, M).cpu()
plt.imshow(decision_map, extent=[-x_max, x_max, -x_max, x_max], cmap='jet')
plt.plot()
# @title Set random seed
# @markdown Executing `set_seed(seed=seed)` you are setting the seed
# for DL it's critical to set the random seed so that students can have a
# baseline to compare their results to expected results.
# Read more here: https://pytorch.org/docs/stable/notes/randomness.html
# Call `set_seed` function in the exercises to ensure reproducibility.
import random
import torch
def set_seed(seed=None, seed_torch=True):
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
# In case that `DataLoader` is used
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
# @title Set device (GPU or CPU). Execute `set_device()`
# especially if torch modules used.
# inform the user if the notebook uses GPU or CPU.
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("GPU is not enabled in this notebook. \n"
"If you want to enable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `GPU` from the dropdown menu")
else:
print("GPU is enabled in this notebook. \n"
"If you want to disable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `None` from the dropdown menu")
return device
SEED = 2021
set_seed(seed=SEED)
DEVICE = set_device()
# @title Download of the Animal Faces dataset
# @markdown Animal faces consists of 16,130 32x32 images belonging to 3 classes
import requests, os
from zipfile import ZipFile
print("Start downloading and unzipping `AnimalFaces` dataset...")
name = 'AnimalFaces32x32'
fname = f"{name}.zip"
url = f"https://osf.io/kgfvj/download"
r = requests.get(url, allow_redirects=True)
with open(fname, 'wb') as fh:
fh.write(r.content)
with ZipFile(fname, 'r') as zfile:
zfile.extractall(f"./{name}")
if os.path.exists(fname):
os.remove(fname)
else:
print(f"The file {fname} does not exist")
os.chdir(name)
print("Download completed.")
# @title Data Loader
# @markdown Execute this cell!
K = 4
sigma = 0.4
N = 1000
t = torch.linspace(0, 1, N)
X = torch.zeros(K*N, 2)
y = torch.zeros(K*N)
for k in range(K):
X[k*N:(k+1)*N, 0] = t*(torch.sin(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
X[k*N:(k+1)*N, 1] = t*(torch.cos(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
y[k*N:(k+1)*N] = k
X_test, y_test, X_train, y_train = shuffle_and_split_data(X, y, seed=SEED)
# DataLoader with random seed
batch_size = 128
g_seed = torch.Generator()
g_seed.manual_seed(SEED)
test_data = TensorDataset(X_test, y_test)
test_loader = DataLoader(test_data, batch_size=batch_size,
shuffle=False, num_workers=0,
worker_init_fn=seed_worker,
generator=g_seed,
)
train_data = TensorDataset(X_train, y_train)
train_loader = DataLoader(train_data,
batch_size=batch_size,
drop_last=True,
shuffle=True,
worker_init_fn=seed_worker,
generator=g_seed,
)
###Output
_____no_output_____
###Markdown
--- Section 1: Wider vs deeper networks*Time estimate: ~45 mins*
###Code
# @title Video 1: Deep Expressivity
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV19f4y157vG", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"g8JuGrNk9ag", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 1: Deep Expressivity')
display(out)
###Output
_____no_output_____
###Markdown
Coding Exercise 1: Wide vs. Deep while keeping the number of parameters the sameLet's find the optimal number of hidden layers under a fixed number-of-parameters constraint!But first, we need a model parameter counter. You could iterate over the model layers by calling `.parameters()` and then use `.numel()` to count the layer parameters. Also, you can use the [`requires_grad`](https://pytorch.org/docs/stable/notes/autograd.html) attribute to make sure it's a trainable parameter. E.g.,```pythonx = torch.ones(10, 5, requires_grad=True)```After defining the counter function, we will increase the depth step by step and then iterate over the possible numbers of hidden units (assuming the same number for all hidden layers); then, using our parameter counter, we choose the number of hidden units that results in a total parameter count close to `max_par_count`.
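As a complementary illustration, per-layer parameter counts can be inspected with `named_parameters()` (a small sketch on a toy model, not the exercise solution):
```python
import torch.nn as nn

toy = nn.Sequential(nn.Linear(2, 8), nn.ReLU(), nn.Linear(8, 4))
total = 0
for name, p in toy.named_parameters():
  if p.requires_grad:
    print(name, p.numel())  # e.g. '0.weight' 16, '0.bias' 8, ...
    total += p.numel()
print('total trainable parameters:', total)  # 16 + 8 + 32 + 4 = 60
```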
###Code
def run_depth_optimizer(max_par_count, max_hidden_layer, device):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the depth optimizer function")
###################################################################
def count_parameters(model):
par_count = 0
for p in model.parameters():
if p.requires_grad:
par_count += ...
return par_count
# number of hidden layers to try
hidden_layers = ...
  # test score list
test_scores = []
for hidden_layer in hidden_layers:
# Initialize the hidden units in each hidden layer to be 1
    hidden_units = np.ones(hidden_layer, dtype=int)
    # Define the network with all hidden units equal to 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(device)
par_count = count_parameters(wide_net)
# increment hidden_units and repeat until the par_count reaches the desired count
while par_count < max_par_count:
hidden_units += 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(device)
par_count = ...
# Train it
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(wide_net.parameters(), lr=1e-3)
_, test_acc = train_test_classification(wide_net, criterion, optimizer,
train_loader, test_loader,
num_epochs=100, device=device)
test_scores += [test_acc]
return hidden_layers, test_scores
# add event to airtable
atform.add_event('Coding Exercise 1: Wide vs. Deep ')
set_seed(seed=SEED)
max_par_count = 100
max_hidden_layer = 5
## Uncomment below to test your function
# hidden_layers, test_scores = run_depth_optimizer(max_par_count, max_hidden_layer, DEVICE)
# plt.xlabel('# of hidden layers')
# plt.ylabel('Test accuracy')
# plt.plot(hidden_layers, test_scores)
# plt.show()
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_7d616b5c.py)*Example output:* Think! 1: Why the tradeoff?Here we see that there is a particular number of hidden layers that is optimum. Why do you think increasing the number of hidden layers after a certain point hurts performance in this scenario?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type answer here and Push submit',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
atform.add_answer('q1' , text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_4c626e50.py) Section 1.1: Where Wide FailsLet's use the same Spiral dataset generated before with two features, then add more polynomial features (which makes the first layer wider), and finally train a single Linear layer. We could use the same MLP network with no hidden layers (though it would not be called an MLP anymore!).Note that we will add polynomial terms up to $P=50$, which means that for every $x_1^n x_2^m$ term, $n+m\leq P$. Now it's a fun math exercise to prove why the total number of polynomial features up to $P$ becomes\begin{equation}\text{number of terms} = \frac{(P+1)(P+2)}{2}\end{equation}Also, we don't need the polynomial term with degree zero (which is the constant term) since `nn.Linear` layers have bias terms. Therefore we will have one fewer polynomial feature.
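To make the feature construction concrete, here is a tiny illustration with two samples and $P=2$ (the helper name is ours, for illustration only):
```python
import torch

def poly_features(X, P):
  # all x1**i * x2**j columns with 0 < i + j <= P (constant term excluded)
  cols = [X[:, 0]**i * X[:, 1]**j
          for i in range(P + 1) for j in range(P + 1) if 0 < i + j <= P]
  return torch.stack(cols, dim=1)

X_tiny = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
print(poly_features(X_tiny, 2).shape)  # torch.Size([2, 5]); (P+1)(P+2)/2 - 1 = 5 features
```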
###Code
def run_poly_clasification(poly_degree, device='cpu', seed=0):
def make_poly_features(poly_degree, X):
# Define the number of polynomial features except the bias term
num_features = (poly_degree + 1)*(poly_degree + 2) // 2 - 1
poly_X = torch.zeros((X.shape[0], num_features))
count = 0
for i in range(poly_degree+1):
for j in range(poly_degree+1):
# no need to add zero degree since model has biases
if j + i > 0:
if j + i <= poly_degree:
# Define the polynomial term
poly_X[:, count] = X[:, 0]**i * X [:, 1]**j
count += 1
return poly_X, num_features
poly_X_test, num_features = make_poly_features(poly_degree, X_test)
poly_X_train, _ = make_poly_features(poly_degree, X_train)
batch_size = 128
g_seed = torch.Generator()
g_seed.manual_seed(seed)
poly_test_data = TensorDataset(poly_X_test, y_test)
poly_test_loader = DataLoader(poly_test_data,
batch_size=batch_size,
shuffle=False,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
poly_train_data = TensorDataset(poly_X_train, y_train)
poly_train_loader = DataLoader(poly_train_data,
batch_size=batch_size,
shuffle=True,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
# define a linear model using MLP class
poly_net = Net('ReLU()', num_features, [], K).to(device)
# Train it!
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(poly_net.parameters(), lr=1e-3)
_, _ = train_test_classification(poly_net, criterion, optimizer,
poly_train_loader, poly_test_loader,
                                   num_epochs=100, device=device)
# Test it
X_all = sample_grid().to(device)
poly_X_all, _ = make_poly_features(poly_degree, X_all)
y_pred = poly_net(poly_X_all.to(device))
# Plot it
plot_decision_map(X_all.cpu(), y_pred.cpu(), X_test.cpu(), y_test.cpu())
plt.show()
return num_features
set_seed(seed=SEED)
max_poly_degree = 50
num_features = run_poly_clasification(max_poly_degree, DEVICE, SEED)
print(f'Number of features: {num_features}')
###Output
_____no_output_____
###Markdown
Think! 1.1: Does it generalize well?Do you think this model is performing well outside its training distribution? Why?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type answer here and Push submit',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
atform.add_answer('q2', text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_13c53198.py) --- Section 2: Deeper MLPs*Time estimate: ~55 mins*
###Code
# @title Video 2: Case study
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1FL411n7SH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"3g_OJ6dYE8E", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 2: Case study')
display(out)
###Output
_____no_output_____
###Markdown
Coding Exercise 2: Dataloader on a real-world datasetLet's build our first real-world dataset loader with Data Preprocessing and Augmentation! We will use the torchvision transforms to do it.We'd like to have a simple data augmentation with the following steps:* Random rotation by up to 10 degrees (`.RandomRotation`)* Random horizontal flipping (`.RandomHorizontalFlip`)and we'd like a preprocessing that:* makes PyTorch tensors in the range [0, 1] (`.ToTensor`)* normalizes the input to the range [-1, 1] (`.Normalize`)**Hint:** For more info on transforms, see the [official documentation](https://pytorch.org/vision/stable/transforms.html).
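A small check of the preprocessing stage on a synthetic image (illustrative only; the per-channel constants of 0.5 are an assumption for mapping [0, 1] to [-1, 1]):
```python
import torch
import torchvision.transforms as transforms

normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
fake_img = torch.rand(3, 32, 32)           # values in [0, 1], as .ToTensor would produce
out = normalize(fake_img)
print(out.min().item(), out.max().item())  # roughly -1 and 1
```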
###Code
def get_data_loaders(batch_size, seed):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the get data loaders function")
###################################################################
# define the transform done only during training
augmentation_transforms = ...
# define the transform done in training and testing (after augmentation)
preprocessing_transforms = ...
# compose them together
train_transform = transforms.Compose(augmentation_transforms + preprocessing_transforms)
test_transform = transforms.Compose(preprocessing_transforms)
# using pathlib to be compatible with all OS's
data_path = pathlib.Path('.')/'afhq'
# define the dataset objects (they can load one by one)
img_train_dataset = ImageFolder(data_path/'train', transform=train_transform)
img_test_dataset = ImageFolder(data_path/'val', transform=test_transform)
g_seed = torch.Generator()
g_seed.manual_seed(seed)
# define the dataloader objects (they can load batch by batch)
img_train_loader = DataLoader(img_train_dataset,
batch_size=batch_size,
shuffle=True,
worker_init_fn=seed_worker,
generator=g_seed)
  # num_workers can be set higher (e.g., on Colab Pro) to speed up loading;
  # with more than one worker, batches are prepared in parallel worker processes
img_test_loader = DataLoader(img_test_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
return img_train_loader, img_test_loader
# add event to airtable
atform.add_event('Coding Exercise 2: Dataloader on a real-world dataset')
batch_size = 64
set_seed(seed=SEED)
## Uncomment below to test your function
# img_train_loader, img_test_loader = get_data_loaders(batch_size, SEED)
## get some random training images
# dataiter = iter(img_train_loader)
# images, labels = next(dataiter)
## show images
# imshow(make_grid(images, nrow=8))
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_9605a4e9.py)*Example output:*
###Code
# Train it
set_seed(seed=SEED)
net = Net('ReLU()', 3*32*32, [64, 64, 64], 3).to(DEVICE)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=3e-4)
_, _ = train_test_classification(net, criterion, optimizer,
img_train_loader, img_test_loader,
num_epochs=30, device=DEVICE)
# visualize the feature map
fc1_weights = net.mlp[0].weight.view(64, 3, 32, 32).detach().cpu()
fc1_weights /= torch.max(torch.abs(fc1_weights))
imshow(make_grid(fc1_weights, nrow=8))
###Output
_____no_output_____
###Markdown
Think! 2: Why are the first-layer features high level?Even though the network is three layers deep, we see distinct animal faces in the first-layer feature map. Do you think this MLP has a hierarchical feature representation? Why?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type answer here and Push submit',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
  atform.add_answer('q3', text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_eb2e554f.py) --- Section 3: The need for good initialization*Time estimate: ~20 mins*
###Code
# @title Video 3: Need for Good Initialization
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Qq4y1H7Px", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"W0V2kwHSuUI", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 3: Need for Good Initialization')
display(out)
###Output
_____no_output_____
###Markdown
Section 3.1: Xavier initializationLet us look at the scale distribution of an output (e.g., a hidden variable) $o_i$ for some fully-connected layer without nonlinearities. With $n_{in}$ inputs ($x_j$) and their associated weights $w_{ij}$ for this layer. Then an output is given by,\begin{equation}o_{i} = \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j\end{equation}The weights $w_{ij}$ are all drawn independently from the same distribution. Furthermore, let us assume that this distribution has zero mean and variance $\sigma^2$ . Note that this does not mean that the distribution has to be Gaussian, just that the mean and variance need to exist. For now, let us assume that the inputs to the layer $x_j$ also have zero mean and variance $\gamma^2$ and that they are independent of $w_{ij}$ and independent of each other. In this case, we can compute the mean and variance of $o_i$ as follows:\begin{split}\begin{aligned} E[o_i] &= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij} x_j] \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij}] E[x_j] = 0, \\ \\ \\ \mathrm{Var}[o_i] &= E[o_i^2] - (E[o_i])^2 \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij} x^2_j] - 0 \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij}] E[x^2_j] \\ \\ &= n_\mathrm{in} \sigma^2 \gamma^2\end{aligned}\end{split}One way to keep the variance fixed is to set $n_{in}\sigma^2=1$ . Now consider backpropagation. There we face a similar problem, albeit with gradients being propagated from the layers closer to the output. Using the same reasoning as for forward propagation, we see that the gradients’ variance can blow up unless $n_{out}\sigma^2=1$ , where $n_{out}$ is the number of outputs of this layer. This leaves us in a dilemma: we cannot possibly satisfy both conditions simultaneously. Instead, we simply try to satisfy:\begin{aligned}\frac{1}{2} (n_\mathrm{in} + n_\mathrm{out}) \sigma^2 = 1 \text{ or equivalently }\sigma = \sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}\end{aligned}This is the reasoning underlying the now-standard and practically beneficial Xavier initialization, named after the first author of its creators [Glorot & Bengio, 2010]. Typically, the Xavier initialization samples weights from a Gaussian distribution with zero mean and variance $\sigma^2=\frac{2}{(n_{in}+n_{out})}$,\begin{equation}w_{ij} \sim \mathcal{N} \left (\mu=0, \sigma=\sqrt{\frac{2}{(n_{in}+n_{out})}} \right)\end{equation}We can also adapt Xavier’s intuition to choose the variance when sampling weights from a uniform distribution. Note that the uniform distribution $U(−a,a)$ has variance $\frac{a^2}{3}$. Plugging this into our condition on $\sigma^2$ yields the suggestion to initialize according to\begin{equation}w_{ij} \sim \mathcal{U} \left(-\sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}, \sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}\right)\end{equation}This explanation is mainly taken from [here](https://d2l.ai/chapter_multilayer-perceptrons/numerical-stability-and-init.html). If you want to see more about initializations and their differences see [here](https://www.deeplearning.ai/ai-notes/initialization/). 
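In PyTorch these two samplers correspond to `torch.nn.init.xavier_normal_` and `torch.nn.init.xavier_uniform_`; a quick check that the resulting weight scale matches the formula (layer sizes here are arbitrary, purely illustrative):
```python
import torch.nn as nn

layer = nn.Linear(300, 100)
nn.init.xavier_normal_(layer.weight)
empirical_std = layer.weight.std().item()
theoretical_std = (2.0 / (300 + 100)) ** 0.5
print(empirical_std, theoretical_std)  # both ~0.0707
```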
Section 3.2: Initialization with transfer functionLet's derive the optimal gain for LeakyReLU following similar steps.LeakyReLU is described mathematically:\begin{equation}f(x)=\left\{ \begin{array}{ll} \alpha \cdot x & \text { for } x<0 \\ x & \text { for } x \geq 0 \end{array}\right.\end{equation}where $\alpha$ controls the angle of the negative slope.Considering a single layer with this activation function gives,\begin{align}o_{i} &= \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j\\z_{i} &= f\left( o_{i} \right)\end{align}where $z_i$ denotes the activation of node $i$.The expectation of the output is still zero, i.e., $\mathbb{E}[f(o_i)]=0$, but the variance changes, and assuming that the probability $P(x < 0) = 0.5$, we have that:\begin{align}\mathrm{Var}[f(o_i)] &= \mathbb{E}[f(o_i)^2] - \left( \mathbb{E}[f(o_i)] \right)^{2} \\ \\&= \frac{\mathrm{Var}[o_i] + \alpha^2 \mathrm{Var}[o_i]}{2} \\ \\&= \frac{1+\alpha^2}{2}n_\mathrm{in} \sigma^2 \gamma^2\end{align}where $\gamma^2$ is the variance of the distribution of the inputs $x_j$ and $\sigma^2$ is the variance of the distribution of weights $w_{ij}$, as before.Therefore, following the rest of the derivation as before,\begin{equation}\sigma = gain\sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}, \, \text{where} \,\, gain = \sqrt{\frac{2}{1+\alpha^2}}\end{equation}As we can see from the derived formula of $\sigma$, the transfer function we choose is related to the variance of the distribution of the weights. As the negative slope of the LeakyReLU $\alpha$ becomes larger, the $gain$ becomes smaller and thus the distribution of the weights is narrower. On the other hand, as $\alpha$ becomes smaller and smaller, the distribution of the weights is wider. Recall that we initialize our weights, for example, by sampling from a normal distribution with zero mean and variance $\sigma^2$. Best gain for Xavier Initialization with Leaky ReLUYou're probably running out of time, so let me explain what's happening here. We derived a theoretical gain for initialization. But does it hold in practice? Here we have a setup to confirm our finding. We will try a range of gains and see the empirical optimum and whether it matches our theoretical value!If you have time left, you can change the distribution to sample the initial weights from a uniform distribution. Comment out line 11 and uncomment line 12.
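The $(1+\alpha^2)/2$ factor in the second-moment step can be verified empirically (a small illustrative check; note that the derivation above treats the post-activation mean as zero):
```python
import torch
import torch.nn.functional as F

alpha = 0.1
o = torch.randn(1_000_000)                  # zero-mean, unit-variance pre-activations
z = F.leaky_relu(o, negative_slope=alpha)
second_moment = (z ** 2).mean().item()      # E[f(o)^2], the quantity used in the derivation
print(second_moment, (1 + alpha ** 2) / 2)  # both ~0.505
```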
###Code
N = 10 # number of trials
gains = np.linspace(1/N, 3.0, N)
test_accs = []
train_accs = []
mode = 'uniform'
for gain in gains:
print(f'\ngain: {gain}')
def init_weights(m, mode='normal'):
if type(m) == nn.Linear:
torch.nn.init.xavier_normal_(m.weight, gain)
# torch.nn.init.xavier_uniform_(m.weight, gain)
negative_slope = 0.1
actv = f'LeakyReLU({negative_slope})'
set_seed(seed=SEED)
net = Net(actv, 3*32*32, [128, 64, 32], 3).to(DEVICE)
net.apply(init_weights)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=1e-2)
train_acc, test_acc = train_test_classification(net, criterion, optimizer,
img_train_loader,
img_test_loader,
num_epochs=1,
verbose=True,
device=DEVICE)
test_accs += [test_acc]
train_accs += [train_acc]
best_gain = gains[np.argmax(train_accs)]
plt.plot(gains, test_accs, label='Test accuracy')
plt.plot(gains, train_accs, label='Train accuracy')
plt.scatter(best_gain, max(train_accs),
label=f'best gain={best_gain:.1f}',
c='k', marker ='x')
# calculate and plot the theoretical gain
theoretical_gain = np.sqrt(2.0 / (1 + negative_slope ** 2))
plt.scatter(theoretical_gain, max(train_accs),
label=f'theoretical gain={theoretical_gain:.2f}',
c='g', marker ='x')
plt.legend()
plt.plot()
###Output
_____no_output_____
###Markdown
--- Section 4: Ethical aspects*Time estimate: ~20 mins*
###Code
# @title Video 4: Ethics: Hype in AI
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1CP4y1s712", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ou35QzsKsdc", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 4: Ethics: Hype in AI')
display(out)
###Output
_____no_output_____
###Markdown
--- Summary In the second tutorial of this day, we have dived deeper into MLPs and seen more of their mathematical and practical aspects. More specifically, we have learned about different architectures, i.e., deep, wide, and how they are dependent on the transfer function used. Also, we have learned about the importance of initialization, and we mathematically analyzed two methods for smart initialization.
###Code
# @title Video 5: Outro
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Kb4y1r76G", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"2sEPw4sSfSw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 5: Outro')
display(out)
# @title Airtable Submission Link
from IPython import display as IPydisplay
IPydisplay.HTML(
f"""
<div>
<a href= "{atform.url()}" target="_blank">
<img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/SurveyButton.png?raw=1"
alt="button link end of day Survey" style="width:410px"></a>
</div>""" )
###Output
_____no_output_____
###Markdown
Tutorial 2: Deep MLPs**Week 1, Day 3: Multi Layer Perceptrons****By Neuromatch Academy**__Content creators:__ Arash Ash, Surya Ganguli__Content reviewers:__ Saeed Salehi, Felix Bartsch, Yu-Fang Yang, Melvin Selim Atay__Content editors:__ B Gagana, Spiros Chavlis__Production editors:__ Anoop Kulkarni, Spiros Chavlis **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial ObjectivesIn this tutorial, we will dive deeper into MLPs and see more of their mathematical and practical aspects. Today we are going to see why MLPs:* can be deep or wide* are dependent on transfer functions* are sensitive to initialization
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in all tutorials today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/ed65b/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
--- Setup
###Code
# Imports
import random
import pathlib
import torch
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader, TensorDataset
from torchvision.utils import make_grid
from IPython.display import HTML, display
# @title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
my_layout = widgets.Layout()
# @title Plotting functions
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.axis(False)
plt.show()
def progress(epoch, loss, epochs=100):
return HTML("""
<label for="file">Training loss: {loss}</label>
<progress
value='{epoch}'
max='{epochs}',
style='width: 100%'
>
{epoch}
</progress>
""".format(loss=loss, epoch=epoch, epochs=epochs))
# @title Dataset download
from IPython.display import clear_output
!rm -r AnimalFaces32x32/
!git clone https://github.com/arashash/AnimalFaces32x32
!rm -r afhq/
!unzip ./AnimalFaces32x32/afhq_32x32.zip
clear_output()
# @title Set random seed
# @markdown Executing `set_seed(seed=seed)` you are setting the seed
# for DL it's critical to set the random seed so that students can have a
# baseline to compare their results to expected results.
# Read more here: https://pytorch.org/docs/stable/notes/randomness.html
# Call `set_seed` function in the exercises to ensure reproducibility.
import random
import torch
def set_seed(seed=None, seed_torch=True):
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
# In case that `DataLoader` is used
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
# @title Set device (GPU or CPU). Execute `set_device()`
# especially if torch modules used.
# inform the user if the notebook uses GPU or CPU.
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("WARNING: For this notebook to perform best, "
"if possible, in the menu under `Runtime` -> "
"`Change runtime type.` select `GPU` ")
else:
print("GPU is enabled in this notebook.")
return device
SEED = 2021
set_seed(seed=SEED)
DEVICE = set_device()
# @title Tutorial 1 Codes
class Net(nn.Module):
def __init__(self, actv, input_feature_num, hidden_unit_nums, output_feature_num):
super(Net, self).__init__()
    self.input_feature_num = input_feature_num # save the input size for reshaping later
self.mlp = nn.Sequential() # Initialize layers of MLP
in_num = input_feature_num # initialize the temporary input feature to each layer
for i in range(len(hidden_unit_nums)): # Loop over layers and create each one
out_num = hidden_unit_nums[i] # assign the current layer hidden unit from list
layer = nn.Linear(in_num, out_num) # use nn.Linear to define the layer
in_num = out_num # assign next layer input using current layer output
self.mlp.add_module('Linear_%d'%i, layer) # append layer to the model with a name
actv_layer = eval('nn.%s'%actv) # Assign activation function (eval allows us to instantiate object from string)
self.mlp.add_module('Activation_%d'%i, actv_layer) # append activation to the model with a name
out_layer = nn.Linear(in_num, output_feature_num) # Create final layer
self.mlp.add_module('Output_Linear', out_layer) # append the final layer
def forward(self, x):
# reshape inputs to (batch_size, input_feature_num)
# just in case the input vector is not 2D, like an image!
x = x.view(-1, self.input_feature_num)
logits = self.mlp(x) # forward pass of MLP
return logits
SEED = 2021
K = 4
sigma = 0.4
N = 1000
t = torch.linspace(0, 1, N)
X = torch.zeros(K*N, 2)
y = torch.zeros(K*N)
for k in range(K):
X[k*N:(k+1)*N, 0] = t*(torch.sin(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
X[k*N:(k+1)*N, 1] = t*(torch.cos(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
y[k*N:(k+1)*N] = k
# Shuffling
shuffled_indeces = torch.randperm(K*N)
X = X[shuffled_indeces]
y = y[shuffled_indeces]
# Test Train splitting
test_size = int(0.2*K*N)  # use 20% of all K*N samples for testing
X_test = X[:test_size]
y_test = y[:test_size]
X_train = X[test_size:]
y_train = y[test_size:]
batch_size = 128
g_seed = torch.Generator()
g_seed.manual_seed(SEED)
test_data = TensorDataset(X_test, y_test)
test_loader = DataLoader(test_data, batch_size=batch_size,
shuffle=False, num_workers=0,
worker_init_fn=seed_worker,
generator=g_seed,
)
train_data = TensorDataset(X_train, y_train)
train_loader = DataLoader(train_data,
batch_size=batch_size,
drop_last=True,
shuffle=True,
worker_init_fn=seed_worker,
generator=g_seed,
)
def train_test_classification(net, criterion, optimizer,
train_loader, test_loader,
device, num_epochs=1, verbose=True,
training_plot=False):
if verbose:
progress_bar = display(progress(0, 0, num_epochs), display_id=True)
net.train()
training_losses = []
for epoch in range(num_epochs): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(train_loader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs = inputs.to(device).float()
labels = labels.to(device).long()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
if verbose:
training_losses += [loss.item()]
running_loss += loss.item()
if i % 10 == 9: # update every 10 mini-batches
progress_bar.update(progress(epoch+1, running_loss / 10, num_epochs))
running_loss = 0.0
net.eval()
def test(data_loader):
correct = 0
total = 0
for data in data_loader:
inputs, labels = data
inputs = inputs.to(device).float()
labels = labels.to(device).long()
outputs = net(inputs)
_, predicted = torch.max(outputs, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
acc = 100 * correct / total
return total, acc
train_total, train_acc = test(train_loader)
test_total, test_acc = test(test_loader)
if verbose:
print('Accuracy on the %d training samples: %0.2f %%' % (train_total, train_acc))
print('Accuracy on the %d testing samples: %0.2f %%' % (test_total, test_acc))
if training_plot:
plt.plot(training_losses)
plt.xlabel('Batch')
plt.ylabel('Training loss')
plt.show()
return train_acc, test_acc
def sample_grid(M=500, x_max = 2.0):
ii, jj = torch.meshgrid(torch.linspace(-x_max, x_max,M),
torch.linspace(-x_max, x_max, M))
X_all = torch.cat([ii.unsqueeze(-1),
jj.unsqueeze(-1)],
dim=-1).view(-1, 2)
return X_all
def plot_decision_map(X_all, y_pred, X_test, y_test, M=500, x_max = 2.0, eps = 1e-3):
decision_map = torch.argmax(y_pred, dim=1)
for i in range(len(X_test)):
indeces = (X_all[:, 0] - X_test[i, 0])**2 + (X_all[:, 1] - X_test[i, 1])**2 < eps # [TO-DO]
decision_map[indeces] = (K + y_test[i]).long()
decision_map = decision_map.view(M, M).cpu()
plt.imshow(decision_map, extent=[-x_max, x_max, -x_max, x_max], cmap='jet')
plt.plot()
###Output
_____no_output_____
###Markdown
--- Section 1: Wider vs deeper networks
###Code
# @title Video 1: Deep Expressivity
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV19f4y157vG", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"g8JuGrNk9ag", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Coding Exercise 1: Wide vs. Deep while keeping number of parameters sameLet's find the optimal number of hidden layers under a fixed number of parameters constraint!But first, we need a model parameter counter. You could iterate over the model layers by calling `.parameters()` and then use `.numel()` to count the layer parameters. Also, you can use [`requires_grad`](https://pytorch.org/docs/stable/notes/autograd.html) attribute to make sure it's a trainable parameter. E.g.,```pythonx = torch.ones(10, 5, requires_grad=True)```After defining the counter function, we will step by step increase the depth and then iterate over the possible number of hidden units (assuming same for all hidden layers); then using our parameter counter choose the number of hidden units that results in overall close to `max_par_count` parameters.
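For orientation, here is a minimal, self-contained sketch of how `.parameters()`, `.numel()`, and `requires_grad` fit together; the tiny two-layer model is purely illustrative (it is not the `Net` used in the exercise), so treat this as a warm-up rather than the solution.
```python
import torch.nn as nn

# A toy model used only to illustrate parameter counting
toy = nn.Sequential(nn.Linear(2, 16), nn.ReLU(), nn.Linear(16, 4))

# Sum the element counts of all trainable tensors
n_trainable = sum(p.numel() for p in toy.parameters() if p.requires_grad)
print(n_trainable)  # (2*16 + 16) + (16*4 + 4) = 116
```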
###Code
def run_depth_optimizer(max_par_count, max_hidden_layer, device):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the depth optimizer function")
###################################################################
def count_parameters(model):
par_count = 0
for p in model.parameters():
if p.requires_grad:
par_count += ...
return par_count
# number of hidden layers to try
hidden_layers = ...
  # test score list
test_scores = []
for hidden_layer in hidden_layers:
# Initialize the hidden units in each hidden layer to be 1
    hidden_units = np.ones(hidden_layer, dtype=int)
    # Define the network with hidden units equal to 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(device)
par_count = count_parameters(wide_net)
# increment hidden_units and repeat until the par_count reaches the desired count
while par_count < max_par_count:
hidden_units += 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(device)
par_count = ...
# Train it
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(wide_net.parameters(), lr=1e-3)
_, test_acc = train_test_classification(wide_net, criterion, optimizer,
train_loader, test_loader,
DEVICE, num_epochs=100)
test_scores += [test_acc]
return hidden_layers, test_scores
set_seed(seed=SEED)
max_par_count = 100
max_hidden_layer = 5
## Uncomment below to test your function
# hidden_layers, test_scores = run_depth_optimizer(max_par_count, max_hidden_layer, DEVICE)
# plt.xlabel('# of hidden layers')
# plt.ylabel('Test accuracy')
# plt.plot(hidden_layers, test_scores)
# plt.show()
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_67b86937.py)*Example output:* Think! 1: Why the tradeoff?Here we see that there is a particular number of hidden layers that is optimum. Why do you think increasing the number of hidden layers beyond a certain point hurts in this scenario? [*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_4c626e50.py) Section 1.1: Where Wide FailsLet's use the same Spiral dataset generated before with two features. And then add more polynomial features (which makes the first layer wider). And finally, train a single Linear layer. We could use the same MLP network with no hidden layers (though it would not be called an MLP anymore!).Note that we will add polynomial terms up to $P=50$, which means that for every $x_1^n x_2^m$ term, $n+m\leq P$. Now it's a fun math exercise to prove why the total number of polynomial features up to degree $P$ becomes,\begin{equation}\text{number of terms} = \frac{(P+1)(P+2)}{2}\end{equation}Also, we don't need the polynomial term with degree zero (which is the constant term) since `nn.Linear` layers have bias terms. Therefore we will have one fewer polynomial feature.
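Before running the cell below, a quick sanity check of the counting formula may help; this brute-force sketch simply enumerates the monomials $x_1^n x_2^m$ with $0 < n+m \leq P$ and compares the count against $(P+1)(P+2)/2 - 1$ (the `count_poly_terms` helper exists only for this check and is not used elsewhere).
```python
def count_poly_terms(P):
    # Enumerate monomials x1**n * x2**m with 0 < n + m <= P
    return sum(1 for n in range(P + 1) for m in range(P + 1) if 0 < n + m <= P)

for P in (2, 5, 50):
    assert count_poly_terms(P) == (P + 1) * (P + 2) // 2 - 1
print(count_poly_terms(50))  # 1325 features for P = 50
```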
###Code
def run_poly_clasification(poly_degree, device, seed):
def make_poly_features(poly_degree, X):
# Define the number of polynomial features except the bias term
num_features = (poly_degree + 1)*(poly_degree + 2) // 2 - 1
poly_X = torch.zeros((X.shape[0], num_features))
count = 0
for i in range(poly_degree+1):
for j in range(poly_degree+1):
# no need to add zero degree since model has biases
if j + i > 0:
if j + i <= poly_degree:
# Define the polynomial term
poly_X[:, count] = X[:, 0]**i * X [:, 1]**j
count += 1
return poly_X, num_features
poly_X_test, num_features = make_poly_features(poly_degree, X_test)
poly_X_train, _ = make_poly_features(poly_degree, X_train)
batch_size = 128
g_seed = torch.Generator()
g_seed.manual_seed(seed)
poly_test_data = TensorDataset(poly_X_test, y_test)
poly_test_loader = DataLoader(poly_test_data,
batch_size=batch_size,
shuffle=False,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
poly_train_data = TensorDataset(poly_X_train, y_train)
poly_train_loader = DataLoader(poly_train_data,
batch_size=batch_size,
shuffle=True,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
# define a linear model using MLP class
poly_net = Net('ReLU()', num_features, [], K).to(device)
# Train it!
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(poly_net.parameters(), lr=1e-3)
_, _ = train_test_classification(poly_net, criterion, optimizer,
poly_train_loader, poly_test_loader, DEVICE,
num_epochs=100)
# Test it
X_all = sample_grid().to(device)
poly_X_all, _ = make_poly_features(poly_degree, X_all)
y_pred = poly_net(poly_X_all.to(device))
# Plot it
plot_decision_map(X_all.cpu(), y_pred.cpu(), X_test.cpu(), y_test.cpu())
plt.show()
return num_features
set_seed(seed=SEED)
max_poly_degree = 50
num_features = run_poly_clasification(max_poly_degree, DEVICE, SEED)
print('Number of features: %d'%num_features)
###Output
_____no_output_____
###Markdown
Think! 1.1: Does it generalize well?Do you think this model is performing well outside its training distribution? Why? [*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_13c53198.py) --- Section 2: Deeper MLPs
###Code
# @title Video 2: Case study
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1FL411n7SH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"3g_OJ6dYE8E", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Coding Exercise 2: Dataloader on a real-world datasetLet's build our first real-world dataset loader with Data Preprocessing and Augmentation! And we will use the Torchvision transforms to do it.We'd like to have a simple data augmentation with the following steps:* Random rotation with 10 degrees (RandomRotation)* Random horizontal flipping (RandomHorizontalFlip)and we'd like a preprocessing that:* makes Pytorch tensors in the range [0, 1] (ToTensor)* normalizes the input in the range [-1, 1] (Normalize)
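For reference, one possible way to compose such a pipeline with `torchvision.transforms` is sketched below; the rotation angle and the per-channel mean/std of 0.5 follow the description above, but treat this as an illustration rather than the expected exercise solution.
```python
import torchvision.transforms as transforms

# Augmentation (training only): small random rotation + horizontal flip
augmentation = [transforms.RandomRotation(10),
                transforms.RandomHorizontalFlip()]

# Preprocessing (train and test): to a [0, 1] tensor, then normalize to [-1, 1]
preprocessing = [transforms.ToTensor(),
                 transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]

example_train_transform = transforms.Compose(augmentation + preprocessing)
```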
###Code
def get_data_loaders(batch_size, seed):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the get data loaders function")
###################################################################
# define the transform done only during training
augmentation_transforms = ...
# define the transform done in training and testing (after augmentation)
preprocessing_transforms = ...
# compose them together
train_transform = transforms.Compose(augmentation_transforms + preprocessing_transforms)
test_transform = transforms.Compose(preprocessing_transforms)
# using pathlib to be compatible with all OS's
data_path = pathlib.Path('.')/'afhq'
# define the dataset objects (they can load one by one)
img_train_dataset = ImageFolder(data_path/'train', transform=train_transform)
img_test_dataset = ImageFolder(data_path/'val', transform=test_transform)
g_seed = torch.Generator()
g_seed.manual_seed(seed)
# define the dataloader objects (they can load batch by batch)
img_train_loader = DataLoader(img_train_dataset,
batch_size=batch_size,
shuffle=True,
worker_init_fn=seed_worker,
generator=g_seed)
# num_workers can be set to higher if running on Colab Pro TPUs to speed up,
# with more than one worker, it will do multithreading to queue batches
img_test_loader = DataLoader(img_test_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
return img_train_loader, img_test_loader
batch_size = 64
set_seed(seed=SEED)
## Uncomment below to test your function
# img_train_loader, img_test_loader = get_data_loaders(batch_size, SEED)
## get some random training images
# dataiter = iter(img_train_loader)
# images, labels = next(dataiter)
## show images
# imshow(make_grid(images, nrow=8))
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_a50bebe8.py)*Example output:*
###Code
# Train it
set_seed(seed=SEED)
net = Net('ReLU()', 3*32*32, [64, 64, 64], 3).to(DEVICE)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=3e-4)
_, _ = train_test_classification(net, criterion, optimizer,
img_train_loader, img_test_loader, DEVICE,
num_epochs=30)
# visualize the feature map
fc1_weights = net.mlp[0].weight.view(64, 3, 32, 32).detach().cpu()
fc1_weights /= torch.max(torch.abs(fc1_weights))
imshow(make_grid(fc1_weights, nrow=8))
###Output
_____no_output_____
###Markdown
Think! 2: why first layer features are high level?Even though it's three layers deep, we see distinct animal faces in the first layer feature map. Do you think this MLP has a hierarchical feature representation? why? [*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_eb2e554f.py) --- Section 3: The need for good initialization
###Code
# @title Video 3: Need for Good Initialization
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Qq4y1H7Px", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"W0V2kwHSuUI", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Section 3.1: Xavier initializationLet us look at the scale distribution of an output (e.g., a hidden variable) $o_i$ for some fully-connected layer without nonlinearities. With $n_{in}$ inputs ($x_j$) and their associated weights $w_{ij}$ for this layer. Then an output is given by,\begin{equation}o_{i} = \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j\end{equation}The weights $w_{ij}$ are all drawn independently from the same distribution. Furthermore, let us assume that this distribution has zero mean and variance $\sigma^2$ . Note that this does not mean that the distribution has to be Gaussian, just that the mean and variance need to exist. For now, let us assume that the inputs to the layer $x_j$ also have zero mean and variance $\gamma^2$ and that they are independent of $w_{ij}$ and independent of each other. In this case, we can compute the mean and variance of $o_i$ as follows:\begin{split}\begin{aligned} E[o_i] &= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij} x_j] \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij}] E[x_j] = 0, \\ \\ \\ \mathrm{Var}[o_i] &= E[o_i^2] - (E[o_i])^2 \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij} x^2_j] - 0 \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij}] E[x^2_j] \\ \\ &= n_\mathrm{in} \sigma^2 \gamma^2\end{aligned}\end{split}One way to keep the variance fixed is to set $n_{in}\sigma^2=1$ . Now consider backpropagation. There we face a similar problem, albeit with gradients being propagated from the layers closer to the output. Using the same reasoning as for forward propagation, we see that the gradients’ variance can blow up unless $n_{out}\sigma^2=1$ , where $n_{out}$ is the number of outputs of this layer. This leaves us in a dilemma: we cannot possibly satisfy both conditions simultaneously. Instead, we simply try to satisfy:\begin{aligned}\frac{1}{2} (n_\mathrm{in} + n_\mathrm{out}) \sigma^2 = 1 \text{ or equivalently }\sigma = \sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}\end{aligned}This is the reasoning underlying the now-standard and practically beneficial Xavier initialization, named after the first author of its creators [Glorot & Bengio, 2010]. Typically, the Xavier initialization samples weights from a Gaussian distribution with zero mean and variance $\sigma^2=\frac{2}{(n_{in}+n_{out})}$,\begin{equation}w_{ij} \sim \mathcal{N} \left (\mu=0, \sigma=\sqrt{\frac{2}{(n_{in}+n_{out})}} \right)\end{equation}We can also adapt Xavier’s intuition to choose the variance when sampling weights from a uniform distribution. Note that the uniform distribution $U(−a,a)$ has variance $\frac{a^2}{3}$. Plugging this into our condition on $\sigma^2$ yields the suggestion to initialize according to\begin{equation}w_{ij} \sim \mathcal{U} \left(-\sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}, \sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}\right)\end{equation}This explanation is mainly taken from [here](https://d2l.ai/chapter_multilayer-perceptrons/numerical-stability-and-init.html). If you want to see more about initializations and their differences see [here](https://www.deeplearning.ai/ai-notes/initialization/). 
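The claim that $\sigma^2 = \frac{2}{n_\mathrm{in}+n_\mathrm{out}}$ keeps the output scale under control is easy to check numerically. The sketch below (layer sizes chosen arbitrarily) passes unit-variance inputs through a bias-free linear layer initialized with `torch.nn.init.xavier_normal_` and compares the empirical output variance with the predicted $n_\mathrm{in}\sigma^2\gamma^2$.
```python
import torch
import torch.nn as nn

torch.manual_seed(0)
n_in, n_out = 256, 128                 # arbitrary sizes for this check
layer = nn.Linear(n_in, n_out, bias=False)
nn.init.xavier_normal_(layer.weight)   # sigma^2 = 2 / (n_in + n_out)

x = torch.randn(10_000, n_in)          # inputs with gamma^2 = 1
with torch.no_grad():
    o = layer(x)

print(o.var().item())                  # empirical output variance
print(n_in * 2 / (n_in + n_out))       # predicted n_in * sigma^2 * gamma^2 ~= 1.33
```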
Section 3.2: Initialization with transfer functionLet's derive the optimal gain for LeakyReLU following similar steps.LeakyReLU is described mathematically:\begin{equation}f(x)=\left\{ \begin{array}{ll} \alpha \cdot x & \text { for } x<0 \\ x & \text { for } x \geq 0 \end{array}\right.\end{equation}where $\alpha$ controls the angle of the negative slope.Considering a single layer with this activation function gives,\begin{align}o_{i} &= \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j\\z_{i} &= f\left( o_{i} \right)\end{align}where $z_i$ denotes the activation of node $i$.The expectation of the output is still zero, i.e., $\mathbb{E}[f(o_i)]=0$, but the variance changes, and assuming that the probability $P(o_i < 0) = 0.5$, we have that:\begin{align}\mathrm{Var}[f(o_i)] &= \mathbb{E}[f(o_i)^2] - \left( \mathbb{E}[f(o_i)] \right)^{2} \\ \\&= \frac{\mathrm{Var}[o_i] + \alpha^2 \mathrm{Var}[o_i]}{2} \\ \\&= \frac{1+\alpha^2}{2}n_\mathrm{in} \sigma^2 \gamma^2\end{align}where $\gamma^2$ is the variance of the distribution of the inputs $x_j$ and $\sigma^2$ is the variance of the distribution of the weights $w_{ij}$, as before.Therefore, following the rest of the derivation as before,\begin{equation}\sigma = gain\sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}, \, \text{where} \,\, gain = \sqrt{\frac{2}{1+\alpha^2}}\end{equation}As we can see from the derived formula for $\sigma$, the transfer function we choose is related to the variance of the distribution of the weights. As the negative slope $\alpha$ of the LeakyReLU becomes larger, the $gain$ becomes smaller and thus the distribution of the weights is narrower. On the other hand, as $\alpha$ becomes smaller and smaller, the distribution of the weights is wider. Recall that we initialize our weights, for example, by sampling from a normal distribution with zero mean and variance $\sigma^2$. Best gain for Xavier Initialization with Leaky ReLUYou're probably running out of time, so let me explain what's happening here. We derived a theoretical gain for initialization. But does it hold in practice? Here we have a setup to confirm our finding. We will try a range of gains and see the empirical optimum and whether it matches our theoretical value!
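Note that PyTorch already exposes this result: `torch.nn.init.calculate_gain('leaky_relu', alpha)` returns exactly $\sqrt{2/(1+\alpha^2)}$. A quick check (a sketch only, using the same $\alpha=0.1$ as the experiment below):
```python
import numpy as np
import torch.nn as nn

alpha = 0.1
print(nn.init.calculate_gain('leaky_relu', alpha))  # PyTorch's built-in gain
print(np.sqrt(2.0 / (1 + alpha ** 2)))              # gain derived above, ~1.407
```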
###Code
set_seed(seed=SEED)
N = 10 # number of trials
gains = np.linspace(1/N, 3.0, N)
test_accs = []
train_accs = []
for gain in gains:
def init_weights(m):
if type(m) == nn.Linear:
torch.nn.init.xavier_normal_(m.weight, gain)
# torch.nn.init.xavier_uniform_(m.weight, gain)
negative_slope = 0.1
actv = 'LeakyReLU(%f)'%negative_slope
net = Net(actv, 3*32*32, [128, 64, 32], 3).to(DEVICE)
net.apply(init_weights)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=1e-2)
train_acc, test_acc = train_test_classification(net, criterion, optimizer,
img_train_loader,
img_test_loader,
DEVICE, num_epochs=1,
verbose=True)
test_accs += [test_acc]
train_accs += [train_acc]
best_gain = gains[np.argmax(train_accs)]
plt.plot(gains, test_accs, label='Test acc')
plt.plot(gains, train_accs, label='Train acc')
plt.scatter(best_gain, max(train_accs),
label=f'best gain = {best_gain:.1f}',
c='r')
theoretical_gain = np.sqrt(2.0 / (1 + negative_slope ** 2))
plt.scatter(theoretical_gain, max(train_accs),
label=f'theoretical gain = {theoretical_gain:.2f}',
c='g')
plt.legend()
plt.plot()
###Output
_____no_output_____
###Markdown
--- Section 4: Ethical aspects
###Code
# @title Video 4: Ethics: Hype in AI
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1CP4y1s712", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ou35QzsKsdc", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
--- Summary In the second tutorial of this day, we have dived deeper into MLPs and seen more of their mathematical and practical aspects. More specifically, we have learned about different architectures, i.e., deep, wide, and how they are dependent on the transfer function used. Also, we have learned about the importance of initialization, and we mathematically analyzed two methods for smart initialization.
###Code
# @title Video 5: Outro
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Kb4y1r76G", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"2sEPw4sSfSw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Tutorial 2: Deep MLPs**Week 1, Day 3: Multi Layer Perceptrons****By Neuromatch Academy**__Content creators:__ Arash Ash, Surya Ganguli__Content reviewers:__ Saeed Salehi, Felix Bartsch, Yu-Fang Yang, Melvin Selim Atay, Kelson Shilling-Scrivo__Content editors:__ Gagana B, Kelson Shilling-Scrivo, Spiros Chavlis__Production editors:__ Anoop Kulkarni, Kelson Shilling-Scrivo, Spiros Chavlis **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial ObjectivesIn this tutorial, we will dive deeper into MLPs and see more of their mathematical and practical aspects. Today we are going to see why MLPs:* can be deep or wide* dependent on transfer functions* sensitive to initialization
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in all tutorials today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/ed65b/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
--- SetupThis is a GPU free notebook!
###Code
# @title Install dependencies
!pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
from evaltools.airtable import AirtableForm
atform = AirtableForm('appn7VdPRseSoMXEG','W1D3_T2','https://portal.neuromatchacademy.org/api/redirect/to/49e16345-65a5-4616-ba63-568ca06cab78')
# Imports
import pathlib
import torch
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.optim as optim
from torchvision.utils import make_grid
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader, TensorDataset
from tqdm.auto import tqdm
from IPython.display import display
# @title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
my_layout = widgets.Layout()
# @title Helper functions (MLP Tutorial 1 Codes)
# @markdown `Net(nn.Module)`
class Net(nn.Module):
def __init__(self, actv, input_feature_num, hidden_unit_nums, output_feature_num):
super(Net, self).__init__()
    self.input_feature_num = input_feature_num # save the input size for reshaping later
self.mlp = nn.Sequential() # Initialize layers of MLP
in_num = input_feature_num # initialize the temporary input feature to each layer
for i in range(len(hidden_unit_nums)): # Loop over layers and create each one
out_num = hidden_unit_nums[i] # assign the current layer hidden unit from list
layer = nn.Linear(in_num, out_num) # use nn.Linear to define the layer
in_num = out_num # assign next layer input using current layer output
self.mlp.add_module(f"Linear_{i}", layer) # append layer to the model with a name
actv_layer = eval(f"nn.{actv}") # Assign activation function (eval allows us to instantiate object from string)
self.mlp.add_module(f"Activation_{i}", actv_layer) # append activation to the model with a name
out_layer = nn.Linear(in_num, output_feature_num) # Create final layer
self.mlp.add_module('Output_Linear', out_layer) # append the final layer
def forward(self, x):
# reshape inputs to (batch_size, input_feature_num)
# just in case the input vector is not 2D, like an image!
x = x.view(-1, self.input_feature_num)
logits = self.mlp(x) # forward pass of MLP
return logits
# @markdown `train_test_classification(net, criterion, optimizer, train_loader, test_loader, num_epochs=1, verbose=True, training_plot=False)`
def train_test_classification(net, criterion, optimizer, train_loader,
test_loader, num_epochs=1, verbose=True,
training_plot=False, device='cpu'):
net.to(device)
net.train()
training_losses = []
for epoch in tqdm(range(num_epochs)): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(train_loader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs = inputs.to(device).float()
labels = labels.to(device).long()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
if verbose:
training_losses += [loss.item()]
net.eval()
def test(data_loader):
correct = 0
total = 0
for data in data_loader:
inputs, labels = data
inputs = inputs.to(device).float()
labels = labels.to(device).long()
outputs = net(inputs)
_, predicted = torch.max(outputs, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
acc = 100 * correct / total
return total, acc
train_total, train_acc = test(train_loader)
test_total, test_acc = test(test_loader)
if verbose:
print(f'\nAccuracy on the {train_total} training samples: {train_acc:0.2f}')
print(f'Accuracy on the {test_total} testing samples: {test_acc:0.2f}\n')
if training_plot:
plt.plot(training_losses)
plt.xlabel('Batch')
plt.ylabel('Training loss')
plt.show()
return train_acc, test_acc
# @markdown `shuffle_and_split_data(X, y, seed)`
def shuffle_and_split_data(X, y, seed):
# set seed for reproducibility
torch.manual_seed(seed)
# Number of samples
N = X.shape[0]
# Shuffle data
shuffled_indices = torch.randperm(N) # get indices to shuffle data, could use torch.randperm
X = X[shuffled_indices]
y = y[shuffled_indices]
# Split data into train/test
test_size = int(0.2 * N) # assign test datset size using 20% of samples
X_test = X[:test_size]
y_test = y[:test_size]
X_train = X[test_size:]
y_train = y[test_size:]
return X_test, y_test, X_train, y_train
# @title Plotting functions
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.axis(False)
plt.show()
def sample_grid(M=500, x_max=2.0):
ii, jj = torch.meshgrid(torch.linspace(-x_max, x_max,M),
torch.linspace(-x_max, x_max, M))
X_all = torch.cat([ii.unsqueeze(-1),
jj.unsqueeze(-1)],
dim=-1).view(-1, 2)
return X_all
def plot_decision_map(X_all, y_pred, X_test, y_test,
M=500, x_max=2.0, eps=1e-3):
decision_map = torch.argmax(y_pred, dim=1)
for i in range(len(X_test)):
indeces = (X_all[:, 0] - X_test[i, 0])**2 + (X_all[:, 1] - X_test[i, 1])**2 < eps # [TO-DO]
decision_map[indeces] = (K + y_test[i]).long()
decision_map = decision_map.view(M, M).cpu()
plt.imshow(decision_map, extent=[-x_max, x_max, -x_max, x_max], cmap='jet')
plt.plot()
# @title Set random seed
# @markdown Executing `set_seed(seed=seed)` you are setting the seed
# for DL its critical to set the random seed so that students can have a
# baseline to compare their results to expected results.
# Read more here: https://pytorch.org/docs/stable/notes/randomness.html
# Call `set_seed` function in the exercises to ensure reproducibility.
import random
import torch
def set_seed(seed=None, seed_torch=True):
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
# In case that `DataLoader` is used
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
# @title Set device (GPU or CPU). Execute `set_device()`
# especially if torch modules are used.
# inform the user if the notebook uses GPU or CPU.
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("GPU is not enabled in this notebook. \n"
"If you want to enable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `GPU` from the dropdown menu")
else:
print("GPU is enabled in this notebook. \n"
"If you want to disable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `None` from the dropdown menu")
return device
SEED = 2021
set_seed(seed=SEED)
DEVICE = set_device()
# @title Download of the Animal Faces dataset
# @markdown Animal faces consists of 16,130 32x32 images belonging to 3 classes
import requests, os
from zipfile import ZipFile
print("Start downloading and unzipping `AnimalFaces` dataset...")
name = 'AnimalFaces32x32'
fname = f"{name}.zip"
url = f"https://osf.io/kgfvj/download"
r = requests.get(url, allow_redirects=True)
with open(fname, 'wb') as fh:
fh.write(r.content)
with ZipFile(fname, 'r') as zfile:
zfile.extractall(f"./{name}")
if os.path.exists(fname):
os.remove(fname)
else:
print(f"The file {fname} does not exist")
os.chdir(name)
print("Download completed.")
# @title Data Loader
# @markdown Execute this cell!
K = 4
sigma = 0.4
N = 1000
t = torch.linspace(0, 1, N)
X = torch.zeros(K*N, 2)
y = torch.zeros(K*N)
for k in range(K):
X[k*N:(k+1)*N, 0] = t*(torch.sin(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
X[k*N:(k+1)*N, 1] = t*(torch.cos(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
y[k*N:(k+1)*N] = k
X_test, y_test, X_train, y_train = shuffle_and_split_data(X, y, seed=SEED)
# DataLoader with random seed
batch_size = 128
g_seed = torch.Generator()
g_seed.manual_seed(SEED)
test_data = TensorDataset(X_test, y_test)
test_loader = DataLoader(test_data, batch_size=batch_size,
shuffle=False, num_workers=0,
worker_init_fn=seed_worker,
generator=g_seed,
)
train_data = TensorDataset(X_train, y_train)
train_loader = DataLoader(train_data,
batch_size=batch_size,
drop_last=True,
shuffle=True,
worker_init_fn=seed_worker,
generator=g_seed,
)
###Output
_____no_output_____
###Markdown
--- Section 1: Wider vs deeper networks*Time estimate: ~45 mins*
###Code
# @title Video 1: Deep Expressivity
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV19f4y157vG", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"g8JuGrNk9ag", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 1: Deep Expressivity')
display(out)
###Output
_____no_output_____
###Markdown
Coding Exercise 1: Wide vs. Deep while keeping number of parameters sameLet's find the optimal number of hidden layers under a fixed number of parameters constraint!But first, we need a model parameter counter. You could iterate over the model layers by calling `.parameters()` and then use `.numel()` to count the layer parameters. Also, you can use [`requires_grad`](https://pytorch.org/docs/stable/notes/autograd.html) attribute to make sure it's a trainable parameter. E.g.,```pythonx = torch.ones(10, 5, requires_grad=True)```After defining the counter function, we will step by step increase the depth and then iterate over the possible number of hidden units (assuming same for all hidden layers); then using our parameter counter choose the number of hidden units that results in overall close to `max_par_count` parameters.
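As a complement to the programmatic counter asked for below, the parameter count of an MLP with input size $d$, $L$ hidden layers of width $h$, and $K$ outputs can also be written in closed form as $dh + h + (L-1)(h^2 + h) + hK + K$. The sketch below (with purely illustrative sizes) checks this formula against PyTorch's own count.
```python
import torch.nn as nn

def mlp_param_count(d, h, L, K):
    # First layer, L-1 inner layers, and output layer (weights + biases)
    return (d * h + h) + (L - 1) * (h * h + h) + (h * K + K)

d, h, L, K = 2, 7, 3, 4  # illustrative sizes only
layers = [nn.Linear(d, h), nn.ReLU()]
for _ in range(L - 1):
    layers += [nn.Linear(h, h), nn.ReLU()]
layers += [nn.Linear(h, K)]
model = nn.Sequential(*layers)

assert mlp_param_count(d, h, L, K) == sum(p.numel() for p in model.parameters())
print(mlp_param_count(d, h, L, K))  # 165 for these sizes
```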
###Code
def run_depth_optimizer(max_par_count, max_hidden_layer, device):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the depth optimizer function")
###################################################################
def count_parameters(model):
par_count = 0
for p in model.parameters():
if p.requires_grad:
par_count += ...
return par_count
# number of hidden layers to try
hidden_layers = ...
  # test score list
test_scores = []
for hidden_layer in hidden_layers:
# Initialize the hidden units in each hidden layer to be 1
    hidden_units = np.ones(hidden_layer, dtype=int)
    # Define the network with hidden units equal to 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(device)
par_count = count_parameters(wide_net)
# increment hidden_units and repeat until the par_count reaches the desired count
while par_count < max_par_count:
hidden_units += 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(device)
par_count = ...
# Train it
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(wide_net.parameters(), lr=1e-3)
_, test_acc = train_test_classification(wide_net, criterion, optimizer,
train_loader, test_loader,
num_epochs=100, device=device)
test_scores += [test_acc]
return hidden_layers, test_scores
# add event to airtable
atform.add_event('Coding Exercise 1: Wide vs. Deep ')
set_seed(seed=SEED)
max_par_count = 100
max_hidden_layer = 5
## Uncomment below to test your function
# hidden_layers, test_scores = run_depth_optimizer(max_par_count, max_hidden_layer, DEVICE)
# plt.xlabel('# of hidden layers')
# plt.ylabel('Test accuracy')
# plt.plot(hidden_layers, test_scores)
# plt.show()
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_7d616b5c.py)*Example output:* Think! 1: Why the tradeoff?Here we see that there is a particular number of hidden layers that is optimum. Why do you think increasing the number of hidden layers beyond a certain point hurts in this scenario?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type answer here and Push submit',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
atform.add_answer('q1' , text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_4c626e50.py) Section 1.1: Where Wide FailsLet's use the same Spiral dataset generated before with two features. And then add more polynomial features (which makes the first layer wider). And finally, train a single Linear layer. We could use the same MLP network with no hidden layers (though it would not be called an MLP anymore!).Note that we will add polynomial terms up to $P=50$, which means that for every $x_1^n x_2^m$ term, $n+m\leq P$. Now it's a fun math exercise to prove why the total number of polynomial features up to degree $P$ becomes,\begin{equation}\text{number of terms} = \frac{(P+1)(P+2)}{2}\end{equation}Also, we don't need the polynomial term with degree zero (which is the constant term) since `nn.Linear` layers have bias terms. Therefore we will have one fewer polynomial feature.
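If `scikit-learn` happens to be available (it is not used anywhere else in this tutorial), `sklearn.preprocessing.PolynomialFeatures` builds the same expansion and offers an independent cross-check on the feature count; the degree and array below are arbitrary.
```python
import numpy as np
from sklearn.preprocessing import PolynomialFeatures

P = 5
X_demo = np.random.randn(8, 2)  # 8 samples, 2 raw features
poly = PolynomialFeatures(degree=P, include_bias=False)
X_poly = poly.fit_transform(X_demo)

print(X_poly.shape[1])              # number of polynomial terms
print((P + 1) * (P + 2) // 2 - 1)   # formula from the text: 20
```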
###Code
def run_poly_clasification(poly_degree, device='cpu', seed=0):
def make_poly_features(poly_degree, X):
# Define the number of polynomial features except the bias term
num_features = (poly_degree + 1)*(poly_degree + 2) // 2 - 1
poly_X = torch.zeros((X.shape[0], num_features))
count = 0
for i in range(poly_degree+1):
for j in range(poly_degree+1):
# no need to add zero degree since model has biases
if j + i > 0:
if j + i <= poly_degree:
# Define the polynomial term
poly_X[:, count] = X[:, 0]**i * X [:, 1]**j
count += 1
return poly_X, num_features
poly_X_test, num_features = make_poly_features(poly_degree, X_test)
poly_X_train, _ = make_poly_features(poly_degree, X_train)
batch_size = 128
g_seed = torch.Generator()
g_seed.manual_seed(seed)
poly_test_data = TensorDataset(poly_X_test, y_test)
poly_test_loader = DataLoader(poly_test_data,
batch_size=batch_size,
shuffle=False,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
poly_train_data = TensorDataset(poly_X_train, y_train)
poly_train_loader = DataLoader(poly_train_data,
batch_size=batch_size,
shuffle=True,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
# define a linear model using MLP class
poly_net = Net('ReLU()', num_features, [], K).to(device)
# Train it!
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(poly_net.parameters(), lr=1e-3)
_, _ = train_test_classification(poly_net, criterion, optimizer,
poly_train_loader, poly_test_loader,
num_epochs=100, device=DEVICE)
# Test it
X_all = sample_grid().to(device)
poly_X_all, _ = make_poly_features(poly_degree, X_all)
y_pred = poly_net(poly_X_all.to(device))
# Plot it
plot_decision_map(X_all.cpu(), y_pred.cpu(), X_test.cpu(), y_test.cpu())
plt.show()
return num_features
set_seed(seed=SEED)
max_poly_degree = 50
num_features = run_poly_clasification(max_poly_degree, DEVICE, SEED)
print(f'Number of features: {num_features}')
###Output
_____no_output_____
###Markdown
Think! 1.1: Does it generalize well?Do you think this model is performing well outside its training distribution? Why?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type your answer here and click on `Submit!`',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
atform.add_answer('q2', text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_13c53198.py) --- Section 2: Deeper MLPs*Time estimate: ~55 mins*
###Code
# @title Video 2: Case study
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1FL411n7SH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"3g_OJ6dYE8E", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 2: Case study')
display(out)
###Output
_____no_output_____
###Markdown
Coding Exercise 2: Dataloader on a real-world datasetLet's build our first real-world dataset loader with Data Preprocessing and Augmentation! And we will use the Torchvision transforms to do it.We'd like to have a simple data augmentation with the following steps:* Random rotation with 10 degrees (`.RandomRotation`)* Random horizontal flipping (`.RandomHorizontalFlip`)and we'd like a preprocessing that:* makes Pytorch tensors in the range [0, 1] (`.ToTensor`)* normalizes the input in the range [-1, 1] (.`Normalize`)**Hint:** For more info on transform, see the [official documentation](https://pytorch.org/vision/stable/transforms.html).
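One detail worth spelling out: with a per-channel mean of 0.5 and std of 0.5, `Normalize` maps the `[0, 1]` output of `ToTensor` to `[-1, 1]` via $(x - 0.5)/0.5$. A quick check on a fake image (sketch only, not part of the exercise):
```python
import torch
import torchvision.transforms as transforms

x = torch.rand(3, 32, 32)  # fake image tensor already in [0, 1]
norm = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
y = norm(x)
print(y.min().item() >= -1.0, y.max().item() <= 1.0)  # True True
```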
###Code
def get_data_loaders(batch_size, seed):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the get data loaders function")
###################################################################
# define the transform done only during training
augmentation_transforms = ...
# define the transform done in training and testing (after augmentation)
preprocessing_transforms = ...
# compose them together
train_transform = transforms.Compose(augmentation_transforms + preprocessing_transforms)
test_transform = transforms.Compose(preprocessing_transforms)
# using pathlib to be compatible with all OS's
data_path = pathlib.Path('.')/'afhq'
# define the dataset objects (they can load one by one)
img_train_dataset = ImageFolder(data_path/'train', transform=train_transform)
img_test_dataset = ImageFolder(data_path/'val', transform=test_transform)
g_seed = torch.Generator()
g_seed.manual_seed(seed)
# define the dataloader objects (they can load batch by batch)
img_train_loader = DataLoader(img_train_dataset,
batch_size=batch_size,
shuffle=True,
worker_init_fn=seed_worker,
generator=g_seed)
# num_workers can be set to higher if running on Colab Pro TPUs to speed up,
# with more than one worker, it will do multithreading to queue batches
img_test_loader = DataLoader(img_test_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
return img_train_loader, img_test_loader
# add event to airtable
atform.add_event('Coding Exercise 2: Dataloader on a real-world dataset')
batch_size = 64
set_seed(seed=SEED)
## Uncomment below to test your function
# img_train_loader, img_test_loader = get_data_loaders(batch_size, SEED)
## get some random training images
# dataiter = iter(img_train_loader)
# images, labels = next(dataiter)
## show images
# imshow(make_grid(images, nrow=8))
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_9605a4e9.py)*Example output:*
###Code
# Train it
set_seed(seed=SEED)
net = Net('ReLU()', 3*32*32, [64, 64, 64], 3).to(DEVICE)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=3e-4)
_, _ = train_test_classification(net, criterion, optimizer,
img_train_loader, img_test_loader,
num_epochs=30, device=DEVICE)
# visualize the feature map
fc1_weights = net.mlp[0].weight.view(64, 3, 32, 32).detach().cpu()
fc1_weights /= torch.max(torch.abs(fc1_weights))
imshow(make_grid(fc1_weights, nrow=8))
###Output
_____no_output_____
###Markdown
Think! 2: Why are the first-layer features high level?Even though the network is three layers deep, we see distinct animal faces in the first-layer feature map. Do you think this MLP has a hierarchical feature representation? Why?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type your answer here and click on `Submit!`',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
  atform.add_answer('q3', text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_eb2e554f.py) --- Section 3: Ethical aspects*Time estimate: ~20 mins*
###Code
# @title Video 3: Ethics: Hype in AI
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1CP4y1s712", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ou35QzsKsdc", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 3: Ethics: Hype in AI')
display(out)
###Output
_____no_output_____
###Markdown
--- Summary In the second tutorial of this day, we have dived deeper into MLPs and seen more of their mathematical and practical aspects. More specifically, we have learned about different architectures, i.e., deep, wide, and how they are dependent on the transfer function used. Also, we have learned about the importance of initialization, and we mathematically analyzed two methods for smart initialization.
###Code
# @title Video 4: Outro
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Kb4y1r76G", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"2sEPw4sSfSw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 4: Outro')
display(out)
# @title Airtable Submission Link
from IPython import display as IPydisplay
IPydisplay.HTML(
f"""
<div>
<a href= "{atform.url()}" target="_blank">
<img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/SurveyButton.png?raw=1"
alt="button link end of day Survey" style="width:410px"></a>
</div>""" )
###Output
_____no_output_____
###Markdown
--- Bonus: The need for good initializationIn this section, we derive principles for initializing deep networks. We will see that if the weights are too large, then the forward propagation of signals will be chaotic, and the backpropagation of error gradients will explode. On the other hand, if the weights are too small, the forward propagation of signals will be ordered, and the backpropagation of error gradients will vanish. The key idea behind initialization is to choose the weights to be just right, i.e., at the edge between order and chaos. In this section, we derive this edge and show how to compute the correct initial variance of the weights. Many of the typical initialization schemes in existing deep learning frameworks implicitly employ this principle of initialization at the edge of chaos. So this section can be safely skipped on a first pass and **is a bonus section**.
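The edge-of-chaos picture is easy to see numerically even without training. The sketch below (depth, width, and the three standard deviations are arbitrary choices) forward-propagates a fixed unit-variance signal through a deep stack of random linear layers and prints how the activation norm vanishes when the weights are too small, stays roughly stable at the Xavier scale $\sigma=\sqrt{2/(n_\mathrm{in}+n_\mathrm{out})}$, and explodes when they are too large.
```python
import torch

torch.manual_seed(0)
depth, width = 50, 256
x0 = torch.randn(width)

for sigma in (0.01, (2.0 / (width + width)) ** 0.5, 0.2):
    x = x0.clone()
    for _ in range(depth):
        W = sigma * torch.randn(width, width)  # fresh random weights per layer
        x = W @ x
    print(f"sigma={sigma:.4f} -> norm after {depth} layers: {x.norm().item():.3e}")
```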
###Code
# @title Video 5: Need for Good Initialization
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Qq4y1H7Px", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"W0V2kwHSuUI", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 5: Need for Good Initialization')
display(out)
###Output
_____no_output_____
###Markdown
Xavier initializationLet us look at the scale distribution of an output (e.g., a hidden variable) $o_i$ for some fully-connected layer without nonlinearities. With $n_{in}$ inputs ($x_j$) and their associated weights $w_{ij}$ for this layer. Then an output is given by,\begin{equation}o_{i} = \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j\end{equation}The weights $w_{ij}$ are all drawn independently from the same distribution. Furthermore, let us assume that this distribution has zero mean and variance $\sigma^2$ . Note that this does not mean that the distribution has to be Gaussian, just that the mean and variance need to exist. For now, let us assume that the inputs to the layer $x_j$ also have zero mean and variance $\gamma^2$ and that they are independent of $w_{ij}$ and independent of each other. In this case, we can compute the mean and variance of $o_i$ as follows:\begin{split}\begin{aligned} E[o_i] &= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij} x_j] \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij}] E[x_j] = 0, \\ \\ \\ \mathrm{Var}[o_i] &= E[o_i^2] - (E[o_i])^2 \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij} x^2_j] - 0 \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij}] E[x^2_j] \\ \\ &= n_\mathrm{in} \sigma^2 \gamma^2\end{aligned}\end{split}One way to keep the variance fixed is to set $n_{in}\sigma^2=1$ . Now consider backpropagation. There we face a similar problem, albeit with gradients being propagated from the layers closer to the output. Using the same reasoning as for forward propagation, we see that the gradients’ variance can blow up unless $n_{out}\sigma^2=1$ , where $n_{out}$ is the number of outputs of this layer. This leaves us in a dilemma: we cannot possibly satisfy both conditions simultaneously. Instead, we simply try to satisfy:\begin{aligned}\frac{1}{2} (n_\mathrm{in} + n_\mathrm{out}) \sigma^2 = 1 \text{ or equivalently }\sigma = \sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}\end{aligned}This is the reasoning underlying the now-standard and practically beneficial Xavier initialization, named after the first author of its creators [Glorot & Bengio, 2010]. Typically, the Xavier initialization samples weights from a Gaussian distribution with zero mean and variance $\sigma^2=\frac{2}{(n_{in}+n_{out})}$,\begin{equation}w_{ij} \sim \mathcal{N} \left (\mu=0, \sigma=\sqrt{\frac{2}{(n_{in}+n_{out})}} \right)\end{equation}We can also adapt Xavier’s intuition to choose the variance when sampling weights from a uniform distribution. Note that the uniform distribution $U(−a,a)$ has variance $\frac{a^2}{3}$. Plugging this into our condition on $\sigma^2$ yields the suggestion to initialize according to\begin{equation}w_{ij} \sim \mathcal{U} \left(-\sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}, \sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}\right)\end{equation}This explanation is mainly taken from [here](https://d2l.ai/chapter_multilayer-perceptrons/numerical-stability-and-init.html). If you want to see more about initializations and their differences see [here](https://www.deeplearning.ai/ai-notes/initialization/). 
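The uniform variant can be verified the same way: sampling from $U(-a, a)$ with $a = \sqrt{6/(n_\mathrm{in}+n_\mathrm{out})}$ should give a weight standard deviation close to $\sqrt{2/(n_\mathrm{in}+n_\mathrm{out})}$. A minimal sketch with arbitrarily chosen sizes:
```python
import numpy as np

rng = np.random.default_rng(0)
n_in, n_out = 300, 100                 # arbitrary sizes for this check
a = np.sqrt(6.0 / (n_in + n_out))      # Xavier uniform bound
w = rng.uniform(-a, a, size=(n_out, n_in))

print(w.std())                         # empirical sigma
print(np.sqrt(2.0 / (n_in + n_out)))   # target sigma, ~0.0707
```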
Initialization with transfer functionLet's derive the optimal gain for LeakyReLU following similar steps.LeakyReLU is described mathematically:\begin{equation}f(x)=\left\{ \begin{array}{ll} \alpha \cdot x & \text { for } x<0 \\ x & \text { for } x \geq 0 \end{array}\right.\end{equation}where $\alpha$ controls the angle of the negative slope.Considering a single layer with this activation function gives,\begin{align}o_{i} &= \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j\\z_{i} &= f\left( o_{i} \right)\end{align}where $z_i$ denotes the activation of node $i$.The expectation of the output is still zero, i.e., $\mathbb{E}[f(o_i)]=0$, but the variance changes, and assuming that the probability $P(o_i < 0) = 0.5$, we have that:\begin{align}\mathrm{Var}[f(o_i)] &= \mathbb{E}[f(o_i)^2] - \left( \mathbb{E}[f(o_i)] \right)^{2} \\ \\&= \frac{\mathrm{Var}[o_i] + \alpha^2 \mathrm{Var}[o_i]}{2} \\ \\&= \frac{1+\alpha^2}{2}n_\mathrm{in} \sigma^2 \gamma^2\end{align}where $\gamma^2$ is the variance of the distribution of the inputs $x_j$ and $\sigma^2$ is the variance of the distribution of the weights $w_{ij}$, as before.Therefore, following the rest of the derivation as before,\begin{equation}\sigma = gain\sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}, \, \text{where} \,\, gain = \sqrt{\frac{2}{1+\alpha^2}}\end{equation}As we can see from the derived formula for $\sigma$, the transfer function we choose is related to the variance of the distribution of the weights. As the negative slope $\alpha$ of the LeakyReLU becomes larger, the $gain$ becomes smaller and thus the distribution of the weights is narrower. On the other hand, as $\alpha$ becomes smaller and smaller, the distribution of the weights is wider. Recall that we initialize our weights, for example, by sampling from a normal distribution with zero mean and variance $\sigma^2$. Best gain for Xavier Initialization with Leaky ReLUYou're probably running out of time, so let me explain what's happening here. We derived a theoretical gain for initialization. But does it hold in practice? Here we have a setup to confirm our finding. We will try a range of gains and see the empirical optimum and whether it matches our theoretical value!If you have time left, you can change the distribution to sample the initial weights from a uniform distribution. Comment out line 11 and uncomment line 12.
###Code
N = 10 # number of trials
gains = np.linspace(1/N, 3.0, N)
test_accs = []
train_accs = []
mode = 'uniform'
for gain in gains:
print(f'\ngain: {gain}')
def init_weights(m, mode='normal'):
if type(m) == nn.Linear:
torch.nn.init.xavier_normal_(m.weight, gain)
# torch.nn.init.xavier_uniform_(m.weight, gain)
negative_slope = 0.1
actv = f'LeakyReLU({negative_slope})'
set_seed(seed=SEED)
net = Net(actv, 3*32*32, [128, 64, 32], 3).to(DEVICE)
net.apply(init_weights)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=1e-2)
train_acc, test_acc = train_test_classification(net, criterion, optimizer,
img_train_loader,
img_test_loader,
num_epochs=1,
verbose=True,
device=DEVICE)
test_accs += [test_acc]
train_accs += [train_acc]
best_gain = gains[np.argmax(train_accs)]
plt.plot(gains, test_accs, label='Test accuracy')
plt.plot(gains, train_accs, label='Train accuracy')
plt.scatter(best_gain, max(train_accs),
label=f'best gain={best_gain:.1f}',
c='k', marker ='x')
# calculate and plot the theoretical gain
theoretical_gain = np.sqrt(2.0 / (1 + negative_slope ** 2))
plt.scatter(theoretical_gain, max(train_accs),
label=f'theoretical gain={theoretical_gain:.2f}',
c='g', marker ='x')
plt.legend()
plt.plot()
###Output
_____no_output_____
###Markdown
Tutorial 2: Deep MLPs**Week 1, Day 3: Multi Layer Perceptrons****By Neuromatch Academy**__Content creators:__ Arash Ash, Surya Ganguli__Content reviewers:__ Saeed Salehi, Felix Bartsch, Yu-Fang Yang, Melvin Selim Atay, Kelson Shilling-Scrivo__Content editors:__ Gagana B, Kelson Shilling-Scrivo, Spiros Chavlis__Production editors:__ Anoop Kulkarni, Kelson Shilling-Scrivo, Spiros Chavlis **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial ObjectivesIn this tutorial, we will dive deeper into MLPs and see more of their mathematical and practical aspects. Today we are going to see why MLPs:* can be deep or wide* are dependent on transfer functions* are sensitive to initialization
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in all tutorials today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/ed65b/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
--- SetupThis is a GPU free notebook!
###Code
# @title Install dependencies
!pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
from evaltools.airtable import AirtableForm
atform = AirtableForm('appn7VdPRseSoMXEG','W1D3_T2','https://portal.neuromatchacademy.org/api/redirect/to/49e16345-65a5-4616-ba63-568ca06cab78')
# Imports
import pathlib
import torch
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.optim as optim
from torchvision.utils import make_grid
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader, TensorDataset
from tqdm.auto import tqdm
from IPython.display import display
# @title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
my_layout = widgets.Layout()
# @title Helper functions (MLP Tutorial 1 Codes)
# @markdown `Net(nn.Module)`
class Net(nn.Module):
def __init__(self, actv, input_feature_num, hidden_unit_nums, output_feature_num):
super(Net, self).__init__()
    self.input_feature_num = input_feature_num # save the input size for reshaping later
self.mlp = nn.Sequential() # Initialize layers of MLP
in_num = input_feature_num # initialize the temporary input feature to each layer
for i in range(len(hidden_unit_nums)): # Loop over layers and create each one
out_num = hidden_unit_nums[i] # assign the current layer hidden unit from list
layer = nn.Linear(in_num, out_num) # use nn.Linear to define the layer
in_num = out_num # assign next layer input using current layer output
self.mlp.add_module(f"Linear_{i}", layer) # append layer to the model with a name
actv_layer = eval(f"nn.{actv}") # Assign activation function (eval allows us to instantiate object from string)
self.mlp.add_module(f"Activation_{i}", actv_layer) # append activation to the model with a name
out_layer = nn.Linear(in_num, output_feature_num) # Create final layer
self.mlp.add_module('Output_Linear', out_layer) # append the final layer
def forward(self, x):
# reshape inputs to (batch_size, input_feature_num)
# just in case the input vector is not 2D, like an image!
x = x.view(-1, self.input_feature_num)
logits = self.mlp(x) # forward pass of MLP
return logits
# @markdown `train_test_classification(net, criterion, optimizer, train_loader, test_loader, num_epochs=1, verbose=True, training_plot=False)`
def train_test_classification(net, criterion, optimizer, train_loader,
test_loader, num_epochs=1, verbose=True,
training_plot=False, device='cpu'):
net.to(device)
net.train()
training_losses = []
for epoch in tqdm(range(num_epochs)): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(train_loader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs = inputs.to(device).float()
labels = labels.to(device).long()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
if verbose:
training_losses += [loss.item()]
net.eval()
def test(data_loader):
correct = 0
total = 0
for data in data_loader:
inputs, labels = data
inputs = inputs.to(device).float()
labels = labels.to(device).long()
outputs = net(inputs)
_, predicted = torch.max(outputs, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
acc = 100 * correct / total
return total, acc
train_total, train_acc = test(train_loader)
test_total, test_acc = test(test_loader)
if verbose:
print(f'\nAccuracy on the {train_total} training samples: {train_acc:0.2f}')
print(f'Accuracy on the {test_total} testing samples: {test_acc:0.2f}\n')
if training_plot:
plt.plot(training_losses)
plt.xlabel('Batch')
plt.ylabel('Training loss')
plt.show()
return train_acc, test_acc
# @markdown `shuffle_and_split_data(X, y, seed)`
def shuffle_and_split_data(X, y, seed):
# set seed for reproducibility
torch.manual_seed(seed)
# Number of samples
N = X.shape[0]
# Shuffle data
  shuffled_indices = torch.randperm(N) # random permutation of indices used to shuffle the data
X = X[shuffled_indices]
y = y[shuffled_indices]
# Split data into train/test
  test_size = int(0.2 * N) # assign test dataset size using 20% of samples
X_test = X[:test_size]
y_test = y[:test_size]
X_train = X[test_size:]
y_train = y[test_size:]
return X_test, y_test, X_train, y_train
# @title Plotting functions
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.axis(False)
plt.show()
def sample_grid(M=500, x_max=2.0):
ii, jj = torch.meshgrid(torch.linspace(-x_max, x_max,M),
torch.linspace(-x_max, x_max, M))
X_all = torch.cat([ii.unsqueeze(-1),
jj.unsqueeze(-1)],
dim=-1).view(-1, 2)
return X_all
def plot_decision_map(X_all, y_pred, X_test, y_test,
M=500, x_max=2.0, eps=1e-3):
decision_map = torch.argmax(y_pred, dim=1)
for i in range(len(X_test)):
    indices = (X_all[:, 0] - X_test[i, 0])**2 + (X_all[:, 1] - X_test[i, 1])**2 < eps # grid points close to this test sample
    decision_map[indices] = (K + y_test[i]).long()
decision_map = decision_map.view(M, M).cpu()
plt.imshow(decision_map, extent=[-x_max, x_max, -x_max, x_max], cmap='jet')
plt.plot()
# @title Set random seed
# @markdown Executing `set_seed(seed=seed)` you are setting the seed
# for DL it's critical to set the random seed so that students can have a
# baseline to compare their results to expected results.
# Read more here: https://pytorch.org/docs/stable/notes/randomness.html
# Call `set_seed` function in the exercises to ensure reproducibility.
import random
import torch
def set_seed(seed=None, seed_torch=True):
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
# In case that `DataLoader` is used
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
# @title Set device (GPU or CPU). Execute `set_device()`
# especially if torch modules used.
# inform the user if the notebook uses GPU or CPU.
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("GPU is not enabled in this notebook. \n"
"If you want to enable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `GPU` from the dropdown menu")
else:
print("GPU is enabled in this notebook. \n"
"If you want to disable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `None` from the dropdown menu")
return device
SEED = 2021
set_seed(seed=SEED)
DEVICE = set_device()
# @title Download of the Animal Faces dataset
# @markdown Animal faces consists of 16,130 32x32 images belonging to 3 classes
import requests, os
from zipfile import ZipFile
print("Start downloading and unzipping `AnimalFaces` dataset...")
name = 'AnimalFaces32x32'
fname = f"{name}.zip"
url = f"https://osf.io/kgfvj/download"
r = requests.get(url, allow_redirects=True)
with open(fname, 'wb') as fh:
fh.write(r.content)
with ZipFile(fname, 'r') as zfile:
zfile.extractall(f"./{name}")
if os.path.exists(fname):
os.remove(fname)
else:
print(f"The file {fname} does not exist")
os.chdir(name)
print("Download completed.")
# @title Data Loader
# @markdown Execute this cell!
K = 4
sigma = 0.4
N = 1000
t = torch.linspace(0, 1, N)
X = torch.zeros(K*N, 2)
y = torch.zeros(K*N)
for k in range(K):
X[k*N:(k+1)*N, 0] = t*(torch.sin(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
X[k*N:(k+1)*N, 1] = t*(torch.cos(2*np.pi/K*(2*t+k)) + sigma**2*torch.randn(N)) # [TO-DO]
y[k*N:(k+1)*N] = k
X_test, y_test, X_train, y_train = shuffle_and_split_data(X, y, seed=SEED)
# DataLoader with random seed
batch_size = 128
g_seed = torch.Generator()
g_seed.manual_seed(SEED)
test_data = TensorDataset(X_test, y_test)
test_loader = DataLoader(test_data, batch_size=batch_size,
shuffle=False, num_workers=0,
worker_init_fn=seed_worker,
generator=g_seed,
)
train_data = TensorDataset(X_train, y_train)
train_loader = DataLoader(train_data,
batch_size=batch_size,
drop_last=True,
shuffle=True,
worker_init_fn=seed_worker,
generator=g_seed,
)
###Output
_____no_output_____
###Markdown
--- Section 1: Wider vs deeper networks*Time estimate: ~45 mins*
###Code
# @title Video 1: Deep Expressivity
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV19f4y157vG", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"g8JuGrNk9ag", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 1: Deep Expressivity')
display(out)
###Output
_____no_output_____
###Markdown
Coding Exercise 1: Wide vs. Deep while keeping number of parameters sameLet's find the optimal number of hidden layers under a fixed number of parameters constraint!But first, we need a model parameter counter. You could iterate over the model layers by calling `.parameters()` and then use `.numel()` to count the layer parameters. Also, you can use [`requires_grad`](https://pytorch.org/docs/stable/notes/autograd.html) attribute to make sure it's a trainable parameter. E.g.,```pythonx = torch.ones(10, 5, requires_grad=True)```After defining the counter function, we will step by step increase the depth and then iterate over the possible number of hidden units (assuming same for all hidden layers); then using our parameter counter choose the number of hidden units that results in overall close to `max_par_count` parameters.
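For instance, a minimal sketch of this counting idiom on a single toy `nn.Linear(4, 3)` layer (not the exercise solution) looks like this:
###Code
# Minimal sketch: count the trainable parameters of one toy layer
import torch.nn as nn
toy_layer = nn.Linear(4, 3)  # weight is 3x4 (12 values) plus a bias of 3 -> 15 parameters
n_trainable = sum(p.numel() for p in toy_layer.parameters() if p.requires_grad)
print(n_trainable)  # 15
###Output
_____no_output_____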
###Code
def run_depth_optimizer(max_par_count, max_hidden_layer, device):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the depth optimizer function")
###################################################################
def count_parameters(model):
par_count = 0
for p in model.parameters():
if p.requires_grad:
par_count += ...
return par_count
# number of hidden layers to try
hidden_layers = ...
# test test score list
test_scores = []
for hidden_layer in hidden_layers:
# Initialize the hidden units in each hidden layer to be 1
    hidden_units = np.ones(hidden_layer, dtype=int)  # use built-in int (np.int is deprecated in newer NumPy)
    # Define the network with hidden units equal to 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(device)
par_count = count_parameters(wide_net)
# increment hidden_units and repeat until the par_count reaches the desired count
while par_count < max_par_count:
hidden_units += 1
wide_net = Net('ReLU()', X_train.shape[1], hidden_units, K).to(device)
par_count = ...
# Train it
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(wide_net.parameters(), lr=1e-3)
_, test_acc = train_test_classification(wide_net, criterion, optimizer,
train_loader, test_loader,
num_epochs=100, device=device)
test_scores += [test_acc]
return hidden_layers, test_scores
# add event to airtable
atform.add_event('Coding Exercise 1: Wide vs. Deep ')
set_seed(seed=SEED)
max_par_count = 100
max_hidden_layer = 5
## Uncomment below to test your function
# hidden_layers, test_scores = run_depth_optimizer(max_par_count, max_hidden_layer, DEVICE)
# plt.xlabel('# of hidden layers')
# plt.ylabel('Test accuracy')
# plt.plot(hidden_layers, test_scores)
# plt.show()
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_7d616b5c.py)*Example output:* Think! 1: Why the tradeoff?Here we see that there is a particular number of hidden layers that is optimum. Why do you think increasing hidden layers after a certain point hurt in this scenario?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type answer here and Push submit',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
atform.add_answer('q1' , text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_4c626e50.py) Section 1.1: Where Wide FailsLet's use the same Spiral dataset generated before with two features, then add more polynomial features (which makes the first layer wider), and finally train a single Linear layer. We could use the same MLP network with no hidden layers (though it would not be called an MLP anymore!).Note that we will add polynomial terms up to $P=50$, which means that for every $x_1^n x_2^m$ term, $n+m\leq P$. Now it's a fun math exercise to prove why the total number of polynomial features up to degree $P$ becomes\begin{equation}\text{\# of terms} = \frac{(P+1)(P+2)}{2}\end{equation}Also, we don't need the polynomial term with degree zero (which is the constant term) since `nn.Linear` layers have bias terms. Therefore we will have one fewer polynomial feature.
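As a quick sanity check of the counting formula (a minimal sketch, using $P=3$): we expect $\frac{4 \cdot 5}{2}-1=9$ non-constant monomials.
###Code
# Minimal sketch: enumerate the monomials x1^i * x2^j with 0 < i + j <= P
P = 3
monomials = [(i, j) for i in range(P + 1) for j in range(P + 1) if 0 < i + j <= P]
print(len(monomials))                # 9
print((P + 1) * (P + 2) // 2 - 1)    # formula without the constant term -> 9
###Output
_____no_output_____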
###Code
def run_poly_clasification(poly_degree, device='cpu', seed=0):
def make_poly_features(poly_degree, X):
# Define the number of polynomial features except the bias term
num_features = (poly_degree + 1)*(poly_degree + 2) // 2 - 1
poly_X = torch.zeros((X.shape[0], num_features))
count = 0
for i in range(poly_degree+1):
for j in range(poly_degree+1):
# no need to add zero degree since model has biases
if j + i > 0:
if j + i <= poly_degree:
# Define the polynomial term
poly_X[:, count] = X[:, 0]**i * X [:, 1]**j
count += 1
return poly_X, num_features
poly_X_test, num_features = make_poly_features(poly_degree, X_test)
poly_X_train, _ = make_poly_features(poly_degree, X_train)
batch_size = 128
g_seed = torch.Generator()
g_seed.manual_seed(seed)
poly_test_data = TensorDataset(poly_X_test, y_test)
poly_test_loader = DataLoader(poly_test_data,
batch_size=batch_size,
shuffle=False,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
poly_train_data = TensorDataset(poly_X_train, y_train)
poly_train_loader = DataLoader(poly_train_data,
batch_size=batch_size,
shuffle=True,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
# define a linear model using MLP class
poly_net = Net('ReLU()', num_features, [], K).to(device)
# Train it!
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(poly_net.parameters(), lr=1e-3)
_, _ = train_test_classification(poly_net, criterion, optimizer,
poly_train_loader, poly_test_loader,
                             num_epochs=100, device=device)
# Test it
X_all = sample_grid().to(device)
poly_X_all, _ = make_poly_features(poly_degree, X_all)
y_pred = poly_net(poly_X_all.to(device))
# Plot it
plot_decision_map(X_all.cpu(), y_pred.cpu(), X_test.cpu(), y_test.cpu())
plt.show()
return num_features
set_seed(seed=SEED)
max_poly_degree = 50
num_features = run_poly_clasification(max_poly_degree, DEVICE, SEED)
print(f'Number of features: {num_features}')
###Output
_____no_output_____
###Markdown
Think! 1.1: Does it generalize well?Do you think this model is performing well outside its training distribution? Why?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type your answer here and click on `Submit!`',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
atform.add_answer('q2', text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_13c53198.py) --- Section 2: Deeper MLPs*Time estimate: ~55 mins*
###Code
# @title Video 2: Case study
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1FL411n7SH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"3g_OJ6dYE8E", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 2: Case study')
display(out)
###Output
_____no_output_____
###Markdown
Coding Exercise 2: Dataloader on a real-world datasetLet's build our first real-world dataset loader with Data Preprocessing and Augmentation! And we will use the Torchvision transforms to do it.We'd like to have a simple data augmentation with the following steps:* Random rotation with 10 degrees (`.RandomRotation`)* Random horizontal flipping (`.RandomHorizontalFlip`)and we'd like a preprocessing that:* makes Pytorch tensors in the range [0, 1] (`.ToTensor`)* normalizes the input in the range [-1, 1] (.`Normalize`)**Hint:** For more info on transform, see the [official documentation](https://pytorch.org/vision/stable/transforms.html).
###Code
def get_data_loaders(batch_size, seed):
####################################################################
# Fill in all missing code below (...),
# then remove or comment the line below to test your function
raise NotImplementedError("Define the get data loaders function")
###################################################################
# define the transform done only during training
augmentation_transforms = ...
# define the transform done in training and testing (after augmentation)
preprocessing_transforms = ...
# compose them together
train_transform = transforms.Compose(augmentation_transforms + preprocessing_transforms)
test_transform = transforms.Compose(preprocessing_transforms)
# using pathlib to be compatible with all OS's
data_path = pathlib.Path('.')/'afhq'
# define the dataset objects (they can load one by one)
img_train_dataset = ImageFolder(data_path/'train', transform=train_transform)
img_test_dataset = ImageFolder(data_path/'val', transform=test_transform)
g_seed = torch.Generator()
g_seed.manual_seed(seed)
# define the dataloader objects (they can load batch by batch)
img_train_loader = DataLoader(img_train_dataset,
batch_size=batch_size,
shuffle=True,
worker_init_fn=seed_worker,
generator=g_seed)
# num_workers can be set to higher if running on Colab Pro TPUs to speed up,
# with more than one worker, it will do multithreading to queue batches
img_test_loader = DataLoader(img_test_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=1,
worker_init_fn=seed_worker,
generator=g_seed)
return img_train_loader, img_test_loader
# add event to airtable
atform.add_event('Coding Exercise 2: Dataloader on a real-world dataset')
batch_size = 64
set_seed(seed=SEED)
## Uncomment below to test your function
# img_train_loader, img_test_loader = get_data_loaders(batch_size, SEED)
## get some random training images
# dataiter = iter(img_train_loader)
# images, labels = dataiter.next()
## show images
# imshow(make_grid(images, nrow=8))
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_9605a4e9.py)*Example output:*
###Code
# Train it
set_seed(seed=SEED)
net = Net('ReLU()', 3*32*32, [64, 64, 64], 3).to(DEVICE)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=3e-4)
_, _ = train_test_classification(net, criterion, optimizer,
img_train_loader, img_test_loader,
num_epochs=30, device=DEVICE)
# visualize the feature map
fc1_weights = net.mlp[0].weight.view(64, 3, 32, 32).detach().cpu()
fc1_weights /= torch.max(torch.abs(fc1_weights))
imshow(make_grid(fc1_weights, nrow=8))
###Output
_____no_output_____
###Markdown
Think! 2: why first layer features are high level?Even though it's three layers deep, we see distinct animal faces in the first layer feature map. Do you think this MLP has a hierarchical feature representation? why?
###Code
# @title Student Response
from ipywidgets import widgets
text=widgets.Textarea(
value='Type your answer here and click on `Submit!`',
placeholder='Type something',
description='',
disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
atform.add_answer('q3', text.value)
print("Submission successful!")
button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial2_Solution_eb2e554f.py) --- Section 3: Ethical aspects*Time estimate: ~20 mins*
###Code
# @title Video 3: Ethics: Hype in AI
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1CP4y1s712", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ou35QzsKsdc", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 3: Ethics: Hype in AI')
display(out)
###Output
_____no_output_____
###Markdown
--- Summary In the second tutorial of this day, we have dived deeper into MLPs and seen more of their mathematical and practical aspects. More specifically, we have learned about different architectures, i.e., deep, wide, and how they are dependent on the transfer function used. Also, we have learned about the importance of initialization, and we mathematically analyzed two methods for smart initialization.
###Code
# @title Video 4: Outro
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Kb4y1r76G", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"2sEPw4sSfSw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 4: Outro')
display(out)
# @title Airtable Submission Link
from IPython import display as IPydisplay
IPydisplay.HTML(
f"""
<div>
<a href= "{atform.url()}" target="_blank">
<img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/SurveyButton.png?raw=1"
alt="button link end of day Survey" style="width:410px"></a>
</div>""" )
###Output
_____no_output_____
###Markdown
--- Bonus: The need for good initializationIn this section, we derive principles for initializing deep networks. We will see that if the weights are too large, then the forward propagation of signals will be chaotic, and the backpropagation of error gradients will explode. On the other hand, if the weights are too small, the forward propagation of signals will be ordered, and the backpropagation of error gradients will vanish. The key idea behind initialization is to choose the weights to be just right, i.e., at the edge between order and chaos. In this section, we derive this edge and show how to compute the correct initial variance of the weights. Many of the typical initialization schemes in existing deep learning frameworks implicitly employ this principle of initialization at the edge of chaos. So this section can be safely skipped on a first pass and **is a bonus section**.
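As a quick illustration of these two regimes (a minimal sketch, assuming a 20-layer stack of width-256 linear layers): weights with too small a variance shrink the forward signal towards zero, weights with too large a variance blow it up, and a standard deviation near $1/\sqrt{n}$ keeps its scale roughly constant.
###Code
# Minimal sketch: forward signal scale through a deep stack of linear layers
import torch
import torch.nn as nn

def output_std(weight_std, n_layers=20, width=256, batch=64):
    torch.manual_seed(0)
    x = torch.randn(batch, width)
    for _ in range(n_layers):
        layer = nn.Linear(width, width, bias=False)
        nn.init.normal_(layer.weight, mean=0.0, std=weight_std)
        x = layer(x)
    return x.std().item()

width = 256
for weight_std in (0.01, 1.0 / width**0.5, 0.1):  # too small, near-critical, too large
    print(f'weight std {weight_std:.4f} -> output std {output_std(weight_std, width=width):.2e}')
###Output
_____no_output_____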
###Code
# @title Video 5: Need for Good Initialization
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Qq4y1H7Px", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"W0V2kwHSuUI", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 5: Need for Good Initialization')
display(out)
###Output
_____no_output_____
###Markdown
Xavier initializationLet us look at the scale distribution of an output (e.g., a hidden variable) $o_i$ for some fully-connected layer without nonlinearities. With $n_{in}$ inputs ($x_j$) and their associated weights $w_{ij}$ for this layer. Then an output is given by,\begin{equation}o_{i} = \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j\end{equation}The weights $w_{ij}$ are all drawn independently from the same distribution. Furthermore, let us assume that this distribution has zero mean and variance $\sigma^2$ . Note that this does not mean that the distribution has to be Gaussian, just that the mean and variance need to exist. For now, let us assume that the inputs to the layer $x_j$ also have zero mean and variance $\gamma^2$ and that they are independent of $w_{ij}$ and independent of each other. In this case, we can compute the mean and variance of $o_i$ as follows:\begin{split}\begin{aligned} E[o_i] &= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij} x_j] \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w_{ij}] E[x_j] = 0, \\ \\ \\ \mathrm{Var}[o_i] &= E[o_i^2] - (E[o_i])^2 \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij} x^2_j] - 0 \\ \\ &= \sum_{j=1}^{n_\mathrm{in}} E[w^2_{ij}] E[x^2_j] \\ \\ &= n_\mathrm{in} \sigma^2 \gamma^2\end{aligned}\end{split}One way to keep the variance fixed is to set $n_{in}\sigma^2=1$ . Now consider backpropagation. There we face a similar problem, albeit with gradients being propagated from the layers closer to the output. Using the same reasoning as for forward propagation, we see that the gradients’ variance can blow up unless $n_{out}\sigma^2=1$ , where $n_{out}$ is the number of outputs of this layer. This leaves us in a dilemma: we cannot possibly satisfy both conditions simultaneously. Instead, we simply try to satisfy:\begin{aligned}\frac{1}{2} (n_\mathrm{in} + n_\mathrm{out}) \sigma^2 = 1 \text{ or equivalently }\sigma = \sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}\end{aligned}This is the reasoning underlying the now-standard and practically beneficial Xavier initialization, named after the first author of its creators [Glorot & Bengio, 2010]. Typically, the Xavier initialization samples weights from a Gaussian distribution with zero mean and variance $\sigma^2=\frac{2}{(n_{in}+n_{out})}$,\begin{equation}w_{ij} \sim \mathcal{N} \left (\mu=0, \sigma=\sqrt{\frac{2}{(n_{in}+n_{out})}} \right)\end{equation}We can also adapt Xavier’s intuition to choose the variance when sampling weights from a uniform distribution. Note that the uniform distribution $U(−a,a)$ has variance $\frac{a^2}{3}$. Plugging this into our condition on $\sigma^2$ yields the suggestion to initialize according to\begin{equation}w_{ij} \sim \mathcal{U} \left(-\sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}, \sqrt{\frac{6}{n_\mathrm{in} + n_\mathrm{out}}}\right)\end{equation}This explanation is mainly taken from [here](https://d2l.ai/chapter_multilayer-perceptrons/numerical-stability-and-init.html). If you want to see more about initializations and their differences see [here](https://www.deeplearning.ai/ai-notes/initialization/). 
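A quick numerical check of the last line of the derivation (a minimal sketch using NumPy, with assumed sizes $n_{in}=400$, $n_{out}=200$): with zero-mean weights of variance $\sigma^2$ and unit-variance inputs, the empirical variance of $o_i$ should be close to $n_\mathrm{in} \sigma^2 \gamma^2$.
###Code
# Minimal sketch: empirically verify Var[o_i] ~ n_in * sigma^2 * gamma^2
import numpy as np
rng = np.random.default_rng(0)
n_in, n_out, batch = 400, 200, 20000
sigma = np.sqrt(2.0 / (n_in + n_out))   # Xavier choice for the weight std
gamma = 1.0                             # std of the inputs
W = rng.normal(0.0, sigma, size=(n_out, n_in))
x = rng.normal(0.0, gamma, size=(batch, n_in))
o = x @ W.T
print(o.var(), n_in * sigma**2 * gamma**2)  # both close to ~1.33 for these sizes
###Output
_____no_output_____
###Markdown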
 Initialization with transfer functionLet's derive the optimal gain for LeakyReLU following similar steps.LeakyReLU is described mathematically:\begin{equation}f(x)=\left\{ \begin{array}{ll} \alpha \cdot x & \text { for } x<0 \\ x & \text { for } x \geq 0 \end{array}\right.\end{equation}where $\alpha$ controls the angle of the negative slope.Considering a single layer with this activation function gives,\begin{align}o_{i} &= \sum_{j=1}^{n_\mathrm{in}} w_{ij} x_j\\z_{i} &= f\left( o_{i} \right)\end{align}where $z_i$ denotes the activation of node $i$.The expectation of the output is still zero, i.e., $\mathbb{E}[f(o_i)]=0$, but the variance changes, and assuming that the probability $P(o_i < 0) = 0.5$, we have that:\begin{align}\mathrm{Var}[f(o_i)] &= \mathbb{E}[f(o_i)^2] - \left( \mathbb{E}[f(o_i)] \right)^{2} \\ \\&= \frac{\mathrm{Var}[o_i] + \alpha^2 \mathrm{Var}[o_i]}{2} \\ \\&= \frac{1+\alpha^2}{2}n_\mathrm{in} \sigma^2 \gamma^2\end{align}where $\gamma^2$ is the variance of the distribution of the inputs $x_j$ and $\sigma^2$ is the variance of the distribution of the weights $w_{ij}$, as before.Therefore, following the rest of the derivation as before,\begin{equation}\sigma = gain\sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}}, \, \text{where} \,\, gain = \sqrt{\frac{2}{1+\alpha^2}}\end{equation}As we can see from the derived formula for $\sigma$, the transfer function we choose is related to the variance of the distribution of the weights. As the negative slope $\alpha$ of the LeakyReLU becomes larger, the $gain$ becomes smaller and thus the distribution of the weights is narrower. On the other hand, as $\alpha$ becomes smaller and smaller, the distribution of the weights is wider. Recall that we initialize our weights, for example, by sampling from a normal distribution with zero mean and variance $\sigma^2$. Best gain for Xavier Initialization with Leaky ReLUYou're probably running out of time, so let me explain what's happening here. We derived a theoretical gain for initialization. But the question is whether it holds in practice. Here we have a setup to confirm our finding. We will try a range of gains and see whether the empirical optimum matches our theoretical value!If you have time left, you can change the distribution to sample the initial weights from a uniform distribution. Comment out line 11 and uncomment line 12.
###Code
N = 10 # number of trials
gains = np.linspace(1/N, 3.0, N)
test_accs = []
train_accs = []
mode = 'uniform'
for gain in gains:
print(f'\ngain: {gain}')
def init_weights(m, mode='normal'):
if type(m) == nn.Linear:
torch.nn.init.xavier_normal_(m.weight, gain)
# torch.nn.init.xavier_uniform_(m.weight, gain)
negative_slope = 0.1
actv = f'LeakyReLU({negative_slope})'
set_seed(seed=SEED)
net = Net(actv, 3*32*32, [128, 64, 32], 3).to(DEVICE)
net.apply(init_weights)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=1e-2)
train_acc, test_acc = train_test_classification(net, criterion, optimizer,
img_train_loader,
img_test_loader,
num_epochs=1,
verbose=True,
device=DEVICE)
test_accs += [test_acc]
train_accs += [train_acc]
best_gain = gains[np.argmax(train_accs)]
plt.plot(gains, test_accs, label='Test accuracy')
plt.plot(gains, train_accs, label='Train accuracy')
plt.scatter(best_gain, max(train_accs),
label=f'best gain={best_gain:.1f}',
c='k', marker ='x')
# calculate and plot the theoretical gain
theoretical_gain = np.sqrt(2.0 / (1 + negative_slope ** 2))
plt.scatter(theoretical_gain, max(train_accs),
label=f'theoretical gain={theoretical_gain:.2f}',
c='g', marker ='x')
plt.legend()
plt.plot()
###Output
_____no_output_____ |
2. Introduction to Python/Practice/6.OOP_code_inheritance_probability_distributions/inheritance_probability_distribution.ipynb | ###Markdown
Inheritance with the Gaussian ClassTo give another example of inheritance, take a look at the code in this Jupyter notebook. The Gaussian distribution code is refactored into a generic Distribution class and a Gaussian distribution class. Read through the code in this Jupyter notebook to see how the code works.The Distribution class takes care of the initialization and the read_data_file method. Then the rest of the Gaussian code is in the Gaussian class. You'll later use this Distribution class in an exercise at the end of the lesson.Run the code in each cell of this Jupyter notebook. This is a code demonstration, so you do not need to write any code.
###Code
class Distribution:
def __init__(self, mu=0, sigma=1):
""" Generic distribution class for calculating and
visualizing a probability distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
self.mean = mu
self.stdev = sigma
self.data = []
def read_data_file(self, file_name):
"""Function to read in data from a txt file. The txt file should have
one number (float) per line. The numbers are stored in the data attribute.
Args:
file_name (string): name of a file to read from
Returns:
None
"""
with open(file_name) as file:
data_list = []
line = file.readline()
while line:
data_list.append(int(line))
line = file.readline()
file.close()
self.data = data_list
import math
import matplotlib.pyplot as plt
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
# initialize two gaussian distributions
gaussian_one = Gaussian(25, 3)
gaussian_two = Gaussian(30, 2)
# initialize a third gaussian distribution reading in a data efile
gaussian_three = Gaussian()
gaussian_three.read_data_file('numbers.txt')
gaussian_three.calculate_mean()
gaussian_three.calculate_stdev()
# print out the mean and standard deviations
print(gaussian_one.mean)
print(gaussian_two.mean)
print(gaussian_one.stdev)
print(gaussian_two.stdev)
print(gaussian_three.mean)
print(gaussian_three.stdev)
# plot histogram of gaussian three
gaussian_three.plot_histogram_pdf()
# add gaussian_one and gaussian_two together
gaussian_one + gaussian_two
###Output
_____no_output_____ |
notebooks/biocoding_2021_pythonlab_03.ipynb | ###Markdown
 Review of String work, and moving on to lists Let's start off with a small challenge to refresh our skills from the previous notebook. Below is some broken/incomplete code; complete the challenge by fixing it so that we generate and print the DNA sequence of the HIV 'nef' gene. Fix the broken code in each cell
###Code
# store the hiv genome as a variable
hiv_genome_rna = uggaagggcuaauucacucccaacgaagacaagauauccuugaucuguggaucuaccacacacaaggcuacuucccugauuagcagaacuacacaccagggccagggaucagauauccacugaccuuuggauggugcuacaagcuaguaccaguugagccagagaaguuagaagaagccaacaaaggagagaacaccagcuuguuacacccugugagccugcauggaauggaugacccggagagagaaguguuagaguggagguuugacagccgccuagcauuucaucacauggcccgagagcugcauccggaguacuucaagaacugcugacaucgagcuugcuacaagggacuuuccgcuggggacuuuccagggaggcguggccugggcgggacuggggaguggcgagcccucagauccugcauauaagcagcugcuuuuugccuguacugggucucucugguuagaccagaucugagccugggagcucucuggcuaacuagggaacccacugcuuaagccucaauaaagcuugccuugagugcuucaaguagugugugcccgucuguugugugacucugguaacuagagaucccucagacccuuuuagucaguguggaaaaucucuagcaguggcgcccgaacagggaccugaaagcgaaagggaaaccagaggagcucucucgacgcaggacucggcuugcugaagcgcgcacggcaagaggcgaggggcggcgacuggugaguacgccaaaaauuuugacuagcggaggcuagaaggagagagaugggugcgagagcgucaguauuaagcgggggagaauuagaucgaugggaaaaaauucgguuaaggccagggggaaagaaaaaauauaaauuaaaacauauaguaugggcaagcagggagcuagaacgauucgcaguuaauccuggccuguuagaaacaucagaaggcuguagacaaauacugggacagcuacaaccaucccuucagacaggaucagaagaacuuagaucauuauauaauacaguagcaacccucuauugugugcaucaaaggauagagauaaaagacaccaaggaagcuuuagacaagauagaggaagagcaaaacaaaaguaagaaaaaagcacagcaagcagcagcugacacaggacacagcaaucaggucagccaaaauuacccuauagugcagaacauccaggggcaaaugguacaucaggccauaucaccuagaacuuuaaaugcauggguaaaaguaguagaagagaaggcuuucagcccagaagugauacccauguuuucagcauuaucagaaggagccaccccacaagauuuaaacaccaugcuaaacacaguggggggacaucaagcagccaugcaaauguuaaaagagaccaucaaugaggaagcugcagaaugggauagagugcauccagugcaugcagggccuauugcaccaggccagaugagagaaccaaggggaagugacauagcaggaacuacuaguacccuucaggaacaaauaggauggaugacaaauaauccaccuaucccaguaggagaaauuuauaaaagauggauaauccugggauuaaauaaaauaguaagaauguauagcccuaccagcauucuggacauaagacaaggaccaaaggaacccuuuagagacuauguagaccgguucuauaaaacucuaagagccgagcaagcuucacaggagguaaaaaauuggaugacagaaaccuuguugguccaaaaugcgaacccagauuguaagacuauuuuaaaagcauugggaccagcggcuacacuagaagaaaugaugacagcaugucagggaguaggaggacccggccauaaggcaagaguuuuggcugaagcaaugagccaaguaacaaauucagcuaccauaaugaugcagagaggcaauuuuaggaaccaaagaaagauuguuaaguguuucaauuguggcaaagaagggcacacagccagaaauugcagggccccuaggaaaaagggcuguuggaaauguggaaaggaaggacaccaaaugaaagauuguacugagagacaggcuaauuuuuuagggaagaucuggccuuccuacaagggaaggccagggaauuuucuucagagcagaccagagccaacagccccaccagaagagagcuucaggucugggguagagacaacaacucccccucagaagcaggagccgauagacaaggaacuguauccuuuaacuucccucaggucacucuuuggcaacgaccccucgucacaauaaagauaggggggcaacuaaaggaagcucuauuagauacaggagcagaugauacaguauuagaagaaaugaguuugccaggaagauggaaaccaaaaaugauagggggaauuggagguuuuaucaaaguaagacaguaugaucagauacucauagaaaucuguggacauaaagcuauagguacaguauuaguaggaccuacaccugucaacauaauuggaagaaaucuguugacucagauugguugcacuuuaaauuuucccauuagcccuauugagacuguaccaguaaaauuaaagccaggaauggauggcccaaaaguuaaacaauggccauugacagaagaaaaaauaaaagcauuaguagaaauuuguacagagauggaaaaggaagggaaaauuucaaaaauugggccugaaaauccauacaauacuccaguauuugccauaaagaaaaaagacaguacuaaauggagaaaauuaguagauuucagagaacuuaauaagagaacucaagacuucugggaaguucaauuaggaauaccacaucccgcaggguuaaaaaagaaaaaaucaguaacaguacuggaugugggugaugcauauuuuucaguucccuuagaugaagacuucaggaaguauacugcauuuaccauaccuaguauaaacaaugagacaccagggauuagauaucaguacaaugugcuuccacagggauggaaaggaucaccagcaauauuccaaaguagcaugacaaaaaucuuagagccuuuuagaaaacaaaauccagacauaguuaucuaucaauacauggaugauuuguauguaggaucugacuuagaaauagggcagcauagaacaaaaauagaggagcugagacaacaucuguugagguggggacuuaccacaccagacaaaaaacaucagaaagaaccuccauuccuuuggauggguuaugaacuccauccugauaaauggacaguacagccuauagugcugccagaaaaagacagcuggacugucaaugacauacagaaguuaguggggaaauugaauugggcaagucagauuuacccagggauuaaaguaaggcaauuauguaaacuccuuagaggaaccaaagcacuaacagaaguaauaccacuaacagaagaagcagagcuagaacuggcagaaaacagagagauucuaaaagaaccaguacauggaguguauuaugacccaucaaaagacuuaauagcagaaauaca
gaagcaggggcaaggccaauggacauaucaaauuuaucaagagccauuuaaaaaucugaaaacaggaaaauaugcaagaaugaggggugcccacacuaaugauguaaaacaauuaacagaggcagugcaaaaaauaaccacagaaagcauaguaauauggggaaagacuccuaaauuuaaacugcccauacaaaaggaaacaugggaaacaugguggacagaguauuggcaagccaccuggauuccugagugggaguuuguuaauaccccucccuuagugaaauuaugguaccaguuagagaaagaacccauaguaggagcagaaaccuucuauguagauggggcagcuaacagggagacuaaauuaggaaaagcaggauauguuacuaauagaggaagacaaaaaguugucacccuaacugacacaacaaaucagaagacugaguuacaagcaauuuaucuagcuuugcaggauucgggauuagaaguaaacauaguaacagacucacaauaugcauuaggaaucauucaagcacaaccagaucaaagugaaucagaguuagucaaucaaauaauagagcaguuaauaaaaaaggaaaaggucuaucuggcauggguaccagcacacaaaggaauuggaggaaaugaacaaguagauaaauuagucagugcuggaaucaggaaaguacuauuuuuagauggaauagauaaggcccaagaugaacaugagaaauaucacaguaauuggagagcaauggcuagugauuuuaaccugccaccuguaguagcaaaagaaauaguagccagcugugauaaaugucagcuaaaaggagaagccaugcauggacaaguagacuguaguccaggaauauggcaacuagauuguacacauuuagaaggaaaaguuauccugguagcaguucauguagccaguggauauauagaagcagaaguuauuccagcagaaacagggcaggaaacagcauauuuucuuuuaaaauuagcaggaagauggccaguaaaaacaauacauacugacaauggcagcaauuucaccggugcuacgguuagggccgccuguuggugggcgggaaucaagcaggaauuuggaauucccuacaauccccaaagucaaggaguaguagaaucuaugaauaaagaauuaaagaaaauuauaggacagguaagagaucaggcugaacaucuuaagacagcaguacaaauggcaguauucauccacaauuuuaaaagaaaaggggggauugggggguacagugcaggggaaagaauaguagacauaauagcaacagacauacaaacuaaagaauuacaaaaacaaauuacaaaaauucaaaauuuucggguuuauuacagggacagcagaaauccacuuuggaaaggaccagcaaagcuccucuggaaaggugaaggggcaguaguaauacaagauaauagugacauaaaaguagugccaagaagaaaagcaaagaucauuagggauuauggaaaacagauggcaggugaugauuguguggcaaguagacaggaugaggauuagaacauggaaaaguuuaguaaaacaccauauguauguuucagggaaagcuaggggaugguuuuauagacaucacuaugaaagcccucauccaagaauaaguucagaaguacacaucccacuaggggaugcuagauugguaauaacaacauauuggggucugcauacaggagaaagagacuggcauuugggucagggagucuccauagaauggaggaaaaagagauauagcacacaaguagacccugaacuagcagaccaacuaauucaucuguauuacuuugacuguuuuucagacucugcuauaagaaaggccuuauuaggacacauaguuagcccuaggugugaauaucaagcaggacauaacaagguaggaucucuacaauacuuggcacuagcagcauuaauaacaccaaaaaagauaaagccaccuuugccuaguguuacgaaacugacagaggauagauggaacaagccccagaagaccaagggccacagagggagccacacaaugaauggacacuagagcuuuuagaggagcuuaagaaugaagcuguuagacauuuuccuaggauuuggcuccauggcuuagggcaacauaucuaugaaacuuauggggauacuugggcaggaguggaagccauaauaagaauucugcaacaacugcuguuuauccauuuucagaauugggugucgacauagcagaauaggcguuacucgacagaggagagcaagaaauggagccaguagauccuagacuagagcccuggaagcauccaggaagucagccuaaaacugcuuguaccaauugcuauuguaaaaaguguugcuuucauugccaaguuuguuucauaacaaaagccuuaggcaucuccuauggcaggaagaagcggagacagcgacgaagagcucaucagaacagucagacucaucaagcuucucuaucaaagcaguaaguaguacauguaacgcaaccuauaccaauaguagcaauaguagcauuaguaguagcaauaauaauagcaauaguugugugguccauaguaaucauagaauauaggaaaauauuaagacaaagaaaaauagacagguuaauugauagacuaauagaaagagcagaagacaguggcaaugagagugaaggagaaauaucagcacuuguggagauggggguggagauggggcaccaugcuccuugggauguugaugaucuguagugcuacagaaaaauugugggucacagucuauuaugggguaccuguguggaaggaagcaaccaccacucuauuuugugcaucagaugcuaaagcauaugauacagagguacauaauguuugggccacacaugccuguguacccacagaccccaacccacaagaaguaguauugguaaaugugacagaaaauuuuaacauguggaaaaaugacaugguagaacagaugcaugaggauauaaucaguuuaugggaucaaagccuaaagccauguguaaaauuaaccccacucuguguuaguuuaaagugcacugauuugaagaaugauacuaauaccaauaguaguagcgggagaaugauaauggagaaaggagagauaaaaaacugcucuuucaauaucagcacaagcauaagagguaaggugcagaaagaauaugcauuuuuuuauaaacuugauauaauaccaauagauaaugauacuaccagcuauaaguugacaaguuguaacaccucagucauuacacaggccuguccaaagguauccuuugagccaauucccauacauuauugugccccggcugguuuugcgauucuaaaauguaauaauaagacguucaauggaacaggaccauguacaaaugucagcacaguacaauguacacauggaauuaggccaguaguaucaacucaacugcuguuaaauggcagucuagcagaagaagagguaguaauuagaucugucaauuucacggacaaugcuaaaaccauaauaguacagcugaacac
aucuguagaaauuaauuguacaagacccaacaacaauacaagaaaaagaauccguauccagagaggaccagggagagcauuuguuacaauaggaaaaauaggaaauaugagacaagcacauuguaacauuaguagagcaaaauggaauaacacuuuaaaacagauagcuagcaaauuaagagaacaauuuggaaauaauaaaacaauaaucuuuaagcaauccucaggaggggacccagaaauuguaacgcacaguuuuaauuguggaggggaauuuuucuacuguaauucaacacaacuguuuaauaguacuugguuuaauaguacuuggaguacugaagggucaaauaacacugaaggaagugacacaaucacccucccaugcagaauaaaacaaauuauaaacauguggcagaaaguaggaaaagcaauguaugccccucccaucaguggacaaauuagauguucaucaaauauuacagggcugcuauuaacaagagauggugguaauagcaacaaugaguccgagaucuucagaccuggaggaggagauaugagggacaauuggagaagugaauuauauaaauauaaaguaguaaaaauugaaccauuaggaguagcacccaccaaggcaaagagaagaguggugcagagagaaaaaagagcagugggaauaggagcuuuguuccuuggguucuugggagcagcaggaagcacuaugggcgcagccucaaugacgcugacgguacaggccagacaauuauugucugguauagugcagcagcagaacaauuugcugagggcuauugaggcgcaacagcaucuguugcaacucacagucuggggcaucaagcagcuccaggcaagaauccuggcuguggaaagauaccuaaaggaucaacagcuccuggggauuugggguugcucuggaaaacucauuugcaccacugcugugccuuggaaugcuaguuggaguaauaaaucucuggaacagauuuggaaucacacgaccuggauggagugggacagagaaauuaacaauuacacaagcuuaauacacuccuuaauugaagaaucgcaaaaccagcaagaaaagaaugaacaagaauuauuggaauuagauaaaugggcaaguuuguggaauugguuuaacauaacaaauuggcugugguauauaaaauuauucauaaugauaguaggaggcuugguagguuuaagaauaguuuuugcuguacuuucuauagugaauagaguuaggcagggauauucaccauuaucguuucagacccaccucccaaccccgaggggacccgacaggcccgaaggaauagaagaagaagguggagagagagacagagacagauccauucgauuagugaacggauccuuggcacuuaucugggacgaucugcggagccugugccucuucagcuaccaccgcuugagagacuuacucuugauuguaacgaggauuguggaacuucugggacgcagggggugggaagcccucaaauauugguggaaucuccuacaguauuggagucaggaacuaaagaauagugcuguuagcuugcucaaugccacagccauagcaguagcugaggggacagauaggguuauagaaguaguacaaggagcuuguagagcuauucgccacauaccuagaagaauaagacagggcuuggaaaggauuuugcuauaagauggguggcaaguggucaaaaaguagugugauuggauggccuacuguaagggaaagaaugagacgagcugagccagcagcagauagggugggagcagcaucucgagaccuggaaaaacauggagcaaucacaaguagcaauacagcagcuaccaaugcugcuugugccuggcuagaagcacaagaggaggaggagguggguuuuccagucacaccucagguaccuuuaagaccaaugacuuacaaggcagcuguagaucuuagccacuuuuuaaaagaaaaggggggacuggaagggcuaauucacucccaaagaagacaagauauccuugaucuguggaucuaccacacacaaggcuacuucccugauuagcagaacuacacaccagggccaggggucagauauccacugaccuuuggauggugcuacaagcuaguaccaguugagccagauaagauagaagaggccaauaaaggagagaacaccagcuuguuacacccugugagccugcaugggauggaugacccggagagagaaguguuagaguggagguuugacagccgccuagcauuucaucacguggcccgagagcugcauccggaguacuucaagaacugcugacaucgagcuugcuacaagggacuuuccgcuggggacuuuccagggaggcguggccugggcgggacuggggaguggcgagcccucagauccugcauauaagcagcugcuuuuugccuguacugggucucucugguuagaccagaucugagccugggagcucucuggcuaacuagggaacccacugcuuaagccucaauaaagcuugccuugagugcuucaaguagugugugcccgucuguugugugacucugguaacuagagaucccucagacccuuuuagucaguguggaaaaucucuagca'
#translate hiv RNA to DNA
hiv_genome = hiv_genome_rna.rep('u', t)
# isolate the nef gene (start:8797, end:9417)
nef_gene = hiv_genome[8797]
# the nef gene as a fasta file using the header 'nef type 1 (HXB2)'
fasta_header = '>nef type 1 (HXB2)'
print(fasta_heade, nef_gene)
#caculate and report the GC content of the nef gene
nef_gc_content = (nef_gene.count('c') + nef_gene.count('g')) / len(nef_gene)
print("The GC content of the nef gene is: ", nef_gc_content * 100, "%")
###Output
_____no_output_____
###Markdown
Introducing listsNow that we have played a bit with strings, it's time to introduce the next variable type. So far, we have worked with several types of variables and data including:* integers* floats* stringsThe next data type is a list. Lists are just what you would expect, a collection. Lists have a few special properties we'll need to understand, lists are:* ordered* indexed* iterableLet's explore these properties by creating our on list, which in Python is done using the ``[]`` brackets.
###Code
my_list = []
###Output
_____no_output_____
###Markdown
Perhaps it seems nothing much has happened, but you should be able to verify that Python thinks that ``my_list`` is a list; please try:
###Code
type(my_list)
###Output
_____no_output_____
###Markdown
 So far, we have created ``[]`` - the empty list - and assigned it the name ``my_list``. We can start adding things to ``my_list`` using the ``.append`` method. For example:
###Code
my_list =[]
# We can add a string
my_list.append('gag')
print(my_list)
# We can add another string
my_list.append('pol')
print(my_list)
# We can yet add another string - please add the string 'env'
# We can also declare lists by naming all its members
my_other_list = ['DNA',
'mRNA',
'Protein']
print(my_other_list)
###Output
_____no_output_____
###Markdown
 A list maintains the order of every element of that list. Lists are indexed (starting at 0) in a way that is similar to strings. |Index|List Element||:----|:-----------||0|'gag'||1|'pol'||2|'env'|
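For example (a minimal sketch of indexing the three-gene list from the table):
###Code
# Indexing starts at 0
hiv_genes = ['gag', 'pol', 'env']
print(hiv_genes[0])    # 'gag'
print(hiv_genes[2])    # 'env'
print(len(hiv_genes))  # 3 elements, so the valid indexes are 0, 1 and 2
###Output
_____no_output_____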
###Code
# Print the list of these HIV genes in order given the list below
# The correct order is
# gag, pol, vif, vpr, vpu, env, nef
hiv_gene_names = ['env',
'gag',
'vif',
'pol',
'vpr',
'vpu',
'nef']
###Output
_____no_output_____
###Markdown
Iteration and 'for' loopsThis topic is important enough to get its own section! Not only are we going to talk about iteration, but we are going to introduce a very important concept in computing - a loop. In a loop, we are able to get the computer to repeat a set of instructions without us having to write out every command. This is at the heart of what makes computers useful - being able to carry out repetitive tasks without our input. Let's look at our first for loop; to start we will use a list of nucleic acids:
###Code
nucleic_acids = ['adenine',
'thymine',
'cytosine',
'guanine',
'uracil']
print(nucleic_acids)
###Output
_____no_output_____
###Markdown
If we wanted to, we could print the items in this list one by one using several print statements
###Code
print(nucleic_acids[0])
print(nucleic_acids[1])
print(nucleic_acids[2])
print(nucleic_acids[3])
print(nucleic_acids[4])
#Alternatively, we can do this using a for loop:
for nucleotide in nucleic_acids:
print(nucleotide)
###Output
_____no_output_____
###Markdown
 A for loop has the following structure: for temporary_variable in iterable : • • • • instruction[temporary_variable]Let's break this down a bit...* ``for`` - a for loop must start with a for statement* ``temporary_variable`` - the next character(s) right after the ``for`` are actually the name of a special variable. This variable is a placeholder for the objects that will come next in the loop.* ``in`` - this ``in`` must be included and tells Python what iterable it should execute the for loop on* ``iterable:`` The iterable is any ordered collection (such as a string or a list). A ``:`` must come after the iterable.* (indent) - the next line of a for loop must always be indented. The best practice is to use 4 spaces (not the tab key)* • • • • - 4 space indent* ``instruction`` - these are the instructions you want Python to execute. If your instructions make use of the variable (they don't have to) you will use ``temporary_variable`` (whatever you have named it)
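Here is the same anatomy labelled directly in code (a minimal sketch):
###Code
bases = ['a', 'c', 'g', 't']   # the iterable: an ordered collection
for base in bases:             # 'base' is the temporary variable
    print(base.upper())        # the indented instruction runs once per element
###Output
_____no_output_____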
###Code
# Try the following with for loops
nucleic_acids = ['adenine',
'thymine',
'cytosine',
'guanine',
'uracil']
# Write a for loop that prints the names of the nucleotides
# Write a for loop that prints 'nucleotide!' for each of the nucleotides
# Write a for loop that prints each nucleotide name and its one-letter abbreviation
###Output
_____no_output_____
###Markdown
ConditionalsOne of the key functionalities in computing is the ability to make comparisons and choices. In Python, we have several ways to use this. In each case, the answer to a conditional statement is a simple binary result: True or False. Run the following cells and also make some changes to see that you understand how Python is evaluating the statement. Evaluate 1 > 0 + 1 ? How about 99 >= 99 ? What about 0 <= 1 ? And try 1 == 1 The conditionals above all use the comparison operators, a more complete list is as follows:|Operator|Description||-------|:----------||``==``|Comparison - True if both operands are equal||``!=``|Not equal - True if both operands are not equal||``>``|Greater than - True if left operand is greater than right||``<``|Less than - True if left operand is less than right||``<=``|Less than or equal to - True if left operand is less than or equal to right||``>=``|Greater than or equal to - True if left operand is greater than or equal to right| Random number and conditionals - Heads or TailsNow, let's combine randomness with our conditional operators to make a simple simulation: flipping a coin. Python has a [Module](https://docs.python.org/2/tutorial/modules.html) call [NumPy](http://www.numpy.org/). NumPy contains a number of useful functions including the ability to generate 'random' numbers. Generating a truly random number is a [science in itself](https://www.random.org/randomness/), but the NumPy ``random`` module will be sufficient for our purpose. See how we use this function in the next cell:
###Code
# Using the from xxx import xxx statement, we tell Python we want to use a package that
# is not part of the default set of Python packages
# NumPy happens to be installed already for us, otherwise we would have to download it
from numpy import random
# We create a variable and then use the . notation to get the random number
# in this case, we are requesting a random int between 1 and 9 (randint's upper bound, 10, is excluded)
my_random_int = random.randint(1,10)
print('My random int is %d' % my_random_int)
# rerun this cell a few times to see that you get only numbers 1-9
###Output
_____no_output_____
###Markdown
Print formatting

Notice a new feature in the print statement. We haven't used it before, but this string formatting feature allows us to print the value of a variable inside a string: just put ``%d`` in the string where you want an integer to appear, then after closing the string, put a ``%`` sign followed by the variable name. You can also generate floats:
###Code
# returns a float between 0.0 and 1.0
my_random_float = random.ranf()
print('My random float is %f' % my_random_float)
# You can also control precision of the float
print('My random float is %0.3f to 3 digits' % my_random_float)
print('My random float is %0.9f to 9 digits' % my_random_float)
print('My random float is %0.30f to 30 digits' % my_random_float)
# You can do this multiple times in the same string
print('My random float is %0.3f or %0.9f' % (my_random_float, my_random_float))
###Output
_____no_output_____
###Markdown
if else statements

We are now ready to combine the conditions and random number generator to do our first simulation. To do so we will need to make an if else statement:
###Code
if 1 == 1:
print('1 is equal to 1')
###Output
_____no_output_____
###Markdown
The if statement uses the following pattern:

    if conditional_to_evaluate:
    • • • • instruction

* ``if`` - if statements begin with an if
* ``conditional_to_evaluate`` - this is some conditional statement that Python will evaluate as ``True`` or ``False``. This statement will be followed by a ``:``
* (indent) - the next line of an if statement must always be indented. The best practice is to use 4 spaces (not the tab key)
* • • • • - 4 space indent
* ``instruction`` - these are the instructions you want Python to execute. The instructions will only be executed if the conditional statement is ``True``

Write a few conditional statements and see what happens when the statement is ``True`` or ``False``.

We can supplement the if statement by telling Python what to do if the conditional is false, using the else statement:
###Code
if 1 == 2:
print('one is now equal to two')
else:
print('one is NOT equal to two')
###Output
_____no_output_____
###Markdown
Remembering that indenting is important, try writing a few if else statements yourself.

As powerful as if/else statements can be, we sometimes wish to let Python explore several contingencies. We do this using ``elif`` (else if), which allows us to evaluate another if statement only if the preceding if statement is ``False``. Complete the next two cells to see an example:
###Code
# What day is today, enter this as a string below
today = 'Monday'  # example value - replace this string with the actual day of the week
# Things to do
if today == 'Monday':
print('Walk the dog')
elif today == 'Tuesday':
print('Pick up the laundry')
elif today == 'Wednesday':
print('Go shopping')
elif today == 'Thursday':
print('Call mom')
elif today == 'Friday':
print('Plan for the weekend')
else:
print('It must be the weekend, nothing to do')
###Output
_____no_output_____
###Markdown
To recap: the above if/elif/else statement covered several explicit contingencies (if the day of the week was Monday-Friday) as well as a final contingency if none of the above were ``True`` (the final else statement). Write a statement below using the if/elif/else chain of conditionals. Remember to pay attention to indenting.

Putting it all together

Using what you have learned so far, write some code to simulate flipping a coin.
###Code
# Use the random number function of NumPy to generate a float
# Use conditionals so that if the float is greater than or equal to 0.5 consider that
#'Heads' otherwise 'Tails'
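
# One possible sketch (random was imported from numpy in an earlier cell, but
# the import is repeated here so this cell runs on its own):
from numpy import random

coin_float = random.ranf()
if coin_float >= 0.5:
    print('Heads (%0.3f)' % coin_float)
else:
    print('Tails (%0.3f)' % coin_float)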
###Output
_____no_output_____
###Markdown
Simulating mutation of the HIV genome

Mutations are (at least in part) a random process that drives the change of a genome. Viruses in particular use this to their advantage. Mutations in viruses can allow them to evade their host's immune responses, confer drug resistance, or even acquire new functions.

According to [Abram et al. 2010](http://www.uv.es/rsanjuan/Abram%20JVirol%2010.pdf) the mutation rate for the HIV-1 genome is about 4.4E-05 (0.000044) mutations per single cell infection cycle. The most common mutation types are single nucleotide polymorphisms ([SNPs](https://en.wikipedia.org/wiki/Single-nucleotide_polymorphism)). In our toy simulation we will use Python to simulate the following:
* flip a coin weighted to the probability of an HIV-1 mutation (genome size * mutation rate)
* choose a random nucleotide in the HIV-1 genome to mutate (using the .randint() method)
* flip a weighted coin to choose what type of mutation the mutation should be (using the following information, and assuming the genome size is 9719 nucleotides)

Here are some code examples that will help
###Code
# unfair coin
from numpy import random
# Coins have two sides (states) - heads or tails; use these as a list
coin_state = ['Heads','Tails']
# A fair coin would have a 50/50 chance of being heads or tails. Represent these probabilities as
# floats which sum to 1.0
fair_coin_probabilities = [0.5,0.5]
#flip the fair coin using numpy's random.choice method
fair_flip = random.choice(coin_state,p = fair_coin_probabilities)
#print the result
print("My fair coin is %s" %fair_flip)
# An unfair coin could be weighted like this
unfair_coin_probabilities = [0.1,0.9]
# Therefore...
unfair_flip = random.choice(coin_state,p = unfair_coin_probabilities)
print("My unfair coin is %s" %unfair_flip)
###Output
_____no_output_____
###Markdown
1. Write a simulation which determines whether HIV will mutate in one round of replication
###Code
# Set the states (mutation,no_mutation)
# Set the probabilities for each state (hint: they must sum to 1)
# flip the coin (make the choice)
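
# One possible sketch, assuming the figures quoted above (genome size 9719,
# mutation rate 4.4e-5 per site per infection cycle):
from numpy import random

mutation_states = ['mutation', 'no_mutation']
p_mutation = 9719 * 4.4e-5              # probability that the genome mutates this cycle
state_probabilities = [p_mutation, 1 - p_mutation]

outcome = random.choice(mutation_states, p=state_probabilities)
print('This replication cycle: %s' % outcome)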
###Output
_____no_output_____
###Markdown
2. Determine how often HIV would mutate in 20 rounds of replication

We will use a for loop to repeat the coin flip 20 times. We can use a special function ``range()`` to tell Python how many times to execute the for loop. Use the following coin flipping example to improve your HIV simulation.
###Code
from numpy import random
coin_state = ['Heads','Tails']
fair_coin_probabilities = [0.5,0.5]
for flip in range(1,21):
fair_flip = random.choice(coin_state,p = fair_coin_probabilities)
print(fair_flip)
###Output
_____no_output_____
###Markdown
You can take this even further by saving the result as a list:
###Code
from numpy import random
coin_state = ['Heads','Tails']
fair_coin_probabilities = [0.5,0.5]
# tip: notice how the list is created before the for loop. If you declared
# flip_results = [] in the for loop, it would be reset 20 times
flip_results = []
for flip in range(1,21):
fair_flip = random.choice(coin_state,p = fair_coin_probabilities)
flip_results.append(fair_flip)
###Output
_____no_output_____
###Markdown
Don't forget you can print the result to see the list:
###Code
print(flip_results)
###Output
_____no_output_____
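###Markdown
Putting the last two examples together for the HIV case, one possible sketch (using the states and probabilities assumed in exercise 1 above) counts how many of 20 replication cycles produce a mutation:

```python
from numpy import random

mutation_states = ['mutation', 'no_mutation']
p_mutation = 9719 * 4.4e-5
state_probabilities = [p_mutation, 1 - p_mutation]

cycle_results = []
for cycle in range(1, 21):
    cycle_results.append(random.choice(mutation_states, p=state_probabilities))

print('Mutated in %d of %d cycles' % (cycle_results.count('mutation'), len(cycle_results)))
```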
###Markdown
3. If HIV is in the mutation state, determine which nucleotide to mutate

Let's use our coin to determine if I should walk the dog on Monday or Tuesday:
###Code
from numpy import random
coin_state = ['Heads','Tails']
fair_coin_probabilities = [0.5,0.5]
flip_results = []
for flip in range(1,21):
fair_flip = random.choice(coin_state,p = fair_coin_probabilities)
flip_results.append(fair_flip)
# Tip - pay attention to the indenting in this for loop that contains an if/else statement
for result in flip_results:
if result == 'Heads':
print("Walk the dog Monday")
elif result == 'Tails':
print("Walk the dog Tuesday")
###Output
_____no_output_____
###Markdown
Besides using the print instruction, you can also place the results into a new list based on the conditional outcome:
###Code
from numpy import random
coin_state = ['Heads','Tails']
fair_coin_probabilities = [0.5,0.5]
flip_results = []
# Initialize some new lists for my conditional outcomes
monday_results = []
tuesday_results = []
for flip in range(1,21):
fair_flip = random.choice(coin_state,p = fair_coin_probabilities)
flip_results.append(fair_flip)
for result in flip_results:
if result == 'Heads':
monday_results.append("Walk the dog Monday")
elif result == 'Tails':
tuesday_results.append("Walk the dog Tuesday")
# We can print how many times we had each type of result stored in our lists
print("My coin said to walk the dog Monday %d times" % len(monday_results))
print("My coin said to walk the dog Tuesday %d times" % len(tuesday_results))
###Output
_____no_output_____
###Markdown
Using the above examples, and your knowledge of how to slice strings:
* determine which nucleotide in the HIV-1 genome to mutate
* flip a coin weighted to the probabilities of mutation given in the 'Class 1: single nt substitution' chart above. In that chart, the number of observed mutations of the nucleotide on the y-axis changing to the one on the x-axis is shown.
* use the ``replace()`` function to mutate your HIV-1 genome

**Bonus**
* determine and report in which gene your mutations arise (ignore genes less than 200nt)
* determine and report if the mutation in any particular gene introduces a stop codon in reading frame one
* determine and report if the mutation in any particular gene introduces a stop codon in the actual reading frame of that gene

A minimal sketch of the three main steps follows.
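One possible approach is sketched below. The variable names (including `hiv_genome`) and the substitution probabilities are placeholders: the real genome string comes from earlier in the lesson, and the real probabilities come from the 'Class 1: single nt substitution' chart, which is not reproduced here. Note that this sketch mutates the genome by slicing rather than ``replace()``, because ``replace()`` would change every occurrence of a base rather than the single chosen position.

```python
from numpy import random

# Placeholder genome; replace with the full 9719 nt HIV-1 genome string used earlier
hiv_genome = 'atgggtgcgagagcgtcagtattaagcgggggagaattagat'

# Step 1: pick which position in the genome to mutate
position = random.randint(0, len(hiv_genome))
original_base = hiv_genome[position]

# Step 2: weighted coin for the substitution type.
# These probabilities are made up for illustration; take the real ones
# from the substitution chart (one row per original base, summing to 1).
substitutions = {
    'a': (['c', 'g', 't'], [0.15, 0.70, 0.15]),
    'c': (['a', 'g', 't'], [0.20, 0.20, 0.60]),
    'g': (['a', 'c', 't'], [0.70, 0.15, 0.15]),
    't': (['a', 'c', 'g'], [0.60, 0.20, 0.20]),
}
choices, weights = substitutions[original_base]
new_base = random.choice(choices, p=weights)

# Step 3: write the new base into the genome at that single position
mutated_genome = hiv_genome[:position] + new_base + hiv_genome[position + 1:]
print('Position %d: %s -> %s' % (position, original_base, new_base))
```

A little more on HIV viral replication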
###Code
from IPython.display import HTML
HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/RO8MP3wMvqg" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>')
# Other nice animations here: https://www.wehi.edu.au/wehi-tv
###Output
_____no_output_____ |
workshops/POC_in_a_box/02_Creating_and_Evaluating_Solutions.ipynb | ###Markdown
Creating and Evaluating Solutions

In this notebook, you will train several models using Amazon Personalize.

1. [Introduction](intro)
1. [Create solutions](solutions)
1. [Evaluate solutions](eval)
1. [Using evaluation metrics](use)
1. [Storing useful variables](vars)

Introduction

To recap, for the most part, the algorithms in Amazon Personalize (called recipes) look to solve different tasks, explained here:

1. **HRNN & HRNN-Metadata** - Recommends items based on previous user interactions with items.
1. **HRNN-Coldstart** - Recommends new items for which interaction data is not yet available.
1. **Personalized-Ranking** - Takes a collection of items and then orders them in probable order of interest using an HRNN-like approach.
1. **SIMS (Similar Items)** - Given one item, recommends other items also interacted with by users.
1. **Popularity-Count** - Recommends the most popular items; if HRNN or HRNN-Metadata do not have an answer, this is returned by default.

No matter the use case, the algorithms all share a base of learning on user-item-interaction data which is defined by 3 core attributes:

1. **UserID** - The user who interacted
1. **ItemID** - The item the user interacted with
1. **Timestamp** - The time at which the interaction occurred

We also support event types and event values defined by:

1. **Event Type** - Categorical label of an event (browse, purchased, rated, etc).
1. **Event Value** - A value corresponding to the event type that occurred. Generally speaking, we look for normalized values between 0 and 1 over the event types. For example, if there are three phases to complete a transaction (clicked, added-to-cart, and purchased), then there would be an event_value for each phase of 0.33, 0.66, and 1.0 respectively.

The event type and event value fields are additional data which can be used to filter the data sent for training the personalization model. In this particular exercise we will not have an event type or event value.

To run this notebook, you need to have run the previous notebook, `01_Validating_and_Importing_User_Item_Interaction_Data`, where you created a dataset and imported interaction data into Amazon Personalize. At the end of that notebook, you saved some of the variable values, which you now need to load into this notebook.
###Code
%store -r
###Output
_____no_output_____
###Markdown
Create solutions [Back to top](top)

In this notebook, you will create solutions with the following recipes:

1. HRNN
1. SIMS
1. Personalized-Ranking

Since you have not imported any metadata, and there are no coldstart items in this dataset, this notebook will skip the HRNN-Metadata and HRNN-Coldstart recipes. The Popularity-Count recipe is the simplest solution available in Amazon Personalize and it should only be used as a fallback, so it will also not be covered in this notebook.

Similar to the previous notebook, start by importing the relevant packages, and set up a connection to Amazon Personalize using the SDK.
###Code
import time
from time import sleep
import json
import boto3
# Configure the SDK to Personalize:
personalize = boto3.client('personalize')
personalize_runtime = boto3.client('personalize-runtime')
###Output
_____no_output_____
###Markdown
In Amazon Personalize, a specific variation of an algorithm is called a recipe. Different recipes are suitable for different situations. A trained model is called a solution, and each solution can have many versions that relate to a given volume of data when the model was trained.

To start, we will list all the recipes that are supported. This will allow you to select one and use that to build your model.
###Code
personalize.list_recipes()
###Output
_____no_output_____
###Markdown
The output is just a JSON representation of all of the algorithms mentioned in the introduction.

Next we will select specific recipes and build models with them.

HRNN

HRNN (hierarchical recurrent neural network) is one of the more advanced recommendation models that you can use and it allows for real-time updates of recommendations based on user behavior. It also tends to outperform other approaches, like collaborative filtering. This recipe takes the longest to train, so let's start with this recipe first.

For our use case, using the LastFM data, we can use HRNN to recommend new artists to a user based on the user's previous artist tagging behavior. Remember, we used the tagging data to represent positive interactions between a user and an artist.

First, select the recipe by finding the ARN in the list of recipes above.
###Code
HRNN_recipe_arn = "arn:aws:personalize:::recipe/aws-hrnn"
###Output
_____no_output_____
###Markdown
Create the solution

First you create a solution using the recipe. Although you provide the dataset ARN in this step, the model is not yet trained. See this as an identifier instead of a trained model.
###Code
hrnn_create_solution_response = personalize.create_solution(
name = "personalize-poc-hrnn",
datasetGroupArn = dataset_group_arn,
recipeArn = HRNN_recipe_arn
)
hrnn_solution_arn = hrnn_create_solution_response['solutionArn']
print(json.dumps(hrnn_create_solution_response, indent=2))
###Output
_____no_output_____
###Markdown
Create the solution version

Once you have a solution, you need to create a version in order to complete the model training. The training can take a while to complete, upwards of 25 minutes, and an average of 40 minutes for this recipe with our dataset. Normally, we would use a while loop to poll until the task is completed. However, such a loop would block other cells from executing, and the goal here is to create many models and deploy them quickly. So we will set up the while loop for all of the solutions further down in the notebook. There, you will also find instructions for viewing the progress in the AWS console.
###Code
hrnn_create_solution_version_response = personalize.create_solution_version(
solutionArn = hrnn_solution_arn
)
hrnn_solution_version_arn = hrnn_create_solution_version_response['solutionVersionArn']
print(json.dumps(hrnn_create_solution_version_response, indent=2))
###Output
_____no_output_____
###Markdown
SIMS

SIMS is one of the oldest algorithms used within Amazon for recommendation systems. A core use case for it is when you have one item and you want to recommend items that have been interacted with in similar ways over your entire user base. This means the result is not personalized per user. Sometimes this leads to recommending mostly popular items, so there is a hyperparameter that can be tweaked which will reduce the popular items in your results.

For our use case, using the LastFM data, let's assume we pick a particular artist. We can then use SIMS to recommend other artists based on the tagging behavior of the entire user base. The results are not personalized per user, but instead, differ depending on the original artist we chose as our input.

Just like last time, we start by selecting the recipe.
###Code
SIMS_recipe_arn = "arn:aws:personalize:::recipe/aws-sims"
###Output
_____no_output_____
###Markdown
Create the solution

As with HRNN, start by creating the solution first. Although you provide the dataset ARN in this step, the model is not yet trained. See this as an identifier instead of a trained model.
###Code
sims_create_solution_response = personalize.create_solution(
name = "personalize-poc-sims",
datasetGroupArn = dataset_group_arn,
recipeArn = SIMS_recipe_arn
)
sims_solution_arn = sims_create_solution_response['solutionArn']
print(json.dumps(sims_create_solution_response, indent=2))
###Output
_____no_output_____
###Markdown
Create the solution version

Once you have a solution, you need to create a version in order to complete the model training. The training can take a while to complete, upwards of 25 minutes, and an average of 35 minutes for this recipe with our dataset. Normally, we would use a while loop to poll until the task is completed. However, such a loop would block other cells from executing, and the goal here is to create many models and deploy them quickly. So we will set up the while loop for all of the solutions further down in the notebook. There, you will also find instructions for viewing the progress in the AWS console.
###Code
sims_create_solution_version_response = personalize.create_solution_version(
solutionArn = sims_solution_arn
)
sims_solution_version_arn = sims_create_solution_version_response['solutionVersionArn']
print(json.dumps(sims_create_solution_version_response, indent=2))
###Output
_____no_output_____
###Markdown
Personalized Ranking

Personalized Ranking is an interesting application of HRNN. Instead of just recommending what is most probable for the user in question, this algorithm takes in a user and a list of items as well. The items are then rendered back in the order of most probable relevance for the user. The use case here is for filtering on genre for example, or when you have a broad collection that you would like better ordered for a particular user.

For our use case, using the LastFM data, we could imagine that a particular record label is paying us to recommend their artists to our users in a special promotion. Therefore, we know the list of artists we want to recommend, but we want to find out which of these artists each user will like most. We would use personalized ranking to re-order the list of artists for each user, based on their previous tagging history.

Just like last time, we start by selecting the recipe.
###Code
rerank_recipe_arn = "arn:aws:personalize:::recipe/aws-personalized-ranking"
###Output
_____no_output_____
###Markdown
Create the solution

As with the previous solution, start by creating the solution first. Although you provide the dataset ARN in this step, the model is not yet trained. See this as an identifier instead of a trained model.
###Code
rerank_create_solution_response = personalize.create_solution(
name = "personalize-poc-rerank",
datasetGroupArn = dataset_group_arn,
recipeArn = rerank_recipe_arn
)
rerank_solution_arn = rerank_create_solution_response['solutionArn']
print(json.dumps(rerank_create_solution_response, indent=2))
###Output
_____no_output_____
###Markdown
Create the solution version

Once you have a solution, you need to create a version in order to complete the model training. The training can take a while to complete, upwards of 25 minutes, and an average of 35 minutes for this recipe with our dataset. Normally, we would use a while loop to poll until the task is completed. However, such a loop would block other cells from executing, and the goal here is to create many models and deploy them quickly. So we will set up the while loop for all of the solutions further down in the notebook. There, you will also find instructions for viewing the progress in the AWS console.
###Code
rerank_create_solution_version_response = personalize.create_solution_version(
solutionArn = rerank_solution_arn
)
rerank_solution_version_arn = rerank_create_solution_version_response['solutionVersionArn']
print(json.dumps(rerank_create_solution_version_response, indent=2))
###Output
_____no_output_____
###Markdown
View solution creation status

As promised, here is how to view the status updates in the console:

* In another browser tab you should already have the AWS Console up from opening this notebook instance.
* Switch to that tab and search at the top for the service `Personalize`, then go to that service page.
* Click `View dataset groups`.
* Click the name of your dataset group, most likely something with POC in the name.
* Click `Solutions and recipes`.
* You will now see a list of all of the solutions you created above, including a column with the status of the solution versions. Once it is `Active`, your solution is ready to be reviewed. It is also capable of being deployed.

Or simply run the cell below to keep track of the solution version creation status.
###Code
in_progress_solution_versions = [
hrnn_solution_version_arn,
sims_solution_version_arn,
rerank_solution_version_arn
]
max_time = time.time() + 3*60*60 # 3 hours
while time.time() < max_time:
for solution_version_arn in list(in_progress_solution_versions):  # iterate over a copy, since items are removed inside the loop
version_response = personalize.describe_solution_version(
solutionVersionArn = solution_version_arn
)
status = version_response["solutionVersion"]["status"]
if status == "ACTIVE":
print("Build succeeded for {}".format(solution_version_arn))
in_progress_solution_versions.remove(solution_version_arn)
elif status == "CREATE FAILED":
print("Build failed for {}".format(solution_version_arn))
in_progress_solution_versions.remove(solution_version_arn)
if len(in_progress_solution_versions) <= 0:
break
else:
print("At least one solution build is still in progress")
time.sleep(60)
###Output
_____no_output_____
###Markdown
Hyperparameter tuning

Personalize offers the option of running hyperparameter tuning when creating a solution. Because of the additional computation required to perform hyperparameter tuning, this feature is turned off by default. Therefore, the solutions we created above will simply use the default values of the hyperparameters for each recipe. For more information about hyperparameter tuning, see the [documentation](https://docs.aws.amazon.com/personalize/latest/dg/customizing-solution-config-hpo.html).

If you have settled on the correct recipe to use, and are ready to run hyperparameter tuning, the following code shows how you would do so, using SIMS as an example.

```python
sims_create_solution_response = personalize.create_solution(
    name = "personalize-poc-sims-hpo",
    datasetGroupArn = dataset_group_arn,
    recipeArn = SIMS_recipe_arn,
    performHPO=True
)

sims_solution_arn = sims_create_solution_response['solutionArn']
print(json.dumps(sims_create_solution_response, indent=2))
```

If you already know the values you want to use for a specific hyperparameter, you can also set this value when you create the solution. The code below shows how you could set the value for the `popularity_discount_factor` for the SIMS recipe.

```python
sims_create_solution_response = personalize.create_solution(
    name = "personalize-poc-sims-set-hp",
    datasetGroupArn = dataset_group_arn,
    recipeArn = SIMS_recipe_arn,
    solutionConfig = {
        'algorithmHyperParameters': {
            'popularity_discount_factor': '0.7'
        }
    }
)

sims_solution_arn = sims_create_solution_response['solutionArn']
print(json.dumps(sims_create_solution_response, indent=2))
```

Evaluate solution versions [Back to top](top)

It should not take more than an hour to train all the solutions from this notebook. While training is in progress, we recommend taking the time to read up on the various algorithms (recipes) and their behavior in detail. This is also a good time to consider alternatives to how the data was fed into the system and what kind of results you expect to see.

When the solutions finish creating, the next step is to obtain the evaluation metrics. Personalize calculates these metrics based on a subset of the training data. The image below illustrates how Personalize splits the data. Given 10 users, with 10 interactions each (a circle represents an interaction), the interactions are ordered from oldest to newest based on the timestamp. Personalize uses all of the interaction data from 90% of the users (blue circles) to train the solution version, and the remaining 10% for evaluation. For each of the users in the remaining 10%, 90% of their interaction data (green circles) is used as input for the call to the trained model. The remaining 10% of their data (orange circle) is compared to the output produced by the model and used to calculate the evaluation metrics.

We recommend reading [the documentation](https://docs.aws.amazon.com/personalize/latest/dg/working-with-training-metrics.html) to understand the metrics, but we have also copied parts of the documentation below for convenience.

You need to understand the following terms regarding evaluation in Personalize:

* *Relevant recommendation* refers to a recommendation that matches a value in the testing data for the particular user.
* *Rank* refers to the position of a recommended item in the list of recommendations. Position 1 (the top of the list) is presumed to be the most relevant to the user.
* *Query* refers to the internal equivalent of a GetRecommendations call.

The metrics produced by Personalize are:

* **coverage**: The proportion of unique recommended items from all queries out of the total number of unique items in the training data (includes both the Items and Interactions datasets).
* **mean_reciprocal_rank_at_25**: The [mean of the reciprocal ranks](https://en.wikipedia.org/wiki/Mean_reciprocal_rank) of the first relevant recommendation out of the top 25 recommendations over all queries. This metric is appropriate if you're interested in the single highest ranked recommendation.
* **normalized_discounted_cumulative_gain_at_K**: Discounted gain assumes that recommendations lower on a list of recommendations are less relevant than higher recommendations. Therefore, each recommendation is discounted (given a lower weight) by a factor dependent on its position. To produce the [cumulative discounted gain](https://en.wikipedia.org/wiki/Discounted_cumulative_gain) (DCG) at K, each relevant discounted recommendation in the top K recommendations is summed together. The normalized discounted cumulative gain (NDCG) is the DCG divided by the ideal DCG such that NDCG is between 0 and 1. (The ideal DCG is where the top K recommendations are sorted by relevance.) Amazon Personalize uses a weighting factor of 1/log(1 + position), where the top of the list is position 1. This metric rewards relevant items that appear near the top of the list, because the top of a list usually draws more attention.
* **precision_at_K**: The number of relevant recommendations out of the top K recommendations divided by K. This metric rewards precise recommendation of the relevant items.

Let's take a look at the evaluation metrics for each of the solutions produced in this notebook. *Please note, your results might differ from the results described in the text of this notebook, due to the quality of the LastFM dataset.*

HRNN metrics

First, retrieve the evaluation metrics for the HRNN solution version.
###Code
hrnn_solution_metrics_response = personalize.get_solution_metrics(
solutionVersionArn = hrnn_solution_version_arn
)
print(json.dumps(hrnn_solution_metrics_response, indent=2))
###Output
_____no_output_____
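###Markdown
Before interpreting the numbers for each recipe, it can help to see how two of the metrics defined above, precision at K and NDCG at K, are actually computed. The short sketch below is plain Python with made-up recommendation data (it is not a Personalize API call), and it follows the definitions and the 1/log(1 + position) weighting described above.

```python
import math

# Hypothetical example: top-5 recommendations for one user, and the items
# that user actually interacted with in the held-out test data.
recommended = ['artist_12', 'artist_7', 'artist_33', 'artist_2', 'artist_90']
relevant = {'artist_7', 'artist_90'}

k = len(recommended)

# precision_at_K: relevant recommendations in the top K, divided by K
precision_at_k = sum(1 for item in recommended if item in relevant) / k

# DCG at K with the 1/log(1 + position) weighting, position starting at 1
dcg = sum(1 / math.log(1 + pos)
          for pos, item in enumerate(recommended, start=1)
          if item in relevant)

# Ideal DCG: all relevant items placed at the top of the list
idcg = sum(1 / math.log(1 + pos)
           for pos in range(1, min(len(relevant), k) + 1))

ndcg_at_k = dcg / idcg

print('precision@5 = %0.3f' % precision_at_k)
print('ndcg@5      = %0.3f' % ndcg_at_k)
```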
###Markdown
The normalized discounted cumulative gain above tells us that at 5 items, we have less than a 1% chance (0.8%, to be precise) of a recommendation being part of a user's interaction history (in the hold-out phase from training and validation). Around 3% of the recommended items are unique, and we have a precision of only 0.77% in the top 5 recommended items.

This is clearly not a great model, but keep in mind that we had to use tagging data for our interactions because the listening data did not have timestamps associated with it. These results indicate that the tagging data may not be as relevant as we hoped. But, let's take a look at the results of the other solutions first.

SIMS metrics

Now, retrieve the evaluation metrics for the SIMS solution version.
###Code
sims_solution_metrics_response = personalize.get_solution_metrics(
solutionVersionArn = sims_solution_version_arn
)
print(json.dumps(sims_solution_metrics_response, indent=2))
###Output
_____no_output_____
###Markdown
In this example we are seeing a slightly elevated precision at 5 items: a little over 1% (1.04%) this time. Effectively this is probably within the margin of error, but given that no effort was made to mask popularity, it may just be returning super popular results that a large volume of users have interacted with in some way.

Personalized ranking metrics

Now, retrieve the evaluation metrics for the personalized ranking solution version.
###Code
rerank_solution_metrics_response = personalize.get_solution_metrics(
solutionVersionArn = rerank_solution_version_arn
)
print(json.dumps(rerank_solution_metrics_response, indent=2))
###Output
_____no_output_____
###Markdown
Just a quick comment on this one: here we again see a precision of near 1%; as this recipe is based on HRNN, that is to be expected.

Using evaluation metrics [Back to top](top)

It is important to use evaluation metrics carefully. There are a number of factors to keep in mind.

* If there is an existing recommendation system in place, this will have influenced the user's interaction history which you use to train your new solutions. This means the evaluation metrics are biased to favor the existing solution. If you work to push the evaluation metrics to match or exceed the existing solution, you may just be pushing the HRNN to behave like the existing solution and might not end up with something better.
* The HRNN Coldstart recipe is difficult to evaluate using the metrics produced by Amazon Personalize. The aim of the recipe is to recommend items which are new to your business. Therefore, these items will not appear in the existing user transaction data which is used to compute the evaluation metrics. As a result, HRNN Coldstart will never appear to perform better than the other recipes, when compared on the evaluation metrics alone.

Keeping in mind these factors, the evaluation metrics produced by Personalize are generally useful for two cases:

1. Comparing the performance of solution versions trained on the same recipe, but with different values for the hyperparameters.
1. Comparing the performance of solution versions trained on different recipes (except HRNN Coldstart).

Properly evaluating a recommendation system is always best done through A/B testing while measuring actual business outcomes. Since recommendations generated by a system usually influence the user behavior which it is based on, it is better to run small experiments and apply A/B testing for longer periods of time. Over time, the bias from the existing model will fade.

Storing useful variables [Back to top](top)

Before exiting this notebook, run the following cells to save the version ARNs for use in the next notebook.
###Code
%store hrnn_solution_version_arn
%store sims_solution_version_arn
%store rerank_solution_version_arn
###Output
_____no_output_____ |
arff_dataset.ipynb | ###Markdown
*Load .arff dataset in colab*
###Code
#ARFF = Attribute-Relation File Format
import pandas as pd
from scipy.io import arff
maldata = arff.loadarff('/content/drive/MyDrive/Colab Notebooks/bone-marrow.arff')
df = pd.DataFrame(maldata[0])
df.head()
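# Note: loadarff returns nominal attributes as byte strings. Decoding them to
# regular str usually makes the dataframe easier to work with (a sketch, which
# assumes every object-typed column holds bytes, as is typical for loadarff output):
byte_cols = df.select_dtypes([object]).columns
df[byte_cols] = df[byte_cols].apply(lambda col: col.str.decode('utf-8'))
df.head()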
###Output
_____no_output_____ |
qiskit/advanced/aqua/amplitude_estimation.ipynb | ###Markdown
_*Qiskit Aqua: Amplitude Estimation*_

The latest version of this notebook is available on https://github.com/Qiskit/qiskit-iqx-tutorials.

***

Contributors

Stefan Woerner[1], Daniel Egger[1], Shaohan Hu[1], Stephen Wood[1], Marco Pistoia[1]

Affiliation

- [1]IBMQ

Introduction

This notebook illustrates amplitude estimation in the simplest case, where the (assumed to be unknown) success probability $p$ of a Bernoulli random variable is estimated. In other words, we assume a qubit is prepared in a state $\sqrt{1-p}\,\big|0\rangle + \sqrt{p}\,\big|1\rangle$, i.e., the probability of measuring $\big|1\rangle$ equals $p$. This matches the results that have been demonstrated on real hardware in [1].

Amplitude estimation uses two operators: $A$ and $Q$, where $A$ describes the problem and $Q$ is derived from $A$ [2]. Here, $A = R_y(\theta_p)$, i.e., it equals a rotation with an angle $\theta_p = 2\sin^{-1}(\sqrt{p})$ around the Y-axis. For this single qubit case, $Q$ can be represented by a Y-rotation around twice that angle, i.e., $Q = R_y(2\theta_p)$ [1]. This particularly implies that powers of the operator can be efficiently represented, i.e. $Q^k = R_y(2k\theta_p)$.

The result is then mapped into $m$ evaluation qubits. The larger $m$, the higher the accuracy of the estimation but also the longer the quantum circuit.

[1] Quantum Risk Analysis. Woerner, Egger. 2018.
[2] Quantum Amplitude Amplification and Estimation. Brassard et al. 2000.
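As a quick numerical sanity check of the relation above (plain NumPy, independent of the Qiskit code below), applying $R_y(\theta_p)$ to $\big|0\rangle$ should produce the amplitudes $\sqrt{1-p}$ and $\sqrt{p}$:

```python
import numpy as np

p = 0.3
theta_p = 2 * np.arcsin(np.sqrt(p))

# Standard single-qubit Y-rotation matrix
ry = np.array([[np.cos(theta_p / 2), -np.sin(theta_p / 2)],
               [np.sin(theta_p / 2),  np.cos(theta_p / 2)]])

state = ry @ np.array([1.0, 0.0])   # R_y(theta_p)|0>
print(state)                        # ~ [sqrt(1-p), sqrt(p)]
print(np.sqrt([1 - p, p]))
```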
###Code
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
from qiskit.tools.visualization import plot_bloch_vector
from qiskit import BasicAer
from qiskit.aqua.algorithms import AmplitudeEstimation
from qiskit.aqua.algorithms.single_sample.amplitude_estimation.q_factory import QFactory
from qiskit.aqua.components.uncertainty_problems import UncertaintyProblem
from qiskit.aqua.circuits.gates import cry
# the probability to be recovered
probability = 0.3
theta_p = 2*np.arcsin(np.sqrt(probability))
# the resulting quantum state after A is applied
plot_bloch_vector([np.sin(theta_p), 0.0, np.cos(theta_p)])
class BernoulliAFactory(UncertaintyProblem):
"""
Circuit Factory representing the operator A.
A is used to initialize the state as well as to construct Q.
"""
def __init__(self, probability=0.5):
#
super().__init__(1)
self._probability = probability
self.i_state = 0
self._theta_p = 2 * np.arcsin(np.sqrt(probability))
def build(self, qc, q, q_ancillas=None):
# A is a rotation of angle theta_p around the Y-axis
qc.ry(self._theta_p, q[self.i_state])
class BernoulliQFactory(QFactory):
"""
Circuit Factory representing the operator Q.
This implementation exploits the fact that powers of Q can be implemented efficiently by just multiplying the angle.
(amplitude estimation only requires controlled powers of Q, thus, only this method is overridden.)
"""
def __init__(self, bernoulli_expected_value):
super().__init__(bernoulli_expected_value, i_objective=0)
def build(self, qc, q, q_ancillas=None):
i_state = self.a_factory.i_state
theta_p = self.a_factory._theta_p
# Q is a rotation of angle 2*theta_p around the Y-axis
qc.ry(2*theta_p, q[i_state])
def build_controlled_power(self, qc, q, q_control, power, q_ancillas=None, use_basis_gates=True):
i_state = self.a_factory.i_state
theta_p = self.a_factory._theta_p
qc.cry(2*power*theta_p, q_control, q[i_state])
# construct factories for A and Q
bernoulli_a_factory = BernoulliAFactory(probability)
bernoulli_q_factory = BernoulliQFactory(bernoulli_a_factory)
# set number of evaluation qubits
m = 3
# construct amplitude estimation
# here, we override the standard construction of Q since we know a more efficient way
# (exploiting the fact that A and Q are just Y-rotations)
ae = AmplitudeEstimation(m, bernoulli_a_factory, q_factory=bernoulli_q_factory)
# result = ae.run(quantum_instance=BasicAer.get_backend('qasm_simulator'))
result = ae.run(quantum_instance=BasicAer.get_backend('statevector_simulator'))
# plot estimated values
plt.bar(result['values'], result['probabilities'], width=0.5/len(result['probabilities']))
plt.plot([probability, probability], [0,1], 'r--', linewidth=2)
plt.xticks(size=15)
plt.yticks([0, 0.25, 0.5, 0.75, 1], size=15)
plt.title('Estimated Values', size=15)
plt.ylabel('Probability', size=15)
plt.ylim((0,1))
plt.grid()
plt.show()
# plot circuit
ae._circuit.draw(output='mpl')
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
###Output
_____no_output_____
###Markdown
Qiskit Aqua: Amplitude Estimation

Introduction

This notebook illustrates amplitude estimation in the simplest case, where the (assumed to be unknown) success probability $p$ of a Bernoulli random variable is estimated. In other words, we assume a qubit is prepared in a state $\sqrt{1-p}\,\big|0\rangle + \sqrt{p}\,\big|1\rangle$, i.e., the probability of measuring $\big|1\rangle$ equals $p$. This matches the results that have been demonstrated on real hardware in [1].

Amplitude estimation uses two operators: $A$ and $Q$, where $A$ describes the problem and $Q$ is derived from $A$ [2]. Here, $A = R_y(\theta_p)$, i.e., it equals a rotation with an angle $\theta_p = 2\sin^{-1}(\sqrt{p})$ around the Y-axis. For this single qubit case, $Q$ can be represented by a Y-rotation around twice that angle, i.e., $Q = R_y(2\theta_p)$ [1]. This particularly implies that powers of the operator can be efficiently represented, i.e. $Q^k = R_y(2k\theta_p)$.

The result is then mapped into $m$ evaluation qubits. The larger $m$, the higher the accuracy of the estimation but also the longer the quantum circuit.

[1] Quantum Risk Analysis. Woerner, Egger. 2018.
[2] Quantum Amplitude Amplification and Estimation. Brassard et al. 2000.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
from qiskit.tools.visualization import plot_bloch_vector
from qiskit import BasicAer
from qiskit.aqua.algorithms import AmplitudeEstimation
from qiskit.aqua.algorithms.single_sample.amplitude_estimation.q_factory import QFactory
from qiskit.aqua.components.uncertainty_problems import UncertaintyProblem
from qiskit.aqua.circuits.gates import cry
# the probability to be recovered
probability = 0.3
theta_p = 2*np.arcsin(np.sqrt(probability))
# the resulting quantum state after A is applied
plot_bloch_vector([np.sin(theta_p), 0.0, np.cos(theta_p)])
class BernoulliAFactory(UncertaintyProblem):
"""
Circuit Factory representing the operator A.
A is used to initialize the state as well as to construct Q.
"""
def __init__(self, probability=0.5):
#
super().__init__(1)
self._probability = probability
self.i_state = 0
self._theta_p = 2 * np.arcsin(np.sqrt(probability))
def build(self, qc, q, q_ancillas=None):
# A is a rotation of angle theta_p around the Y-axis
qc.ry(self._theta_p, q[self.i_state])
class BernoulliQFactory(QFactory):
"""
Circuit Factory representing the operator Q.
This implementation exploits the fact that powers of Q can be implemented efficiently by just multiplying the angle.
(amplitude estimation only requires controlled powers of Q, thus, only this method is overridden.)
"""
def __init__(self, bernoulli_expected_value):
super().__init__(bernoulli_expected_value, i_objective=0)
def build(self, qc, q, q_ancillas=None):
i_state = self.a_factory.i_state
theta_p = self.a_factory._theta_p
# Q is a rotation of angle 2*theta_p around the Y-axis
qc.ry(2*theta_p, q[i_state])
def build_controlled_power(self, qc, q, q_control, power, q_ancillas=None, use_basis_gates=True):
i_state = self.a_factory.i_state
theta_p = self.a_factory._theta_p
qc.cry(2*power*theta_p, q_control, q[i_state])
# construct factories for A and Q
bernoulli_a_factory = BernoulliAFactory(probability)
bernoulli_q_factory = BernoulliQFactory(bernoulli_a_factory)
# set number of evaluation qubits
m = 3
# construct amplitude estimation
# here, we override the standard construction of Q since we know a more efficient way
# (exploiting the fact that A and Q are just Y-rotations)
ae = AmplitudeEstimation(m, bernoulli_a_factory, q_factory=bernoulli_q_factory)
# result = ae.run(quantum_instance=BasicAer.get_backend('qasm_simulator'))
result = ae.run(quantum_instance=BasicAer.get_backend('statevector_simulator'))
# plot estimated values
plt.bar(result['values'], result['probabilities'], width=0.5/len(result['probabilities']))
plt.plot([probability, probability], [0,1], 'r--', linewidth=2)
plt.xticks(size=15)
plt.yticks([0, 0.25, 0.5, 0.75, 1], size=15)
plt.title('Estimated Values', size=15)
plt.ylabel('Probability', size=15)
plt.ylim((0,1))
plt.grid()
plt.show()
# plot circuit
ae._circuit.draw(output='mpl')
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
###Output
_____no_output_____
###Markdown
_*Qiskit Aqua: Amplitude Estimation*_

The latest version of this notebook is available on https://github.com/Qiskit/qiskit-tutorials.

***

Contributors

Stefan Woerner[1], Daniel Egger[1], Shaohan Hu[1], Stephen Wood[1], Marco Pistoia[1]

Affiliation

- [1]IBMQ

Introduction

This notebook illustrates amplitude estimation in the simplest case, where the (assumed to be unknown) success probability $p$ of a Bernoulli random variable is estimated. In other words, we assume a qubit is prepared in a state $\sqrt{1-p}\,\big|0\rangle + \sqrt{p}\,\big|1\rangle$, i.e., the probability of measuring $\big|1\rangle$ equals $p$. This matches the results that have been demonstrated on real hardware in [1].

Amplitude estimation uses two operators: $A$ and $Q$, where $A$ describes the problem and $Q$ is derived from $A$ [2]. Here, $A = R_y(\theta_p)$, i.e., it equals a rotation with an angle $\theta_p = 2\sin^{-1}(\sqrt{p})$ around the Y-axis. For this single qubit case, $Q$ can be represented by a Y-rotation around twice that angle, i.e., $Q = R_y(2\theta_p)$ [1]. This particularly implies that powers of the operator can be efficiently represented, i.e. $Q^k = R_y(2k\theta_p)$.

The result is then mapped into $m$ evaluation qubits. The larger $m$, the higher the accuracy of the estimation but also the longer the quantum circuit.

[1] Quantum Risk Analysis. Woerner, Egger. 2018.
[2] Quantum Amplitude Amplification and Estimation. Brassard et al. 2000.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
from qiskit.tools.visualization import plot_bloch_vector
from qiskit import BasicAer
from qiskit.aqua.algorithms import AmplitudeEstimation
from qiskit.aqua.algorithms.single_sample.amplitude_estimation.q_factory import QFactory
from qiskit.aqua.components.uncertainty_problems import UncertaintyProblem
from qiskit.aqua.circuits.gates import cry
# the probability to be recovered
probability = 0.3
theta_p = 2*np.arcsin(np.sqrt(probability))
# the resulting quantum state after A is applied
plot_bloch_vector([np.sin(theta_p), 0.0, np.cos(theta_p)])
class BernoulliAFactory(UncertaintyProblem):
"""
Circuit Factory representing the operator A.
A is used to initialize the state as well as to construct Q.
"""
def __init__(self, probability=0.5):
#
super().__init__(1)
self._probability = probability
self.i_state = 0
self._theta_p = 2 * np.arcsin(np.sqrt(probability))
def build(self, qc, q, q_ancillas=None):
# A is a rotation of angle theta_p around the Y-axis
qc.ry(self._theta_p, q[self.i_state])
class BernoulliQFactory(QFactory):
"""
Circuit Factory representing the operator Q.
This implementation exploits the fact that powers of Q can be implemented efficiently by just multiplying the angle.
(amplitude estimation only requires controlled powers of Q, thus, only this method is overridden.)
"""
def __init__(self, bernoulli_expected_value):
super().__init__(bernoulli_expected_value, i_objective=0)
def build(self, qc, q, q_ancillas=None):
i_state = self.a_factory.i_state
theta_p = self.a_factory._theta_p
# Q is a rotation of angle 2*theta_p around the Y-axis
qc.ry(2*theta_p, q[i_state])
def build_controlled_power(self, qc, q, q_control, power, q_ancillas=None, use_basis_gates=True):
i_state = self.a_factory.i_state
theta_p = self.a_factory._theta_p
qc.cry(2*power*theta_p, q_control, q[i_state])
# construct factories for A and Q
bernoulli_a_factory = BernoulliAFactory(probability)
bernoulli_q_factory = BernoulliQFactory(bernoulli_a_factory)
# set number of evaluation qubits
m = 3
# construct amplitude estimation
# here, we override the standard construction of Q since we know a more efficient way
# (exploiting the fact that A and Q are just Y-rotations)
ae = AmplitudeEstimation(m, bernoulli_a_factory, q_factory=bernoulli_q_factory)
# result = ae.run(quantum_instance=BasicAer.get_backend('qasm_simulator'))
result = ae.run(quantum_instance=BasicAer.get_backend('statevector_simulator'))
# plot estimated values
plt.bar(result['values'], result['probabilities'], width=0.5/len(result['probabilities']))
plt.plot([probability, probability], [0,1], 'r--', linewidth=2)
plt.xticks(size=15)
plt.yticks([0, 0.25, 0.5, 0.75, 1], size=15)
plt.title('Estimated Values', size=15)
plt.ylabel('Probability', size=15)
plt.ylim((0,1))
plt.grid()
plt.show()
# plot circuit
ae._circuit.draw(output='mpl')
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
###Output
_____no_output_____
###Markdown
_*Qiskit Aqua: Amplitude Estimation*_

The latest version of this notebook is available on https://github.com/Qiskit/qiskit-iqx-tutorials.

***

Contributors

Stefan Woerner[1], Daniel Egger[1], Shaohan Hu[1], Stephen Wood[1], Marco Pistoia[1]

Affiliation

- [1]IBMQ

Introduction

This notebook illustrates amplitude estimation in the simplest case, where the (assumed to be unknown) success probability $p$ of a Bernoulli random variable is estimated. In other words, we assume a qubit is prepared in a state $\sqrt{1-p}\,\big|0\rangle + \sqrt{p}\,\big|1\rangle$, i.e., the probability of measuring $\big|1\rangle$ equals $p$. This matches the results that have been demonstrated on real hardware in [1].

Amplitude estimation uses two operators: $A$ and $Q$, where $A$ describes the problem and $Q$ is derived from $A$ [2]. Here, $A = R_y(\theta_p)$, i.e., it equals a rotation with an angle $\theta_p = 2\sin^{-1}(\sqrt{p})$ around the Y-axis. For this single qubit case, $Q$ can be represented by a Y-rotation around twice that angle, i.e., $Q = R_y(2\theta_p)$ [1]. This particularly implies that powers of the operator can be efficiently represented, i.e. $Q^k = R_y(2k\theta_p)$.

The result is then mapped into $m$ evaluation qubits. The larger $m$, the higher the accuracy of the estimation but also the longer the quantum circuit.

[1] Quantum Risk Analysis. Woerner, Egger. 2018.
[2] Quantum Amplitude Amplification and Estimation. Brassard et al. 2000.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
from qiskit.tools.visualization import plot_bloch_vector
from qiskit import BasicAer
from qiskit.aqua.algorithms import AmplitudeEstimation
from qiskit.aqua.algorithms.single_sample.amplitude_estimation.q_factory import QFactory
from qiskit.aqua.components.uncertainty_problems import UncertaintyProblem
from qiskit.aqua.circuits.gates import cry
# the probability to be recovered
probability = 0.3
theta_p = 2*np.arcsin(np.sqrt(probability))
# the resulting quantum state after A is applied
plot_bloch_vector([np.sin(theta_p), 0.0, np.cos(theta_p)])
class BernoulliAFactory(UncertaintyProblem):
"""
Circuit Factory representing the operator A.
A is used to initialize the state as well as to construct Q.
"""
def __init__(self, probability=0.5):
#
super().__init__(1)
self._probability = probability
self.i_state = 0
self._theta_p = 2 * np.arcsin(np.sqrt(probability))
def build(self, qc, q, q_ancillas=None):
# A is a rotation of angle theta_p around the Y-axis
qc.ry(self._theta_p, q[self.i_state])
class BernoulliQFactory(QFactory):
"""
Circuit Factory representing the operator Q.
This implementation exploits the fact that powers of Q can be implemented efficiently by just multiplying the angle.
(amplitude estimation only requires controlled powers of Q, thus, only this method is overridden.)
"""
def __init__(self, bernoulli_expected_value):
super().__init__(bernoulli_expected_value, i_objective=0)
def build(self, qc, q, q_ancillas=None):
i_state = self.a_factory.i_state
theta_p = self.a_factory._theta_p
# Q is a rotation of angle 2*theta_p around the Y-axis
qc.ry(2*theta_p, q[i_state])
def build_controlled_power(self, qc, q, q_control, power, q_ancillas=None, use_basis_gates=True):
i_state = self.a_factory.i_state
theta_p = self.a_factory._theta_p
qc.cry(2*power*theta_p, q_control, q[i_state])
# construct factories for A and Q
bernoulli_a_factory = BernoulliAFactory(probability)
bernoulli_q_factory = BernoulliQFactory(bernoulli_a_factory)
# set number of evaluation qubits
m = 3
# construct amplitude estimation
# here, we override the standard construction of Q since we know a more efficient way
# (exploiting the fact that A and Q are just Y-rotations)
ae = AmplitudeEstimation(m, bernoulli_a_factory, q_factory=bernoulli_q_factory)
# result = ae.run(quantum_instance=BasicAer.get_backend('qasm_simulator'))
result = ae.run(quantum_instance=BasicAer.get_backend('statevector_simulator'))
# plot estimated values
plt.bar(result['values'], result['probabilities'], width=0.5/len(result['probabilities']))
plt.plot([probability, probability], [0,1], 'r--', linewidth=2)
plt.xticks(size=15)
plt.yticks([0, 0.25, 0.5, 0.75, 1], size=15)
plt.title('Estimated Values', size=15)
plt.ylabel('Probability', size=15)
plt.ylim((0,1))
plt.grid()
plt.show()
# plot circuit
ae._circuit.draw(output='mpl')
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
###Output
_____no_output_____
###Markdown
_*Qiskit Aqua: Amplitude Estimation*_

The latest version of this notebook is available on https://github.com/Qiskit/qiskit-iqx-tutorials.

***

Contributors

Stefan Woerner[1], Daniel Egger[1], Shaohan Hu[1], Stephen Wood[1], Marco Pistoia[1]

Affiliation

- [1]IBMQ

Introduction

This notebook illustrates amplitude estimation in the simplest case, where the (assumed to be unknown) success probability $p$ of a Bernoulli random variable is estimated. In other words, we assume a qubit is prepared in a state $\sqrt{1-p}\,\big|0\rangle + \sqrt{p}\,\big|1\rangle$, i.e., the probability of measuring $\big|1\rangle$ equals $p$. This matches the results that have been demonstrated on real hardware in [1].

Amplitude estimation uses two operators: $A$ and $Q$, where $A$ describes the problem and $Q$ is derived from $A$ [2]. Here, $A = R_y(\theta_p)$, i.e., it equals a rotation with an angle $\theta_p = 2\sin^{-1}(\sqrt{p})$ around the Y-axis. For this single qubit case, $Q$ can be represented by a Y-rotation around twice that angle, i.e., $Q = R_y(2\theta_p)$ [1]. This particularly implies that powers of the operator can be efficiently represented, i.e. $Q^k = R_y(2k\theta_p)$.

The result is then mapped into $m$ evaluation qubits. The larger $m$, the higher the accuracy of the estimation but also the longer the quantum circuit.

[1] Quantum Risk Analysis. Woerner, Egger. 2018.
[2] Quantum Amplitude Amplification and Estimation. Brassard et al. 2000.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
from qiskit.tools.visualization import plot_bloch_vector
from qiskit import BasicAer
from qiskit.aqua.algorithms import AmplitudeEstimation
from qiskit.aqua.algorithms.single_sample.amplitude_estimation.q_factory import QFactory
from qiskit.aqua.components.uncertainty_problems import UncertaintyProblem
from qiskit.aqua.circuits.gates import cry
# the probability to be recovered
probability = 0.3
theta_p = 2*np.arcsin(np.sqrt(probability))
# the resulting quantum state after A is applied
plot_bloch_vector([np.sin(theta_p), 0.0, np.cos(theta_p)])
class BernoulliAFactory(UncertaintyProblem):
"""
Circuit Factory representing the operator A.
A is used to initialize the state as well as to construct Q.
"""
def __init__(self, probability=0.5):
#
super().__init__(1)
self._probability = probability
self.i_state = 0
self._theta_p = 2 * np.arcsin(np.sqrt(probability))
def build(self, qc, q, q_ancillas=None):
# A is a rotation of angle theta_p around the Y-axis
qc.ry(self._theta_p, q[self.i_state])
class BernoulliQFactory(QFactory):
"""
Circuit Factory representing the operator Q.
This implementation exploits the fact that powers of Q can be implemented efficiently by just multiplying the angle.
(amplitude estimation only requires controlled powers of Q, thus, only this method is overridden.)
"""
def __init__(self, bernoulli_expected_value):
super().__init__(bernoulli_expected_value, i_objective=0)
def build(self, qc, q, q_ancillas=None):
i_state = self.a_factory.i_state
theta_p = self.a_factory._theta_p
# Q is a rotation of angle 2*theta_p around the Y-axis
qc.ry(2*theta_p, q[i_state])
def build_controlled_power(self, qc, q, q_control, power, q_ancillas=None, use_basis_gates=True):
i_state = self.a_factory.i_state
theta_p = self.a_factory._theta_p
qc.cry(2*power*theta_p, q_control, q[i_state])
# construct factories for A and Q
bernoulli_a_factory = BernoulliAFactory(probability)
bernoulli_q_factory = BernoulliQFactory(bernoulli_a_factory)
# set number of evaluation qubits
m = 3
# construct amplitude estimation
# here, we override the standard construction of Q since we know a more efficient way
# (exploiting the fact that A and Q are just Y-rotations)
ae = AmplitudeEstimation(m, bernoulli_a_factory, q_factory=bernoulli_q_factory)
# result = ae.run(quantum_instance=BasicAer.get_backend('qasm_simulator'))
result = ae.run(quantum_instance=BasicAer.get_backend('statevector_simulator'))
# plot estimated values
plt.bar(result['values'], result['probabilities'], width=0.5/len(result['probabilities']))
plt.plot([probability, probability], [0,1], 'r--', linewidth=2)
plt.xticks(size=15)
plt.yticks([0, 0.25, 0.5, 0.75, 1], size=15)
plt.title('Estimated Values', size=15)
plt.ylabel('Probability', size=15)
plt.ylim((0,1))
plt.grid()
plt.show()
# plot circuit
ae._circuit.draw(output='mpl')
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
###Output
_____no_output_____
###Markdown
_*Qiskit Aqua: Amplitude Estimation*_

The latest version of this notebook is available on https://github.com/Qiskit/qiskit-tutorials.

***

Contributors

Stefan Woerner[1], Daniel Egger[1], Shaohan Hu[1], Stephen Wood[1], Marco Pistoia[1]

Affiliation

- [1]IBMQ

Introduction

This notebook illustrates amplitude estimation in the simplest case, where the (assumed to be unknown) success probability $p$ of a Bernoulli random variable is estimated. In other words, we assume a qubit is prepared in a state $\sqrt{1-p}\,\big|0\rangle + \sqrt{p}\,\big|1\rangle$, i.e., the probability of measuring $\big|1\rangle$ equals $p$. This matches the results that have been demonstrated on real hardware in [1].

Amplitude estimation uses two operators: $A$ and $Q$, where $A$ describes the problem and $Q$ is derived from $A$ [2]. Here, $A = R_y(\theta_p)$, i.e., it equals a rotation with an angle $\theta_p = 2\sin^{-1}(\sqrt{p})$ around the Y-axis. For this single qubit case, $Q$ can be represented by a Y-rotation around twice that angle, i.e., $Q = R_y(2\theta_p)$ [1]. This particularly implies that powers of the operator can be efficiently represented, i.e. $Q^k = R_y(2k\theta_p)$.

The result is then mapped into $m$ evaluation qubits. The larger $m$, the higher the accuracy of the estimation but also the longer the quantum circuit.

[1] Quantum Risk Analysis. Woerner, Egger. 2018.
[2] Quantum Amplitude Amplification and Estimation. Brassard et al. 2000.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
from qiskit.tools.visualization import plot_bloch_vector
from qiskit import BasicAer
from qiskit.aqua.algorithms import AmplitudeEstimation
from qiskit.aqua.algorithms.single_sample.amplitude_estimation.q_factory import QFactory
from qiskit.aqua.components.uncertainty_problems import UncertaintyProblem
from qiskit.aqua.circuits.gates import cry
# the probability to be recovered
probability = 0.3
theta_p = 2*np.arcsin(np.sqrt(probability))
# the resulting quantum state after A is applied
plot_bloch_vector([np.sin(theta_p), 0.0, np.cos(theta_p)])
class BernoulliAFactory(UncertaintyProblem):
"""
Circuit Factory representing the operator A.
A is used to initialize the state as well as to construct Q.
"""
def __init__(self, probability=0.5):
# a single-qubit uncertainty problem; the target qubit is at index 0
super().__init__(1)
self._probability = probability
self.i_state = 0
self._theta_p = 2 * np.arcsin(np.sqrt(probability))
def build(self, qc, q, q_ancillas=None):
# A is a rotation of angle theta_p around the Y-axis
qc.ry(self._theta_p, q[self.i_state])
class BernoulliQFactory(QFactory):
"""
Circuit Factory representing the operator Q.
This implementation exploits the fact that powers of Q can be implemented efficiently by just multiplying the angle.
(amplitude estimation only requires controlled powers of Q, thus, only this method is overridden.)
"""
def __init__(self, bernoulli_expected_value):
super().__init__(bernoulli_expected_value, i_objective=0)
def build(self, qc, q, q_ancillas=None):
i_state = self.a_factory.i_state
theta_p = self.a_factory._theta_p
# Q is a rotation of angle 2*theta_p around the Y-axis
qc.ry(2*theta_p, q[i_state])
def build_controlled_power(self, qc, q, q_control, power, q_ancillas=None, use_basis_gates=True):
i_state = self.a_factory.i_state
theta_p = self.a_factory._theta_p
qc.cry(2*power*theta_p, q_control, q[i_state])
# construct factories for A and Q
bernoulli_a_factory = BernoulliAFactory(probability)
bernoulli_q_factory = BernoulliQFactory(bernoulli_a_factory)
# set number of evaluation qubits
m = 3
# construct amplitude estimation
# here, we override the standard construction of Q since we know a more efficient way
# (exploiting the fact that A and Q are just Y-rotations)
ae = AmplitudeEstimation(m, bernoulli_a_factory, q_factory=bernoulli_q_factory)
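# Added note: with m evaluation qubits, canonical amplitude estimation can only return
# estimates on the grid sin^2(pi * y / 2^m) for y = 0, ..., 2^m - 1 (values repeat symmetrically),
# so larger m gives a finer grid. A quick look at that grid for m = 3 -- these are roughly
# the x-positions of the bars plotted below.
grid = sorted(set(np.round(np.sin(np.pi * np.arange(2**m) / 2**m)**2, 4)))
print('possible estimates for m = %d:' % m, grid)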
# result = ae.run(quantum_instance=BasicAer.get_backend('qasm_simulator'))
result = ae.run(quantum_instance=BasicAer.get_backend('statevector_simulator'))
# plot estimated values
plt.bar(result['values'], result['probabilities'], width=0.5/len(result['probabilities']))
plt.plot([probability, probability], [0,1], 'r--', linewidth=2)
plt.xticks(size=15)
plt.yticks([0, 0.25, 0.5, 0.75, 1], size=15)
plt.title('Estimated Values', size=15)
plt.ylabel('Probability', size=15)
plt.ylim((0,1))
plt.grid()
plt.show()
# plot circuit
ae._circuit.draw(output='mpl')
###Output
_____no_output_____ |
tools/TestSyntax.ipynb | ###Markdown
Test Cases of Syntax
###Code
import Syntax as sx
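# Added note on the test data below: each token row appears to follow a fixed 16-column layout,
# roughly [storyID, chapterID, sentenceID, tokenID, charStart, charEnd, whitespaceFlag,
#          headTokenID, word, originalWord, lemma, POS, NER, dependencyRelation,
#          inQuoteFlag, characterID].
# This reading is an assumption inferred from the rows and from the storyID/chapterID/sentenceID
# printed at the end of the notebook, not taken from Syntax.py itself; the `11` passed to the
# extract_* calls appears to match the characterID column.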
# Carmella is kind of your typical teen.
sentence = [["1790", "0", "0", "0", "0", "8", "S", "2", "Carmella", "Carmella", "Carmella", "NNP",
"PERSON", "nsubj", "false", "11"],
["1790", "0", "0", "1", "9", "11", "S", "2", "is", "is", "be", "VBZ",
"O", "cop", "false", "-1"],
["1790", "0", "0", "2", "12", "16", "S", "-1", "kind", "kind", "kind", "NN",
"O", "null", "false", "-1"],
["1790", "0", "0", "3", "17", "19", "S", "2", "of", "of", "of", "IN",
"O", "prep", "false", "-1"],
["1790", "0", "0", "4", "20", "24", "S", "6", "your", "your", "you", "PRP$",
"O", "poss", "false", "-1"],
["1790", "0", "0", "5", "25", "32", "S", "6", "typical", "typical", "typical", "JJ",
"O", "amod", "false", "-1"],
["1790", "0", "0", "6", "33", "37", "", "3", "teen", "teen", "teen", "NN",
"O", "pobj", "false", "-1"],
["1790", "0", "0", "7", "37", "38", "S", "2", ".", ".", ".", ".",
"O", "punct", "false", "-1"]]
syn = sx.SyntaxTree()
syn.creat(sentence)
print syn.extract_label(11)
print syn.extract_label_with_info(11)
print syn.extract_des(11)
print syn.extract_action(11)
print syn.extract_full_action(11)
sentence = [["1790", "0", "1", "8", "39", "42", "", "12", "She", "She", "she", "PRP", "O", "nsubj", "false", "11"],
["1790", "0", "1", "9", "42", "44", "S", "12", "'s", "'s", "be", "VBZ", "O", "cop", "false", "-1"],
["1790", "0", "1", "10", "45", "52", "SS", "12", "sixteen", "sixteen", "sixteen", "CD", "NUMBER", "num", "false", "-1"],
["1790", "0", "1", "11", "54", "57", "S", "12", "the", "the", "the", "DT", "O", "det", "false", "-1"],
["1790", "0", "1", "12", "58", "62", "S", "-1", "baby", "baby", "baby", "NN", "O", "null", "false", "-1"],
["1790", "0", "1", "13", "63", "65", "S", "12", "of", "of", "of", "IN", "O", "prep", "false", "-1"],
["1790", "0", "1", "14", "66", "69", "S", "15", "the", "the", "the", "DT", "O", "det", "false", "-1"],
["1790", "0", "1", "15", "70", "76", "S", "13", "family", "family", "family", "NN", "O", "pobj", "false", "-1"],
["1790", "0", "1", "16", "77", "81", "S", "12", "with", "with", "with", "IN", "O", "prep", "false", "-1"],
["1790", "0", "1", "17", "82", "84", "S", "20", "an", "an", "a", "DT", "O", "det", "false", "-1"],
["1790", "0", "1", "18", "85", "93", "S", "20", "annoying", "annoying", "annoying", "JJ", "O", "amod", "false", "-1"],
["1790", "0", "1", "19", "94", "97", "S", "20", "big", "big", "big", "JJ", "O", "amod", "false", "-1"],
["1790", "0", "1", "20", "98", "104", "S", "16", "sister", "sister", "sister", "NN", "O", "pobj", "false", "-1"],
["1790", "0", "1", "21", "105", "108", "S", "12", "and", "and", "and", "CC", "O", "cc", "false", "-1"],
["1790", "0", "1", "22", "109", "115", "SS", "12", "strict", "strict", "strict", "JJ", "O", "conj", "false", "-1"],
["1790", "0", "1", "23", "117", "121", "S", "22", "over", "over", "over", "IN", "O", "prep", "false", "-1"],
["1790", "0", "1", "24", "122", "132", "S", "25", "protective", "protective", "protective", "JJ", "O", "amod", "false", "-1"],
["1790", "0", "1", "25", "133", "140", "", "23", "parents", "parents", "parent", "NNS", "O", "pobj", "false", "-1"],
["1790", "0", "1", "26", "140", "141", "S", "12", ".", ".", ".", ".", "O", "punct", "false", "-1"]]
syn = sx.SyntaxTree()
syn.creat(sentence)
print syn.extract_label(11)
print syn.extract_label_with_info(11)
print syn.extract_des(11)
print syn.extract_action(11)
print syn.extract_full_action(11)
sentence = [["1790", "1", "17", "241", "1102", "1103", "S", "243", "I", "I", "I", "PRP", "O", "nsubj", "false", "11"],
["1790", "1", "17", "242", "1104", "1106", "S", "243", "do", "do", "do", "VBP", "O", "aux", "false", "-1"],
["1790", "1", "17", "243", "1107", "1111", "S", "250", "know", "know", "know", "VB", "O", "parataxis", "false", "-1"],
["1790", "1", "17", "244", "1112", "1116", "S", "247", "that", "that", "that", "IN", "O", "complm", "false", "-1"],
["1790", "1", "17", "245", "1117", "1118", "S", "247", "I", "I", "I", "PRP", "O", "nsubj", "false", "11"],
["1790", "1", "17", "246", "1119", "1121", "S", "247", "am", "am", "be", "VBP", "O", "cop", "false", "-1"],
["1790", "1", "17", "247", "1122", "1126", "", "243", "evil", "evil", "evil", "JJ", "O", "ccomp", "false", "-1"],
["1790", "1", "17", "248", "1126", "1127", "S", "243", ",", ",", ",", ",", "O", "punct", "false", "-1"],
["1790", "1", "17", "249", "1128", "1140", "", "250", "mhuahahahaaa", "mhuahahahaaa", "mhuahahahaaa", "JJ", "O", "amod", "false", "-1"],
["1790", "1", "17", "250", "1140", "1143", "S", "235", "!!!", "!!!", "!!!", "NN", "O", "conj", "false", "-1"]]
syn = sx.SyntaxTree()
syn.creat(sentence)
print syn.extract_label(11)
print syn.extract_label_with_info(11)
print syn.extract_des(11)
print syn.extract_action(11)
print syn.extract_full_action(11)
sentence = [["1790", "2", "215", "4654", "20920", "20921", "S", "4659", "I", "I", "I", "PRP", "O", "nsubj", "false", "11"],
["1790", "2", "215", "4655", "20922", "20925", "S", "4659", "was", "was", "be", "VBD", "O", "cop", "false", "-1"],
["1790", "2", "215", "4656", "20926", "20929", "S", "4659", "the", "the", "the", "DT", "O", "det", "false", "-1"],
["1790", "2", "215", "4657", "20930", "20934", "S", "4658", "most", "most", "most", "RBS", "O", "advmod", "false", "-1"],
["1790", "2", "215", "4658", "20935", "20944", "S", "4659", "beautiful", "beautiful", "beautiful", "JJ", "O", "amod", "false", "-1"],
["1790", "2", "215", "4659", "20945", "20953", "S", "4663", "creature", "creature", "creature", "NN", "O", "dobj", "false", "-1"],
["1790", "2", "215", "4660", "20954", "20956", "S", "4663", "he", "he", "he", "PRP", "O", "nsubj", "false", "5"],
["1790", "2", "215", "4661", "20957", "20960", "S", "4663", "had", "had", "have", "VBD", "O", "aux", "false", "-1"],
["1790", "2", "215", "4662", "20961", "20965", "S", "4663", "ever", "ever", "ever", "RB", "O", "advmod", "false", "-1"],
["1790", "2", "215", "4663", "20966", "20970", "", "4641", "seen", "seen", "see", "VBN", "O", "rcmod", "false", "-1"],
["1790", "2", "215", "4664", "20970", "20971", "S", "4629", ".", ".", ".", ".", "O", "punct", "false", "-1"]]
syn = sx.SyntaxTree()
syn.creat(sentence)
print syn.extract_label(11)
print syn.extract_label_with_info(11)
print syn.extract_des(11)
print syn.extract_action(11)
print syn.extract_full_action(11)
sentence = [["1839", "2", "108", "2323", "10798", "10801", "S", "2327", "Max", "Max", "max", "NN", "PERSON", "nsubj", "true", "11"],
["1839", "2", "108", "2324", "10802", "10805", "", "2327", "was", "was", "be", "VBD", "O", "cop", "true", "-1"],
["1839", "2", "108", "2325", "10805", "10808", "S", "2327", "n't", "n't", "not", "RB", "O", "neg", "true", "-1"],
["1839", "2", "108", "2326", "10809", "10821", "S", "2327", "particularly", "particularly", "particularly", "RB", "O", "advmod", "true", "-1"],
["1839", "2", "108", "2327", "10822", "10826", "S", "-1", "fond", "fond", "fond", "JJ", "O", "null", "true", "-1"],
["1839", "2", "108", "2328", "10827", "10829", "S", "2327", "of", "of", "of", "IN", "O", "prep", "true", "-1"],
["1839", "2", "108", "2329", "10830", "10836", "S", "2328", "having", "having", "have", "VBG", "O", "pcomp", "true", "-1"],
["1839", "2", "108", "2330", "10837", "10839", "S", "2331", "to", "to", "to", "TO", "O", "aux", "true", "-1"],
["1839", "2", "108", "2331", "10840", "10844", "S", "2329", "hold", "hold", "hold", "VB", "O", "xcomp", "true", "-1"],
["1839", "2", "108", "2332", "10845", "10849", "S", "2331", "onto", "onto", "onto", "IN", "O", "prep", "true", "-1"],
["1839", "2", "108", "2333", "10850", "10851", "S", "2334", "a", "a", "a", "DT", "O", "det", "true", "-1"],
["1839", "2", "108", "2334", "10852", "10859", "", "2332", "vampire", "vampire", "vampire", "NN", "O", "pobj", "true", "-1"],
["1839", "2", "108", "2335", "10859", "10860", "S", "2327", ",", ",", ",", ",", "O", "punct", "true", "-1"],
["1839", "2", "108", "2336", "10861", "10864", "S", "2327", "but", "but", "but", "CC", "O", "cc", "true", "-1"],
["1839", "2", "108", "2337", "10865", "10867", "S", "2341", "if", "if", "if", "IN", "O", "mark", "true", "-1"],
["1839", "2", "108", "2338", "10868", "10870", "S", "2341", "he", "he", "he", "PRP", "O", "nsubj", "true", "1"],
["1839", "2", "108", "2339", "10871", "10874", "S", "2341", "was", "was", "be", "VBD", "O", "cop", "true", "-1"],
["1839", "2", "108", "2340", "10875", "10876", "S", "2341", "a", "a", "a", "DT", "O", "det", "true", "-1"],
["1839", "2", "108", "2341", "10877", "10880", "", "2348", "bat", "bat", "bat", "NN", "O", "advcl", "true", "-1"],
["1839", "2", "108", "2342", "10880", "10881", "S", "2348", ",", ",", ",", ",", "O", "punct", "true", "-1"],
["1839", "2", "108", "2343", "10882", "10884", "S", "2348", "it", "it", "it", "PRP", "O", "nsubj", "true", "-1"],
["1839", "2", "108", "2344", "10885", "10890", "", "2348", "could", "could", "could", "MD", "O", "aux", "true", "-1"],
["1839", "2", "108", "2345", "10890", "10893", "S", "2348", "n't", "n't", "not", "RB", "O", "neg", "true", "-1"],
["1839", "2", "108", "2346", "10894", "10896", "S", "2348", "be", "be", "be", "VB", "O", "cop", "true", "-1"],
["1839", "2", "108", "2347", "10897", "10900", "S", "2348", "too", "too", "too", "RB", "O", "advmod", "true", "-1"],
["1839", "2", "108", "2348", "10901", "10904", "", "2327", "bad", "bad", "bad", "JJ", "O", "conj", "true", "-1"],
["1839", "2", "108", "2349", "10904", "10905", "S", "2327", ".", ".", ".", ".", "O", "punct", "true", "-1"]]
syn = sx.SyntaxTree()
syn.creat(sentence)
print syn.extract_label(11)
print syn.extract_label_with_info(11)
print syn.extract_des(11)
print syn.extract_action(11)
print syn.extract_full_action(11)
sentence = [["1096", "2", "228", "3361", "14997", "14998", "S", "3364", "I", "I", "I", "PRP", "O", "nsubj", "true", "11"],
["1096", "2", "228", "3362", "14999", "15001", "S", "3364", "am", "am", "be", "VBP", "O", "cop", "true", "-1"],
["1096", "2", "228", "3363", "15002", "15006", "S", "3364", "very", "very", "very", "RB", "O", "advmod", "true", "-1"],
["1096", "2", "228", "3364", "15007", "15011", "S", "-1", "glad", "glad", "glad", "JJ", "O", "null", "true", "-1"],
["1096", "2", "228", "3365", "15012", "15015", "S", "3364", "for", "for", "for", "IN", "O", "prep", "true", "-1"],
["1096", "2", "228", "3366", "15016", "15019", "S", "3367", "the", "the", "the", "DT", "O", "det", "true", "-1"],
["1096", "2", "228", "3367", "15020", "15025", "S", "3365", "power", "power", "power", "NN", "O", "pobj", "true", "-1"],
["1096", "2", "228", "3368", "15026", "15028", "S", "3369", "to", "to", "to", "TO", "O", "aux", "true", "-1"],
["1096", "2", "228", "3369", "15029", "15036", "S", "3367", "control", "control", "control", "VB", "O", "infmod", "true", "-1"],
["1096", "2", "228", "3370", "15037", "15042", "S", "3371", "human", "human", "human", "JJ", "O", "amod", "true", "-1"],
["1096", "2", "228", "3371", "15043", "15051", "S", "3369", "memories", "memories", "memory", "NNS", "O", "dobj", "true", "-1"],
["1096", "2", "228", "3372", "15052", "15054", "S", "3369", "at", "at", "at", "IN", "O", "prep", "true", "-1"],
["1096", "2", "228", "3373", "15055", "15058", "S", "3374", "the", "the", "the", "DT", "O", "det", "true", "-1"],
["1096", "2", "228", "3374", "15059", "15065", "", "3372", "moment", "moment", "moment", "NN", "O", "pobj", "true", "-1"],
["1096", "2", "228", "3375", "15065", "15066", "", "3364", ".", ".", ".", ".", "O", "punct", "true", "-1"],
["1096", "2", "228", "3376", "15066", "15067", "SS", "3364", "”", "''", "''", "''", "O", "punct", "true", "-1"]]
syn = sx.SyntaxTree()
syn.creat(sentence)
print syn.extract_label(11)
print syn.extract_label_with_info(11)
print syn.extract_des(11)
print syn.extract_action(11)
print syn.extract_full_action(11)
sentence = [["1096", "2", "236", "3457", "15434", "15441", "S", "3462", "Because", "Because", "because", "IN", "O", "mark", "false", "-1"],
["1096", "2", "236", "3458", "15442", "15445", "", "3462", "she", "she", "she", "PRP", "O", "nsubj", "false", "11"],
["1096", "2", "236", "3459", "15445", "15447", "S", "3462", "’s", "'s", "be", "VBZ", "O", "cop", "false", "-1"],
["1096", "2", "236", "3460", "15448", "15449", "S", "3462", "a", "a", "a", "DT", "O", "det", "false", "-1"],
["1096", "2", "236", "3461", "15450", "15458", "S", "3462", "stubborn", "stubborn", "stubborn", "JJ", "O", "amod", "false", "-1"],
["1096", "2", "236", "3462", "15459", "15464", "", "3465", "idiot", "idiot", "idiot", "NN", "O", "advcl", "false", "-1"],
["1096", "2", "236", "3463", "15464", "15465", "S", "3465", ",", ",", ",", ",", "O", "punct", "false", "-1"],
["1096", "2", "236", "3464", "15466", "15467", "S", "3465", "I", "I", "I", "PRP", "O", "nsubj", "false", "-1"],
["1096", "2", "236", "3465", "15468", "15473", "S", "-1", "heard", "heard", "hear", "VBD", "O", "null", "false", "-1"],
["1096", "2", "236", "3466", "15474", "15479", "S", "3467", "Daren", "Daren", "Daren", "NNP", "PERSON", "nsubj", "false", "0"],
["1096", "2", "236", "3467", "15480", "15485", "", "3465", "think", "think", "think", "VB", "O", "ccomp", "false", "-1"],
["1096", "2", "236", "3468", "15485", "15486", "S", "3465", ".", ".", ".", ".", "O", "punct", "false", "-1"]]
syn = sx.SyntaxTree()
syn.creat(sentence)
print syn.extract_label(11)
print syn.extract_label_with_info(11)
print syn.extract_des(11)
print syn.extract_action(11)
print syn.extract_full_action(11)
print syn.storyID
print syn.chapterID
print syn.sentenceID
###Output
['stubborn idiot']
['a stubborn idiot ,']
['idiot']
[]
[]
1096
2
236
|
RNN/IMDB Classification.ipynb | ###Markdown
IMDB Sentiment Classification
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from keras.callbacks import LambdaCallback, ModelCheckpoint
from keras.datasets import imdb
from keras.layers import Dense, AlphaDropout, BatchNormalization, GRU, Embedding
from keras.models import Sequential, load_model
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import classification_report
(x_train, y_train), (x_test, y_test) = imdb.load_data(path="imdb.npz",
num_words=10000,  # limit the vocabulary so word indices fit the Embedding layer (max_features below)
skip_top=0,
maxlen=None,
seed=113,
start_char=1,
oov_char=2,
index_from=3)
print("Train-set size: ", len(x_train))
print("Test-set size: ", len(x_test))
num_tokens = [len(tokens) for tokens in np.concatenate((x_train, x_test))]  # concatenate the two sets; element-wise + on the numpy object arrays would join paired reviews instead
num_tokens = np.array(num_tokens)
np.mean(num_tokens)
np.max(num_tokens)
max_tokens = np.mean(num_tokens) + 2 * np.std(num_tokens)
max_tokens = int(max_tokens)
max_tokens
np.sum(num_tokens < max_tokens) / len(num_tokens)
pad = 'pre'
x_train_pad = pad_sequences(x_train, maxlen=max_tokens,
padding=pad, truncating=pad)
x_test_pad = pad_sequences(x_test, maxlen=max_tokens,
padding=pad, truncating=pad)
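# Added illustration of 'pre' padding/truncating: shorter reviews get zeros prepended and
# longer ones lose their earliest tokens, e.g.
# pad_sequences([[1, 2, 3]], maxlen=5, padding='pre') -> [[0, 0, 1, 2, 3]]
print(x_train_pad.shape, x_test_pad.shape)  # both (25000, max_tokens)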
embedding_size = 128
max_features = 10000
epochs = 1
modelh5 = 'IMDBGRUClassifier'
loadmodelh5 = 'IMDBGRUClassifier-best'
try:
model = load_model(loadmodelh5 + '.h5')
print('Model loaded successfully')
except IOError:
print('Building the model for the first time')
model = Sequential()
model.add(Embedding(input_dim=max_features,
output_dim=embedding_size,
input_length=max_tokens,
name='layer_embedding'))
model.add(GRU(units=128))
model.add(Dense(1, activation='sigmoid'))
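# Added note on layer shapes for a batch of N reviews:
# Embedding: (N, max_tokens) integer indices -> (N, max_tokens, embedding_size) vectors
# GRU:       (N, max_tokens, embedding_size) -> (N, 128) final hidden state
# Dense:     (N, 128) -> (N, 1) sigmoid probability that the review is positive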
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
savebestmodel = ModelCheckpoint(modelh5 + '-best.h5', monitor='loss', verbose=0, save_best_only=True)
model.summary()
%%time
model.fit(x_train_pad, y_train,
validation_split=0.05, epochs=epochs, batch_size=256, callbacks=[savebestmodel])
%%time
result = model.evaluate(x_test_pad, y_test, batch_size=512)
y_true, y_prob = y_train, model.predict(x_train_pad, batch_size=512)
y_pred = y_prob >= 0.5
print(classification_report(y_true, y_pred))
y_true, y_prob = y_test, model.predict(x_test_pad, batch_size=512)
y_pred = y_prob >= 0.5
print(classification_report(y_true, y_pred))
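# Hedged sketch (not in the original notebook): scoring a new piece of text with the trained
# model. The index handling assumes the start_char=1 / oov_char=2 / index_from=3 convention
# used by imdb.load_data above, and the simple whitespace split is only illustrative.
word_index = imdb.get_word_index()
def encode_review(text):
    idx = [word_index.get(w, -1) + 3 for w in text.lower().split()]
    idx = [i if 2 < i < max_features else 2 for i in idx]  # unknown or too-rare words -> oov token
    return pad_sequences([[1] + idx], maxlen=max_tokens, padding=pad, truncating=pad)
print(model.predict(encode_review('a touching and brilliantly acted film')))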
###Output
precision recall f1-score support
0 0.82 0.87 0.85 12500
1 0.86 0.81 0.84 12500
avg / total 0.84 0.84 0.84 25000
|
1-Lessons/Lesson03/dev_src/ENGR-1330-Lesson3-Copy1.ipynb | ###Markdown
ENGR 1330 Computational Thinking with Data Science Last GitHub Commit Date: 25 January 2021 Lesson 3 Data Structures and Conditional Statements: - Data structures; lists, arrays, tuples, sets, dictionaries - Name, index, contents; keys - Conditional structures; logical compares, block and in-line if --- Special Script Blocks
###Code
%%html
<!-- Script Block to set tables to left alignment -->
<style>
table {margin-left: 0 !important;}
</style>
###Output
_____no_output_____
###Markdown
--- Objectives1) Develop awareness of data structures available in Python to store and manipulate data 1. Implement arrays (lists), dictionaries, and tuples 2. Address contents of lists , dictionaries, and tuples2) Develop awareness of decision making in Python 1. Implement decision making in Python using using if-then ... conditional statements --- Data Structures and Conditional Statements**Computational thinking (CT)** concepts involved are:- `Decomposition` : Data interpretation, manipulation, and analysis of NumPy arrays- `Abstraction` : Data structures; Arrays, lists, tuples, sets, and dictionaries- `Algorithms` : Conditional statements What is a data structure?Data Structures are a specialized means of organizing and storing data in computers in such a way that we can perform operations on the stored data more efficiently.In our iPython world the structures are illustrated in the figure below ListsA list is a collection of data that are somehow related. It is a convenient way to refer to acollection of similar things by a single name, and using an index (like a subscript in math)to identify a particular item.Consider the "math-like" variable $x$ below:\begin{gather}x_0= 7 \\x_1= 11 \\x_2= 5 \\x_3= 9 \\x_4= 13 \\\dots \\x_N= 223 \\\end{gather} The variable name is $x$ and the subscripts correspond to different values. Thus the `value` of the variable named $x$ associated with subscript $3$ is the number $9$.The figure below is a visual representation of a the concept that treats a variable as a collection of cells. In the figure, the variable name is `MyList`, the subscripts are replaced by an indexwhich identifies which cell is being referenced. The value is the cell content at the particular index. So in the figure the value of `MyList` at Index = 3 is the number 9.'In engineering and data science we use lists a lot - we often call then vectors, arrays, matrices and such, but they are ultimately just lists.To declare a list you can write the list name and assign it values. The square brackets are used to identify that the variable is a list. Like: MyList = [7,11,5,9,13,66,99,223]One can also declare a null list and use the `append()` method to fill it as needed. MyOtherList = [ ] Python indices start at **ZERO**. A lot of other languages start at ONE. It's just the convention. The first element in a list has an index of 0, the second an index of 1, and so on.We access the contents of a list by referring to its name and index. For example MyList[3] has a value of the number 9. ArraysArrays are lists that are used to store only elements of a specific data type- Ordered: Elements in an array can be indexed- Mutable: Elements in an array can be alteredData type that an array must hold is specified using the type code when it is created- ‘f’ for float- ‘d’ for double- ‘i’ for signed int- ‘I’ for unsigned intMore types are listed below|Type Code|C Data Type|Python Data Type|Minimum Size in Bytes||:---|---|---|---:||'b'| signed char|int |1||'B'| unsigned char |int |1||'h'| signed short |int |2||'H'| unsigned short |int |2||'i'| signed int |int |2||'I'| unsigned int |int |2||'l'| signed long |int |4||'L'| unsigned long |int |4||'q'| signed long long |int |8||'Q'| unsigned long long |int |8||'f'| float |float |4||'d'| double |float |8|To use arrays, a library named ‘array’ must be imported
###Code
import array
###Output
_____no_output_____
###Markdown
Creating an array that contains signed integer numbers
###Code
myarray = array.array('i', [1, 2, 4, 8, 16, 32])
myarray[0] #1-st element, 0-th position
import array as arr #import using an alias so the calls dont look so funny
myarray = arr.array('i', [1, 2, 4, 8, 16, 32])
myarray[0] #1-st element, 0-th position
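# Added illustration: a plain list (the MyList example from the markdown above) holds the
# same values without a type code; unlike the typed array, it could also hold mixed types.
MyList = [7, 11, 5, 9, 13, 66, 99, 223]
print(MyList[3])         # value at index 3 is 9
MyOtherList = []         # a null list ...
MyOtherList.append(765)  # ... filled as needed with append()
print(MyOtherList)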
###Output
_____no_output_____
###Markdown
Lists: Can store elements of different data types; like arrays they are (arrays are lists, but lists are not quite arrays!)- Ordered: Elements in a list can be indexed- Mutable: Elements in a list can be altered- Mathematical operations must be applied to each element of the list Tuple - A special listA tuple is a special kind of list where the **values cannot be changed** after the list is created.Such a property is called `immutable`It is useful for list-like things that are static - like days in a week, or months of a year.You declare a tuple like a list, except use round brackets instead of square brackets. MyTupleName = ("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec") Tuples are often created as output from packages and functions. Dictionary - A special listA dictionary is a special kind of list where the items are related data `PAIRS`. It is a lot like a relational database (it probably is one in fact) where the first item in the pair is called the key, and must be unique in a dictionary, and the second item in the pair is the data.The second item could itself be a list, so a dictionary would be a meaningful way to build adatabase in Python.To declare a dictionary using `curly` brackets MyPetsNamesAndMass = { "Dusty":7.8 , "Aspen":6.3, "Merrimee":0.03}To declare a dictionary using the `dict()` method MyPetsNamesAndMassToo = dict(Dusty = 7.8 , Aspen = 6.3, Merrimee = 0.03) Dictionary properties- Unordered: Elements in a dictionary cannot be- Mutable elements: Elements in a dictionary can be altered- Immutable keys: Keys in a dictionary cannot be altered Sets - A special listSets: Are used to store elements of different data types- Unordered: Elements in a set cannot be indexed- Mutable: Elements in a set can be altered- Non-repetition: Elements in a set are uniqueElements of a set are enclosed in curly brackets { }- Creating sets that contains different data types- Sets cannot be nested What's the difference between a set and dictionary? From https://stackoverflow.com/questions/34370599/difference-between-dict-and-set-python"Well, a set is like a dict with keys but no values, and they're both implemented using a hash table. But yes, it's a little annoying that the `{}` notation denotes an empty `dict` rather than an empty `set`, but that's a historical artifact." --- Conditional StatementsDecision making via conditional statements is an important step in algorithm design; they control the flow of execution of a program.Conditional statements in Python include:- `if` statement- `if....else` statements- `if....elif....else` statementsConditional statements are logical expressions that evaluate as TRUE or FALSE and usingthese results to perform further operations based on these conditions.All flow control in a program depends on evaluating conditions. The program will proceeddiferently based on the outcome of one or more conditions - really sophisticated AI programs are a collection of conditions and correlations. Expressed in a flowchart a block `if` statement looks like:As psuedo code: if(condition is true): do stuffAmazon knowing what you kind of want is based on correlations of your past behavior compared to other peoples similar, but more recent behavior, and then it uses conditional statements to decide what item to offer you in your recommendation items. It's spooky, but ultimately just a program running in the background trying to make your money theirs. ComparisonThe most common conditional operation is comparison. 
If we wish to compare whether two variables are the same we use the == (double equal sign). For example x == y means the program will ask whether x and y have the same value. If they do, the result is TRUE; if not, then the result is FALSE. Other comparison signs are `!=` does NOT equal, `>` larger than, `>=` greater than or equal (and their counterparts `<` and `<=`). There are also three logical operators when we want to build multiple compares (multiple conditioning); these are `and`, `or`, and `not`. The `and` operator returns TRUE if (and only if) **all** conditions are TRUE. For instance `5 == 5 and 5 < 6` will return a TRUE because both conditions are true. The `or` operator returns `TRUE` if at least one condition is true. If **all** conditions are FALSE, then it will return a FALSE. For instance `4 > 3 or 17 > 20 or 3 == 2` will return `TRUE` because the first condition is true. The `not` operator returns `TRUE` if the condition after the `not` keyword is false. Think of it as a way to do a logic reversal. Block `if` statement The `if` statement is a common flow control statement. It allows the program to evaluate if a certain condition is satisfied and to perform a designed action based on the result of the evaluation. The structure of an `if` statement is if condition1 is met: do A elif condition 2 is met: do b elif condition 3 is met: do c else: do e The `elif` means "else if". The `:` colon is an important part of the structure; it tells where the action begins. Also there are no scope delimiters like () or {}. Instead Python uses indentation to isolate blocks of code. This convention is hugely important - many other coding environments use delimiters (called scoping delimiters), but Python does not. The indentation itself is the scoping delimiter. Inline `if` statement An inline `if` statement is a simpler form of an `if` statement and is more convenient if you only need to perform a simple conditional task. The syntax is: do TaskA `if` condition is true `else` do TaskB An example would be myInt = 3 num1 = 12 if myInt == 0 else 13 num1 An alternative way is to enclose the condition in brackets for some clarity like myInt = 3 num1 = 12 if (myInt == 0) else 13 num1 In either case the result is that `num1` will have the value `13` (unless you set myInt to 0). One can also use `if` to construct extremely inefficient loops. Readings 1. Computational and Inferential Thinking Ani Adhikari and John DeNero, Computational and Inferential Thinking, The Foundations of Data Science, Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International (CC BY-NC-ND) Chapter 4 Subpart 3 https://www.inferentialthinking.com/chapters/04/3/Comparison.html 2. Computational and Inferential Thinking Ani Adhikari and John DeNero, Computational and Inferential Thinking, The Foundations of Data Science, Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International (CC BY-NC-ND) Chapter 4 https://www.inferentialthinking.com/chapters/04/Data_Types.html 3. Learn Python the Hard Way (Online Book) (https://learnpythonthehardway.org/book/) Recommended for beginners who want a complete course in programming with Python. 4. LearnPython.org (Interactive Tutorial) (https://www.learnpython.org/) Short, interactive tutorial for those who just need a quick way to pick up Python syntax.
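As a small added illustration (not part of the original lesson), the structures and conditionals above can be exercised together; the names mirror those used in the text:
MyTupleName = ("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
MyPetsNamesAndMass = { "Dusty":7.8 , "Aspen":6.3, "Merrimee":0.03}
MySet = {"lists", "arrays", "tuples", "sets", "dictionaries"}
mass = MyPetsNamesAndMass["Aspen"]
if mass >= 7.0:
    print("heavy pet")
elif mass >= 1.0:
    print("medium pet")
else:
    print("light pet")
num1 = 12 if mass == 0 else 13
Here the block `if` prints "medium pet" and the inline `if` leaves num1 equal to 13.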
###Code
# Script block to identify host, user, and kernel
import sys
! hostname
! whoami
! pwd
print(sys.executable)
print(sys.version)
print(sys.version_info)
###Output
ip-172-26-4-2
compthink
/home/compthink/engr-1330-webroot/1-Lessons/Lesson03/OriginalPowerpoint
/opt/jupyterhub/bin/python3
3.8.5 (default, Jul 28 2020, 12:59:40)
[GCC 9.3.0]
sys.version_info(major=3, minor=8, micro=5, releaselevel='final', serial=0)
|
samples/03_org_administrators/clone_a_group.ipynb | ###Markdown
Clone a Group This sample notebook can be used for cloning one or more groups, either on the same portal or from one portal to another.**Note:** If you want to clone all portal users, groups and content refer to the sample [Clone Portal users, groups and content](clone_portal_users_groups_and_content.ipynb)In this sample, we will clone the [Vector Basemaps](http://www.arcgis.com/home/group.html?id=30de8da907d240a0bccd5ad3ff25ef4a) group from ArcGIS Online to an ArcGIS Enterprise.
###Code
from arcgis.gis import GIS
from IPython.display import display
###Output
_____no_output_____
###Markdown
Define the source and target portalsTo start with, define the source and target portals.
###Code
# an anonymous connection to ArcGIS Online is sufficient,
# since we are cloning a public group
source = GIS()
target = GIS("https://pythonapi.playground.esri.com/portal")
###Output
_____no_output_____
###Markdown
Search for the group and its contents in the source portalIn the source portal, search for the group to be cloned. In our case the title of the group is 'Vector Basemaps'.
###Code
source_groups = source.groups.search("title:Vector Basemaps AND owner:esri", outside_org = True)
source_groups
source_group = source_groups[1]
source_group
###Output
_____no_output_____
###Markdown
List the items that are a part of the group 'Vector Basemaps'.
###Code
source_items = source_group.content()
source_items
###Output
_____no_output_____
###Markdown
Clone the group in the target portal if it does not already exist. We create a new group in the target portal with all the properties of the group in the source portal.
###Code
import tempfile
if not target.groups.search('Vector Basemaps'):
try:
with tempfile.TemporaryDirectory() as temp_dir:
thumbnail_file = source_group.download_thumbnail(temp_dir)
#create a group in the target portal with all the properties of the group in the source
target_group = target.groups.create(title = source_group.title,
tags = source_group.tags,
description = source_group.description,
snippet = source_group.snippet,
access = source_group.access,
thumbnail= thumbnail_file,
is_invitation_only = True,
sort_field = 'avgRating',
sort_order ='asc',
is_view_only=True)
#display the group
display(target_group)
except Exception as e:
print('Group {} could not be created'.format(source_group.title))
print(e)
else:
print('Group {} already exists in the portal'.format(source_group.title))
target_group = target.groups.search('Vector Basemaps')[0]
###Output
_____no_output_____
###Markdown
Clone the contents of the group to the target portalIt is possible that some items to be cloned may already be present on the target portal. In such a situation, we simply share those items with the target group. Thus, in the section below, we renew our list of items to be cloned by removing from it, any item that was existing on the target portal beforehand.
###Code
#making a list for the items to be cloned in the target portal
items_to_be_cloned = list(source_items)
#checking for the presence of the item in the target portal
for item in source_items:
searched_items = target.content.search(query='title:'+item.title, item_type = item.type)
for s_item in searched_items:
if s_item.title == item.title:
#if an item is not a part of the group in the target portal then share it
if s_item not in target_group.content():
s_item.share(groups= [target_group])
#remove the already existing item from the list of items to be cloned
items_to_be_cloned.remove(item)
#display the item
display(s_item)
break
###Output
_____no_output_____
###Markdown
Now after having removed the existing items from the list of items to be cloned, we can easily copy the remaining content of the source group to the newly created group in the target portal.
###Code
#cloning all items that were not present on the portal before
for item in items_to_be_cloned:
try:
with tempfile.TemporaryDirectory() as temp_dir:
thumbnail_file = item.download_thumbnail(temp_dir)
metadata_file = item.download_metadata(temp_dir)
target_item_properties = {'title': item.title,
'tags': item.tags,
'text':item.get_data(True),
'type':item.type,
'url':item.url
}
#create an item
target_item = target.content.add(target_item_properties, thumbnail=thumbnail_file)
#share that item with the group on the target portal
target_item.share(groups=[target_group])
#display the item
display(target_item)
except Exception as e:
print('Item {} could not be created in the target portal'.format(item.title))
print(e)
###Output
_____no_output_____
###Markdown
Clone a Group This sample notebook can be used for cloning one or more groups, either on the same portal or from one portal to another.**Note:** If you want to clone all portal users, groups and content refer to the sample [Clone Portal users, groups and content](./clone_portal_users_groups_and_content.ipynb)In this sample, we will clone the [Vector Basemaps](http://www.arcgis.com/home/group.html?id=30de8da907d240a0bccd5ad3ff25ef4a) group from ArcGIS Online to an ArcGIS Enterprise.
###Code
from arcgis.gis import GIS
from IPython.display import display
###Output
_____no_output_____
###Markdown
Define the source and target portalsTo start with, define the source and target portals.
###Code
# an anonymous connection to ArcGIS Online is sufficient,
# since we are cloning a public group
source = GIS()
target = GIS("https://pythonapi.playground.esri.com/portal")
###Output
_____no_output_____
###Markdown
Search for the group and its contents in the source portalIn the source portal, search for the group to be cloned. In our case the title of the group is 'Vector Basemaps'.
###Code
source_groups = source.groups.search("title:Vector Basemaps AND owner:esri", outside_org = True)
source_groups
source_group = source_groups[1]
source_group
###Output
_____no_output_____
###Markdown
List the items that are a part of the group 'Vector Basemaps'.
###Code
source_items = source_group.content()
source_items
###Output
_____no_output_____
###Markdown
Clone the group in the target portal if it does not already exist. We create a new group in the target portal with all the properties of the group in the source portal.
###Code
import tempfile
if not target.groups.search('Vector Basemaps'):
try:
with tempfile.TemporaryDirectory() as temp_dir:
thumbnail_file = source_group.download_thumbnail(temp_dir)
#create a group in the target portal with all the properties of the group in the source
target_group = target.groups.create(title = source_group.title,
tags = source_group.tags,
description = source_group.description,
snippet = source_group.snippet,
access = source_group.access,
thumbnail= thumbnail_file,
is_invitation_only = True,
sort_field = 'avgRating',
sort_order ='asc',
is_view_only=True)
#display the group
display(target_group)
except Exception as e:
print('Group {} could not be created'.format(source_group.title))
print(e)
else:
print('Group {} already exists in the portal'.format(source_group.title))
target_group = target.groups.search('Vector Basemaps')[0]
###Output
_____no_output_____
###Markdown
Clone the contents of the group to the target portalIt is possible that some items to be cloned may already be present on the target portal. In such a situation, we simply share those items with the target group. Thus, in the section below, we renew our list of items to be cloned by removing from it, any item that was existing on the target portal beforehand.
###Code
#making a list for the items to be cloned in the target portal
items_to_be_cloned = list(source_items)
#checking for the presence of the item in the target portal
for item in source_items:
searched_items = target.content.search(query='title:'+item.title, item_type = item.type)
for s_item in searched_items:
if s_item.title == item.title:
#if an item is not a part of the group in the target portal then share it
if s_item not in target_group.content():
s_item.share(groups= [target_group])
#remove the already existing item from the list of items to be cloned
items_to_be_cloned.remove(item)
#display the item
display(s_item)
break
###Output
_____no_output_____
###Markdown
Now after having removed the existing items from the list of items to be cloned, we can easily copy the remaining content of the source group to the newly created group in the target portal.
###Code
#cloning all items that were not present on the portal before
for item in items_to_be_cloned:
try:
with tempfile.TemporaryDirectory() as temp_dir:
thumbnail_file = item.download_thumbnail(temp_dir)
metadata_file = item.download_metadata(temp_dir)
target_item_properties = {'title': item.title,
'tags': item.tags,
'text':item.get_data(True),
'type':item.type,
'url':item.url
}
#create an item
target_item = target.content.add(target_item_properties, thumbnail=thumbnail_file)
#share that item with the group on the target portal
target_item.share(groups=[target_group])
#display the item
display(target_item)
except Exception as e:
print('Item {} could not be created in the target portal'.format(item.title))
print(e)
###Output
_____no_output_____
###Markdown
Clone a Group This sample notebook can be used for cloning one or more groups, either on the same portal or from one portal to another.**Note:** If you want to clone all portal users, groups and content refer to the sample [Clone Portal users, groups and content](./clone_portal_users_groups_and_content.ipynb)In this sample, we will clone the [Vector Basemaps](http://www.arcgis.com/home/group.html?id=30de8da907d240a0bccd5ad3ff25ef4a) group from ArcGIS Online to an ArcGIS Enterprise.
###Code
from arcgis.gis import GIS
from IPython.display import display
###Output
_____no_output_____
###Markdown
Define the source and target portalsTo start with, define the source and target portals.
###Code
# an anonymous connection to ArcGIS Online is sufficient,
# since we are cloning a public group
source = GIS()
target = GIS("https://pythonapi.playground.esri.com/portal", "arcgis_python", "amazing_arcgis_123")
###Output
_____no_output_____
###Markdown
Search for the group and its contents in the source portalIn the source portal, search for the group to be cloned. In our case the title of the group is 'Vector Basemaps'.
###Code
source_groups = source.groups.search("title:Vector Basemaps AND owner:esri", outside_org = True)
source_groups
source_group = source_groups[1]
source_group
###Output
_____no_output_____
###Markdown
List the items that are a part of the group 'Vector Basemaps'.
###Code
source_items = source_group.content()
source_items
###Output
_____no_output_____
###Markdown
Clone the group in the target portal if it does not already exist. We create a new group in the target portal with all the properties of the group in the source portal.
###Code
import tempfile
if not target.groups.search('Vector Basemaps'):
try:
with tempfile.TemporaryDirectory() as temp_dir:
thumbnail_file = source_group.download_thumbnail(temp_dir)
#create a group in the target portal with all the properties of the group in the source
target_group = target.groups.create(title = source_group.title,
tags = source_group.tags,
description = source_group.description,
snippet = source_group.snippet,
access = source_group.access,
thumbnail= thumbnail_file,
is_invitation_only = True,
sort_field = 'avgRating',
sort_order ='asc',
is_view_only=True)
#display the group
display(target_group)
except Exception as e:
print('Group {} could not be created'.format(source_group.title))
print(e)
else:
print('Group {} already exists in the portal'.format(source_group.title))
target_group = target.groups.search('Vector Basemaps')[0]
###Output
_____no_output_____
###Markdown
Clone the contents of the group to the target portalIt is possible that some items to be cloned may already be present on the target portal. In such a situation, we simply share those items with the target group. Thus, in the section below, we renew our list of items to be cloned by removing from it, any item that was existing on the target portal beforehand.
###Code
#making a list for the items to be cloned in the target portal
items_to_be_cloned = list(source_items)
#checking for the presence of the item in the target portal
for item in source_items:
searched_items = target.content.search(query='title:'+item.title, item_type = item.type)
for s_item in searched_items:
if s_item.title == item.title:
#if an item is not a part of the group in the target portal then share it
if s_item not in target_group.content():
s_item.share(groups= [target_group])
#remove the already existing item from the list of items to be cloned
items_to_be_cloned.remove(item)
#display the item
display(s_item)
break
###Output
_____no_output_____
###Markdown
Now after having removed the existing items from the list of items to be cloned, we can easily copy the remaining content of the source group to the newly created group in the target portal.
###Code
#cloning all items that were not present on the portal before
for item in items_to_be_cloned:
try:
with tempfile.TemporaryDirectory() as temp_dir:
thumbnail_file = item.download_thumbnail(temp_dir)
metadata_file = item.download_metadata(temp_dir)
target_item_properties = {'title': item.title,
'tags': item.tags,
'text':item.get_data(True),
'type':item.type,
'url':item.url
}
#create an item
target_item = target.content.add(target_item_properties, thumbnail=thumbnail_file)
#share that item with the group on the target portal
target_item.share(groups=[target_group])
#display the item
display(target_item)
except Exception as e:
print('Item {} could not be created in the target portal'.format(item.title))
print(e)
###Output
_____no_output_____
###Markdown
Clone a Group This sample notebook can be used for cloning one or more groups, either on the same portal or from one portal to another.**Note:** If you want to clone all portal users, groups and content refer to the sample [Clone Portal users, groups and content](python/sample-notebooks/clone-portal-users-groups-and-content/)In this sample, we will clone the [Vector Basemaps](http://www.arcgis.com/home/group.html?id=30de8da907d240a0bccd5ad3ff25ef4a) group from ArcGIS Online to an ArcGIS Enterprise.
###Code
from arcgis.gis import GIS
from IPython.display import display
###Output
_____no_output_____
###Markdown
Define the source and target portalsTo start with, define the source and target portals.
###Code
# an anonymous connection to ArcGIS Online is sufficient,
# since we are cloning a public group
source = GIS()
target = GIS("https://python.playground.esri.com/portal", "arcgis_python", "amazing_arcgis_123")
###Output
_____no_output_____
###Markdown
Search for the group and its contents in the source portalIn the source portal, search for the group to be cloned. In our case the title of the group is 'Vector Basemaps'.
###Code
source_group = source.groups.search("title:Vector Basemaps AND owner:esri", outside_org = True)[0]
source_group
###Output
_____no_output_____
###Markdown
List the items that are a part of the group 'Vector Basemaps'.
###Code
source_items = source_group.content()
source_items
###Output
_____no_output_____
###Markdown
Clone the group in the target portal if it does not already exist. We create a new group in the target portal with all the properties of the group in the source portal.
###Code
import tempfile
if not target.groups.search('Vector Basemaps'):
try:
with tempfile.TemporaryDirectory() as temp_dir:
thumbnail_file = source_group.download_thumbnail(temp_dir)
#create a group in the target portal with all the properties of the group in the source
target_group = target.groups.create(title = source_group.title,
tags = source_group.tags,
description = source_group.description,
snippet = source_group.snippet,
access = source_group.access,
thumbnail= thumbnail_file,
is_invitation_only = True,
sort_field = 'avgRating',
sort_order ='asc',
is_view_only=True)
#display the group
display(target_group)
except Exception as e:
print('Group {} could not be created'.format(source_group.title))
print(e)
else:
print('Group {} already exists in the portal'.format(source_group.title))
target_group = target.groups.search('Vector Basemaps')[0]
###Output
_____no_output_____
###Markdown
Clone the contents of the group to the target portalIt is possible that some items to be cloned may already be present on the target portal. In such a situation, we simply share those items with the target group. Thus, in the section below, we renew our list of items to be cloned by removing from it, any item that was existing on the target portal beforehand.
###Code
#making a list for the items to be cloned in the target portal
items_to_be_cloned = list(source_items)
#checking for the presence of the item in the target portal
for item in source_items:
searched_items = target.content.search(query='title:'+item.title, item_type = item.type)
for s_item in searched_items:
if s_item.title == item.title:
#if an item is not a part of the group in the target portal then share it
if s_item not in target_group.content():
s_item.share(groups= [target_group])
#remove the already existing item from the list of items to be cloned
items_to_be_cloned.remove(item)
#display the item
display(s_item)
break
###Output
_____no_output_____
###Markdown
Now after having removed the existing items from the list of items to be cloned, we can easily copy the remaining content of the source group to the newly created group in the target portal.
###Code
#cloning all items that were not present on the portal before
for item in items_to_be_cloned:
try:
with tempfile.TemporaryDirectory() as temp_dir:
thumbnail_file = item.download_thumbnail(temp_dir)
metadata_file = item.download_metadata(temp_dir)
target_item_properties = {'title': item.title,
'tags': item.tags,
'text':item.get_data(True),
'type':item.type,
'url':item.url
}
#create an item
target_item = target.content.add(target_item_properties, thumbnail=thumbnail_file)
#share that item with the group on the target portal
target_item.share(groups=[target_group])
#display the item
display(target_item)
except Exception as e:
print('Item {} could not be created in the target portal'.format(item.title))
print(e)
###Output
_____no_output_____ |
tutorials/image-classification-mnist-data/img-classification-part2-deploy.ipynb | ###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Tutorial 2: Deploy an image classification model in Azure Container Instance (ACI)This tutorial is **part two of a two-part tutorial series**. In the [previous tutorial](img-classification-part1-training.ipynb), you trained machine learning models and then registered a model in your workspace on the cloud. Now, you're ready to deploy the model as a web service in [Azure Container Instances](https://docs.microsoft.com/azure/container-instances/) (ACI). A web service is an image, in this case a Docker image, that encapsulates the scoring logic and the model itself. In this part of the tutorial, you use Azure Machine Learning service (Preview) to:> * Set up your testing environment> * Retrieve the model from your workspace> * Test the model locally> * Deploy the model to ACI> * Test the deployed modelACI is a great solution for testing and understanding the workflow. For scalable production deployments, consider using Azure Kubernetes Service. For more information, see [how to deploy and where](https://docs.microsoft.com/azure/machine-learning/service/how-to-deploy-and-where). PrerequisitesComplete the model training in the [Tutorial 1: Train an image classification model with Azure Machine Learning](train-models.ipynb) notebook.
###Code
# If you did NOT complete the tutorial, you can instead run this cell
# This will register a model and download the data needed for this tutorial
# These prerequisites are created in the training tutorial
# Feel free to skip this cell if you completed the training tutorial
# register a model
from azureml.core import Workspace
ws = Workspace.from_config()
from azureml.core.model import Model
model_name = "sklearn_mnist"
model = Model.register(model_path="sklearn_mnist_model.pkl",
model_name=model_name,
tags={"data": "mnist", "model": "classification"},
description="Mnist handwriting recognition",
workspace=ws)
from azureml.core.environment import Environment
from azureml.core.conda_dependencies import CondaDependencies
# to install required packages
env = Environment('tutorial-env')
cd = CondaDependencies.create(pip_packages=['azureml-dataset-runtime[pandas,fuse]', 'azureml-defaults'], conda_packages = ['scikit-learn==0.22.1'])
env.python.conda_dependencies = cd
# Register environment to re-use later
env.register(workspace = ws)
###Output
_____no_output_____
###Markdown
Set up the environmentStart by setting up a testing environment. Import packagesImport the Python packages needed for this tutorial.
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import azureml.core
# display the core SDK version number
print("Azure ML SDK Version: ", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Deploy as web serviceDeploy the model as a web service hosted in ACI. To build the correct environment for ACI, provide the following:* A scoring script to show how to use the model* A configuration file to build the ACI* The model you trained before Create scoring scriptCreate the scoring script, called score.py, used by the web service call to show how to use the model.You must include two required functions into the scoring script:* The `init()` function, which typically loads the model into a global object. This function is run only once when the Docker container is started. * The `run(input_data)` function uses the model to predict a value based on the input data. Inputs and outputs to the run typically use JSON for serialization and de-serialization, but other formats are supported.
###Code
%%writefile score.py
import json
import numpy as np
import os
import pickle
import joblib
def init():
global model
# AZUREML_MODEL_DIR is an environment variable created during deployment.
# It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)
# For multiple models, it points to the folder containing all deployed models (./azureml-models)
model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'sklearn_mnist_model.pkl')
model = joblib.load(model_path)
def run(raw_data):
data = np.array(json.loads(raw_data)['data'])
# make prediction
y_hat = model.predict(data)
# you can return any data type as long as it is JSON-serializable
return y_hat.tolist()
###Output
_____no_output_____
###Markdown
Create configuration fileCreate a deployment configuration file and specify the number of CPUs and gigabyte of RAM needed for your ACI container. While it depends on your model, the default of 1 core and 1 gigabyte of RAM is usually sufficient for many models. If you feel you need more later, you would have to recreate the image and redeploy the service.
###Code
from azureml.core.webservice import AciWebservice
aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
memory_gb=1,
tags={"data": "MNIST", "method" : "sklearn"},
description='Predict MNIST with sklearn')
###Output
_____no_output_____
###Markdown
Deploy in ACIEstimated time to complete: **about 2-5 minutes**Configure the image and deploy. The following code goes through these steps:1. Create environment object containing dependencies needed by the model (here, the registered `tutorial-env` environment)1. Create inference configuration necessary to deploy the model as a web service using: * The scoring file (`score.py`) * environment object created in previous step1. Deploy the model to the ACI container.1. Get the web service HTTP endpoint.
###Code
%%time
import uuid
from azureml.core.webservice import Webservice
from azureml.core.model import InferenceConfig
from azureml.core.environment import Environment
from azureml.core import Workspace
from azureml.core.model import Model
ws = Workspace.from_config()
model = Model(ws, 'sklearn_mnist')
myenv = Environment.get(workspace=ws, name="tutorial-env", version="1")
inference_config = InferenceConfig(entry_script="score.py", environment=myenv)
service_name = 'sklearn-mnist-svc-' + str(uuid.uuid4())[:4]
service = Model.deploy(workspace=ws,
name=service_name,
models=[model],
inference_config=inference_config,
deployment_config=aciconfig)
service.wait_for_deployment(show_output=True)
###Output
_____no_output_____
###Markdown
Get the scoring web service's HTTP endpoint, which accepts REST client calls. This endpoint can be shared with anyone who wants to test the web service or integrate it into an application.
###Code
print(service.scoring_uri)
###Output
_____no_output_____
###Markdown
Test the model Download test dataDownload the test data to the **./data/** directory
###Code
import os
from azureml.core import Dataset
from azureml.opendatasets import MNIST
data_folder = os.path.join(os.getcwd(), 'data')
os.makedirs(data_folder, exist_ok=True)
mnist_file_dataset = MNIST.get_file_dataset()
mnist_file_dataset.download(data_folder, overwrite=True)
###Output
_____no_output_____
###Markdown
Load test dataLoad the test data from the **./data/** directory created during the training tutorial.
###Code
from utils import load_data
import os
import glob
data_folder = os.path.join(os.getcwd(), 'data')
# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster
X_test = load_data(glob.glob(os.path.join(data_folder,"**/t10k-images-idx3-ubyte.gz"), recursive=True)[0], False) / 255.0
y_test = load_data(glob.glob(os.path.join(data_folder,"**/t10k-labels-idx1-ubyte.gz"), recursive=True)[0], True).reshape(-1)
###Output
_____no_output_____
###Markdown
Predict test dataFeed the test dataset to the model to get predictions.The following code goes through these steps:1. Send the data as a JSON array to the web service hosted in ACI. 1. Use the SDK's `run` API to invoke the service. You can also make raw calls using any HTTP tool such as curl.
###Code
import json
test = json.dumps({"data": X_test.tolist()})
test = bytes(test, encoding='utf8')
y_hat = service.run(input_data=test)
###Output
_____no_output_____
###Markdown
Examine the confusion matrixGenerate a confusion matrix to see how many samples from the test set are classified correctly. Notice the mis-classified value for the incorrect predictions.
###Code
from sklearn.metrics import confusion_matrix
conf_mx = confusion_matrix(y_test, y_hat)
print(conf_mx)
print('Overall accuracy:', np.average(y_hat == y_test))
###Output
_____no_output_____
###Markdown
Use `matplotlib` to display the confusion matrix as a graph. In this graph, the X axis represents the predicted values, and the Y axis represents the actual values. The color in each grid represents the error rate. The lighter the color, the higher the error rate is. For example, many 5's are mis-classified as 3's. Hence you see a bright grid at (5,3).
###Code
# normalize the diagonal cells so that they don't overpower the rest of the cells when visualized
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
fig = plt.figure(figsize=(8,5))
ax = fig.add_subplot(111)
cax = ax.matshow(norm_conf_mx, cmap=plt.cm.bone)
ticks = np.arange(0, 10, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(ticks)
ax.set_yticklabels(ticks)
fig.colorbar(cax)
plt.ylabel('true labels', fontsize=14)
plt.xlabel('predicted values', fontsize=14)
plt.savefig('conf.png')
plt.show()
###Output
_____no_output_____
###Markdown
Show predictions

Test the deployed model with a random sample of 30 images from the test data.

1. Print the returned predictions and plot them along with the input images. Red font and an inverse image (white on black) are used to highlight the misclassified samples. Since the model accuracy is high, you might have to run the following code a few times before you see a misclassified sample.
###Code
import json
# find 30 random samples from test set
n = 30
sample_indices = np.random.permutation(X_test.shape[0])[0:n]
test_samples = json.dumps({"data": X_test[sample_indices].tolist()})
test_samples = bytes(test_samples, encoding='utf8')
# predict using the deployed model
result = service.run(input_data=test_samples)
# compare actual value vs. the predicted values:
i = 0
plt.figure(figsize = (20, 1))
for s in sample_indices:
plt.subplot(1, n, i + 1)
plt.axhline('')
plt.axvline('')
# use different color for misclassified sample
font_color = 'red' if y_test[s] != result[i] else 'black'
clr_map = plt.cm.gray if y_test[s] != result[i] else plt.cm.Greys
plt.text(x=10, y =-10, s=result[i], fontsize=18, color=font_color)
plt.imshow(X_test[s].reshape(28, 28), cmap=clr_map)
i = i + 1
plt.show()
###Output
_____no_output_____
###Markdown
You can also send a raw HTTP request to test the web service.
###Code
import requests
# send a random row from the test set to score
random_index = np.random.randint(0, len(X_test)-1)
input_data = "{\"data\": [" + str(list(X_test[random_index])) + "]}"
headers = {'Content-Type':'application/json'}
# for an AKS deployment you'd need to include the service key in the header as well
# api_key = service.get_key()
# headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
resp = requests.post(service.scoring_uri, input_data, headers=headers)
print("POST to url", service.scoring_uri)
#print("input data:", input_data)
print("label:", y_test[random_index])
print("prediction:", resp.text)
###Output
_____no_output_____
###Markdown
Clean up resources

To keep the resource group and workspace for other tutorials and exploration, you can delete only the ACI deployment using this API call:
###Code
service.delete()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.

Licensed under the MIT License.

Tutorial 2: Deploy an image classification model in Azure Container Instance (ACI)

This tutorial is **part two of a two-part tutorial series**. In the [previous tutorial](img-classification-part1-training.ipynb), you trained machine learning models and then registered a model in your workspace on the cloud. Now, you're ready to deploy the model as a web service in [Azure Container Instances](https://docs.microsoft.com/azure/container-instances/) (ACI). A web service is an image, in this case a Docker image, that encapsulates the scoring logic and the model itself.

In this part of the tutorial, you use Azure Machine Learning service (Preview) to:

> * Set up your testing environment
> * Retrieve the model from your workspace
> * Test the model locally
> * Deploy the model to ACI
> * Test the deployed model

ACI is a great solution for testing and understanding the workflow. For scalable production deployments, consider using Azure Kubernetes Service. For more information, see [how to deploy and where](https://docs.microsoft.com/azure/machine-learning/service/how-to-deploy-and-where).

Prerequisites

Complete the model training in the [Tutorial 1: Train an image classification model with Azure Machine Learning](train-models.ipynb) notebook.
###Code
# If you did NOT complete the tutorial, you can instead run this cell
# This will register a model and download the data needed for this tutorial
# These prerequisites are created in the training tutorial
# Feel free to skip this cell if you completed the training tutorial
# register a model
from azureml.core import Workspace
ws = Workspace.from_config()
from azureml.core.model import Model
model_name = "sklearn_mnist"
model = Model.register(model_path="sklearn_mnist_model.pkl",
model_name=model_name,
tags={"data": "mnist", "model": "classification"},
description="Mnist handwriting recognition",
workspace=ws)
from azureml.core.environment import Environment
from azureml.core.conda_dependencies import CondaDependencies
# to install required packages
env = Environment('tutorial-env')
cd = CondaDependencies.create(pip_packages=['azureml-dataset-runtime[pandas,fuse]', 'azureml-defaults'], conda_packages = ['scikit-learn==0.22.1'])
env.python.conda_dependencies = cd
# Register environment to re-use later
env.register(workspace = ws)
###Output
Warning: Falling back to use azure cli login credentials.
If you run your code in unattended mode, i.e., where you can't give a user input, then we recommend to use ServicePrincipalAuthentication or MsiAuthentication.
Please refer to aka.ms/aml-notebook-auth for different authentication mechanisms in azureml-sdk.
###Markdown
Set up the environment

Start by setting up a testing environment.

Import packages

Import the Python packages needed for this tutorial.
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import azureml.core
# display the core SDK version number
print("Azure ML SDK Version: ", azureml.core.VERSION)
###Output
Azure ML SDK Version: 1.26.0
###Markdown
Deploy as web service

Deploy the model as a web service hosted in ACI. To build the correct environment for ACI, provide the following:

* A scoring script to show how to use the model
* A configuration file to build the ACI
* The model you trained before

Create scoring script

Create the scoring script, called score.py, used by the web service call to show how to use the model. You must include two required functions in the scoring script:

* The `init()` function, which typically loads the model into a global object. This function is run only once, when the Docker container is started.
* The `run(input_data)` function uses the model to predict a value based on the input data. Inputs and outputs to `run` typically use JSON for serialization and de-serialization, but other formats are supported.
###Code
%%writefile score.py
import json
import numpy as np
import os
import pickle
import joblib
def init():
global model
# AZUREML_MODEL_DIR is an environment variable created during deployment.
# It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)
# For multiple models, it points to the folder containing all deployed models (./azureml-models)
model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'sklearn_mnist_model.pkl')
model = joblib.load(model_path)
def run(raw_data):
data = np.array(json.loads(raw_data)['data'])
# make prediction
y_hat = model.predict(data)
# you can return any data type as long as it is JSON-serializable
return y_hat.tolist()
###Output
Writing score.py
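###Markdown
As a quick sanity check (a minimal sketch, not part of the original tutorial), you can exercise `score.py` locally before deploying. This assumes the registered model file `sklearn_mnist_model.pkl` sits in the current directory, and it points `AZUREML_MODEL_DIR` there to mimic how the deployment sets that variable inside the container.
###Code
import json
import os
import numpy as np
# hypothetical local smoke test for score.py: point AZUREML_MODEL_DIR at the current
# directory so init() can locate sklearn_mnist_model.pkl, then score one dummy 784-pixel image
os.environ['AZUREML_MODEL_DIR'] = os.getcwd()
import score
score.init()
dummy = json.dumps({"data": np.zeros((1, 784)).tolist()})
print(score.run(dummy))
###Output
_____no_output_____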
###Markdown
Create configuration file

Create a deployment configuration file and specify the number of CPU cores and gigabytes of RAM needed for your ACI container. While it depends on your model, the default of 1 core and 1 gigabyte of RAM is usually sufficient for many models. If you need more later, you have to recreate the image and redeploy the service.
###Code
from azureml.core.webservice import AciWebservice
aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
memory_gb=1,
tags={"data": "MNIST", "method" : "sklearn"},
description='Predict MNIST with sklearn')
###Output
_____no_output_____
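###Markdown
By default this ACI deployment has no key authentication, which is why the scoring calls later in the tutorial send no key. If you want the endpoint protected, ACI also supports key-based auth; a sketch of that alternative configuration (not used in the rest of this notebook):
###Code
# alternative ACI configuration with key-based authentication enabled (hypothetical; unused below)
aciconfig_auth = AciWebservice.deploy_configuration(cpu_cores=1,
                                                    memory_gb=1,
                                                    auth_enabled=True,
                                                    tags={"data": "MNIST", "method": "sklearn"},
                                                    description='Predict MNIST with sklearn (auth enabled)')
###Output
_____no_output_____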
###Markdown
Deploy in ACI

Estimated time to complete: **about 2-5 minutes**

Configure the image and deploy. The following code goes through these steps:

1. Create an environment object containing the dependencies needed by the model, using the environment file (`myenv.yml`).
1. Create the inference configuration necessary to deploy the model as a web service, using:
   * the scoring file (`score.py`)
   * the environment object created in the previous step.
1. Deploy the model to the ACI container.
1. Get the web service HTTP endpoint.
###Code
%%time
import uuid
from azureml.core.webservice import Webservice
from azureml.core.model import InferenceConfig
from azureml.core.environment import Environment
from azureml.core import Workspace
from azureml.core.model import Model
ws = Workspace.from_config()
model = Model(ws, 'sklearn_mnist')
myenv = Environment.get(workspace=ws, name="tutorial-env", version="1")
inference_config = InferenceConfig(entry_script="score.py", environment=myenv)
service_name = 'sklearn-mnist-svc-' + str(uuid.uuid4())[:4]
service = Model.deploy(workspace=ws,
name=service_name,
models=[model],
inference_config=inference_config,
deployment_config=aciconfig)
service.wait_for_deployment(show_output=True)
###Output
Tips: You can try get_logs(): https://aka.ms/debugimage#dockerlog or local deployment: https://aka.ms/debugimage#debug-locally to debug if deployment takes longer than 10 minutes.
Running
2021-04-11 20:43:57+05:30 Creating Container Registry if not exists.
2021-04-11 20:43:58+05:30 Registering the environment.
2021-04-11 20:43:59+05:30 Building image..
2021-04-11 20:50:23+05:30 Generating deployment configuration.
2021-04-11 20:50:25+05:30 Submitting deployment to compute..
2021-04-11 20:50:33+05:30 Checking the status of deployment sklearn-mnist-svc-6bf6..
2021-04-11 20:53:27+05:30 Checking the status of inference endpoint sklearn-mnist-svc-6bf6.
Succeeded
ACI service creation operation finished, operation "Succeeded"
Wall time: 9min 58s
###Markdown
Get the scoring web service's HTTP endpoint, which accepts REST client calls. This endpoint can be shared with anyone who wants to test the web service or integrate it into an application.
###Code
print(service.scoring_uri)
###Output
http://d18dd3e9-aa56-4268-8733-73fec7f0c6c0.eastus.azurecontainer.io/score
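###Markdown
If a call to the endpoint fails, it can help to inspect the service state and the container logs before debugging the client side. A minimal sketch using the SDK helpers on the `service` object from the deployment cell:
###Code
# check service health and print the tail of the container logs if something looks off
print(service.state)
logs = service.get_logs()
print(logs[-1000:] if logs else "no logs available")
###Output
_____no_output_____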
###Markdown
Test the model

Download test data

Download the test data to the **./data/** directory.
###Code
import os
from azureml.core import Dataset
from azureml.opendatasets import MNIST
data_folder = os.path.join(os.getcwd(), 'data')
os.makedirs(data_folder, exist_ok=True)
mnist_file_dataset = MNIST.get_file_dataset()
mnist_file_dataset.download(data_folder, overwrite=True)
###Output
_____no_output_____
###Markdown
Load test data

Load the test data from the **./data/** directory created during the training tutorial.
###Code
from utils import load_data
import os
import glob
data_folder = os.path.join(os.getcwd(), 'data')
# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster
X_test = load_data(glob.glob(os.path.join(data_folder,"**/t10k-images-idx3-ubyte.gz"), recursive=True)[0], False) / 255.0
y_test = load_data(glob.glob(os.path.join(data_folder,"**/t10k-labels-idx1-ubyte.gz"), recursive=True)[0], True).reshape(-1)
###Output
_____no_output_____
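###Markdown
A quick check of what `load_data` returned (a sketch; the expected shapes assume the standard MNIST test split of 10,000 images flattened to 784 pixels):
###Code
# X_test should be (10000, 784) floats scaled to [0, 1]; y_test should be (10000,) integer labels
print(X_test.shape, X_test.min(), X_test.max())
print(y_test.shape, y_test[:10])
###Output
_____no_output_____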
###Markdown
Predict test data

Feed the test dataset to the model to get predictions. The following code goes through these steps:

1. Send the data as a JSON array to the web service hosted in ACI.
1. Use the SDK's `run` API to invoke the service. You can also make raw calls using any HTTP tool such as curl.
###Code
import json
test = json.dumps({"data": X_test.tolist()})
test = bytes(test, encoding='utf8')
y_hat = service.run(input_data=test)
###Output
_____no_output_____
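###Markdown
Sending all 10,000 test images in a single request produces a large JSON payload. If the service rejects or times out on a request of that size, one workaround (a sketch, not part of the original tutorial) is to score in smaller batches and concatenate the predictions:
###Code
# score the test set in batches of 1000 rows and stitch the predictions back together
batch_size = 1000
y_hat = []
for start in range(0, X_test.shape[0], batch_size):
    batch = json.dumps({"data": X_test[start:start + batch_size].tolist()})
    y_hat.extend(service.run(input_data=bytes(batch, encoding='utf8')))
y_hat = np.array(y_hat)
###Output
_____no_output_____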
###Markdown
Examine the confusion matrix

Generate a confusion matrix to see how many samples from the test set are classified correctly. Notice the off-diagonal values, which count the mis-classified predictions.
###Code
from sklearn.metrics import confusion_matrix
conf_mx = confusion_matrix(y_test, y_hat)
print(conf_mx)
print('Overall accuracy:', np.average(y_hat == y_test))
###Output
[[ 960 0 2 2 1 4 6 3 1 1]
[ 0 1113 3 1 0 1 5 1 11 0]
[ 9 8 919 20 9 5 10 12 37 3]
[ 4 0 17 918 2 24 4 11 21 9]
[ 1 4 4 3 913 0 10 3 5 39]
[ 10 2 0 42 11 768 17 7 28 7]
[ 9 3 7 2 6 20 907 1 3 0]
[ 2 9 22 5 8 1 1 948 5 27]
[ 10 15 5 21 15 26 7 11 852 12]
[ 7 8 2 14 32 13 0 26 12 895]]
Overall accuracy: 0.9193
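###Markdown
Per-class accuracy can be read straight off the diagonal of the confusion matrix. A small sketch, assuming `conf_mx` from the cell above:
###Code
# per-class accuracy: diagonal count divided by the number of true samples of that class
per_class_acc = np.diag(conf_mx) / conf_mx.sum(axis=1)
for digit, acc in enumerate(per_class_acc):
    print(f"digit {digit}: {acc:.3f}")
###Output
_____no_output_____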
###Markdown
Use `matplotlib` to display the confusion matrix as a graph. In this graph, the Y axis represents the actual (true) labels and the X axis represents the predicted values, matching the axis labels in the plotting code. The color of each grid cell represents the error rate: the lighter the color, the higher the error rate. For example, many 5's are mis-classified as 3's, so you see a bright cell at row 5, column 3.
###Code
# normalize the diagonal cells so that they don't overpower the rest of the cells when visualized
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
fig = plt.figure(figsize=(8,5))
ax = fig.add_subplot(111)
cax = ax.matshow(norm_conf_mx, cmap=plt.cm.bone)
ticks = np.arange(0, 10, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(ticks)
ax.set_yticklabels(ticks)
fig.colorbar(cax)
plt.ylabel('true labels', fontsize=14)
plt.xlabel('predicted values', fontsize=14)
plt.savefig('conf.png')
plt.show()
###Output
_____no_output_____
###Markdown
Show predictions

Test the deployed model with a random sample of 30 images from the test data.

1. Print the returned predictions and plot them along with the input images. Red font and an inverse image (white on black) are used to highlight the misclassified samples. Since the model accuracy is high, you might have to run the following code a few times before you see a misclassified sample.
###Code
import json
# find 30 random samples from test set
n = 30
sample_indices = np.random.permutation(X_test.shape[0])[0:n]
test_samples = json.dumps({"data": X_test[sample_indices].tolist()})
test_samples = bytes(test_samples, encoding='utf8')
# predict using the deployed model
result = service.run(input_data=test_samples)
# compare actual value vs. the predicted values:
i = 0
plt.figure(figsize = (20, 1))
for s in sample_indices:
plt.subplot(1, n, i + 1)
plt.axhline('')
plt.axvline('')
# use different color for misclassified sample
font_color = 'red' if y_test[s] != result[i] else 'black'
clr_map = plt.cm.gray if y_test[s] != result[i] else plt.cm.Greys
plt.text(x=10, y =-10, s=result[i], fontsize=18, color=font_color)
plt.imshow(X_test[s].reshape(28, 28), cmap=clr_map)
i = i + 1
plt.show()
###Output
_____no_output_____
###Markdown
You can also send a raw HTTP request to test the web service.
###Code
import requests
# send a random row from the test set to score
random_index = np.random.randint(0, len(X_test)-1)
input_data = "{\"data\": [" + str(list(X_test[random_index])) + "]}"
headers = {'Content-Type':'application/json'}
# for an AKS deployment you'd need to include the service key in the header as well
# api_key = service.get_key()
# headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
resp = requests.post(service.scoring_uri, input_data, headers=headers)
print("POST to url", service.scoring_uri)
#print("input data:", input_data)
print("label:", y_test[random_index])
print("prediction:", resp.text)
input_data
###Output
_____no_output_____
###Markdown
Clean up resources

To keep the resource group and workspace for other tutorials and exploration, you can delete only the ACI deployment using this API call:
###Code
service.delete()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.

Licensed under the MIT License.

Tutorial 2: Deploy an image classification model in Azure Container Instance (ACI)

This tutorial is **part two of a two-part tutorial series**. In the [previous tutorial](img-classification-part1-training.ipynb), you trained machine learning models and then registered a model in your workspace on the cloud. Now, you're ready to deploy the model as a web service in [Azure Container Instances](https://docs.microsoft.com/azure/container-instances/) (ACI). A web service is an image, in this case a Docker image, that encapsulates the scoring logic and the model itself.

In this part of the tutorial, you use Azure Machine Learning service (Preview) to:

> * Set up your testing environment
> * Retrieve the model from your workspace
> * Test the model locally
> * Deploy the model to ACI
> * Test the deployed model

ACI is a great solution for testing and understanding the workflow. For scalable production deployments, consider using Azure Kubernetes Service. For more information, see [how to deploy and where](https://docs.microsoft.com/azure/machine-learning/service/how-to-deploy-and-where).

Prerequisites

Complete the model training in the [Tutorial 1: Train an image classification model with Azure Machine Learning](train-models.ipynb) notebook.
###Code
# If you did NOT complete the tutorial, you can instead run this cell
# This will register a model and download the data needed for this tutorial
# These prerequisites are created in the training tutorial
# Feel free to skip this cell if you completed the training tutorial
# register a model
from azureml.core import Workspace
ws = Workspace.from_config()
from azureml.core.model import Model
model_name = "sklearn_mnist"
model = Model.register(model_path="sklearn_mnist_model.pkl",
model_name=model_name,
tags={"data": "mnist", "model": "classification"},
description="Mnist handwriting recognition",
workspace=ws)
from azureml.core.environment import Environment
from azureml.core.conda_dependencies import CondaDependencies
# to install required packages
env = Environment('tutorial-env')
cd = CondaDependencies.create(pip_packages=['azureml-dataprep[pandas,fuse]>=1.1.14', 'azureml-defaults'], conda_packages = ['scikit-learn==0.22.1'])
env.python.conda_dependencies = cd
# Register environment to re-use later
env.register(workspace = ws)
###Output
_____no_output_____
###Markdown
Set up the environment

Start by setting up a testing environment.

Import packages

Import the Python packages needed for this tutorial.
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import azureml.core
# display the core SDK version number
print("Azure ML SDK Version: ", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Deploy as web service

Deploy the model as a web service hosted in ACI. To build the correct environment for ACI, provide the following:

* A scoring script to show how to use the model
* A configuration file to build the ACI
* The model you trained before

Create scoring script

Create the scoring script, called score.py, used by the web service call to show how to use the model. You must include two required functions in the scoring script:

* The `init()` function, which typically loads the model into a global object. This function is run only once, when the Docker container is started.
* The `run(input_data)` function uses the model to predict a value based on the input data. Inputs and outputs to `run` typically use JSON for serialization and de-serialization, but other formats are supported.
###Code
%%writefile score.py
import json
import numpy as np
import os
import pickle
import joblib
def init():
global model
# AZUREML_MODEL_DIR is an environment variable created during deployment.
# It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)
# For multiple models, it points to the folder containing all deployed models (./azureml-models)
model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'sklearn_mnist_model.pkl')
model = joblib.load(model_path)
def run(raw_data):
data = np.array(json.loads(raw_data)['data'])
# make prediction
y_hat = model.predict(data)
# you can return any data type as long as it is JSON-serializable
return y_hat.tolist()
###Output
_____no_output_____
###Markdown
Create configuration file

Create a deployment configuration file and specify the number of CPU cores and gigabytes of RAM needed for your ACI container. While it depends on your model, the default of 1 core and 1 gigabyte of RAM is usually sufficient for many models. If you need more later, you have to recreate the image and redeploy the service.
###Code
from azureml.core.webservice import AciWebservice
aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
memory_gb=1,
tags={"data": "MNIST", "method" : "sklearn"},
description='Predict MNIST with sklearn')
###Output
_____no_output_____
###Markdown
Deploy in ACI

Estimated time to complete: **about 2-5 minutes**

Configure the image and deploy. The following code goes through these steps:

1. Create an environment object containing the dependencies needed by the model, using the environment file (`myenv.yml`).
1. Create the inference configuration necessary to deploy the model as a web service, using:
   * the scoring file (`score.py`)
   * the environment object created in the previous step.
1. Deploy the model to the ACI container.
1. Get the web service HTTP endpoint.
###Code
%%time
import uuid
from azureml.core.webservice import Webservice
from azureml.core.model import InferenceConfig
from azureml.core.environment import Environment
from azureml.core import Workspace
from azureml.core.model import Model
ws = Workspace.from_config()
model = Model(ws, 'sklearn_mnist')
myenv = Environment.get(workspace=ws, name="tutorial-env", version="1")
inference_config = InferenceConfig(entry_script="score.py", environment=myenv)
service_name = 'sklearn-mnist-svc-' + str(uuid.uuid4())[:4]
service = Model.deploy(workspace=ws,
name=service_name,
models=[model],
inference_config=inference_config,
deployment_config=aciconfig)
service.wait_for_deployment(show_output=True)
###Output
_____no_output_____
###Markdown
Get the scoring web service's HTTP endpoint, which accepts REST client calls. This endpoint can be shared with anyone who wants to test the web service or integrate it into an application.
###Code
print(service.scoring_uri)
###Output
_____no_output_____
###Markdown
Test the model

Download test data

Download the test data to the **./data/** directory.
###Code
import os
from azureml.core import Dataset
from azureml.opendatasets import MNIST
data_folder = os.path.join(os.getcwd(), 'data')
os.makedirs(data_folder, exist_ok=True)
mnist_file_dataset = MNIST.get_file_dataset()
mnist_file_dataset.download(data_folder, overwrite=True)
###Output
_____no_output_____
###Markdown
Load test data

Load the test data from the **./data/** directory created during the training tutorial.
###Code
from utils import load_data
import os
import glob
data_folder = os.path.join(os.getcwd(), 'data')
# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster
X_test = load_data(glob.glob(os.path.join(data_folder,"**/t10k-images-idx3-ubyte.gz"), recursive=True)[0], False) / 255.0
y_test = load_data(glob.glob(os.path.join(data_folder,"**/t10k-labels-idx1-ubyte.gz"), recursive=True)[0], True).reshape(-1)
###Output
_____no_output_____
###Markdown
Predict test data

Feed the test dataset to the model to get predictions. The following code goes through these steps:

1. Send the data as a JSON array to the web service hosted in ACI.
1. Use the SDK's `run` API to invoke the service. You can also make raw calls using any HTTP tool such as curl.
###Code
import json
test = json.dumps({"data": X_test.tolist()})
test = bytes(test, encoding='utf8')
y_hat = service.run(input_data=test)
###Output
_____no_output_____
###Markdown
Examine the confusion matrix

Generate a confusion matrix to see how many samples from the test set are classified correctly. Notice the off-diagonal values, which count the mis-classified predictions.
###Code
from sklearn.metrics import confusion_matrix
conf_mx = confusion_matrix(y_test, y_hat)
print(conf_mx)
print('Overall accuracy:', np.average(y_hat == y_test))
###Output
_____no_output_____
###Markdown
Use `matplotlib` to display the confusion matrix as a graph. In this graph, the Y axis represents the actual (true) labels and the X axis represents the predicted values, matching the axis labels in the plotting code. The color of each grid cell represents the error rate: the lighter the color, the higher the error rate. For example, many 5's are mis-classified as 3's, so you see a bright cell at row 5, column 3.
###Code
# normalize the diagonal cells so that they don't overpower the rest of the cells when visualized
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
fig = plt.figure(figsize=(8,5))
ax = fig.add_subplot(111)
cax = ax.matshow(norm_conf_mx, cmap=plt.cm.bone)
ticks = np.arange(0, 10, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(ticks)
ax.set_yticklabels(ticks)
fig.colorbar(cax)
plt.ylabel('true labels', fontsize=14)
plt.xlabel('predicted values', fontsize=14)
plt.savefig('conf.png')
plt.show()
###Output
_____no_output_____
###Markdown
Show predictions

Test the deployed model with a random sample of 30 images from the test data.

1. Print the returned predictions and plot them along with the input images. Red font and an inverse image (white on black) are used to highlight the misclassified samples. Since the model accuracy is high, you might have to run the following code a few times before you see a misclassified sample.
###Code
import json
# find 30 random samples from test set
n = 30
sample_indices = np.random.permutation(X_test.shape[0])[0:n]
test_samples = json.dumps({"data": X_test[sample_indices].tolist()})
test_samples = bytes(test_samples, encoding='utf8')
# predict using the deployed model
result = service.run(input_data=test_samples)
# compare actual value vs. the predicted values:
i = 0
plt.figure(figsize = (20, 1))
for s in sample_indices:
plt.subplot(1, n, i + 1)
plt.axhline('')
plt.axvline('')
# use different color for misclassified sample
font_color = 'red' if y_test[s] != result[i] else 'black'
clr_map = plt.cm.gray if y_test[s] != result[i] else plt.cm.Greys
plt.text(x=10, y =-10, s=result[i], fontsize=18, color=font_color)
plt.imshow(X_test[s].reshape(28, 28), cmap=clr_map)
i = i + 1
plt.show()
###Output
_____no_output_____
###Markdown
You can also send a raw HTTP request to test the web service.
###Code
import requests
# send a random row from the test set to score
random_index = np.random.randint(0, len(X_test)-1)
input_data = "{\"data\": [" + str(list(X_test[random_index])) + "]}"
headers = {'Content-Type':'application/json'}
# for an AKS deployment you'd need to include the service key in the header as well
# api_key = service.get_key()
# headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
resp = requests.post(service.scoring_uri, input_data, headers=headers)
print("POST to url", service.scoring_uri)
#print("input data:", input_data)
print("label:", y_test[random_index])
print("prediction:", resp.text)
###Output
_____no_output_____
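###Markdown
Building the request body by string concatenation works here because the values are plain numbers, but it is usually safer to serialize with `json.dumps`. A minimal alternative sketch, assuming the same `service`, `X_test`, and `random_index` as above:
###Code
import json
import requests
# serialize the single test row with json.dumps instead of manual string building
payload = json.dumps({"data": [X_test[random_index].tolist()]})
headers = {'Content-Type': 'application/json'}
resp = requests.post(service.scoring_uri, data=payload, headers=headers)
print("prediction:", resp.json())
###Output
_____no_output_____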
###Markdown
Clean up resources

To keep the resource group and workspace for other tutorials and exploration, you can delete only the ACI deployment using this API call:
###Code
service.delete()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.

Licensed under the MIT License.

Tutorial 2: Deploy an image classification model in Azure Container Instance (ACI)

This tutorial is **part two of a two-part tutorial series**. In the [previous tutorial](img-classification-part1-training.ipynb), you trained machine learning models and then registered a model in your workspace on the cloud. Now, you're ready to deploy the model as a web service in [Azure Container Instances](https://docs.microsoft.com/azure/container-instances/) (ACI). A web service is an image, in this case a Docker image, that encapsulates the scoring logic and the model itself.

In this part of the tutorial, you use Azure Machine Learning service (Preview) to:

> * Set up your testing environment
> * Retrieve the model from your workspace
> * Test the model locally
> * Deploy the model to ACI
> * Test the deployed model

ACI is a great solution for testing and understanding the workflow. For scalable production deployments, consider using Azure Kubernetes Service. For more information, see [how to deploy and where](https://docs.microsoft.com/azure/machine-learning/service/how-to-deploy-and-where).

Prerequisites

Complete the model training in the [Tutorial 1: Train an image classification model with Azure Machine Learning](train-models.ipynb) notebook.
###Code
# If you did NOT complete the tutorial, you can instead run this cell
# This will register a model and download the data needed for this tutorial
# These prerequisites are created in the training tutorial
# Feel free to skip this cell if you completed the training tutorial
# register a model
from azureml.core import Workspace
ws = Workspace.from_config()
from azureml.core.model import Model
model_name = "sklearn_mnist"
model = Model.register(model_path="sklearn_mnist_model.pkl",
model_name=model_name,
tags={"data": "mnist", "model": "classification"},
description="Mnist handwriting recognition",
workspace=ws)
###Output
_____no_output_____
###Markdown
Set up the environment

Start by setting up a testing environment.

Import packages

Import the Python packages needed for this tutorial.
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import azureml.core
# display the core SDK version number
print("Azure ML SDK Version: ", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Retrieve the model

You registered a model in your workspace in the previous tutorial. Now, load this workspace and download the model to your local directory.
###Code
from azureml.core import Workspace
from azureml.core.model import Model
import os
ws = Workspace.from_config()
model=Model(ws, 'sklearn_mnist')
model.download(target_dir=os.getcwd(), exist_ok=True)
# verify the downloaded model file
file_path = os.path.join(os.getcwd(), "sklearn_mnist_model.pkl")
os.stat(file_path)
###Output
_____no_output_____
###Markdown
Test model locally

Before deploying, make sure your model is working locally by:

* Downloading the test data if you haven't already
* Loading test data
* Predicting test data
* Examining the confusion matrix

Download test data

If you haven't already, download the test data to the **./data/** directory.
###Code
from azureml.core import Dataset
from azureml.opendatasets import MNIST
data_folder = os.path.join(os.getcwd(), 'data')
os.makedirs(data_folder, exist_ok=True)
mnist_file_dataset = MNIST.get_file_dataset()
mnist_file_dataset.download(data_folder, overwrite=True)
###Output
_____no_output_____
###Markdown
Load test data

Load the test data from the **./data/** directory created during the training tutorial.
###Code
from utils import load_data
import os
data_folder = os.path.join(os.getcwd(), 'data')
# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster
X_test = load_data(os.path.join(data_folder, 't10k-images-idx3-ubyte.gz'), False) / 255.0
y_test = load_data(os.path.join(data_folder, 't10k-labels-idx1-ubyte.gz'), True).reshape(-1)
###Output
_____no_output_____
###Markdown
Predict test data

Feed the test dataset to the model to get predictions.
###Code
import pickle
import joblib
clf = joblib.load( os.path.join(os.getcwd(), 'sklearn_mnist_model.pkl'))
y_hat = clf.predict(X_test)
print(y_hat)
###Output
_____no_output_____
###Markdown
Examine the confusion matrix

Generate a confusion matrix to see how many samples from the test set are classified correctly. Notice the off-diagonal values, which count the mis-classified predictions.
###Code
from sklearn.metrics import confusion_matrix
conf_mx = confusion_matrix(y_test, y_hat)
print(conf_mx)
print('Overall accuracy:', np.average(y_hat == y_test))
###Output
_____no_output_____
###Markdown
Use `matplotlib` to display the confusion matrix as a graph. In this graph, the Y axis represents the actual (true) labels and the X axis represents the predicted values, matching the axis labels in the plotting code. The color of each grid cell represents the error rate: the lighter the color, the higher the error rate. For example, many 5's are mis-classified as 3's, so you see a bright cell at row 5, column 3.
###Code
# normalize the diagonal cells so that they don't overpower the rest of the cells when visualized
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
fig = plt.figure(figsize=(8,5))
ax = fig.add_subplot(111)
cax = ax.matshow(norm_conf_mx, cmap=plt.cm.bone)
ticks = np.arange(0, 10, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(ticks)
ax.set_yticklabels(ticks)
fig.colorbar(cax)
plt.ylabel('true labels', fontsize=14)
plt.xlabel('predicted values', fontsize=14)
plt.savefig('conf.png')
plt.show()
###Output
_____no_output_____
###Markdown
Deploy as web service

Once you've tested the model and are satisfied with the results, deploy the model as a web service hosted in ACI. To build the correct environment for ACI, provide the following:

* A scoring script to show how to use the model
* An environment file to show what packages need to be installed
* A configuration file to build the ACI
* The model you trained before

Create scoring script

Create the scoring script, called score.py, used by the web service call to show how to use the model. You must include two required functions in the scoring script:

* The `init()` function, which typically loads the model into a global object. This function is run only once, when the Docker container is started.
* The `run(input_data)` function uses the model to predict a value based on the input data. Inputs and outputs to `run` typically use JSON for serialization and de-serialization, but other formats are supported.
###Code
%%writefile score.py
import json
import numpy as np
import os
import pickle
import joblib
def init():
global model
# AZUREML_MODEL_DIR is an environment variable created during deployment.
# It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)
# For multiple models, it points to the folder containing all deployed models (./azureml-models)
model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'sklearn_mnist_model.pkl')
model = joblib.load(model_path)
def run(raw_data):
data = np.array(json.loads(raw_data)['data'])
# make prediction
y_hat = model.predict(data)
# you can return any data type as long as it is JSON-serializable
return y_hat.tolist()
###Output
_____no_output_____
###Markdown
Create environment file

Next, create an environment file, called myenv.yml, that specifies all of the script's package dependencies. This file is used to ensure that all of those dependencies are installed in the Docker image. This model needs `scikit-learn` and `azureml-defaults`.
###Code
from azureml.core.conda_dependencies import CondaDependencies
myenv = CondaDependencies()
myenv.add_conda_package("scikit-learn==0.22.1")
myenv.add_pip_package("azureml-defaults")
with open("myenv.yml","w") as f:
f.write(myenv.serialize_to_string())
###Output
_____no_output_____
###Markdown
Review the content of the `myenv.yml` file.
###Code
with open("myenv.yml","r") as f:
print(f.read())
###Output
_____no_output_____
###Markdown
Create configuration file

Create a deployment configuration file and specify the number of CPU cores and gigabytes of RAM needed for your ACI container. While it depends on your model, the default of 1 core and 1 gigabyte of RAM is usually sufficient for many models. If you need more later, you have to recreate the image and redeploy the service.
###Code
from azureml.core.webservice import AciWebservice
aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
memory_gb=1,
tags={"data": "MNIST", "method" : "sklearn"},
description='Predict MNIST with sklearn')
###Output
_____no_output_____
###Markdown
Deploy in ACI

Estimated time to complete: **about 2-5 minutes**

Configure the image and deploy. The following code goes through these steps:

1. Create an environment object containing the dependencies needed by the model, using the environment file (`myenv.yml`).
1. Create the inference configuration necessary to deploy the model as a web service, using:
   * the scoring file (`score.py`)
   * the environment object created in the previous step.
1. Deploy the model to the ACI container.
1. Get the web service HTTP endpoint.
###Code
%%time
from azureml.core.webservice import Webservice
from azureml.core.model import InferenceConfig
from azureml.core.environment import Environment
myenv = Environment.get(workspace=ws, name="tutorial-env", version="1")
inference_config = InferenceConfig(entry_script="score.py", environment=myenv)
service = Model.deploy(workspace=ws,
name='sklearn-mnist-svc',
models=[model],
inference_config=inference_config,
deployment_config=aciconfig)
service.wait_for_deployment(show_output=True)
###Output
_____no_output_____
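###Markdown
Re-running the deployment cell above fails if a service named `sklearn-mnist-svc` already exists in the workspace, which is why a later variant of this notebook appends a random suffix to the name. One way to make the fixed-name deployment re-runnable (a sketch, assuming the standard SDK exception type) is to delete any stale service of the same name first:
###Code
from azureml.core.webservice import Webservice
from azureml.exceptions import WebserviceException
# remove a stale deployment with the same name so it can be recreated
try:
    old_service = Webservice(ws, name='sklearn-mnist-svc')
    old_service.delete()
except WebserviceException:
    pass  # no existing service with this name
###Output
_____no_output_____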
###Markdown
Get the scoring web service's HTTP endpoint, which accepts REST client calls. This endpoint can be shared with anyone who wants to test the web service or integrate it into an application.
###Code
print(service.scoring_uri)
###Output
_____no_output_____
###Markdown
Test deployed service

Earlier you scored all the test data with the local version of the model. Now, you can test the deployed model with a random sample of 30 images from the test data. The following code goes through these steps:

1. Send the data as a JSON array to the web service hosted in ACI.
1. Use the SDK's `run` API to invoke the service. You can also make raw calls using any HTTP tool such as curl.
1. Print the returned predictions and plot them along with the input images. Red font and an inverse image (white on black) are used to highlight the misclassified samples. Since the model accuracy is high, you might have to run the following code a few times before you see a misclassified sample.
###Code
import json
# find 30 random samples from test set
n = 30
sample_indices = np.random.permutation(X_test.shape[0])[0:n]
test_samples = json.dumps({"data": X_test[sample_indices].tolist()})
test_samples = bytes(test_samples, encoding='utf8')
# predict using the deployed model
result = service.run(input_data=test_samples)
# compare actual value vs. the predicted values:
i = 0
plt.figure(figsize = (20, 1))
for s in sample_indices:
plt.subplot(1, n, i + 1)
plt.axhline('')
plt.axvline('')
# use different color for misclassified sample
font_color = 'red' if y_test[s] != result[i] else 'black'
clr_map = plt.cm.gray if y_test[s] != result[i] else plt.cm.Greys
plt.text(x=10, y =-10, s=result[i], fontsize=18, color=font_color)
plt.imshow(X_test[s].reshape(28, 28), cmap=clr_map)
i = i + 1
plt.show()
###Output
_____no_output_____
###Markdown
You can also send a raw HTTP request to test the web service.
###Code
import requests
# send a random row from the test set to score
random_index = np.random.randint(0, len(X_test)-1)
input_data = "{\"data\": [" + str(list(X_test[random_index])) + "]}"
headers = {'Content-Type':'application/json'}
# for an AKS deployment you'd need to include the service key in the header as well
# api_key = service.get_key()
# headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
resp = requests.post(service.scoring_uri, input_data, headers=headers)
print("POST to url", service.scoring_uri)
#print("input data:", input_data)
print("label:", y_test[random_index])
print("prediction:", resp.text)
###Output
_____no_output_____
###Markdown
Clean up resources

To keep the resource group and workspace for other tutorials and exploration, you can delete only the ACI deployment using this API call:
###Code
service.delete()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.

Licensed under the MIT License.

Tutorial 2: Deploy an image classification model in Azure Container Instance (ACI)

This tutorial is **part two of a two-part tutorial series**. In the [previous tutorial](img-classification-part1-training.ipynb), you trained machine learning models and then registered a model in your workspace on the cloud. Now, you're ready to deploy the model as a web service in [Azure Container Instances](https://docs.microsoft.com/azure/container-instances/) (ACI). A web service is an image, in this case a Docker image, that encapsulates the scoring logic and the model itself.

In this part of the tutorial, you use Azure Machine Learning service (Preview) to:

> * Set up your testing environment
> * Retrieve the model from your workspace
> * Test the model locally
> * Deploy the model to ACI
> * Test the deployed model

ACI is a great solution for testing and understanding the workflow. For scalable production deployments, consider using Azure Kubernetes Service. For more information, see [how to deploy and where](https://docs.microsoft.com/azure/machine-learning/service/how-to-deploy-and-where).

Prerequisites

Complete the model training in the [Tutorial 1: Train an image classification model with Azure Machine Learning](train-models.ipynb) notebook.
###Code
# If you did NOT complete the tutorial, you can instead run this cell
# This will register a model and download the data needed for this tutorial
# These prerequisites are created in the training tutorial
# Feel free to skip this cell if you completed the training tutorial
# register a model
from azureml.core import Workspace
ws = Workspace.from_config()
from azureml.core.model import Model
model_name = "sklearn_mnist"
model = Model.register(model_path="sklearn_mnist_model.pkl",
model_name=model_name,
tags={"data": "mnist", "model": "classification"},
description="Mnist handwriting recognition",
workspace=ws)
###Output
_____no_output_____
###Markdown
Set up the environment

Start by setting up a testing environment.

Import packages

Import the Python packages needed for this tutorial.
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import azureml.core
# display the core SDK version number
print("Azure ML SDK Version: ", azureml.core.VERSION)
###Output
Azure ML SDK Version: 1.1.5
###Markdown
Retrieve the model

You registered a model in your workspace in the previous tutorial. Now, load this workspace and download the model to your local directory.
###Code
from azureml.core import Workspace
from azureml.core.model import Model
import os
from azureml.core.authentication import InteractiveLoginAuthentication
interactive_auth = InteractiveLoginAuthentication(tenant_id="ac5c5e7c-0141-491a-a5dd-d3608633ce62")
ws = Workspace.from_config()
model=Model(ws, 'sklearn_mnist')
model.download(target_dir=os.getcwd(), exist_ok=True)
# verify the downloaded model file
file_path = os.path.join(os.getcwd(), "sklearn_mnist_model.pkl")
os.stat(file_path)
###Output
WARNING - Warning: Falling back to use azure cli login credentials.
If you run your code in unattended mode, i.e., where you can't give a user input, then we recommend to use ServicePrincipalAuthentication or MsiAuthentication.
Please refer to aka.ms/aml-notebook-auth for different authentication mechanisms in azureml-sdk.
###Markdown
Test model locally

Before deploying, make sure your model is working locally by:

* Downloading the test data if you haven't already
* Loading test data
* Predicting test data
* Examining the confusion matrix

Download test data

If you haven't already, download the test data to the **./data/** directory.
###Code
# download test data
import os
import urllib.request
data_folder = os.path.join(os.getcwd(), 'data')
os.makedirs(data_folder, exist_ok = True)
urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename=os.path.join(data_folder, 'test-images.gz'))
urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename=os.path.join(data_folder, 'test-labels.gz'))
###Output
_____no_output_____
###Markdown
Load test data

Load the test data from the **./data/** directory created during the training tutorial.
###Code
from utils import load_data
import os
data_folder = os.path.join(os.getcwd(), 'data')
# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster
X_test = load_data(os.path.join(data_folder, 'test-images.gz'), False) / 255.0
y_test = load_data(os.path.join(data_folder, 'test-labels.gz'), True).reshape(-1)
###Output
_____no_output_____
###Markdown
Predict test data

Feed the test dataset to the model to get predictions.
###Code
import pickle
import joblib
clf = joblib.load( os.path.join(os.getcwd(), 'sklearn_mnist_model.pkl'))
y_hat = clf.predict(X_test)
print(y_hat)
###Output
[7 2 1 ... 4 5 6]
###Markdown
Examine the confusion matrix

Generate a confusion matrix to see how many samples from the test set are classified correctly. Notice the off-diagonal values, which count the mis-classified predictions.
###Code
from sklearn.metrics import confusion_matrix
conf_mx = confusion_matrix(y_test, y_hat)
print(conf_mx)
print('Overall accuracy:', np.average(y_hat == y_test))
###Output
[[ 960 0 2 2 1 4 6 3 1 1]
[ 0 1113 3 1 0 1 5 1 11 0]
[ 9 8 919 20 9 5 10 12 37 3]
[ 4 0 17 918 2 24 4 11 21 9]
[ 1 4 4 3 913 0 10 3 5 39]
[ 10 2 0 42 11 768 17 7 28 7]
[ 9 3 7 2 6 20 907 1 3 0]
[ 2 9 22 5 8 1 1 948 5 27]
[ 10 15 5 21 15 26 7 11 852 12]
[ 7 8 2 14 32 13 0 26 12 895]]
Overall accuracy: 0.9193
###Markdown
Use `matplotlib` to display the confusion matrix as a graph. In this graph, the Y axis represents the actual (true) labels and the X axis represents the predicted values, matching the axis labels in the plotting code. The color of each grid cell represents the error rate: the lighter the color, the higher the error rate. For example, many 5's are mis-classified as 3's, so you see a bright cell at row 5, column 3.
###Code
# normalize the diagonal cells so that they don't overpower the rest of the cells when visualized
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
fig = plt.figure(figsize=(8,5))
ax = fig.add_subplot(111)
cax = ax.matshow(norm_conf_mx, cmap=plt.cm.bone)
ticks = np.arange(0, 10, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(ticks)
ax.set_yticklabels(ticks)
fig.colorbar(cax)
plt.ylabel('true labels', fontsize=14)
plt.xlabel('predicted values', fontsize=14)
plt.savefig('conf.png')
plt.show()
###Output
_____no_output_____
###Markdown
Deploy as web service

Once you've tested the model and are satisfied with the results, deploy the model as a web service hosted in ACI. To build the correct environment for ACI, provide the following:

* A scoring script to show how to use the model
* An environment file to show what packages need to be installed
* A configuration file to build the ACI
* The model you trained before

Create scoring script

Create the scoring script, called score.py, used by the web service call to show how to use the model. You must include two required functions in the scoring script:

* The `init()` function, which typically loads the model into a global object. This function is run only once, when the Docker container is started.
* The `run(input_data)` function uses the model to predict a value based on the input data. Inputs and outputs to `run` typically use JSON for serialization and de-serialization, but other formats are supported.
###Code
%%writefile score.py
import json
import numpy as np
import os
import pickle
import joblib
def init():
global model
# AZUREML_MODEL_DIR is an environment variable created during deployment.
# It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)
# For multiple models, it points to the folder containing all deployed models (./azureml-models)
model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'sklearn_mnist_model.pkl')
model = joblib.load(model_path)
def run(raw_data):
data = np.array(json.loads(raw_data)['data'])
# make prediction
y_hat = model.predict(data)
# you can return any data type as long as it is JSON-serializable
return y_hat.tolist()
###Output
Writing score.py
###Markdown
Create environment file

Next, create an environment file, called myenv.yml, that specifies all of the script's package dependencies. This file is used to ensure that all of those dependencies are installed in the Docker image. This model needs `scikit-learn` and `azureml-defaults`.
###Code
from azureml.core.conda_dependencies import CondaDependencies
myenv = CondaDependencies()
myenv.add_pip_package("scikit-learn==0.22.1")
myenv.add_pip_package("azureml-defaults")
with open("myenv.yml","w") as f:
f.write(myenv.serialize_to_string())
###Output
_____no_output_____
###Markdown
Review the content of the `myenv.yml` file.
###Code
with open("myenv.yml","r") as f:
print(f.read())
###Output
_____no_output_____
###Markdown
Create configuration file

Create a deployment configuration file and specify the number of CPU cores and gigabytes of RAM needed for your ACI container. While it depends on your model, the default of 1 core and 1 gigabyte of RAM is usually sufficient for many models. If you need more later, you have to recreate the image and redeploy the service.
###Code
from azureml.core.webservice import AciWebservice
aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
memory_gb=1,
tags={"data": "MNIST", "method" : "sklearn"},
description='Predict MNIST with sklearn')
###Output
_____no_output_____
###Markdown
Deploy in ACI

Estimated time to complete: **about 7-8 minutes**

Configure the image and deploy. The following code goes through these steps:

1. Create an environment object containing the dependencies needed by the model, using the environment file (`myenv.yml`).
1. Create the inference configuration necessary to deploy the model as a web service, using:
   * the scoring file (`score.py`)
   * the environment object created in the previous step.
1. Deploy the model to the ACI container.
1. Get the web service HTTP endpoint.
###Code
%%time
from azureml.core.webservice import Webservice
from azureml.core.model import InferenceConfig
from azureml.core.environment import Environment
myenv = Environment.from_conda_specification(name="myenv", file_path="myenv.yml")
inference_config = InferenceConfig(entry_script="score.py", environment=myenv)
service = Model.deploy(workspace=ws,
name='sklearn-mnist-svc',
models=[model],
inference_config=inference_config,
deployment_config=aciconfig)
service.wait_for_deployment(show_output=True)
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.

Licensed under the MIT License.

Tutorial 2: Deploy an image classification model in Azure Container Instance (ACI)

This tutorial is **part two of a two-part tutorial series**. In the [previous tutorial](img-classification-part1-training.ipynb), you trained machine learning models and then registered a model in your workspace on the cloud. Now, you're ready to deploy the model as a web service in [Azure Container Instances](https://docs.microsoft.com/azure/container-instances/) (ACI). A web service is an image, in this case a Docker image, that encapsulates the scoring logic and the model itself.

In this part of the tutorial, you use Azure Machine Learning service (Preview) to:

> * Set up your testing environment
> * Retrieve the model from your workspace
> * Test the model locally
> * Deploy the model to ACI
> * Test the deployed model

ACI is a great solution for testing and understanding the workflow. For scalable production deployments, consider using Azure Kubernetes Service. For more information, see [how to deploy and where](https://docs.microsoft.com/azure/machine-learning/service/how-to-deploy-and-where).

Prerequisites

Complete the model training in the [Tutorial 1: Train an image classification model with Azure Machine Learning](train-models.ipynb) notebook.
###Code
# If you did NOT complete the tutorial, you can instead run this cell
# This will register a model and download the data needed for this tutorial
# These prerequisites are created in the training tutorial
# Feel free to skip this cell if you completed the training tutorial
# register a model
from azureml.core import Workspace
ws = Workspace.from_config()
from azureml.core.model import Model
model_name = "sklearn_mnist"
model = Model.register(model_path="sklearn_mnist_model.pkl",
model_name=model_name,
tags={"data": "mnist", "model": "classification"},
description="Mnist handwriting recognition",
workspace=ws)
from azureml.core.environment import Environment
from azureml.core.conda_dependencies import CondaDependencies
# to install required packages
env = Environment('tutorial-env')
cd = CondaDependencies.create(pip_packages=['azureml-dataprep[pandas,fuse]>=1.1.14', 'azureml-defaults'], conda_packages = ['scikit-learn==0.22.1'])
env.python.conda_dependencies = cd
# Register environment to re-use later
env.register(workspace = ws)
###Output
_____no_output_____
###Markdown
Set up the environment

Start by setting up a testing environment.

Import packages

Import the Python packages needed for this tutorial.
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import azureml.core
# display the core SDK version number
print("Azure ML SDK Version: ", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Deploy as web service

Deploy the model as a web service hosted in ACI. To build the correct environment for ACI, provide the following:

* A scoring script to show how to use the model
* A configuration file to build the ACI
* The model you trained before

Create scoring script

Create the scoring script, called score.py, used by the web service call to show how to use the model. You must include two required functions in the scoring script:

* The `init()` function, which typically loads the model into a global object. This function is run only once, when the Docker container is started.
* The `run(input_data)` function uses the model to predict a value based on the input data. Inputs and outputs to `run` typically use JSON for serialization and de-serialization, but other formats are supported.
###Code
%%writefile score.py
import json
import numpy as np
import os
import pickle
import joblib
def init():
global model
# AZUREML_MODEL_DIR is an environment variable created during deployment.
# It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)
# For multiple models, it points to the folder containing all deployed models (./azureml-models)
model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'sklearn_mnist_model.pkl')
model = joblib.load(model_path)
def run(raw_data):
data = np.array(json.loads(raw_data)['data'])
# make prediction
y_hat = model.predict(data)
# you can return any data type as long as it is JSON-serializable
return y_hat.tolist()
###Output
_____no_output_____
###Markdown
Create configuration file

Create a deployment configuration file and specify the number of CPU cores and gigabytes of RAM needed for your ACI container. While it depends on your model, the default of 1 core and 1 gigabyte of RAM is usually sufficient for many models. If you need more later, you have to recreate the image and redeploy the service.
###Code
from azureml.core.webservice import AciWebservice
aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
memory_gb=1,
tags={"data": "MNIST", "method" : "sklearn"},
description='Predict MNIST with sklearn')
###Output
_____no_output_____
###Markdown
Deploy in ACI

Estimated time to complete: **about 2-5 minutes**

Configure the image and deploy. The following code goes through these steps:

1. Create an environment object containing the dependencies needed by the model, using the environment file (`myenv.yml`).
1. Create the inference configuration necessary to deploy the model as a web service, using:
   * the scoring file (`score.py`)
   * the environment object created in the previous step.
1. Deploy the model to the ACI container.
1. Get the web service HTTP endpoint.
###Code
%%time
from azureml.core.webservice import Webservice
from azureml.core.model import InferenceConfig
from azureml.core.environment import Environment
from azureml.core import Workspace
from azureml.core.model import Model
ws = Workspace.from_config()
model = Model(ws, 'sklearn_mnist')
myenv = Environment.get(workspace=ws, name="tutorial-env", version="1")
inference_config = InferenceConfig(entry_script="score.py", environment=myenv)
service = Model.deploy(workspace=ws,
name='sklearn-mnist-svc',
models=[model],
inference_config=inference_config,
deployment_config=aciconfig)
service.wait_for_deployment(show_output=True)
###Output
_____no_output_____
###Markdown
Get the scoring web service's HTTP endpoint, which accepts REST client calls. This endpoint can be shared with anyone who wants to test the web service or integrate it into an application.
###Code
print(service.scoring_uri)
###Output
_____no_output_____
###Markdown
Test the model

Download test data

Download the test data to the **./data/** directory.
###Code
import os
from azureml.core import Dataset
from azureml.opendatasets import MNIST
data_folder = os.path.join(os.getcwd(), 'data')
os.makedirs(data_folder, exist_ok=True)
mnist_file_dataset = MNIST.get_file_dataset()
mnist_file_dataset.download(data_folder, overwrite=True)
###Output
_____no_output_____
###Markdown
Load test dataLoad the test data from the **./data/** directory created during the training tutorial.
###Code
from utils import load_data
import os
import glob
data_folder = os.path.join(os.getcwd(), 'data')
# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster
X_test = load_data(glob.glob(os.path.join(data_folder,"**/t10k-images-idx3-ubyte.gz"), recursive=True)[0], False) / 255.0
y_test = load_data(glob.glob(os.path.join(data_folder,"**/t10k-labels-idx1-ubyte.gz"), recursive=True)[0], True).reshape(-1)
###Output
_____no_output_____
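###Markdown
 A quick sanity check on the loaded arrays before sending them anywhere: the MNIST test split should contain 10,000 flattened 28x28 images scaled to the [0, 1] range.
###Code
# X_test: flattened images scaled to [0, 1]; y_test: integer labels 0-9
print(X_test.shape, y_test.shape)   # expected: (10000, 784) (10000,)
print(X_test.min(), X_test.max())   # expected: 0.0 1.0
###Output
_____no_output_____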
###Markdown
Predict test dataFeed the test dataset to the model to get predictions.The following code goes through these steps:1. Send the data as a JSON array to the web service hosted in ACI. 1. Use the SDK's `run` API to invoke the service. You can also make raw calls using any HTTP tool such as curl.
###Code
import json
test = json.dumps({"data": X_test.tolist()})
test = bytes(test, encoding='utf8')
y_hat = service.run(input_data=test)
###Output
_____no_output_____
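###Markdown
 Sending all 10,000 test images in a single request works here, but for larger payloads an ACI endpoint can hit request-size or timeout limits. A sketch of batching the same call, assuming the `service` object above; the batch size of 1000 is arbitrary.
###Code
import json

batch_size = 1000  # illustrative value
y_hat_batched = []
for start in range(0, X_test.shape[0], batch_size):
    chunk = json.dumps({"data": X_test[start:start + batch_size].tolist()})
    y_hat_batched.extend(service.run(input_data=bytes(chunk, encoding='utf8')))

print(len(y_hat_batched))  # should match len(y_test)
###Output
_____no_output_____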
###Markdown
Examine the confusion matrix

Generate a confusion matrix to see how many samples from the test set are classified correctly. Notice the mis-classified values for the incorrect predictions.
###Code
from sklearn.metrics import confusion_matrix
conf_mx = confusion_matrix(y_test, y_hat)
print(conf_mx)
print('Overall accuracy:', np.average(y_hat == y_test))
###Output
_____no_output_____
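###Markdown
 Beyond overall accuracy, per-class precision and recall make it easier to see which digits the model struggles with. A short sketch using scikit-learn's `classification_report` on the same predictions:
###Code
from sklearn.metrics import classification_report

# per-digit precision, recall and F1 for the deployed model's predictions
print(classification_report(y_test, y_hat))
###Output
_____no_output_____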
###Markdown
Use `matplotlib` to display the confusion matrix as a graph. In this graph, the X axis represents the predicted values, and the Y axis represents the true labels. The color of each grid cell represents the error rate: the lighter the color, the higher the error rate. For example, many 5's are mis-classified as 3's, so you see a bright cell at row 5, column 3.
###Code
# normalize the diagonal cells so that they don't overpower the rest of the cells when visualized
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
fig = plt.figure(figsize=(8,5))
ax = fig.add_subplot(111)
cax = ax.matshow(norm_conf_mx, cmap=plt.cm.bone)
ticks = np.arange(0, 10, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(ticks)
ax.set_yticklabels(ticks)
fig.colorbar(cax)
plt.ylabel('true labels', fontsize=14)
plt.xlabel('predicted values', fontsize=14)
plt.savefig('conf.png')
plt.show()
###Output
_____no_output_____
###Markdown
Show predictions

Test the deployed model with a random sample of 30 images from the test data. Print the returned predictions and plot them along with the input images. Red font and an inverse image (white on black) are used to highlight the misclassified samples. Since the model accuracy is high, you might have to run the following code a few times before you see a misclassified sample.
###Code
import json
# find 30 random samples from test set
n = 30
sample_indices = np.random.permutation(X_test.shape[0])[0:n]
test_samples = json.dumps({"data": X_test[sample_indices].tolist()})
test_samples = bytes(test_samples, encoding='utf8')
# predict using the deployed model
result = service.run(input_data=test_samples)
# compare actual value vs. the predicted values:
i = 0
plt.figure(figsize = (20, 1))
for s in sample_indices:
plt.subplot(1, n, i + 1)
plt.axhline('')
plt.axvline('')
# use different color for misclassified sample
font_color = 'red' if y_test[s] != result[i] else 'black'
clr_map = plt.cm.gray if y_test[s] != result[i] else plt.cm.Greys
plt.text(x=10, y =-10, s=result[i], fontsize=18, color=font_color)
plt.imshow(X_test[s].reshape(28, 28), cmap=clr_map)
i = i + 1
plt.show()
###Output
_____no_output_____
###Markdown
You can also send a raw HTTP request to test the web service.
###Code
import requests
# send a random row from the test set to score
random_index = np.random.randint(0, len(X_test)-1)
input_data = "{\"data\": [" + str(list(X_test[random_index])) + "]}"
headers = {'Content-Type':'application/json'}
# for an AKS deployment you'd need to include the service key in the header as well
# api_key = service.get_key()
# headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
resp = requests.post(service.scoring_uri, input_data, headers=headers)
print("POST to url", service.scoring_uri)
#print("input data:", input_data)
print("label:", y_test[random_index])
print("prediction:", resp.text)
###Output
_____no_output_____
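###Markdown
 The response body can also be parsed and checked programmatically. A small sketch, assuming the `resp` object from the cell above; the body should be the JSON-serialized list returned by `run()`, though depending on the SDK version it may arrive wrapped in an extra layer of JSON encoding, which the sketch accounts for.
###Code
import json

print("status code:", resp.status_code)

# run() returns a plain Python list, which the scoring wrapper serializes as JSON;
# some versions return a JSON list directly, others a JSON-encoded string of one
body = json.loads(resp.text)
if isinstance(body, str):
    body = json.loads(body)
print("parsed prediction:", body)
###Output
_____no_output_____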
###Markdown
Clean up resourcesTo keep the resource group and workspace for other tutorials and exploration, you can delete only the ACI deployment using this API call:
###Code
service.delete()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Tutorial 2: Deploy an image classification model in Azure Container Instance (ACI)This tutorial is **part two of a two-part tutorial series**. In the [previous tutorial](img-classification-part1-training.ipynb), you trained machine learning models and then registered a model in your workspace on the cloud. Now, you're ready to deploy the model as a web service in [Azure Container Instances](https://docs.microsoft.com/azure/container-instances/) (ACI). A web service is an image, in this case a Docker image, that encapsulates the scoring logic and the model itself. In this part of the tutorial, you use Azure Machine Learning service (Preview) to:> * Set up your testing environment> * Retrieve the model from your workspace> * Test the model locally> * Deploy the model to ACI> * Test the deployed modelACI is a great solution for testing and understanding the workflow. For scalable production deployments, consider using Azure Kubernetes Service. For more information, see [how to deploy and where](https://docs.microsoft.com/azure/machine-learning/service/how-to-deploy-and-where). PrerequisitesComplete the model training in the [Tutorial 1: Train an image classification model with Azure Machine Learning](train-models.ipynb) notebook.
###Code
# If you did NOT complete the tutorial, you can instead run this cell
# This will register a model and download the data needed for this tutorial
# These prerequisites are created in the training tutorial
# Feel free to skip this cell if you completed the training tutorial
# register a model
from azureml.core import Workspace
ws = Workspace.from_config()
from azureml.core.model import Model
model_name = "sklearn_mnist"
model = Model.register(model_path="sklearn_mnist_model.pkl",
model_name=model_name,
tags={"data": "mnist", "model": "classification"},
description="Mnist handwriting recognition",
workspace=ws)
from azureml.core.environment import Environment
from azureml.core.conda_dependencies import CondaDependencies
# to install required packages
env = Environment('tutorial-env')
cd = CondaDependencies.create(pip_packages=['azureml-dataset-runtime[pandas,fuse]', 'azureml-defaults'], conda_packages = ['scikit-learn==0.22.1'])
env.python.conda_dependencies = cd
# Register environment to re-use later
env.register(workspace = ws)
###Output
_____no_output_____
###Markdown
Set up the environmentStart by setting up a testing environment. Import packagesImport the Python packages needed for this tutorial.
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import azureml.core
# display the core SDK version number
print("Azure ML SDK Version: ", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Deploy as web service

Deploy the model as a web service hosted in ACI. To build the correct environment for ACI, provide the following:

* A scoring script to show how to use the model
* A configuration file to build the ACI
* The model you trained before

Create scoring script

Create the scoring script, called score.py, used by the web service call to show how to use the model. You must include two required functions in the scoring script:

* The `init()` function, which typically loads the model into a global object. This function is run only once when the Docker container is started.
* The `run(input_data)` function, which uses the model to predict a value based on the input data. Inputs and outputs of the run function typically use JSON for serialization and de-serialization, but other formats are supported.
###Code
%%writefile score.py
import json
import numpy as np
import os
import pickle
import joblib
def init():
global model
# AZUREML_MODEL_DIR is an environment variable created during deployment.
# It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)
# For multiple models, it points to the folder containing all deployed models (./azureml-models)
model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'sklearn_mnist_model.pkl')
model = joblib.load(model_path)
def run(raw_data):
data = np.array(json.loads(raw_data)['data'])
# make prediction
y_hat = model.predict(data)
# you can return any data type as long as it is JSON-serializable
return y_hat.tolist()
###Output
_____no_output_____
###Markdown
Create configuration file

Create a deployment configuration file and specify the number of CPUs and gigabytes of RAM needed for your ACI container. While it depends on your model, the default of 1 core and 1 gigabyte of RAM is usually sufficient for many models. If you need more later, you have to recreate the image and redeploy the service.
###Code
from azureml.core.webservice import AciWebservice
aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
memory_gb=1,
tags={"data": "MNIST", "method" : "sklearn"},
description='Predict MNIST with sklearn')
###Output
_____no_output_____
###Markdown
Deploy in ACI

Estimated time to complete: **about 2-5 minutes**

Configure the image and deploy. The following code goes through these steps:

1. Create an environment object containing the dependencies needed by the model, retrieved from the registered environment (`tutorial-env`).
1. Create the inference configuration necessary to deploy the model as a web service using:
   * The scoring file (`score.py`)
   * The environment object created in the previous step
1. Deploy the model to the ACI container.
1. Get the web service HTTP endpoint.
###Code
%%time
import uuid
from azureml.core.webservice import Webservice
from azureml.core.model import InferenceConfig
from azureml.core.environment import Environment
from azureml.core import Workspace
from azureml.core.model import Model
ws = Workspace.from_config()
model = Model(ws, 'sklearn_mnist')
myenv = Environment.get(workspace=ws, name="tutorial-env", version="1")
inference_config = InferenceConfig(entry_script="score.py", environment=myenv)
service_name = 'sklearn-mnist-svc-' + str(uuid.uuid4())[:4]
service = Model.deploy(workspace=ws,
name=service_name,
models=[model],
inference_config=inference_config,
deployment_config=aciconfig)
service.wait_for_deployment(show_output=True)
###Output
_____no_output_____
###Markdown
Get the scoring web service's HTTP endpoint, which accepts REST client calls. This endpoint can be shared with anyone who wants to test the web service or integrate it into an application.
###Code
print(service.scoring_uri)
###Output
_____no_output_____
###Markdown
Test the model Download test dataDownload the test data to the **./data/** directory
###Code
import os
from azureml.core import Dataset
from azureml.opendatasets import MNIST
data_folder = os.path.join(os.getcwd(), 'data')
os.makedirs(data_folder, exist_ok=True)
mnist_file_dataset = MNIST.get_file_dataset()
mnist_file_dataset.download(data_folder, overwrite=True)
###Output
_____no_output_____
###Markdown
Load test dataLoad the test data from the **./data/** directory created during the training tutorial.
###Code
from utils import load_data
import os
import glob
data_folder = os.path.join(os.getcwd(), 'data')
# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster
X_test = load_data(glob.glob(os.path.join(data_folder,"**/t10k-images-idx3-ubyte.gz"), recursive=True)[0], False) / 255.0
y_test = load_data(glob.glob(os.path.join(data_folder,"**/t10k-labels-idx1-ubyte.gz"), recursive=True)[0], True).reshape(-1)
###Output
_____no_output_____
###Markdown
Predict test dataFeed the test dataset to the model to get predictions.The following code goes through these steps:1. Send the data as a JSON array to the web service hosted in ACI. 1. Use the SDK's `run` API to invoke the service. You can also make raw calls using any HTTP tool such as curl.
###Code
import json
test = json.dumps({"data": X_test.tolist()})
test = bytes(test, encoding='utf8')
y_hat = service.run(input_data=test)
###Output
_____no_output_____
###Markdown
Examine the confusion matrixGenerate a confusion matrix to see how many samples from the test set are classified correctly. Notice the mis-classified value for the incorrect predictions.
###Code
from sklearn.metrics import confusion_matrix
conf_mx = confusion_matrix(y_test, y_hat)
print(conf_mx)
print('Overall accuracy:', np.average(y_hat == y_test))
###Output
_____no_output_____
###Markdown
Use `matplotlib` to display the confusion matrix as a graph. In this graph, the X axis represents the predicted values, and the Y axis represents the true labels. The color of each grid cell represents the error rate: the lighter the color, the higher the error rate. For example, many 5's are mis-classified as 3's, so you see a bright cell at row 5, column 3.
###Code
# normalize the diagonal cells so that they don't overpower the rest of the cells when visualized
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
fig = plt.figure(figsize=(8,5))
ax = fig.add_subplot(111)
cax = ax.matshow(norm_conf_mx, cmap=plt.cm.bone)
ticks = np.arange(0, 10, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(ticks)
ax.set_yticklabels(ticks)
fig.colorbar(cax)
plt.ylabel('true labels', fontsize=14)
plt.xlabel('predicted values', fontsize=14)
plt.savefig('conf.png')
plt.show()
###Output
_____no_output_____
###Markdown
Show predictions

Test the deployed model with a random sample of 30 images from the test data. Print the returned predictions and plot them along with the input images. Red font and an inverse image (white on black) are used to highlight the misclassified samples. Since the model accuracy is high, you might have to run the following code a few times before you see a misclassified sample.
###Code
import json
# find 30 random samples from test set
n = 30
sample_indices = np.random.permutation(X_test.shape[0])[0:n]
test_samples = json.dumps({"data": X_test[sample_indices].tolist()})
test_samples = bytes(test_samples, encoding='utf8')
# predict using the deployed model
result = service.run(input_data=test_samples)
# compare actual value vs. the predicted values:
i = 0
plt.figure(figsize = (20, 1))
for s in sample_indices:
plt.subplot(1, n, i + 1)
plt.axhline('')
plt.axvline('')
# use different color for misclassified sample
font_color = 'red' if y_test[s] != result[i] else 'black'
clr_map = plt.cm.gray if y_test[s] != result[i] else plt.cm.Greys
plt.text(x=10, y =-10, s=result[i], fontsize=18, color=font_color)
plt.imshow(X_test[s].reshape(28, 28), cmap=clr_map)
i = i + 1
plt.show()
###Output
_____no_output_____
###Markdown
You can also send a raw HTTP request to test the web service.
###Code
import requests
# send a random row from the test set to score
random_index = np.random.randint(0, len(X_test)-1)
input_data = "{\"data\": [" + str(list(X_test[random_index])) + "]}"
headers = {'Content-Type':'application/json'}
# for an AKS deployment you'd need to include the service key in the header as well
# api_key = service.get_key()
# headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
resp = requests.post(service.scoring_uri, input_data, headers=headers)
print("POST to url", service.scoring_uri)
#print("input data:", input_data)
print("label:", y_test[random_index])
print("prediction:", resp.text)
###Output
_____no_output_____
###Markdown
Clean up resourcesTo keep the resource group and workspace for other tutorials and exploration, you can delete only the ACI deployment using this API call:
###Code
service.delete()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Tutorial 2: Deploy an image classification model in Azure Container Instance (ACI)This tutorial is **part two of a two-part tutorial series**. In the [previous tutorial](img-classification-part1-training.ipynb), you trained machine learning models and then registered a model in your workspace on the cloud. Now, you're ready to deploy the model as a web service in [Azure Container Instances](https://docs.microsoft.com/azure/container-instances/) (ACI). A web service is an image, in this case a Docker image, that encapsulates the scoring logic and the model itself. In this part of the tutorial, you use Azure Machine Learning service (Preview) to:> * Set up your testing environment> * Retrieve the model from your workspace> * Test the model locally> * Deploy the model to ACI> * Test the deployed modelACI is a great solution for testing and understanding the workflow. For scalable production deployments, consider using Azure Kubernetes Service. For more information, see [how to deploy and where](https://docs.microsoft.com/azure/machine-learning/service/how-to-deploy-and-where). PrerequisitesComplete the model training in the [Tutorial 1: Train an image classification model with Azure Machine Learning](train-models.ipynb) notebook.
###Code
# If you did NOT complete the tutorial, you can instead run this cell
# This will register a model and download the data needed for this tutorial
# These prerequisites are created in the training tutorial
# Feel free to skip this cell if you completed the training tutorial
# register a model
from azureml.core import Workspace
ws = Workspace.from_config()
from azureml.core.model import Model
model_name = "sklearn_mnist"
model = Model.register(model_path="sklearn_mnist_model.pkl",
model_name=model_name,
tags={"data": "mnist", "model": "classification"},
description="Mnist handwriting recognition",
workspace=ws)
###Output
_____no_output_____
###Markdown
Set up the environmentStart by setting up a testing environment. Import packagesImport the Python packages needed for this tutorial.
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import azureml.core
# display the core SDK version number
print("Azure ML SDK Version: ", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Retrieve the modelYou registered a model in your workspace in the previous tutorial. Now, load this workspace and download the model to your local directory.
###Code
from azureml.core import Workspace
from azureml.core.model import Model
import os
ws = Workspace.from_config()
model=Model(ws, 'sklearn_mnist')
model.download(target_dir=os.getcwd(), exist_ok=True)
# verify the downloaded model file
file_path = os.path.join(os.getcwd(), "sklearn_mnist_model.pkl")
os.stat(file_path)
###Output
_____no_output_____
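###Markdown
 Every `Model.register` call creates a new version under the same name, so if the download above does not behave as expected it can help to see which versions exist in the workspace. A short sketch using `Model.list`:
###Code
from azureml.core.model import Model

# list the registered versions of the sklearn_mnist model in this workspace
for m in Model.list(ws, name='sklearn_mnist'):
    print(m.name, 'version', m.version)
###Output
_____no_output_____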
###Markdown
Test model locally

Before deploying, make sure your model is working locally by:

* Downloading the test data if you haven't already
* Loading test data
* Predicting test data
* Examining the confusion matrix

Download test data

If you haven't already, download the test data to the **./data/** directory.
###Code
# download test data
import os
import urllib.request
data_folder = os.path.join(os.getcwd(), 'data')
os.makedirs(data_folder, exist_ok = True)
urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename=os.path.join(data_folder, 'test-images.gz'))
urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename=os.path.join(data_folder, 'test-labels.gz'))
###Output
_____no_output_____
###Markdown
Load test dataLoad the test data from the **./data/** directory created during the training tutorial.
###Code
from utils import load_data
import os
data_folder = os.path.join(os.getcwd(), 'data')
# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster
X_test = load_data(os.path.join(data_folder, 'test-images.gz'), False) / 255.0
y_test = load_data(os.path.join(data_folder, 'test-labels.gz'), True).reshape(-1)
###Output
_____no_output_____
###Markdown
Predict test dataFeed the test dataset to the model to get predictions.
###Code
import pickle
import joblib
clf = joblib.load( os.path.join(os.getcwd(), 'sklearn_mnist_model.pkl'))
y_hat = clf.predict(X_test)
print(y_hat)
###Output
_____no_output_____
###Markdown
Examine the confusion matrixGenerate a confusion matrix to see how many samples from the test set are classified correctly. Notice the mis-classified value for the incorrect predictions.
###Code
from sklearn.metrics import confusion_matrix
conf_mx = confusion_matrix(y_test, y_hat)
print(conf_mx)
print('Overall accuracy:', np.average(y_hat == y_test))
###Output
_____no_output_____
###Markdown
Use `matplotlib` to display the confusion matrix as a graph. In this graph, the X axis represents the predicted values, and the Y axis represents the true labels. The color of each grid cell represents the error rate: the lighter the color, the higher the error rate. For example, many 5's are mis-classified as 3's, so you see a bright cell at row 5, column 3.
###Code
# normalize the diagonal cells so that they don't overpower the rest of the cells when visualized
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
fig = plt.figure(figsize=(8,5))
ax = fig.add_subplot(111)
cax = ax.matshow(norm_conf_mx, cmap=plt.cm.bone)
ticks = np.arange(0, 10, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(ticks)
ax.set_yticklabels(ticks)
fig.colorbar(cax)
plt.ylabel('true labels', fontsize=14)
plt.xlabel('predicted values', fontsize=14)
plt.savefig('conf.png')
plt.show()
###Output
_____no_output_____
###Markdown
Deploy as web service

Once you've tested the model and are satisfied with the results, deploy the model as a web service hosted in ACI. To build the correct environment for ACI, provide the following:

* A scoring script to show how to use the model
* An environment file to show what packages need to be installed
* A configuration file to build the ACI
* The model you trained before

Create scoring script

Create the scoring script, called score.py, used by the web service call to show how to use the model. You must include two required functions in the scoring script:

* The `init()` function, which typically loads the model into a global object. This function is run only once when the Docker container is started.
* The `run(input_data)` function, which uses the model to predict a value based on the input data. Inputs and outputs of the run function typically use JSON for serialization and de-serialization, but other formats are supported.
###Code
%%writefile score.py
import json
import numpy as np
import os
import pickle
import joblib
def init():
global model
# AZUREML_MODEL_DIR is an environment variable created during deployment.
# It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)
# For multiple models, it points to the folder containing all deployed models (./azureml-models)
model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'sklearn_mnist_model.pkl')
model = joblib.load(model_path)
def run(raw_data):
data = np.array(json.loads(raw_data)['data'])
# make prediction
y_hat = model.predict(data)
# you can return any data type as long as it is JSON-serializable
return y_hat.tolist()
###Output
_____no_output_____
###Markdown
Create environment file

Next, create an environment file, called myenv.yml, that specifies all of the script's package dependencies. This file is used to ensure that all of those dependencies are installed in the Docker image. This model needs `scikit-learn` and `azureml-defaults`.
###Code
from azureml.core.conda_dependencies import CondaDependencies
myenv = CondaDependencies()
myenv.add_pip_package("scikit-learn==0.22.1")
myenv.add_pip_package("azureml-defaults")
with open("myenv.yml","w") as f:
f.write(myenv.serialize_to_string())
###Output
_____no_output_____
###Markdown
Review the content of the `myenv.yml` file.
###Code
with open("myenv.yml","r") as f:
print(f.read())
###Output
_____no_output_____
###Markdown
Create configuration file

Create a deployment configuration file and specify the number of CPUs and gigabytes of RAM needed for your ACI container. While it depends on your model, the default of 1 core and 1 gigabyte of RAM is usually sufficient for many models. If you need more later, you have to recreate the image and redeploy the service.
###Code
from azureml.core.webservice import AciWebservice
aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
memory_gb=1,
tags={"data": "MNIST", "method" : "sklearn"},
description='Predict MNIST with sklearn')
###Output
_____no_output_____
###Markdown
Deploy in ACI

Estimated time to complete: **about 7-8 minutes**

Configure the image and deploy. The following code goes through these steps:

1. Create an environment object containing the dependencies needed by the model using the environment file (`myenv.yml`).
1. Create the inference configuration necessary to deploy the model as a web service using:
   * The scoring file (`score.py`)
   * The environment object created in the previous step
1. Deploy the model to the ACI container.
1. Get the web service HTTP endpoint.
###Code
%%time
from azureml.core.webservice import Webservice
from azureml.core.model import InferenceConfig
from azureml.core.environment import Environment
myenv = Environment.from_conda_specification(name="myenv", file_path="myenv.yml")
inference_config = InferenceConfig(entry_script="score.py", environment=myenv)
service = Model.deploy(workspace=ws,
name='sklearn-mnist-svc',
models=[model],
inference_config=inference_config,
deployment_config=aciconfig)
service.wait_for_deployment(show_output=True)
###Output
_____no_output_____
###Markdown
Get the scoring web service's HTTP endpoint, which accepts REST client calls. This endpoint can be shared with anyone who wants to test the web service or integrate it into an application.
###Code
print(service.scoring_uri)
###Output
_____no_output_____
###Markdown
Test deployed service

Earlier you scored all the test data with the local version of the model. Now, you can test the deployed model with a random sample of 30 images from the test data. The following code goes through these steps:

1. Send the data as a JSON array to the web service hosted in ACI.
1. Use the SDK's `run` API to invoke the service. You can also make raw calls using any HTTP tool such as curl.
1. Print the returned predictions and plot them along with the input images. Red font and an inverse image (white on black) are used to highlight the misclassified samples. Since the model accuracy is high, you might have to run the following code a few times before you see a misclassified sample.
###Code
import json
# find 30 random samples from test set
n = 30
sample_indices = np.random.permutation(X_test.shape[0])[0:n]
test_samples = json.dumps({"data": X_test[sample_indices].tolist()})
test_samples = bytes(test_samples, encoding='utf8')
# predict using the deployed model
result = service.run(input_data=test_samples)
# compare actual value vs. the predicted values:
i = 0
plt.figure(figsize = (20, 1))
for s in sample_indices:
plt.subplot(1, n, i + 1)
plt.axhline('')
plt.axvline('')
# use different color for misclassified sample
font_color = 'red' if y_test[s] != result[i] else 'black'
clr_map = plt.cm.gray if y_test[s] != result[i] else plt.cm.Greys
plt.text(x=10, y =-10, s=result[i], fontsize=18, color=font_color)
plt.imshow(X_test[s].reshape(28, 28), cmap=clr_map)
i = i + 1
plt.show()
###Output
_____no_output_____
###Markdown
You can also send a raw HTTP request to test the web service.
###Code
import requests
# send a random row from the test set to score
random_index = np.random.randint(0, len(X_test)-1)
input_data = "{\"data\": [" + str(list(X_test[random_index])) + "]}"
headers = {'Content-Type':'application/json'}
# for an AKS deployment you'd need to include the service key in the header as well
# api_key = service.get_key()
# headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
resp = requests.post(service.scoring_uri, input_data, headers=headers)
print("POST to url", service.scoring_uri)
#print("input data:", input_data)
print("label:", y_test[random_index])
print("prediction:", resp.text)
###Output
_____no_output_____
###Markdown
Clean up resourcesTo keep the resource group and workspace for other tutorials and exploration, you can delete only the ACI deployment using this API call:
###Code
service.delete()
###Output
_____no_output_____
###Markdown
Get the scoring web service's HTTP endpoint, which accepts REST client calls. This endpoint can be shared with anyone who wants to test the web service or integrate it into an application.
###Code
print(service.scoring_uri)
###Output
http://7905c1f8-553b-4abd-822f-bfbefd9b145f.eastus.azurecontainer.io/score
###Markdown
Test deployed service

Earlier you scored all the test data with the local version of the model. Now, you can test the deployed model with a random sample of 30 images from the test data. The following code goes through these steps:

1. Send the data as a JSON array to the web service hosted in ACI.
1. Use the SDK's `run` API to invoke the service. You can also make raw calls using any HTTP tool such as curl.
1. Print the returned predictions and plot them along with the input images. Red font and an inverse image (white on black) are used to highlight the misclassified samples. Since the model accuracy is high, you might have to run the following code a few times before you see a misclassified sample.
###Code
import json
# find 30 random samples from test set
n = 30
sample_indices = np.random.permutation(X_test.shape[0])[0:n]
test_samples = json.dumps({"data": X_test[sample_indices].tolist()})
test_samples = bytes(test_samples, encoding='utf8')
# predict using the deployed model
result = service.run(input_data=test_samples)
# compare actual value vs. the predicted values:
i = 0
plt.figure(figsize = (20, 1))
for s in sample_indices:
plt.subplot(1, n, i + 1)
plt.axhline('')
plt.axvline('')
# use different color for misclassified sample
font_color = 'red' if y_test[s] != result[i] else 'black'
clr_map = plt.cm.gray if y_test[s] != result[i] else plt.cm.Greys
plt.text(x=10, y =-10, s=result[i], fontsize=18, color=font_color)
plt.imshow(X_test[s].reshape(28, 28), cmap=clr_map)
i = i + 1
plt.show()
###Output
_____no_output_____
###Markdown
You can also send a raw HTTP request to test the web service.
###Code
import requests
# send a random row from the test set to score
random_index = np.random.randint(0, len(X_test)-1)
input_data = "{\"data\": [" + str(list(X_test[random_index])) + "]}"
#headers = {'Content-Type':'application/json'}
# for an AKS deployment you'd need to include the service key in the header as well
# api_key = service.get_key()
# headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
#resp = requests.post(service.scoring_uri, input_data, headers=headers)
#print("POST to url", service.scoring_uri)
print("input data:", input_data)
#print("label:", y_test[random_index])
#print("prediction:", resp.text)
###Output
input data: {"data": [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3176470588235294, 0.5803921568627451, 0.996078431372549, 1.0, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.996078431372549, 0.8431372549019608, 0.10588235294117647, 0.00392156862745098, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6078431372549019, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.4235294117647059, 0.03137254901960784, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.33725490196078434, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.8235294117647058, 0.054901960784313725, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.12549019607843137, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9215686274509803, 0.8941176470588236, 0.9450980392156862, 0.9725490196078431, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.3803921568627451, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.050980392156862744, 0.6941176470588235, 0.8705882352941177, 0.5372549019607843, 0.38823529411764707, 0.10588235294117647, 0.0, 0.19215686274509805, 0.803921568627451, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.8313725490196079, 0.10196078431372549, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.25882352941176473, 0.9254901960784314, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.24705882352941178, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.08627450980392157, 0.0, 0.0, 0.0392156862745098, 0.29411764705882354, 0.6549019607843137, 0.9215686274509803, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9215686274509803, 0.058823529411764705, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5843137254901961, 0.6823529411764706, 0.6823529411764706, 0.7490196078431373, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.8196078431372549, 0.23137254901960785, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6313725490196078, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.7333333333333333, 0.03529411764705882, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.6313725490196078, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.7176470588235294, 0.03137254901960784, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5019607843137255, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9254901960784314, 0.3568627450980392, 0.3843137254901961, 0.9411764705882353, 0.9921568627450981, 0.9921568627450981, 0.49019607843137253, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03529411764705882, 0.4549019607843137, 0.4549019607843137, 0.21568627450980393, 0.16470588235294117, 0.0, 0.0, 0.4235294117647059, 0.9921568627450981, 0.9921568627450981, 0.9098039215686274, 0.07450980392156863, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.023529411764705882, 0.28627450980392155, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.09411764705882353, 0.5294117647058824, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.25098039215686274, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.24313725490196078, 0.9764705882352941, 0.49019607843137253, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4627450980392157, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.17647058823529413, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5607843137254902, 0.9921568627450981, 0.9764705882352941, 0.30980392156862746, 0.0, 0.0, 0.08235294117647059, 0.3686274509803922, 0.7333333333333333, 0.9607843137254902, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.09411764705882353, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.043137254901960784, 0.9411764705882353, 0.9921568627450981, 0.9921568627450981, 0.9176470588235294, 0.8745098039215686, 0.8745098039215686, 0.9019607843137255, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.8392156862745098, 0.058823529411764705, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.050980392156862744, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.4627450980392157, 0.058823529411764705, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.027450980392156862, 0.8, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.4627450980392157, 0.011764705882352941, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.33725490196078434, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.9921568627450981, 0.49019607843137253, 0.047058823529411764, 0.00784313725490196, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0392156862745098, 0.5058823529411764, 0.803921568627451, 0.9921568627450981, 0.9921568627450981, 0.6588235294117647, 0.5058823529411764, 0.14901960784313725, 0.0196078431372549, 0.00784313725490196, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]}
###Markdown
Clean up resourcesTo keep the resource group and workspace for other tutorials and exploration, you can delete only the ACI deployment using this API call:
###Code
service.delete()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Tutorial 2: Deploy an image classification model in Azure Container Instance (ACI)This tutorial is **part two of a two-part tutorial series**. In the [previous tutorial](img-classification-part1-training.ipynb), you trained machine learning models and then registered a model in your workspace on the cloud. Now, you're ready to deploy the model as a web service in [Azure Container Instances](https://docs.microsoft.com/azure/container-instances/) (ACI). A web service is an image, in this case a Docker image, that encapsulates the scoring logic and the model itself. In this part of the tutorial, you use Azure Machine Learning service (Preview) to:> * Set up your testing environment> * Retrieve the model from your workspace> * Test the model locally> * Deploy the model to ACI> * Test the deployed modelACI is a great solution for testing and understanding the workflow. For scalable production deployments, consider using Azure Kubernetes Service. For more information, see [how to deploy and where](https://docs.microsoft.com/azure/machine-learning/service/how-to-deploy-and-where). PrerequisitesComplete the model training in the [Tutorial 1: Train an image classification model with Azure Machine Learning](train-models.ipynb) notebook.
###Code
# If you did NOT complete the tutorial, you can instead run this cell
# This will register a model and download the data needed for this tutorial
# These prerequisites are created in the training tutorial
# Feel free to skip this cell if you completed the training tutorial
# register a model
from azureml.core import Workspace
ws = Workspace.from_config()
from azureml.core.model import Model
model_name = "sklearn_mnist"
model = Model.register(model_path="sklearn_mnist_model.pkl",
model_name=model_name,
tags={"data": "mnist", "model": "classification"},
description="Mnist handwriting recognition",
workspace=ws)
from azureml.core.environment import Environment
from azureml.core.conda_dependencies import CondaDependencies
# to install required packages
env = Environment('tutorial-env')
cd = CondaDependencies.create(pip_packages=['azureml-dataprep[pandas,fuse]>=1.1.14', 'azureml-defaults'], conda_packages = ['scikit-learn==0.22.1'])
env.python.conda_dependencies = cd
# Register environment to re-use later
env.register(workspace = ws)
###Output
_____no_output_____
###Markdown
Set up the environmentStart by setting up a testing environment. Import packagesImport the Python packages needed for this tutorial.
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import azureml.core
# display the core SDK version number
print("Azure ML SDK Version: ", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Deploy as web service

Deploy the model as a web service hosted in ACI. To build the correct environment for ACI, provide the following:

* A scoring script to show how to use the model
* A configuration file to build the ACI
* The model you trained before

Create scoring script

Create the scoring script, called score.py, used by the web service call to show how to use the model. You must include two required functions in the scoring script:

* The `init()` function, which typically loads the model into a global object. This function is run only once when the Docker container is started.
* The `run(input_data)` function, which uses the model to predict a value based on the input data. Inputs and outputs of the run function typically use JSON for serialization and de-serialization, but other formats are supported.
###Code
%%writefile score.py
import json
import numpy as np
import os
import pickle
import joblib
def init():
global model
# AZUREML_MODEL_DIR is an environment variable created during deployment.
# It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)
# For multiple models, it points to the folder containing all deployed models (./azureml-models)
model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'sklearn_mnist_model.pkl')
model = joblib.load(model_path)
def run(raw_data):
data = np.array(json.loads(raw_data)['data'])
# make prediction
y_hat = model.predict(data)
# you can return any data type as long as it is JSON-serializable
return y_hat.tolist()
###Output
_____no_output_____
###Markdown
Create configuration file

Create a deployment configuration file and specify the number of CPUs and gigabytes of RAM needed for your ACI container. While it depends on your model, the default of 1 core and 1 gigabyte of RAM is usually sufficient for many models. If you need more later, you have to recreate the image and redeploy the service.
###Code
from azureml.core.webservice import AciWebservice
aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
memory_gb=1,
tags={"data": "MNIST", "method" : "sklearn"},
description='Predict MNIST with sklearn')
###Output
_____no_output_____
###Markdown
Deploy in ACI

Estimated time to complete: **about 2-5 minutes**

Configure the image and deploy. The following code goes through these steps:

1. Create an environment object containing the dependencies needed by the model, retrieved from the registered environment (`tutorial-env`).
1. Create the inference configuration necessary to deploy the model as a web service using:
   * The scoring file (`score.py`)
   * The environment object created in the previous step
1. Deploy the model to the ACI container.
1. Get the web service HTTP endpoint.
###Code
%%time
from azureml.core.webservice import Webservice
from azureml.core.model import InferenceConfig
from azureml.core.environment import Environment
from azureml.core import Workspace
from azureml.core.model import Model
ws = Workspace.from_config()
model = Model(ws, 'sklearn_mnist')
myenv = Environment.get(workspace=ws, name="tutorial-env", version="1")
inference_config = InferenceConfig(entry_script="score.py", environment=myenv)
service = Model.deploy(workspace=ws,
name='sklearn-mnist-svc',
models=[model],
inference_config=inference_config,
deployment_config=aciconfig)
service.wait_for_deployment(show_output=True)
###Output
_____no_output_____
###Markdown
Get the scoring web service's HTTP endpoint, which accepts REST client calls. This endpoint can be shared with anyone who wants to test the web service or integrate it into an application.
###Code
print(service.scoring_uri)
###Output
_____no_output_____
###Markdown
Test the model Download test dataDownload the test data to the **./data/** directory
###Code
import os
from azureml.core import Dataset
from azureml.opendatasets import MNIST
data_folder = os.path.join(os.getcwd(), 'data')
os.makedirs(data_folder, exist_ok=True)
mnist_file_dataset = MNIST.get_file_dataset()
mnist_file_dataset.download(data_folder, overwrite=True)
###Output
_____no_output_____
###Markdown
Load test dataLoad the test data from the **./data/** directory created during the training tutorial.
###Code
from utils import load_data
import os
data_folder = os.path.join(os.getcwd(), 'data')
# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster
X_test = load_data(os.path.join(data_folder, 't10k-images-idx3-ubyte.gz'), False) / 255.0
y_test = load_data(os.path.join(data_folder, 't10k-labels-idx1-ubyte.gz'), True).reshape(-1)
###Output
_____no_output_____
###Markdown
Predict test dataFeed the test dataset to the model to get predictions.The following code goes through these steps:1. Send the data as a JSON array to the web service hosted in ACI. 1. Use the SDK's `run` API to invoke the service. You can also make raw calls using any HTTP tool such as curl.
###Code
import json
test = json.dumps({"data": X_test.tolist()})
test = bytes(test, encoding='utf8')
y_hat = service.run(input_data=test)
###Output
_____no_output_____
###Markdown
Examine the confusion matrixGenerate a confusion matrix to see how many samples from the test set are classified correctly. Notice the mis-classified value for the incorrect predictions.
###Code
from sklearn.metrics import confusion_matrix
conf_mx = confusion_matrix(y_test, y_hat)
print(conf_mx)
print('Overall accuracy:', np.average(y_hat == y_test))
###Output
_____no_output_____
###Markdown
Use `matplotlib` to display the confusion matrix as a graph. In this graph, the X axis represents the predicted values, and the Y axis represents the true labels. The color of each grid cell represents the error rate: the lighter the color, the higher the error rate. For example, many 5's are mis-classified as 3's, so you see a bright cell at row 5, column 3.
###Code
# normalize the diagonal cells so that they don't overpower the rest of the cells when visualized
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
fig = plt.figure(figsize=(8,5))
ax = fig.add_subplot(111)
cax = ax.matshow(norm_conf_mx, cmap=plt.cm.bone)
ticks = np.arange(0, 10, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(ticks)
ax.set_yticklabels(ticks)
fig.colorbar(cax)
plt.ylabel('true labels', fontsize=14)
plt.xlabel('predicted values', fontsize=14)
plt.savefig('conf.png')
plt.show()
###Output
_____no_output_____
###Markdown
Show predictions

Test the deployed model with a random sample of 30 images from the test data. Print the returned predictions and plot them along with the input images. Red font and an inverse image (white on black) are used to highlight the misclassified samples. Since the model accuracy is high, you might have to run the following code a few times before you see a misclassified sample.
###Code
import json
# find 30 random samples from test set
n = 30
sample_indices = np.random.permutation(X_test.shape[0])[0:n]
test_samples = json.dumps({"data": X_test[sample_indices].tolist()})
test_samples = bytes(test_samples, encoding='utf8')
# predict using the deployed model
result = service.run(input_data=test_samples)
# compare actual value vs. the predicted values:
i = 0
plt.figure(figsize = (20, 1))
for s in sample_indices:
plt.subplot(1, n, i + 1)
plt.axhline('')
plt.axvline('')
# use different color for misclassified sample
font_color = 'red' if y_test[s] != result[i] else 'black'
clr_map = plt.cm.gray if y_test[s] != result[i] else plt.cm.Greys
plt.text(x=10, y =-10, s=result[i], fontsize=18, color=font_color)
plt.imshow(X_test[s].reshape(28, 28), cmap=clr_map)
i = i + 1
plt.show()
###Output
_____no_output_____
###Markdown
You can also send a raw HTTP request to test the web service.
###Code
import requests
# send a random row from the test set to score
random_index = np.random.randint(0, len(X_test)-1)
input_data = "{\"data\": [" + str(list(X_test[random_index])) + "]}"
headers = {'Content-Type':'application/json'}
# for an AKS deployment you'd need to include the service key in the header as well
# api_key = service.get_key()
# headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
resp = requests.post(service.scoring_uri, input_data, headers=headers)
print("POST to url", service.scoring_uri)
#print("input data:", input_data)
print("label:", y_test[random_index])
print("prediction:", resp.text)
###Output
_____no_output_____
###Markdown
Clean up resourcesTo keep the resource group and workspace for other tutorials and exploration, you can delete only the ACI deployment using this API call:
###Code
service.delete()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Tutorial 2: Deploy an image classification model in Azure Container Instance (ACI)This tutorial is **part two of a two-part tutorial series**. In the [previous tutorial](img-classification-part1-training.ipynb), you trained machine learning models and then registered a model in your workspace on the cloud. Now, you're ready to deploy the model as a web service in [Azure Container Instances](https://docs.microsoft.com/azure/container-instances/) (ACI). A web service is an image, in this case a Docker image, that encapsulates the scoring logic and the model itself. In this part of the tutorial, you use Azure Machine Learning service (Preview) to:> * Set up your testing environment> * Retrieve the model from your workspace> * Test the model locally> * Deploy the model to ACI> * Test the deployed modelACI is a great solution for testing and understanding the workflow. For scalable production deployments, consider using Azure Kubernetes Service. For more information, see [how to deploy and where](https://docs.microsoft.com/azure/machine-learning/service/how-to-deploy-and-where). PrerequisitesComplete the model training in the [Tutorial 1: Train an image classification model with Azure Machine Learning](train-models.ipynb) notebook.
###Code
# If you did NOT complete the tutorial, you can instead run this cell
# This will register a model and download the data needed for this tutorial
# These prerequisites are created in the training tutorial
# Feel free to skip this cell if you completed the training tutorial
# register a model
from azureml.core import Workspace
ws = Workspace.from_config()
from azureml.core.model import Model
model_name = "sklearn_mnist"
model = Model.register(model_path="sklearn_mnist_model.pkl",
model_name=model_name,
tags={"data": "mnist", "model": "classification"},
description="Mnist handwriting recognition",
workspace=ws)
from azureml.core.environment import Environment
from azureml.core.conda_dependencies import CondaDependencies
# to install required packages
env = Environment('tutorial-env')
cd = CondaDependencies.create(pip_packages=['azureml-dataprep[pandas,fuse]>=1.1.14', 'azureml-defaults'], conda_packages = ['scikit-learn==0.22.1'])
env.python.conda_dependencies = cd
# Register environment to re-use later
env.register(workspace = ws)
###Output
_____no_output_____
###Markdown
Set up the environmentStart by setting up a testing environment. Import packagesImport the Python packages needed for this tutorial.
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import azureml.core
# display the core SDK version number
print("Azure ML SDK Version: ", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Deploy as web service

Deploy the model as a web service hosted in ACI. To build the correct environment for ACI, provide the following:

* A scoring script to show how to use the model
* A configuration file to build the ACI
* The model you trained before

Create scoring script

Create the scoring script, called score.py, used by the web service call to show how to use the model. You must include two required functions in the scoring script:

* The `init()` function, which typically loads the model into a global object. This function is run only once when the Docker container is started.
* The `run(input_data)` function, which uses the model to predict a value based on the input data. Inputs and outputs of the run function typically use JSON for serialization and de-serialization, but other formats are supported.
###Code
%%writefile score.py
import json
import numpy as np
import os
import pickle
import joblib
def init():
global model
# AZUREML_MODEL_DIR is an environment variable created during deployment.
# It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)
# For multiple models, it points to the folder containing all deployed models (./azureml-models)
model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'sklearn_mnist_model.pkl')
model = joblib.load(model_path)
def run(raw_data):
data = np.array(json.loads(raw_data)['data'])
# make prediction
y_hat = model.predict(data)
# you can return any data type as long as it is JSON-serializable
return y_hat.tolist()
###Output
_____no_output_____
###Markdown
Create configuration file

Create a deployment configuration file and specify the number of CPUs and gigabytes of RAM needed for your ACI container. While it depends on your model, the default of 1 core and 1 gigabyte of RAM is usually sufficient for many models. If you need more later, you have to recreate the image and redeploy the service.
###Code
from azureml.core.webservice import AciWebservice
aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
memory_gb=1,
tags={"data": "MNIST", "method" : "sklearn"},
description='Predict MNIST with sklearn')
###Output
_____no_output_____
###Markdown
Deploy in ACI

Estimated time to complete: **about 2-5 minutes**

Configure the image and deploy. The following code goes through these steps:

1. Create an environment object containing the dependencies needed by the model, retrieved from the registered environment (`tutorial-env`).
1. Create the inference configuration necessary to deploy the model as a web service using:
   * The scoring file (`score.py`)
   * The environment object created in the previous step
1. Deploy the model to the ACI container.
1. Get the web service HTTP endpoint.
###Code
%%time
from azureml.core.webservice import Webservice
from azureml.core.model import InferenceConfig
from azureml.core.environment import Environment
from azureml.core import Workspace
from azureml.core.model import Model
ws = Workspace.from_config()
model = Model(ws, 'sklearn_mnist')
myenv = Environment.get(workspace=ws, name="tutorial-env", version="1")
inference_config = InferenceConfig(entry_script="score.py", environment=myenv)
service = Model.deploy(workspace=ws,
name='sklearn-mnist-svc',
models=[model],
inference_config=inference_config,
deployment_config=aciconfig)
service.wait_for_deployment(show_output=True)
###Output
_____no_output_____
###Markdown
Get the scoring web service's HTTP endpoint, which accepts REST client calls. This endpoint can be shared with anyone who wants to test the web service or integrate it into an application.
###Code
print(service.scoring_uri)
###Output
_____no_output_____
###Markdown
Test the model
Download test data
Download the test data to the **./data/** directory
###Code
import os
from azureml.core import Dataset
from azureml.opendatasets import MNIST
data_folder = os.path.join(os.getcwd(), 'data')
os.makedirs(data_folder, exist_ok=True)
mnist_file_dataset = MNIST.get_file_dataset()
mnist_file_dataset.download(data_folder, overwrite=True)
###Output
_____no_output_____
###Markdown
Load test data
Load the test data from the **./data/** directory created during the training tutorial.
###Code
from utils import load_data
import os
import glob
data_folder = os.path.join(os.getcwd(), 'data')
# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster
X_test = load_data(glob.glob(os.path.join(data_folder,"**/t10k-images-idx3-ubyte.gz"), recursive=True)[0], False) / 255.0
y_test = load_data(glob.glob(os.path.join(data_folder,"**/t10k-labels-idx1-ubyte.gz"), recursive=True)[0], True).reshape(-1)
###Output
_____no_output_____
###Markdown
Predict test data
Feed the test dataset to the model to get predictions. The following code goes through these steps:
1. Send the data as a JSON array to the web service hosted in ACI.
1. Use the SDK's `run` API to invoke the service. You can also make raw calls using any HTTP tool such as curl.
###Code
import json
test = json.dumps({"data": X_test.tolist()})
test = bytes(test, encoding='utf8')
y_hat = service.run(input_data=test)
###Output
_____no_output_____
###Markdown
Examine the confusion matrix
Generate a confusion matrix to see how many samples from the test set are classified correctly. Notice the mis-classified values for the incorrect predictions.
###Code
from sklearn.metrics import confusion_matrix
conf_mx = confusion_matrix(y_test, y_hat)
print(conf_mx)
print('Overall accuracy:', np.average(y_hat == y_test))
###Output
_____no_output_____
###Markdown
Use `matplotlib` to display the confusion matrix as a graph. In this graph, the X axis represents the predicted values and the Y axis represents the actual values. The color of each cell represents the error rate: the lighter the color, the higher the error rate. For example, many 5's are mis-classified as 3's, so you see a bright cell where the true label is 5 and the predicted value is 3.
###Code
# normalize the diagonal cells so that they don't overpower the rest of the cells when visualized
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
fig = plt.figure(figsize=(8,5))
ax = fig.add_subplot(111)
cax = ax.matshow(norm_conf_mx, cmap=plt.cm.bone)
ticks = np.arange(0, 10, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(ticks)
ax.set_yticklabels(ticks)
fig.colorbar(cax)
plt.ylabel('true labels', fontsize=14)
plt.xlabel('predicted values', fontsize=14)
plt.savefig('conf.png')
plt.show()
###Output
_____no_output_____
###Markdown
Show predictions
Test the deployed model with a random sample of 30 images from the test data.
1. Print the returned predictions and plot them along with the input images. Red font and an inverse image (white on black) are used to highlight the misclassified samples. Since the model accuracy is high, you might have to run the following code a few times before you can see a misclassified sample.
###Code
import json
# find 30 random samples from test set
n = 30
sample_indices = np.random.permutation(X_test.shape[0])[0:n]
test_samples = json.dumps({"data": X_test[sample_indices].tolist()})
test_samples = bytes(test_samples, encoding='utf8')
# predict using the deployed model
result = service.run(input_data=test_samples)
# compare actual value vs. the predicted values:
i = 0
plt.figure(figsize = (20, 1))
for s in sample_indices:
plt.subplot(1, n, i + 1)
plt.axhline('')
plt.axvline('')
# use different color for misclassified sample
font_color = 'red' if y_test[s] != result[i] else 'black'
clr_map = plt.cm.gray if y_test[s] != result[i] else plt.cm.Greys
plt.text(x=10, y =-10, s=result[i], fontsize=18, color=font_color)
plt.imshow(X_test[s].reshape(28, 28), cmap=clr_map)
i = i + 1
plt.show()
###Output
_____no_output_____
###Markdown
You can also send a raw HTTP request to test the web service.
###Code
import requests
# send a random row from the test set to score
random_index = np.random.randint(0, len(X_test)-1)
input_data = "{\"data\": [" + str(list(X_test[random_index])) + "]}"
headers = {'Content-Type':'application/json'}
# for AKS deployment you'd need to include the service key in the header as well
# api_key = service.get_key()
# headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
resp = requests.post(service.scoring_uri, input_data, headers=headers)
print("POST to url", service.scoring_uri)
#print("input data:", input_data)
print("label:", y_test[random_index])
print("prediction:", resp.text)
###Output
_____no_output_____
###Markdown
Clean up resources
To keep the resource group and workspace for other tutorials and exploration, you can delete only the ACI deployment using this API call:
###Code
service.delete()
###Output
_____no_output_____ |
example/Predict.ipynb | ###Markdown
Predict
Use a neural network to predict Boston housing prices.
###Code
import sys
sys.path.append('..')
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston
from tqdm import tqdm
from easytorch.layer import Linear, ReLU, Tanh, Sequential
from easytorch.optim import SGD
from easytorch.tensor import Tensor
import easytorch.functional as F
###Output
_____no_output_____
###Markdown
1. Load the data
###Code
dataset = load_boston()
data_x = dataset.data
data_y = dataset.target
data_name = dataset.feature_names
data_x = (data_x - data_x.mean(axis=0)) / (data_x.std(axis=0) + 1e-6)
data_x.shape, data_y.shape
train_x = Tensor(data_x)
train_y = Tensor(data_y)
###Output
_____no_output_____
###Markdown
2. Build and train the model
###Code
model = Sequential(
Linear(13, 10),
ReLU(),
Linear(10, 1)
)
opt = SGD(model.parameters(), lr=3e-4)
loss_fn = F.l1_loss
loss_list = []
for _ in tqdm(range(500)):
sum_loss = 0
for x, y in zip(train_x, train_y):
pred = model(x)
loss = loss_fn(pred, y.reshape(1, 1))
sum_loss += loss.data
opt.zero_grad()
loss.backward()
opt.step()
loss_list.append(sum_loss / len(train_x))
###Output
100%|██████████| 500/500 [00:44<00:00, 11.21it/s]
###Markdown
3. Results
###Code
plt.plot(loss_list)
plt.show()
pred = model(train_x)
loss = loss_fn(pred, train_y.reshape(-1, 1)).mean()
loss
###Output
_____no_output_____ |
digits_GAN.ipynb | ###Markdown
Digits generator
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Reshape, Flatten
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import UpSampling2D, Conv2DTranspose, Conv2D
from keras.optimizers import SGD, RMSprop
###Output
Using TensorFlow backend.
###Markdown
Data
###Code
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
###Output
Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz
###Markdown
Visualisation
###Code
def reshape_image(pixels):
npix = int(np.sqrt(pixels.shape[0]))
return np.reshape(pixels, (npix, npix))
def draw_image(image):
image = np.reshape(image, (image.shape[0], image.shape[1]))
plt.imshow(image, cmap = plt.get_cmap('gray'))
plt.axis('off')
plt.show()
# draw_image(reshape_image(mnist.train.images[0]))
X_train = mnist.train.images.reshape(-1,
28,
28,
1).astype(np.float32)
y_train = mnist.train.labels
draw_image(X_train[10]), y_train[10]
X_train.shape
###Output
_____no_output_____
###Markdown
GAN example
Generator
###Code
gen_dropout = 0.1
gen_input_dim = 7
gen_depth = 256
gen_noise_dim = 100
gennet = Sequential()
# Input
# fully-connected layer
gennet.add(Dense(gen_input_dim * gen_input_dim * gen_depth,
input_dim=gen_noise_dim))
gennet.add(BatchNormalization(momentum=0.9))
gennet.add(Activation('relu'))
gennet.add(Reshape((gen_input_dim, gen_input_dim, gen_depth)))
gennet.add(Dropout(gen_dropout))
# Deconvolution layers
gennet.add(UpSampling2D())
gennet.add(Conv2DTranspose(int(gen_depth / 2), 5, padding='same'))
gennet.add(BatchNormalization(momentum=0.9))
gennet.add(Activation('relu'))
gennet.add(UpSampling2D())
gennet.add(Conv2DTranspose(int(gen_depth / 4), 5, padding='same'))
gennet.add(BatchNormalization(momentum=0.9))
gennet.add(Activation('relu'))
gennet.add(Conv2DTranspose(int(gen_depth / 8), 5, padding='same'))
gennet.add(BatchNormalization(momentum=0.9))
gennet.add(Activation('relu'))
# Output
gennet.add(Conv2DTranspose(1, 5, padding='same'))
gennet.add(Activation('sigmoid'))
gennet.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_1 (Dense) (None, 12544) 1266944
_________________________________________________________________
batch_normalization_1 (Batch (None, 12544) 50176
_________________________________________________________________
activation_1 (Activation) (None, 12544) 0
_________________________________________________________________
reshape_1 (Reshape) (None, 7, 7, 256) 0
_________________________________________________________________
dropout_1 (Dropout) (None, 7, 7, 256) 0
_________________________________________________________________
up_sampling2d_1 (UpSampling2 (None, 14, 14, 256) 0
_________________________________________________________________
conv2d_transpose_1 (Conv2DTr (None, 14, 14, 128) 819328
_________________________________________________________________
batch_normalization_2 (Batch (None, 14, 14, 128) 512
_________________________________________________________________
activation_2 (Activation) (None, 14, 14, 128) 0
_________________________________________________________________
up_sampling2d_2 (UpSampling2 (None, 28, 28, 128) 0
_________________________________________________________________
conv2d_transpose_2 (Conv2DTr (None, 28, 28, 64) 204864
_________________________________________________________________
batch_normalization_3 (Batch (None, 28, 28, 64) 256
_________________________________________________________________
activation_3 (Activation) (None, 28, 28, 64) 0
_________________________________________________________________
conv2d_transpose_3 (Conv2DTr (None, 28, 28, 32) 51232
_________________________________________________________________
batch_normalization_4 (Batch (None, 28, 28, 32) 128
_________________________________________________________________
activation_4 (Activation) (None, 28, 28, 32) 0
_________________________________________________________________
conv2d_transpose_4 (Conv2DTr (None, 28, 28, 1) 801
_________________________________________________________________
activation_5 (Activation) (None, 28, 28, 1) 0
=================================================================
Total params: 2,394,241
Trainable params: 2,368,705
Non-trainable params: 25,536
_________________________________________________________________
###Markdown
Discriminator
###Code
disc_depth = 64
disc_dropout = 0.1
discnet = Sequential()
# Input
discnet.add(Conv2D(disc_depth, 5, strides=2, input_shape=(28, 28, 1),
padding='same'))
discnet.add(Activation('relu'))
discnet.add(Dropout(disc_dropout))
discnet.add(Conv2D(disc_depth*2, 5, strides=2, padding='same'))
discnet.add(Activation('relu'))
discnet.add(Dropout(disc_dropout))
discnet.add(Conv2D(disc_depth*4, 5, strides=2, padding='same'))
discnet.add(Activation('relu'))
discnet.add(Dropout(disc_dropout))
discnet.add(Conv2D(disc_depth*8, 5, strides=2, padding='same'))
discnet.add(Activation('relu'))
discnet.add(Dropout(disc_dropout))
# Out: scalar estimation of probability
discnet.add(Flatten())
discnet.add(Dense(1))
discnet.add(Activation('sigmoid'))
discnet.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_1 (Conv2D) (None, 14, 14, 64) 1664
_________________________________________________________________
activation_6 (Activation) (None, 14, 14, 64) 0
_________________________________________________________________
dropout_2 (Dropout) (None, 14, 14, 64) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 7, 7, 128) 204928
_________________________________________________________________
activation_7 (Activation) (None, 7, 7, 128) 0
_________________________________________________________________
dropout_3 (Dropout) (None, 7, 7, 128) 0
_________________________________________________________________
conv2d_3 (Conv2D) (None, 4, 4, 256) 819456
_________________________________________________________________
activation_8 (Activation) (None, 4, 4, 256) 0
_________________________________________________________________
dropout_4 (Dropout) (None, 4, 4, 256) 0
_________________________________________________________________
conv2d_4 (Conv2D) (None, 2, 2, 512) 3277312
_________________________________________________________________
activation_9 (Activation) (None, 2, 2, 512) 0
_________________________________________________________________
dropout_5 (Dropout) (None, 2, 2, 512) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 2048) 0
_________________________________________________________________
dense_2 (Dense) (None, 1) 2049
_________________________________________________________________
activation_10 (Activation) (None, 1) 0
=================================================================
Total params: 4,305,409
Trainable params: 4,305,409
Non-trainable params: 0
_________________________________________________________________
###Markdown
Discriminator model
###Code
discmodel = Sequential()
discmodel.add(discnet)
discmodel.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.0002, clipvalue=1.0, decay=6e-8),
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Adversarial model
###Code
advmodel = Sequential()
advmodel.add(gennet)
advmodel.add(discnet)
advmodel.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.0001, clipvalue=1.0, decay=3e-8),
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Training
###Code
batch_size = 256
train_iters = 5000
disc_losses = {"loss": [], "acc": []}
adv_losses = {"loss": [], "acc": []}
log_i = 0
for i in range(train_iters):
# Get random real images
images_true = X_train[np.random.randint(0,
X_train.shape[0],
size=batch_size)]
# Generate images from noise
noise = np.random.uniform(-1.0, 1.0,
size=[batch_size, gen_noise_dim])
images_fake = gennet.predict(noise)
# Compose training data for discriminator
x = np.concatenate((images_true, images_fake))
y = np.concatenate((np.ones((batch_size, 1)),
np.zeros((batch_size, 1))))
# Train discriminator on composed data
dics_loss = discmodel.train_on_batch(x, y)
disc_losses["loss"].append(dics_loss[0])
disc_losses["acc"].append(dics_loss[1])
    # Compose training data for adversarial net
noise = np.random.uniform(-1.0, 1.0,
size=[batch_size, gen_noise_dim])
y = np.ones([batch_size, 1])
# Train adversarial net on composed data
adv_loss = advmodel.train_on_batch(noise, y)
adv_losses["loss"].append(adv_loss[0])
adv_losses["acc"].append(adv_loss[1])
# Print results
if i % 100 == 0:
print("{:4d}th batch of {:4d}|".format(i, train_iters) + \
"Disc loss {:.3f} |Disc acc {:.3f} |Adv loss {:.3f} |Adv acc {:.3}".format(dics_loss[0],
dics_loss[1],
adv_loss[0],
adv_loss[1]))
###Output
0th batch of 5000|Disc loss 0.667 |Disc acc 0.588 |Adv loss 0.701 |Adv acc 0.488
100th batch of 5000|Disc loss 0.667 |Disc acc 0.547 |Adv loss 0.662 |Adv acc 0.66
200th batch of 5000|Disc loss 0.673 |Disc acc 0.520 |Adv loss 1.251 |Adv acc 0.0
300th batch of 5000|Disc loss 0.664 |Disc acc 0.545 |Adv loss 1.360 |Adv acc 0.00391
400th batch of 5000|Disc loss 0.656 |Disc acc 0.586 |Adv loss 1.661 |Adv acc 0.0
500th batch of 5000|Disc loss 0.632 |Disc acc 0.619 |Adv loss 1.484 |Adv acc 0.00391
600th batch of 5000|Disc loss 0.705 |Disc acc 0.557 |Adv loss 1.293 |Adv acc 0.00781
700th batch of 5000|Disc loss 0.619 |Disc acc 0.676 |Adv loss 0.985 |Adv acc 0.156
800th batch of 5000|Disc loss 0.607 |Disc acc 0.688 |Adv loss 0.815 |Adv acc 0.391
900th batch of 5000|Disc loss 0.627 |Disc acc 0.643 |Adv loss 0.650 |Adv acc 0.633
1000th batch of 5000|Disc loss 0.648 |Disc acc 0.629 |Adv loss 0.979 |Adv acc 0.199
1100th batch of 5000|Disc loss 0.646 |Disc acc 0.615 |Adv loss 0.644 |Adv acc 0.664
1200th batch of 5000|Disc loss 0.614 |Disc acc 0.674 |Adv loss 0.801 |Adv acc 0.367
1300th batch of 5000|Disc loss 0.617 |Disc acc 0.666 |Adv loss 0.874 |Adv acc 0.289
1400th batch of 5000|Disc loss 0.616 |Disc acc 0.695 |Adv loss 0.955 |Adv acc 0.215
1500th batch of 5000|Disc loss 0.723 |Disc acc 0.566 |Adv loss 1.917 |Adv acc 0.00391
1600th batch of 5000|Disc loss 0.658 |Disc acc 0.598 |Adv loss 1.047 |Adv acc 0.113
1700th batch of 5000|Disc loss 0.640 |Disc acc 0.615 |Adv loss 0.784 |Adv acc 0.414
1800th batch of 5000|Disc loss 0.637 |Disc acc 0.650 |Adv loss 0.663 |Adv acc 0.574
1900th batch of 5000|Disc loss 0.627 |Disc acc 0.660 |Adv loss 0.801 |Adv acc 0.348
2000th batch of 5000|Disc loss 0.641 |Disc acc 0.609 |Adv loss 0.652 |Adv acc 0.602
2100th batch of 5000|Disc loss 0.731 |Disc acc 0.527 |Adv loss 1.460 |Adv acc 0.0
2200th batch of 5000|Disc loss 0.701 |Disc acc 0.572 |Adv loss 1.162 |Adv acc 0.0352
2300th batch of 5000|Disc loss 0.693 |Disc acc 0.594 |Adv loss 1.272 |Adv acc 0.0312
2400th batch of 5000|Disc loss 0.746 |Disc acc 0.545 |Adv loss 1.356 |Adv acc 0.0156
2500th batch of 5000|Disc loss 0.697 |Disc acc 0.559 |Adv loss 1.411 |Adv acc 0.00391
2600th batch of 5000|Disc loss 0.710 |Disc acc 0.541 |Adv loss 1.246 |Adv acc 0.0234
2700th batch of 5000|Disc loss 0.706 |Disc acc 0.545 |Adv loss 1.299 |Adv acc 0.0273
2800th batch of 5000|Disc loss 0.732 |Disc acc 0.518 |Adv loss 1.295 |Adv acc 0.0195
2900th batch of 5000|Disc loss 0.705 |Disc acc 0.531 |Adv loss 1.091 |Adv acc 0.0469
3000th batch of 5000|Disc loss 0.715 |Disc acc 0.525 |Adv loss 1.188 |Adv acc 0.0234
3100th batch of 5000|Disc loss 0.730 |Disc acc 0.537 |Adv loss 1.153 |Adv acc 0.0234
3200th batch of 5000|Disc loss 0.729 |Disc acc 0.518 |Adv loss 1.346 |Adv acc 0.0
3300th batch of 5000|Disc loss 0.720 |Disc acc 0.557 |Adv loss 1.104 |Adv acc 0.0352
3400th batch of 5000|Disc loss 0.740 |Disc acc 0.516 |Adv loss 1.330 |Adv acc 0.0
3500th batch of 5000|Disc loss 0.713 |Disc acc 0.561 |Adv loss 1.235 |Adv acc 0.0117
3600th batch of 5000|Disc loss 0.739 |Disc acc 0.531 |Adv loss 1.183 |Adv acc 0.00781
3700th batch of 5000|Disc loss 0.709 |Disc acc 0.527 |Adv loss 0.485 |Adv acc 0.898
3800th batch of 5000|Disc loss 0.663 |Disc acc 0.609 |Adv loss 0.718 |Adv acc 0.453
3900th batch of 5000|Disc loss 0.658 |Disc acc 0.600 |Adv loss 0.610 |Adv acc 0.652
4000th batch of 5000|Disc loss 0.660 |Disc acc 0.607 |Adv loss 0.752 |Adv acc 0.395
4100th batch of 5000|Disc loss 0.655 |Disc acc 0.609 |Adv loss 0.700 |Adv acc 0.484
4200th batch of 5000|Disc loss 0.682 |Disc acc 0.547 |Adv loss 0.801 |Adv acc 0.293
4300th batch of 5000|Disc loss 0.667 |Disc acc 0.574 |Adv loss 0.761 |Adv acc 0.383
4400th batch of 5000|Disc loss 0.662 |Disc acc 0.588 |Adv loss 0.742 |Adv acc 0.438
4500th batch of 5000|Disc loss 0.660 |Disc acc 0.615 |Adv loss 0.717 |Adv acc 0.488
4600th batch of 5000|Disc loss 0.653 |Disc acc 0.596 |Adv loss 0.685 |Adv acc 0.508
4700th batch of 5000|Disc loss 0.665 |Disc acc 0.600 |Adv loss 0.730 |Adv acc 0.43
4800th batch of 5000|Disc loss 0.667 |Disc acc 0.588 |Adv loss 0.724 |Adv acc 0.441
4900th batch of 5000|Disc loss 0.654 |Disc acc 0.592 |Adv loss 0.740 |Adv acc 0.41
###Markdown
Discriminator losses
###Code
plt.plot(list(range(train_iters)), disc_losses["loss"])
plt.grid()
###Output
_____no_output_____
###Markdown
Adversarial net losses
###Code
plt.plot(list(range(train_iters)), adv_losses["loss"])
plt.grid()
###Output
_____no_output_____
###Markdown
Accuracies
###Code
plt.plot(list(range(train_iters)), disc_losses["acc"])
plt.plot(list(range(train_iters)), adv_losses["acc"])
plt.grid()
###Output
_____no_output_____
###Markdown
An attempt to generate images
###Code
noise = np.random.uniform(-1.0, 1.0, size=[10, gen_noise_dim])
fake_images = gennet.predict(noise)
for i in range(9):
plt.subplot(330 + 1 + i)
plt.imshow(fake_images[i].reshape(28, 28), cmap=plt.get_cmap('gray'))
plt.axis('off')
plt.show()
###Output
_____no_output_____ |
notebooks/4_01--tuning_kernels_on_OJ_287_long_cadence.ipynb | ###Markdown
4. Tuning the kernel "by eye" I. Hand-tuning GP kernels on OJ 287 long cadence
M. Gully-Santiago, July 2019
Kepler/K2 GO Office
###Code
import numpy as np
from astropy.io import fits
import copy
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.timeseries import LombScargle
import scope
import lightkurve as lk
import everest
###Output
_____no_output_____
###Markdown
Let's turn off `DEBUG` logging messages.
###Code
import logging
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
bok_log = logging.getLogger('bokeh')
bok_log.setLevel(logging.WARNING)
logging.basicConfig()
logging.getLogger().setLevel(logging.WARNING)
###Output
_____no_output_____
###Markdown
What does the background look like in a PSD? First let's get nearby star background pixels.
###Code
search_result = lk.search_targetpixelfile('OJ 287', mission='K2', cadence='long', campaign=5, radius=21*u.arcminute)
search_result
tpfs = search_result.download_all()
###Output
_____no_output_____
###Markdown
Data pre-processing
Make sure all TPFs share the same cadence quality mask.
###Code
all_cadence_set = set(tpfs[0].cadenceno)
for tpf in tpfs:
all_cadence_set.intersection_update(set(tpf.cadenceno))
cad_array = np.array(list(all_cadence_set))
tpf = tpfs[0]
tpf = tpf[np.in1d(tpf.cadenceno, cad_array)]
len(tpf.time)
lc = tpf.to_lightcurve()
###Output
_____no_output_____
###Markdown
What to do about non-uniform time sampling? Nothing for now! You could fill the gaps if you want.
###Code
t_gaps = ((lc.time[1:] - lc.time[0:-1])*u.day).to(u.hour)
np.max(t_gaps)
###Output
_____no_output_____
###Markdown
The largest gap is four consecutive 30-minute cadences.
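If we did want to fill those gaps, one simple option (a sketch only, not used in the rest of this notebook) would be to interpolate the flux onto a uniform 30-minute grid:
###Code
# Hedged sketch: resample onto a uniform 30-minute cadence with linear interpolation.
# Not applied below; the analysis keeps the native (gappy) sampling.
dt = (30.0 * u.minute).to(u.day).value
t_uniform = np.arange(lc.time[0], lc.time[-1], dt)
flux_uniform = np.interp(t_uniform, lc.time, lc.flux)
###Output
_____no_output_____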
###Code
t = (lc.time*u.day).to(u.second).value
NN = len(lc.time)
###Output
_____no_output_____
###Markdown
Compute the power spectrum for each empty pixel...and explore the range of values.
###Code
fake_omega = np.logspace(-6, -4, base=10)
fake_pow = fake_omega**(-2) *1e5
f = np.fft.rfftfreq(len(t), t[1] - t[0])
def bkg_flux_model(t, y, y_unc, poly_order=2):
'''Makes a polynomial model of the already-subtracted background'''
A = np.vander(t, 1+poly_order)
ATA = np.dot(A.T, A / y_unc[:, None]**2)
#sigma_w = np.linalg.inv(ATA)
mean_w = np.linalg.solve(ATA, np.dot(A.T, y/y_unc**2))
return np.dot(A, mean_w)
# Compute the LS based power spectrum estimates
power_ls = []
power_white_noise = []
power_bkg = []
for tpf in tpfs:
tpf = tpf[np.in1d(tpf.cadenceno, cad_array)]
background_mask = ~tpf.create_threshold_mask(threshold=0.001, reference_pixel=None)
ncad, nx, ny = tpf.flux.shape
y_vector = tpf.flux.reshape(ncad, nx*ny)[:, background_mask.reshape(nx*ny)]
noise_vector = tpf.flux_err.reshape(ncad, nx*ny)[:, background_mask.reshape(nx*ny)]
bkg_vector = tpf.flux_bkg.reshape(ncad, nx*ny)[:, background_mask.reshape(nx*ny)]
i=0
for pixel_lc in y_vector.T:
model = LombScargle(t, pixel_lc)
power_ls.append(model.power(f[1:-1], method="fast", normalization="psd"))
for noise_amp in noise_vector.T:
y0 = np.random.normal(loc=np.zeros(len(noise_amp)), scale=noise_amp)
model = LombScargle(t, y0)
power_white_noise.append(model.power(f[1:-1], method="fast", normalization="psd"))
for pixel_bkg in bkg_vector.T:
y_hat = bkg_flux_model(t, pixel_bkg, pixel_bkg/100.0, poly_order=3)
model = LombScargle(t, pixel_bkg-y_hat)
power_bkg.append(model.power(f[1:-1], method="fast", normalization="psd"))
power_ls = np.array(power_ls)
power_white_noise = np.array(power_white_noise)
power_bkg = np.array(power_bkg)
# >>> To get the LS based PSD in the correct units, normalize by N <<<
power_ls /= NN
power_white_noise /= NN
power_bkg /= NN
###Output
_____no_output_____
###Markdown
The AGN target of interest.
###Code
tpf = tpfs[0]
tpf = tpf[np.in1d(tpf.cadenceno, cad_array)]
aperture_mask = tpf.create_threshold_mask()
tpf.plot(aperture_mask=aperture_mask);
lc = tpf.to_lightcurve(aperture_mask=aperture_mask)
n_pix = aperture_mask.sum()
n_pix
model = LombScargle(t, lc.flux/n_pix)
power_AGN = model.power(f[1:-1], method="fast", normalization="psd") / NN
bkg_flux = np.sum(tpf.flux_bkg[:, aperture_mask ], axis=1)
bkg_flux_err = np.sqrt(np.sum(tpf.flux_bkg_err[:, aperture_mask ]**2, axis=1))
ax = (lc/n_pix).plot(normalize=False)
plt.ylim(0)
t_motion = (6.0*u.hour).to(u.second)
plt.figure(figsize=(9,6))
plt.axvline(1.0/t_motion.value, color='#aaaaaa', linestyle='dashed', label='6 hours', alpha=0.5)
percentile = 50
#plt.step(f[1:-1], np.nanpercentile(power_bkg,percentile, axis=0), "-", label="Background Estimate", color='#2ecc71')
plt.step(f[1:-1], np.nanpercentile(power_ls,percentile, axis=0), label="Ostensibly empty pixels", color='#3498db', where='mid')
plt.fill_between(f[1:-1],
np.nanpercentile(power_ls,5, axis=0),
np.nanpercentile(power_ls,95, axis=0),
label=None, color='#3498db', alpha=0.3, step='mid')
plt.plot(f[1:-1], np.nanpercentile(power_white_noise,percentile, axis=0), "-", label="White Noise", color='#e74c3c')
plt.fill_between(f[1:-1],
np.nanpercentile(power_white_noise,5, axis=0),
np.nanpercentile(power_white_noise,95, axis=0),
label=None, color='#e74c3c', alpha=0.1, step='mid')
plt.step(f[1:-1], power_AGN, label="OJ 287 Long Cadence", color='#8e44ad', lw=1)
plt.yscale("log")
plt.xscale("log")
plt.xlim(f[1:].min(), f.max())
plt.ylabel("power [$\propto \mathrm{ppm}^2/\mathrm{Hz}$]")
plt.xlabel("frequency [Hz]")
plt.plot(fake_omega, fake_pow/np.min(fake_pow)*10, 'k-', label='$S \propto f^{-2}$')
plt.legend(loc='best')
plt.savefig('OJ287_C05_PSD_v_empty_pixels.png', dpi=300, bbox_inches='tight');
###Output
_____no_output_____
###Markdown
Model the PSD with a Damped Random Walk
###Code
t_char = (0.3*u.year).to(u.second)
c_val = 2.0*np.pi * 1/(t_char)
c_val.to(u.Hz) # units of Hertz
###Output
_____no_output_____
###Markdown
What about the value of $a$? It should have units of $(\mathrm{dimensionless\; power}\cdot \mathrm{Hz})$, so it should be comparable to $c$ to achieve near-unity power.
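For reference, the celerite documentation gives the PSD of a `RealTerm` as $S(\omega) = \sqrt{2/\pi}\, \frac{a\,c}{c^2 + \omega^2}$, so the spectrum is flat (at a level set by $a/c$) below $\omega \approx c$ and falls off as $\omega^{-2}$ above it.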
###Code
a_val = c_val*5e6
a_val.to(u.Hz)
###Output
_____no_output_____
###Markdown
We can use `celerite` to simulate the PSD.
###Code
from astropy.stats import LombScargle
import celerite
from celerite import terms
###Output
_____no_output_____
###Markdown
Note that celerite follows the numpy convention of **natural logs** $\ln{}$ for all $\log{}$ functions, unless explicitly providing base 10.
> Args:
> - log_a (float): The log of the amplitude of the term.
> - log_c (float): The log of the exponent of the term.
###Code
sigma = np.median(lc.flux_err)
true_logc, true_loga = np.log(c_val.value) , np.log(a_val.value)
kernel = terms.RealTerm(log_a=true_loga, log_c=true_logc) + terms.JitterTerm(log_sigma=np.log(sigma))
gp = celerite.GP(kernel, fit_mean=True, mean=0)
gp.compute(t)
y_many = gp.sample(size=1000)
power_draw = []
for y0 in y_many:
model = LombScargle(t, y0)
power_draw.append(model.power(f[1:-1], method="fast", normalization="psd"))
power_draw = np.array(power_draw)
# >>> To get the LS based PSD in the correct units, normalize by N <<<
power_draw /= NN
power_true = kernel.get_psd(2*np.pi*f) / (0.5 * (t[-1] - t[0]))
###Output
_____no_output_____
###Markdown
Let's plot the model in the time domain.
###Code
plt.figure(figsize=(18,5))
plt.step(lc.time, y_many[9,:],label='simulation', color='#f39c12')
plt.axhline(0, linestyle='dashed', color='k')
plt.title('Simulated noise draw of per-pixel artifact signal with $S \propto f^{-2}$ and typical read noise')
plt.ylabel('e/s')
plt.xlabel('Time (days)');
###Output
_____no_output_____
###Markdown
Indeed, the draw from the random walk process has more high-frequency structure than the genuine source. The power law slope of OJ 287 is more negative than $-2$.
###Code
plt.figure(figsize=(15,9))
plt.axvline(1.0/t_motion.value, color='#aaaaaa', linestyle='dashed', label='6 hours', alpha=0.5)
percentile = 50
#plt.step(f[1:-1], np.nanpercentile(power_bkg,percentile, axis=0), "-", label="Background Estimate", color='#2ecc71')
plt.step(f[1:-1], np.nanpercentile(power_ls,percentile, axis=0), label="Ostensibly empty pixels", color='#3498db', where='mid')
plt.fill_between(f[1:-1],
np.nanpercentile(power_ls,5, axis=0),
np.nanpercentile(power_ls,95, axis=0),
label=None, color='#3498db', alpha=0.3, step='mid')
plt.plot(f[1:-1], np.nanpercentile(power_white_noise,percentile, axis=0), "-", label="White Noise", color='#e74c3c')
plt.fill_between(f[1:-1],
np.nanpercentile(power_white_noise,5, axis=0),
np.nanpercentile(power_white_noise,95, axis=0),
label=None, color='#e74c3c', alpha=0.1, step='mid')
plt.step(f[1:-1], power_AGN, label="OJ 287 Long Cadence", color='#8e44ad', lw=1)
plt.step(f[1:-1], np.nanmedian(power_draw, axis=0), label="Noise model", color='#f39c12', lw=1)
plt.step(f, power_true, '--', color='#f39c12', lw=1,label="Analytic model", )
plt.yscale("log")
plt.xscale("log")
plt.xlim(f[1:].min(), f.max())
plt.ylabel("power [$\propto \mathrm{ppm}^2/\mathrm{Hz}$]")
plt.xlabel("frequency [Hz]")
plt.plot(fake_omega, fake_pow/np.min(fake_pow)*10, 'k-', label='$S \propto f^{-2}$')
plt.legend(loc='best')
plt.savefig('OJ287_C05_PSD_v_empty_model.png', dpi=300, bbox_inches='tight');
###Output
_____no_output_____ |
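###Markdown
As a rough check of the claim that the OJ 287 power-law slope is steeper than $-2$, one could fit a straight line to the periodogram in log-log space (a sketch, not part of the original analysis; the low-frequency cut at 1e-5 Hz is an arbitrary choice):
###Code
# Hedged sketch: least-squares estimate of the OJ 287 power-law slope in log-log space,
# using only the low-frequency part of the periodogram.
good = (f[1:-1] < 1e-5) & (power_AGN > 0)
slope, intercept = np.polyfit(np.log10(f[1:-1][good]), np.log10(power_AGN[good]), 1)
print("Estimated power-law slope: {:.2f}".format(slope))
###Output
_____no_output_____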
training/homogeneity-artists.ipynb | ###Markdown
Load data.
###Code
training_data_folder = '/Users/pasquale/git/recommender/training_data'
emb_folder = '/Users/pasquale/git/music-embeddings'
doremus_data.init(training_data_folder, emb_folder)
vectors, uris, lbs, heads, heads_print = doremus_data.get_embeddings('artist')
pd.DataFrame(heads_print)
all_training = doremus_data.all_training('artist')
_l = 3
for t in all_training:
temp_playlists = []
for pl in t['playlists']:
missing = [art not in uris for art in pl['data']]
pl['data'] = np.delete(pl['data'], np.where(missing))
if len(pl['data']) < 6 :
continue
for i in np.arange(len(pl['data']) - _l):
temp_playlists.append(pl['data'][i:i+_l])
t['groups'] = [{'name':str(index), 'data': pl} for index, pl in enumerate(temp_playlists)]
print('%s\t\t%d' % (t['name'], len(temp_playlists)))
def training_stats(t):
num_playlists = len(t['playlists'])
num_track = [len(p['data']) for p in t['playlists']]
distinct = len(np.unique(np.concatenate([p['data'] for p in t['playlists']])))
return num_playlists, np.sum(num_track), np.mean(num_track), distinct
names = [t['name'] for t in all_training]
# for t in all_training:
# num_playlists, num_track = training_stats(t)
# print('%s\t\t%d' % (t['name'], num_playlists))
pd.DataFrame([training_stats(t) for t in all_training], index=names, columns='playlists,tracks,tracks per pl, distinct tracks'.split(','))
###Output
_____no_output_____
###Markdown
Data pre-processing
###Code
negVector = -2. * np.ones_like(vectors[0], dtype=np.float32)
def get_embs(x, masked=False):
# uri to embedding
v = vectors[np.argwhere(uris == x)]
if v.size == 0:
print(x)
result = np.array(negVector)
else:
result = np.array(v[0][0])
if masked:
result = np.ma.array(result, mask=result < -1.)
return result
def get_label(x):
l = lbs[np.argwhere(uris == x)]
return l[0][0] if l.size > 0 else 'none'
np.set_printoptions(2)
def compute_playlist_stats(playlist, to_print=False):
pl = playlist['data']
embeddings = np.array([get_embs(xi) for xi in pl])
emb_len = len(embeddings[0])
ma_embeddings = np.ma.array(embeddings, mask=embeddings < -1.)
# I do not want to mean dimensions with single values
mul_values = np.where(np.sum(embeddings >= -1., axis=0) > 1, False, True)
mul_values = np.repeat([mul_values], len(pl), axis=0)
ma_embeddings = np.ma.array(ma_embeddings, mask=mul_values)
_mean = ma_embeddings.mean(axis=0)
_median = np.ma.median(ma_embeddings, axis=0)
_min = np.ma.min(ma_embeddings, axis=0)
_max = np.ma.max(ma_embeddings, axis=0)
_std = np.ma.std(ma_embeddings, axis=0)
if to_print:
plt.errorbar(np.arange(len(_mean)), _mean, _std, fmt='ok',
ecolor='black', elinewidth=1.5, lw=3, capsize=2)
plt.errorbar(np.arange(len(_mean)), _mean, [_mean - _min, _max - _mean],
fmt='.k', ecolor='gray', elinewidth=0.5, lw=1, capsize=1)
plt.errorbar(np.arange(len(_mean)), _median, fmt='_g', lw=1)
plt.xticks(range(len(heads)), heads, rotation=40)
plt.show()
return _mean, _median, _std
###Output
_____no_output_____
###Markdown
Sample playlist
###Code
base = all_training[3]
print('Base: ' + base['name'])
pl = base['groups'][1023]
print('Playlist: ' + pl['name'])
m, md, s = compute_playlist_stats(pl, True)
for d in pl['data']:
print(d.replace('data', 'overture'))
print(get_label(d))
print(np.ma.array(get_embs(d), mask=get_embs(d) < -1.))
###Output
Base: spotify_pl
Playlist: 1023
###Markdown
Standard Deviation among all playlists
###Code
population_tot = np.sum([len(pl['data']) for tr in all_training for pl in tr['groups'] ])
std = {}
population = {}
mean = {}
stdw = {} # std within
stdb = {} # std between
for index, tr in enumerate(all_training):
name = tr['name']
std[name] = []
population[name] = []
mean[name] = []
for index, pl in enumerate(tr['groups']):
_mean, _median, _std = compute_playlist_stats(pl, False)
pop = len(pl['data'])
population[name].append(pop)
mean[name].append(_mean)
ww = (pop - 1) / (population_tot - 1)
std[name].append((_std**2)*ww)
stdw[name] = np.ma.sum(std[name], axis=0).filled(0)
weighted_means = [np.ma.mean(mean[name], axis=0)*np.sum(population[name]) for name in mean]
mtot = np.ma.sum(weighted_means, axis=0)
mtot /= population_tot
fig, ax = plt.subplots(figsize=(20,5))
width = 0.2
pos = np.arange(len(vectors[0]))
colors = ['#3668C9', '#DA3B21', '#FD9827', '#1D9424']
for index, tr in enumerate(all_training):
name = tr['name']
ww = np.array([pop / (population_tot - 1) for pop in population[name]])
mg = np.ma.power(np.ma.array(mean[name]).filled(mtot) - mtot, 2)
stdb[name] = np.ma.sum(mg * ww.reshape(len(ww),1), axis=0)
plt.bar([p + index * width for p in pos],
stdb[name],
width,
alpha=0.5,
color=colors[index],
label=name + ' (sdt between)')
plt.bar([p + index * width for p in pos],
stdw[name] ,
width,
edgecolor='#000000',
alpha=.5,
color=colors[index],
label=name + ' (sdt within)')
flat_std = [stdw[name] for name in stdw]
mstd = np.ma.mean(flat_std, axis=0)
sstd = np.ma.std(flat_std, axis=0)
smin = np.ma.min(flat_std, axis=0)
smax = np.ma.max(flat_std, axis=0)
ax.plot(pos, mstd, '--', label='mean')
plt.xticks(range(len(heads)), heads, rotation=40)
ax.set_ylabel('Standard Deviation')
ax.set_xticks([p + 1.5 * width for p in pos])
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, loc='upper left')
plt.show()
print('Standard Deviation: mean' )
print(np.array(mstd))
print('Standard Deviation: minimum' )
print(np.array(smin))
flat_std_b = [stdb[name] for name in stdb]
mstd_b = np.ma.mean(flat_std_b, axis=0)
print('Standard Deviation between: mean' )
print(np.array(mstd_b))
###Output
_____no_output_____
###Markdown
Explanation
When the standard deviation _within_ (black border) is smaller than the standard deviation _between_ (no border), this means that for that dimension the values are more homogeneous _inside_ the group than _outside_.
If this difference in homogeneity inside/outside is important, I can state that this dimension drives the playlist generation.
Graphs property by property
###Code
def display_graph(feat):
pos = np.where(np.array(heads) == feat)[0]
pos_slide = np.arange(len(pos))
fig, ax = plt.subplots(figsize=(20,5))
for index, tr in enumerate(all_training):
name = tr['name']
plt.bar([p + index * width for p in pos_slide],
stdb[name][pos],
width,
alpha=0.5,
color=colors[index],
label=name + ' (sdt between)')
plt.bar([p + index * width for p in pos_slide],
stdw[name][pos] ,
width,
alpha=0.5,
edgecolor='#000000',
color=colors[index],
label=name + ' (sdt within)')
ax.plot(pos_slide, mstd[pos], '--', label='mean')
plt.xticks(pos_slide, np.array(heads)[pos], rotation=40)
ax.set_ylabel('Standard Deviation')
ax.set_xticks([p + 1.5 * width for p in pos_slide])
# handles, labels = ax.get_legend_handles_labels()
# ax.legend(handles, labels, loc='upper left')
plt.show()
for _f in heads_print[0]:
display_graph(_f)
###Output
_____no_output_____
###Markdown
For concerts
###Code
flat_std = [s for name in ['pp_concerts','itema3_concerts'] for s in std[name]]
mstd = np.ma.mean(flat_std, axis=0)
sstd = np.ma.std(flat_std, axis=0)
smin = np.ma.min(flat_std, axis=0)
smax = np.ma.max(flat_std, axis=0)
print('Standard Deviation: mean' )
print(np.array(mstd))
print('Standard Deviation: minimum' )
print(np.array(smin))
print('Standard Deviation: maximum' )
print(np.array(smax))
###Output
Standard Deviation: mean
[7.56e-08 1.69e-08 4.53e-08 2.50e-07 2.46e-07 7.11e-08 1.27e-08 1.73e-08
8.84e-09 5.24e-09 2.42e-08 5.97e-09 4.89e-08 1.39e-08 2.34e-07 3.16e-07
2.67e-07]
Standard Deviation: minimum
[0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 1.49e-18 1.58e-17 2.49e-16
0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 1.01e-12 9.83e-14
1.87e-14]
Standard Deviation: maximum
[2.46e-07 8.29e-08 2.86e-07 1.17e-05 1.17e-05 6.58e-07 2.30e-07 2.16e-07
2.30e-07 7.32e-07 7.96e-07 7.02e-08 3.29e-07 2.40e-07 3.29e-06 4.17e-06
2.74e-06]
###Markdown
For playlists
###Code
flat_std = [s for name in ['web-radio','spotify_pl'] for s in std[name]]
mstd = np.ma.mean(flat_std, axis=0)
sstd = np.ma.std(flat_std, axis=0)
smin = np.ma.min(flat_std, axis=0)
smax = np.ma.max(flat_std, axis=0)
print('Standard Deviation: mean' )
print(np.array(mstd))
print('Standard Deviation: minimum' )
print(np.array(smin))
print('Standard Deviation: maximum' )
print(np.array(smax))
###Output
Standard Deviation: mean
[7.19e-08 1.37e-08 4.08e-08 3.20e-07 3.62e-07 4.15e-08 1.56e-08 2.81e-08
1.77e-09 2.54e-09 3.61e-09 4.16e-09 2.60e-08 1.59e-08 1.91e-07 2.52e-07
2.56e-07]
Standard Deviation: minimum
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
Standard Deviation: maximum
[5.17e-07 1.32e-07 1.69e-07 9.69e-06 9.73e-06 9.46e-07 2.22e-07 3.84e-07
5.79e-07 1.65e-06 7.90e-07 1.02e-07 3.63e-07 2.29e-07 4.10e-06 5.30e-06
5.08e-06]
###Markdown
Computing gaps
###Code
def get_std_gap(_chosen):
# return (stdb[_chosen] - stdw[_chosen]) / ((stdw[_chosen] + stdb[_chosen]) / 2 )
return stdb[_chosen] / stdw[_chosen]
def display_distances(_chosen):
better =(stdw[_chosen]<=stdb[_chosen]).tolist()
distance = get_std_gap(_chosen)
pd.set_option('precision', 3)
return pd.DataFrame([better, distance.tolist(), stdw[_chosen].tolist(), stdb[_chosen].tolist()],
index=['homongeneous', '% rate', 'std within', 'std between'], columns=heads)
display_distances('spotify_pl')
display_distances('web-radio')
display_distances('pp_concerts')
display_distances('itema3_concerts')
###Output
_____no_output_____
###Markdown
Two different tendencies emerge between concerts and playlists.
In **concerts** all the dimensions (where they exist) are more homogeneous. This is true in particular for the **casting** (which, conversely, should not be used for playlists), and this is reasonable.
The 1st dimension of **composer** is also one to take into account. In Itema3 this is probably not visible because of bad interlinking of artists.
For the keys, the values are not very relevant and not consistently positive.
Always positive (in **bold** the largely positive ones):
- **composer[0]**
- composer[1,2]
- **genre[2]**
- composition_date
- key[2]
- casting[2]
Positive only in concerts:
- **casting[all]**
- **genre[all]**
- **composition_date**
---
Tuning the recommender system
###Code
def compute_weights(threshold=1.4, fallback = .6, datasets =['spotify_pl']):
dist = [get_std_gap(_chosen) for _chosen in datasets]
dist = np.ma.mean(dist, axis=0).filled()
return np.where(dist > threshold, dist, fallback)
w = compute_weights()
pd.DataFrame([w], columns=heads)
def get_pool_from_datasets(datasets=['web-radio','spotify_pl']):
all_song = []
for t in all_training:
if t['name'] in datasets:
for pl in t['playlists']:
missing = [art not in uris for art in pl['data']]
pl['data'] = np.delete(pl['data'], np.where(missing))
all_song = np.concatenate([all_song, pl['data']])
all_song = np.unique(all_song)
print('Pool size: %d' % len(all_song))
all_song_vec = np.ma.array([get_embs(xi, masked=True) for xi in all_song])
# all_song_vec = np.ma.array(all_song_vec, mask=all_song_vec < -1.)
all_song_labels = np.array([get_label(xi) for xi in all_song])
return all_song, all_song_vec, all_song_labels
def computeSimilarity(seed, target, w):
b1 = np.where(seed.mask==True)[0]
b2 = np.where(target.mask==True)[0]
bad_pos = np.unique(np.concatenate([b1, b2]))
_seed = np.delete(seed, bad_pos, axis=0)
_target = np.delete(target, bad_pos, axis=0)
_w = np.delete(w, bad_pos, axis=0)
if len(_seed) == 0:
return 0
# distance
d = weightedL2(_seed, _target, _w)
# how much info I am not finding
penalty = len([x for x in b2 if x not in b1]) / len(seed)
# score
s = (max_distance - d) / max_distance
return s * (1 - penalty)
def weightedL2(a, b, w=1):
# return distance.cosine(a,b)
# https://stackoverflow.com/a/8861999/1218213
q = a - b
return np.sqrt((w * q * q).sum())
# return (w * q * q).sum()
_ones = np.ones(vectors[0].shape)
max_distance = weightedL2(_ones,-_ones, _ones)
def find(seed, n=4, w=None, _print=True, pool=get_pool_from_datasets()):
global max_distance
_uris = pool[0]
_vectors = pool[1]
_lbs = pool[2]
f_length = len(seed)
_seed = seed
if w is None:
w = np.ones(len(_seed))
w = w / w.sum()
else:
w = np.array(w)
# temp = [np.ones(f_length[k]) * w[k] for k in range(len(w))]
# w = np.array([item for sublist in temp for item in sublist])
max_distance = weightedL2(np.ones(len(_seed)), np.ones(len(_seed)) * -1, w)
if _print==True: print('computing scores')
scores = np.array([[computeSimilarity(_seed, x.astype(float), w) for x in _vectors]])
full = np.concatenate([_uris.reshape(len(_uris), 1), scores.transpose(), _lbs.reshape(len(_uris), 1)], axis=1)
# remove the seed from the list
# full = np.delete(full, pos, 0)
# sort
full_sorted = sorted(full, key=lambda _x: float(_x[1]), reverse=True)
most_similar = full_sorted[:n]
if _print==True: print('\n'.join('%s %s\n%s' % (f[0], f[1], f[2]) for f in most_similar))
return [{'uri': _a[0], 'score': float(_a[1])} for _a in most_similar]
find(get_embs('http://data.doremus.org/artist/b34f92ab-ad86-361b-a8b8-5c3a4db784d0', masked=True))
find(get_embs('http://data.doremus.org/artist/b82c0771-5280-39af-ad2e-8ace2f4ebda3', masked=True))
find(get_embs('http://data.doremus.org/artist/03954109-0253-35d6-a70e-89ab27dea09c', masked=True))
find(get_embs('http://data.doremus.org/artist/bcf39e82-e208-3049-b550-1feaae6071a6', masked=True))
def recommend_compare(playlist, w, pos=-1, num_candidates=[100, 200, 500],
verbose=True, pool=get_pool_from_datasets(), overture=True):
pl_data = playlist['data']
pl_population = len(pl_data)
if verbose: print('%d items | %s' % (pl_population, playlist['name'].split('/')[-1]))
_replcm = 'overture.' if overture else 'data.'
if pos < 0:
pos = random.randrange(pl_population)
chosen = pl_data[pos]
targets = pl_data
# [max(pos-7,0):min(pos+7, len(pl_data))]
targets_pop = len(targets)
# print(max(pos-5,0))
# print(min(pos+5, len(pl_data)))
# print(targets_pop)
if verbose:
print('seed: %d) %s' % (pos, get_label(chosen)))
print('\t '+ chosen.replace('data.', _replcm))
first = get_embs(chosen, masked=True)
candidates = find(first, n=np.max(num_candidates), _print=False, pool=pool )
candidates_2 = find(first, n=np.max(num_candidates), _print=False, w = w, pool=pool )
results = np.zeros((len(num_candidates), 3))
for qi, q in enumerate(num_candidates):
trues_flat = 0
for index, c in enumerate(candidates[:q]):
if c['uri'] == chosen: continue
if c['uri'] in targets: trues_flat+=1
# if verbose:
# _sig = ' X ' if c['uri'] in targets else ' '
# print('%d \t %.5f'% (index, c['score']) + '\t' + _sig + '\t' + get_label(c['uri']))
# print('\t\t\t\t'+ c['uri'].replace('data.', 'overture.'))
# display(pd.DataFrame(np.ma.array( candidates[c]).reshape(1, 13)))
trues = 0
for index, c in enumerate(candidates_2[:q]):
if c['uri'] == chosen: continue
if c['uri'] in targets: trues+=1
# if verbose:
# _sig = ' X ' if c['uri'] in pl_data else ' '
# print('%d \t %.5f'% (index, c['score']) + '\t' + _sig + '\t' + get_label(c['uri']))
# print('\t\t\t\t'+ c['uri'].replace('data.', 'overture.'))
# # display(pd.DataFrame(np.ma.array( candidates[c]).reshape(1, 13)))
if verbose: print('%d | flat %d | weighted %d | diff %d' % (q, trues_flat, trues, trues-trues_flat))
results[qi] = [trues / targets_pop, trues_flat / targets_pop, (trues-trues_flat) / targets_pop]
return results
# w = compute_weights(threshold=1.2, fallback=1., datasets=['spotify_pl'])
pl = all_training[3]['playlists'][12]
recommend_compare(pl, w)
###Output
92 items | 37i9dQZF1DWWEJlAGA9gs0.Classical Essentials.artist.txt
seed: 88) Giacomo Puccini
http://overture.doremus.org/artist/5bcd515f-354f-30f8-ae0a-c42b65536d9f
100 | flat 17 | weighted 21 | diff 4
200 | flat 37 | weighted 38 | diff 1
500 | flat 55 | weighted 54 | diff -1
###Markdown
Computation on all playlists
###Code
out_path = './out-artists'
def ensure_dir(file_path):
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
return file_path
from tqdm import tqdm_notebook as tqdm
def test_recommendation(pool, playlist, pos=-1, mode='random', w=None, name='', verbose=0, overture=False, write=False):
pl_data = playlist['data']
pl_population = len(pl_data)
pl_name = playlist['name'].split('/')[-1].replace('.json', '')
_replcm = 'overture.' if overture else 'data.'
if pos < 0:
pos = random.randrange(pl_population)
chosen = pl_data[pos]
pl_data = np.delete(pl_data, pos, axis=0)
if verbose > 0:
print('%d items | %s' % (pl_population, pl_name))
print('seed: %d) %s' % (pos, get_label(chosen)))
print('\t '+ chosen.replace('data.', _replcm))
first = get_embs(chosen, masked=True)
num_candidates=[100, 200, 500]
max_candidates = np.max(num_candidates)
if mode == 'flat':
candidates = find(first, n=max_candidates, _print=False, pool=pool)
elif mode == 'weighted':
candidates = find(first, n=max_candidates, _print=False, w = w, pool=pool)
else : # random
candidates = list(map(lambda x: {'uri': x, 'score': 0}, random.sample(pool[0].tolist(), 500)))
results = np.zeros(len(num_candidates))
candidates_uri = list(map(lambda x: x['uri'], candidates))
for qi, q in enumerate(num_candidates):
trues = len(set(candidates_uri[:q]).intersection(set(pl_data)))
if verbose > 0:
print('%d | positive %d | population %d' % (q, trues, pl_population))
results[qi] = trues
if verbose > 1:
for index, c in enumerate(candidates[:max_candidates]):
_sig = ' X ' if c['uri'] in pl_data else ' '
print('%d \t %.5f'% (index, c['score']) + '\t' + _sig + '\t' + get_label(c['uri']))
print('\t\t\t\t'+ c['uri'].replace('data.', _replcm))
# display(pd.DataFrame(np.ma.array( candidates[c]).reshape(1, 13)))
if write:
to_write =[';'.join([
str(index),
str(c['score']),
'1' if c['uri'] in pl_data else '0',
get_label(c['uri']),
c['uri'].replace('[;\n"]', ' ')
])
for index, c in enumerate(candidates[:max_candidates])]
filename = pl_name + '.' + str(pos) + '.csv'
with open(ensure_dir(os.path.join(out_path, 'detail', name, filename)), 'w') as file:
file.write('index;score;predicted;label;uri\n')
file.write('\n'.join(to_write))
return results
def run_for_dataset(id_dataset, pool, mode='random', w=None, name=''):
with open(ensure_dir(os.path.join(out_path, 'summary', name + '.csv')), 'w') as file:
file.write('index;playlist;population;predicted100;predicted200;predicted500\n')
testset = all_training[id_dataset]['playlists']
pbar = tqdm(total=len(testset))
for index, pl in enumerate(testset):
population = len(pl['data'])
pl_name = pl['name'].split('/')[-1].replace('.json', '').replace('"','')
results = [test_recommendation(pool=pool, playlist=pl, pos=pos,
mode=mode, w=w, write=False, name=name)
for pos, work in enumerate(pl['data'])]
results = np.mean(results,axis=0)
pbar.update(1)
if not 'ndarray' in str(type(results)):
# print(type(results))
continue
file.write(';'.join([str(index), pl_name, str(population),
str(results[0]), str(results[1]), str(results[2])]))
file.write('\n')
pbar.close()
# test_recommendation(pp_pool, playlist=all_training[0]['playlists'][4], mode='weighted', name='pp.w5-06', w=_wpp, verbose=2 )
# itema3_pool = get_pool_from_datasets(['itema3_concerts'])
run_for_dataset(1, itema3_pool, mode='random', name='itema3.rand')
run_for_dataset(1, itema3_pool, mode='flat', name='itema3.flat')
_wi3 = compute_weights(threshold=1.4, datasets=['itema3_concerts'])
run_for_dataset(1, itema3_pool, mode='weighted', name='itema3.w14-06', w= _wi3)
_wi3 = compute_weights(threshold=8, datasets=['itema3_concerts'])
run_for_dataset(1, itema3_pool, mode='weighted', name='itema3.w8-06', w= _wi3)
_wi3 = compute_weights(threshold=10, datasets=['itema3_concerts'])
run_for_dataset(1, itema3_pool, mode='weighted', name='itema3.w10-06', w= _wi3)
_wi3 = compute_weights(threshold=8, fallback=1., datasets=['itema3_concerts'])
run_for_dataset(1, itema3_pool, mode='weighted', name='itema3.w8-1', w= _wi3)
_wi3 = compute_weights(threshold=5, datasets=['itema3_concerts', 'pp_concerts'])
run_for_dataset(1, itema3_pool, mode='weighted', name='itema3.wp5-06', w= _wi3)
pp_pool = get_pool_from_datasets(['pp_concerts'])
run_for_dataset(0, pp_pool, mode='random', name='pp.rand')
run_for_dataset(0, pp_pool, mode='flat', name='pp.flat')
_wpp = compute_weights(threshold=2, datasets=['pp_concerts'])
pd.DataFrame([_wpp], columns=heads)
run_for_dataset(0, pp_pool, mode='weighted', name='pp.w2-06', w=_wpp)
spo_pool = get_pool_from_datasets(['spotify_pl'])
run_for_dataset(3, spo_pool, mode='random', name='spotify.rand')
run_for_dataset(3, spo_pool, mode='flat', name='spotify.flat')
_wspo = compute_weights(threshold=1.2, datasets=['spotify_pl'])
run_for_dataset(3, spo_pool, mode='weighted', name='spotify.w12-06', w=_wspo)
_wspo = compute_weights(threshold=1.4, datasets=['spotify_pl'])
run_for_dataset(3, spo_pool, mode='weighted', name='spotify.w14-06', w=_wspo)
_wspo = compute_weights(threshold=1.5, datasets=['spotify_pl'])
run_for_dataset(3, spo_pool, mode='weighted', name='spotify.w15-06', w=_wspo)
radio_pool = get_pool_from_datasets(['web-radio'])
run_for_dataset(2, radio_pool, mode='random', name='web-radio.rand')
run_for_dataset(2, radio_pool, mode='flat', name='web-radio.flat')
_wradio = compute_weights(threshold=1.4, datasets=['web-radio'])
run_for_dataset(2, radio_pool, mode='weighted', name='web-radio.w14-06', w=_wradio)
_wradio = compute_weights(threshold=1.33, datasets=['web-radio'])
pd.DataFrame([_wradio], columns=heads)
run_for_dataset(2, radio_pool, mode='weighted', name='web-radio.w133-06', w=_wradio)
_wradio = compute_weights(threshold=1.45, datasets=['web-radio'])
run_for_dataset(2, radio_pool, mode='weighted', name='web-radio.w145-06', w=_wradio)
_wradio = compute_weights(threshold=1.3, fallback=.8, datasets=['web-radio'])
run_for_dataset(2, radio_pool, mode='weighted', name='web-radio.w13-07', w=_wradio)
_wradio = compute_weights(threshold=1.2, fallback=.8, datasets=['web-radio'])
run_for_dataset(2, radio_pool, mode='weighted', name='web-radio.w12-07', w=_wradio)
_wradio = compute_weights(threshold=1.1, fallback=.9, datasets=['web-radio'])
run_for_dataset(2, radio_pool, mode='weighted', name='web-radio.w11-09', w=_wradio)
summary_path = os.path.join(out_path, 'summary')
columns = ['name', 'r100', 'r200', 'r500']
summary = pd.DataFrame(columns=columns)
for index, filename in enumerate(sorted(os.listdir(summary_path))):
table = pd.read_csv(os.path.join(summary_path,filename), sep=';')
table['r100'] = table.apply(lambda row: row['predicted100']/row['population'], axis=1)
table['r200'] = table.apply(lambda row: row['predicted200']/row['population'], axis=1)
table['r500'] = table.apply(lambda row: row['predicted500']/row['population'], axis=1)
r100 = table['r100'].mean()
r200 = table['r200'].mean()
r500 = table['r500'].mean()
summary.loc[index] = [filename, r100, r200, r500]
summary
###Output
_____no_output_____ |
totalExposureLog_insight.ipynb | ###Markdown
Data preview
###Code
f, ax = plt.subplots(figsize=(15, 5))
ax = sns.violinplot(data=bid_counts,x='motify_date',y='value')
f.autofmt_xdate()
df
df.nunique()
df[['id','request_timestamp','position','uid','aid','imp_ad_size','bid','pctr','quality_ecpm','totalEcpm']]
df.describe()
import matplotlib.pyplot as plt
plt.boxplot(df['totalEcpm'], vert=False)  # box plot
plt.show()
plt.plot(df['id'], df['totalEcpm'], 'o', color='black')  # scatter plot
df['totalEcpm'].describe()  # descriptive statistics
def count_box(Q_3, Q_1):  # Q_3 is the 75th percentile (third quartile), Q_1 is the 25th percentile (first quartile)
    IQR = Q_3 - Q_1
    down_line = Q_1 - 1.5 * IQR
    up_line = Q_3 + 1.5 * IQR
    print("Upper outlier limit:", up_line, " Lower outlier limit:", down_line)
count_box(1866, 342)
###Output
_____no_output_____ |
Machine-Learning/Transformer/TransformerI.ipynb | ###Markdown
Introduction
Until 2017, natural language processing (NLP) depended on recurrent neural networks (RNNs), which have several shortcomings: 1. no support for parallelization, 2. vanishing gradients for long sequences. For instance, translation mostly uses seq2seq architectures in which both the encoder and the decoder consist of RNNs. In 2014, Bahdanau et al. suggested the use of a mechanism called "attention" to resolve the bottleneck of seq2seq models [1]. However, RNNs were still the main building block of such models and of others like image captioning by Kelvin Xu et al. [2]. The use of attention in [1, 2] also gave a better visualization of such models. But what if we get rid of all RNNs in the first place? That is a big claim, but it worked really well in 2017, in the paper rightly called "Attention is all you need" by Vaswani et al. from Google AI. This was a breakthrough: removing the slow, sequential RNNs makes parallel computation easy to implement. The model was called the 'Transformer'; in this notebook we will explain its main components in detail.
Transformer Encoder
The transformer, like other architectures for machine translation, consists of encoder and decoder modules. In this notebook we will only focus on the encoder; part II will focus on the decoder.
Tokenization
Given a set of statements, we would like to convert these statements to sequences of integers. However, using whole words would result in a huge vocabulary, which makes training very difficult. Google offers [SentencePiece](https://github.com/google/sentencepiece), a model that can be trained to generate tokens for arbitrary languages. From the name, you can deduce that the model can generate pieces of words as well. In this example, we will train a model on a set of expressions from the English language.
###Code
# train the model on the epxressions with a fixed vocabulary size, this will create a model named en.model
spm.SentencePieceTrainer.Train('--input=expressions.txt --model_prefix=en --vocab_size=1000 --model_type=bpe')
# create the process object then load the trained model
sp = spm.SentencePieceProcessor()
sp.load('en.model')
###Output
_____no_output_____
###Markdown
Let us try the model on an arbitrary statement; note how the tokens can be pieces of words.
###Code
sp.encode_as_pieces('A calmness settled on his spirit')
###Output
_____no_output_____
###Markdown
Now, let us load our dataset and convert the sentences to padded vectors of token ids.
###Code
# load the expressions
with open('expressions.txt', 'r') as f:
sentences = f.read().splitlines()
# convert the vectors to ids
sequences = []
max_length = 0
for sentence in sentences:
sequence = sp.encode_as_ids(sentence)
sequences.append(sequence)
max_length = max(max_length, len(sequence))
# pad the vectors according to the longest sequence
x = tf.keras.preprocessing.sequence.pad_sequences(sequences, maxlen = max_length)
###Output
_____no_output_____
###Markdown
Parameters
A set of parameters that are taken from the original paper. Don't worry about them right now.
###Code
# dimension of the vectors between consecutive modules layers
dm = 512
# number of heads in attention modules
h = 8
# dimensnion of queries and keys
dk = dm // 8
# dimension of values
dv = dm // 8
# sequence length
n = max_length
# size of vocabulary
vocab_size = 1000
# number of neurons in the feed forward network
dff = 2048
###Output
_____no_output_____
###Markdown
Encoding
Since the transformer model has no inherent notion of word order (its operations are mostly parallel), it is important that we keep track of the order of the input words. To do that, we use positional encoding. Before doing that, we will first pass the vectors through an embedding layer.
###Code
e = tf.keras.layers.Embedding(vocab_size, dm)(x)
###Output
_____no_output_____
###Markdown
To encode the position of each word within a sequence we use sines and cosines. These encodings are simply added to the embedding layer output, so the shape of e, [None, n, dm], does not change. Let pos be the position within the sequence, which takes the values $0 \cdots n-1$, and $i$ the position within the embedding dimension, which takes values $0 \cdots d_m-1$. We evaluate the encoded position for even and odd indices as $$\text{PE}(\text{pos},2i)= \sin\left(\frac{\text{pos}}{10000^{2i/d_{model}}}\right)$$$$\text{PE}(\text{pos},2i+1)= \cos\left(\frac{\text{pos}}{10000^{2i/d_{model}}}\right)$$We can combine the two formulas as $$\text{PE}(\text{pos},i)= \sin\left(\frac{\text{pos}}{10000^{(i-g(i))/d_{model}}}+ g(i) \times \frac{\pi}{2}\right)$$where $g(i) = i \,\% \,2$, which is 0 for even values and 1 for odd values. Note that $\sin(x + \pi/2) = \cos(x)$
###Code
def g(ids):
return ids % 2
def positional_encoding(x):
# 1. create positions within sequence and ids within dm
# out ids: [1, dm] pos: [n, 1]
ids = tf.expand_dims(tf.range(0, dm), 0)
pos = tf.expand_dims(tf.range(0, n), 1)
# 2. create numerator and denominator
# inp ids: [1, dm] pos: [n, 1]
# out den: [1, dm] num: [n, 1]
den = tf.pow(10000.0, tf.cast((ids - g(ids))/ dm, tf.float32))
num = tf.cast(pos, tf.float32)
# 3. division
# inp num: [n, 1] den: [1, dm]
# out encoding: [n, dm]
encoding = tf.sin(tf.divide(num, den) + tf.cast(g(ids), tf.float32)* (np.pi/2))
return x + encoding
z = positional_encoding(e)
###Output
_____no_output_____
###Markdown
Let us visualize the encoding for a specific sequence of words. As we see from the figure, each sequence position (on the y-axis) has a unique encoding pattern.
###Code
plt.figure(figsize = (10, 5))
plt.pcolormesh(z[0])
plt.ylim((0, max_length))
plt.colorbar()
plt.show()
###Output
_____no_output_____
###Markdown
Multi-head Self-attention  Let us start from the output of the positional encoding function $z \in \mathbb{R}^{n \times d_m}$. Note that, for simplicity, we assume that we just have one batch. From the value $z$ we extract three types of vectors by applying three dense layers. The types are $K \in \mathbb{R}^{n \times d_k}$, $Q \in \mathbb{R}^{n \times d_k}$ and $V \in \mathbb{R}^{n \times d_v}$, called keys, queries and values respectively. In simple terms, the keys are used by the query vector to search for values. In attention, we are trying to find the closest key-value pair to the given query vector. In other words, we are searching for words that are closer in meaning or that the current word may refer to in the current context. For instance, in the statement `John is smart and handsome` we expect the query words `smart` and `handsome` to have a high attention value for the key word `John`. To evaluate closeness, we use a scaled dot product $$\text{Attention}(Q, K, V) = \text{softmax}\left( \frac{QK^T}{\sqrt{d_k}}\right)V$$This function produces a weight vector for each query word, which is multiplied by the value vectors. For instance, for the query word `smart` we expect something like `[0.3, 0.05, 0.1, 0.05, 0.55] x v`, so values with small attention weights contribute little to the output. The authors realized that for large $d_k$ the dot products might grow large, pushing the softmax into regions with very small gradients, so they divide by $\sqrt{d_k}$ as a normalizing factor inside the softmax function. This form is called (scaled) dot-product attention and is usually preferred because it is fast and space-efficient: we can use highly optimized matrix products to compute it. Note that, in the definition, we assume that $Q, K, V$ are matrices, hence it can be computed really fast for a long sequence and even a whole batch. There exists another form of attention, called additive attention, which was used in [1, 2]. In general this approach is called self-attention because it uses the same input vector $z$ to generate the query, key and value vectors. In the paper, the authors suggest using self-attention multiple times, i.e., multi-head attention. This is done by repeating the same operation multiple times, which in general gives more robust results. They use $h = 8$ heads and concatenate the outputs at the end. In the following function we will create a single-head attention operation. The input has dimensions $z \in \mathbb{R}^{\text{None} \times n \times d_m}$ and the output $H \in \mathbb{R}^{\text{None} \times n \times d_v}$; note how we use $\text{None}$ to represent the batch size, which can be variable. To make the code clearer, for each operation I show the input and output dimension shapes.
###Code
def single_head_attention(z):
# 1. projection
# inp z: [None, n, dm]
# out Q: [None, n, dk] K: [None, n, dk] V: [None, n, dv]
V = tf.keras.layers.Dense(units = dv)(z)
Q = tf.keras.layers.Dense(units = dk)(z)
K = tf.keras.layers.Dense(units = dk)(z)
# 2. scaled dot product
# inp Q: [None, n, dk] K: [None, n, dk]
# out score : [None, n, n]
score = tf.matmul(Q, K, transpose_b=True)/ tf.sqrt(dk*1.0)
# 3. evaluate the weights
# inp score: [None, n, n]
# out W: [None, n, n]
    W = tf.nn.softmax(score, axis = -1) # softmax over the key axis so each query's weights sum to 1
# 4. evaluate the context vector
# inp W: [None, n, n] V: [None, n, dv]
# out H: [None, n, dv]
H = tf.matmul(W, V)
return H
###Output
_____no_output_____
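###Markdown
To see the scaled dot-product formula in isolation, here is a small NumPy-only sketch on random toy matrices (shapes and values are invented purely for illustration):
###Code
# toy setup: 4 tokens, dk = dv = 3
rng = np.random.default_rng(0)
Q_toy = rng.normal(size=(4, 3))
K_toy = rng.normal(size=(4, 3))
V_toy = rng.normal(size=(4, 3))
score_toy = Q_toy @ K_toy.T / np.sqrt(3)     # [4, 4] similarity of every query to every key
W_toy = np.exp(score_toy)
W_toy /= W_toy.sum(axis=-1, keepdims=True)   # softmax over the key axis: each row sums to 1
H_toy = W_toy @ V_toy                        # [4, 3] weighted combination of the values
print(W_toy.sum(axis=-1))                    # sanity check: all ones
###Output
_____no_output_____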
###Markdown
After applying single attention we repeat the operation multiple times and then concatenate the output
###Code
def multi_head_attention(z):
# according to the paper "We found it beneficial to linearly project the queries, keys
# and values h times with different, learned linear projections
# to dk, dk and dv dimensions, respectively".
Hs = []
# 1. apply h times
# inp z : [None, n, dm]
# out Hs: [[None, n, dv], ..., [None, n, dv]]
for i in range(0, h):
# single head attention
# inp z: [None, n, dm]
# out H: [None, n, dv]
H = single_head_attention(z)
Hs.append(H)
# 2. concatenate
# inp Hs: [[None, n, dv], ..., [None, n, dv]]
# out z : [None, n, dv * 8] => [None, n , dm]
z = tf.concat(Hs, axis = -1)
return z
###Output
_____no_output_____
###Markdown
Finally, the encoder layer is completed by applying a position-wise feed-forward network (two fully connected layers). Note that the authors wrap each sub-layer in a residual connection followed by layer normalization $$\text{LayerNorm}(x + \text{Sublayer}(x))$$
###Code
def EncoderLayer(z):
# 1. self-attention
# inp z : [None, n, dm]
# out dz: [None, n, dm]
dz = multi_head_attention(z)
# 2. normalization
# inp z: [None, n, dm] dz: [None, n, dm]
# out z: [None, n, dm]
z = tf.keras.layers.LayerNormalization()(z + dz)
# 3. feed forward
# inp z : [None, n, dm]
# out dz: [None, n, dm]
dz = tf.keras.layers.Dense(units = dff, activation = 'relu')(z)
dz = tf.keras.layers.Dense(units = dm)(dz)
# 4. normalization
# inp z: [None, n, dm] dz: [None, n, dm]
# out z: [None, n, dm]
z = tf.keras.layers.LayerNormalization()(z + dz)
return z
EncoderLayer(z).shape
###Output
_____no_output_____ |
9_Machine Learning with Python/2-2-2.NoneLinearRegression.ipynb | ###Markdown
Non Linear Regression AnalysisEstimated time needed: **20** minutes ObjectivesAfter completing this lab you will be able to:* Differentiate between linear and non-linear regression* Use a non-linear regression model in Python If the data shows a curvy trend, then linear regression will not produce very accurate results when compared to a non-linear regression, since linear regression presumes that the data is linear. Let's learn about non-linear regression and apply an example in Python. In this notebook, we fit a non-linear model to the datapoints corresponding to China's GDP from 1960 to 2014. Importing required libraries
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Although linear regression can do a great job at modeling some datasets, it cannot be used for all datasets. First recall how linear regression models a dataset. It models the linear relationship between a dependent variable y and the independent variable x. It has a simple equation of degree 1, for example y = $2x$ + 3.
###Code
x = np.arange(-5.0, 5.0, 0.1)
##You can adjust the slope and intercept to verify the changes in the graph
y = 2*(x) + 3
y_noise = 2 * np.random.normal(size=x.size)
ydata = y + y_noise
#plt.figure(figsize=(8,6))
plt.plot(x, ydata, 'bo')
plt.plot(x,y, 'r')
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
###Output
_____no_output_____
###Markdown
Non-linear regression is a method to model the non-linear relationship between the independent variables $x$ and the dependent variable $y$. Essentially any relationship that is not linear can be termed non-linear, and is usually represented by a polynomial of degree $k$ (the maximum power of $x$). For example:$$ y = a x^3 + b x^2 + c x + d $$Non-linear functions can have elements like exponentials, logarithms, fractions, and so on. For example: $$ y = \log(x)$$We can have a function that's even more complicated, such as:$$ y = \log(a x^3 + b x^2 + c x + d)$$ Let's take a look at a cubic function's graph.
###Code
x = np.arange(-5.0, 5.0, 0.1)
##You can adjust the slope and intercept to verify the changes in the graph
y = 1*(x**3) + 1*(x**2) + 1*x + 3
y_noise = 20 * np.random.normal(size=x.size)
ydata = y + y_noise
plt.plot(x, ydata, 'bo')
plt.plot(x,y, 'r')
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
###Output
_____no_output_____
###Markdown
As you can see, this function has $x^3$ and $x^2$ as independent variables. Also, the graphic of this function is not a straight line over the 2D plane. So this is a non-linear function. Some other types of non-linear functions are: Quadratic $$ Y = X^2 $$
###Code
x = np.arange(-5.0, 5.0, 0.1)
##You can adjust the slope and intercept to verify the changes in the graph
y = np.power(x,2)
y_noise = 2 * np.random.normal(size=x.size)
ydata = y + y_noise
plt.plot(x, ydata, 'bo')
plt.plot(x,y, 'r')
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
###Output
_____no_output_____
###Markdown
Exponential An exponential function with base c is defined by $$ Y = a + b c^X$$ where b ≠0, c > 0 , c ≠1, and x is any real number. The base, c, is constant and the exponent, x, is a variable.
###Code
X = np.arange(-5.0, 5.0, 0.1)
##You can adjust the slope and intercept to verify the changes in the graph
Y= np.exp(X)
plt.plot(X,Y)
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
###Output
_____no_output_____
###Markdown
LogarithmicThe response $y$ is a result of applying the logarithmic map from the input $x$ to the output $y$. It is one of the simplest forms of **log()**, i.e. $$ y = \log(x)$$Please consider that instead of $x$, we can use $X$, which can be a polynomial representation of the $x$ values. In general form it would be written as$$y = \log(X)$$
###Code
X = np.arange(-5.0, 5.0, 0.1)
Y = np.log(X)
plt.plot(X,Y)
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
###Output
<ipython-input-6-04d9a16879f0>:3: RuntimeWarning: invalid value encountered in log
Y = np.log(X)
###Markdown
Sigmoidal/Logistic $$ Y = a + \frac{b}{1+ c^{(X-d)}}$$
###Code
X = np.arange(-5.0, 5.0, 0.1)
Y = 1-4/(1+np.power(3, X-2))
plt.plot(X,Y)
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
###Output
_____no_output_____
###Markdown
Non-Linear Regression example For an example, we're going to try and fit a non-linear model to the datapoints corresponding to China's GDP from 1960 to 2014. We download a dataset with two columns: the first, a year between 1960 and 2014; the second, China's corresponding annual gross domestic product (GDP) in US dollars for that year.
###Code
import numpy as np
import pandas as pd
#downloading dataset
!wget -nv -O china_gdp.csv https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%202/data/china_gdp.csv
df = pd.read_csv("china_gdp.csv")
df.head(10)
###Output
2021-11-06 20:41:15 URL:https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%202/data/china_gdp.csv [1218/1218] -> "china_gdp.csv" [1]
###Markdown
**Did you know?** When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC) Plotting the DatasetThis is what the datapoints look like. It looks like either a logistic or an exponential function. The growth starts off slow, then from 2005 onward it becomes very significant, and finally it decelerates slightly in the 2010s.
###Code
plt.figure(figsize=(8,5))
x_data, y_data = (df["Year"].values, df["Value"].values)
plt.plot(x_data, y_data, 'ro')
plt.ylabel('GDP')
plt.xlabel('Year')
plt.show()
###Output
_____no_output_____
###Markdown
Choosing a modelFrom an initial look at the plot, we determine that the logistic function could be a good approximation, since it has the property of starting with a slow growth, increasing growth in the middle, and then decreasing again at the end; as illustrated below:
###Code
X = np.arange(-5.0, 5.0, 0.1)
Y = 1.0 / (1.0 + np.exp(-X))
plt.plot(X,Y)
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
###Output
_____no_output_____
###Markdown
The formula for the logistic function is the following:$$ \hat{Y} = \frac{1}{1+e^{-\beta_1(X-\beta_2)}}$$$\beta_1$: Controls the curve's steepness,$\beta_2$: Slides the curve on the x-axis. Building The ModelNow, let's build our regression model and initialize its parameters.
###Code
def sigmoid(x, Beta_1, Beta_2):
y = 1 / (1 + np.exp(-Beta_1*(x-Beta_2)))
return y
###Output
_____no_output_____
###Markdown
Lets look at a sample sigmoid line that might fit with the data:
###Code
beta_1 = 0.10
beta_2 = 1990.0
#logistic function
Y_pred = sigmoid(x_data, beta_1 , beta_2)
#plot initial prediction against datapoints
plt.plot(x_data, Y_pred*15000000000000.)
plt.plot(x_data, y_data, 'ro')
###Output
_____no_output_____
###Markdown
Our task here is to find the best parameters for our model. Let's first normalize our x and y:
###Code
# Lets normalize our data
xdata =x_data/max(x_data)
ydata =y_data/max(y_data)
###Output
_____no_output_____
###Markdown
How do we find the best parameters for our fit line? We can use **curve_fit**, which uses non-linear least squares to fit our sigmoid function to the data. It optimizes the values of the parameters so that the sum of the squared residuals of sigmoid(xdata, \*popt) - ydata is minimized. popt holds our optimized parameters.
###Code
from scipy.optimize import curve_fit
popt, pcov = curve_fit(sigmoid, xdata, ydata)
#print the final parameters
print(" beta_1 = %f, beta_2 = %f" % (popt[0], popt[1]))
###Output
beta_1 = 690.447531, beta_2 = 0.997207
###Markdown
Now we plot our resulting regression model.
###Code
x = np.linspace(1960, 2015, 55)
x = x/max(x)
plt.figure(figsize=(8,5))
y = sigmoid(x, *popt)
plt.plot(xdata, ydata, 'ro', label='data')
plt.plot(x,y, linewidth=3.0, label='fit')
plt.legend(loc='best')
plt.ylabel('GDP')
plt.xlabel('Year')
plt.show()
###Output
_____no_output_____
###Markdown
PracticeCan you calculate the accuracy of our model?
###Code
# split data into train/test
msk = np.random.rand(len(df)) < 0.8
train_x = xdata[msk]
test_x = xdata[~msk]
train_y = ydata[msk]
test_y = ydata[~msk]
# build the model using train set
popt, pcov = curve_fit(sigmoid, train_x, train_y)
# predict using test set
y_hat = sigmoid(test_x, *popt)
# evaluation
print("Mean absolute error: %.2f" % np.mean(np.absolute(y_hat - test_y)))
print("Residual sum of squares (MSE): %.2f" % np.mean((y_hat - test_y) ** 2))
from sklearn.metrics import r2_score
print("R2-score: %.2f" % r2_score(y_hat , test_y) )
###Output
Mean absolute error: 0.03
Residual sum of squares (MSE): 0.00
R2-score: -0.26
|
student-notebooks/15.01-Accounting-for-the-lipid-bilayer.ipynb | ###Markdown
Before you turn this problem in, make sure everything runs as expected. First, **restart the kernel** (in the menubar, select Kernel$\rightarrow$Restart) and then **run all cells** (in the menubar, select Cell$\rightarrow$Run All).Make sure you fill in any place that says `YOUR CODE HERE` or "YOUR ANSWER HERE", as well as your name and collaborators below:
###Code
NAME = ""
COLLABORATORS = ""
###Output
_____no_output_____
###Markdown
--- *This notebook contains material from [PyRosetta](https://RosettaCommons.github.io/PyRosetta.notebooks);content is available [on Github](https://github.com/RosettaCommons/PyRosetta.notebooks.git).* Setting up a membrane protein in the bilayerKeywords: membrane, bilayer, AddMembraneMover, OCTOPUS Getting Started: Setting up the protein in the lipid bilayerTo start modeling membrane proteins, we must place the protein in the lipid bilayer. This begs an important question: how should the protein be oriented? The orientation of a protein in the bilayer is driven by a number of biophysical factors, such as burying nonpolar side chains in the hydrophobic membrane. For RosettaMP, there are three ways to choose the initial orientation. The choice is up to you, and often depends on how much information you have about your protein beforehand.
###Code
# Notebook setup
import sys
if 'google.colab' in sys.modules:
!pip install pyrosettacolabsetup
import pyrosettacolabsetup
pyrosettacolabsetup.mount_pyrosetta_install()
print ("Notebook is set for PyRosetta use in Colab. Have fun!")
from pyrosetta import *
pyrosetta.init()
###Output
_____no_output_____
###Markdown
Make sure you are in the right directory for accessing the `.pdb` files:`cd google_drive/My\ Drive/student-notebooks/`
###Code
#cd google_drive/My\ Drive/student-notebooks/
###Output
_____no_output_____
###Markdown
Option 1: Download a pre-transformed PDB from the OPM database
###Code
from pyrosetta.toolbox import cleanATOM
cleanATOM("inputs/1afo.pdb")
pose = pose_from_pdb("inputs/1afo.clean.pdb")
###Output
_____no_output_____
###Markdown
Then, initialize RosettaMP using AddMembraneMover. In this option, the orientation is known and you can estimate the transmembrane spans from the orientation. Therefore, we tell RosettaMP to estimate the spanning topology from structure:
###Code
from pyrosetta.rosetta.protocols.membrane import *
addmem = AddMembraneMover("from_structure")
addmem.apply(pose)
###Output
_____no_output_____
###Markdown
Option 2: Estimate the transmembrane spans and use this information to choose an orientation In this option, you will need to figure out what the transmembrane spans are. For this, you can use a sequence-based server such as OCTOPUS (http://octopus.cbr.su.se). You will need to find the sequence of 1AFO on the PDB, copy/paste the sequence of one of the chains into OCTOPUS, and then save the output as a text file. Then, you will need to convert the output from OCTOPUS to the Rosetta format using the `octopus2memb` script. Next, initialize RosettaMP with AddMembraneMover. Here, instead of specifying “from_structure”, you will specify the path to your spanning topology file:
###Code
from pyrosetta.rosetta.protocols.membrane import *
if not os.getenv("DEBUG"):
addmem = AddMembraneMover("inputs/1afo.span")
addmem.apply(pose)
###Output
_____no_output_____
###Markdown
Key Concepts for the membrane representation1. AddMembraneMover adds an additional residue to the protein called the Membrane residue. It is not a physical residue, but it contains information about the membrane. Note that AddMembraneMover attaches the MEM residue to the protein in Rosetta’s representation, but it does not physically exist as a residue. This is a special kind of connection called a “jump edge”, whereas connections between the actual residues are called “peptide edges” (more on that in the fold tree section). 2. The spanning information is stored in a SpanningTopology object Let’s check some information about our current pose: print(pose.conformation()) print(pose.conformation().membrane_info())pose.conformation() shows information about all residues in the pose, fold_tree() shows information about the Edges of the FoldTree, and membrane_info() shows information about the membrane residue.
###Code
if not os.getenv("DEBUG"):
# YOUR CODE HERE
raise NotImplementedError()
###Output
_____no_output_____
###Markdown
**Questions:**How many residues compose 1AFO? ___Which residue is the Membrane residue? ___How many transmembrane spans does membrane_info() say there are? Fold TreeUnderstanding the fold tree is necessary to use movers that move parts of the protein with respect to other parts of the protein. For example, TiltMover requires a jump number and tilts the section after the jump number by a specified amount. SpinAroundPartnerMover spins one partner with respect to another, which also requires a jump number. We will explain the terminology shortly! Enter this code in the Python command line:`print(pose.conformation().fold_tree())`
###Code
if not os.getenv("DEBUG"):
# YOUR CODE HERE
raise NotImplementedError()
###Output
_____no_output_____ |
structure/LDA/LDA_coarse_i.ipynb | ###Markdown
Data import (labeled and unlabeled) Import data and add labels
###Code
corona_5g = pd.read_csv("~/Desktop/twitterAnalysis/FakeNews/dataset/graphs/5g_corona_conspiracy/feature_df_5g_corona_conspiracy.csv")
corona_5g['label'] = 'corona_5g'
corona_5g['conspiracy'] = '1'
non_conspiracy = pd.read_csv("~/Desktop/twitterAnalysis/FakeNews/dataset/graphs/non_conspiracy/feature_df_non_conspiracy.csv")
non_conspiracy['label'] = 'non_conspiracy'
non_conspiracy['conspiracy'] = '0'
other_conspiracy = pd.read_csv("~/Desktop/twitterAnalysis/FakeNews/dataset/graphs/other_conspiracy/feature_df_other_conspiracy.csv")
other_conspiracy['label'] = 'other_conspiracy'
other_conspiracy['conspiracy'] = '0'
all_data = corona_5g.append(non_conspiracy)
all_data = all_data.append(other_conspiracy)
all_data = all_data.dropna()
###Output
_____no_output_____
###Markdown
Split into train and test sets
###Code
x_unprocessed = all_data[all_data.columns[0:18]]
x = StandardScaler().fit_transform(x_unprocessed)
y = all_data[all_data.columns[19]]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=1)
###Output
/Users/maria/anaconda3/lib/python3.7/site-packages/sklearn/preprocessing/data.py:645: DataConversionWarning: Data with input dtype int64, float64 were all converted to float64 by StandardScaler.
return self.partial_fit(X, y)
/Users/maria/anaconda3/lib/python3.7/site-packages/sklearn/base.py:464: DataConversionWarning: Data with input dtype int64, float64 were all converted to float64 by StandardScaler.
return self.fit(X, **fit_params).transform(X)
###Markdown
Import unlabeled test data
###Code
official_test = pd.read_csv("/Users/maria/Desktop/twitterAnalysis/FakeNews/dataset/graphs/test_graphs/feature_df_test.csv")
official_test_complete = official_test.dropna()
official_test_complete_std = StandardScaler().fit_transform(official_test_complete)
###Output
/Users/maria/anaconda3/lib/python3.7/site-packages/sklearn/preprocessing/data.py:645: DataConversionWarning: Data with input dtype int64, float64 were all converted to float64 by StandardScaler.
return self.partial_fit(X, y)
/Users/maria/anaconda3/lib/python3.7/site-packages/sklearn/base.py:464: DataConversionWarning: Data with input dtype int64, float64 were all converted to float64 by StandardScaler.
return self.fit(X, **fit_params).transform(X)
###Markdown
Model training Run LDA with the SVD solver (evaluated on the held-out test split)
###Code
clf = LinearDiscriminantAnalysis()
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
precision, recall, fscore, support = metrics.precision_recall_fscore_support(y_test, y_pred)
labels = ['non-conspiracy', 'conspiracy']
acc = metrics.accuracy_score(y_test, y_pred)
mcc = metrics.matthews_corrcoef(y_test, y_pred)
prec = metrics.precision_score(y_test, y_pred, average='macro')
rec = metrics.recall_score(y_test, y_pred, average='macro')
print("Overall accuracy:",metrics.accuracy_score(y_test, y_pred), '\n')
print("MCC: " + str(mcc), '\n')
print(tabulate({"Label": labels,
"Precision": precision,
"Recall": recall,
"F-score": fscore,
"Support": support}, headers="keys"))
workbook = op.load_workbook("/Users/maria/Desktop/twitterAnalysis/FakeNews/scripts/graphs/model_summary.xlsx")
worksheet = workbook.worksheets[0]
row_n = worksheet.max_row+1
worksheet.cell(row=row_n, column=1).value = 'LDA_SVD_coarse'
worksheet.cell(row=row_n, column=2).value = 'LDA'
worksheet.cell(row=row_n, column=3).value = 'Structural Data'
worksheet.cell(row=row_n, column=4).value = 'Coarse'
worksheet.cell(row=row_n, column=5).value = mcc
worksheet.cell(row=row_n, column=6).value = acc
worksheet.cell(row=row_n, column=7).value = prec
worksheet.cell(row=row_n, column=8).value = rec
worksheet.cell(row=row_n, column=9).value = datetime.now()
workbook.save('/Users/maria/Desktop/twitterAnalysis/FakeNews/scripts/graphs/model_summary.xlsx')
official_test_complete['label'] = clf.predict(official_test_complete_std)
official_test_all = pd.concat([official_test, official_test_complete], axis=1)
labels = official_test_all['label']
labels.to_csv('LDA_SVD_coarse.csv', header=False)
###Output
/Users/maria/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
"""Entry point for launching an IPython kernel.
###Markdown
Run LDA with the LSQR solver (evaluated on the held-out test split)
###Code
clf = LinearDiscriminantAnalysis(solver='lsqr')
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
precision, recall, fscore, support = metrics.precision_recall_fscore_support(y_test, y_pred)
labels = ['non-conspiracy', 'conspiracy']
acc = metrics.accuracy_score(y_test, y_pred)
mcc = metrics.matthews_corrcoef(y_test, y_pred)
prec = metrics.precision_score(y_test, y_pred, average='macro')
rec = metrics.recall_score(y_test, y_pred, average='macro')
print("Overall accuracy:",metrics.accuracy_score(y_test, y_pred), '\n')
print("MCC: " + str(mcc), '\n')
print(tabulate({"Label": labels,
"Precision": precision,
"Recall": recall,
"F-score": fscore,
"Support": support}, headers="keys"))
workbook = op.load_workbook("/Users/maria/Desktop/twitterAnalysis/FakeNews/scripts/graphs/model_summary.xlsx")
worksheet = workbook.worksheets[0]
row_n = worksheet.max_row+1
worksheet.cell(row=row_n, column=1).value = 'LDA_LSQR_coarse'
worksheet.cell(row=row_n, column=2).value = 'LDA'
worksheet.cell(row=row_n, column=3).value = 'Structural Data'
worksheet.cell(row=row_n, column=4).value = 'Coarse'
worksheet.cell(row=row_n, column=5).value = mcc
worksheet.cell(row=row_n, column=6).value = acc
worksheet.cell(row=row_n, column=7).value = prec
worksheet.cell(row=row_n, column=8).value = rec
worksheet.cell(row=row_n, column=9).value = datetime.now()
workbook.save('/Users/maria/Desktop/twitterAnalysis/FakeNews/scripts/graphs/model_summary.xlsx')
official_test_complete['label'] = clf.predict(official_test_complete_std)
official_test_all = pd.concat([official_test, official_test_complete], axis=1)
labels = official_test_all['label']
labels.to_csv('LDA_LSQR_coarse.csv', header=False)
###Output
/Users/maria/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
"""Entry point for launching an IPython kernel.
###Markdown
Run LDA with the LSQR solver and automatic shrinkage (evaluated on the held-out test split)
###Code
clf = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto')
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
precision, recall, fscore, support = metrics.precision_recall_fscore_support(y_test, y_pred)
labels = ['non-conspiracy', 'conspiracy']
acc = metrics.accuracy_score(y_test, y_pred)
mcc = metrics.matthews_corrcoef(y_test, y_pred)
prec = metrics.precision_score(y_test, y_pred, average='macro')
rec = metrics.recall_score(y_test, y_pred, average='macro')
print("Overall accuracy:",metrics.accuracy_score(y_test, y_pred), '\n')
print("MCC: " + str(mcc), '\n')
print(tabulate({"Label": labels,
"Precision": precision,
"Recall": recall,
"F-score": fscore,
"Support": support}, headers="keys"))
workbook = op.load_workbook("/Users/maria/Desktop/twitterAnalysis/FakeNews/scripts/graphs/model_summary.xlsx")
worksheet = workbook.worksheets[0]
row_n = worksheet.max_row+1
worksheet.cell(row=row_n, column=1).value = 'LDA_LSQR_shrinkage_coarse'
worksheet.cell(row=row_n, column=2).value = 'LDA'
worksheet.cell(row=row_n, column=3).value = 'Structural Data'
worksheet.cell(row=row_n, column=4).value = 'Coarse'
worksheet.cell(row=row_n, column=5).value = mcc
worksheet.cell(row=row_n, column=6).value = acc
worksheet.cell(row=row_n, column=7).value = prec
worksheet.cell(row=row_n, column=8).value = rec
worksheet.cell(row=row_n, column=9).value = datetime.now()
workbook.save('/Users/maria/Desktop/twitterAnalysis/FakeNews/scripts/graphs/model_summary.xlsx')
official_test_complete['label'] = clf.predict(official_test_complete_std)
official_test_all = pd.concat([official_test, official_test_complete], axis=1)
labels = official_test_all['label']
labels.to_csv('LDA_LSQR_shrinkage_coarse.csv', header=False)
# sources
# https://scikit-learn.org/stable/modules/generated/sklearn.discriminant_analysis.LinearDiscriminantAnalysis.html
###Output
_____no_output_____ |
20-nlp_regex/lecture-20-regex.ipynb | ###Markdown
Introduction to Data Science – Regular Expressions*COMP 5360 / MATH 4100, University of Utah, http://datasciencecourse.net/* In this lecture we'll learn about regular expressions. Regular expressions are a way to match strings. They are very useful to find (and replace) text, to extract structured information such as e-mails, phone numbers, etc., or for cleaning up text that was entered by humans, and many other applications. In Python, regular expressions are available as part of the [`re`](https://docs.python.org/3/library/re.htmlmodule-re) module. There are various [good](https://docs.python.org/3/howto/regex.html) [tutorials](https://developers.google.com/edu/python/regular-expressions) available on which this document is partially based. The basic syntax to search for a match in a string is this: ```pythonmatch = re.search(pattern, text)```Here, `pattern` is the regular expression, `text` is the text that the regular expression is applied to. Match holds the search result that matches the string in an object.[`search()`](https://docs.python.org/3/library/re.htmlre.search) returns only the first occurrence of a match, in contrast, [`findall()`](https://docs.python.org/3/library/re.htmlre.findall) returns all matches.Another useful function is [`split()`](https://docs.python.org/3/library/re.htmlre.split), which splits a string based on a regex pattern – we'll use all of these functions and others where appropriate. Mostly, we'll use search to learn about the syntax, but sometimes we'll use split instead of search to explain a pattern. There are other functions which we'll use later. A simple ExampleWe'll use a regular expression: ```python'animal:\w\w\w'```To match the substring 'animal:' followed by a three letter word, encoded by '\w\w\w'
###Code
import re
# example text
text = "an example animal:cat!! animal:dog! animal:hedgehog"
# running the search, r before the string denotes a raw string
match = re.search(r"animal:\w\w\w", text)
# If-statement after search() tests if it succeeded
if match:
print ("found:", match.group())
else:
print ("did not find")
###Output
found: animal:cat
###Markdown
Here, the `r` before the string denotes that this should be treated as a raw string literal, i.e., that python shouldn't try to interpret the backslashes as escape characters, as it would, e.g., for `\n` – new line. This is quite useful for regular expressions, because we'd have to write the above query like this otherwise:```"animal:\\w\\w\\w"```The specific match can be retrieved using [`match.group()`](https://docs.python.org/3/library/re.htmlre.Match.group). Basic PatternsOrdinary characters, such as "`a, X, 9, <`" match themselves literally.
###Code
# search for occurence of "sc"
re.search(r"sc", "datascience").group()
# search for occurrence of <
re.search(r"<", "data<science").group()
###Output
_____no_output_____
###Markdown
Special characters do not match themselves because they are part of the language. These are `. ^ $ * + ? { [ ] \ | ( )`.
###Code
# search for the beginning of the string, not the ^ symbol
re.search(r"^", "datascience^2").group()
###Output
_____no_output_____
###Markdown
We can escape special characters to match literally with a backslash `\`.
###Code
# search for the ^ symbol by escaping it
re.search(r"\^", "datascience^2").group()
###Output
_____no_output_____
###Markdown
A period `.` matches a single character, but not a newline character.
###Code
# search for the first single character
re.search(r".", "datascience.net").group()
###Output
_____no_output_____
###Markdown
`\w` matches a "word" character: a letter or digit or underbar `[a-zA-Z0-9_]`. Note that it only matches a single word char, not a whole word.
###Code
# search for the first word char
re.search(r"\w", "datascience").group()
# search for the first word char - note that < doesn't match
re.search(r"\w", "<datascience>").group()
###Output
_____no_output_____
###Markdown
`\W` (upper case W) matches any non-word character.
###Code
# search for the first non-word char
re.search(r"\W", "<datascience>").group()
###Output
_____no_output_____
###Markdown
`\s` matches a single whitespace character: space, newline `\n`, return `\r`, tab `\t`, and others.
###Code
# split by whitespace - searching for whitespace is boring
re.split(r"\s", "Intro datascience")
###Output
_____no_output_____
###Markdown
`\S` (upper case S) matches any non-whitespace character.
###Code
# search for first non-whitespace character
re.search(r"\S", " Intro datascience").group()
###Output
_____no_output_____
###Markdown
`\t`, `\n`, and `\r` match tab, newline, and return respectively.
###Code
# split the string based on tab \t
print("Intro\tdatascience 2019")
re.split(r"\t", "Intro\tdatascience 2019")
###Output
Intro datascience 2019
###Markdown
`\d` matches a decimal digit [0-9].
###Code
re.search(r"\d", "Intro datascience 2019").group()
###Output
_____no_output_____
###Markdown
`^` matches the start and `$` matches the end of the string. These are useful in the context of larger regular expressions, but not very useful in isolation; a short example appears after the next code cell. Repetition QualifiersA key concept in regex is repetition. `+` matches 1 or more occurrences of the pattern to its left.
###Code
# this matches as much as it can
re.search(r"o+", "Introoooo datascience").group()
###Output
_____no_output_____
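###Markdown
A quick sketch of the `^` and `$` anchors mentioned above (the example strings are chosen just for illustration):
###Code
# ^ anchors the pattern to the start of the string
print(re.search(r"^Intro", "Introoooo datascience"))
# $ anchors the pattern to the end of the string
print(re.search(r"datascience$", "Introoooo datascience"))
# no match: the string does not start with "data"
print(re.search(r"^data", "Introoooo datascience"))
###Output
_____no_output_____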
###Markdown
`*` matches 0 or more occurrences of the pattern on its left
###Code
# search for digits \d possibly separated by one or more whitespaces
re.search(r'\d\s*\d\s*\d', 'xx1 2 3xx').group()
# note that this also works if there are no whitespaces as * indicates 0-n matches
re.search(r'\d\s*\d\s*\d', 'xx123xx').group()
###Output
_____no_output_____
###Markdown
We can use this, for example to look for words starting with a certain character:
###Code
# d\w* start with a d, then match zero or more word characters
re.search(r"d\w*", "Introoooo datascience !").group()
###Output
_____no_output_____
###Markdown
`?` matches 0 or 1 occurrences of the pattern on its left:
###Code
# d\w? start with a d, then match zero or one characters. Why is the result "da" not "d"?
re.search(r"d\w?", "Introoooo datascience !").group()
###Output
_____no_output_____
###Markdown
This matches `da` not `d` because all these repetition qualifiers are greedy, i.e., match as much as possible. We'll talk more about this below. Be aware that the zero or more condition can be tricky. For example, if we want to match a `dd` with `*` and do it like this, we get a zero match, because the **start of the string** already matches the "or zero" condition. The correct pattern here would be `d+`.
###Code
re.search(r"d*", "Introoooo ddatascience !").group()
re.search(r"d+", "Introoooo ddatascience !").group()
###Output
_____no_output_____
###Markdown
Example: E-MailsLet's take a look at how we can use regular expressions. Suppose you're a spammer and you want to scrape e-mail addresses from websites. Here is an example:
###Code
html = 'You can reach me <a href="mailto:[email protected]">by e-mail</a> if necessary.'
# a first attempt:
# \w+ 1-n word letters,
# @ for the literal @
# 1-n word letters
re.search(r'\w+@\w+', html).group()
###Output
_____no_output_____
###Markdown
That didn't work because `.` doesn't match for `\w`. We can write a more specific query:
###Code
# \w+ 1-n word letters
# @
# \w+ 1-n word letters
# \. a period (escaped)
# \w+ 1-n word letters
# \. another period
# \w+ and more 1-n word letters
re.search(r'\w+@\w+\.+\w+\.\w+', html).group()
###Output
_____no_output_____
###Markdown
That worked! But it's easy to see that this isn't very general, i.e., it doesn't work for every legal e-mail.
###Code
html2 = 'You can reach me <a href="mailto:[email protected]">by e-mail</a> if necessary.'
match = re.search(r'\w+@\w+\.+\w+\.\w+', html2)
if match:
print(match.group())
else:
print ("didn't match")
###Output
didn't match
###Markdown
Here the e-mail [email protected] wasn't matched at all.
###Code
html3 = "You can reach me <a href='mailto:[email protected]'>by e-mail</a> if necessary."
# \w+ 1-n word letters, @,
match = re.search(r'\w+@\w+\.+\w+\.\w+', html3)
if match:
print(match.group())
else:
print ("didn't match")
###Output
[email protected]
###Markdown
Here, something matched but it's the wrong e-mail! It's not [email protected], but [email protected]. To fix this, we need another concept: Sets of legal chars We need another tool: **square brackets** `[]`. When using square brackets to enclose an expression, all the characters in the expression match:
###Code
#[\w.-]+ matches all strings that are made up of one or more word character, a period ., or dash - characters.
re.search(r'[\w.-]+@[\w.-]+', html).group()
re.search(r'[\w.-]+@[\w.-]+', html3).group()
###Output
_____no_output_____
###Markdown
That worked wonderfully! See how easy it is to extract an e-mail from a website. Also note that we didn't escape the `.`. That's because inside square brackets, only `^`, `-`, `]`, and `\` need to be escaped; all others, like `.` and `$`, are treated as literals. However, this pattern matches valid e-mail addresses, but it also matches non-valid ones. So this is a fine regex if you want to extract e-mail addresses, but not if you want to validate an e-mail address:
###Code
html4 = "alexander@sci..."
re.search(r'[\w.-]+@[\w.-]+', html4).group()
###Output
_____no_output_____
###Markdown
GroupingIf we want to be more specific about repeating substrings, for example, we need to be able to group a part of a regular expression. You can group with round brackets `()`:
###Code
# (da)+ gives us 1+ matches of the string "da", e.g., this will match da dada dadada, etc.
re.search(r"(da)+", "Introoooo dadadadascience 2016").group()
###Output
_____no_output_____
###Markdown
Groups are also a handy way to match a larger string, but only extract what is nested within a group. The [`group()`](https://docs.python.org/3/library/re.htmlre.match.group) method we've been using provides access to matched groups independently. Here is an example of extracting a URL from a string:
###Code
url = 'Visit the course website <a href="http://datasciencecourse.net">here</a>'
# legal characters in a url are \w, :, slash /, period .
# we use the href="" part to identify only URLs contained within that attribute
# but we don't actually want to match that.
match = re.search(r'href="([\w:/.]+)"', url)
print("The whole match:", match.group())
# Here we retreive the first individual group:
print("Only the match within the second group at index 1:", match.group(1))
###Output
The whole match: href="http://datasciencecourse.net"
Only the match within the second group at index 1: http://datasciencecourse.net
###Markdown
Exercise 2.1You're an evil spammer who's observed that many people try to obfuscate their e-mail using this notation: "`alex at utah dot edu`". Below are three example strings containing such obfuscated e-mails. Try to extract "alex at utah dot edu", etc. Start with the first string, then extend your regular expression to work on all of them at the same time. Note that the second and third are slightly harder to do!
###Code
html_smart = "You can reach me: alex at utah dot edu"
html_smart2 = "You can reach me: alex dot lex at utah dot edu"
html_smart3 = "You can reach me: alex dot lex at sci dot utah dot edu"
def testRegex(regex):
for html in (html_smart, html_smart2, html_smart3):
print(re.search(regex, html).group())
# TODO write your regex here
mail_regex = r"\w"
testRegex(mail_regex)
###Output
Y
Y
Y
###Markdown
Find All OccurrencesInstead of finding only a single occurrence of a match, we can also find all occurrences. Here is an example:
###Code
findall_html = 'You can reach us at <a href=\"mailto:[email protected]\">Alex\'s</a> ' \
'or <a href="mailto:[email protected]">Braxton\'s</a> e-mail if necessary.'
e_mail_re = r'[\w.-]+@[\w.-]+'
re.findall(e_mail_re, findall_html)
###Output
_____no_output_____
###Markdown
You can also combine the findall with groups:
###Code
# separating username and domain
e_mail_re_groups = r'([\w.-]+)@([\w.-]+)'
re.findall(e_mail_re_groups, findall_html)
###Output
_____no_output_____
###Markdown
If we want to use parentheses only for logic, not for grouping, we can use the `(?:)` syntax (a non-capturing grouping):
###Code
re.findall(r'(?:[\w.-]+)@(?:[\w.-]+)', findall_html)
###Output
_____no_output_____
###Markdown
Greedy vs Non-GreedyBy default, regular expressions are greedy. In this example, we try to match HTML tags:
###Code
html_tags = "The <b>amount and complexity</b> of information produced in <i>science</i>..."
# start with <, repeat any character 1-n times, close with >
re.findall("<.+>", html_tags)
###Output
_____no_output_____
###Markdown
This wasn't what we tried to do – the greedy nature of regex matched from the first opening tag < to the last closing tag. We can modify this behavior with the `?` character, which signals that the expression on the left should not be greedy:
###Code
# start with <, repeat any character 1-n times in a non-greedy way, terminat at the first >
re.findall("<.+?>", html_tags)
###Output
_____no_output_____
###Markdown
Greedy applies to the `*`, `+` and `?` operators – so these are legal sequences: `*?`, `+?`, `??`. Custom character subsetsYou can also define custom character sets by specifying a range with a dash:
###Code
re.search(r"[2-9]+", "0123405").group()
###Output
_____no_output_____
###Markdown
When combined with character sets, we can use the `^` operator to invert a match.
###Code
re.search(r"[^0-2]+", "0123405").group()
###Output
_____no_output_____
###Markdown
Specifying number of copies`{m}` Specifies that exactly m copies of the previous RE that should be matched. Fewer matches cause the entire RE not to match.
###Code
phone_numbers = "(857) 131-2235, (801) 134-2215, this is common in twelve (12) countries and one (1) state"
# match exactly three digits enclosed in brackets
re.findall("\(([0-9]{3})\)", phone_numbers)
###Output
_____no_output_____
###Markdown
{m,n} specifies that m to n copies match:
###Code
# match two to three digits enclosed in brackets
re.findall("\(([0-9]{2,3})\)", phone_numbers)
###Output
_____no_output_____
###Markdown
Or expressionWe can use the pipe `|` to define an OR between regular expressions:
###Code
weekdays = "We could meet Monday or Wednesday"
re.findall("Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday", weekdays)
###Output
_____no_output_____
###Markdown
Replacing stringsWe can use the [`sub()`](https://docs.python.org/3/library/re.htmlre.sub) to dynamically replace content.
###Code
re.sub("Monday|Tuesday|Wednesday|Thursday|Friday", "Weekday", weekdays)
###Output
_____no_output_____
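###Markdown
Before the exercises, here is a small sketch (with invented strings) of three more `re` features that the next cell summarizes: `finditer()`, which returns an iterator of match objects, and the `IGNORECASE` and `DOTALL` flags.
###Code
# finditer() yields a match object per occurrence; IGNORECASE ignores case
for m in re.finditer(r"cat", "Cat, cat and CAT", re.IGNORECASE):
    print(m.start(), m.group())
# by default . does not match a newline ...
print(re.search(r"line1.line2", "line1\nline2"))
# ... but with DOTALL it does
print(re.search(r"line1.line2", "line1\nline2", re.DOTALL))
###Output
_____no_output_____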
###Markdown
Other FunctionsWe've covered a lot, but not all of the functionality of regex. A couple of other functions that could be helpful:* [finditer](https://docs.python.org/3/library/re.htmlre.finditer) returns an iterator* the [IGNORECASE](https://docs.python.org/3/library/re.htmlre.IGNORECASE) option* the [DOTALL](https://docs.python.org/3/library/re.htmlre.DOTALL) option makes a . match a new line character too. Exercises Exercise 2.2: Find AdverbsWrite a regular expression that finds all adverbs in a sentence. Adverbs are characterized by ending in "ly".
###Code
text = "He was carefully disguised but captured quickly by police."
###Output
_____no_output_____
###Markdown
Exercise 2.3: Phone NumbersExtract the phone numbers that follow a (xxx) xxx-xxxx pattern from the text:
###Code
phone_numbers = "(857) 131-2235, (801) 134-2215, but this one (12) 13044441 shouldnt match. Also, this is common in twelve (12) countries and one (1) state"
###Output
_____no_output_____
###Markdown
Exercise 2.4: HTML ContentExtract the content between the `` and `` tags but not the other tags:
###Code
html_tags = "This is <b>important</b> and <u>very</u><i>timely</i>"
###Output
_____no_output_____
###Markdown
Introduction to Data Science – Regular Expressions*COMP 5360 / MATH 4100, University of Utah, http://datasciencecourse.net/* In this lecture we'll learn about regular expressions. Regular expressions are a way to match strings. They are very useful to find (and replace) text, to extract structured information such as e-mails, phone numbers, etc., or for cleaning up text that was entered by humans, and many other applications. In Python, regular expressions are available as part of the [`re`](https://docs.python.org/3/library/re.htmlmodule-re) module. There are various [good](https://docs.python.org/3/howto/regex.html) [tutorials](https://developers.google.com/edu/python/regular-expressions) available on which this document is partially based. The basic syntax to search for a match in a string is this: ```pythonmatch = re.search(pattern, text)```Here, `pattern` is the regular expression, `text` is the text that the regular expression is applied to. Match holds the search result that matches the string in an object.[`search()`](https://docs.python.org/3/library/re.htmlre.search) returns only the first occurrence of a match, in contrast, [`findall()`](https://docs.python.org/3/library/re.htmlre.findall) returns all matches.Another useful function is [`split()`](https://docs.python.org/3/library/re.htmlre.split), which splits a string based on a regex pattern – we'll use all of these functions and others where appropriate. Mostly, we'll use search to learn about the syntax, but sometimes we'll use split instead of search to explain a pattern. There are other functions which we'll use later. A simple ExampleWe'll use a regular expression: ```python'animal:\w\w\w'```To match the substring 'animal:' followed by a three letter word, encoded by '\w\w\w'
###Code
import re
# example text
text = "an example animal:cat!! animal:dog! animal:hedgehog"
# running the search, r before the string denotes a raw string
match = re.search(r"animal:\w\w\w", text)
# If-statement after search() tests if it succeeded
if match:
print ("found:", match.group()) ## 'found word:cat'
else:
print ("did not find")
###Output
found: animal:cat
###Markdown
Here, the `r` before the string denotes that this should be treated as a raw string literal, i.e., that python shouldn't try to interpret the backslashes as escape characters, as it would, e.g., for `\n` - new line. This is quite useful for regular expressions, because we'd have to write the above query like this otherwise:```"animal:\\w\\w\\w"```The specific match can be retrieved using [`match.group()`](https://docs.python.org/3/library/re.htmlre.Match.group). Basic PatternsOrdinary characters, such as "`a, X, 9, <`" match themselves literally.
###Code
# search for occurence of "sc"
re.search(r"sc", "datascience").group()
# search for occurrence of <
re.search(r"<", "data<science").group()
###Output
_____no_output_____
###Markdown
Special characters do not match themselves because they are part of the language. These are `. ^ $ * + ? { [ ] \ | ( )`.
###Code
# search for the beginning of the string, not the ^ symbol
re.search(r"^", "datascience^2").group()
###Output
_____no_output_____
###Markdown
We can escape special characters to match literally with a backslash `\`.
###Code
# search for the ^ symbol by escaping it
re.search(r"\^", "datascience^2").group()
###Output
_____no_output_____
###Markdown
A period `.` matches a single character, but not a newline character.
###Code
# search for the first single character
re.search(r".", "datascience.net").group()
###Output
_____no_output_____
###Markdown
`\w` matches a "word" character: a letter or digit or underbar `[a-zA-Z0-9_]`. Note that it only matches a single word char, not a whole word.
###Code
# search for the first word char
re.search(r"\w", "datascience").group()
# search for the first word char - note that < doesn't match
re.search(r"\w", "<datascience>").group()
###Output
_____no_output_____
###Markdown
`\W` (upper case W) matches any non-word character.
###Code
# search for the first non-word char
re.search(r"\W", "<datascience>").group()
###Output
_____no_output_____
###Markdown
`\s` matches a single whitespace character: space, newline `\n`, return `\r`, tab `\t`, and others.
###Code
# split by whitespace - searching for whitespace is boring
re.split(r"\s", "Intro datascience")
###Output
_____no_output_____
###Markdown
`\S` (upper case S) matches any non-whitespace character.
###Code
# search for first non-whitespace character
re.search(r"\S", " Intro datascience").group()
###Output
_____no_output_____
###Markdown
`\t`, `\n`, and `\r` match tab, newline, and return respectively.
###Code
# split the string based on tab \t
print("Intro\tdatascience 2019")
re.split(r"\t", "Intro\tdatascience 2019")
###Output
Intro datascience 2019
###Markdown
`\d` matches a decimal digit [0-9].
###Code
re.search(r"\d", "Intro datascience 2019").group()
###Output
_____no_output_____
###Markdown
`^` matches the start and `$` matches the end of the string. These are useful in the context of larger regular expressions, but not very useful in isolation. Repetition QualifiersA key concept in regex is repetition. `+` matches 1 or more occurrences of the pattern to its left.
###Code
# this matches as much as it can
re.search(r"o+", "Introoooo datascience").group()
###Output
_____no_output_____
###Markdown
`*` matches 0 or more occurrences of the pattern on its left
###Code
# search for digits \d possibly separated by one or more whitespaces
re.search(r'\d\s*\d\s*\d', 'xx1 2 3xx').group()
# note that this also works if there are no whitespaces as * indicates 0-n matches
re.search(r'\d\s*\d\s*\d', 'xx123xx').group()
###Output
_____no_output_____
###Markdown
We can use this, for example to look for words starting with a certain character:
###Code
# d\w* start with a d, then match zero or more word characters
re.search(r"d\w*", "Introoooo datascience !").group()
###Output
_____no_output_____
###Markdown
`?` matches 0 or 1 occurrences of the pattern on its left:
###Code
# d\w? start with a d, then match zero or one characters. Why is the result "da" not "d"?
re.search(r"d\w?", "Introoooo datascience !").group()
###Output
_____no_output_____
###Markdown
This matches `da` not `d` because all these repetition qualifiers are greedy, i.e., match as much as possible. We'll talk more about this below. Be aware that the zero or more condition can be tricky. For example, if we want to match a `dd` with `*` and do it like this, we get a zero match, because the **start of the string** already matches the "or zero" condition. The correct pattern here would be `d+`.
###Code
re.search(r"d*", "Introoooo ddatascience !").group()
re.search(r"d+", "Introoooo ddatascience !").group()
###Output
_____no_output_____
###Markdown
Example: E-MailsLet's take a look at how we can use regular expressions. Suppose you're a spammer and you want to scrape e-mail addresses from websites. Here is an example:
###Code
html = 'You can reach me <a href="mailto:[email protected]">by e-mail</a> if necessary.'
# a first attempt:
# \w+ 1-n word letters,
# @ for the literal @
# 1-n word letters
re.search(r'\w+@\w+', html).group()
###Output
_____no_output_____
###Markdown
That didn't work because `\w` doesn't match the period `.`. We can write a more specific query:
###Code
# \w+ 1-n word letters
# @
# 1-n word letters
# a period \.
# 1-n word letters
# another period \.,
# and more 1-n word letters
re.search(r'\w+@\w+\.+\w+\.\w+', html).group()
###Output
_____no_output_____
###Markdown
That worked! But it's easy to see that this isn't very general, i.e., it doesn't work for every legal e-mail.
###Code
html2 = 'You can reach me <a href="mailto:[email protected]">by e-mail</a> if necessary.'
match = re.search(r'\w+@\w+\.+\w+\.\w+', html2)
if match:
print(match.group())
else:
print ("didn't match")
###Output
didn't match
###Markdown
Here the e-mail [email protected] wasn't matched at all.
###Code
html3 = "You can reach me <a href='mailto:[email protected]'>by e-mail</a> if necessary."
# \w+ 1-n word letters, @,
match = re.search(r'\w+@\w+\.+\w+\.\w+', html3)
if match:
print(match.group())
else:
print ("didn't match")
###Output
[email protected]
###Markdown
Here, something matched but it's the wrong e-mail! It's not [email protected], but [email protected]. To fix this, we need another concept: Sets of legal chars We need another tool: **square brackets** `[]`. When using square brackets to enclose an expression, all the characters in the expression match:
###Code
#[\w.-]+ matches all strings that are made up of one or more word character, a period ., or dash - characters.
re.search(r'[\w.-]+@[\w.-]+', html).group()
re.search(r'[\w.-]+@[\w.-]+', html3).group()
###Output
_____no_output_____
###Markdown
That worked wonderfully! See how easy it is to extract an e-mail from a website. However, this pattern matches valid e-mail addresses, but it also matches non-valid ones. So this is a fine regex if you want to extract e-mail addresses, but not if you want to validate an e-mail address:
###Code
html4 = "alexander@sci..."
re.search(r'[\w.-]+@[\w.-]+', html4).group()
###Output
_____no_output_____
###Markdown
GroupingIf we want to be more specific about repeating substrings, for example, we need to be able to group a part of a regular expression. You can group with round brackets `()`:
###Code
# (da)+ gives us 1+ matches of the string "da", e.g., this will match da dada dadada, etc.
re.search(r"(da)+", "Introoooo dadadadascience 2016").group()
###Output
_____no_output_____
###Markdown
Groups are also a handy way to match a larger string, but only extract what is nested within a group. The [`group()`](https://docs.python.org/3/library/re.htmlre.match.group) method we've been using provides access to matched groups independently. Here is an example of extracting a URL from a string:
###Code
url = 'Visit the course website <a href="http://datasciencecourse.net">here</a>'
# legal characters in a url are \w, :, slash /, period ., which we have to escape to \.
# we use the href="" part to identify only URLs contained within that attribute
# but we don't actually want to match that.
match = re.search(r'href="([\w:/\.]+)"', url)
print("The whole match:", match.group())
# Here we retrieve the first individual group:
print("Only the match within the second group at index 1:", match.group(1))
###Output
The whole match: href="http://datasciencecourse.net"
Only the match within the second group at index 1: http://datasciencecourse.net
###Markdown
Exercise 2.1You're an evil Spammer who's observed that many people try to obfuscate their e-mail using this notation: "`alex at utah dot edu`". Below are three examples of text containing such e-mails. Try to extract "alex at utah dot edu", etc. Start with the first string. Then extend your regular expression to work on all of them at the same time. Note that the second and third are slightly harder to do!
###Code
html_smart = "You can reach me: alex at utah dot edu"
html_smart2 = "You can reach me: alex dot lex at utah dot edu"
html_smart3 = "You can reach me: alex dot lex at sci dot utah dot edu"
def testRegex(regex):
for html in (html_smart, html_smart2, html_smart3):
print(re.search(regex, html).group())
# TODO write your regex here
mail_regex = r"\w"
testRegex(mail_regex)
###Output
_____no_output_____
###Markdown
Find All OccurrencesInstead of finding only a single occurrence of a match, we can also find all occurrences. Here is an example:
###Code
findall_html = 'You can reach us at <a href=\"mailto:[email protected]\">Alex\'s</a> ' \
'or <a href="mailto:[email protected]">Braxton\'s</a> e-mail if necessary.'
e_mail_re = r'[\w.-]+@[\w.-]+'
re.findall(e_mail_re, findall_html)
###Output
_____no_output_____
###Markdown
You can also combine the findall with groups:
###Code
# separating username and domain
e_mail_re_groups = r'([\w.-]+)@([\w.-]+)'
re.findall(e_mail_re_groups, findall_html)
###Output
_____no_output_____
###Markdown
If we want to use parentheses only for logic, not for grouping, we can use the `(?:)` syntax (a non-capturing grouping):
###Code
re.findall(r'(?:[\w.-]+)@(?:[\w.-]+)', findall_html)
###Output
_____no_output_____
###Markdown
Greedy vs Non-GreedyBy default, regular expressions are greedy. In this example, we try to match HTML tags:
###Code
html_tags = "The <b>amount and complexity</b> of information produced in <i>science</i>..."
# start with <, repeat any character 1-n times, close with >
re.findall("<.+>", html_tags)
###Output
_____no_output_____
###Markdown
This wasn't what we tried to do - the greedy nature of regex matched from the first opening tag < to the last closing tag. We can modify this behavior with the `?` character, which signals that the expression on the left should not be greedy:
###Code
# start with <, repeat any character 1-n times in a non-greedy way, terminate at the first >
re.findall("<.+?>", html_tags)
###Output
_____no_output_____
###Markdown
Greedy applies to the `*`, `+` and `?` operators – so these are legal sequences: `*?`, `+?`, `??`. Custom character subsetsYou can also define custom character sets by specifying a range with a dash:
###Code
re.search(r"[2-9]+", "0123405").group()
###Output
_____no_output_____
###Markdown
When combined with character sets, we can use the `^` operator to invert a match.
###Code
re.search(r"[^0-2]+", "0123405").group()
###Output
_____no_output_____
###Markdown
Specifying number of copies`{m}` specifies that exactly m copies of the previous RE should be matched. Fewer matches cause the entire RE not to match.
###Code
phone_numbers = "(857) 131-2235, (801) 134-2215, this is common in twelve (12) countries and one (1) state"
# match exactly three digits enclosed in brackets
re.findall("\(([0-9]{3})\)", phone_numbers)
###Output
_____no_output_____
###Markdown
{m,n} specifies that m to n copies match:
###Code
# match two to three digits enclosed in brackets
re.findall("\(([0-9]{2,3})\)", phone_numbers)
###Output
_____no_output_____
###Markdown
Or expressionWe can use the pipe `|` to define an or between any regular expression:
###Code
weekdays = "We could meet Monday or Wednesday"
re.findall("Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday", weekdays)
###Output
_____no_output_____
###Markdown
Replacing stringsWe can use the [`sub()`](https://docs.python.org/3/library/re.htmlre.sub) function to dynamically replace content.
###Code
re.sub("Monday|Tuesday|Wednesday|Thursday|Friday", "Weekday", weekdays)
###Output
_____no_output_____
###Markdown
Other FunctionsWe've covered a lot, but not all of the functionality of regex. A couple of other functions that could be helpful:* [finditer](https://docs.python.org/3/library/re.htmlre.finditer) returns an iterator* the [IGNORECASE](https://docs.python.org/3/library/re.htmlre.IGNORECASE) option* the [DOTALL](https://docs.python.org/3/library/re.htmlre.DOTALL) option makes a . match a new line character too. Exercises Exercise 2.2: Find AdverbsWrite a regular expression that finds all adverbs in a sentence. Adverbs are characterized by ending in "ly".
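As a quick, hedged sketch of the `finditer` and `IGNORECASE` options listed above (the sample sentence is a made-up variant of the exercise text, with one adverb capitalised), which may also come in handy for the exercises below:

```python
import re

sample = "He was CAREFULLY disguised but captured quickly by police."  # made-up variant
# finditer returns an iterator of match objects instead of a list of strings,
# and IGNORECASE makes the pattern match regardless of capitalisation
for m in re.finditer(r"\w+ly", sample, re.IGNORECASE):
    print(m.group(), "found at position", m.start())
```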
###Code
text = "He was carefully disguised but captured quickly by police."
###Output
_____no_output_____
###Markdown
Exercise 2.3: Phone NumbersExtract the phone numbers that follow a (xxx) xxx-xxxx pattern from the text:
###Code
phone_numbers = "(857) 131-2235, (801) 134-2215, but this one (12) 13044441 shouldnt match. Also, this is common in twelve (12) countries and one (1) state"
###Output
_____no_output_____
###Markdown
Exercise 2.4: HTML ContentExtract the content between the `<b>` and `</b>` tags but not the other tags:
###Code
html_tags = "This is <b>important</b> and <u>very</u><i>timely</i>"
###Output
_____no_output_____
###Markdown
Introduction to Data Science – Regular Expressions*COMP 5360 / MATH 4100, University of Utah, http://datasciencecourse.net/* In this lecture we'll learn about regular expressions. Regular expressions are a way to match strings. They are very useful to find (and replace) text, to extract structured information such as e-mails, phone numbers, etc., or for cleaning up text that was entered by humans, and many other applications. In Python, regular expressions are available as part of the [`re`](https://docs.python.org/3/library/re.htmlmodule-re) module. There are various [good](https://docs.python.org/3/howto/regex.html) [tutorials](https://developers.google.com/edu/python/regular-expressions) available on which this document is partially based. The basic syntax to search for a match in a string is this: ```pythonmatch = re.search(pattern, text)```Here, `pattern` is the regular expression, `text` is the text that the regular expression is applied to. Match holds the search result that matches the string in an object.[`search()`](https://docs.python.org/3/library/re.htmlre.search) returns only the first occurrence of a match, in contrast, [`findall()`](https://docs.python.org/3/library/re.htmlre.findall) returns all matches.Another useful function is [`split()`](https://docs.python.org/3/library/re.htmlre.split), which splits a string based on a regex pattern – we'll use all of these functions and others where appropriate. Mostly, we'll use search to learn about the syntax, but sometimes we'll use split instead of search to explain a pattern. There are other functions which we'll use later. A simple ExampleWe'll use a regular expression: ```python'animal:\w\w\w'```To match the substring 'animal:' followed by a three letter word, encoded by '\w\w\w'
###Code
import re
# example text
text = "an example animal:cat!! animal:dog! animal:hedgehog"
# running the search, r before the string denotes a raw string
match = re.search(r"animal:\w\w\w", text)
# If-statement after search() tests if it succeeded
if match:
print ("found:", match.group())
else:
print ("did not find")
###Output
found: animal:cat
###Markdown
Here, the `r` before the string denotes that this should be treated as a raw string literal, i.e., that python shouldn't try to interpret the backslashes as escape characters, as it would, e.g., for `\n` – new line. This is quite useful for regular expressions, because we'd have to write the above query like this otherwise:```"animal:\\w\\w\\w"```The specific match can be retrieved using [`match.group()`](https://docs.python.org/3/library/re.htmlre.Match.group). Basic PatternsOrdinary characters, such as "`a, X, 9, <`" match themselves literally.
###Code
# search for occurence of "sc"
re.search(r"sc", "datascience").group()
# search for occurence of <
re.search(r"<", "data<science").group()
###Output
_____no_output_____
###Markdown
Special characters do not match themselves because they are part of the language. These are `. ^ $ * + ? { [ ] \ | ( )`.
###Code
# search for the beginning of the string, not the ^ symbol
re.search(r"^", "datascience^2").group()
###Output
_____no_output_____
###Markdown
We can escape special characters to match literally with a backslash `\`.
###Code
# search for the ^ symbol by escaping it
re.search(r"\^", "datascience^2").group()
###Output
_____no_output_____
###Markdown
A period `.` matches a single character, but not a newline character.
###Code
# search for the first single character
re.search(r".", "datascience.net").group()
###Output
_____no_output_____
###Markdown
`\w` matches a "word" character: a letter or digit or underbar `[a-zA-Z0-9_]`. Note that it only matches a single word char, not a whole word.
###Code
# search for the first word char
re.search(r"\w", "datascience").group()
# search for the first word char - note that < doesn't match
re.search(r"\w", "<datascience>").group()
###Output
_____no_output_____
###Markdown
`\W` (upper case W) matches any non-word character.
###Code
# search for the first non-word char
re.search(r"\W", "<datascience>").group()
###Output
_____no_output_____
###Markdown
`\s` matches a single whitespace character: space, newline `\n`, return `\r`, tab `\t`, and others.
###Code
# split by whitespace - searching for whitespace is boring
re.split(r"\s", "Intro datascience")
###Output
_____no_output_____
###Markdown
`\S` (upper case S) matches any non-whitespace character.
###Code
# search for first non-whitespace character
re.search(r"\S", " Intro datascience").group()
###Output
_____no_output_____
###Markdown
`\t`, `\n`, and `\r` match tab, newline, and return respectively.
###Code
# split the string based on tab \t
print("Intro\tdatascience 2019")
re.split(r"\t", "Intro\tdatascience 2019")
###Output
Intro datascience 2019
###Markdown
`\d` matches a decimal digit [0-9].
###Code
re.search(r"\d", "Intro datascience 2019").group()
###Output
_____no_output_____
###Markdown
`^` matches the start and `$` matches the end of the string. These are useful in the context of a larger regular expression, but not very useful in isolation; a short illustration follows below. Repetition QualifiersA key concept in regex is repetition. `+` matches 1 or more occurrences of the pattern to its left.
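As the promised short illustration (a sketch only, with made-up sample strings), a pattern anchored at both ends only matches if the whole string fits it:

```python
import re

# digits only: both anchors force the pattern to cover the entire string
print(re.search(r"^\d+$", "2019"))       # a match object
print(re.search(r"^\d+$", "year 2019"))  # None, extra characters before the digits
```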
###Code
# this matches as much as it can
re.search(r"o+", "Introoooo datascience").group()
###Output
_____no_output_____
###Markdown
`*` matches 0 or more occurrences of the pattern on its left
###Code
# search for digits \d possibly separated by one or more whitespaces
re.search(r'\d\s*\d\s*\d', 'xx1 2 3xx').group()
# note that this also works if there are no whitespaces as * indicates 0-n matches
re.search(r'\d\s*\d\s*\d', 'xx123xx').group()
###Output
_____no_output_____
###Markdown
We can use this, for example to look for words starting with a certain character:
###Code
# d\w* start with a d, then match zero or more word characters
re.search(r"d\w*", "Introoooo datascience !").group()
###Output
_____no_output_____
###Markdown
`?` matches 0 or 1 occurrences of the pattern on its left:
###Code
# d\w? start with a d, then match zero or one characters. Why is the result "da" not "d"?
re.search(r"d\w?", "Introoooo datascience !").group()
###Output
_____no_output_____
###Markdown
This matches `da` not `d` because all these repetition qualifiers are greedy, i.e., match as much as possible. We'll talk more about this below. Be aware that the zero or more condition can be tricky. For example, if we want to match a `dd` with `*` and do it like this, we get a zero match, because the **start of the string** already matches the "or zero" condition. The correct pattern here would be `d+`.
###Code
re.search(r"d*", "Introoooo ddatascience !").group()
re.search(r"d+", "Introoooo ddatascience !").group()
###Output
_____no_output_____
###Markdown
Example: E-MailsLet's take a look at how we can use regular expressions. Suppose you're a spammer and you want to scrape e-mail addresses from websites. Here is an example:
###Code
html = 'You can reach me <a href="mailto:[email protected]">by e-mail</a> if necessary.'
# a first attempt:
# \w+ 1-n word letters,
# @ for the literal @
# 1-n word letters
re.search(r'\w+@\w+', html).group()
###Output
_____no_output_____
###Markdown
That didn't work because `\w` doesn't match the period `.`. We can write a more specific query:
###Code
# \w+ 1-n word letters
# @
# \w+ 1-n word letters
# \. a period (escaped)
# \w+ 1-n word letters
# \. another period
# \w+ and more 1-n word letters
re.search(r'\w+@\w+\.+\w+\.\w+', html).group()
###Output
_____no_output_____
###Markdown
That worked! But it's easy to see that this isn't very general, i.e., it doesn't work for every legal e-mail.
###Code
html2 = 'You can reach me <a href="mailto:[email protected]">by e-mail</a> if necessary.'
match = re.search(r'\w+@\w+\.+\w+\.\w+', html2)
if match:
print(match.group())
else:
print ("didn't match")
###Output
didn't match
###Markdown
Here the e-mail [email protected] wasn't matched at all.
###Code
html3 = "You can reach me <a href='mailto:[email protected]'>by e-mail</a> if necessary."
# \w+ 1-n word letters, @,
match = re.search(r'\w+@\w+\.+\w+\.\w+', html3)
if match:
print(match.group())
else:
print ("didn't match")
###Output
[email protected]
###Markdown
Here, something matched but it's the wrong e-mail! It's not [email protected], but [email protected]. To fix this, we need another concept: Sets of legal chars We need another tool: **square brackets** `[]`. When using square brackets to enclose an expression, all the characters in the expression match:
###Code
#[\w.-]+ matches all strings that are made up of one or more word character, a period ., or dash - characters.
re.search(r'[\w.-]+@[\w.-]+', html).group()
re.search(r'[\w.-]+@[\w.-]+', html3).group()
###Output
_____no_output_____
###Markdown
That worked wonderfully! See how easy it is to extract an e-mail from a website. Also note that we didn't escape the `.`. That's because inside square brackets, only `^`, `-`, `]`, and `\` need to be escaped, all others, like `.`, `^`, and `$`, are treated as literals. However, this pattern matches valid e-mail addresses, but it also matches non-valid ones. So this is a fine regex if you want to extract e-mail addresses, but not if you want to validate an e-mail address:
###Code
html4 = "alexander@sci..."
re.search(r'[\w.-]+@[\w.-]+', html4).group()
###Output
_____no_output_____
###Markdown
GroupingIf we want to be more specific about repeating substrings, for example, we need to be able to group a part of a regular expression. You can group with round brackets `()`:
###Code
# (da)+ gives us 1+ matches of the string "da", e.g., this will match da dada dadada, etc.
re.search(r"(da)+", "Introoooo dadadadascience 2016").group()
###Output
_____no_output_____
###Markdown
Groups are also a handy way to match a larger string, but only extract what is nested within a group. The [`group()`](https://docs.python.org/3/library/re.htmlre.match.group) method we've been using provides access to matched groups independently. Here is an example of extracting a URL from a string:
###Code
url = 'Visit the course website <a href="http://datasciencecourse.net">here</a>'
# legal characters in a url are \w, :, slash /, period .
# we use the href="" part to identify only URLs contained within that attribute
# but we don't actually want to match that.
match = re.search(r'href="([\w:/.]+)"', url)
print("The whole match:", match.group())
# Here we retrieve the first individual group:
print("Only the match within the second group at index 1:", match.group(1))
###Output
The whole match: href="http://datasciencecourse.net"
Only the match within the second group at index 1: http://datasciencecourse.net
###Markdown
Exercise 2.1You're an evil Spammer who's observed that many people try to obfuscate their e-mail using this notation: "`alex at utah dot edu`". Below are three examples of text containing such e-mails. Try to extract "alex at utah dot edu", etc. Start with the first string. Then extend your regular expression to work on all of them at the same time. Note that the second and third are slightly harder to do!
###Code
html_smart = "You can reach me: alex at utah dot edu"
html_smart2 = "You can reach me: alex dot lex at utah dot edu"
html_smart3 = "You can reach me: alex dot lex at sci dot utah dot edu"
def testRegex(regex):
for html in (html_smart, html_smart2, html_smart3):
print(re.search(regex, html).group())
# TODO write your regex here
mail_regex = r"\w"
testRegex(mail_regex)
###Output
Y
Y
Y
###Markdown
Find All OccurrencesInstead of finding only a single occurrence of a match, we can also find all occurrences. Here is an example:
###Code
findall_html = 'You can reach us at <a href=\"mailto:[email protected]\">Alex\'s</a> ' \
'or <a href="mailto:[email protected]">Braxton\'s</a> e-mail if necessary.'
e_mail_re = r'[\w.-]+@[\w.-]+'
re.findall(e_mail_re, findall_html)
###Output
_____no_output_____
###Markdown
You can also combine the findall with groups:
###Code
# separating username and domain
e_mail_re_groups = r'([\w.-]+)@([\w.-]+)'
re.findall(e_mail_re_groups, findall_html)
###Output
_____no_output_____
###Markdown
If we want to use parentheses only for logic, not for grouping, we can use the `(?:)` syntax (a non-capturing grouping):
###Code
re.findall(r'(?:[\w.-]+)@(?:[\w.-]+)', findall_html)
###Output
_____no_output_____
###Markdown
Greedy vs Non-GreedyBy default, regular expressions are greedy. In this example, we try to match HTML tags:
###Code
html_tags = "The <b>amount and complexity</b> of information produced in <i>science</i>..."
# start with <, repeat any character 1-n times, close with >
re.findall("<.+>", html_tags)
###Output
_____no_output_____
###Markdown
This wasn't what we tried to do – the greedy nature of regex matched from the first opening tag < to the last closing tag. We can modify this behavior with the `?` character, which signals that the expression on the left should not be greedy:
###Code
# start with <, repeat any character 1-n times in a non-greedy way, terminate at the first >
re.findall("<.+?>", html_tags)
###Output
_____no_output_____
###Markdown
Greedy applies to the `*`, `+` and `?` operators – so these are legal sequences: `*?`, `+?`, `??`. Custom character subsetsYou can also define custom character sets by specifying a range with a dash:
###Code
re.search(r"[2-9]+", "0123405").group()
###Output
_____no_output_____
###Markdown
When combined with character sets, we can use the `^` operator to invert a match.
###Code
re.search(r"[^0-2]+", "0123405").group()
###Output
_____no_output_____
###Markdown
Specifying number of copies`{m}` specifies that exactly m copies of the previous RE should be matched. Fewer matches cause the entire RE not to match.
###Code
phone_numbers = "(857) 131-2235, (801) 134-2215, this is common in twelve (12) countries and one (1) state"
# match exactly three digits enclosed in brackets
re.findall("\(([0-9]{3})\)", phone_numbers)
###Output
_____no_output_____
###Markdown
{m,n} specifies that m to n copies match:
###Code
# match two to three digits enclosed in brackets
re.findall("\(([0-9]{2,3})\)", phone_numbers)
###Output
_____no_output_____
###Markdown
Or expressionWe can use the pipe `|` to define an or between any regular expression:
###Code
weekdays = "We could meet Monday or Wednesday"
re.findall("Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday", weekdays)
###Output
_____no_output_____
###Markdown
Replacing stringsWe can use the [`sub()`](https://docs.python.org/3/library/re.htmlre.sub) function to dynamically replace content.
###Code
re.sub("Monday|Tuesday|Wednesday|Thursday|Friday", "Weekday", weekdays)
###Output
_____no_output_____
###Markdown
Other FunctionsWe've covered a lot, but not all of the functionality of regex. A couple of other functions that could be helpful:* [finditer](https://docs.python.org/3/library/re.htmlre.finditer) returns an iterator* the [IGNORECASE](https://docs.python.org/3/library/re.htmlre.IGNORECASE) option* the [DOTALL](https://docs.python.org/3/library/re.htmlre.DOTALL) option makes a . match a new line character too. Exercises Exercise 2.2: Find AdverbsWrite a regular expression that finds all adverbs in a sentence. Adverbs are characterized by ending in "ly".
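As a brief, hedged sketch of the `DOTALL` option mentioned above (the two-line sample string is made up for illustration):

```python
import re

multiline = "first line\nsecond line"  # made-up sample
# by default . does not cross the newline, so there is no match
print(re.search(r"first.+second", multiline))  # None
# with re.DOTALL the . also matches \n
print(re.search(r"first.+second", multiline, re.DOTALL).group())
```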
###Code
text = "He was carefully disguised but captured quickly by police."
###Output
_____no_output_____
###Markdown
Exercise 2.3: Phone NumbersExtract the phone numbers that follow a (xxx) xxx-xxxx pattern from the text:
###Code
phone_numbers = "(857) 131-2235, (801) 134-2215, but this one (12) 13044441 shouldnt match. Also, this is common in twelve (12) countries and one (1) state"
###Output
_____no_output_____
###Markdown
Exercise 2.4: HTML ContentExtract the content between the `<b>` and `</b>` tags but not the other tags:
###Code
html_tags = "This is <b>important</b> and <u>very</u><i>timely</i>"
###Output
_____no_output_____
###Markdown
Introduction to Data Science – Regular Expressions*COMP 5360 / MATH 4100, University of Utah, http://datasciencecourse.net/* In this lecture we'll learn about regular expressions. Regular expressions are a way to match strings. They are very useful to find (and replace) text, to extract structured information such as e-mails, phone numbers, etc., or for cleaning up text that was entered by humans, and many other applications. In Python, regular expressions are available as part of the [`re`](https://docs.python.org/3/library/re.htmlmodule-re) module. There are various [good](https://docs.python.org/3/howto/regex.html) [tutorials](https://developers.google.com/edu/python/regular-expressions) available on which this document is partially based. The basic syntax to search for a match in a string is this: ```pythonmatch = re.search(pattern, text)```Here, `pattern` is the regular expression, `text` is the text that the regular expression is applied to. Match holds the search result that matches the string in an object.[`search()`](https://docs.python.org/3/library/re.htmlre.search) returns only the first occurrence of a match, in contrast, [`findall()`](https://docs.python.org/3/library/re.htmlre.findall) returns all matches.Another useful function is [`split()`](https://docs.python.org/3/library/re.htmlre.split), which splits a string based on a regex pattern – we'll use all of these functions and others where appropriate. Mostly, we'll use search to learn about the syntax, but sometimes we'll use split instead of search to explain a pattern. There are other functions which we'll use later. A simple ExampleWe'll use a regular expression: ```python'animal:\w\w\w'```To match the substring 'animal:' followed by a three letter word, encoded by '\w\w\w'
###Code
import re
# example text
text = "an example animal:cat!! animal:dog! animal:hedgehog"
# running the search, r before the string denotes a raw string
match = re.search(r"animal:\w\w\w", text)
# If-statement after search() tests if it succeeded
if match:
print ("found:", match.group())
else:
print ("did not find")
###Output
found: animal:cat
###Markdown
Here, the `r` before the string denotes that this should be treated as a raw string literal, i.e., that python shouldn't try to interpret the backslashes as escape characters, as it would, e.g., for `\n` – new line. This is quite useful for regular expressions, because we'd have to write the above query like this otherwise:```"animal:\\w\\w\\w"```The specific match can be retrieved using [`match.group()`](https://docs.python.org/3/library/re.htmlre.Match.group). Basic PatternsOrdinary characters, such as "`a, X, 9, <`" match themselves literally.
###Code
# search for occurence of "sc"
re.search(r"sc", "datascience").group()
# search for occurence of <
re.search(r"<", "data<science").group()
###Output
_____no_output_____
###Markdown
Special characters do not match themselves because they are part of the language. These are `. ^ $ * + ? { [ ] \ | ( )`.
###Code
# search for the beginning of the string, not the ^ symbol
re.search(r"^", "datascience^2").group()
###Output
_____no_output_____
###Markdown
We can escape special characters to match literally with a backslash `\`.
###Code
# search for the ^ symbol by escaping it
re.search(r"\^", "datascience^2").group()
###Output
_____no_output_____
###Markdown
A period `.` matches a single character, but not a newline character.
###Code
# search for the first single character
re.search(r".", "datascience.net").group()
###Output
_____no_output_____
###Markdown
`\w` matches a "word" character: a letter or digit or underbar `[a-zA-Z0-9_]`. Note that it only matches a single word char, not a whole word.
###Code
# search for the first word char
re.search(r"\w", "datascience").group()
# search for the first word char - note that < doesn't match
re.search(r"\w", "<datascience>").group()
###Output
_____no_output_____
###Markdown
`\W` (upper case W) matches any non-word character.
###Code
# search for the first non-word char
re.search(r"\W", "<datascience>").group()
###Output
_____no_output_____
###Markdown
`\s` matches a single whitespace character: space, newline `\n`, return `\r`, tab `\t`, and others.
###Code
# split by whitespace - searching for whitespace is boring
re.split(r"\s", "Intro datascience")
###Output
_____no_output_____
###Markdown
`\S` (upper case S) matches any non-whitespace character.
###Code
# search for first non-whitespace character
re.search(r"\S", " Intro datascience").group()
###Output
_____no_output_____
###Markdown
`\t`, `\n`, and `\r` match tab, newline, and return respectively.
###Code
# split the string based on tab \t
print("Intro\tdatascience 2019")
re.split(r"\t", "Intro\tdatascience 2019")
###Output
Intro datascience 2019
###Markdown
`\d` matches a decimal digit [0-9].
###Code
re.search(r"\d", "Intro datascience 2019").group()
###Output
_____no_output_____
###Markdown
`^` matches the start and `$` matches the end of the string. These are useful in the context of a larger regular expression, but not very useful in isolation; a short illustration follows below. Repetition QualifiersA key concept in regex is repetition. `+` matches 1 or more occurrences of the pattern to its left.
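As the promised short illustration (a sketch only; the file names are made-up examples), a `$` anchor at the end works like a simple suffix check:

```python
import re

filenames = ["report.csv", "report.csv.bak", "notes.txt"]  # made-up examples
# \.csv$ only matches when .csv sits at the very end of the string
print([f for f in filenames if re.search(r"\.csv$", f)])   # ['report.csv']
```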
###Code
# this matches as much as it can
re.search(r"o+", "Introoooo datascience").group()
###Output
_____no_output_____
###Markdown
`*` matches 0 or more occurrences of the pattern on its left
###Code
# search for digits \d possibly separated by one or more whitespaces
re.search(r'\d\s*\d\s*\d', 'xx1 2 3xx').group()
# note that this also works if there are no whitespaces as * indicates 0-n matches
re.search(r'\d\s*\d\s*\d', 'xx123xx').group()
###Output
_____no_output_____
###Markdown
We can use this, for example to look for words starting with a certain character:
###Code
# d\w* start with a d, then match zero or more word characters
re.search(r"d\w*", "Introoooo datascience !").group()
###Output
_____no_output_____
###Markdown
`?` matches 0 or 1 occurrences of the pattern on its left:
###Code
# d\w? start with a d, then match zero or one characters. Why is the result "da" not "d"?
re.search(r"d\w?", "Introoooo datascience !").group()
###Output
_____no_output_____
###Markdown
This matches `da` not `d` because all these repetition qualifiers are greedy, i.e., match as much as possible. We'll talk more about this below. Be aware that the zero or more condition can be tricky. For example, if we want to match a `dd` with `*` and do it like this, we get a zero match, because the **start of the string** already matches the "or zero" condition. The correct pattern here would be `d+`.
###Code
re.search(r"d*", "Introoooo ddatascience !").group()
re.search(r"d+", "Introoooo ddatascience !").group()
###Output
_____no_output_____
###Markdown
Example: E-MailsLet's take a look at how we can use regular expressions. Suppose you're a spammer and you want to scrape e-mail addresses from websites. Here is an example:
###Code
html = 'You can reach me <a href="mailto:[email protected]">by e-mail</a> if necessary.'
# a first attempt:
# \w+ 1-n word letters,
# @ for the literal @
# 1-n word letters
re.search(r'\w+@\w+', html).group()
###Output
_____no_output_____
###Markdown
That didn't work because `\w` doesn't match the period `.`. We can write a more specific query:
###Code
# \w+ 1-n word letters
# @
# \w+ 1-n word letters
# \. a period (escaped)
# \w+ 1-n word letters
# \. another period
# \w+ and more 1-n word letters
re.search(r'\w+@\w+\.+\w+\.\w+', html).group()
###Output
_____no_output_____
###Markdown
That worked! But it's easy to see that this isn't very general, i.e., it doesn't work for every legal e-mail.
###Code
html2 = 'You can reach me <a href="mailto:[email protected]">by e-mail</a> if necessary.'
match = re.search(r'\w+@\w+\.+\w+\.\w+', html2)
if match:
print(match.group())
else:
print ("didn't match")
###Output
didn't match
###Markdown
Here the e-mail [email protected] wasn't matched at all.
###Code
html3 = "You can reach me <a href='mailto:[email protected]'>by e-mail</a> if necessary."
# \w+ 1-n word letters, @,
match = re.search(r'\w+@\w+\.+\w+\.\w+', html3)
if match:
print(match.group())
else:
print ("didn't match")
###Output
[email protected]
###Markdown
Here, something matched but it's the wrong e-mail! It's not [email protected], but [email protected]. To fix this, we need another concept: Sets of legal chars We need another tool: **square brackets** `[]`. When using square brackets to enclose an expression, all the characters in the expression match:
###Code
#[\w.-]+ matches all strings that are made up of one or more word character, a period ., or dash - characters.
re.search(r'[\w.-]+@[\w.-]+', html).group()
re.search(r'[\w.-]+@[\w.-]+', html3).group()
###Output
_____no_output_____
###Markdown
That worked wonderfully! See how easy it is to extract an e-mail from a website. Also note that we didn't escape the `.`. That's because inside square brackets, only `^`, `-`, `]`, and `\` need to be escaped, all others, like `.`, `^`, and `$`, are treated as literals. However, this pattern matches valid e-mail addresses, but it also matches non-valid ones. So this is a fine regex if you want to extract e-mail addresses, but not if you want to validate an e-mail address:
###Code
html4 = "alexander@sci..."
re.search(r'[\w.-]+@[\w.-]+', html4).group()
###Output
_____no_output_____
###Markdown
GroupingIf we want to be more specific about repeating substrings, for example, we need to be able to group a part of a regular expression. You can group with round brackets `()`:
###Code
# (da)+ gives us 1+ matches of the string "da", e.g., this will match da dada dadada, etc.
re.search(r"(da)+", "Introoooo dadadadascience 2016").group()
###Output
_____no_output_____
###Markdown
Groups are also a handy way to match a larger string, but only extract what is nested within a group. The [`group()`](https://docs.python.org/3/library/re.htmlre.match.group) method we've been using provides access to matched groups independently. Here is an example of extracting a URL from a string:
###Code
url = 'Visit the course website <a href="http://datasciencecourse.net">here</a>'
# legal characters in a url are \w, :, slash /, period .
# we use the href="" part to identify only URLs contained within that attribute
# but we don't actually want to match that.
match = re.search(r'href="([\w:/.]+)"', url)
print("The whole match:", match.group())
# Here we retrieve the first individual group:
print("Only the match within the second group at index 1:", match.group(1))
###Output
The whole match: href="http://datasciencecourse.net"
Only the match within the second group at index 1: http://datasciencecourse.net
###Markdown
Exercise 2.1You're an evil Spammer who's observed that many people try to obfuscate their e-mail using this notation: "`alex at utah dot edu`". Below are three examples of text containing such e-mails. Try to extract "alex at utah dot edu", etc. Start with the first string. Then extend your regular expression to work on all of them at the same time. Note that the second and third are slightly harder to do!
###Code
html_smart = "You can reach me: alex at utah dot edu"
html_smart2 = "You can reach me: alex dot lex at utah dot edu"
html_smart3 = "You can reach me: alex dot lex at sci dot utah dot edu"
def testRegex(regex):
for html in (html_smart, html_smart2, html_smart3):
print(re.search(regex, html).group())
# TODO write your regex here
mail_regex = r"\w"
testRegex(mail_regex)
###Output
Y
Y
Y
###Markdown
Find All OccurrencesInstead of finding only a single occurrence of a match, we can also find all occurrences. Here is an example:
###Code
findall_html = 'You can reach us at <a href=\"mailto:[email protected]\">Alex\'s</a> ' \
'or <a href="mailto:[email protected]">Braxton\'s</a> e-mail if necessary.'
e_mail_re = r'[\w.-]+@[\w.-]+'
re.findall(e_mail_re, findall_html)
###Output
_____no_output_____
###Markdown
You can also combine the findall with groups:
###Code
# separating username and domain
e_mail_re_groups = r'([\w.-]+)@([\w.-]+)'
re.findall(e_mail_re_groups, findall_html)
###Output
_____no_output_____
###Markdown
If we want to use parentheses only for logic, not for grouping, we can use the `(?:)` syntax (a non-capturing grouping):
###Code
re.findall(r'(?:[\w.-]+)@(?:[\w.-]+)', findall_html)
###Output
_____no_output_____
###Markdown
Greedy vs Non-GreedyBy default, regular expressions are greedy. In this example, we try to match HTML tags:
###Code
html_tags = "The <b>amount and complexity</b> of information produced in <i>science</i>..."
# start with <, repeat any character 1-n times, close with >
re.findall("<.+>", html_tags)
###Output
_____no_output_____
###Markdown
This wasn't what we tried to do – the greedy nature of regex matched from the first opening tag < to the last closing tag. We can modify this behavior with the `?` character, which signals that the expression on the left should not be greedy:
###Code
# start with <, repeat any character 1-n times in a non-greedy way, terminate at the first >
re.findall("<.+?>", html_tags)
###Output
_____no_output_____
###Markdown
Greedy applies to the `*`, `+` and `?` operators – so these are legal sequences: `*?`, `+?`, `??`. Custom character subsetsYou can also define custom character sets by specifying a range with a dash:
###Code
re.search(r"[2-9]+", "0123405").group()
###Output
_____no_output_____
###Markdown
When combined with character sets, we can use the `^` operator to invert a match.
###Code
re.search(r"[^0-2]+", "0123405").group()
###Output
_____no_output_____
###Markdown
Specifying number of copies`{m}` specifies that exactly m copies of the previous RE should be matched. Fewer matches cause the entire RE not to match.
###Code
phone_numbers = "(857) 131-2235, (801) 134-2215, this is common in twelve (12) countries and one (1) state"
# match exactly three digits enclosed in brackets
re.findall("\(([0-9]{3})\)", phone_numbers)
###Output
_____no_output_____
###Markdown
{m,n} specifies that m to n copies match:
###Code
# match two to three digits enclosed in brackets
re.findall("\(([0-9]{2,3})\)", phone_numbers)
###Output
_____no_output_____
###Markdown
Or expressionWe can use the pipe `|` to define an or between any regular expression:
###Code
weekdays = "We could meet Monday or Wednesday"
re.findall("Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday", weekdays)
###Output
_____no_output_____
###Markdown
Replacing stringsWe can use the [`sub()`](https://docs.python.org/3/library/re.htmlre.sub) function to dynamically replace content.
###Code
re.sub("Monday|Tuesday|Wednesday|Thursday|Friday", "Weekday", weekdays)
###Output
_____no_output_____
###Markdown
Other FunctionsWe've covered a lot, but not all of the functionality of regex. A couple of other functions that could be helpful:* [finditer](https://docs.python.org/3/library/re.htmlre.finditer) returns an iterator* the [IGNORECASE](https://docs.python.org/3/library/re.htmlre.IGNORECASE) option* the [DOTALL](https://docs.python.org/3/library/re.htmlre.DOTALL) option makes a . match a new line character too. Exercises Exercise 2.2: Find AdverbsWrite a regular expression that finds all adverbs in a sentence. Adverbs are characterized by ending in "ly".
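As a brief, hedged sketch of `finditer` mentioned above (the sample string below is made up, loosely echoing the phone number examples); it may also help with the exercises that follow:

```python
import re

text_with_numbers = "call (857) 131-2235 or (801) 134-2215"  # made-up sample
# finditer yields match objects, so we can also ask where each match was found
for m in re.finditer(r"\((\d{3})\)", text_with_numbers):
    print("area code", m.group(1), "at span", m.span())
```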
###Code
text = "He was carefully disguised but captured quickly by police."
###Output
_____no_output_____
###Markdown
Exercise 2.3: Phone NumbersExtract the phone numbers that follow a (xxx) xxx-xxxx pattern from the text:
###Code
phone_numbers = "(857) 131-2235, (801) 134-2215, but this one (12) 13044441 shouldnt match. Also, this is common in twelve (12) countries and one (1) state"
###Output
_____no_output_____
###Markdown
Exercise 2.4: HTML ContentExtract the content between the `<b>` and `</b>` tags but not the other tags:
###Code
html_tags = "This is <b>important</b> and <u>very</u><i>timely</i>"
###Output
_____no_output_____ |
Task 1 To Explore Supervised Machine Learning/Task 1 Supervised Machine Learning (1).ipynb | ###Markdown
Prediction Using Supervised ML
###Code
# importing libraries required in this task
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
#import dataset
url = "http://bit.ly/w-data"
data = pd.read_csv(url)
print("Data imported successfully")
data
X = data.iloc[:, :-1].values
#print X
X
Y = data.iloc[:,1].values
# print Y
Y
data.describe()
###Output
_____no_output_____
###Markdown
DATA VISUALIZATION Plotting data in a scatter form so as to get a clear sense of it
###Code
# Score distribution scatter plot
data.plot(x='Hours', y='Scores', style='o')
plt.title('Hours vs Percentage')
plt.xlabel('Hours Studied')
plt.ylabel('Percentage Score')
plt.show()
###Output
_____no_output_____
###Markdown
Plotting Linear Regression Model Splitting data into training and testing data
###Code
# Splitting the data
x = data.iloc[:, :-1].values
y = data.iloc[:, 1].values
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.80, test_size=0.20, random_state=0)
###Output
_____no_output_____
###Markdown
Model and Algorithm Training
###Code
from sklearn.linear_model import LinearRegression
# fit a linear regression model and compute its predictions on the training set
linearRegressor = LinearRegression()
linearRegressor.fit(x_train, y_train)
y_predict = linearRegressor.predict(x_train)
# fit the regressor that is used for the plot and the evaluation below
regressor = LinearRegression()
regressor.fit(x_train, y_train)
print("Training has been completed!")
# Plot Orange regression line
line = regressor.coef_*x+regressor.intercept_
plt.scatter(x, y)
plt.plot(x, line, color = 'orange');
plt.show()
print(x_test)
y_pred= regressor.predict(x_test)
###Output
[[1.5]
[3.2]
[7.4]
[2.5]
[5.9]]
###Markdown
Gauging accuracy of the training and testing dataset
###Code
print("Test score ")
print(regressor.score(x_test, y_test))
print("Training Score ")
print(regressor.score(x_train, y_train))
###Output
Test score
0.9454906892105356
Training Score
0.9515510725211552
###Markdown
Predictions
###Code
df= pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
df
# Predicting score for 9.25 hours
print('Predicted score if a student studies 9.25 hour/day', regressor.predict([[9.25]]))
###Output
Predicted score if a student studies 9.25 hour/day [93.69173249]
###Markdown
Checking efficiency of the model
###Code
from sklearn import metrics
print("Mean Absolute Error is ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error is ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error is ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
###Output
Mean Absolute Error is 4.183859899002975
Mean Squared Error is 21.5987693072174
Root Mean Squared Error is 4.6474476121003665
|
site/ko/r1/tutorials/eager/custom_layers.ipynb | ###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
사용자 정의 층 구글 코랩(Colab)에서 실행하기 깃허브(GitHub) 소스 보기 Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도불구하고 [공식 영문 문서](https://www.tensorflow.org/?hl=en)의 내용과 일치하지 않을 수 있습니다.이 번역에 개선할 부분이 있다면[tensorflow/docs](https://github.com/tensorflow/docs) 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다.문서 번역이나 리뷰에 참여하려면[[email protected]](https://groups.google.com/a/tensorflow.org/forum/!forum/docs-ko)로메일을 보내주시기 바랍니다. 신경망을 구축하기 위해서 고수준 API인 `tf.keras`를 사용하길 권합니다. 대부분의 텐서플로 API는 즉시 실행(eager execution)과 함께 사용할 수 있습니다.
###Code
import tensorflow.compat.v1 as tf
###Output
_____no_output_____
###Markdown
층: 유용한 연산자 집합머신러닝을 위한 코드를 작성하는 대부분의 경우에 개별적인 연산과 변수를 조작하는 것보다는 높은 수준의 추상화에서 작업할 것입니다.많은 머신러닝 모델은 비교적 단순한 층(layer)을 조합하고 쌓아서 표현가능합니다. 또한 텐서플로는 여러 표준형 층을 제공하므로 사용자 고유의 응용 프로그램에 관련된 층을 처음부터 작성하거나, 기존 층의 조합으로 쉽게 만들 수 있습니다.텐서플로는 [전체 케라스](https://keras.io) API를 tf.keras 패키지에 포함하고 있습니다. 케라스 층은 모델을 구축하는데 매우 유용합니다.
###Code
# tf.keras.layers 패키지에서 층은 객체입니다. 층을 구성하려면 간단히 객체를 생성하십시오.
# 대부분의 layer는 첫번째 인수로 출력 차원(크기) 또는 채널을 취합니다.
layer = tf.keras.layers.Dense(100)
# 입력 차원의 수는 층을 처음 실행할 때 유추할 수 있기 때문에 종종 불필요합니다.
# 일부 복잡한 모델에서는 수동으로 입력 차원의 수를 제공하는것이 유용할 수 있습니다.
layer = tf.keras.layers.Dense(10, input_shape=(None, 5))
###Output
_____no_output_____
###Markdown
미리 구성되어있는 층은 다음 [문서](https://www.tensorflow.org/api_docs/python/tf/keras/layers)에서 확인할 수 있습니다. Dense(완전 연결 층), Conv2D, LSTM, BatchNormalization, Dropout, 등을 포함하고 있습니다.
###Code
# 층을 사용하려면, 간단하게 호출합니다.
layer(tf.zeros([10, 5]))
# layer는 유용한 메서드를 많이 가지고 있습니다. 예를 들어, `layer.variables`를 사용하여 층안에 있는 모든 변수를 확인할 수 있으며,
# `layer.trainable_variables`를 사용하여 훈련가능한 변수를 확인할 수 있습니다.
# 완전 연결(fully-connected)층은 가중치(weight)와 편향(biases)을 위한 변수를 가집니다.
layer.variables
# 또한 변수는 객체의 속성을 통해 편리하게 접근가능합니다.
layer.kernel, layer.bias
###Output
_____no_output_____
###Markdown
사용자 정의 층 구현사용자 정의 층을 구현하는 가장 좋은 방법은 tf.keras.Layer 클래스를 상속하고 다음과 같이 구현하는 것입니다. * `__init__` 에서 층에 필요한 매개변수를 입력 받습니다.. * `build`, 입력 텐서의 크기를 알고 나머지를 초기화 할 수 있습니다. * `call`, 정방향 연산(forward computation)을 진행 할 수 있습니다.변수를 생성하기 위해 `build`가 호출되길 기다릴 필요가 없다는 것에 주목하세요. 또한 변수를 `__init__`에 생성할 수도 있습니다. 그러나 `build`에 변수를 생성하는 유리한 점은 층이 작동할 입력의 크기를 기준으로 나중에 변수를 만들 수 있다는 것입니다. 반면에, `__init__`에 변수를 생성하는 것은 변수 생성에 필요한 크기가 명시적으로 지정되어야 함을 의미합니다.
###Code
class MyDenseLayer(tf.keras.layers.Layer):
def __init__(self, num_outputs):
super(MyDenseLayer, self).__init__()
self.num_outputs = num_outputs
def build(self, input_shape):
self.kernel = self.add_variable("kernel",
shape=[int(input_shape[-1]),
self.num_outputs])
def call(self, input):
return tf.matmul(input, self.kernel)
layer = MyDenseLayer(10)
print(layer(tf.zeros([10, 5])))
print(layer.trainable_variables)
###Output
_____no_output_____
###Markdown
다른 독자가 표준형 층의 동작을 잘 알고 있기 때문에, 가능한 경우 표준형 층을 사용하는것이 전체 코드를 읽고 유지하는데 더 쉽습니다. 만약 tf.keras.layers 또는 tf.contrib.layers에 없는 층을 사용하기 원하면 [깃허브](http://github.com/tensorflow/tensorflow/issues/new)에 이슈화하거나, 풀 리퀘스트(pull request)를 보내세요. 모델: 층 구성머신러닝 모델에서 대부분의 재미있는 많은 것들은 기존의 층을 조합하여 구현됩니다. 예를 들어, 레스넷(resnet)의 각 잔여 블록(residual block)은 합성곱(convolution), 배치 정규화(batch normalization), 쇼트컷(shortcut) 등으로 구성되어 있습니다. 다른층을 포함한 모델을 만들기 위해 사용하는 메인 클래스는 tf.keras.Model입니다. 다음은 tf.keras.Model을 상속(inheritance)하여 구현한 코드입니다.
###Code
class ResnetIdentityBlock(tf.keras.Model):
def __init__(self, kernel_size, filters):
super(ResnetIdentityBlock, self).__init__(name='')
filters1, filters2, filters3 = filters
self.conv2a = tf.keras.layers.Conv2D(filters1, (1, 1))
self.bn2a = tf.keras.layers.BatchNormalization()
self.conv2b = tf.keras.layers.Conv2D(filters2, kernel_size, padding='same')
self.bn2b = tf.keras.layers.BatchNormalization()
self.conv2c = tf.keras.layers.Conv2D(filters3, (1, 1))
self.bn2c = tf.keras.layers.BatchNormalization()
def call(self, input_tensor, training=False):
x = self.conv2a(input_tensor)
x = self.bn2a(x, training=training)
x = tf.nn.relu(x)
x = self.conv2b(x)
x = self.bn2b(x, training=training)
x = tf.nn.relu(x)
x = self.conv2c(x)
x = self.bn2c(x, training=training)
x += input_tensor
return tf.nn.relu(x)
block = ResnetIdentityBlock(1, [1, 2, 3])
print(block(tf.zeros([1, 2, 3, 3])))
print([x.name for x in block.trainable_variables])
###Output
_____no_output_____
###Markdown
그러나 대부분의 경우에, 많은 층으로 구성된 모델은 간단하게 연이어 하나의 층으로 호출할 수 있습니다. 이는 tf.keras.Sequential 사용하여 간단한 코드로 구현 가능합니다.
###Code
my_seq = tf.keras.Sequential([tf.keras.layers.Conv2D(1, (1, 1)),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Conv2D(2, 1,
padding='same'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Conv2D(3, (1, 1)),
tf.keras.layers.BatchNormalization()])
my_seq(tf.zeros([1, 2, 3, 3]))
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
사용자 정의 층 구글 코랩(Colab)에서 실행하기 깃허브(GitHub) 소스 보기 Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도불구하고 [공식 영문 문서](https://www.tensorflow.org/?hl=en)의 내용과 일치하지 않을 수 있습니다.이 번역에 개선할 부분이 있다면[tensorflow/docs](https://github.com/tensorflow/docs) 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다.문서 번역이나 리뷰에 참여하려면[[email protected]](https://groups.google.com/a/tensorflow.org/forum/!forum/docs-ko)로메일을 보내주시기 바랍니다. 신경망을 구축하기 위해서 고수준 API인 `tf.keras`를 사용하길 권합니다. 대부분의 텐서플로 API는 즉시 실행(eager execution)과 함께 사용할 수 있습니다.
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import tensorflow.compat.v1 as tf
###Output
_____no_output_____
###Markdown
층: 유용한 연산자 집합머신러닝을 위한 코드를 작성하는 대부분의 경우에 개별적인 연산과 변수를 조작하는 것보다는 높은 수준의 추상화에서 작업할 것입니다.많은 머신러닝 모델은 비교적 단순한 층(layer)을 조합하고 쌓아서 표현가능합니다. 또한 텐서플로는 여러 표준형 층을 제공하므로 사용자 고유의 응용 프로그램에 관련된 층을 처음부터 작성하거나, 기존 층의 조합으로 쉽게 만들 수 있습니다.텐서플로는 [전체 케라스](https://keras.io) API를 tf.keras 패키지에 포함하고 있습니다. 케라스 층은 모델을 구축하는데 매우 유용합니다.
###Code
# tf.keras.layers 패키지에서 층은 객체입니다. 층을 구성하려면 간단히 객체를 생성하십시오.
# 대부분의 layer는 첫번째 인수로 출력 차원(크기) 또는 채널을 취합니다.
layer = tf.keras.layers.Dense(100)
# 입력 차원의 수는 층을 처음 실행할 때 유추할 수 있기 때문에 종종 불필요합니다.
# 일부 복잡한 모델에서는 수동으로 입력 차원의 수를 제공하는것이 유용할 수 있습니다.
layer = tf.keras.layers.Dense(10, input_shape=(None, 5))
###Output
_____no_output_____
###Markdown
미리 구성되어있는 층은 다음 [문서](https://www.tensorflow.org/api_docs/python/tf/keras/layers)에서 확인할 수 있습니다. Dense(완전 연결 층), Conv2D, LSTM, BatchNormalization, Dropout, 등을 포함하고 있습니다.
###Code
# 층을 사용하려면, 간단하게 호출합니다.
layer(tf.zeros([10, 5]))
# layer는 유용한 메서드를 많이 가지고 있습니다. 예를 들어, `layer.variables`를 사용하여 층안에 있는 모든 변수를 확인할 수 있으며,
# `layer.trainable_variables`를 사용하여 훈련가능한 변수를 확인할 수 있습니다.
# 완전 연결(fully-connected)층은 가중치(weight)와 편향(biases)을 위한 변수를 가집니다.
layer.variables
# 또한 변수는 객체의 속성을 통해 편리하게 접근가능합니다.
layer.kernel, layer.bias
###Output
_____no_output_____
###Markdown
사용자 정의 층 구현사용자 정의 층을 구현하는 가장 좋은 방법은 tf.keras.Layer 클래스를 상속하고 다음과 같이 구현하는 것입니다. * `__init__` 에서 층에 필요한 매개변수를 입력 받습니다.. * `build`, 입력 텐서의 크기를 알고 나머지를 초기화 할 수 있습니다. * `call`, 정방향 연산(forward computation)을 진행 할 수 있습니다.변수를 생성하기 위해 `build`가 호출되길 기다릴 필요가 없다는 것에 주목하세요. 또한 변수를 `__init__`에 생성할 수도 있습니다. 그러나 `build`에 변수를 생성하는 유리한 점은 층이 작동할 입력의 크기를 기준으로 나중에 변수를 만들 수 있다는 것입니다. 반면에, `__init__`에 변수를 생성하는 것은 변수 생성에 필요한 크기가 명시적으로 지정되어야 함을 의미합니다.
###Code
class MyDenseLayer(tf.keras.layers.Layer):
def __init__(self, num_outputs):
super(MyDenseLayer, self).__init__()
self.num_outputs = num_outputs
def build(self, input_shape):
self.kernel = self.add_variable("kernel",
shape=[int(input_shape[-1]),
self.num_outputs])
def call(self, input):
return tf.matmul(input, self.kernel)
layer = MyDenseLayer(10)
print(layer(tf.zeros([10, 5])))
print(layer.trainable_variables)
###Output
_____no_output_____
###Markdown
다른 독자가 표준형 층의 동작을 잘 알고 있기 때문에, 가능한 경우 표준형 층을 사용하는것이 전체 코드를 읽고 유지하는데 더 쉽습니다. 만약 tf.keras.layers 또는 tf.contrib.layers에 없는 층을 사용하기 원하면 [깃허브](http://github.com/tensorflow/tensorflow/issues/new)에 이슈화하거나, 풀 리퀘스트(pull request)를 보내세요. 모델: 층 구성머신러닝 모델에서 대부분의 재미있는 많은 것들은 기존의 층을 조합하여 구현됩니다. 예를 들어, 레스넷(resnet)의 각 잔여 블록(residual block)은 합성곱(convolution), 배치 정규화(batch normalization), 쇼트컷(shortcut) 등으로 구성되어 있습니다. 다른층을 포함한 모델을 만들기 위해 사용하는 메인 클래스는 tf.keras.Model입니다. 다음은 tf.keras.Model을 상속(inheritance)하여 구현한 코드입니다.
###Code
class ResnetIdentityBlock(tf.keras.Model):
def __init__(self, kernel_size, filters):
super(ResnetIdentityBlock, self).__init__(name='')
filters1, filters2, filters3 = filters
self.conv2a = tf.keras.layers.Conv2D(filters1, (1, 1))
self.bn2a = tf.keras.layers.BatchNormalization()
self.conv2b = tf.keras.layers.Conv2D(filters2, kernel_size, padding='same')
self.bn2b = tf.keras.layers.BatchNormalization()
self.conv2c = tf.keras.layers.Conv2D(filters3, (1, 1))
self.bn2c = tf.keras.layers.BatchNormalization()
def call(self, input_tensor, training=False):
x = self.conv2a(input_tensor)
x = self.bn2a(x, training=training)
x = tf.nn.relu(x)
x = self.conv2b(x)
x = self.bn2b(x, training=training)
x = tf.nn.relu(x)
x = self.conv2c(x)
x = self.bn2c(x, training=training)
x += input_tensor
return tf.nn.relu(x)
block = ResnetIdentityBlock(1, [1, 2, 3])
print(block(tf.zeros([1, 2, 3, 3])))
print([x.name for x in block.trainable_variables])
###Output
_____no_output_____
###Markdown
그러나 대부분의 경우에, 많은 층으로 구성된 모델은 간단하게 연이어 하나의 층으로 호출할 수 있습니다. 이는 tf.keras.Sequential 사용하여 간단한 코드로 구현 가능합니다.
###Code
my_seq = tf.keras.Sequential([tf.keras.layers.Conv2D(1, (1, 1)),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Conv2D(2, 1,
padding='same'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Conv2D(3, (1, 1)),
tf.keras.layers.BatchNormalization()])
my_seq(tf.zeros([1, 2, 3, 3]))
###Output
_____no_output_____ |
Exercises/Exercise1.ipynb | ###Markdown
Exercises 1
- Create a list of the squares of numbers from 0 to 10.
- Make a copy of the list
- Find all the methods/functions that one can apply to that list
- Find and apply a method that allows you to flip the order of elements
- Verify what happened to the list and its copy
- Can you find a way to make two truly independent copies?
Solutions 1
###Code
#create list
square_list = [i**2 for i in range(11)]
#make a copy
square_list2 = square_list
#find all methods
dir(square_list)
#use the reverse method
square_list.reverse()
#check first list
square_list
#check second list
square_list2
###Output
_____no_output_____
###Markdown
Through the simple assignment list2 = list1, one does not create an independent copy of a list: both names refer to the same object, so if one is modified, the other changes too. To avoid that, one can use the copy() method, which creates a new, independent object.
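One caveat worth adding: copy() is a shallow copy. For nested lists, a truly independent copy needs copy.deepcopy, as this small sketch (my addition, with throwaway demo variables) shows:
```python
import copy

nested = [[1, 2], [3, 4]]
nested_deep = copy.deepcopy(nested)   # copies the inner lists as well
nested[0][0] = 99
print(nested_deep)                    # still [[1, 2], [3, 4]]
```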
###Code
square_list3 = square_list.copy()
square_list3
square_list.reverse()
square_list
square_list2
square_list3
###Output
_____no_output_____ |
Duflo_2019_Wu.ipynb | ###Markdown
--- Project for the course in Microeconometrics | Summer 2020, M.Sc. Economics, Bonn University | [Ying-Xuan Wu](https://github.com/amanda8412383) Replication of Duflo E, Dupas P, Ginn T, Barasa GM, Baraza M, Pouliquen V, et al. (2019) ---
This notebook contains my replication of the results from the following paper:
> Duflo E, Dupas P, Ginn T, Barasa GM, Baraza M, Pouliquen V, et al. (2019) HIV prevention among youth: A randomized controlled trial of voluntary counseling and testing for HIV and male condom distribution in rural Kenya
<!-- Downloading and viewing this notebook:
* The best way to view this notebook is by downloading it and the repository it is located in from [GitHub](https://github.com/HumanCapitalAnalysis/microeconometrics-course-project-amanda8412383).
* Other viewing options like _MyBinder_ or _NBViewer_.
* The original paper, as well as the data provided by the authors, can be accessed [here](https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/CVOPZL).
Information about replication and individual contributions:
* Due to the unavailability of the original code and the massiveness of the dataset, the estimations all differ from the original paper to various degrees.
* One of the most important outcome variables was not found; the attempts at locating it are documented in section 7.
* For the replication, I try to remain true to the original structure of the paper: all the panels and rows are arranged as they appear in Duflo et al. (2019) and named identically.
* Some of the columns featured in my replication appear as second-row indexes compared to the original tables, and the incidence rate is reported separately to suit my workflow in Python. -->
Table of Contents
1. Introduction
2. Study Design
3. Identification Problems
4. Empirical Strategy
5. Replication of Duflo et al. (2019)
 5.1. Data and Primary Cleaning
 5.2. Tables
  5.2.1. Table 1: Summary statistics at baseline
  5.2.2. Table 2: Intention to treat analysis
  5.2.3. Table 3: Effects of intervention on other outcomes
  5.2.4. Table 4: Heterogeneity test
6. Discussion
7. Debug: HSV-2 at baseline
 7.1 Searching the dataset and verifying
 7.2 Others
8. References
###Code
from auxiliary import *
from IPython.display import display
from IPython.display import Image
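# `pd` (pandas) is assumed to be provided by the star import from auxiliary above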
pd.options.display.max_rows = None
###Output
_____no_output_____
###Markdown
--- 1. Introduction ---
Duflo et al. (2019) examine the effects of Voluntary Counseling and Testing for HIV (VCT) and of increased access to male condoms, compared to the standard HIV prevention services available, using biological and behavioral outcomes among youth in Western Kenya. VCT, which serves as the entry point to HIV treatment and care, is a test through which one learns one's HIV serostatus, combined with individualized risk-reduction counseling. VCT is supposed to help individuals reduce risky sexual behaviors and protect themselves and their partners from HIV and other sexually transmitted infections such as Herpes Simplex Type 2 (HSV-2). Even though some previous studies show a reduction of risky sexual behavior among individuals testing positive, other studies raise the concern of disinhibition among those testing negative (Sherr et al. 2007). Through a four-arm, unblinded, individually randomized controlled trial implemented between 2009 and 2012 in four districts of Kenya's Western Province, with a sample of 10,245 youth aged 17 to 24 randomly assigned to receive community-based VCT, 150 male condoms, both VCT and condoms, or neither program, Duflo et al. (2019) examine the effect of these two interventions among the youth. The results suggest that there are no statistically significant effects on the risk of HIV or on other behavioral or knowledge outcomes, including self-reported pregnancy rates.
**Main variables**
| **Interventions** | **Main outcomes** |
|------------------------|-------------------------|
| community-based VCT | HIV |
| 150 male condoms | HSV-2 |
| both | |
| neither | |
In this notebook, I attempt to replicate the results presented in the paper by Duflo et al. (2019), but I only obtain similar results and fail to find one of the most important outcome variables. In section 2, the methodology adopted by Duflo et al. (2019) is presented, regarding how sampling, treatment, randomization and tracking are conducted. In section 3, possible identification problems are brought up from three different aspects: selection bias, measurement error from self-reporting, and externalities. Section 4 briefly discusses the methodology used by the authors. Section 5 shows my replication of the results in the paper and a discussion thereof. Section 6 offers a discussion of the insignificant results. Section 7 documents my failed attempts at finding the HSV-2 testing result at baseline.
--- 2. Study Design ---
**Sample**
The sample comes from a previous study by the authors (Duflo, Dupas, and Kremer 2015), a cluster-randomized controlled trial of a teacher HIV training program and the distribution of free uniforms, consisting of 19,289 students enrolled in grade 6 in 328 schools in 2003. 55% of them, 10,245 individuals, were trackable during the first round of tracking between March 2009 and July 2010. In total, they represent 85% of those aged 17-24 in Western Kenya in 2009. According to the criteria, the participants must have:
1. Participated in the previous study, which implies having attended at least grade 6
2. Residency in the study district
3. Provided informed consent
**Randomizing**
Stratifying by gender, primary school, secondary school matriculation as of July 2007, and ever having been pregnant as of 2007, participants were randomly assigned to four arms, 25% each.
**Treatment**
Standard VCT is available to everyone at local health facilities; however, the intervention provided VCT within the community, lowering travel time and cost as well as the social cost of visiting an HIV testing center. 
For the condom intervention arm, participants were provided 150 male condoms; they could take all or only some of these condoms.
**Tracking**
The follow-up survey was conducted from April 2011 to May 2013; if a respondent did not show up, field officers would track the person down. 25% of those whose acquaintances provided contact information and 10% of those who could not be contacted at all were randomly selected for further intensive tracking. An intensive-tracking weight is included in the estimation.
--- 3. Identification Problems ---
Considering that Duflo et al. (2019) do not obtain any significant result, it might be more suitable to discuss possible identification problems in this section instead of identification itself. I am not trying to suggest that obtaining a significant result means being problem-free; it just does not seem meaningful to discuss how their identification works in this case.
**Causal graph**
For a randomized controlled trial, one expects all backdoor paths through unobservables and other control variables to be blocked, yielding a clean causal effect. In this paper, however, either the backdoor paths are not blocked successfully or the treatments simply have no effect. I will discuss some of the possibly unblocked backdoor paths later in this section. In the discussion, the possible correlations suggested by the authors are listed.
###Code
Image(filename='causal graph.jpg')
###Output
_____no_output_____
###Markdown
**Selection bias**
As a randomized controlled trial, this estimation is supposed to be free from selection bias. Nevertheless, in the study design, Duflo et al. (2019) mention that the sample is based on their previous study, which restricts the participants of this study to those who attended at least grade 6 and were exposed to HIV-related knowledge. Consider the following model from Greene (2006):
\begin{equation}
y_i = x_i' \beta + e_i \\
z_i^* = w_i' \gamma + u_i \\
z_i = 1(z_i^* > c)
\end{equation}
where $y_i$ is our outcome variable, $x_i$ is the treatment indicator, $z_i$ indicates attending grade 6 or not, $w_i$ are exogenous factors that determine whether one attends grade 6, and $e_i, u_i$ are error terms. Then we can rewrite the expectation of the outcome as:
\begin{equation}
E(y_i|w_i, u_i) = x_i' \beta + E(e_i|w_i, u_i) = x_i' \beta + E(e_i|u_i)
\end{equation}
If $E(e_i|u_i)$ is not equal to zero, we might obtain a biased estimator. Such an example is easily thought of: children from a poverty background may be more likely to drop out, have a higher chance of becoming sex workers, be unable to afford condoms, and miss sex education in class, all factors that could increase their chance of getting an STI. In the later discussion, the authors do not bring up this issue, but they do admit lower external validity, while stressing that as HIV prevention is scaled up, an increasing share of the population will have access to prevention information.
**Measurement error from self-reports**
Because the behavioral outcomes are acquired through surveys, one common problem with self-reported data is imprecision, possibly resulting from causes such as forgetfulness or deception. Consider the model from Hansen (2020):
\begin{equation}
y_i = \beta x_i^* + u_i, \quad \textrm{where} \quad E[u_i|x_i^*] = 0, \qquad x_i = x_i^* + v_i
\end{equation}
where $y_i$ is one of our outcome variables, $x_i$ is the reported behavior, $u_i$ is the error term, and $v_i$ is the deviation originating from forgetfulness or deception. Rewriting the equation in terms of the observed $x_i$:
\begin{equation}
y_i = \beta x_i + e_i, \quad \textrm{where} \quad e_i = -\beta v_i + u_i
\end{equation}
Then the covariance between the regressor and the residual equals $-\beta$ times the variance of the reporting mistake, whereas we need $x_i$ and the residual to be uncorrelated. Still, I would say measurement error has limited influence in this case. Only a few behavioral variables serve as independent variables, for example ever pregnant and school enrollment in 2007, and it is theoretically possible to fact-check these two variables. Most of the behavioral outcomes serve as dependent variables in the logistic regression stated in section 4, in which case they affect our estimation much less.
**Externality**
One intriguing question to ask is whether these treatments induce externalities. Miguel and Kremer (2004) find, in a randomized trial in 75 Kenyan primary schools, that medical treatment for intestinal helminths in the treatment group also reduced transmission within the control group, even though their randomization took place across schools. Using male condoms would undoubtedly reduce the possibility of infecting sex partners and thus contaminate the obtained result. If subjects change their behavior in any way after receiving VCT, this will also affect their sexual partners. Such externalities make it harder to isolate the treatment effect in the treatment group. I have not found any paper with empirical evidence on how externalities affect HIV research, but some papers point this out and try to discuss the possible influence, such as Canning (2006).
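To make the covariance claim in the measurement-error paragraph above explicit, here is a short derivation of my own (assuming the reporting error $v_i$ is uncorrelated with $x_i^*$ and $u_i$):
\begin{equation}
Cov(x_i, e_i) = Cov(x_i^* + v_i,\; u_i - \beta v_i) = -\beta\, Var(v_i)
\end{equation}
so the observed regressor is mechanically correlated with the residual whenever $\beta \neq 0$ and the reporting error has positive variance, which is what biases (attenuates) the estimate.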
---
**NOTE**: the model used in this paper is a logistic regression, while I use linear notation in section 3 for simplicity. For nonlinear models, endogeneity and externalities are still problems that affect the large-sample properties of estimators; an in-depth discussion can be found in _A Logit Model With Endogenous Explanatory Variables and Network Externalities_ (de Grange et al. 2015).
--- 4. Empirical Strategy ---
The following methods are the statistical analyses used in Duflo et al. (2019).
**Descriptive statistics**
For both baseline and phase 2, descriptive data are shown. The estimation in stage 2 takes the weighted follow-up rate into account. The share of the total sample and a comparison between males and females are shown, plus a second analysis to test for heterogeneity in the results by gender, childbearing experience at baseline, and baseline belief about the likelihood of HIV infection.
**Incidence rate**
HSV-2 incidence was calculated by dividing the number of new HSV-2 cases among respondents who were negative at baseline in each treatment arm by the total person-years of exposure. If a person remained negative, person-years of exposure are calculated as the total time from the baseline test to the follow-up. For newly infected cases, they are calculated as half the time between the baseline test and the positive test.
**Weighted logistic regression**
To estimate the effect of the interventions on outcomes, the following logistic regression, weighted by the survey tracking strategy, is run to compare outcomes in each treatment arm to the control, and the adjusted odds ratio and confidence interval are reported. The regressors are listed as follows:
|**regressors** |
|------------------------|
| age group |
| months between 2 surveys |
| gender |
| secondary school enrollment in 2007 |
| ever pregnant by 2007 |
| treatment arm |
--- 5. Replication of Duflo et al. (2019) ---
5.1. Data and Primary Cleaning
Duflo et al. (2019) use a data set with 2,374 variables and 19,293 participants. The following list summarizes the main sources of these variables. I prune the dataset because the original data set is too large to upload even with Git LFS:
- answers and remarks of the baseline questionnaire
- answers and remarks of the phase 2 questionnaire
- school properties such as location and the average KCPE scores of all students
- testing results; both HSV-2 and HIV require multiple rounds of testing
- poorly named variables that might have been created for the analysis
Even though the data set has 19,293 respondents, the total number of participants in this project is supposed to be 7,565; the rest are participants of the authors' previous study. An important question is how the authors filtered the data and ruled out problematic entries, since neither the rules for excluding an entry nor how they cleaned the data is mentioned in the paper. Besides, because the majority of the data come from surveys, 92 variables are named with "comments" describing the situation in the data collecting process. For example, in Log_comment there are entries described as "mentally ill" or "duplicate" even though they were surveyed. After trying all the combinations that seemed reasonable to me, I filtered the data by:
1. having been assigned to a treatment group
2. being labeled as surveyed
3. being labeled as tracked
4. discarding all the entries that have LOG_comments
Through this, I obtain an approximate participation number. Unfortunately, this means all my statistics differ from the paper to various degrees.
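As a rough illustration of these four steps (not the exact code behind `df_filter`; the flag names `surveyed`, `tracked` and `LOG_comments` are my guesses for columns of this kind, only `group` appears in the code below), the filtering logic looks roughly like this:
```python
# Sketch only: column names other than 'group' are assumptions.
df_filter = df[
    df['group'].notna()            # 1. assigned to one of the four treatment arms
    & (df['surveyed'] == 1)        # 2. labeled as surveyed at follow-up
    & (df['tracked'] == 1)         # 3. labeled as tracked
    & df['LOG_comments'].isna()    # 4. no field-officer comment flagging the entry
]
```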
###Code
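# Count how many of the filtered participants ended up in each treatment arm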
ind = df_filter.group.value_counts()
total = ind.sum()
print(ind)
print(f'total participant {total}')
###Output
Control 1889
VCT only 1866
Condom only 1855
Condom and VCT 1848
Name: group, dtype: int64
total participant 7458
###Markdown
5.2. Tables
5.2.1. Table 1: Summary statistics at baseline
Table 1 shows baseline characteristics of men and women across the 4 trial arms. This table only includes individuals who were surveyed at follow-up. Because the original code is lacking, I recreate these variables myself. The list below describes the methods I chose. This is the only table for which I forgo recreating the percentages. As previously mentioned, the baseline HSV-2 result is nowhere to be found, so the second-phase HSV-2 result is used here.
| **Variables** | **Method** |
|------------------------|-------------------------|
| Number of individuals | counting unique pupilid in each group |
| Age | create dummy through variable "age2009" |
| Total year of schooling | create dummy through answer to "Q_b1_13_school_years" |
| HSV-2 positive | using HSV-2 result of phase 2 instead |
| Currently married | people who have "an_spouse_age" |
| Ever or partner ever pregnant | people who have "evpreg07v2" (women only) or "Track_children_number" > 0 (both genders) |
| Ever had sex | using "an_everhadsex" |
| Last sex used condom | using answer of "Q_b4_127_last_sex_use_condom" |
| Ever had multiple partners | using "an_multiplepartners" |
| Ever tested for HIV | using "an_everHIVtested" |
| Believed current HIV infection | using answer to "Q_b3_99" |
| Believed future HIV infection | using answer to "Q_b3_100" |
| Named 3 prevention | at least 3 of the dummies "an_Q_b3_80_how_prevent_hiv_1" to 19 being 1 |
| Answered 3 questions | "an_Q_b3_83", "an_Q_b3_84" negative & "an_Q_b3_88" being positive |
| Showed positive attitude | "an_Q_b3_112_agree_with_statement" is 1 & questions number 109 to 111 are 0 |
The 3 HIV knowledge questions are defined as follows by the author:
1. Can HIV be transmitted to a baby in the womb? (Q_b3_83)
2. Can HIV be transmitted to a baby during breastfeeding? (Q_b3_84)
3. Can HIV spread through mosquito bites? (Q_b3_88)
Positive attitude is defined as follows by the author:
1. Agreed that people with HIV/AIDS should be treated the same as people without HIV/AIDS (an_Q_b3_112_agree_with_statement)
2. Disagreed that prostitutes or promiscuous men are responsible for spreading HIV (an_Q_b3_110_agree_with_statement, an_Q_b3_111_agree_with_statement)
3. Disagreed that HIV was punishment for bad behavior (an_Q_b3_109_agree_with_statement)
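As an illustration of how one of these indicators can be rebuilt, here is a sketch for "Named 3 prevention" (the dummy prefix comes from the table above; treating missing dummies as 0 is an assumption):
```python
# Sketch: at least 3 of the 19 'how to prevent HIV' dummies equal to 1
prevent_cols = [f'an_Q_b3_80_how_prevent_hiv_{i}' for i in range(1, 20)]
named_3_prevention = (df_merge[prevent_cols].fillna(0).sum(axis=1) >= 3).astype(int)
```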
###Code
#variables in table 1
varlist = ['Number of individuals', 'Age at baseline_<19', 'Age at baseline_19-20', 'Age at baseline_21-22', 'Age at baseline_>22',
'Not enrolled in school',
'Total year of schooling_<10', 'Total year of schooling_10-11', 'Total year of schooling_12-13', 'Total year of schooling_>13',
'HSV-2 positive', 'Currently married', 'Ever or partner ever pregnant', 'Ever had sex', 'Last sex used condom', 'Ever had multiple partners', 'Ever tested for HIV',
'Believed current HIV infection', 'Believed future HIV infection',
'Named 3 prevention', 'Answered 3 questions', 'Showed positive attitude']
table1(df_merge, varlist)
###Output
_____no_output_____
###Markdown
5.2.2. Table 2: Intention to treat analysis. This table shows HSV-2 and HIV outcomes for males and females. None of the interventions turns out to significantly reduce the infection rate of either disease; both the adjusted odds ratios and the weighted prevalences appear similar across interventions. For the recreation, a query and a key are passed into the function to create panels for the full sample and for each gender, and each variable is passed in as a (numerator, denominator) pair to calculate the weighted percent, because these variables are all recorded in different forms and scattered across the variable sea; each one has to be searched out, transformed, and passed in manually. The replication does not contain HSV-2 at baseline, hence the incidence rate is not in this table either. Although HIV cases can be found in both stages, the number of infections is too low for a valid estimate.| **Variables** | **Method** ||------------------------|-------------------------|| HSV-2 | using "hsv2_positive" || HIV | using the answer of "p2_c2_233_hiv_result" |
###Code
query = ['', 'sex == "2 Female"', 'sex == "1 Male"']
keys=['all', 'female', 'male']
varlist2 = [('HSV-2 positive', 'hsv2_accept'), ('HIV', 'p2_c2_233_hiv_result')]
Table(df_merge, query, keys, varlist2, option = 'T2')
###Output
_____no_output_____
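###Markdown
The adjusted odds ratios described in Section 4 come from a logistic regression weighted by the survey tracking strategy. The cell below is only an illustrative sketch of how such a model could be fit with `statsmodels`: the data frame is synthetic and the column names are placeholders, not the actual variables in the replication data.
###Code
import numpy as np
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf

# Synthetic stand-in data; the real regressors are the ones listed in Section 4
rng = np.random.default_rng(0)
n = 400
df_ex = pd.DataFrame({
    "outcome": rng.integers(0, 2, n),                                            # e.g. HSV-2 positive at follow-up
    "group": rng.choice(["Control", "VCT only", "Condom only", "Condom and VCT"], n),
    "age_group": rng.choice(["<19", "19-20", "21-22", ">22"], n),
    "female": rng.integers(0, 2, n),
    "weight": rng.integers(1, 4, n),                                             # placeholder tracking weight (integer counts)
})

model = smf.glm("outcome ~ C(group) + C(age_group) + female", data=df_ex,
                family=sm.families.Binomial(), freq_weights=df_ex["weight"])
res = model.fit()
print(np.exp(res.params))      # adjusted odds ratios
print(np.exp(res.conf_int()))  # confidence intervals on the odds-ratio scale
###Output
_____no_output_____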
###Markdown
5.2.3. Table 3: Effects of the interventions on other outcomes. In this table, other behavioral outcomes at phase 2 are analyzed across the 4 treatment groups and by gender. For the treatment groups receiving condoms, "Ever received free condoms" and "Ever used the free condoms" are statistically significantly higher compared to the control or the VCT-only group. Similarly, for the outcome "Ever had VCT", the groups that received VCT are significantly higher compared to the rest. The other variables, including childbearing, remain statistically insignificant.| **Variables** | **Method** ||-----------------------------|-------------------------|| Ever received free condoms | answer yes to "p2_b8_200_condomsfree_ever" || Ever used the free condems | answer 1 or 2 to "p2_b8_202_condfree_use" || Ever sold/gave condoms | answer 3 or 4 to "p2_b8_202_condfree_use" || Ever had VCT | answer yes to "p2_b9_204_evervct" || VCT more than once | answer "p2_b9_205_timesvct" > 1 || VCT more than twice | answer "p2_b9_205_timesvct" > 2 || Currently married p2 | answer currently married to "p2_b5_115_married"|| Sex in last 6 months | using "p2_b5_118_sexpartners_6mos" > 0 || Partner number last 6 months| using the answer to "p2_b5_118_sexpartners_6mos" || Partner number lifetime | using the answer of "p2_b5_119_sexpartners_life" || Ever used condoms | answer yes to "p2_b8_182_everusedcondom" || Used condoms last time | answer yes to "p2_b8_186_condom_lasttime" || Unprotected sex non-monogamous| "p2_b5_119_sexpartners_life" > 1 & answer never to all of p2_b5_141 (condom use with the 3 most recent partners respectively) || Self reported STI | answer yes to "p2_b10_214_sti" || Ever or partner ever pregnant p2| answer to "p2_b7_148_pregnancies_number" > 0|| Named 3 prevention p2 | having at least 3 answers in "p2_b4_72_protecthiv"|| Answered 3 questions p2 | answer 1 to "p2_b4_74_hivinwomb", "p2_b4_75_hivbreastfeed" and 2 to "p2_b4_76_hivmosquitoes" || Showed positive attitude p2 | answer 4 or 5 to "p2_b4_111_hivpunishment", "p2_b4_112_hivprostitutes", "p2_b4_113_hivpromiscuousmen" and 1 or 2 to "p2_b4_114_hivtreatedsame"|| Child number p2 | using "p2_b7_148_pregnancies_number" |
###Code
varlist3 = [
("Ever received free condoms", "everfreecondoms_base"),
("Ever used the free condems", "everfreecondoms_base"),
('Ever sold/gave condoms', 'Ever received free condoms'),
('Ever had VCT', 'evervct_base'),
('VCT more than once', 'evervct_base'),
('VCT more than twice', 'evervct_base'),
('Currently married p2', 'currentmarried_base'),
('Sex in last 6 months', 'sexpartners6mon_base'),
('Partner number last 6 months', None),
('Partner number lifetime', None),
('Ever used condoms', 'everusedcondom_base'),
('Used condoms last time', 'lasttimecondom_base'),
('Unprotected sex non-monogamous', 'polynocondom_base'),
('Self reported STI', 'sti_base'),
('Ever or partner ever pregnant p2', 'pregnant_base'),
('Named 3 prevention p2', 'named3prev_base'),
('Answered 3 questions p2', 'ans3q_base'),
('Showed positive attitude p2', 'posplhiv_base'),
('Child number', None)]
Table(df_t3, query, keys, varlist3)
###Output
_____no_output_____
###Markdown
5.2.4. Table 4: Heterogeneity test. Table 4 presents heterogeneity tests: the first 2 panels compare respondents who had started childbearing with those who had not, and the last 2 panels compare respondents who believed they were infected at baseline with those who did not, to show the effect of the interventions; these tests yield no significant results either.| **Panels** | **Method** ||-----------------------------------|-------------------------|| Started child bearing | using "started_childbearing" || Not started child bearing | using "started_childbearing" || Believed current HIV infection > 0| using "Q_b3_99" (what is the likelihood (chance) that you are infected with HIV/AIDS now)|| Believed current HIV infection = 0| using "Q_b3_99" (what is the likelihood (chance) that you are infected with HIV/AIDS now)|
###Code
query4 = ['started_childbearing == 1', 'started_childbearing == 0', 'Q_b3_99 > 1', 'Q_b3_99 == 1']
keys4=['Started child bearing', 'Not started child bearing', 'Believed current HIV infection > 0', 'Believed current HIV infection = 0']
varlist4 = [('HSV-2 positive', 'hsv2_accept'), ('HSV-2 female', 'hsv2_accept_f'), ('HSV-2 male', 'hsv2_accept_m')]
Table(df_t3, query4, keys4, varlist4)
###Output
_____no_output_____
###Markdown
--- 6. Discussion ---Overall the analysis does not yield any significant results, except for the condom-related behavioral outcomes in the groups receiving condoms and the VCT-related behavioral outcomes in the groups receiving VCT. Duflo et al. (2019) provide several possible explanations, drawn from previous studies, for the non-significant results.1. Most past research finds behavioral changes only in people who test positive for HIV. A meta-analysis summarizing 17 studies (Fonner et al. 2012) finds no significant change in STI or HIV prevalence among those receiving VCT; however, for those who test positive for HIV, there is a reduction in sex partners and an increase in condom use. This cannot be assessed in our data due to the very low HIV prevalence.2. The age group. Another multi-component HIV prevention program conducted in Africa and Thailand suggests a significant reduction among older women, but no effect among ages 18 to 24 (Coates et al. 2014). In this study, the subjects are between 17 and 24, which is consistent with their finding.3. There were several large-scale HIV testing events in the area. At baseline, 44% of the control group had been tested for HIV, while 82.4% had been tested at follow-up. The high participation rate is likely to interfere with and weaken the effect of the treatments.I considered changing papers because the replication process did not feel closely connected to the course content and because I failed to find HSV-2 at baseline. But I did not, succumbing to the sunk cost fallacy. --- 7. Debug: HSV-2 at baseline---This section documents all my failed attempts to find a variable representing the HSV-2 test result at baseline. 7.1. Searching the dataset and verifying
###Code
hsv = df_filter.filter(regex='.*(?i)hsv.*',axis=1)
hsv.head()
###Output
_____no_output_____
###Markdown
First, I rule out variables that are obviously irrelevant.- **hsv2id** is assigned to almost all individuals in the data set (19202 out of 19293), regardless of whether they have an HSV-2 result at stage 2. Here, in the filtered data frame, the total equals 7458, the surveyed individuals kept by the filtering above.- **merge_HSVcode** seems to be some sort of merging key generated during the authors' data processing; it takes only the value 3.- **HSV_od1** to **HSV_od4** appear to be numerical records of some special cases in the testing process. They take values ranging from 0 to about 5 and are only populated when the corresponding HSV_result is not null.- **HSV_batch** takes values from 1 to 8; I assume it relates to testing batches.- **merge_HSVresults** takes the values 1 and 3, with 144 and 7313 entries respectively; it is presumably related to merge_HSVcode.- **HSV2_blood_sample**, **hsv2_decline**, **hsv2_accept** can be gauged from their names and number of entries: they denote whether an HSV-2 blood sample was taken, whether testing was accepted, and whether it was declined.- **HSV-2 positive** is a variable I created for Table 2 based on hsv2_positive.As for HSV_result1 to HSV_result4, HSV_finalresult, and hsv2_positive: as can be seen below, HSV_finalresult is the same thing as hsv2_positive.
###Code
pd.crosstab(hsv['hsv2_positive'], hsv['HSV_finalresult'])
###Output
_____no_output_____
###Markdown
As for the 4 HSV_result columns, counting the non-null values in each column shows that the counts decline sharply.
###Code
hsv_results = hsv.filter(regex='HSV_result.',axis=1)
hsv_results[hsv_results != ''].count()
###Output
_____no_output_____
###Markdown
Comparing the one test that covers almost the whole sample with the final result shows that everyone who tested negative in HSV_result1 is also considered negative in the final result. This means it cannot be the baseline test, because it would imply there are no new infections, which is inconsistent with the numbers in Duflo et al. (2019). Combining these two characteristics with the existence of HSV_od1 to 4, I would say these are just variables recording repeated testing results for the follow-up test.
###Code
pd.crosstab(hsv['HSV_result1'], hsv['HSV_finalresult'])
###Output
_____no_output_____ |
09_ImageWang_Leadboard_SS_5.ipynb | ###Markdown
Image网 Submission `128x128` This contains a submission for the Image网 leaderboard in the `128x128` category.In this notebook we:1. Train on 1 pretext task: - Train a network to do image inpainting on Image网's `/train`, `/unsup` and `/val` images. 2. Train on 4 downstream tasks: - We load the pretext weights and train for `5` epochs. - We load the pretext weights and train for `20` epochs. - We load the pretext weights and train for `80` epochs. - We load the pretext weights and train for `200` epochs. Our leaderboard submissions are the accuracies we get on each of the downstream tasks.
###Code
import json
import torch
import numpy as np
from functools import partial
from fastai2.layers import Mish, MaxPool, LabelSmoothingCrossEntropy
from fastai2.learner import Learner
from fastai2.metrics import accuracy, top_k_accuracy
from fastai2.basics import DataBlock, RandomSplitter, GrandparentSplitter, CategoryBlock
from fastai2.optimizer import ranger, Adam, SGD, RMSProp
from fastai2.vision.all import *
from fastai2.vision.core import *
from fastai2.vision.augment import *
from fastai2.vision.learner import unet_learner, unet_config
from fastai2.vision.models.xresnet import xresnet50, xresnet34
from fastai2.data.transforms import Normalize, parent_label
from fastai2.data.external import download_url, URLs, untar_data
from fastcore.utils import num_cpus
from torch.nn import MSELoss
from torchvision.models import resnet34
torch.cuda.set_device(1)
###Output
_____no_output_____
###Markdown
Pretext Task: Image Inpainting
###Code
# Chosen parameters
lr=8e-3
sqrmom=0.99
mom=0.95
beta=0.
eps=1e-4
bs=64
opt='ranger'
sa=1
fp16=1
pool='MaxPool'
sym=0
dump=0   # referenced later before printing the model; not defined anywhere else in this notebook, so default it here
mixup=0  # referenced later when building the MixUp callback; 0 disables mixup
# NOTE: Normally loaded from their corresponding string
m = xresnet34
act_fn = Mish
pool = MaxPool
nc=20
source = untar_data(URLs.IMAGEWANG_160)
len(get_image_files(source/'unsup')), len(get_image_files(source/'train'))
# [s.name for s in (source/'train').ls()], [s.name for s in (source/'val').ls()]
def get_dbunch(size, bs, workers=8):
path = URLs.IMAGEWANG_160 if size <= 160 else URLs.IMAGEWANG
source = untar_data(path)
files = get_image_files(source)
tfms = [[PILImage.create, ToTensor, Resize(size)],
[lambda x: x.parent.name, Categorize()]]
dsets = Datasets(files, tfms=tfms, splits=RandomSplitter(valid_pct=0.1)(files))
batch_tfms = [IntToFloatTensor, Normalize.from_stats(*imagenet_stats)]
dls = dsets.dataloaders(bs=bs, num_workers=workers, after_batch=batch_tfms)
dls.path = source
return dls
# Use the Ranger optimizer
opt_func = partial(ranger, mom=mom, sqr_mom=sqrmom, eps=eps, beta=beta)
size = 128
bs = 256
dbunch = get_dbunch(160, bs)
dbunch.c = nc
len(dbunch.train.dataset)
xb,yb = dbunch.one_batch()
xb.shape, yb.shape
dbunch.show_batch()
from rsna_retro.self_supervised import *
from rsna_retro.contrastive_loss import *
#export
def get_aug_pipe(size, min_scale=0.4, stats=(mean,std), **kwargs):
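    # NOTE: the `mean`/`std` defaults are assumed to come from the rsna_retro star-imports above; the calls below pass imagenet_stats explicitly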
tfms = [Normalize.from_stats(*stats)] + aug_transforms(size=size, min_scale=min_scale, **kwargs)
return Pipeline(tfms)
m_part = partial(m, c_out=nc, act_cls=torch.nn.ReLU, sa=sa, sym=sym, pool=pool)
ss_name = 'imagewang_contrast_ss4'
aug = get_aug_pipe(size, min_scale=0.20, mult=1, stats=imagenet_stats)
aug2 = get_aug_pipe(size, min_scale=0.15, mult=2, stats=imagenet_stats)
cbs = SSCallback(XentLoss(0.1), size=size, aug_targ=aug, aug_pos=aug2, multi_loss=False)
# cbs = SSCallback(BatchContrastiveLoss(XentLoss(0.1)), size=size, aug_targ=aug, aug_pos=aug2, multi_loss=False)
# ch = nn.Sequential(nn.AdaptiveAvgPool2d(1), Flatten(), nn.Linear(512, 128))
ch = nn.Sequential(nn.AdaptiveAvgPool2d(1), Flatten(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, 64))
learn = cnn_learner(dbunch, m_part, opt_func=opt_func,
metrics=[], loss_func=CrossEntropyLossFlat(), cbs=cbs,
config={'custom_head':ch}, pretrained=False,
)
# metrics=[], loss_func=LabelSmoothingCrossEntropy())
learn.unfreeze()
learn.fit_flat_cos(30, 1e-2, wd=1e-3)
learn.save(f'{ss_name}')
learn.load(f'{ss_name}')
learn.unfreeze()
learn.fit_flat_cos(30, 4e-3, wd=1e-3)
learn.save(f'{ss_name}-1')
###Output
_____no_output_____
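###Markdown
`SSCallback` and `XentLoss` come from the external `rsna_retro` modules imported above, so their implementation is not visible in this notebook. For reference, the cell below is a self-contained sketch of a temperature-scaled contrastive (NT-Xent / SimCLR-style) loss, which is the kind of objective a loss like `XentLoss(0.1)` typically computes; it is an assumption about the idea, not the actual `XentLoss` code.
###Code
import torch
import torch.nn.functional as F

def nt_xent_sketch(z1, z2, temperature=0.1):
    "Temperature-scaled cross-entropy over a batch of paired views (SimCLR-style)."
    n = z1.size(0)
    z = F.normalize(torch.cat([z1, z2], dim=0), dim=1)   # (2n, d) unit-norm embeddings
    sim = z @ z.t() / temperature                        # scaled cosine similarities
    sim.fill_diagonal_(float("-inf"))                    # exclude self-similarity
    # The positive for row i is the other view of the same image: i <-> i + n
    targets = torch.cat([torch.arange(n) + n, torch.arange(n)]).to(z.device)
    return F.cross_entropy(sim, targets)

# Quick shape check with random embeddings
z1, z2 = torch.randn(8, 64), torch.randn(8, 64)
print(nt_xent_sketch(z1, z2))
###Output
_____no_output_____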
###Markdown
Downstream Task: Image Classification
###Code
def get_dbunch(size, bs, workers=8):
path = URLs.IMAGEWANG_160 if size <= 160 else URLs.IMAGEWANG
source = untar_data(path)
files = get_image_files(source)
item_aug = [RandomResizedCrop(size, min_scale=0.35), FlipItem(0.5)]
tfms = [[PILImage.create, ToTensor, *item_aug],
[lambda x: x.parent.name, Categorize()]]
dsets = Datasets(files, tfms=tfms, splits=GrandparentSplitter(valid_name='val')(files))
batch_tfms = [IntToFloatTensor, Normalize.from_stats(*imagenet_stats)]
dls = dsets.dataloaders(bs=bs, num_workers=workers, after_batch=batch_tfms)
dls.path = source
return dls
size=128
bs = 64
dbunch = get_dbunch(size, bs)
dbunch.show_batch()
###Output
_____no_output_____
###Markdown
5 Epochs
###Code
epochs = 5
runs = 1
for run in range(runs):
print(f'Run: {run}')
# aug = get_aug_pipe(size, min_scale=1.0, mult=1)
# aug2 = get_aug_pipe(size, min_scale=0.3, mult=1)
# cbs = SSCallback(BatchContrastiveLoss(XentContrastiveLoss(0.05)), size=size, aug_targ=aug, aug_pos=aug2, multi_loss=True)
ch = nn.Sequential(nn.AdaptiveAvgPool2d(1), Flatten(), nn.Linear(512, 256))
learn = cnn_learner(dbunch, m_part, opt_func=opt_func,
metrics=[accuracy,top_k_accuracy], loss_func=CrossEntropyLossFlat(),
pretrained=False,
config={'custom_head':ch})#, cbs=cbs)
# if fp16: learn = learn.to_fp16()
learn.load(f'{ss_name}-1', strict=False)
learn.model[1][-1] = nn.Linear(512, 20)
learn.unfreeze()
# learn.freeze()
# learn.lr_find()
learn.fit_flat_cos(epochs, 1e-2, wd=1e-3)
for run in range(runs):
print(f'Run: {run}')
aug = get_aug_pipe(size, min_scale=0.35, mult=1)
aug2 = get_aug_pipe(size, min_scale=0.3, mult=1)
cbs = SSCallback(XentLoss(0.1), size=size, aug_targ=aug, aug_pos=aug2, multi_loss=True)
ch = nn.Sequential(nn.AdaptiveAvgPool2d(1), Flatten(), nn.Linear(512, 256))
learn = cnn_learner(dbunch, m_part, opt_func=opt_func,
metrics=[accuracy,top_k_accuracy], loss_func=CrossEntropyLossFlat(),
pretrained=True,
config={'custom_head':ch}, cbs=cbs)
# if fp16: learn = learn.to_fp16()
# learn.load(f'{ss_name}-1', strict=False)
learn.model[1][-1] = nn.Linear(512, 20)
learn.unfreeze()
# learn.freeze()
# learn.lr_find()
learn.fit_flat_cos(epochs, 2e-2, wd=1e-2)
len(learn.opt.opt.param_groups)
###Output
_____no_output_____
###Markdown
- Run 1: 0.403156- Run 2: 0.404429- Run 3: 0.416645- Run 4: 0.407228- Run 5: 0.412064Average: **40.87%** 20 Epochs
###Code
epochs = 20
runs = 3
for run in range(runs):
print(f'Run: {run}')
ch = nn.Sequential(nn.AdaptiveAvgPool2d(1), Flatten(), nn.Linear(512, 20))
learn = cnn_learner(dbunch, m_part, opt_func=opt_func,
metrics=[accuracy,top_k_accuracy], loss_func=CrossEntropyLossFlat(),
config={'custom_head':ch})#, cbs=cbs)
if dump: print(learn.model); exit()
# if fp16: learn = learn.to_fp16()
cbs = MixUp(mixup) if mixup else []
learn.load(ss_name, strict=True)
learn.freeze()
learn.fit_flat_cos(epochs, lr, wd=1e-2, cbs=cbs)
###Output
_____no_output_____
###Markdown
- Run 1: 0.610333- Run 2: 0.618733- Run 3: 0.605498Average: **61.15%** 80 epochs
###Code
epochs = 80
runs = 1
for run in range(runs):
print(f'Run: {run}')
ch = nn.Sequential(nn.AdaptiveAvgPool2d(1), Flatten(), nn.Linear(512, 20))
learn = cnn_learner(dbunch, m_part, opt_func=opt_func,
metrics=[accuracy,top_k_accuracy], loss_func=CrossEntropyLossFlat(),
config={'custom_head':ch})#, cbs=cbs)
if dump: print(learn.model); exit()
# if fp16: learn = learn.to_fp16()
cbs = MixUp(mixup) if mixup else []
learn.load(ss_name, strict=True)
learn.freeze()
learn.fit_flat_cos(epochs, lr, wd=1e-2, cbs=cbs)
###Output
_____no_output_____
###Markdown
Accuracy: **62.18%** 200 epochs
###Code
epochs = 200
runs = 1
for run in range(runs):
print(f'Run: {run}')
ch = nn.Sequential(nn.AdaptiveAvgPool2d(1), Flatten(), nn.Linear(512, 20))
learn = cnn_learner(dbunch, m_part, opt_func=opt_func,
metrics=[accuracy,top_k_accuracy], loss_func=CrossEntropyLossFlat(),
config={'custom_head':ch})#, cbs=cbs)
if dump: print(learn.model); exit()
# if fp16: learn = learn.to_fp16()
cbs = MixUp(mixup) if mixup else []
learn.load(ss_name, strict=True)
learn.freeze()
learn.fit_flat_cos(epochs, lr, wd=1e-2, cbs=cbs)
###Output
_____no_output_____ |
homeworks/hw_02_Moreno.ipynb | ###Markdown
Homework N°02 Instructions1.- Fill in your personal information (name and USM roll number) in the following cell.* __Name__: Javiera Moreno Peña* __Roll number__: 201710506-22.- You must _push_ this file with your changes to your personal course repository, including data, images, scripts, etc.3.- The following will be evaluated: - Solutions - Code - That Binder is properly configured. - When pressing `Kernel -> Restart Kernel and Run All Cells`, all cells must run without errors. I.- Digit classificationIn this lab we will work on recognizing a digit from an image.  The objective is to make the best prediction of each image from the data. To do so, it is necessary to carry out the classic steps of a _Machine Learning_ project, such as descriptive statistics, visualization and preprocessing. * You are asked to fit at least three classification models: * Logistic regression * K-Nearest Neighbours * One or more algorithms of your choice [link](https://scikit-learn.org/stable/supervised_learning.htmlsupervised-learning) (it is mandatory to choose an _estimator_ that has at least one hyperparameter). * For models that have hyperparameters, it is mandatory to search for the best one(s) with some technique available in `scikit-learn` ([see more](https://scikit-learn.org/stable/modules/grid_search.htmltuning-the-hyper-parameters-of-an-estimator)).* For each model, _Cross Validation_ with 10 _folds_ must be performed using the training data in order to determine a confidence interval for the model's _score_.* Make a prediction with each of the three models on the _test_ data and obtain the _score_. * Analyze the error metrics (**accuracy**, **precision**, **recall**, **f-score**) Data explorationThe dataset to be used is loaded below, through the `datasets` sub-module of `sklearn`.
###Code
import numpy as np
import pandas as pd
from sklearn import datasets
import matplotlib.pyplot as plt
%matplotlib inline
digits_dict = datasets.load_digits()
print(digits_dict["DESCR"])
digits_dict.keys()
digits_dict["target"]
###Output
_____no_output_____
###Markdown
Below, a dataframe named `digits` is created from the data in `digits_dict` such that it has 65 columns, the first 64 corresponding to the grayscale representation of the image (0-white, 255-black) and the last one corresponding to the digit (`target`), named _target_.
###Code
digits = (
pd.DataFrame(
digits_dict["data"],
)
.rename(columns=lambda x: f"c{x:02d}")
.assign(target=digits_dict["target"])
.astype(int)
)
digits.head()
#for i in digits["target"]:
# print(digits["target"][i])
###Output
_____no_output_____
###Markdown
Exercise 1**Exploratory analysis:** Carry out your exploratory analysis, don't forget anything! Remember, each analysis should answer a question.Some suggestions:* How are the data distributed?* How much memory am I using?* What type of data is it?* How many records are there per class?* Are there records that do not match your prior knowledge of the data?
###Code
#¿Qué tipo de datos son?
digits.dtypes
#¿Cuántos registros por clase hay?
digits.count()
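# Added check: counts per class, which is what the question above asks for
print(digits["target"].value_counts())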
#¿Cuánta memoria estoy utilizando?
digits.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1797 entries, 0 to 1796
Data columns (total 65 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 c00 1797 non-null int32
1 c01 1797 non-null int32
2 c02 1797 non-null int32
3 c03 1797 non-null int32
4 c04 1797 non-null int32
5 c05 1797 non-null int32
6 c06 1797 non-null int32
7 c07 1797 non-null int32
8 c08 1797 non-null int32
9 c09 1797 non-null int32
10 c10 1797 non-null int32
11 c11 1797 non-null int32
12 c12 1797 non-null int32
13 c13 1797 non-null int32
14 c14 1797 non-null int32
15 c15 1797 non-null int32
16 c16 1797 non-null int32
17 c17 1797 non-null int32
18 c18 1797 non-null int32
19 c19 1797 non-null int32
20 c20 1797 non-null int32
21 c21 1797 non-null int32
22 c22 1797 non-null int32
23 c23 1797 non-null int32
24 c24 1797 non-null int32
25 c25 1797 non-null int32
26 c26 1797 non-null int32
27 c27 1797 non-null int32
28 c28 1797 non-null int32
29 c29 1797 non-null int32
30 c30 1797 non-null int32
31 c31 1797 non-null int32
32 c32 1797 non-null int32
33 c33 1797 non-null int32
34 c34 1797 non-null int32
35 c35 1797 non-null int32
36 c36 1797 non-null int32
37 c37 1797 non-null int32
38 c38 1797 non-null int32
39 c39 1797 non-null int32
40 c40 1797 non-null int32
41 c41 1797 non-null int32
42 c42 1797 non-null int32
43 c43 1797 non-null int32
44 c44 1797 non-null int32
45 c45 1797 non-null int32
46 c46 1797 non-null int32
47 c47 1797 non-null int32
48 c48 1797 non-null int32
49 c49 1797 non-null int32
50 c50 1797 non-null int32
51 c51 1797 non-null int32
52 c52 1797 non-null int32
53 c53 1797 non-null int32
54 c54 1797 non-null int32
55 c55 1797 non-null int32
56 c56 1797 non-null int32
57 c57 1797 non-null int32
58 c58 1797 non-null int32
59 c59 1797 non-null int32
60 c60 1797 non-null int32
61 c61 1797 non-null int32
62 c62 1797 non-null int32
63 c63 1797 non-null int32
64 target 1797 non-null int32
dtypes: int32(65)
memory usage: 456.4 KB
###Markdown
- The digits data are distributed in columns named c_i with i between 0 and 63, plus a column named target. These data are of type int32.- Memory usage: 456.4 KB.
###Code
#Are there records that do not match my prior knowledge of the data?
#There are no records that contradict my prior knowledge of the data.
digits.describe()
###Output
_____no_output_____
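###Markdown
(Added check.) A short pandas check confirms which pixel columns are constant zero, matching the observation below.
###Code
# Columns whose every entry is zero
pixel_cols = digits.drop(columns="target")
print(list(pixel_cols.columns[(pixel_cols == 0).all()]))
###Output
_____no_output_____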
###Markdown
We note that there are three columns whose cells are all zeros (we can see this in the previous table since mean = 0): c00, c32 and c39. Exercise 2**Visualization:** To visualize the data we will use the `imshow` method of `matplotlib`. It is necessary to convert the array from dimensions (1,64) to (8,8) so that the image is square and the digit can be distinguished. We will also overlay the label corresponding to the digit, using the `text` method. This will allow us to compare the generated image with the label associated with the values. We will do the above for the first 25 records in the file.
###Code
digits_dict["images"][0]
digits_dict["images"][1]
###Output
_____no_output_____
###Markdown
Visualize images of the digits using the `images` key of `digits_dict`. Suggestion: Use `plt.subplots` and the `imshow` method. You can make a grid with several images at the same time!
###Code
nx, ny = 5, 5
fig, axs = plt.subplots(nx, ny, figsize=(12, 12))
labels = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
for i in range(0, nx):
for j in range(0, ny):
axs[i, j].text(2 ,5 , labels[(5*i+j) % 10], fontsize = 80, color = 'hotpink')
axs[i, j].imshow(digits_dict["images"][5*i+j] )
###Output
_____no_output_____
###Markdown
Exercise 3**Machine Learning**: In this part you must train the different models chosen from the `sklearn` library. For each model, you must perform the following steps:* **train-test** * Create training and test sets (you determine the appropriate proportions). * Print the length of the training and test sets. * **model**: * Instantiate the target model from the sklearn library. * *Hyper-parameters*: Use `sklearn.model_selection.GridSearchCV` to obtain the best estimate of the target model's parameters.* **Metrics**: * Plot the confusion matrix. * Analyze the error metrics.__Questions to answer:__* Which model is best based on its metrics?* Which model takes the least time to fit?* Which model do you choose?
###Code
import os
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.max_columns', 500) # Ver más columnas de los dataframes
# Ver gráficos de matplotlib en jupyter notebook/lab
%matplotlib inline
from sklearn import linear_model
from sklearn import tree
from sklearn import svm
from sklearn.metrics import confusion_matrix
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestClassifier
from time import time
#Regresion Logistica
from metrics_classification import *
from sklearn.metrics import r2_score
from sklearn.linear_model import LogisticRegression
#KNeighbors
from sklearn.neighbors import KNeighborsClassifier
from sklearn import neighbors
from sklearn import preprocessing
#Tree Classifier
from sklearn.tree import DecisionTreeClassifier
#Matriz de Confusion
from sklearn.metrics import confusion_matrix
import time
X = digits.drop(columns="target").values
Y = digits["target"].values
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
# print rows train and test sets
print('Separando informacion:\n')
print('numero de filas data original : ',len(X))
print('numero de filas train set : ',len(X_train))
print('numero de filas test set : ',len(X_test))
###Output
Separando informacion:
numero de filas data original : 1797
numero de filas train set : 1437
numero de filas test set : 360
###Markdown
Logistic regression
###Code
# Creando el modelo
rlog = LogisticRegression()
rlog.fit(X_train, Y_train) # ajustando el modelo
# metrics
from metrics_classification import *
from sklearn.metrics import confusion_matrix
y_true = list(Y_test)
y_pred = list(rlog.predict(X_test))
print('Valores:\n')
print('originales:\n ', y_true)
print('\npredicho: \n ', y_pred)
print('\nMatriz de confusion:\n ')
print(confusion_matrix(y_true,y_pred))
# ejemplo
df_temp = pd.DataFrame(
{
'y':y_true,
'yhat':y_pred
}
)
df_metrics = summary_metrics(df_temp)
print("\nMetricas para los regresores:")
print("")
df_metrics
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
#Diccionario de Hiperparámetros
dicc_rlog = {
'penalty' : ['l1', 'l2'],
'C' : [100, 10 , 1, 0.1, 0.01],
'class_weight' : ['balanced', None],
'solver' : ['liblinear'],
}
grid_rlog = GridSearchCV(estimator = rlog, param_grid = dicc_rlog, refit = True, n_jobs=-1)
start_time = time.time() #Tiempo de inicio
grid_result_rlog = grid_rlog.fit(X_train, Y_train)
print("Este modelo se ajustó en",(time.time() - start_time),"segundos.")
# print best parameter after tuning
print("El mejor score tuvo un valor de:", grid_result_rlog.best_score_)
print("usando los siguientes parámetros:",grid_result_rlog.best_params_)
grid_predictions = grid_result_rlog.predict(X_test)
###Output
El mejor score tuvo un valor de: 0.9672861014324429
usando los siguientes parámetros: {'C': 0.1, 'class_weight': None, 'penalty': 'l1', 'solver': 'liblinear'}
###Markdown
KNN:
###Code
reg_knn = neighbors.KNeighborsClassifier(5,weights='uniform')
reg_knn.fit(X_train, Y_train)
# metrics
from metrics_classification import *
from sklearn.metrics import confusion_matrix
y_true = list(Y_test)
y_pred = list(reg_knn.predict(X_test))
print('Valores:\n')
print('originales:\n ', y_true)
print('\npredicho: \n ', y_pred)
# ejemplo
df_temp = pd.DataFrame(
{
'y':y_true,
'yhat':y_pred
}
)
df_metrics = summary_metrics(df_temp)
print("\nMetricas para los regresores:")
print("")
df_metrics
#Diccionario de Hiperparámetros
dicc_knn = {
'n_neighbors' : [3, 6, 15,30],
'weights' : ['uniform', 'distance'],
'metric' : ['euclidean', 'minkowski'],
'algorithm' : ['auto','brute', 'kd_tree']
}
grid_knn = GridSearchCV(estimator = reg_knn, param_grid = dicc_knn, cv = 10)
start_time = time.time() #Tiempo de inicio
grid_result_knn = grid_knn.fit(X_train, Y_train)
print("Este modelo se ajustó en",(time.time() - start_time),"segundos.")
# print best parameter after tuning
print("El mejor score tuvo un valor de:", grid_result_knn.best_score_)
print("usando los siguientes parámetros:",grid_result_knn.best_params_)
grid_predictions = grid_result_knn.predict(X_test)
###Output
El mejor score tuvo un valor de: 0.9874757187257188
usando los siguientes parámetros: {'algorithm': 'auto', 'metric': 'euclidean', 'n_neighbors': 3, 'weights': 'distance'}
###Markdown
Decision Tree:
###Code
reg_tree = tree.DecisionTreeClassifier(max_depth=5)
reg_tree.fit(X_train, Y_train)
# metrics
from metrics_classification import *
from sklearn.metrics import confusion_matrix
y_true = list(Y_test)
y_pred = list(reg_tree.predict(X_test))
print('Valores:\n')
print('originales:\n ', y_true)
print('\npredicho: \n ', y_pred)
# ejemplo
df_temp = pd.DataFrame(
{
'y':y_true,
'yhat':y_pred
}
)
df_metrics = summary_metrics(df_temp)
print("\nMetricas para los regresores:")
print("")
df_metrics
#Diccionario de Hiperparámetros
dicc_tree = {
'criterion' : ['gini', 'entropy'],
'splitter' : ['best', 'random'],
'max_features' : ['auto', 'sqrt', 'log2'],
'max_depth': [6,10,15,20,30]
}
#grid_rlog = GridSearchCV(estimator = rlog, param_grid = dicc, refit = True, cv = 10)
grid_tree = GridSearchCV(estimator = reg_tree, param_grid = dicc_tree, cv = 10)
start_time = time.time() #Tiempo de inicio
grid_result_tree = grid_tree.fit(X_train, Y_train)
print("Este modelo se ajustó en",(time.time() - start_time),"segundos.")
# print best parameter after tuning
print("El mejor score tuvo un valor de:", grid_result_tree.best_score_)
print("usando los siguientes parámetros:",grid_result_tree.best_params_)
grid_predictions = grid_result_tree.predict(X_test)
###Output
El mejor score tuvo un valor de: 0.8142385392385393
usando los siguientes parámetros: {'criterion': 'entropy', 'max_depth': 15, 'max_features': 'auto', 'splitter': 'best'}
###Markdown
- Which model is best based on its metrics?: the best model according to the metrics is the K-Nearest Neighbors model.- Which model takes the least time to fit?: the model that takes the least time to fit is the Decision Tree Classifier, at approximately 2.4 seconds.- Which model do you choose?: I choose the K-Nearest Neighbors model because it has the best metrics, and although its run time was around 11 seconds, I do not think that is an excessively long run time. Exercise 4__Understanding the model:__ Taking into account the best model found in `Exercise 3`, you must thoroughly understand and interpret the results and plots associated with the model under study. To do so, address the following points: * **Cross validation**: using **cv** (with n_fold = 10), obtain a kind of "confidence interval" for one of the metrics studied in class: * $\mu \pm \sigma$ = mean $\pm$ standard deviation * **Validation curve**: Replicate the example from the following [link](https://scikit-learn.org/stable/auto_examples/model_selection/plot_validation_curve.htmlsphx-glr-auto-examples-model-selection-plot-validation-curve-py) but with the appropriate model, parameters and metric. Draw conclusions from the plot. * **AUC–ROC curve**: Replicate the example from the following [link](https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.htmlsphx-glr-auto-examples-model-selection-plot-roc-py) but with the appropriate model, parameters and metric. Draw conclusions from the plot.
###Code
#Cross validation
from sklearn.model_selection import cross_val_score
scores = cross_val_score(estimator = reg_knn,
X = X_train,
y = Y_train,
cv = 10)
print("La precisión es de: %0.2f ± %0.2f" % (scores.mean(), scores.std() * 2))
#Curva de Validación:
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.model_selection import validation_curve
#X, y = load_digits(return_X_y=True)
param_range = np.array([i for i in range (1,10)])
train_scores, test_scores = validation_curve(
#KNeighborsClassifier(algorithm = 'auto', metric = 'euclidean', weights = 'distance'),
KNeighborsClassifier(5,weights='uniform'),
X_train,
Y_train,
param_name="n_neighbors",
param_range=param_range,
scoring="accuracy",
n_jobs=1,
)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve for KNN")
plt.xlabel("n_neighbors")
plt.ylabel("Score")
plt.ylim(0.95, 1.05)
lw = 2
plt.semilogx(
param_range, train_scores_mean, label="Training score", color="darkorange", lw=lw
)
plt.fill_between(
param_range,
train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std,
alpha=0.2,
color="darkorange",
lw=lw,
)
plt.semilogx(
param_range, test_scores_mean, label="Cross-validation score", color="navy", lw=lw
)
plt.fill_between(
param_range,
test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std,
alpha=0.2,
color="navy",
lw=lw,
)
plt.legend(loc="best")
plt.show()
#Curva AUC-ROC
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from sklearn.metrics import roc_auc_score
# Binarize the output
y = label_binarize(Y, classes=digits["target"].unique())
n_classes = y.shape[1]
n_samples, n_features = X.shape
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.3,
train_size = 0.7)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(KNeighborsClassifier())
y_score = classifier.fit(X_train, y_train).predict(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
#Plot ROC curves for the multiclass problem
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(
fpr["micro"],
tpr["micro"],
label="micro-average ROC curve (area = {0:0.2f})".format(roc_auc["micro"]),
color="deeppink",
linestyle=":",
linewidth=4,
)
plt.plot(
fpr["macro"],
tpr["macro"],
label="macro-average ROC curve (area = {0:0.2f})".format(roc_auc["macro"]),
color="navy",
linestyle=":",
linewidth=4,
)
colors = cycle(["aqua", "darkorange", "cornflowerblue"])
for i, color in zip(range(n_classes), colors):
plt.plot(
fpr[i],
tpr[i],
color=color,
lw=lw,
label="ROC curve of class {0} (area = {1:0.2f})".format(i, roc_auc[i]),
)
plt.plot([0, 1], [0, 1], "k--", lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Some extension of Receiver operating characteristic to multiclass")
plt.legend(loc="lower right")
plt.show()
###Output
<ipython-input-39-f87f6eed5040>:8: DeprecationWarning: scipy.interp is deprecated and will be removed in SciPy 2.0.0, use numpy.interp instead
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
###Markdown
Exercise 5__Dimensionality reduction:__ Taking into account the best model found in `Exercise 3`, you must perform a dimensionality reduction of the data set. To do so, you must approach the problem using the two criteria seen in class: * **Feature selection*** **Feature extraction**__Questions to answer:__Once the dimensionality reduction has been performed, you must produce some statistics and comparative plots between the original data set and the new data set (dataset size, model run time, etc.)
###Code
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
###Output
_____no_output_____
###Markdown
Feature selection:
###Code
# Separamos las columnas objetivo
#x_training = digits.drop(columns = "target")
x_training = digits.drop(['c00','c32','c39',"target"], axis = 1)
y_training = digits["target"]
# Aplicando el algoritmo univariante de prueba F.
k = 20 # número de atributos a seleccionar
columnas = list(x_training.columns.values)
seleccionadas = SelectKBest(f_classif, k=k).fit(x_training, y_training)
catrib = seleccionadas.get_support()
atributos = [columnas[i] for i in list(catrib.nonzero()[0])]
atributos #printea la selección de atributos con la cual trabajaremos.
###Output
_____no_output_____
###Markdown
Feature extraction: PCA
###Code
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
X2 = StandardScaler().fit_transform(X_train) #reescala los datos
#ajuste del modelo
embedding = PCA(n_components=64)
X_transform = embedding.fit_transform(X)
# graficar varianza por componente
percent_variance = embedding.explained_variance_ratio_* 100
plt.figure(figsize=(20,4))
plt.bar(x= range(1,65), height=percent_variance)
plt.xticks(rotation=90)
plt.ylabel('Componente principal')
plt.xlabel('Por. varianza explicada')
plt.title('Porcentaje de varianza explicada por cada componente')
plt.show()
# graficar varianza por la suma acumulada de los componente
percent_variance_cum = np.cumsum(percent_variance)
plt.figure(figsize=(20,4))
plt.bar(x= range(1,65), height=percent_variance_cum)
plt.ylabel('Percentate of Variance Explained')
plt.xlabel('Principal Component Cumsum')
plt.title('PCA Scree Plot')
plt.show()
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from metrics_classification import summary_metrics
import time
start_time = time.time()
# Entrenamiento con todas las variables
X = digits.drop(columns = "target")
Y = digits["target"]
# split dataset
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state = 2)
# Creando el modelo
knn = KNeighborsClassifier()
knn.fit(X_train, Y_train) # ajustando el modelo
predicciones = knn.predict(X_test)
elapsed_time = time.time() - start_time  # avoid shadowing the `time` module
print("Execution time:", elapsed_time)
df_pred = pd.DataFrame({
'y':Y_test,
'yhat':predicciones
})
df_s1 = summary_metrics(df_pred).assign(name = 'Todas las variables')
import time
start_time = time.time()
# Entrenamiento con las variables seleccionadas
X = digits[atributos]
Y = digits['target']
# split dataset
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state = 2)
# Creando el modelo
knn = KNeighborsClassifier()
knn.fit(X_train, Y_train) # ajustando el modelo
predicciones = knn.predict(X_test)
elapsed_time = time.time() - start_time  # avoid shadowing the `time` module
print("Execution time:", elapsed_time)
df_pred = pd.DataFrame({
'y':Y_test,
'yhat':predicciones
})
df_s2 = summary_metrics(df_pred).assign(name = 'Variables Seleccionadas')
# juntar resultados en formato dataframe
pd.concat([df_s1,df_s2])
###Output
_____no_output_____
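###Markdown
(Added sketch.) The PCA cells above only inspect the explained variance; to make the feature-extraction route comparable with the feature-selection route, the cell below fits the same KNN classifier on a reduced number of principal components. The choice of 20 components is arbitrary and this cell is an addition, not part of the original homework.
###Code
# Added sketch: train KNN on the first 20 principal components (feature extraction)
from sklearn.pipeline import make_pipeline

X = digits.drop(columns="target")
Y = digits["target"]
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=2)

pca_knn = make_pipeline(StandardScaler(), PCA(n_components=20), KNeighborsClassifier())
pca_knn.fit(X_train, Y_train)
pred_pca = pca_knn.predict(X_test)
summary_metrics(pd.DataFrame({"y": Y_test, "yhat": pred_pca})).assign(name="PCA (20 components)")
###Output
_____no_output_____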
###Markdown
We note that the run time is lower for the model with fewer variables. The metrics for the model with selected variables are also slightly better than those of the model with all variables. Exercise 6__Visualizing results:__ Code is provided below to compare the predicted labels vs. the true labels of the _test_ set.
###Code
def mostar_resultados(digits,model,nx=5, ny=5,label = "correctos"):
"""
Muestra los resultados de las prediciones de un modelo
de clasificacion en particular. Se toman aleatoriamente los valores
de los resultados.
- label == 'correcto': retorna los valores en que el modelo acierta.
- label == 'incorrecto': retorna los valores en que el modelo no acierta.
Observacion: El modelo que recibe como argumento debe NO encontrarse
'entrenado'.
:param digits: dataset 'digits'
:param model: modelo de sklearn
:param nx: numero de filas (subplots)
:param ny: numero de columnas (subplots)
:param label: datos correctos o incorrectos
:return: graficos matplotlib
"""
X = digits.drop(columns="target").values
y = digits["target"].values
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.2, random_state = 42)
model.fit(X_train, Y_train) # ajustando el modelo
y_pred = list(model.predict(X_test))
# Mostrar los datos correctos
if label=="correctos":
mask = (y_pred == Y_test)
color = "green"
# Mostrar los datos correctos
elif label=="incorrectos":
mask = (y_pred != Y_test)
color = "red"
else:
raise ValueError("Valor incorrecto")
X_aux = X_test[mask]
#y_aux_true = Y_test[mask]
#y_aux_pred = y_pred[mask]
y_aux_true = np.array(Y_test)[mask]
y_aux_pred = np.array(y_pred)[mask]
# We'll plot the first 100 examples, randomly choosen
fig, ax = plt.subplots(nx, ny, figsize=(12,12))
for i in range(nx):
for j in range(ny):
index = j + ny * i
data = X_aux[index, :].reshape(8,8)
label_pred = str(int(y_aux_pred[index]))
label_true = str(int(y_aux_true[index]))
ax[i][j].imshow(data, interpolation='nearest', cmap='gray_r')
ax[i][j].text(0, 0, label_pred, horizontalalignment='center', verticalalignment='center', fontsize=10, color=color)
ax[i][j].text(7, 0, label_true, horizontalalignment='center', verticalalignment='center', fontsize=10, color='blue')
ax[i][j].get_xaxis().set_visible(False)
ax[i][j].get_yaxis().set_visible(False)
plt.show()
###Output
_____no_output_____
###Markdown
**Question*** Taking into account the best model found in `Exercise 3`, plot the results when: * the predicted and original values are equal * the predicted and original values are different * When the predicted and original values are different, why do these failures occur?
###Code
mostar_resultados(digits,KNeighborsClassifier(),nx=5, ny=5,label = "correctos")
mostar_resultados(digits,KNeighborsClassifier(),nx=2, ny=2,label = "incorrectos")
###Output
_____no_output_____
###Markdown
Exercise 7**Conclusions**: Give your verdict, answer the initial questions, visualizations, future work, difficulties, etc. The main difficulties I found were that I did not understand what was being asked and, for example, the function given in Exercise 6 was badly written, and it took me a long time to understand what was wrong so that it would work. I also think the Machine Learning part was taught too quickly by the professor, so it was even harder for me to work through the exercises because I constantly had to go back to the class material or to Google to understand what I had to do. II.- California Housing PricesThe data refer to the houses found in a given California district and some summary statistics about them based on the 1990 census data. Note that the data are not cleaned, so some preprocessing steps are required.The columns are the following; their names are self-explanatory:* longitude* latitude* housingmedianage* total_rooms* total_bedrooms* population* households* median_income* medianhousevalue* ocean_proximityThe objective is to be able to predict the average value of each property. In order to complete this lab correctly, the following work rubric must be followed:1. Problem definition2. Descriptive statistics3. Descriptive visualization4. Preprocessing5. Model selection 1. You must compare at least four models 2. At least three of these models must have hyperparameters. 3. Perform hyperparameter optimization.6. Metrics and analysis of results7. Model visualizations8. Conclusions> **Note**: You may take what was done in section **I.- Digit classification** as a reference. The student is free to develop a more complete analysis of the problem (if desired). You may use the following [link](https://www.kaggle.com/camnugent/california-housing-prices) as a reference.
###Code
# read data
housing = pd.read_csv('data/housing.csv',sep=',')
housing.head()
print("Shape:", housing.shape)
print("Types:\n", housing.dtypes)
housing.info()
print(housing.isnull().sum())
###Output
longitude 0
latitude 0
housing_median_age 0
total_rooms 0
total_bedrooms 207
population 0
households 0
median_income 0
median_house_value 0
ocean_proximity 0
dtype: int64
###Markdown
We note that total_bedrooms has 207 null values, which we will replace with the mean of total_bedrooms:
###Code
housing['total_bedrooms'] = housing['total_bedrooms'].fillna(housing['total_bedrooms'].mean())
housing.boxplot(column=['median_house_value'])
housing['median_house_value'].describe()
###Output
_____no_output_____
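###Markdown
(Added preprocessing sketch.) The models below drop the categorical `ocean_proximity` column; one standard alternative would be to one-hot encode it and keep it as a predictor, for example:
###Code
# Added sketch: keep ocean_proximity by one-hot encoding it
housing_ohe = pd.get_dummies(housing, columns=["ocean_proximity"], prefix="ocean")
housing_ohe.head()
###Output
_____no_output_____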
###Markdown
We can infer that the maximum value, 500001.000000, is possibly an outlier (it is far from the other values). Let's count how many values have that number and their percentage with respect to the total number of values (which is 20640).
###Code
housing[housing['median_house_value'] == 500001]['median_house_value'].count()
print((965/ 20640)*100)
###Output
4.675387596899225
###Markdown
The outliers represent around 4.7% of the total data. We decide not to remove them because it is a low percentage.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
housing.hist(bins=50,figsize=(20,15))
plt.show()
housing.plot(kind="scatter",x="longitude",y="latitude", alpha = 0.1)
X = housing.drop(columns=["median_house_value","ocean_proximity"]).values
Y = housing["median_house_value"].values
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
# print rows train and test sets
print('Separando informacion:\n')
print('numero de filas data original : ',len(X))
print('numero de filas train set : ',len(X_train))
print('numero de filas test set : ',len(X_test))
###Output
Separando informacion:
numero de filas data original : 20640
numero de filas train set : 16512
numero de filas test set : 4128
###Markdown
Logistic Regression:
###Code
# Creando el modelo
rlog = LogisticRegression()
rlog.fit(X_train, Y_train) # ajustando el modelo
# metrics
from metrics_classification import *
from sklearn.metrics import confusion_matrix
y_true = list(Y_test)
y_pred = list(rlog.predict(X_test))
print('Valores:\n')
print('originales:\n ', y_true)
print('\npredicho: \n ', y_pred)
print('\nMatriz de confusion:\n ')
print(confusion_matrix(y_true,y_pred))
# ejemplo
df_temp = pd.DataFrame(
{
'y':y_true,
'yhat':y_pred
}
)
df_metrics = summary_metrics(df_temp)
print("\nMetricas para los regresores:")
print("")
df_metrics
#I COULD NOT GET THE FOLLOWING CODE TO RUN: it kept executing for half an hour and nothing happened, it never finished.
"""#Diccionario de Hiperparámetros
dicc_rlog = {
'penalty' : ['l1', 'l2'],
'C' : [100, 10 , 1, 0.1, 0.01],
'class_weight' : ['balanced', None],
'solver' : ['liblinear'],
}
grid_rlog = GridSearchCV(estimator = rlog, param_grid = dicc_rlog, refit = True, n_jobs=-1)
start_time = time.time() #Tiempo de inicio
grid_result_rlog = grid_rlog.fit(X_train, Y_train)
print("Este modelo se ajustó en",(time.time() - start_time),"segundos.")
"""
"""# print best parameter after tuning
print("El mejor score tuvo un valor de:", grid_result_rlog.best_score_)
print("usando los siguientes parámetros:",grid_result_rlog.best_params_)
grid_predictions = grid_result_rlog.predict(X_test)
"""
###Output
_____no_output_____
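###Markdown
The grid search above never finished, largely because `median_house_value` is a continuous target with thousands of distinct values, so each classification fit is extremely expensive. One cheaper option, added here only as a sketch, is to sample a few hyper-parameter combinations with `RandomizedSearchCV`; the fit call is left commented out because it is still slow on this target.
###Code
from sklearn.model_selection import RandomizedSearchCV
import time

param_dist = {
    'penalty': ['l1', 'l2'],
    'C': [100, 10, 1, 0.1, 0.01],
    'class_weight': ['balanced', None],
    'solver': ['liblinear'],
}
rand_rlog = RandomizedSearchCV(LogisticRegression(), param_dist, n_iter=4, cv=3,
                               n_jobs=-1, random_state=0)
start_time = time.time()
# rand_rlog.fit(X_train, Y_train)                      # still slow with a continuous target, so left commented out
# print("Fitted in", time.time() - start_time, "seconds")
###Output
_____no_output_____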
###Markdown
K-Neighbors Classifier:
###Code
reg_knn = neighbors.KNeighborsClassifier(5,weights='uniform')
reg_knn.fit(X_train, Y_train)
# metrics
from metrics_classification import *
from sklearn.metrics import confusion_matrix
y_true = list(Y_test)
y_pred = list(reg_knn.predict(X_test))
print('Valores:\n')
print('originales:\n ', y_true)
print('\npredicho: \n ', y_pred)
print('\nMatriz de confusion:\n ')
print(confusion_matrix(y_true,y_pred))
# ejemplo
df_temp = pd.DataFrame(
{
'y':y_true,
'yhat':y_pred
}
)
df_metrics = summary_metrics(df_temp)
print("\nMetricas para los regresores:")
print("")
df_metrics
import time
#Diccionario de Hiperparámetros
dicc_knn = {
'n_neighbors' : [3, 6, 15,30],
'weights' : ['uniform', 'distance'],
'metric' : ['euclidean', 'minkowski'],
'algorithm' : ['auto','brute', 'kd_tree']
}
grid_knn = GridSearchCV(estimator = reg_knn, param_grid = dicc_knn, cv = 10)
start_time = time.time() #Tiempo de inicio
grid_result_knn = grid_knn.fit(X_train, Y_train)
print("Este modelo se ajustó en",(time.time() - start_time),"segundos.")
# print best parameter after tuning
print("El mejor score tuvo un valor de:", grid_result_knn.best_score_)
print("usando los siguientes parámetros:",grid_result_knn.best_params_)
grid_predictions = grid_result_knn.predict(X_test)
###Output
El mejor score tuvo un valor de: 0.04045541406411553
usando los siguientes parámetros: {'algorithm': 'auto', 'metric': 'euclidean', 'n_neighbors': 30, 'weights': 'uniform'}
###Markdown
Decision Tree
###Code
reg_tree = tree.DecisionTreeClassifier(max_depth=5)
reg_tree.fit(X_train, Y_train)
# metrics
from metrics_classification import *
from sklearn.metrics import confusion_matrix
y_true = list(Y_test)
y_pred = list(reg_tree.predict(X_test))
print('Valores:\n')
print('originales:\n ', y_true)
print('\npredicho: \n ', y_pred)
print('\nMatriz de confusion:\n ')
print(confusion_matrix(y_true,y_pred))
# ejemplo
df_temp = pd.DataFrame(
{
'y':y_true,
'yhat':y_pred
}
)
df_metrics = summary_metrics(df_temp)
print("\nMetricas para los regresores:")
print("")
df_metrics
#I COULD NOT GET THE FOLLOWING CODE TO RUN EITHER (it takes too long):
"""import time
#Diccionario de Hiperparámetros
dicc_tree = {
'criterion' : ['gini', 'entropy'],
'splitter' : ['best', 'random'],
'max_features' : ['auto', 'sqrt', 'log2'],
'max_depth': [6,10,15,20,30]
}
#grid_rlog = GridSearchCV(estimator = rlog, param_grid = dicc, refit = True, cv = 10)
grid_tree = GridSearchCV(estimator = reg_tree, param_grid = dicc_tree, cv = 10)
start_time = time.time() #Tiempo de inicio
grid_result_tree = grid_tree.fit(X_train, Y_train)
print("Este modelo se ajustó en",(time.time() - start_time),"segundos.") """
"""# print best parameter after tuning
print("El mejor score tuvo un valor de:", grid_result_tree.best_score_)
print("usando los siguientes parámetros:",grid_result_tree.best_params_)
grid_predictions = grid_result_tree.predict(X_test) """
###Output
_____no_output_____
###Markdown
Random Forest Classifier:
###Code
random_forest = RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1)
random_forest.fit(X_train, Y_train)
# metrics
y_true = list(Y_test)
y_pred = list(random_forest.predict(X_test))
print('Valores:\n')
print('originales:\n ', y_true)
print('\npredicho: \n ', y_pred)
print('\nMatriz de confusion:\n ')
print(confusion_matrix(y_true,y_pred))
# ejemplo
df_temp = pd.DataFrame(
{
'y':y_true,
'yhat':y_pred
}
)
df_metrics = summary_metrics(df_temp)
print("\nMetricas para los regresores:")
print("")
df_metrics
###Output
Metricas para los regresores:
|
TP2/Matias/3_XGBoost_OneHotEncoding.ipynb | ###Markdown
One Hot Encoding
###Code
dummies_train = pd.get_dummies(train["keyword"], prefix="keyword")
dummies_test = pd.get_dummies(test["keyword"], prefix="keyword")
print(dummies_train.shape)
print(dummies_test.shape)
dummies_train.head()
dummies_train.columns
dummies_test.columns
train_ohe = pd.concat([train,dummies_train], axis="columns")
train_ohe.head()
test_ohe = pd.concat([test,dummies_test], axis="columns")
test_ohe.head()
X = train_ohe.drop(["id","keyword","location","text","target"], axis=1)
y = train_ohe["target"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=100)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
estimator = LogisticRegression(solver='lbfgs')
selector = RFE(estimator, 100, step=1) #nos quedamos con las 100 features mas relevantes
selector = selector.fit(X_train, y_train)
# selector = selector.fit(X_train, y_train.ravel())
print(selector.support_)
print(selector.ranking_)
best_100_features = selector.support_
X.loc[:, best_100_features].head()
X.loc[:, best_100_features].columns
#20 features mas relevantes
estimator = LogisticRegression(solver='lbfgs')
selector = RFE(estimator, 20, step=1)
selector = selector.fit(X_train, y_train)
print(selector.support_)
print(selector.ranking_)
best_20_features = selector.support_
X.loc[:, best_20_features].head()
X.loc[:, best_20_features].columns
#Tomamos las features mas relevantes
estimator = LogisticRegression(solver='lbfgs')
selector = RFE(estimator, 50, step=1)
selector = selector.fit(X_train, y_train)
print(selector.support_)
print(selector.ranking_)
best_50_features = selector.support_
X.loc[:, best_50_features].columns
X_reduced = X.loc[:, best_100_features]
X_train, X_test, y_train, y_test = train_test_split(X_reduced, y, test_size=0.25, random_state=100)
###Output
_____no_output_____
###Markdown
Tuning hyper-parameters (using all 223 features): - n_estimators=10, max_depth=6, learning_rate=0.05, subsample=0.8, colsample_bytree=0.8 SCORE 0.646534 - n_estimators=10, max_depth=6, learning_rate=0.1, subsample=0.8, colsample_bytree=0.8 SCORE 0.660714 - n_estimators=20, max_depth=6, learning_rate=0.1, subsample=0.8, colsample_bytree=0.8 SCORE 0.668592 - n_estimators=20, max_depth=7, learning_rate=0.1, subsample=1, colsample_bytree=0.7 SCORE 0.670693 - n_estimators=20, max_depth=9, learning_rate=0.1, subsample=1, colsample_bytree=0.7 SCORE 0.673319 - n_estimators=20, max_depth=11, learning_rate=0.1, subsample=1, colsample_bytree=0.7 SCORE 0.677521 - n_estimators=50, max_depth=7, learning_rate=0.1, subsample=1, colsample_bytree=0.5 SCORE 0.686450 - n_estimators=50, max_depth=9, learning_rate=0.1, subsample=1, colsample_bytree=0.5 SCORE 0.701155 - n_estimators=50, max_depth=11, learning_rate=0.1, subsample=1, colsample_bytree=0.5 SCORE 0.703782 - n_estimators=50, max_depth=13, learning_rate=0.1, subsample=1, colsample_bytree=0.5 SCORE 0.711660 - n_estimators=50, max_depth=15, learning_rate=0.1, subsample=1, colsample_bytree=0.5 SCORE 0.716387 - n_estimators=100, max_depth=9, learning_rate=0.1, subsample=1, colsample_bytree=0.5 SCORE 0.724790 - n_estimators=100, max_depth=9, learning_rate=0.1, subsample=1, colsample_bytree=0.5 SCORE 0.724790 - n_estimators=200, max_depth=11, learning_rate=0.1, subsample=1, colsample_bytree=0.5 SCORE 0.733193 - n_estimators=200, max_depth=13, learning_rate=0.1, subsample=1, colsample_bytree=0.5 SCORE 0.734244 - n_estimators=300, max_depth=11, learning_rate=0.1, subsample=1, colsample_bytree=0.5 SCORE 0.740546 - n_estimators=300, max_depth=11, learning_rate=0.1, subsample=1, colsample_bytree=0.3 SCORE 0.740021 - Best SCORE: 0.740546 Tuning hyper-parameters (100 features): - n_estimators=50, max_depth=7, learning_rate=0.1, subsample=1, colsample_bytree=0.5 SCORE 0.669643 - n_estimators=50, max_depth=11, learning_rate=0.1, subsample=1, colsample_bytree=0.5 SCORE 0.682773 - n_estimators=50, max_depth=15, learning_rate=0.1, subsample=1, colsample_bytree=0.7 SCORE 0.707983 - n_estimators=50, max_depth=15, learning_rate=0.1, subsample=1, colsample_bytree=0.7 SCORE 0.713761 - Best SCORE: 0.713761 Tuning hyper-parameters (50 features): - n_estimators=50, max_depth=7, learning_rate=0.1, subsample=1, colsample_bytree=0.3 SCORE 0.668067 - Best SCORE: 0.668067 Tuning hyper-parameters (30 features): - n_estimators=10, max_depth=9, learning_rate=0.1, subsample=1, colsample_bytree=0.5 SCORE 0.652836 - n_estimators=20, max_depth=11, learning_rate=0.1, subsample=1, colsample_bytree=0.5 SCORE 0.658613 - n_estimators=100, max_depth=11, learning_rate=0.1, subsample=1, colsample_bytree=0.5 SCORE 0.658613 - n_estimators=500, max_depth=13, learning_rate=0.1, subsample=1, colsample_bytree=0.5 SCORE 0.658613 - Best SCORE: 0.658613 Tuning hyper-parameters (10 features): - Best SCORE: 0.619748
###Code
model = xgb.XGBClassifier(n_estimators=300, objective='binary:logistic', max_depth=11, learning_rate=0.1,
subsample=1, colsample_bytree=0.5, n_jobs=1)
model.fit(X_train, y_train)
y_test_hat = model.predict(X_test)
print("Accuracy score: %f" % (accuracy_score(y_test, y_test_hat)))
model.score(X_test, y_test)*100
# Convert the data to DMatrix
data_dmatrix = xgb.DMatrix(data=X.loc[:, best_20_features], label=y)
parametros = {'objective':'binary:logistic','colsample_bytree':0.5, 'learning_rate':0.1, 'max_depth':11}
cv_results = xgb.cv(dtrain=data_dmatrix, params=parametros, nfold=3, num_boost_round=50, early_stopping_rounds=10,\
metrics='error', as_pandas=True, seed=100)
cv_results.head()
cv_results["test-error-mean"].tail(1)
# Visualize the trees
xgb_clasif = xgb.train(params=parametros, dtrain=data_dmatrix, num_boost_round=10)
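# (Editorial sketch) The comment above mentions visualizing the trees; one way to do it,
# assuming the graphviz dependency is installed, is xgboost's built-in helper:
xgb.plot_tree(xgb_clasif, num_trees=0)
plt.show()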
# Feature importance (how many times each feature appears in the trees)
plt.rcParams['figure.figsize'] = [12,7]
xgb.plot_importance(xgb_clasif)
###Output
_____no_output_____
###Markdown
XGBoost - best result: 0.740546
###Code
X_train = train_ohe.drop(["id","keyword","location","text","target"], axis=1)
y_train = train_ohe["target"]
X_test = test_ohe.drop(["id","keyword","location","text"], axis=1)
model = xgb.XGBClassifier(n_estimators=300, objective='binary:logistic', max_depth=11, learning_rate=0.1,
subsample=1, colsample_bytree=0.5, n_jobs=1)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
y_pred
test["target"] = y_pred
test.head()
test[["id","target"]]
test[["id","target"]].to_csv("../data/pred2_XGBwithOHE", index=False)
###Output
_____no_output_____ |
Scaled Tanh layer.ipynb | ###Markdown
Batchnorm Training- `model_bn` is the model with batchnorm layers- train this model on the CIFAR 10 dataset
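The cells below rely on helpers such as `train_model`, `print_stats` and `get_model_error` that are defined elsewhere in the notebook. As a rough idea of what the training helper might look like (this is an assumption based only on the call signature used below, not the actual implementation):

```python
def train_model_sketch(model, loader, criterion, optimizer, epochs=1):
    # Minimal PyTorch-style loop; the real train_model may also log to TensorBoard, etc.
    model.train()
    losses = []
    for _ in range(epochs):
        for x, y in loader:
            optimizer.zero_grad()
            loss = criterion(model(x), y, model)  # the criterion below takes (output, target, model)
            loss.backward()
            optimizer.step()
            losses.append(loss.item())
    return losses
```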
###Code
model_bn = nn.Sequential(
nn.Conv2d(3, 16, 3),
nn.FractionalMaxPool2d(2, output_ratio=(1/np.sqrt(2))),
nn.Conv2d(16, 32, 3),
nn.FractionalMaxPool2d(2, output_ratio=(1/np.sqrt(2))),
nn.Flatten(),
nn.Linear(5408, 1152),
nn.Tanh(),
nn.BatchNorm1d(1152),
nn.Linear(1152, 512),
nn.Tanh(),
nn.BatchNorm1d(512),
nn.Linear(512, 128),
nn.Tanh(),
nn.BatchNorm1d(128),
nn.Linear(128, 10)
)
loss_history_bn = []
writer_bn = SummaryWriter(log_dir='runs/bn_stats-%s' % date.today(), flush_secs=10)
ce_loss = nn.CrossEntropyLoss(reduction='sum')
def criterion(op, y, model_p):
params = list(model_p.parameters())
return ce_loss(op, y) #+ (1/len(params))*(torch.norm(torch.cat([param.view(-1) for param in params]))) #+ torch.sum((1/len(params))*torch.exp(torch.abs(torch.cat([param.view(-1) for param in params]))))
loss_tanh = train_model(model_bn, trainloader, criterion, optimizer = optim.Adam(list(model_bn.parameters()), lr=3e-4))
stats_bn = print_stats(model_bn)
bn_model_err = get_model_error(model_bn, testloader)
###Output
test error: 0.3032
|
01-Neural Networks and Deep Learning/week3/answer-Planar data classification with one hidden layer.ipynb | ###Markdown
Planar data classification with one hidden layerWelcome to your week 3 programming assignment. It's time to build your first neural network, which will have a hidden layer. You will see a big difference between this model and the one you implemented using logistic regression. **You will learn how to:**- Implement a 2-class classification neural network with a single hidden layer- Use units with a non-linear activation function, such as tanh - Compute the cross entropy loss - Implement forward and backward propagation 1 - Packages Let's first import all the packages that you will need during this assignment.- [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.- [sklearn](http://scikit-learn.org/stable/) provides simple and efficient tools for data mining and data analysis. - [matplotlib](http://matplotlib.org) is a library for plotting graphs in Python.- testCases provides some test examples to assess the correctness of your functions- planar_utils provide various useful functions used in this assignment
###Code
# Package imports
import numpy as np
import matplotlib.pyplot as plt
from testCases import *
import sklearn
import sklearn.datasets
import sklearn.linear_model
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets
%matplotlib inline
np.random.seed(1) # set a seed so that the results are consistent
###Output
_____no_output_____
###Markdown
2 - Dataset First, let's get the dataset you will work on. The following code will load a "flower" 2-class dataset into variables `X` and `Y`.
###Code
X, Y = load_planar_dataset()
###Output
_____no_output_____
###Markdown
Visualize the dataset using matplotlib. The data looks like a "flower" with some red (label y=0) and some blue (y=1) points. Your goal is to build a model to fit this data.
###Code
# Visualize the data:
plt.scatter(X[0, :], X[1, :], c=Y, s=40, cmap=plt.cm.Spectral);
###Output
_____no_output_____
###Markdown
You have: - a numpy-array (matrix) X that contains your features (x1, x2) - a numpy-array (vector) Y that contains your labels (red:0, blue:1).Lets first get a better sense of what our data is like. **Exercise**: How many training examples do you have? In addition, what is the `shape` of the variables `X` and `Y`? **Hint**: How do you get the shape of a numpy array? [(help)](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.shape.html)
###Code
### START CODE HERE ### (≈ 3 lines of code)
shape_X = X.shape
shape_Y = Y.shape
m = shape_Y[1] # training set size
### END CODE HERE ###
print ('The shape of X is: ' + str(shape_X))
print ('The shape of Y is: ' + str(shape_Y))
print ('I have m = %d training examples!' % (m))
###Output
The shape of X is: (2, 400)
The shape of Y is: (1, 400)
I have m = 400 training examples!
###Markdown
**Expected Output**: **shape of X** (2, 400) **shape of Y** (1, 400) **m** 400 3 - Simple Logistic RegressionBefore building a full neural network, lets first see how logistic regression performs on this problem. You can use sklearn's built-in functions to do that. Run the code below to train a logistic regression classifier on the dataset.
###Code
# Train the logistic regression classifier
clf = sklearn.linear_model.LogisticRegressionCV();
clf.fit(X.T, Y.T);
###Output
C:\ProgramData\Anaconda3\lib\site-packages\sklearn\utils\validation.py:526: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().
y = column_or_1d(y, warn=True)
###Markdown
You can now plot the decision boundary of these models. Run the code below.
###Code
# Plot the decision boundary for logistic regression
plot_decision_boundary(lambda x: clf.predict(x), X, Y)
plt.title("Logistic Regression")
# Print accuracy
LR_predictions = clf.predict(X.T)
print ('Accuracy of logistic regression: %d ' % float((np.dot(Y,LR_predictions) + np.dot(1-Y,1-LR_predictions))/float(Y.size)*100) +
'% ' + "(percentage of correctly labelled datapoints)")
###Output
Accuracy of logistic regression: 47 % (percentage of correctly labelled datapoints)
###Markdown
**Expected Output**: **Accuracy** 47% **Interpretation**: The dataset is not linearly separable, so logistic regression doesn't perform well. Hopefully a neural network will do better. Let's try this now! 4 - Neural Network modelLogistic regression did not work well on the "flower dataset". You are going to train a Neural Network with a single hidden layer.**Here is our model**:**Mathematically**:For one example $x^{(i)}$:$$z^{[1] (i)} = W^{[1]} x^{(i)} + b^{[1] (i)}\tag{1}$$ $$a^{[1] (i)} = \tanh(z^{[1] (i)})\tag{2}$$$$z^{[2] (i)} = W^{[2]} a^{[1] (i)} + b^{[2] (i)}\tag{3}$$$$\hat{y}^{(i)} = a^{[2] (i)} = \sigma(z^{ [2] (i)})\tag{4}$$$$y^{(i)}_{prediction} = \begin{cases} 1 & \mbox{if } a^{[2](i)} > 0.5 \\ 0 & \mbox{otherwise } \end{cases}\tag{5}$$Given the predictions on all the examples, you can also compute the cost $J$ as follows: $$J = - \frac{1}{m} \sum\limits_{i = 0}^{m} \large\left(\small y^{(i)}\log\left(a^{[2] (i)}\right) + (1-y^{(i)})\log\left(1- a^{[2] (i)}\right) \large \right) \small \tag{6}$$**Reminder**: The general methodology to build a Neural Network is to: 1. Define the neural network structure ( of input units, of hidden units, etc). 2. Initialize the model's parameters 3. Loop: - Implement forward propagation - Compute loss - Implement backward propagation to get the gradients - Update parameters (gradient descent)You often build helper functions to compute steps 1-3 and then merge them into one function we call `nn_model()`. Once you've built `nn_model()` and learnt the right parameters, you can make predictions on new data. 4.1 - Defining the neural network structure **Exercise**: Define three variables: - n_x: the size of the input layer - n_h: the size of the hidden layer (set this to 4) - n_y: the size of the output layer**Hint**: Use shapes of X and Y to find n_x and n_y. Also, hard code the hidden layer size to be 4.
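Before coding the helpers below, a quick standalone shape check of equations (1)-(4) can help build intuition (the sizes here are made up and are not the ones used for the flower dataset):

```python
import numpy as np
n_x, n_h, n_y, m = 2, 4, 1, 5                 # made-up layer sizes and number of examples
W1, b1 = np.random.randn(n_h, n_x), np.zeros((n_h, 1))
W2, b2 = np.random.randn(n_y, n_h), np.zeros((n_y, 1))
X = np.random.randn(n_x, m)
A1 = np.tanh(W1 @ X + b1)                     # shape (n_h, m)
A2 = 1 / (1 + np.exp(-(W2 @ A1 + b2)))        # shape (n_y, m)
print(A1.shape, A2.shape)                     # (4, 5) (1, 5)
```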
###Code
# GRADED FUNCTION: layer_sizes
def layer_sizes(X, Y):
"""
Arguments:
X -- input dataset of shape (input size, number of examples)
Y -- labels of shape (output size, number of examples)
Returns:
n_x -- the size of the input layer
n_h -- the size of the hidden layer
n_y -- the size of the output layer
"""
### START CODE HERE ### (≈ 3 lines of code)
n_x = X.shape[0] # size of input layer
n_h = 4
n_y = Y.shape[0] # size of output layer
### END CODE HERE ###
return (n_x, n_h, n_y)
X_assess, Y_assess = layer_sizes_test_case()
(n_x, n_h, n_y) = layer_sizes(X_assess, Y_assess)
print("The size of the input layer is: n_x = " + str(n_x))
print("The size of the hidden layer is: n_h = " + str(n_h))
print("The size of the output layer is: n_y = " + str(n_y))
###Output
The size of the input layer is: n_x = 5
The size of the hidden layer is: n_h = 4
The size of the output layer is: n_y = 2
###Markdown
**Expected Output** (these are not the sizes you will use for your network, they are just used to assess the function you've just coded). **n_x** 5 **n_h** 4 **n_y** 2 4.2 - Initialize the model's parameters **Exercise**: Implement the function `initialize_parameters()`.**Instructions**:- Make sure your parameters' sizes are right. Refer to the neural network figure above if needed.- You will initialize the weights matrices with random values. - Use: `np.random.randn(a,b) * 0.01` to randomly initialize a matrix of shape (a,b).- You will initialize the bias vectors as zeros. - Use: `np.zeros((a,b))` to initialize a matrix of shape (a,b) with zeros.
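A side note on the `* 0.01` factor (not required for the exercise): large initial weights push tanh into saturation, where its derivative $1 - \tanh(z)^2$ is nearly zero, which slows learning. A small standalone check with illustrative values:

```python
import numpy as np
np.random.seed(0)
z_small = np.random.randn(5) * 0.01   # small init keeps tanh near its linear region
z_large = np.random.randn(5) * 10     # large init pushes tanh into saturation
print(1 - np.tanh(z_small) ** 2)      # derivatives close to 1
print(1 - np.tanh(z_large) ** 2)      # derivatives close to 0
```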
###Code
# GRADED FUNCTION: initialize_parameters
def initialize_parameters(n_x, n_h, n_y):
"""
Argument:
n_x -- size of the input layer
n_h -- size of the hidden layer
n_y -- size of the output layer
Returns:
params -- python dictionary containing your parameters:
W1 -- weight matrix of shape (n_h, n_x)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (n_y, n_h)
b2 -- bias vector of shape (n_y, 1)
"""
np.random.seed(2) # we set up a seed so that your output matches ours although the initialization is random.
### START CODE HERE ### (≈ 4 lines of code)
#
W1 = np.random.randn(n_h, n_x) * 0.01
b1 = np.zeros((n_h, 1))
W2 = np.random.randn(n_y, n_h) * 0.01
b2 = np.zeros((n_y, 1))
### END CODE HERE ###
assert (W1.shape == (n_h, n_x))
assert (b1.shape == (n_h, 1))
assert (W2.shape == (n_y, n_h))
assert (b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
n_x, n_h, n_y = initialize_parameters_test_case()
parameters = initialize_parameters(n_x, n_h, n_y)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
###Output
W1 = [[-0.00416758 -0.00056267]
[-0.02136196 0.01640271]
[-0.01793436 -0.00841747]
[ 0.00502881 -0.01245288]]
b1 = [[ 0.]
[ 0.]
[ 0.]
[ 0.]]
W2 = [[-0.01057952 -0.00909008 0.00551454 0.02292208]]
b2 = [[ 0.]]
###Markdown
**Expected Output**: **W1** [[-0.00416758 -0.00056267] [-0.02136196 0.01640271] [-0.01793436 -0.00841747] [ 0.00502881 -0.01245288]] **b1** [[ 0.] [ 0.] [ 0.] [ 0.]] **W2** [[-0.01057952 -0.00909008 0.00551454 0.02292208]] **b2** [[ 0.]] 4.3 - The Loop **Question**: Implement `forward_propagation()`.**Instructions**:- Look above at the mathematical representation of your classifier.- You can use the function `sigmoid()`. It is built-in (imported) in the notebook.- You can use the function `np.tanh()`. It is part of the numpy library.- The steps you have to implement are: 1. Retrieve each parameter from the dictionary "parameters" (which is the output of `initialize_parameters()`) by using `parameters[".."]`. 2. Implement Forward Propagation. Compute $Z^{[1]}, A^{[1]}, Z^{[2]}$ and $A^{[2]}$ (the vector of all your predictions on all the examples in the training set).- Values needed in the backpropagation are stored in "`cache`". The `cache` will be given as an input to the backpropagation function.
###Code
# GRADED FUNCTION: forward_propagation
def forward_propagation(X, parameters):
"""
Argument:
X -- input data of size (n_x, m)
parameters -- python dictionary containing your parameters (output of initialization function)
Returns:
A2 -- The sigmoid output of the second activation
cache -- a dictionary containing "Z1", "A1", "Z2" and "A2"
"""
# Retrieve each parameter from the dictionary "parameters"
### START CODE HERE ### (≈ 4 lines of code)
W1 = parameters['W1']
b1 = parameters['b1']
W2 = parameters['W2']
b2 = parameters['b2']
### END CODE HERE ###
# Implement Forward Propagation to calculate A2 (probabilities)
### START CODE HERE ### (≈ 4 lines of code)
Z1 = np.dot(W1, X) + b1
A1 = np.tanh(Z1)
Z2 = np.dot(W2, A1) + b2
A2 = sigmoid(Z2)
### END CODE HERE ###
assert(A2.shape == (1, X.shape[1]))
cache = {"Z1": Z1,
"A1": A1,
"Z2": Z2,
"A2": A2}
return A2, cache
X_assess, parameters = forward_propagation_test_case()
A2, cache = forward_propagation(X_assess, parameters)
# Note: we use the mean here just to make sure that your output matches ours.
print(np.mean(cache['Z1']) ,np.mean(cache['A1']),np.mean(cache['Z2']),np.mean(cache['A2']))
###Output
-0.000499755777742 -0.000496963353232 0.000438187450959 0.500109546852
###Markdown
**Expected Output**: -0.000499755777742 -0.000496963353232 0.000438187450959 0.500109546852 Now that you have computed $A^{[2]}$ (in the Python variable "`A2`"), which contains $a^{[2](i)}$ for every example, you can compute the cost function as follows:$$J = - \frac{1}{m} \sum\limits_{i = 0}^{m} \large{(} \small y^{(i)}\log\left(a^{[2] (i)}\right) + (1-y^{(i)})\log\left(1- a^{[2] (i)}\right) \large{)} \small\tag{13}$$**Exercise**: Implement `compute_cost()` to compute the value of the cost $J$.**Instructions**:- There are many ways to implement the cross-entropy loss. To help you, we give you how we would have implemented$- \sum\limits_{i=0}^{m} y^{(i)}\log(a^{[2](i)})$:```pythonlogprobs = np.multiply(np.log(A2),Y)cost = - np.sum(logprobs) no need to use a for loop!```(you can use either `np.multiply()` and then `np.sum()` or directly `np.dot()`).
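As a sanity check, here is equation (13) evaluated on made-up values for three examples; you can compare the number against your `compute_cost` once it is written:

```python
import numpy as np
A2 = np.array([[0.9, 0.2, 0.7]])   # made-up predicted probabilities
Y  = np.array([[1,   0,   1  ]])   # made-up labels
cost = -np.mean(Y * np.log(A2) + (1 - Y) * np.log(1 - A2))
print(cost)                        # approximately 0.2284
```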
###Code
# GRADED FUNCTION: compute_cost
def compute_cost(A2, Y, parameters):
"""
Computes the cross-entropy cost given in equation (13)
Arguments:
A2 -- The sigmoid output of the second activation, of shape (1, number of examples)
Y -- "true" labels vector of shape (1, number of examples)
parameters -- python dictionary containing your parameters W1, b1, W2 and b2
Returns:
cost -- cross-entropy cost given equation (13)
"""
m = Y.shape[1] # number of example
# Retrieve W1 and W2 from parameters
### START CODE HERE ### (≈ 2 lines of code)
W1 = parameters['W1']
W2 = parameters['W2']
### END CODE HERE ###
# Compute the cross-entropy cost
### START CODE HERE ### (≈ 2 lines of code)
logprobs = np.multiply(np.log(A2),Y)
cost = - np.sum(np.multiply(np.log(A2), Y) + np.multiply(np.log(1. - A2), 1. - Y)) / m
#cost = np.sum(Y * np.log(A2) + (1 - Y) * np.log(1 - A2))/(-m)
### END CODE HERE ###
cost = np.squeeze(cost) # makes sure cost is the dimension we expect.
# E.g., turns [[17]] into 17
assert(isinstance(cost, float))
return cost
A2, Y_assess, parameters = compute_cost_test_case()
print("cost = " + str(compute_cost(A2, Y_assess, parameters)))
###Output
cost = 0.692919893776
###Markdown
**Expected Output**: **cost** 0.692919893776 Using the cache computed during forward propagation, you can now implement backward propagation.**Question**: Implement the function `backward_propagation()`.**Instructions**:Backpropagation is usually the hardest (most mathematical) part in deep learning. To help you, here again is the slide from the lecture on backpropagation. You'll want to use the six equations on the right of this slide, since you are building a vectorized implementation. <!--$\frac{\partial \mathcal{J} }{ \partial z_{2}^{(i)} } = \frac{1}{m} (a^{[2](i)} - y^{(i)})$$\frac{\partial \mathcal{J} }{ \partial W_2 } = \frac{\partial \mathcal{J} }{ \partial z_{2}^{(i)} } a^{[1] (i) T} $$\frac{\partial \mathcal{J} }{ \partial b_2 } = \sum_i{\frac{\partial \mathcal{J} }{ \partial z_{2}^{(i)}}}$$\frac{\partial \mathcal{J} }{ \partial z_{1}^{(i)} } = W_2^T \frac{\partial \mathcal{J} }{ \partial z_{2}^{(i)} } * ( 1 - a^{[1] (i) 2}) $$\frac{\partial \mathcal{J} }{ \partial W_1 } = \frac{\partial \mathcal{J} }{ \partial z_{1}^{(i)} } X^T $$\frac{\partial \mathcal{J} _i }{ \partial b_1 } = \sum_i{\frac{\partial \mathcal{J} }{ \partial z_{1}^{(i)}}}$- Note that $*$ denotes elementwise multiplication.- The notation you will use is common in deep learning coding: - dW1 = $\frac{\partial \mathcal{J} }{ \partial W_1 }$ - db1 = $\frac{\partial \mathcal{J} }{ \partial b_1 }$ - dW2 = $\frac{\partial \mathcal{J} }{ \partial W_2 }$ - db2 = $\frac{\partial \mathcal{J} }{ \partial b_2 }$ !-->- Tips: - To compute dZ1 you'll need to compute $g^{[1]'}(Z^{[1]})$. Since $g^{[1]}(.)$ is the tanh activation function, if $a = g^{[1]}(z)$ then $g^{[1]'}(z) = 1-a^2$. So you can compute $g^{[1]'}(Z^{[1]})$ using `(1 - np.power(A1, 2))`.
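For reference, the vectorized forms of those six equations, exactly as implemented in the code below ($*$ is element-wise multiplication):

$$\begin{aligned}dZ^{[2]} &= A^{[2]} - Y\\ dW^{[2]} &= \frac{1}{m}\, dZ^{[2]} A^{[1]T}\\ db^{[2]} &= \frac{1}{m}\sum_i dZ^{[2](i)}\\ dZ^{[1]} &= W^{[2]T} dZ^{[2]} * \left(1 - (A^{[1]})^{2}\right)\\ dW^{[1]} &= \frac{1}{m}\, dZ^{[1]} X^{T}\\ db^{[1]} &= \frac{1}{m}\sum_i dZ^{[1](i)}\end{aligned}$$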
###Code
# GRADED FUNCTION: backward_propagation
def backward_propagation(parameters, cache, X, Y):
"""
Implement the backward propagation using the instructions above.
Arguments:
parameters -- python dictionary containing our parameters
cache -- a dictionary containing "Z1", "A1", "Z2" and "A2".
X -- input data of shape (2, number of examples)
Y -- "true" labels vector of shape (1, number of examples)
Returns:
grads -- python dictionary containing your gradients with respect to different parameters
"""
m = X.shape[1]
# First, retrieve W1 and W2 from the dictionary "parameters".
### START CODE HERE ### (≈ 2 lines of code)
W1 = parameters['W1']
W2 = parameters['W2']
### END CODE HERE ###
# Retrieve also A1 and A2 from dictionary "cache".
### START CODE HERE ### (≈ 2 lines of code)
A1 = cache['A1']
A2 = cache['A2']
### END CODE HERE ###
# Backward propagation: calculate dW1, db1, dW2, db2.
### START CODE HERE ### (≈ 6 lines of code, corresponding to 6 equations on slide above)
dZ2 = A2 - Y
dW2 = np.dot(dZ2, A1.T) / m
db2 = np.sum(dZ2, axis = 1, keepdims = True) / m
dZ1 = np.dot(W2.T, dZ2) * (1 - A1**2)
dW1 = np.dot(dZ1, X.T) / m
db1 = np.sum(dZ1, axis = 1, keepdims = True) / m
### END CODE HERE ###
grads = {"dW1": dW1,
"db1": db1,
"dW2": dW2,
"db2": db2}
return grads
parameters, cache, X_assess, Y_assess = backward_propagation_test_case()
grads = backward_propagation(parameters, cache, X_assess, Y_assess)
print ("dW1 = "+ str(grads["dW1"]))
print ("db1 = "+ str(grads["db1"]))
print ("dW2 = "+ str(grads["dW2"]))
print ("db2 = "+ str(grads["db2"]))
###Output
dW1 = [[ 0.01018708 -0.00708701]
[ 0.00873447 -0.0060768 ]
[-0.00530847 0.00369379]
[-0.02206365 0.01535126]]
db1 = [[-0.00069728]
[-0.00060606]
[ 0.000364 ]
[ 0.00151207]]
dW2 = [[ 0.00363613 0.03153604 0.01162914 -0.01318316]]
db2 = [[ 0.06589489]]
###Markdown
**Expected output**: **dW1** [[ 0.01018708 -0.00708701] [ 0.00873447 -0.0060768 ] [-0.00530847 0.00369379] [-0.02206365 0.01535126]] **db1** [[-0.00069728] [-0.00060606] [ 0.000364 ] [ 0.00151207]] **dW2** [[ 0.00363613 0.03153604 0.01162914 -0.01318316]] **db2** [[ 0.06589489]] **Question**: Implement the update rule. Use gradient descent. You have to use (dW1, db1, dW2, db2) in order to update (W1, b1, W2, b2).**General gradient descent rule**: $ \theta = \theta - \alpha \frac{\partial J }{ \partial \theta }$ where $\alpha$ is the learning rate and $\theta$ represents a parameter.**Illustration**: The gradient descent algorithm with a good learning rate (converging) and a bad learning rate (diverging). Images courtesy of Adam Harley.
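To see why the learning rate matters, here is the update rule applied to the toy objective $J(\theta) = \theta^2$ (a standalone illustration, not part of the assignment):

```python
# For J(theta) = theta**2 the gradient is 2*theta, so the update is theta -= alpha * 2*theta
for alpha in (0.1, 1.1):          # a small step converges, a too-large step diverges
    theta = 1.0
    for _ in range(10):
        theta -= alpha * 2 * theta
    print('alpha=%.1f -> theta=%.4f' % (alpha, theta))
```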
###Code
# GRADED FUNCTION: update_parameters
def update_parameters(parameters, grads, learning_rate = 1.2):
"""
Updates parameters using the gradient descent update rule given above
Arguments:
parameters -- python dictionary containing your parameters
grads -- python dictionary containing your gradients
Returns:
parameters -- python dictionary containing your updated parameters
"""
# Retrieve each parameter from the dictionary "parameters"
### START CODE HERE ### (≈ 4 lines of code)
W1 = parameters['W1']
b1 = parameters['b1']
W2 = parameters['W2']
b2 = parameters['b2']
### END CODE HERE ###
# Retrieve each gradient from the dictionary "grads"
### START CODE HERE ### (≈ 4 lines of code)
dW1 = grads['dW1']
db1 = grads['db1']
dW2 = grads['dW2']
db2 = grads['db2']
## END CODE HERE ###
# Update rule for each parameter
### START CODE HERE ### (≈ 4 lines of code)
W1 = W1 - learning_rate * dW1
b1 = b1 - learning_rate * db1
W2 = W2 - learning_rate * dW2
b2 = b2 - learning_rate * db2
### END CODE HERE ###
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
parameters, grads = update_parameters_test_case()
parameters = update_parameters(parameters, grads)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
###Output
W1 = [[-0.00643025 0.01936718]
[-0.02410458 0.03978052]
[-0.01653973 -0.02096177]
[ 0.01046864 -0.05990141]]
b1 = [[ -1.02420756e-06]
[ 1.27373948e-05]
[ 8.32996807e-07]
[ -3.20136836e-06]]
W2 = [[-0.01041081 -0.04463285 0.01758031 0.04747113]]
b2 = [[ 0.00010457]]
###Markdown
**Expected Output**: **W1** [[-0.00643025 0.01936718] [-0.02410458 0.03978052] [-0.01653973 -0.02096177] [ 0.01046864 -0.05990141]] **b1** [[ -1.02420756e-06] [ 1.27373948e-05] [ 8.32996807e-07] [ -3.20136836e-06]] **W2** [[-0.01041081 -0.04463285 0.01758031 0.04747113]] **b2** [[ 0.00010457]] 4.4 - Integrate parts 4.1, 4.2 and 4.3 in nn_model() **Question**: Build your neural network model in `nn_model()`.**Instructions**: The neural network model has to use the previous functions in the right order.
###Code
# GRADED FUNCTION: nn_model
def nn_model(X, Y, n_h, num_iterations = 10000, print_cost=False):
"""
Arguments:
X -- dataset of shape (2, number of examples)
Y -- labels of shape (1, number of examples)
n_h -- size of the hidden layer
num_iterations -- Number of iterations in gradient descent loop
print_cost -- if True, print the cost every 1000 iterations
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
np.random.seed(3)
n_x = layer_sizes(X, Y)[0]
n_y = layer_sizes(X, Y)[2]
# Initialize parameters, then retrieve W1, b1, W2, b2. Inputs: "n_x, n_h, n_y". Outputs = "W1, b1, W2, b2, parameters".
### START CODE HERE ### (≈ 5 lines of code)
parameters = initialize_parameters(n_x, n_h, n_y)
W1 = parameters['W1']
b1 = parameters['b1']
W2 = parameters['W2']
b2 = parameters['b2']
### END CODE HERE ###
# Loop (gradient descent)
for i in range(0, num_iterations):
### START CODE HERE ### (≈ 4 lines of code)
# Forward propagation. Inputs: "X, parameters". Outputs: "A2, cache".
A2, cache = forward_propagation(X, parameters)
# Cost function. Inputs: "A2, Y, parameters". Outputs: "cost".
cost = compute_cost(A2, Y, parameters)
# Backpropagation. Inputs: "parameters, cache, X, Y". Outputs: "grads".
grads = backward_propagation(parameters, cache, X, Y)
# Gradient descent parameter update. Inputs: "parameters, grads". Outputs: "parameters".
parameters = update_parameters(parameters, grads)
### END CODE HERE ###
# Print the cost every 1000 iterations
if print_cost and i % 1000 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
return parameters
X_assess, Y_assess = nn_model_test_case()
parameters = nn_model(X_assess, Y_assess, 4, num_iterations=10000, print_cost=True)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
###Output
Cost after iteration 0: 0.692584
Cost after iteration 1000: -inf
###Markdown
**Expected Output**: **W1** [[-4.18494056 5.33220609] [-7.52989382 1.24306181] [-4.1929459 5.32632331] [ 7.52983719 -1.24309422]] **b1** [[ 2.32926819] [ 3.79458998] [ 2.33002577] [-3.79468846]] **W2** [[-6033.83672146 -6008.12980822 -6033.10095287 6008.06637269]] **b2** [[-52.66607724]] 4.5 Predictions**Question**: Use your model to predict by building predict().Use forward propagation to predict results.**Reminder**: predictions = $y_{prediction} = \mathbb 1 \text{{activation > 0.5}} = \begin{cases} 1 & \text{if}\ activation > 0.5 \\ 0 & \text{otherwise} \end{cases}$ As an example, if you would like to set the entries of a matrix X to 0 and 1 based on a threshold you would do: ```X_new = (X > threshold)```
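Following the hint above, the thresholding step can also be written as a single vectorized expression; a tiny standalone example with made-up activations:

```python
import numpy as np
A2 = np.array([[0.1, 0.6, 0.8, 0.4]])   # example activations
predictions = (A2 > 0.5).astype(int)    # array([[0, 1, 1, 0]])
print(predictions)
```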
###Code
# GRADED FUNCTION: predict
def predict(parameters, X):
"""
Using the learned parameters, predicts a class for each example in X
Arguments:
parameters -- python dictionary containing your parameters
X -- input data of size (n_x, m)
Returns
predictions -- vector of predictions of our model (red: 0 / blue: 1)
"""
# Computes probabilities using forward propagation, and classifies to 0/1 using 0.5 as the threshold.
### START CODE HERE ### (≈ 2 lines of code)
A2, cache = forward_propagation(X, parameters)
# Y_prediction = np.zeros((1, X.shape[1]))
predictions = np.array([0 if i <= 0.5 else 1 for i in np.squeeze(A2)])
### END CODE HERE ###
return predictions
parameters, X_assess = predict_test_case()
predictions = predict(parameters, X_assess)
print("predictions mean = " + str(np.mean(predictions)))
###Output
predictions mean = 0.666666666667
###Markdown
**Expected Output**: **predictions mean** 0.666666666667 It is time to run the model and see how it performs on a planar dataset. Run the following code to test your model with a single hidden layer of $n_h$ hidden units.
###Code
# Build a model with a n_h-dimensional hidden layer
parameters = nn_model(X, Y, n_h = 4, num_iterations = 10000, print_cost=True)
# Plot the decision boundary
plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
plt.title("Decision Boundary for hidden layer size " + str(4))
###Output
Cost after iteration 0: 0.693048
Cost after iteration 1000: 0.288083
Cost after iteration 2000: 0.254385
Cost after iteration 3000: 0.233864
Cost after iteration 4000: 0.226792
Cost after iteration 5000: 0.222644
Cost after iteration 6000: 0.219731
Cost after iteration 7000: 0.217504
Cost after iteration 8000: 0.219471
Cost after iteration 9000: 0.218612
###Markdown
**Expected Output**: **Cost after iteration 9000** 0.218607
###Code
# Print accuracy
predictions = predict(parameters, X)
print ('Accuracy: %d' % float((np.dot(Y,predictions.T) + np.dot(1-Y,1-predictions.T))/float(Y.size)*100) + '%')
###Output
Accuracy: 90%
###Markdown
**Expected Output**: **Accuracy** 90% Accuracy is really high compared to Logistic Regression. The model has learnt the leaf patterns of the flower! Neural networks are able to learn even highly non-linear decision boundaries, unlike logistic regression. Now, let's try out several hidden layer sizes. 4.6 - Tuning hidden layer size (optional/ungraded exercise) Run the following code. It may take 1-2 minutes. You will observe different behaviors of the model for various hidden layer sizes.
###Code
# This may take about 2 minutes to run
plt.figure(figsize=(16, 32))
hidden_layer_sizes = [1, 2, 3, 4, 5, 20, 50]
for i, n_h in enumerate(hidden_layer_sizes):
plt.subplot(5, 2, i+1)
plt.title('Hidden Layer of size %d' % n_h)
parameters = nn_model(X, Y, n_h, num_iterations = 5000)
plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
predictions = predict(parameters, X)
accuracy = float((np.dot(Y,predictions.T) + np.dot(1-Y,1-predictions.T))/float(Y.size)*100)
print ("Accuracy for {} hidden units: {} %".format(n_h, accuracy))
###Output
Accuracy for 1 hidden units: 67.5 %
Accuracy for 2 hidden units: 67.25 %
Accuracy for 3 hidden units: 90.75 %
Accuracy for 4 hidden units: 90.5 %
Accuracy for 5 hidden units: 91.25 %
Accuracy for 20 hidden units: 90.0 %
Accuracy for 50 hidden units: 90.25 %
###Markdown
**Interpretation**:- The larger models (with more hidden units) are able to fit the training set better, until eventually the largest models overfit the data. - The best hidden layer size seems to be around n_h = 5. Indeed, a value around here seems to fit the data well without also incurring noticeable overfitting.- You will also learn later about regularization, which lets you use very large models (such as n_h = 50) without much overfitting. **Optional questions**:**Note**: Remember to submit the assignment by clicking the blue "Submit Assignment" button at the upper-right. Some optional/ungraded questions that you can explore if you wish: - What happens when you change the tanh activation for a sigmoid activation or a ReLU activation?- Play with the learning_rate. What happens?- What if we change the dataset? (See part 5 below!) **You've learnt to:**- Build a complete neural network with a hidden layer- Make good use of a non-linear unit- Implemented forward propagation and backpropagation, and trained a neural network- See the impact of varying the hidden layer size, including overfitting. Nice work! 5) Performance on other datasets If you want, you can rerun the whole notebook (minus the dataset part) for each of the following datasets.
###Code
# Datasets
noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()
datasets = {"noisy_circles": noisy_circles,
"noisy_moons": noisy_moons,
"blobs": blobs,
"gaussian_quantiles": gaussian_quantiles}
### START CODE HERE ### (choose your dataset)
dataset = "noisy_moons"
### END CODE HERE ###
X, Y = datasets[dataset]
X, Y = X.T, Y.reshape(1, Y.shape[0])
# make blobs binary
if dataset == "blobs":
Y = Y%2
# Visualize the data
plt.scatter(X[0, :], X[1, :], c=Y, s=40, cmap=plt.cm.Spectral);
###Output
_____no_output_____ |
_ipynb_checkpoints/Section3RStudio-checkpoint.ipynb | ###Markdown
> This is meant to be used in RStudio not CoCalc! Open your Rproj fileFirst, open your R Project file (library_carpentry.Rproj) created in the Before We Start lesson.If you did not complete that step, do the following:- Under the File menu, click on New project, choose New directory, then New project- Enter the name library_carpentry for this new folder (or “directory”). This will be your working directory for the rest of the day.- Click on Create project- Create a new file where we will type our scripts. Go to File > New File > R script. Click the save icon on your toolbar and save your script as “script.R”. Presentation of the dataThis data was downloaded from the University of Houston–Clear Lake Integrated Library System in 2018. It is a relatively random sample of books from the catalog. It consists of 10,000 observations of 11 variables.These variables are:- CALL...BIBLIO. : Bibliographic call number. Most of these are cataloged with the Library of Congress classification, but there are also items cataloged in the Dewey Decimal System (including fiction and non-fiction), and Superintendent of Documents call numbers. Character.- X245.ab : The title and remainder of title. Exported from MARC tag 245|ab fields. Separated by a | pipe character. Character.- X245.c : The author (statement of responsibility). Exported from MARC tag 245 c. Character.- TOT.CHKOUT : The total number of checkouts. Integer.- LOUTDATE : The last date the item was checked out. Date. YYYY-MM-DDThh:mmTZD- SUBJECT : Bibliographic subject in Library of Congress Subject Headings. Separated by a | pipe character. Character.- ISN : ISBN or ISSN. Exported from MARC field 020 a. Character- CALL...ITEM : Item call number. Most of these are NA but there are some secondary call numbers.- X008.Date.One : Date of publication. Date. YYYY- BCODE2 : Item format. Character.- BCODE1 Sub-collection. Character. Getting data into RWays to get data into RIn order to use your data in R, you must import it and turn it into an R object. There are many ways to get data into R.- Manually: You can manually create it using the data.frame() function in Base R, or the tibble() function in the tidyverse.- Import it from a file Below is a very incomplete list - Text: TXT (readLines() function) - Tabular data: CSV, TSV (read.table() function or readr package) - Excel: XLSX (xlsx package) - Google sheets: (googlesheets package) - Statistics program: SPSS, SAS (haven package) - Databases: MySQL (RMySQL package)- Gather it from the web: You can connect to webpages, servers, or APIs directly from within R, or you can create a data scraped from HTML webpages using the rvest package. For example - the Twitter API with twitteR - Crossref data with rcrossref - World Bank’s World Development Indicators with WDI. Organizing your working directoryUsing a consistent folder structure across your projects will help keep things organized and make it easy to find/file things in the future. This can be especially helpful when you have multiple projects. In general, you might create directories (folders) for scripts, data, and documents. Here are some examples of suggested directories: - data/ Use this folder to store your raw data and intermediate datasets. For the sake of transparency and provenance, you should always keep a copy of your raw data accessible and do as much of your data cleanup and preprocessing programmatically (i.e., with scripts, rather than manually) as possible. 
- data_output/ When you need to modify your raw data, it might be useful to store the modified versions of the datasets in a different folder. - documents/ Used for outlines, drafts, and other text. - fig_output/ This folder can store the graphics that are generated by your scripts. - scripts/ A place to keep your R scripts for different analyses or plotting.You may want additional directories or subdirectories depending on your project needs, but these should form the backbone of your working directory. The working directoryThe working directory is an important concept to understand. It is the place on your computer where R will look for and save files. When you write code for your project, your scripts should refer to files in relation to the root of your working directory and only to files within this structure.Using RStudio projects makes this easy and ensures that your working directory is set up properly. If you need to check it, you can use getwd(). If for some reason your working directory is not what it should be, you can change it in the RStudio interface by navigating in the file browser to where your working directory should be, clicking on the blue gear icon “More”, and selecting “Set As Working Directory”. Alternatively, you can use setwd("/path/to/working/directory") to reset your working directory. However, your scripts should not include this line, because it will fail on someone else’s computer.> Setting your working directory with setwd() Some points to note about setting your working directory: The directory must be in quotation marks. On Windows computers, directories in file paths are separated with a backslash \. However, in R, you must use a forward slash /. You can copy and paste from the Windows Explorer window directly into R and use find/replace (Ctrl/Cmd + F) in R Studio to replace all backslashes with forward slashes. On Mac computers, open the Finder and navigate to the directory you wish to set as your working directory. Right click on that folder and press the options key on your keyboard. The ‘Copy “Folder Name”’ option will transform into ‘Copy “Folder Name” as Pathname. It will copy the path to the folder to the clipboard. You can then paste this into your setwd() function. You do not need to replace backslashes with forward slashes. After you set your working directory, you can use ./ to represent it. So if you have a folder in your directory called data, you can use read.csv(“./data”) to represent that sub-directory. Downloading the data and getting set upNow that you have set your working directory, we will create our folder structure using the dir.create() function.For this lesson we will use the following folders in our working directory: data/, data_output/ and fig_output/. Let’s write them all in lowercase to be consistent. We can create them using the RStudio interface by clicking on the “New Folder” button in the file pane (bottom right), or directly from R by typing at console:
###Code
dir.create("data")
dir.create("data_output")
dir.create("fig_output")
###Output
_____no_output_____
###Markdown
Go to the Figshare page for this curriculum and download the dataset called “books.csv”. The direct download link is: https://ndownloader.figshare.com/files/22031487. Place this downloaded file in the data/ you just created. Alternatively, you can do this directly from R by copying and pasting this in your terminal
###Code
download.file("https://ndownloader.figshare.com/files/22031487",
"data/books.csv", mode = "wb")
###Output
_____no_output_____ |
Kaggle/Google Analytics Customer Revenue Prediction/Deep learning & Keras - GA Revenue Prediction.ipynb | ###Markdown
Deep Learning with Keras - Google Analytics Customer Revenue Prediction* Note: this is just a starting point, there's a lot of work to be done.* A beginner version with [LGBM](https://www.kaggle.com/dimitreoliveira/lgbm-google-store-revenue-prediction) Dependencies
###Code
import os
import json
import datetime
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from pandas.io.json import json_normalize
from sklearn.metrics import mean_squared_error
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Dropout, BatchNormalization, Activation, Dropout
from keras import optimizers
%matplotlib inline
pd.options.display.max_columns = 999
###Output
_____no_output_____
###Markdown
Auxiliary functions
###Code
def add_time_features(df):
df['date'] = pd.to_datetime(df['date'], format='%Y%m%d', errors='ignore')
df['year'] = df['date'].apply(lambda x: x.year)
df['month'] = df['date'].apply(lambda x: x.month)
df['day'] = df['date'].apply(lambda x: x.day)
df['weekday'] = df['date'].apply(lambda x: x.weekday())
return df
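# (Editorial note) An equivalent and usually faster vectorized alternative to the
# .apply calls above is the .dt accessor, e.g. df['year'] = df['date'].dt.year,
# provided 'date' was successfully parsed to datetime (errors='ignore' above can
# silently leave it as the original dtype if parsing fails).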
def plot_metrics(loss, val_loss):
fig, (ax1) = plt.subplots(1, 1, sharex='col', figsize=(20,7))
ax1.plot(loss, label='Train loss')
ax1.plot(val_loss, label='Validation loss')
ax1.legend(loc='best')
ax1.set_title('Loss')
plt.xlabel('Epochs')
###Output
_____no_output_____
###Markdown
Function to load and convert files borrowed from this [kernel](https://www.kaggle.com/julian3833/1-quick-start-read-csv-and-flatten-json-fields/notebook), thanks!
###Code
def load_df(csv_path='../input/train.csv', nrows=None):
JSON_COLUMNS = ['device', 'geoNetwork', 'totals', 'trafficSource']
df = pd.read_csv(csv_path, dtype={'fullVisitorId': 'str'}, nrows=nrows)
for column in JSON_COLUMNS:
df = df.join(pd.DataFrame(df.pop(column).apply(pd.io.json.loads).values.tolist(), index=df.index))
return df
train = load_df("../input/train.csv")
test = load_df("../input/test.csv")
###Output
_____no_output_____
###Markdown
About the train data
###Code
train.head()
###Output
_____no_output_____
###Markdown
This is what our data looks like
###Code
print('TRAIN SET')
print('Rows: %s' % train.shape[0])
print('Columns: %s' % train.shape[1])
print('Features: %s' % train.columns.values)
print()
print('TEST SET')
print('Rows: %s' % test.shape[0])
print('Columns: %s' % test.shape[1])
print('Features: %s' % test.columns.values)
###Output
TRAIN SET
Rows: 903653
Columns: 50
Features: ['channelGrouping' 'date' 'fullVisitorId' 'sessionId'
'socialEngagementType' 'visitId' 'visitNumber' 'visitStartTime' 'browser'
'browserSize' 'browserVersion' 'deviceCategory' 'flashVersion' 'isMobile'
'language' 'mobileDeviceBranding' 'mobileDeviceInfo'
'mobileDeviceMarketingName' 'mobileDeviceModel' 'mobileInputSelector'
'operatingSystem' 'operatingSystemVersion' 'screenColors'
'screenResolution' 'city' 'cityId' 'continent' 'country' 'latitude'
'longitude' 'metro' 'networkDomain' 'networkLocation' 'region'
'subContinent' 'bounces' 'hits' 'newVisits' 'pageviews'
'transactionRevenue' 'visits' 'adContent' 'adwordsClickInfo' 'campaign'
'campaignCode' 'isTrueDirect' 'keyword' 'medium' 'referralPath' 'source']
TEST SET
Rows: 804684
Columns: 48
Features: ['channelGrouping' 'date' 'fullVisitorId' 'sessionId'
'socialEngagementType' 'visitId' 'visitNumber' 'visitStartTime' 'browser'
'browserSize' 'browserVersion' 'deviceCategory' 'flashVersion' 'isMobile'
'language' 'mobileDeviceBranding' 'mobileDeviceInfo'
'mobileDeviceMarketingName' 'mobileDeviceModel' 'mobileInputSelector'
'operatingSystem' 'operatingSystemVersion' 'screenColors'
'screenResolution' 'city' 'cityId' 'continent' 'country' 'latitude'
'longitude' 'metro' 'networkDomain' 'networkLocation' 'region'
'subContinent' 'bounces' 'hits' 'newVisits' 'pageviews' 'visits'
'adContent' 'adwordsClickInfo' 'campaign' 'isTrueDirect' 'keyword'
'medium' 'referralPath' 'source']
###Markdown
Feature engineering
###Code
train = add_time_features(train)
test = add_time_features(test)
# Convert target feature to 'float' type.
train["transactionRevenue"] = train["transactionRevenue"].astype('float')
train['hits'] = train['hits'].astype(float)
test['hits'] = test['hits'].astype(float)
train['pageviews'] = train['pageviews'].astype(float)
test['pageviews'] = test['pageviews'].astype(float)
###Output
_____no_output_____
###Markdown
Aggregated features
###Code
# Train
gp_fullVisitorId_train = train.groupby(['fullVisitorId']).agg('sum')
gp_fullVisitorId_train['fullVisitorId'] = gp_fullVisitorId_train.index
gp_fullVisitorId_train['mean_hits_per_day'] = gp_fullVisitorId_train.groupby(['day'])['hits'].transform('mean')
gp_fullVisitorId_train['mean_pageviews_per_day'] = gp_fullVisitorId_train.groupby(['day'])['pageviews'].transform('mean')
gp_fullVisitorId_train['sum_hits_per_day'] = gp_fullVisitorId_train.groupby(['day'])['hits'].transform('sum')
gp_fullVisitorId_train['sum_pageviews_per_day'] = gp_fullVisitorId_train.groupby(['day'])['pageviews'].transform('sum')
gp_fullVisitorId_train = gp_fullVisitorId_train[['fullVisitorId', 'mean_hits_per_day', 'mean_pageviews_per_day', 'sum_hits_per_day', 'sum_pageviews_per_day']]
train = train.join(gp_fullVisitorId_train, on='fullVisitorId', how='inner', rsuffix='_')
train.drop(['fullVisitorId_'], axis=1, inplace=True)
# Test
gp_fullVisitorId_test = test.groupby(['fullVisitorId']).agg('sum')
gp_fullVisitorId_test['fullVisitorId'] = gp_fullVisitorId_test.index
gp_fullVisitorId_test['mean_hits_per_day'] = gp_fullVisitorId_test.groupby(['day'])['hits'].transform('mean')
gp_fullVisitorId_test['mean_pageviews_per_day'] = gp_fullVisitorId_test.groupby(['day'])['pageviews'].transform('mean')
gp_fullVisitorId_test['sum_hits_per_day'] = gp_fullVisitorId_test.groupby(['day'])['hits'].transform('sum')
gp_fullVisitorId_test['sum_pageviews_per_day'] = gp_fullVisitorId_test.groupby(['day'])['pageviews'].transform('sum')
gp_fullVisitorId_test = gp_fullVisitorId_test[['fullVisitorId', 'mean_hits_per_day', 'mean_pageviews_per_day', 'sum_hits_per_day', 'sum_pageviews_per_day']]
test = test.join(gp_fullVisitorId_test, on='fullVisitorId', how='inner', rsuffix='_')
test.drop(['fullVisitorId_'], axis=1, inplace=True)
###Output
_____no_output_____
###Markdown
Exploratory data analysis Let's take a look at our target value over time.
###Code
time_agg = train.groupby('date')['transactionRevenue'].agg(['count', 'sum'])
year_agg = train.groupby('year')['transactionRevenue'].agg(['sum'])
month_agg = train.groupby('month')['transactionRevenue'].agg(['sum'])
day_agg = train.groupby('day')['transactionRevenue'].agg(['sum'])
weekday_agg = train.groupby('weekday')['transactionRevenue'].agg(['count','sum'])
###Output
_____no_output_____
###Markdown
Here is sum of our tagert feature "transactionRevenue" through the time.
###Code
plt.figure(figsize=(20,7))
plt.ticklabel_format(axis='y', style='plain')
plt.ylabel('Sum transactionRevenue', fontsize=12)
plt.xlabel('Date', fontsize=12)
plt.scatter(time_agg.index.values, time_agg['sum'])
plt.show()
###Output
_____no_output_____
###Markdown
It seems we had more transactions in late 2016 and early 2017, so date features look like a good addition to our model. And here is the count of our target feature "transactionRevenue".
###Code
plt.figure(figsize=(20,7))
plt.ticklabel_format(axis='y', style='plain')
plt.ylabel('Frequency', fontsize=12)
plt.xlabel('Date', fontsize=12)
plt.scatter(time_agg.index.values, time_agg['count'])
plt.show()
###Output
_____no_output_____
###Markdown
Again we see a higher frequency in a similar time period. Let's take a look at the other time features.
###Code
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, figsize=(20,7))
ax1.scatter(year_agg.index.values, year_agg['sum'])
ax1.locator_params(nbins=2)
ax1.ticklabel_format(axis='y', style='plain')
ax1.set_xlabel('Year', fontsize=12)
ax2.scatter(month_agg.index.values, month_agg['sum'])
ax2.locator_params(nbins=12)
ax2.ticklabel_format(axis='y', style='plain')
ax2.set_xlabel('Month', fontsize=12)
ax3.scatter(day_agg.index.values, day_agg['sum'])
ax3.locator_params(nbins=10)
ax3.ticklabel_format(axis='y', style='plain')
ax3.set_xlabel('Day', fontsize=12)
ax4.scatter(weekday_agg.index.values, weekday_agg['sum'])
ax4.locator_params(nbins=7)
ax4.ticklabel_format(axis='y', style='plain')
ax4.set_xlabel('Weekday', fontsize=12)
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
About the engineered time features* Year: It seems transactions had a large increase from 2016 to 2017.* Month: Larger transactions in December seem fine, but I'm not sure why there are high values in April and August (maybe because of Easter in April, or the tax-free weekend / back-to-school season in August?).* Day: Nothing particularly important seems to be going on here, so this feature can probably be discarded.* Weekday: Something strange is going on here: it seems that weekends have fewer transactions? Then let's do some cleaning
###Code
# Drop stange 'dict' column
train = train.drop(['adwordsClickInfo'], axis=1)
test = test.drop(['adwordsClickInfo'], axis=1)
# Drop column that exists only in train data
train = train.drop(['campaignCode'], axis=1)
# Input missing transactionRevenue values
train["transactionRevenue"].fillna(0, inplace=True)
test_ids = test["fullVisitorId"].values
###Output
_____no_output_____
###Markdown
Drop unwanted columns
###Code
# Unwanted columns
unwanted_columns = ['fullVisitorId', 'sessionId', 'visitId', 'visitStartTime',
'browser', 'browserSize', 'browserVersion', 'flashVersion',
'mobileDeviceInfo', 'mobileDeviceMarketingName', 'mobileDeviceModel',
'mobileInputSelector', 'operatingSystemVersion', 'screenColors',
'metro','networkDomain', 'networkLocation', 'adContent', 'campaign',
'isTrueDirect', 'keyword', 'referralPath', 'source', 'operatingSystem', 'day']
train = train.drop(unwanted_columns, axis=1)
test = test.drop(unwanted_columns, axis=1)
# Constant columns
constant_columns = [c for c in train.columns if train[c].nunique()<=1]
print('Columns with constant values: ', constant_columns)
train = train.drop(constant_columns, axis=1)
test = test.drop(constant_columns, axis=1)
# Columns with more than 50% null data
high_null_columns = [c for c in train.columns if train[c].count()<=len(train) * 0.5]
print('Columns more than 50% null values: ', high_null_columns)
train = train.drop(high_null_columns, axis=1)
test = test.drop(high_null_columns, axis=1)
###Output
Columns with constant values: ['socialEngagementType', 'language', 'mobileDeviceBranding', 'screenResolution', 'cityId', 'latitude', 'longitude', 'bounces', 'newVisits', 'visits']
Columns more than 50% null values: []
###Markdown
This is our new data with some cleaning and engineering.
###Code
print('TRAIN SET')
print('Rows: %s' % train.shape[0])
print('Columns: %s' % train.shape[1])
print('Features: %s' % train.columns.values)
print()
print('TEST SET')
print('Rows: %s' % test.shape[0])
print('Columns: %s' % test.shape[1])
print('Features: %s' % test.columns.values)
train.head()
###Output
_____no_output_____
###Markdown
One-hot encode categorical data
###Code
categorical_features = ['isMobile', 'month', 'weekday']
train = pd.get_dummies(train, columns=categorical_features)
test = pd.get_dummies(test, columns=categorical_features)
# align both data sets (by outer join), to make they have the same amount of features,
# this is required because of the mismatched categorical values in train and test sets
train, test = train.align(test, join='outer', axis=1)
# replace the nan values added by align for 0
train.replace(to_replace=np.nan, value=0, inplace=True)
test.replace(to_replace=np.nan, value=0, inplace=True)
###Output
_____no_output_____
###Markdown
Split data into train and validation by date* This time-based split will result in approximately 85% train and 15% validation.
###Code
X_train = train[train['date']<=datetime.date(2017, 5, 31)]
X_val = train[train['date']>datetime.date(2017, 5, 31)]
# Get labels
Y_train = X_train['transactionRevenue'].values
Y_val = X_val['transactionRevenue'].values
X_train = X_train.drop(['transactionRevenue'], axis=1)
X_val = X_val.drop(['transactionRevenue'], axis=1)
test = test.drop(['transactionRevenue'], axis=1)
# Log transform the labels
Y_train = np.log1p(Y_train)
Y_val = np.log1p(Y_val)
reduce_features = ['city', 'medium', 'channelGrouping', 'region',
'subContinent', 'country', 'continent', 'deviceCategory',
'year', 'date']
X_train = X_train.drop(reduce_features, axis=1)
X_val = X_val.drop(reduce_features, axis=1)
test = test.drop(reduce_features, axis=1)
X_train.head()
###Output
_____no_output_____
###Markdown
Normalize the data
###Code
normalized_features = ['visitNumber', 'hits', 'pageviews',
'mean_hits_per_day', 'mean_pageviews_per_day',
'sum_hits_per_day', 'sum_pageviews_per_day']
# Normalize using Min-Max scaling
scaler = preprocessing.MinMaxScaler()
X_train[normalized_features] = scaler.fit_transform(X_train[normalized_features])
X_val[normalized_features] = scaler.transform(X_val[normalized_features])
test[normalized_features] = scaler.transform(test[normalized_features])
X_train.head()
###Output
_____no_output_____
###Markdown
Model* Now let's try some deep learning to model our data.
###Code
BATCH_SIZE = 128
EPOCHS = 50
LEARNING_RATE = 0.0003
model = Sequential()
model.add(Dense(256, kernel_initializer='glorot_normal', activation='relu', input_dim=X_train.shape[1]))
model.add(Dense(128, kernel_initializer='glorot_normal', activation='relu'))
model.add(Dense(1))
adam = optimizers.adam(lr=LEARNING_RATE)
model.compile(loss='mse', optimizer=adam)
print('Dataset size: %s' % X_train.shape[0])
print('Epochs: %s' % EPOCHS)
print('Learning rate: %s' % LEARNING_RATE)
print('Batch size: %s' % BATCH_SIZE)
print('Input dimension: %s' % X_train.shape[1])
print('Features used: %s' % X_train.columns.values)
model.summary()
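# (Editorial sketch, not in the original notebook) The validation loss in the training
# log below starts rising after roughly epoch 16 while the training loss keeps falling,
# which suggests overfitting. One option worth trying is Keras early stopping, e.g.:
# from keras.callbacks import EarlyStopping
# early_stop = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
# ...and then pass callbacks=[early_stop] to model.fit below.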
history = model.fit(x=X_train.values, y=Y_train, batch_size=BATCH_SIZE, epochs=EPOCHS,
verbose=1, validation_data=(X_val.values, Y_val))
###Output
Train on 765707 samples, validate on 137946 samples
Epoch 1/50
765707/765707 [==============================] - 25s 32us/step - loss: 3.2894 - val_loss: 3.7535
Epoch 2/50
765707/765707 [==============================] - 25s 32us/step - loss: 3.1254 - val_loss: 3.7109
Epoch 3/50
765707/765707 [==============================] - 24s 32us/step - loss: 3.0905 - val_loss: 3.7193
Epoch 4/50
765707/765707 [==============================] - 24s 32us/step - loss: 3.0741 - val_loss: 3.6586
Epoch 5/50
765707/765707 [==============================] - 24s 32us/step - loss: 3.0607 - val_loss: 3.6683
Epoch 6/50
765707/765707 [==============================] - 24s 32us/step - loss: 3.0525 - val_loss: 3.6947
Epoch 7/50
765707/765707 [==============================] - 25s 32us/step - loss: 3.0446 - val_loss: 3.6056
Epoch 8/50
765707/765707 [==============================] - 24s 32us/step - loss: 3.0356 - val_loss: 3.7445
Epoch 9/50
765707/765707 [==============================] - 25s 32us/step - loss: 3.0281 - val_loss: 3.6146
Epoch 10/50
765707/765707 [==============================] - 24s 32us/step - loss: 3.0206 - val_loss: 3.6358
Epoch 11/50
765707/765707 [==============================] - 24s 32us/step - loss: 3.0130 - val_loss: 3.6084
Epoch 12/50
765707/765707 [==============================] - 25s 32us/step - loss: 3.0036 - val_loss: 3.6542
Epoch 13/50
765707/765707 [==============================] - 24s 32us/step - loss: 2.9967 - val_loss: 3.6514
Epoch 14/50
765707/765707 [==============================] - 24s 32us/step - loss: 2.9890 - val_loss: 3.6300
Epoch 15/50
765707/765707 [==============================] - 25s 32us/step - loss: 2.9810 - val_loss: 3.6023
Epoch 16/50
765707/765707 [==============================] - 24s 32us/step - loss: 2.9718 - val_loss: 3.5519
Epoch 17/50
765707/765707 [==============================] - 24s 32us/step - loss: 2.9631 - val_loss: 3.5596
Epoch 18/50
765707/765707 [==============================] - 24s 32us/step - loss: 2.9552 - val_loss: 3.5900
Epoch 19/50
765707/765707 [==============================] - 24s 32us/step - loss: 2.9467 - val_loss: 3.6084
Epoch 20/50
765707/765707 [==============================] - 25s 32us/step - loss: 2.9371 - val_loss: 3.5770
Epoch 21/50
765707/765707 [==============================] - 24s 32us/step - loss: 2.9313 - val_loss: 3.6111
Epoch 22/50
765707/765707 [==============================] - 24s 32us/step - loss: 2.9197 - val_loss: 3.6365
Epoch 23/50
765707/765707 [==============================] - 24s 31us/step - loss: 2.9152 - val_loss: 3.6273
Epoch 24/50
765707/765707 [==============================] - 24s 32us/step - loss: 2.9044 - val_loss: 3.7386
Epoch 25/50
765707/765707 [==============================] - 24s 31us/step - loss: 2.9023 - val_loss: 3.7204
Epoch 26/50
765707/765707 [==============================] - 24s 31us/step - loss: 2.8915 - val_loss: 3.7558
Epoch 27/50
765707/765707 [==============================] - 24s 31us/step - loss: 2.8872 - val_loss: 3.7266
Epoch 28/50
765707/765707 [==============================] - 24s 31us/step - loss: 2.8844 - val_loss: 3.6717
Epoch 29/50
765707/765707 [==============================] - 24s 31us/step - loss: 2.8782 - val_loss: 3.7827
Epoch 30/50
765707/765707 [==============================] - 24s 31us/step - loss: 2.8748 - val_loss: 3.7887
Epoch 31/50
765707/765707 [==============================] - 24s 31us/step - loss: 2.8672 - val_loss: 3.7872
Epoch 32/50
765707/765707 [==============================] - 24s 32us/step - loss: 2.8648 - val_loss: 3.8931
Epoch 33/50
765707/765707 [==============================] - 25s 32us/step - loss: 2.8589 - val_loss: 3.8026
Epoch 34/50
765707/765707 [==============================] - 24s 32us/step - loss: 2.8559 - val_loss: 3.8983
Epoch 35/50
765707/765707 [==============================] - 24s 31us/step - loss: 2.8536 - val_loss: 3.8749
Epoch 36/50
765707/765707 [==============================] - 24s 31us/step - loss: 2.8485 - val_loss: 3.8871
Epoch 37/50
765707/765707 [==============================] - 24s 31us/step - loss: 2.8463 - val_loss: 3.8935
Epoch 38/50
765707/765707 [==============================] - 24s 32us/step - loss: 2.8441 - val_loss: 3.9446
Epoch 39/50
765707/765707 [==============================] - 24s 32us/step - loss: 2.8436 - val_loss: 3.9365
Epoch 40/50
765707/765707 [==============================] - 24s 31us/step - loss: 2.8379 - val_loss: 3.9330
Epoch 41/50
765707/765707 [==============================] - 24s 32us/step - loss: 2.8388 - val_loss: 3.9937
Epoch 42/50
765707/765707 [==============================] - 24s 32us/step - loss: 2.8314 - val_loss: 4.0120
Epoch 43/50
765707/765707 [==============================] - 24s 32us/step - loss: 2.8338 - val_loss: 4.0615
Epoch 44/50
765707/765707 [==============================] - 24s 32us/step - loss: 2.8315 - val_loss: 4.0947
Epoch 45/50
765707/765707 [==============================] - 24s 32us/step - loss: 2.8277 - val_loss: 4.2245
Epoch 46/50
765707/765707 [==============================] - 24s 32us/step - loss: 2.8241 - val_loss: 4.0790
Epoch 47/50
765707/765707 [==============================] - 24s 32us/step - loss: 2.8186 - val_loss: 4.2750
Epoch 48/50
765707/765707 [==============================] - 24s 31us/step - loss: 2.8210 - val_loss: 4.1487
Epoch 49/50
765707/765707 [==============================] - 24s 31us/step - loss: 2.8170 - val_loss: 4.1804
Epoch 50/50
765707/765707 [==============================] - 24s 31us/step - loss: 2.8171 - val_loss: 4.3558
###Markdown
Model metrics and plot
###Code
val_predictions = model.predict(X_val)
mse = mean_squared_error(val_predictions, Y_val)
rmse = np.sqrt(mean_squared_error(val_predictions, Y_val))
print('Model validation metrics')
print('MSE: %.2f' % mse)
print('RMSE: %.2f' % rmse)
plot_metrics(history.history['loss'], history.history['val_loss'])
predictions = model.predict(test)
submission = pd.DataFrame({"fullVisitorId":test_ids})
predictions[predictions<0] = 0
submission["PredictedLogRevenue"] = predictions
submission = submission.groupby("fullVisitorId")["PredictedLogRevenue"].sum().reset_index()
submission.columns = ["fullVisitorId", "PredictedLogRevenue"]
submission["PredictedLogRevenue"] = submission["PredictedLogRevenue"]
submission.to_csv("submission.csv", index=False)
submission.head(10)
###Output
_____no_output_____ |
pcg_notebooks/simulation/model_generator.ipynb | ###Markdown
Model generator
###Code
from pcg_gazebo.generators.creators import create_models_from_config
from pcg_gazebo.task_manager import Server
from pcg_gazebo.generators import WorldGenerator
import random
# Start an empty world Gazebo simulation
server = Server()
server.create_simulation('default')
simulation = server.get_simulation('default')
simulation.create_gazebo_empty_world_task()
print(simulation.get_task_list())
print('Is Gazebo running: {}'.format(
simulation.is_task_running('gazebo')))
simulation.run_all_tasks()
# Create a Gazebo proxy
gazebo_proxy = simulation.get_gazebo_proxy()
# Use the generator to spawn the model to the Gazebo instance
# running at the moment
generator = WorldGenerator(gazebo_proxy=gazebo_proxy)
def create_and_spawn(config):
models = create_models_from_config(config)
for model in models:
generator.spawn_model(
model=model,
robot_namespace=model.name,
pos=[
20 * random.random() - 10,
20 * random.random() - 10,
2 * random.random()])
###Output
_____no_output_____
###Markdown
Box-shaped models Creating box models
###Code
config = [
dict(
type='box',
args=dict(
size=[2, 2, 2],
mass=10,
name='box',
pose=[0, 0, 1, 0, 0, 0],
color='random'
)
)
]
create_and_spawn(config)
###Output
_____no_output_____
###Markdown
Creating multiple box models using fixed arguments
###Code
config = [
dict(
type='box_factory',
args=dict(
size=[
[0.1, 0.4, 0.5],
[1, 2, 3]
],
name='box_static_var_size',
use_permutation=True,
color='xkcd'
)
),
dict(
type='box_factory',
args=dict(
size=[
[0.1, 0.4, 0.5],
[1, 2, 3]
],
mass=12,
name='box_dynamic_var_size',
use_permutation=False,
color='xkcd'
)
),
dict(
type='box_factory',
args=dict(
size=[
[0.2, 0.4, 0.15],
[1.2, 0.25, 0.7]
],
mass=[5, 2],
name='box_dynamic_permutate_size_mass',
use_permutation=True,
color='xkcd'
)
)
]
create_and_spawn(config)
###Output
_____no_output_____
###Markdown
Creating multiple boxes with lambda arguments
###Code
config = [
dict(
type='box_factory',
args=dict(
size="__import__('numpy').random.random((2, 3))",
use_permutation=True,
name='box_static_lambdas',
color='random'
)
),
dict(
type='box_factory',
args=dict(
size="__import__('numpy').random.random((4, 3))",
mass="__import__('numpy').arange(1, 10, 4)",
use_permutation=True,
name='box_dynamic_lambdas',
color='random'
)
)
]
create_and_spawn(config)
###Output
_____no_output_____
###Markdown
 Cylinder-shaped models
###Code
config = [
dict(
type='cylinder',
args=dict(
radius=3,
length=2,
mass=10,
name='cylinder',
pose=[0, 0, 1, 0, 0, 0]
)
),
dict(
type='cylinder_factory',
args=dict(
length=[0.3, 0.5],
radius=[0.2, 0.4],
mass=[5, 2],
name='cylinder_dynamic_permutate_radius_length_mass',
use_permutation=True,
color='xkcd'
)
),
dict(
type='cylinder_factory',
args=dict(
length="__import__('numpy').linspace(0.1, 10, 2)",
radius="__import__('numpy').random.random(2)",
mass="__import__('numpy').arange(1, 4, 1)",
use_permutation=True,
name='cylinder_dynamic_lambdas',
color='xkcd'
)
)
]
create_and_spawn(config)
###Output
_____no_output_____
###Markdown
 Sphere-shaped models
###Code
config = [
dict(
type='sphere',
args=dict(
radius=3,
mass=10,
name='sphere',
pose=[0, 0, 1.5, 0, 0, 0]
)
),
dict(
type='sphere_factory',
args=dict(
radius=[0.3, 0.9],
mass=[5, 2],
name='sphere_dynamic_permutate_radius_mass',
use_permutation=True,
color='xkcd'
)
),
dict(
type='sphere_factory',
args=dict(
radius="__import__('numpy').random.random(2) * 3",
mass="__import__('numpy').arange(1, 4, 1)",
use_permutation=True,
name='sphere_dynamic_lambdas',
color='xkcd'
)
)
]
create_and_spawn(config)
###Output
_____no_output_____
###Markdown
 Mesh models
###Code
mesh_filename = 'package://pcg_examples/meshes/monkey_offset.dae'
config = [
dict(
type='mesh',
args=dict(
visual_mesh_filename=mesh_filename,
visual_mesh_scale=[1, 1, 1],
use_approximated_collision=False,
name='monkey_static_no_approx_collision',
color='xkcd'
)
),
dict(
type='mesh',
args=dict(
visual_mesh_filename=mesh_filename,
visual_mesh_scale=[1, 1, 1],
use_approximated_collision=False,
mass=10,
name='monkey_dynamic_no_approx_collision',
color='xkcd'
)
),
dict(
type='mesh',
args=dict(
visual_mesh_filename=mesh_filename,
visual_mesh_scale=[1, 1, 1],
use_approximated_collision=True,
approximated_collision_model='box',
name='monkey_static_with_approx_collision_box',
color='xkcd'
)
),
dict(
type='mesh',
args=dict(
visual_mesh_filename=mesh_filename,
visual_mesh_scale=[1, 1, 1],
use_approximated_collision=True,
approximated_collision_model='box',
mass=20,
name='monkey_dynamic_with_approx_collision_box',
color='xkcd'
)
),
dict(
type='mesh',
args=dict(
visual_mesh_filename=mesh_filename,
visual_mesh_scale=[1, 1, 1],
use_approximated_collision=True,
approximated_collision_model='cylinder',
name='monkey_static_with_approx_collision_cylinder',
color='xkcd'
)
),
dict(
type='mesh',
args=dict(
visual_mesh_filename=mesh_filename,
visual_mesh_scale=[1, 1, 1],
use_approximated_collision=True,
mass=15,
approximated_collision_model='cylinder',
name='monkey_dynamic_with_approx_collision_cylinder',
color='xkcd'
)
),
dict(
type='mesh',
args=dict(
visual_mesh_filename=mesh_filename,
visual_mesh_scale=[1, 1, 1],
use_approximated_collision=True,
approximated_collision_model='sphere',
name='monkey_static_with_approx_collision_sphere',
color='xkcd'
)
),
dict(
type='mesh',
args=dict(
visual_mesh_filename=mesh_filename,
visual_mesh_scale=[1, 1, 1],
use_approximated_collision=True,
mass=3,
approximated_collision_model='sphere',
name='monkey_dynamic_with_approx_collision_sphere',
color='xkcd'
)
),
dict(
type='mesh',
args=dict(
visual_mesh_filename=mesh_filename,
visual_mesh_scale=[1, 1, 1],
use_approximated_collision=True,
mass=3,
approximated_collision_model='sphere',
name='monkey_dynamic_defined_inertia',
color='xkcd',
use_approximated_inertia=False,
inertia=dict(
ixx=0.1,
iyy=0.1,
izz=0.1
)
)
)
]
create_and_spawn(config)
###Output
_____no_output_____ |
example_for_Pawel.ipynb | ###Markdown
To try it, just click on the following Binder badge: [launch binder](https://mybinder.org/v2/gh/VincentRouvreau/mybinder_for_tutorial/HEAD?filepath=example_for_Pawel.ipynb)
###Code
#Here we will compute Betti numbers of
#0 0 0
#0 1 0
#0 0 0
import gudhi
import numpy as np
dims = np.array( [3,3] )
data = np.array( [0, 0, 0, 0, 1, 0, 0, 0, 0] )
cubical_complex = gudhi.CubicalComplex( dimensions = dims , top_dimensional_cells = data )
#If we do this:
cubical_complex.persistence()
b_nrs = cubical_complex.betti_numbers()
print( 'Betti numbers (something is wrong): ', b_nrs )
###Output
Betti numbers (something is wrong): [1, 0, 0]
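###Markdown
A possible follow-up sketch (not verified here): `betti_numbers()` reports the Betti numbers of the full complex, so if the intent was the Betti numbers of the sub-level set at value 0 (the ring of zeros around the central 1), `persistent_betti_numbers` could be queried instead.
###Code
#Sketch: Betti numbers of the sub-level set at filtration value 0,
#i.e. before the central cell with value 1 enters the filtration.
#persistence() must have been called first, as above.
print( cubical_complex.persistent_betti_numbers( 0.0, 0.5 ) )
###Output
_____no_output_____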
|
demos/Record Disambiguation - Objects.ipynb | ###Markdown
Record Disambiguation. In this notebook we perform entity disambiguation on records, specifically person records.
###Code
%load_ext autoreload
%autoreload 2
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from IPython.display import display, Markdown
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_score, recall_score, balanced_accuracy_score
from tqdm.auto import tqdm
import sys
sys.path.append("..")
from heritageconnector.disambiguation.helpers import load_training_data, plot_performance_curves
from heritageconnector.disambiguation.pipelines import Disambiguator
from heritageconnector.disambiguation.postprocessing import filter_cased_wikidata_labels, filter_max_wikidata_links, remove_wikidata_items_with_no_claims
from heritageconnector.utils.wikidata import get_sparql_results, url_to_qid
from heritageconnector.utils.generic import paginate_list
from heritageconnector.config import config
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_rows', None)
###Output
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
###Markdown
1. Load data. This data has already been generated using `Disambiguator.save_training_data_to_folder` and `Disambiguator.save_test_data_to_folder`.
###Code
train_dir = "/Volumes/Kalyan_SSD/SMG/disambiguation/objects_131120/train/"
# test_dir = "/Volumes/Kalyan_SSD/SMG/disambiguation/objects_131120/test_computing_space/"
test_dir = "/Volumes/Kalyan_SSD/SMG/disambiguation/objects_131120/test_photographic_aeronautics/"
# test_dir = "/Volumes/Kalyan_SSD/SMG/disambiguation/objects_131120/test_art_locomotives_and_rolling_stock/"
X, y, pairs, pids = load_training_data(train_dir)
X_new, pairs_new, pids_new = load_training_data(test_dir)
pairs['y'] = y
len(pairs[pairs['y'] == True])
pids, pids_new
X.shape, X_new.shape
X.sum(axis=0), X_new.sum(axis=0)
pairs.head()
###Output
_____no_output_____
###Markdown
2. Train classifier. The disambiguator wraps `sklearn.tree.DecisionTreeClassifier` and takes its parameters as inputs. 2a. Test classifier performance. We'll perform a train/test split on the labelled data to quickly test the classifier's performance using its `score` method. The `score` method here returns [balanced accuracy](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.balanced_accuracy_score.html): accuracy weighted so that each class is considered evenly.
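As a small illustration with made-up labels, balanced accuracy is simply the unweighted mean of the per-class recalls:
###Code
# Toy example only (labels are invented for illustration);
# recall_score and balanced_accuracy_score are already imported above.
toy_true = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
toy_pred = [0, 0, 0, 0, 0, 0, 0, 1, 1, 0]
recall_pos = recall_score(toy_true, toy_pred, pos_label=1)  # 1/2
recall_neg = recall_score(toy_true, toy_pred, pos_label=0)  # 7/8
print((recall_pos + recall_neg) / 2)                        # 0.6875
print(balanced_accuracy_score(toy_true, toy_pred))          # 0.6875
###Output
_____no_output_____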
###Code
X_train, X_test, y_train, y_test = train_test_split(pd.DataFrame(X), y, random_state=42, test_size=0.2, stratify=y)
len(X_train), len(X_test)
train_idx = X_train.index.tolist()
test_idx = X_test.index.tolist()
pairs_train = pairs.loc[train_idx, :]
pairs_test = pairs.loc[test_idx, :]
len(X_train), len(pairs_train), len(X_test), len(pairs_test)
clf = Disambiguator('OBJECT', max_depth=None).fit(X_train, y_train)
for threshold in [0.5, 0.6, 0.7, 0.8, 0.9, 0.95]:
print(str(threshold) + " --- \n" + clf.score(X_test, y_test, threshold))
pairs_predicted = clf.get_predictions_table(X_test, pairs_test, threshold=0.8)
pairs_predicted_filtered = filter_cased_wikidata_labels(pairs_predicted)
pairs_predicted_filtered = remove_wikidata_items_with_no_claims(pairs_predicted_filtered)
y_filtered = pairs_predicted_filtered['y'].tolist()
y_pred_filtered = pairs_predicted_filtered['y_pred'].tolist()
precision_score(y_filtered, y_pred_filtered), recall_score(y_filtered, y_pred_filtered), balanced_accuracy_score(y_filtered, y_pred_filtered)
###Output
_____no_output_____
###Markdown
2b. Analyse classifier results
###Code
y_pred = clf.predict(X_test, threshold=0.8)
TPs = np.where((y_pred==True) & (y_test==True))
FPs = np.where((y_pred==True) & (y_test==False))
FNs = np.where((y_pred==False) & (y_test==True))
TNs = np.where((y_pred==False) & (y_test==False))
print(len(TPs[0]), len(FPs[0]), len(TNs[0]), len(FNs[0]))
pairs_test.iloc[FPs]
X_test.iloc[FPs]
###Output
_____no_output_____
###Markdown
2c. Use classifier to predict new Wikidata links
###Code
clf = Disambiguator('OBJECT').fit(X, y)
y_pred = clf.predict(X_new, threshold=0.9)
y_pred_proba = clf.predict_proba(X_new)
print(f"{np.unique(y_pred, return_counts=True)[1][1]} potential new links found")
pairs_new = clf.get_predictions_table(X_new, pairs_new, threshold=0.9)
display(Markdown("The graph below shows the distribution of the number of predicted matches per SMG ID. Around 75% have a unique match, and most of the remainder have two matches."))
sns.distplot(pairs_new.loc[pairs_new["y_pred"] == True, "internal_id"].value_counts(), kde=False, norm_hist=True).set_ylabel('proportion')
plt.gca().set_title('Count of Number of SMG IDs per True Prediction');
pairs_new.sort_values('y_pred_proba', ascending=False).head(10)
###Output
_____no_output_____
###Markdown
2d. Filter matches
###Code
pairs_true = pairs_new[pairs_new['y_pred'] == True]
print(len(pairs_true))
pairs_true_filtered = filter_cased_wikidata_labels(pairs_true)
pairs_true_filtered = remove_wikidata_items_with_no_claims(pairs_true_filtered)
pairs_true_filtered = filter_max_wikidata_links(pairs_true_filtered, 2)
print("-- After Filtering --")
print(f"No. new links: {len(pairs_true_filtered)}")
print(f"No. SMG items with new links: {len(pairs_true_filtered['internal_id'].unique())}")
###Output
702
###Markdown
3. Explain classifier. We can see that the classifier prioritises P569/P570 (birth and death dates), P21 (gender), label similarity, and occupation. It's interesting to note that P31 (instance of), which tells the classifier whether the Wikidata record is a human, is not used. This is likely because P569/P570/P106/P21 are qualities which only humans can have. P31 is likely to be much more prevalent when classifying objects, and distinguishing between e.g. paintings and posters.
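As a sketch, since the wrapped model is a decision tree, the numeric feature importances could also be inspected; note that `clf.clf` below is only an assumed attribute name for however the `Disambiguator` exposes its underlying sklearn estimator.
###Code
# Sketch only: `clf.clf` is an *assumed* attribute name for the wrapped
# sklearn DecisionTreeClassifier; adjust it to the actual attribute.
underlying_tree = clf.clf
importance = pd.Series(underlying_tree.feature_importances_, index=pids)
importance.sort_values(ascending=False)
###Output
_____no_output_____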
###Code
clf.print_tree(feature_names=pids)
###Output
|--- label <= 0.97
| |--- label <= 0.80
| | |--- class: False
| |--- label > 0.80
| | |--- label <= 0.81
| | | |--- class: True
| | |--- label > 0.81
| | | |--- label <= 0.81
| | | | |--- class: False
| | | |--- label > 0.81
| | | | |--- class: False
|--- label > 0.97
| |--- P31 <= 0.13
| | |--- P31 <= 0.00
| | | |--- class: True
| | |--- P31 > 0.00
| | | |--- class: True
| |--- P31 > 0.13
| | |--- P31 <= 0.75
| | | |--- class: True
| | |--- P31 > 0.75
| | | |--- class: True
###Markdown
4. Export model and final predictions
###Code
# clf.save_classifier_to_disk("/Volumes/Kalyan_SSD/SMG/disambiguation/objects_131120/clf.pkl")
pairs_true_filtered.sort_values('y_pred_proba', ascending=False).to_csv(
"/Volumes/Kalyan_SSD/SMG/disambiguation/objects_131120/computing_space_preds_positive.csv", index=False)
###Output
_____no_output_____
###Markdown
You can also use the below cell to export a sample of positive and negative samples to an Excel document for manual review
###Code
pairs_pos_sample = pairs_new[pairs_new['y_pred'] == True].sample(30, random_state=42)
pairs_neg_sample = pairs_new[pairs_new['y_pred'] == False].sample(30, random_state=42)
pairs_sample = pd.concat([pairs_pos_sample, pairs_neg_sample], ignore_index=False)
pairs_sample = pairs_sample.copy()
pairs_sample['wikidata_id'] = "https://www.wikidata.org/entity/" + pairs_sample['wikidata_id']
pairs_sample.to_excel("objects_classifier_sample_for_review.xlsx")
###Output
_____no_output_____ |
notebooks/Credit Risk Analysis.ipynb | ###Markdown
Credit Risk Analysis (1) Data Cleaning
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
Remember to include index_col=0, or it will generate an "Unnamed: 0" column.
###Code
data = pd.read_csv("FiveYearData.csv", index_col=0)
###Output
D:\Work\Anaconda\lib\site-packages\IPython\core\interactiveshell.py:2785: DtypeWarning: Columns (30,33,34,50,51) have mixed types.Specify dtype option on import or set low_memory=False.
interactivity=interactivity, compiler=compiler, result=result)
D:\Work\Anaconda\lib\site-packages\numpy\lib\arraysetops.py:568: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
mask |= (ar1 == a)
###Markdown
Select the columns from the original data file that are used as inputs for prediction; also select the final column, "Default", as the target labels.
###Code
data = pd.concat([data[data.columns[:26]],data[data.columns[-1]]],1)
data.head()
###Output
_____no_output_____
###Markdown
We can see there are NaN values. So first, we want to take a look at these missing values.
###Code
#check the missing fraction of each variable and sort them with missing fractions
missing_fractions = data.isnull().mean().sort_values(ascending=False)
missing_fractions[:10]
###Output
_____no_output_____
###Markdown
We can see only four variables have missing values. flag_sc has a very large fraction of missing values. So we drop this variable.
###Code
#drop the variable with missing values over 50%
drop_list = sorted(list(missing_fractions[missing_fractions > 0.5].index))
print(drop_list)
data.drop(labels=drop_list, axis=1, inplace=True)
#check again the missing_fractions
missing_fractions = data.isnull().mean().sort_values(ascending=False)
missing_fractions[:10]
#check the three variables with some missing values one by one
data.cd_msa.sample(10)
#fill missing values with the median of this column
data.cd_msa.fillna(data.cd_msa.median(),inplace=True)
data.ppmt_pnlty.describe()
#there's only one unique value for ppmt_pnlty, we can fill it with this value 'N'
#but actually, this column should be dropped since it has only one unique value
# data.ppmt_pnlty.fillna('N',inplace = True)
data.drop(['ppmt_pnlty'],axis=1, inplace=True)
data.zipcode.describe()
#similarly, fill missing zipcodes with median
data.zipcode.fillna(data.zipcode.median(),inplace=True)
###Output
_____no_output_____
###Markdown
Next, we want to check if there are other variables with only one unique value.
###Code
for col in data.columns:
unique_list = data[col].unique().tolist()
if len(unique_list) < 2:
print(col)
###Output
_____no_output_____
###Markdown
No other single-valued variables, good! Next, we do some data transformation.
###Code
import numpy as np
#find the columns with numerical values.
numeric_cols = data.select_dtypes(include=[np.number]).columns
numeric_cols
from scipy.stats import zscore
#apply z-score transformation to numerical columns
data[numeric_cols] = data[numeric_cols].apply(zscore)
#find non-numerical columns
non_numeric_cols = []
for col in data.columns:
if not col in numeric_cols:
non_numeric_cols.append(col)
data[non_numeric_cols]
###Output
_____no_output_____
###Markdown
We notice that prod_type looks like it has only one value, but it didn't appear in the previous uniqueness check. Why?
###Code
data.prod_type.describe()
data.prod_type.unique()
###Output
_____no_output_____
###Markdown
That's it! There's one sample annotated as '_'. So let's drop this column.
###Code
data.drop(['prod_type'],axis=1, inplace=True)
#define again the non-numerical columns since prod_type is dropped
non_numeric_cols = []
for col in data.columns:
if not col in numeric_cols:
non_numeric_cols.append(col)
data[non_numeric_cols]
###Output
_____no_output_____
###Markdown
Next, we could create dummy variables for these categorical variables. But pd.get_dummies() failed with a MemoryError, so I created a manual function that maps each unique value to an integer starting from 0.
###Code
def replace_cate(data, non_numerical_cols):
    '''
    Map each unique value of the given non-numerical columns (excluding the
    first entry, id_loan) to an integer code, in place.
    '''
    #exclude id_loan
    for col in non_numerical_cols[1:]:
        col_dict = {}
        count = 0
        unique_list = data[col].unique().tolist()
        for val in unique_list:
            col_dict[val] = count
            count += 1
        #replace the original categories with their integer codes
        getattr(data, col).replace(col_dict, inplace=True)
replace_cate(data, non_numeric_cols)
data[non_numeric_cols].sample(5)
###Output
_____no_output_____
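###Markdown
As an aside, a sketch of a lighter-weight alternative to the manual mapping: pandas category codes give the same kind of integer encoding without building the dictionaries by hand.
###Code
#sketch only: integer-encode the non-numerical columns (except id_loan) via
#pandas category codes, on a copy purely for illustration (the columns in
#`data` have already been encoded by replace_cate above)
data_alt = data.copy()
for col in non_numeric_cols[1:]:
    data_alt[col] = data_alt[col].astype('category').cat.codes
data_alt[non_numeric_cols[1:]].sample(5)
###Output
_____no_output_____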
###Markdown
We can see these categorical variables are mapped to integers, but some variables, like st, have too many discrete values, so we may need to create bins for them. Before that, we drop the id_loan variable since it is a unique label for each sample.
###Code
data = data[data.columns[1:]]
###Output
_____no_output_____
###Markdown
(2) Feature Selection with WoE
###Code
import scorecardpy as sc
###Output
_____no_output_____
###Markdown
Split the train and test sets with a 7:3 ratio.
###Code
train, test = sc.split_df(data,
y = 'Default',
ratio = 0.7, seed = 251120887).values()
train.to_csv('train_noWoE.csv', index=False)
test.to_csv('test_noWoE.csv', index=False)
###Output
_____no_output_____
###Markdown
Create woe bins with scorecardpy. The parameters are set as default according to the sample code.
###Code
bins = sc.woebin(train, y = 'Default',
min_perc_fine_bin=0.05, # How many bins to cut initially into
min_perc_coarse_bin=0.05, # Minimum percentage per final bin
stop_limit=0.1, # Minimum information value
max_num_bin=8, # Maximum number of bins
method='tree')
bins
train_woe = sc.woebin_ply(train, bins) # Calculate WoE dataset (train)
test_woe = sc.woebin_ply(test, bins) # Calculate WoE dataset (test)
train_woe.head()
bins.get('dt_first_pi')
###Output
_____no_output_____
###Markdown
Compute the Information Values for each variable.
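For reference, for each bin WoE = ln(%good / %bad) and IV is the sum over bins of (%good - %bad) * WoE. A rough manual sketch on one raw variable (treating every distinct value as its own bin, unlike scorecardpy's optimal binning):
###Code
#rough manual IV sketch for a single raw variable, to show what sc.iv reports
def manual_iv(df, col, target='Default', eps=1e-6):
    #treat the larger of the two target values as the "bad" (default) class
    is_bad = df[target] == df[target].max()
    tab = pd.crosstab(df[col], is_bad)
    good, bad = tab[False], tab[True]
    dist_good = good / good.sum()
    dist_bad = bad / bad.sum()
    woe = np.log((dist_good + eps) / (dist_bad + eps))
    return float(((dist_good - dist_bad) * woe).sum())
manual_iv(train, 'loan_purpose')
###Output
_____no_output_____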
###Code
sc.iv(train_woe, 'Default')
###Output
_____no_output_____
###Markdown
We can observe that some variables have IV < 0.02, which must be dropped, and some have IV between 0.02 and 0.1, which are suggested to be dropped. I set a medium threshold of 0.05 and drop the variables with IV < 0.05.
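Such a list could also be derived from the IV table rather than typed out; a short sketch is below (the column names 'variable' and 'info_value' of the sc.iv output are assumed).
###Code
#sketch only: build the drop list from the IV table instead of hard-coding it
#(the sc.iv column names 'variable' and 'info_value' are assumed here)
iv_table = sc.iv(train_woe, 'Default')
low_iv = iv_table.loc[iv_table['info_value'] < 0.05, 'variable'].tolist()
low_iv = [c for c in low_iv if c != 'Default']
low_iv
###Output
_____no_output_____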
###Code
drop_list = ['cnt_units_woe','prop_type_woe','channel_woe','occpy_sts_woe','loan_purpose_woe',
'flag_fthb_woe','orig_upb_woe','cd_msa_woe','seller_name_woe','orig_loan_term_woe','zipcode_woe','dt_matr_woe']
train_woe.drop(labels=drop_list, axis=1, inplace=True)
train_woe.columns
test_woe.drop(labels=drop_list, axis=1, inplace=True)
#store these woe data
train_woe.to_csv("train_woe.csv", index = False)
test_woe.to_csv("test_woe.csv", index = False)
###Output
_____no_output_____
###Markdown
Next, we compute the correlations between variables to surface any relationships we are not yet aware of.
###Code
corr = train_woe.corr()
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
#It's necessary to keep numpy < 1.18.4, or seaborn.diverging_palette will fail.
np.__version__
sns.set_context(context='notebook')
fig, ax = plt.subplots(figsize=(10,10))
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.tril_indices_from(mask)] = True
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, cmap=cmap,linewidths=1, vmin=-1, vmax=1, square=True, cbar=True, annot=True,fmt="0.2f",center=0, ax=ax, mask=mask)
###Output
_____no_output_____
###Markdown
We find that (ltv_woe, cltv_woe, mi_pct_woe) have high correlations. We should pay attention to these variables. But for the moment, we don't drop any of them. (3) Scorecard model with Logistic Regression
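Note that class_weight='balanced' in the model below reweights each class inversely to its frequency, i.e. n_samples / (n_classes * class_count); a quick sketch of the weights this implies for the training target:
###Code
#sketch: the per-class weights implied by class_weight='balanced'
counts = train_woe['Default'].value_counts().sort_index()
len(train_woe) / (2 * counts)
###Output
_____no_output_____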
###Code
#read data files if necessary
train_woe = pd.read_csv('train_woe.csv')
test_woe = pd.read_csv('test_woe.csv')
train_noWoE = pd.read_csv('train_noWoE.csv')
test_noWoE = pd.read_csv('test_noWoE.csv')
from sklearn.linear_model import LogisticRegressionCV
LRClassifier = LogisticRegressionCV(penalty='l1', # Type of penalization l1 = lasso, l2 = ridge
Cs = 10, # How many parameters to try. Can also be a vector with parameters to try.
tol=0.0001, # Tolerance for parameters
cv = 3, # How many CV folds to try. 3 or 5 should be enough.
fit_intercept=True, # Use constant?
class_weight='balanced', # Weights, see below
random_state=251120887, # Random seed
max_iter=100, # Maximum iterations
verbose=0, # Show process. 1 is yes.
solver = 'saga', # How to optimize.
n_jobs = 2, # Processes to use. Set to number of physical cores.
refit = True # If to retrain with the best parameter and all data after finishing.
)
LRClassifier.fit(X = train_woe.iloc[:, 1:], # All rows and from the second var to end
y = train_woe['Default'] # The target
)
###Output
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
D:\Work\Anaconda\lib\site-packages\sklearn\linear_model\sag.py:326: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
"the coef_ did not converge", ConvergenceWarning)
###Markdown
The LR model did not converge. However, it already takes a long time to run, so we do not add more trials such as increasing the maximum number of iterations. Let's check the parameters.
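(As an aside, a sketch, not run here, of what one could try if convergence mattered: raise max_iter and loosen tol while keeping the saga solver, which is required for the l1 penalty.)
###Code
#sketch only (not executed): possible remedies for the convergence warnings
LRClassifier_alt = LogisticRegressionCV(penalty='l1', Cs=10, cv=3,
                                        solver='saga',          # saga supports l1
                                        max_iter=1000,          # more iterations
                                        tol=1e-3,               # looser tolerance
                                        class_weight='balanced',
                                        random_state=251120887,
                                        n_jobs=2)
#LRClassifier_alt.fit(train_woe.iloc[:, 1:], train_woe['Default'])
###Output
_____no_output_____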
###Code
coef_df = pd.concat([pd.DataFrame({'column': train_woe.columns[1:]}),
pd.DataFrame(np.transpose(LRClassifier.coef_))],
axis = 1
)
coef_df
LRClassifier.intercept_
###Output
_____no_output_____
###Markdown
Apply trained model to test data
###Code
test_woe['Default'].sum()
pred_class_test = LRClassifier.predict(test_woe.iloc[:, 1:])
probs_test = LRClassifier.predict_proba(test_woe.iloc[:, 1:])
pred_class_test.sum()
###Output
_____no_output_____
###Markdown
It looks the model predict many more good loans as default. Check the classification results.
###Code
from sklearn.metrics import roc_auc_score, confusion_matrix, roc_curve
#Calculate confusion matrix
confusion_matrix_lr = confusion_matrix(y_true = test_woe['Default'],
y_pred = pred_class_test)
confusion_matrix_lr
# # Turn matrix to percentages
confusion_matrix_lr = confusion_matrix_lr.astype('float') / confusion_matrix_lr.sum(axis=1)[:, np.newaxis]
# Turn to dataframe
df_cm = pd.DataFrame(
confusion_matrix_lr, index=['good', 'bad'], columns=['good', 'bad'],
)
# Parameters of the image
figsize = (10,7)
fontsize=14
# Create image
fig = plt.figure(figsize=figsize)
heatmap = sns.heatmap(df_cm, annot=True, fmt='.2f')
# Make it nicer
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0,
ha='right', fontsize=fontsize)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45,
ha='right', fontsize=fontsize)
# Add labels
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Plot!
plt.show()
from sklearn.metrics import classification_report
print("LR",classification_report(test_woe['Default'], pred_class_test, target_names=None,digits=4))
###Output
LR precision recall f1-score support
0 0.9890 0.6563 0.7890 614209
1 0.0491 0.7085 0.0918 15369
avg / total 0.9661 0.6576 0.7720 629578
###Markdown
The precision, recall, and F1-score don't look too bad, though. Let's look at the AUC.
###Code
# Calculate the ROC curve points
fpr, tpr, thresholds = roc_curve(test['Default'], probs_test[:,1])
# Save the AUC in a variable to display it. Round it first
auc = np.round(roc_auc_score(y_true = test['Default'],
y_score = probs_test[:,1]),
decimals = 3)
# Create and show the plot
plt.plot(fpr,tpr,label="LR, auc="+str(auc))
plt.legend(loc=4)
plt.show()
###Output
_____no_output_____
###Markdown
The AUC score is not ideal. :( Create the scorecard.
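For context, points0/odds0/pdo set the points-to-odds scale: factor = pdo / ln(2) and, following scorecardpy's convention (assumed here), offset = points0 + factor * ln(odds0), so that score = offset - factor * ln(odds). A quick sketch of the implied constants:
###Code
#sketch: scaling constants implied by points0=750, odds0=0.01, pdo=50
#(the offset formula follows scorecardpy's convention as assumed above)
points0, odds0, pdo = 750, 0.01, 50
factor = pdo / np.log(2)                   #about 72.1 points per doubling of the odds
offset = points0 + factor * np.log(odds0)  #about 417.8
factor, offset
###Output
_____no_output_____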
###Code
# Calculate scorecard
LR_sc = sc.scorecard(bins, LRClassifier,
train_woe.columns[1:], # The column names in the trained LR
points0=750, # Base points
odds0=0.01, # Base odds
pdo=50) # PDO
# Applying the credit score. Applies over the original data!
train_score = sc.scorecard_ply(train, LR_sc,
print_step=0)
test_score = sc.scorecard_ply(test, LR_sc,
print_step=0)
train_score.describe()
###Output
_____no_output_____
###Markdown
Random Forest
###Code
from sklearn.ensemble import RandomForestClassifier
#Define the classifier
RFClassifier = RandomForestClassifier(n_estimators=1000, # Number of trees to train
criterion='gini', # How to train the trees. Also supports entropy.
max_depth=None, # Max depth of the trees. Not necessary to change.
min_samples_split=2, # Minimum samples to create a split.
min_samples_leaf=0.001, # Minimum samples in a leaf. Accepts fractions for %. This is 0.1% of sample.
min_weight_fraction_leaf=0.0, # Same as above, but uses the class weights.
max_features='auto', # Maximum number of features per split (not tree!) by default is sqrt(vars)
max_leaf_nodes=None, # Maximum number of nodes.
min_impurity_decrease=0.0001, # Minimum impurity decrease. This is 10^-3.
bootstrap=True, # If sample with repetition. For large samples (>100.000) set to false.
oob_score=True, # If report accuracy with non-selected cases.
n_jobs=-1, # Parallel processing. Set to -1 for all cores. Watch your RAM!!
random_state=251120887, # Seed
verbose=1, # If to give info during training. Set to 0 for silent training.
warm_start=False, # If train over previously trained tree.
class_weight='balanced'
)
# Train the RF.
RFClassifier.fit(train_noWoE.iloc[:,:-1], # X
train_noWoE['Default']) # y
rf_pred_class_test = RFClassifier.predict(test_noWoE.iloc[:, :-1])
rf_probs_test = RFClassifier.predict_proba(test_noWoE.iloc[:, :-1])
confusion_matrix_rf = confusion_matrix(y_true = test_noWoE['Default'],
y_pred = rf_pred_class_test)
# Turn matrix to percentages
confusion_matrix_rf = confusion_matrix_rf.astype('float') / confusion_matrix_rf.sum(axis=1)[:, np.newaxis]
confusion_matrix_rf
# Turn to dataframe
df_cm = pd.DataFrame(
confusion_matrix_rf, index=['good', 'bad'], columns=['good', 'bad'],
)
# Parameters of the image
figsize = (10,7)
fontsize=14
# Create image
fig = plt.figure(figsize=figsize)
heatmap = sns.heatmap(df_cm, annot=True, fmt='.2f')
# Make it nicer
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0,
ha='right', fontsize=fontsize)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45,
ha='right', fontsize=fontsize)
# Add labels
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Plot!
plt.show()
###Output
_____no_output_____
###Markdown
The confusion matrix looks much better than Logistic Regression.
###Code
from sklearn.metrics import classification_report
print("RF",classification_report(test_woe['Default'], rf_pred_class_test, target_names=None,digits=4))
###Output
RF precision recall f1-score support
0 0.9919 0.7472 0.8523 614209
1 0.0697 0.7573 0.1277 15369
avg / total 0.9694 0.7474 0.8346 629578
###Markdown
But the recall and F1-score are inferior to Logistic Regression's.
###Code
# Calculate the ROC curve points
fpr, tpr, thresholds = roc_curve(test['Default'], rf_probs_test[:,1])
# Save the AUC in a variable to display it. Round it first
auc = np.round(roc_auc_score(y_true = test['Default'],
y_score = rf_probs_test[:,1]),
decimals = 3)
# Create and show the plot
plt.plot(fpr,tpr,label="RF, auc="+str(auc))
plt.legend(loc=4)
plt.show()
###Output
_____no_output_____
###Markdown
But the AUC is much better. XGBoost
###Code
from xgboost import XGBClassifier
#Define the classifier.
XGB_loan = XGBClassifier(max_depth=3, # Depth of each tree
learning_rate=0.1, # How much to shrink error in each subsequent training. Trade-off with no. estimators.
n_estimators=100, # How many trees to use, the more the better, but decrease learning rate if many used.
verbosity=1, # If to show more errors or not.
objective='binary:logistic', # Type of target variable.
booster='gbtree', # What to boost. Trees in this case.
n_jobs=2, # Parallel jobs to run. Set your processor number.
gamma=0.001, # Minimum loss reduction required to make a further partition on a leaf node of the tree. (Controls growth!)
subsample=0.632, # Subsample ratio. Can set lower
colsample_bytree=1, # Subsample ratio of columns when constructing each tree.
colsample_bylevel=1, # Subsample ratio of columns when constructing each level. 0.33 is similar to random forest.
colsample_bynode=1, # Subsample ratio of columns when constructing each split.
reg_alpha=1, # Regularizer for first fit. alpha = 1, lambda = 0 is LASSO.
reg_lambda=0, # Regularizer for first fit.
scale_pos_weight=1, # Balancing of positive and negative weights.
base_score=0.5, # Global bias. Set to average of the target rate.
random_state=251120887, # Seed
missing=None # How are nulls encoded?
)
# Define the parameters.
param_grid = dict({'n_estimators': [50, 100, 150],
'max_depth': [2, 3, 4],
'learning_rate' : [0.01, 0.05, 0.1, 0.15]
})
# Create validation set
val_train = train_noWoE.sample(frac = 0.5, # The fraction to extract
random_state = 251120887 # The seed.
)
from sklearn.model_selection import GridSearchCV
# Define grid search object.
GridXGB = GridSearchCV(XGB_loan, # Original XGB.
param_grid, # Parameter grid
cv = 3, # Number of cross-validation folds.
scoring = 'roc_auc', # How to rank outputs.
n_jobs = 2, # Parallel jobs. -1 is "all you have"
refit = False, # If refit at the end with the best. We'll do it manually.
verbose = 1 # If to show what it is doing.
)
# Train grid search.
GridXGB.fit(val_train.iloc[:, :-1], val_train['Default'])
# Show best params
GridXGB.best_params_
# Create XGB with best parameters.
XGB_best = XGBClassifier(max_depth=GridXGB.best_params_.get('max_depth'), # Depth of each tree
learning_rate=GridXGB.best_params_.get('learning_rate'), # How much to shrink error in each subsequent training. Trade-off with no. estimators.
n_estimators=GridXGB.best_params_.get('n_estimators'), # How many trees to use, the more the better, but decrease learning rate if many used.
verbosity=1, # If to show more errors or not.
objective='binary:logistic', # Type of target variable.
booster='gbtree', # What to boost. Trees in this case.
n_jobs=4, # Parallel jobs to run. Set your processor number.
gamma=0.001, # Minimum loss reduction required to make a further partition on a leaf node of the tree. (Controls growth!)
subsample=1, # Subsample ratio. Can set lower
colsample_bytree=1, # Subsample ratio of columns when constructing each tree.
colsample_bylevel=1, # Subsample ratio of columns when constructing each level. 0.33 is similar to random forest.
colsample_bynode=1, # Subsample ratio of columns when constructing each split.
reg_alpha=1, # Regularizer for first fit. alpha = 1, lambda = 0 is LASSO.
reg_lambda=0, # Regularizer for first fit.
scale_pos_weight=1, # Balancing of positive and negative weights.
base_score=0.5, # Global bias. Set to average of the target rate.
random_state=251120887, # Seed
missing=None # How are nulls encoded?
)
# Train over all training data.
XGB_best.fit(train_noWoE.iloc[:, :-1], train_noWoE['Default'])
# Calculate probability
XGBClassTest = XGB_best.predict(test_noWoE.iloc[:, :-1])
xg_probs_test = XGB_best.predict_proba(test_noWoE.iloc[:, :-1])
xg_probs_test = xg_probs_test[:, 1]
# Calculate confusion matrix
confusion_matrix_xgb = confusion_matrix(y_true = test_noWoE['Default'],
y_pred = XGBClassTest)
# Turn matrix to percentages
confusion_matrix_xgb = confusion_matrix_xgb.astype('float') / confusion_matrix_xgb.sum(axis=1)[:, np.newaxis]
# Turn to dataframe
df_cm = pd.DataFrame(
confusion_matrix_xgb, index=['good', 'bad'], columns=['good', 'bad'],
)
# Parameters of the image
figsize = (10,7)
fontsize=14
# Create image
fig = plt.figure(figsize=figsize)
heatmap = sns.heatmap(df_cm, annot=True, fmt='.2f')
# Make it nicer
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0,
ha='right', fontsize=fontsize)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45,
ha='right', fontsize=fontsize)
# Add labels
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Plot!
plt.show()
###Output
D:\Work\Anaconda\lib\site-packages\sklearn\preprocessing\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.
if diff:
###Markdown
Much worse than LR: there are far too many false positives (0.94).
###Code
# Calculate the ROC curve points
fpr, tpr, thresholds = roc_curve(test_noWoE['Default'],
xg_probs_test)
# Save the AUC in a variable to display it. Round it first
auc = np.round(roc_auc_score(y_true = test_noWoE['Default'],
y_score = xg_probs_test),
decimals = 3)
# Create and show the plot
plt.plot(fpr,tpr,label="AUC - XGBoosting = " + str(auc))
plt.legend(loc=4)
plt.show()
###Output
_____no_output_____
###Markdown
The AUC is good. So the AUC scores for LR, RF, and XGBoost are 0.746, 0.827, and 0.853: XGBoost is the best! (5) Variable Importance. Next, we want to evaluate the variable importances of these models. Random Forest
###Code
# Plot variable importance
importances = RFClassifier.feature_importances_
indices = np.argsort(importances)[::-1]
f, ax = plt.subplots(figsize=(3, 8))
plt.title("Variable Importance - Random Forest")
sns.set_color_codes("pastel")
sns.barplot(y=[train_noWoE.iloc[:, :-1].columns[i] for i in indices], x=importances[indices],
label="Total", color="b")
ax.set(ylabel="Variable",
xlabel="Variable Importance (Gini)")
sns.despine(left=True, bottom=True)
###Output
_____no_output_____
###Markdown
XGBoost
###Code
# Plot variable importance
importances = XGB_best.feature_importances_
indices = np.argsort(importances)[::-1]
f, ax = plt.subplots(figsize=(3, 8))
plt.title("Variable Importance - XGBoosting")
sns.set_color_codes("pastel")
sns.barplot(y=[train_noWoE.iloc[:, :-1].columns[i] for i in indices], x=importances[indices],
label="Total", color="b")
ax.set(ylabel="Variable",
xlabel="Variable Importance (Gini)")
sns.despine(left=True, bottom=True)
###Output
_____no_output_____
###Markdown
Scorecard
###Code
sc.iv(train_woe, 'Default')
###Output
_____no_output_____ |